diff --git a/docs/versioned-plugins/codecs-index.asciidoc b/docs/versioned-plugins/codecs-index.asciidoc new file mode 100644 index 000000000..02e8a7ba7 --- /dev/null +++ b/docs/versioned-plugins/codecs-index.asciidoc @@ -0,0 +1,9 @@ +:type: codec +:type_uc: Codec + +include::include/plugin-intro.asciidoc[] + +include::codecs/cef-index.asciidoc[] +include::codecs/json-index.asciidoc[] +include::codecs/rubydebug-index.asciidoc[] + diff --git a/docs/versioned-plugins/codecs/cef-index.asciidoc b/docs/versioned-plugins/codecs/cef-index.asciidoc new file mode 100644 index 000000000..4f2fbc35e --- /dev/null +++ b/docs/versioned-plugins/codecs/cef-index.asciidoc @@ -0,0 +1,20 @@ +:plugin: cef +:type: codec + +include::{include_path}/version-list-intro.asciidoc[] + +|======================================================================= +| Version | Release Date +| <<v5.0.2-plugins-codecs-cef,v5.0.2>> | 2017-11-07 +| <<v5.0.1-plugins-codecs-cef,v5.0.1>> | 2017-08-15 +| <<v5.0.0-plugins-codecs-cef,v5.0.0>> | 2017-08-01 +| <<v4.1.4-plugins-codecs-cef,v4.1.4>> | 2017-08-18 +| <<v4.1.3-plugins-codecs-cef,v4.1.3>> | 2017-06-23 +|======================================================================= + +include::cef-v5.0.2.asciidoc[] +include::cef-v5.0.1.asciidoc[] +include::cef-v5.0.0.asciidoc[] +include::cef-v4.1.4.asciidoc[] +include::cef-v4.1.3.asciidoc[] + diff --git a/docs/versioned-plugins/codecs/cef-v4.1.3.asciidoc b/docs/versioned-plugins/codecs/cef-v4.1.3.asciidoc new file mode 100644 index 000000000..91c65488f --- /dev/null +++ b/docs/versioned-plugins/codecs/cef-v4.1.3.asciidoc @@ -0,0 +1,164 @@ +:plugin: cef +:type: codec + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v4.1.3 +:release_date: 2017-06-23 +:changelog_url: https://github.com/logstash-plugins/logstash-codec-cef/blob/v4.1.3/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Cef codec plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +Implementation of a Logstash codec for the ArcSight Common Event Format (CEF), based on Revision 20 of Implementing ArcSight CEF, dated June 05, 2013: +https://community.saas.hpe.com/dcvta86296/attachments/dcvta86296/connector-documentation/1116/1/CommonEventFormatv23.pdf + +If this codec receives a payload from an input that is not a valid CEF message, then it will +produce an event with the payload as the `message` field and a `_cefparsefailure` tag.
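+ +For example, a minimal decode sketch (the TCP input and port number here are illustrative assumptions, not requirements): + + input { + tcp { + port => 5000 + codec => cef { } + } + }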
+ +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Cef Codec Configuration Options + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-delimiter>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-fields>> |{logstash-ref}/configuration-file-structure.html#array[array]|No +| <<{version}-plugins-{type}s-{plugin}-name>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-product>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-severity>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-signature>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-vendor>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-version>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +|======================================================================= + +&nbsp; + +[id="{version}-plugins-{type}s-{plugin}-delimiter"] +===== `delimiter` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * There is no default value for this setting. + +If your input puts a delimiter between each CEF event, you'll want to set +this to be that delimiter. + +For example, with the TCP input, you probably want to put this: + + input { + tcp { + codec => cef { delimiter => "\r\n" } + # ... + } + } + +This setting allows the following character sequences to have special meaning: + +* `\\r` (backslash "r") - means carriage return (ASCII 0x0D) +* `\\n` (backslash "n") - means newline (ASCII 0x0A) + +[id="{version}-plugins-{type}s-{plugin}-deprecated_v1_fields"] +===== `deprecated_v1_fields` (DEPRECATED) + + * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions. + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] + * There is no default value for this setting. + +Set this flag if you want to have both v1 and v2 fields indexed at the same time. Note that this option will increase +the index size and the data stored in outputs like Elasticsearch. +This option is available to ease the transition to the new schema. + +[id="{version}-plugins-{type}s-{plugin}-fields"] +===== `fields` + + * Value type is {logstash-ref}/configuration-file-structure.html#array[array] + * Default value is `[]` + +Fields to be included in the CEF extension part as key/value pairs. + +[id="{version}-plugins-{type}s-{plugin}-name"] +===== `name` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * Default value is `"Logstash"` + +Name field in CEF header. The new value can include `%{foo}` strings +to help you build a new value from other parts of the event. + +[id="{version}-plugins-{type}s-{plugin}-product"] +===== `product` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * Default value is `"Logstash"` + +Device product field in CEF header. The new value can include `%{foo}` strings +to help you build a new value from other parts of the event.
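+ +Taken together, the header options (`name` and `product` above, plus `severity`, `signature`, `vendor`, and `version` below) shape encoded output; a hedged sketch (the UDP destination and the `%{...}` field references are assumptions for illustration only): + + output { + udp { + host => "syslog.example.com" + port => 514 + codec => cef { + # the %{...} names below are hypothetical event fields; + # invalid severity values fall back to the default of 6 + vendor => "Example" + product => "%{program}" + severity => "%{syslog_severity}" + fields => ["src_ip", "dest_ip"] + } + } + }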
+ +[id="{version}-plugins-{type}s-{plugin}-sev"] +===== `sev` (DEPRECATED) + + * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions. + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * There is no default value for this setting. + +Deprecated severity field for the CEF header. The new value can include `%{foo}` strings +to help you build a new value from other parts of the event. + +This field is used only if `severity` is left unchanged at its default value. + +Defined as a field of type string to allow sprintf. The value will be validated +to be an integer in the range from 0 to 10 (inclusive). +All invalid values will be mapped to the default of 6. + +[id="{version}-plugins-{type}s-{plugin}-severity"] +===== `severity` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * Default value is `"6"` + +Severity field in CEF header. The new value can include `%{foo}` strings +to help you build a new value from other parts of the event. + +Defined as a field of type string to allow sprintf. The value will be validated +to be an integer in the range from 0 to 10 (inclusive). +All invalid values will be mapped to the default of 6. + +[id="{version}-plugins-{type}s-{plugin}-signature"] +===== `signature` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * Default value is `"Logstash"` + +Signature ID field in CEF header. The new value can include `%{foo}` strings +to help you build a new value from other parts of the event. + +[id="{version}-plugins-{type}s-{plugin}-vendor"] +===== `vendor` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * Default value is `"Elasticsearch"` + +Device vendor field in CEF header. The new value can include `%{foo}` strings +to help you build a new value from other parts of the event. + +[id="{version}-plugins-{type}s-{plugin}-version"] +===== `version` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * Default value is `"1.0"` + +Device version field in CEF header. The new value can include `%{foo}` strings +to help you build a new value from other parts of the event. + + diff --git a/docs/versioned-plugins/codecs/cef-v4.1.4.asciidoc b/docs/versioned-plugins/codecs/cef-v4.1.4.asciidoc new file mode 100644 index 000000000..238113395 --- /dev/null +++ b/docs/versioned-plugins/codecs/cef-v4.1.4.asciidoc @@ -0,0 +1,164 @@ +:plugin: cef +:type: codec + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v4.1.4 +:release_date: 2017-08-18 +:changelog_url: https://github.com/logstash-plugins/logstash-codec-cef/blob/v4.1.4/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Cef codec plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +Implementation of a Logstash codec for the ArcSight Common Event Format (CEF), based on Revision 20 of Implementing ArcSight CEF, dated June 05, 2013: +https://community.saas.hpe.com/dcvta86296/attachments/dcvta86296/connector-documentation/1116/1/CommonEventFormatv23.pdf + +If this codec receives a payload from an input that is not a valid CEF message, then it will +produce an event with the payload as the `message` field and a `_cefparsefailure` tag. + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Cef Codec Configuration Options + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-delimiter>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-fields>> |{logstash-ref}/configuration-file-structure.html#array[array]|No +| <<{version}-plugins-{type}s-{plugin}-name>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-product>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-severity>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-signature>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-vendor>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-version>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +|======================================================================= + +&nbsp; + +[id="{version}-plugins-{type}s-{plugin}-delimiter"] +===== `delimiter` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * There is no default value for this setting. + +If your input puts a delimiter between each CEF event, you'll want to set +this to be that delimiter. + +For example, with the TCP input, you probably want to put this: + + input { + tcp { + codec => cef { delimiter => "\r\n" } + # ... + } + } + +This setting allows the following character sequences to have special meaning: + +* `\\r` (backslash "r") - means carriage return (ASCII 0x0D) +* `\\n` (backslash "n") - means newline (ASCII 0x0A) + +[id="{version}-plugins-{type}s-{plugin}-deprecated_v1_fields"] +===== `deprecated_v1_fields` (DEPRECATED) + + * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions. + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] + * There is no default value for this setting. + +Set this flag if you want to have both v1 and v2 fields indexed at the same time. Note that this option will increase +the index size and the data stored in outputs like Elasticsearch. +This option is available to ease the transition to the new schema. + +[id="{version}-plugins-{type}s-{plugin}-fields"] +===== `fields` + + * Value type is {logstash-ref}/configuration-file-structure.html#array[array] + * Default value is `[]` + +Fields to be included in the CEF extension part as key/value pairs. + +[id="{version}-plugins-{type}s-{plugin}-name"] +===== `name` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * Default value is `"Logstash"` + +Name field in CEF header. The new value can include `%{foo}` strings +to help you build a new value from other parts of the event. + +[id="{version}-plugins-{type}s-{plugin}-product"] +===== `product` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * Default value is `"Logstash"` + +Device product field in CEF header. The new value can include `%{foo}` strings +to help you build a new value from other parts of the event. + +[id="{version}-plugins-{type}s-{plugin}-sev"] +===== `sev` (DEPRECATED) + + * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions. + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * There is no default value for this setting. + +Deprecated severity field for the CEF header. The new value can include `%{foo}` strings +to help you build a new value from other parts of the event. + +This field is used only if `severity` is left unchanged at its default value. + +Defined as a field of type string to allow sprintf. The value will be validated +to be an integer in the range from 0 to 10 (inclusive). +All invalid values will be mapped to the default of 6. + +[id="{version}-plugins-{type}s-{plugin}-severity"] +===== `severity` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * Default value is `"6"` + +Severity field in CEF header. The new value can include `%{foo}` strings +to help you build a new value from other parts of the event. + +Defined as a field of type string to allow sprintf. The value will be validated +to be an integer in the range from 0 to 10 (inclusive). +All invalid values will be mapped to the default of 6. + +[id="{version}-plugins-{type}s-{plugin}-signature"] +===== `signature` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * Default value is `"Logstash"` + +Signature ID field in CEF header. The new value can include `%{foo}` strings +to help you build a new value from other parts of the event. + +[id="{version}-plugins-{type}s-{plugin}-vendor"] +===== `vendor` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * Default value is `"Elasticsearch"` + +Device vendor field in CEF header. The new value can include `%{foo}` strings +to help you build a new value from other parts of the event. + +[id="{version}-plugins-{type}s-{plugin}-version"] +===== `version` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * Default value is `"1.0"` + +Device version field in CEF header. The new value can include `%{foo}` strings +to help you build a new value from other parts of the event.
+ + diff --git a/docs/versioned-plugins/codecs/cef-v5.0.0.asciidoc b/docs/versioned-plugins/codecs/cef-v5.0.0.asciidoc new file mode 100644 index 000000000..4efedcba3 --- /dev/null +++ b/docs/versioned-plugins/codecs/cef-v5.0.0.asciidoc @@ -0,0 +1,153 @@ +:plugin: cef +:type: codec + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v5.0.0 +:release_date: 2017-08-01 +:changelog_url: https://github.com/logstash-plugins/logstash-codec-cef/blob/v5.0.0/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Cef codec plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +Implementation of a Logstash codec for the ArcSight Common Event Format (CEF), based on Revision 20 of Implementing ArcSight CEF, dated June 05, 2013: +https://community.saas.hpe.com/dcvta86296/attachments/dcvta86296/connector-documentation/1116/1/CommonEventFormatv23.pdf + +If this codec receives a payload from an input that is not a valid CEF message, then it will +produce an event with the payload as the `message` field and a `_cefparsefailure` tag. + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Cef Codec Configuration Options + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-delimiter>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-fields>> |{logstash-ref}/configuration-file-structure.html#array[array]|No +| <<{version}-plugins-{type}s-{plugin}-name>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-product>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-severity>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-signature>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-vendor>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-version>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +|======================================================================= + +&nbsp; + +[id="{version}-plugins-{type}s-{plugin}-delimiter"] +===== `delimiter` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * There is no default value for this setting. + +If your input puts a delimiter between each CEF event, you'll want to set +this to be that delimiter. + +For example, with the TCP input, you probably want to put this: + + input { + tcp { + codec => cef { delimiter => "\r\n" } + # ... + } + } + +This setting allows the following character sequences to have special meaning: + +* `\\r` (backslash "r") - means carriage return (ASCII 0x0D) +* `\\n` (backslash "n") - means newline (ASCII 0x0A) + +[id="{version}-plugins-{type}s-{plugin}-deprecated_v1_fields"] +===== `deprecated_v1_fields` (OBSOLETE) + + * OBSOLETE WARNING: This configuration item is obsolete and will prevent the pipeline from starting if used. + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] + * There is no default value for this setting. + +[id="{version}-plugins-{type}s-{plugin}-fields"] +===== `fields` + + * Value type is {logstash-ref}/configuration-file-structure.html#array[array] + * Default value is `[]` + +Fields to be included in the CEF extension part as key/value pairs. + +[id="{version}-plugins-{type}s-{plugin}-name"] +===== `name` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * Default value is `"Logstash"` + +Name field in CEF header. The new value can include `%{foo}` strings +to help you build a new value from other parts of the event. + +[id="{version}-plugins-{type}s-{plugin}-product"] +===== `product` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * Default value is `"Logstash"` + +Device product field in CEF header. The new value can include `%{foo}` strings +to help you build a new value from other parts of the event. + +[id="{version}-plugins-{type}s-{plugin}-sev"] +===== `sev` (OBSOLETE) + + * OBSOLETE WARNING: This configuration item is obsolete and will prevent the pipeline from starting. + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * There is no default value for this setting. + +Obsolete severity field for the CEF header; use `severity` instead. + +[id="{version}-plugins-{type}s-{plugin}-severity"] +===== `severity` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * Default value is `"6"` + +Severity field in CEF header. The new value can include `%{foo}` strings +to help you build a new value from other parts of the event. + +Defined as a field of type string to allow sprintf. The value will be validated +to be an integer in the range from 0 to 10 (inclusive). +All invalid values will be mapped to the default of 6. + +[id="{version}-plugins-{type}s-{plugin}-signature"] +===== `signature` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * Default value is `"Logstash"` + +Signature ID field in CEF header. The new value can include `%{foo}` strings +to help you build a new value from other parts of the event. + +[id="{version}-plugins-{type}s-{plugin}-vendor"] +===== `vendor` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * Default value is `"Elasticsearch"` + +Device vendor field in CEF header. The new value can include `%{foo}` strings +to help you build a new value from other parts of the event. + +[id="{version}-plugins-{type}s-{plugin}-version"] +===== `version` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * Default value is `"1.0"` + +Device version field in CEF header. The new value can include `%{foo}` strings +to help you build a new value from other parts of the event.
+ + diff --git a/docs/versioned-plugins/codecs/cef-v5.0.1.asciidoc b/docs/versioned-plugins/codecs/cef-v5.0.1.asciidoc new file mode 100644 index 000000000..b0597a374 --- /dev/null +++ b/docs/versioned-plugins/codecs/cef-v5.0.1.asciidoc @@ -0,0 +1,153 @@ +:plugin: cef +:type: codec + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v5.0.1 +:release_date: 2017-08-15 +:changelog_url: https://github.com/logstash-plugins/logstash-codec-cef/blob/v5.0.1/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Cef codec plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +Implementation of a Logstash codec for the ArcSight Common Event Format (CEF), based on Revision 20 of Implementing ArcSight CEF, dated June 05, 2013: +https://community.saas.hpe.com/dcvta86296/attachments/dcvta86296/connector-documentation/1116/1/CommonEventFormatv23.pdf + +If this codec receives a payload from an input that is not a valid CEF message, then it will +produce an event with the payload as the `message` field and a `_cefparsefailure` tag. + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Cef Codec Configuration Options + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-delimiter>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-fields>> |{logstash-ref}/configuration-file-structure.html#array[array]|No +| <<{version}-plugins-{type}s-{plugin}-name>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-product>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-severity>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-signature>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-vendor>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-version>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +|======================================================================= + +&nbsp; + +[id="{version}-plugins-{type}s-{plugin}-delimiter"] +===== `delimiter` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * There is no default value for this setting. + +If your input puts a delimiter between each CEF event, you'll want to set +this to be that delimiter. + +For example, with the TCP input, you probably want to put this: + + input { + tcp { + codec => cef { delimiter => "\r\n" } + # ... + } + } + +This setting allows the following character sequences to have special meaning: + +* `\\r` (backslash "r") - means carriage return (ASCII 0x0D) +* `\\n` (backslash "n") - means newline (ASCII 0x0A) + +[id="{version}-plugins-{type}s-{plugin}-deprecated_v1_fields"] +===== `deprecated_v1_fields` (OBSOLETE) + + * OBSOLETE WARNING: This configuration item is obsolete and will prevent the pipeline from starting if used. + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] + * There is no default value for this setting. + +[id="{version}-plugins-{type}s-{plugin}-fields"] +===== `fields` + + * Value type is {logstash-ref}/configuration-file-structure.html#array[array] + * Default value is `[]` + +Fields to be included in the CEF extension part as key/value pairs. + +[id="{version}-plugins-{type}s-{plugin}-name"] +===== `name` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * Default value is `"Logstash"` + +Name field in CEF header. The new value can include `%{foo}` strings +to help you build a new value from other parts of the event. + +[id="{version}-plugins-{type}s-{plugin}-product"] +===== `product` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * Default value is `"Logstash"` + +Device product field in CEF header. The new value can include `%{foo}` strings +to help you build a new value from other parts of the event. + +[id="{version}-plugins-{type}s-{plugin}-sev"] +===== `sev` (OBSOLETE) + + * OBSOLETE WARNING: This configuration item is obsolete and will prevent the pipeline from starting. + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * There is no default value for this setting. + +Obsolete severity field for the CEF header; use `severity` instead. + +[id="{version}-plugins-{type}s-{plugin}-severity"] +===== `severity` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * Default value is `"6"` + +Severity field in CEF header. The new value can include `%{foo}` strings +to help you build a new value from other parts of the event. + +Defined as a field of type string to allow sprintf. The value will be validated +to be an integer in the range from 0 to 10 (inclusive). +All invalid values will be mapped to the default of 6. + +[id="{version}-plugins-{type}s-{plugin}-signature"] +===== `signature` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * Default value is `"Logstash"` + +Signature ID field in CEF header. The new value can include `%{foo}` strings +to help you build a new value from other parts of the event. + +[id="{version}-plugins-{type}s-{plugin}-vendor"] +===== `vendor` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * Default value is `"Elasticsearch"` + +Device vendor field in CEF header. The new value can include `%{foo}` strings +to help you build a new value from other parts of the event. + +[id="{version}-plugins-{type}s-{plugin}-version"] +===== `version` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * Default value is `"1.0"` + +Device version field in CEF header. The new value can include `%{foo}` strings +to help you build a new value from other parts of the event.
+ + diff --git a/docs/versioned-plugins/codecs/cef-v5.0.2.asciidoc b/docs/versioned-plugins/codecs/cef-v5.0.2.asciidoc new file mode 100644 index 000000000..74d8782a3 --- /dev/null +++ b/docs/versioned-plugins/codecs/cef-v5.0.2.asciidoc @@ -0,0 +1,153 @@ +:plugin: cef +:type: codec + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v5.0.2 +:release_date: 2017-11-07 +:changelog_url: https://github.com/logstash-plugins/logstash-codec-cef/blob/v5.0.2/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Cef codec plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +Implementation of a Logstash codec for the ArcSight Common Event Format (CEF), based on Revision 20 of Implementing ArcSight CEF, dated June 05, 2013: +https://community.saas.hpe.com/dcvta86296/attachments/dcvta86296/connector-documentation/1116/1/CommonEventFormatv23.pdf + +If this codec receives a payload from an input that is not a valid CEF message, then it will +produce an event with the payload as the `message` field and a `_cefparsefailure` tag. + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Cef Codec Configuration Options + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-delimiter>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-fields>> |{logstash-ref}/configuration-file-structure.html#array[array]|No +| <<{version}-plugins-{type}s-{plugin}-name>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-product>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-severity>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-signature>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-vendor>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-version>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +|======================================================================= + +&nbsp; + +[id="{version}-plugins-{type}s-{plugin}-delimiter"] +===== `delimiter` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * There is no default value for this setting. + +If your input puts a delimiter between each CEF event, you'll want to set +this to be that delimiter. + +For example, with the TCP input, you probably want to put this: + + input { + tcp { + codec => cef { delimiter => "\r\n" } + # ... + } + } + +This setting allows the following character sequences to have special meaning: + +* `\\r` (backslash "r") - means carriage return (ASCII 0x0D) +* `\\n` (backslash "n") - means newline (ASCII 0x0A) + +[id="{version}-plugins-{type}s-{plugin}-deprecated_v1_fields"] +===== `deprecated_v1_fields` (OBSOLETE) + + * OBSOLETE WARNING: This configuration item is obsolete and will prevent the pipeline from starting if used. + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] + * There is no default value for this setting. + +[id="{version}-plugins-{type}s-{plugin}-fields"] +===== `fields` + + * Value type is {logstash-ref}/configuration-file-structure.html#array[array] + * Default value is `[]` + +Fields to be included in the CEF extension part as key/value pairs. + +[id="{version}-plugins-{type}s-{plugin}-name"] +===== `name` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * Default value is `"Logstash"` + +Name field in CEF header. The new value can include `%{foo}` strings +to help you build a new value from other parts of the event. + +[id="{version}-plugins-{type}s-{plugin}-product"] +===== `product` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * Default value is `"Logstash"` + +Device product field in CEF header. The new value can include `%{foo}` strings +to help you build a new value from other parts of the event. + +[id="{version}-plugins-{type}s-{plugin}-sev"] +===== `sev` (OBSOLETE) + + * OBSOLETE WARNING: This configuration item is obsolete and will prevent the pipeline from starting. + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * There is no default value for this setting. + +Obsolete severity field for the CEF header; use `severity` instead. + +[id="{version}-plugins-{type}s-{plugin}-severity"] +===== `severity` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * Default value is `"6"` + +Severity field in CEF header. The new value can include `%{foo}` strings +to help you build a new value from other parts of the event. + +Defined as a field of type string to allow sprintf. The value will be validated +to be an integer in the range from 0 to 10 (inclusive). +All invalid values will be mapped to the default of 6. + +[id="{version}-plugins-{type}s-{plugin}-signature"] +===== `signature` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * Default value is `"Logstash"` + +Signature ID field in CEF header. The new value can include `%{foo}` strings +to help you build a new value from other parts of the event. + +[id="{version}-plugins-{type}s-{plugin}-vendor"] +===== `vendor` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * Default value is `"Elasticsearch"` + +Device vendor field in CEF header. The new value can include `%{foo}` strings +to help you build a new value from other parts of the event. + +[id="{version}-plugins-{type}s-{plugin}-version"] +===== `version` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * Default value is `"1.0"` + +Device version field in CEF header. The new value can include `%{foo}` strings +to help you build a new value from other parts of the event.
+ + diff --git a/docs/versioned-plugins/codecs/json-index.asciidoc b/docs/versioned-plugins/codecs/json-index.asciidoc new file mode 100644 index 000000000..eb1c490cb --- /dev/null +++ b/docs/versioned-plugins/codecs/json-index.asciidoc @@ -0,0 +1,16 @@ +:plugin: json +:type: codec + +include::{include_path}/version-list-intro.asciidoc[] + +|======================================================================= +| Version | Release Date +| <<v3.0.5-plugins-codecs-json,v3.0.5>> | 2017-11-07 +| <<v3.0.4-plugins-codecs-json,v3.0.4>> | 2017-08-15 +| <<v3.0.3-plugins-codecs-json,v3.0.3>> | 2017-06-23 +|======================================================================= + +include::json-v3.0.5.asciidoc[] +include::json-v3.0.4.asciidoc[] +include::json-v3.0.3.asciidoc[] + diff --git a/docs/versioned-plugins/codecs/json-v3.0.3.asciidoc b/docs/versioned-plugins/codecs/json-v3.0.3.asciidoc new file mode 100644 index 000000000..9fce12219 --- /dev/null +++ b/docs/versioned-plugins/codecs/json-v3.0.3.asciidoc @@ -0,0 +1,62 @@ +:plugin: json +:type: codec + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.0.3 +:release_date: 2017-06-23 +:changelog_url: https://github.com/logstash-plugins/logstash-codec-json/blob/v3.0.3/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Json codec plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +This codec may be used to decode (via inputs) and encode (via outputs) +full JSON messages. If the data being sent is a JSON array at its root, multiple events will be created (one per element). + +If you are streaming JSON messages delimited +by '\n', then see the `json_lines` codec. + +Encoding will result in a compact JSON representation (no line terminators or indentation). + +If this codec receives a payload from an input that is not valid JSON, then +it will fall back to plain text and add a tag `_jsonparsefailure`. Upon a JSON +failure, the payload will be stored in the `message` field.
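+ +As a quick illustration, the codec is typically set on an input; a minimal sketch (the port is an arbitrary example, and `charset`, described below, is only needed for non-UTF-8 sources): + + input { + tcp { + port => 5000 + codec => json { charset => "CP1252" } + } + }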
+ +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Json Codec Configuration Options + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-charset>> |{logstash-ref}/configuration-file-structure.html#string[string], one of `["ASCII-8BIT", "UTF-8", "US-ASCII", "Big5", "Big5-HKSCS", "Big5-UAO", "CP949", "Emacs-Mule", "EUC-JP", "EUC-KR", "EUC-TW", "GB2312", "GB18030", "GBK", "ISO-8859-1", "ISO-8859-2", "ISO-8859-3", "ISO-8859-4", "ISO-8859-5", "ISO-8859-6", "ISO-8859-7", "ISO-8859-8", "ISO-8859-9", "ISO-8859-10", "ISO-8859-11", "ISO-8859-13", "ISO-8859-14", "ISO-8859-15", "ISO-8859-16", "KOI8-R", "KOI8-U", "Shift_JIS", "UTF-16BE", "UTF-16LE", "UTF-32BE", "UTF-32LE", "Windows-31J", "Windows-1250", "Windows-1251", "Windows-1252", "IBM437", "IBM737", "IBM775", "CP850", "IBM852", "CP852", "IBM855", "CP855", "IBM857", "IBM860", "IBM861", "IBM862", "IBM863", "IBM864", "IBM865", "IBM866", "IBM869", "Windows-1258", "GB1988", "macCentEuro", "macCroatian", "macCyrillic", "macGreek", "macIceland", "macRoman", "macRomania", "macThai", "macTurkish", "macUkraine", "CP950", "CP951", "IBM037", "stateless-ISO-2022-JP", "eucJP-ms", "CP51932", "EUC-JIS-2004", "GB12345", "ISO-2022-JP", "ISO-2022-JP-2", "CP50220", "CP50221", "Windows-1256", "Windows-1253", "Windows-1255", "Windows-1254", "TIS-620", "Windows-874", "Windows-1257", "MacJapanese", "UTF-7", "UTF8-MAC", "UTF-16", "UTF-32", "UTF8-DoCoMo", "SJIS-DoCoMo", "UTF8-KDDI", "SJIS-KDDI", "ISO-2022-JP-KDDI", "stateless-ISO-2022-JP-KDDI", "UTF8-SoftBank", "SJIS-SoftBank", "BINARY", "CP437", "CP737", "CP775", "IBM850", "CP857", "CP860", "CP861", "CP862", "CP863", "CP864", "CP865", "CP866", "CP869", "CP1258", "Big5-HKSCS:2008", "ebcdic-cp-us", "eucJP", "euc-jp-ms", "EUC-JISX0213", "eucKR", "eucTW", "EUC-CN", "eucCN", "CP936", "ISO2022-JP", "ISO2022-JP2", "ISO8859-1", "ISO8859-2", "ISO8859-3", "ISO8859-4", "ISO8859-5", "ISO8859-6", "CP1256", "ISO8859-7", "CP1253", "ISO8859-8", "CP1255", "ISO8859-9", "CP1254", "ISO8859-10", "ISO8859-11", "CP874", "ISO8859-13", "CP1257", "ISO8859-14", "ISO8859-15", "ISO8859-16", "CP878", "MacJapan", "ASCII", "ANSI_X3.4-1968", "646", "CP65000", "CP65001", "UTF-8-MAC", "UTF-8-HFS", "UCS-2BE", "UCS-4BE", "UCS-4LE", "CP932", "csWindows31J", "SJIS", "PCK", "CP1250", "CP1251", "CP1252", "external", "locale"]`|No +|======================================================================= + +  + +[id="{version}-plugins-{type}s-{plugin}-charset"] +===== `charset` + + * Value can be any of: `ASCII-8BIT`, `UTF-8`, `US-ASCII`, `Big5`, `Big5-HKSCS`, `Big5-UAO`, `CP949`, `Emacs-Mule`, `EUC-JP`, `EUC-KR`, `EUC-TW`, `GB2312`, `GB18030`, `GBK`, `ISO-8859-1`, `ISO-8859-2`, `ISO-8859-3`, `ISO-8859-4`, `ISO-8859-5`, `ISO-8859-6`, `ISO-8859-7`, `ISO-8859-8`, `ISO-8859-9`, `ISO-8859-10`, `ISO-8859-11`, `ISO-8859-13`, `ISO-8859-14`, `ISO-8859-15`, `ISO-8859-16`, `KOI8-R`, `KOI8-U`, `Shift_JIS`, `UTF-16BE`, `UTF-16LE`, `UTF-32BE`, `UTF-32LE`, `Windows-31J`, `Windows-1250`, `Windows-1251`, `Windows-1252`, `IBM437`, `IBM737`, `IBM775`, `CP850`, `IBM852`, `CP852`, `IBM855`, `CP855`, `IBM857`, `IBM860`, `IBM861`, `IBM862`, `IBM863`, `IBM864`, `IBM865`, `IBM866`, `IBM869`, `Windows-1258`, `GB1988`, `macCentEuro`, `macCroatian`, `macCyrillic`, `macGreek`, `macIceland`, `macRoman`, `macRomania`, `macThai`, `macTurkish`, `macUkraine`, `CP950`, `CP951`, `IBM037`, `stateless-ISO-2022-JP`, `eucJP-ms`, `CP51932`, `EUC-JIS-2004`, `GB12345`, 
`ISO-2022-JP`, `ISO-2022-JP-2`, `CP50220`, `CP50221`, `Windows-1256`, `Windows-1253`, `Windows-1255`, `Windows-1254`, `TIS-620`, `Windows-874`, `Windows-1257`, `MacJapanese`, `UTF-7`, `UTF8-MAC`, `UTF-16`, `UTF-32`, `UTF8-DoCoMo`, `SJIS-DoCoMo`, `UTF8-KDDI`, `SJIS-KDDI`, `ISO-2022-JP-KDDI`, `stateless-ISO-2022-JP-KDDI`, `UTF8-SoftBank`, `SJIS-SoftBank`, `BINARY`, `CP437`, `CP737`, `CP775`, `IBM850`, `CP857`, `CP860`, `CP861`, `CP862`, `CP863`, `CP864`, `CP865`, `CP866`, `CP869`, `CP1258`, `Big5-HKSCS:2008`, `ebcdic-cp-us`, `eucJP`, `euc-jp-ms`, `EUC-JISX0213`, `eucKR`, `eucTW`, `EUC-CN`, `eucCN`, `CP936`, `ISO2022-JP`, `ISO2022-JP2`, `ISO8859-1`, `ISO8859-2`, `ISO8859-3`, `ISO8859-4`, `ISO8859-5`, `ISO8859-6`, `CP1256`, `ISO8859-7`, `CP1253`, `ISO8859-8`, `CP1255`, `ISO8859-9`, `CP1254`, `ISO8859-10`, `ISO8859-11`, `CP874`, `ISO8859-13`, `CP1257`, `ISO8859-14`, `ISO8859-15`, `ISO8859-16`, `CP878`, `MacJapan`, `ASCII`, `ANSI_X3.4-1968`, `646`, `CP65000`, `CP65001`, `UTF-8-MAC`, `UTF-8-HFS`, `UCS-2BE`, `UCS-4BE`, `UCS-4LE`, `CP932`, `csWindows31J`, `SJIS`, `PCK`, `CP1250`, `CP1251`, `CP1252`, `external`, `locale` + * Default value is `"UTF-8"` + +The character encoding used in this codec. Examples include "UTF-8" and +"CP1252". + +JSON requires valid UTF-8 strings, but in some cases, software that +emits JSON does so in another encoding (nxlog, for example). In +weird cases like this, you can set the `charset` setting to the +actual encoding of the text and Logstash will convert it for you. + +For nxlog users, you may want to set this to "CP1252". + + diff --git a/docs/versioned-plugins/codecs/json-v3.0.4.asciidoc b/docs/versioned-plugins/codecs/json-v3.0.4.asciidoc new file mode 100644 index 000000000..45eff46e5 --- /dev/null +++ b/docs/versioned-plugins/codecs/json-v3.0.4.asciidoc @@ -0,0 +1,62 @@ +:plugin: json +:type: codec + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.0.4 +:release_date: 2017-08-15 +:changelog_url: https://github.com/logstash-plugins/logstash-codec-json/blob/v3.0.4/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Json codec plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +This codec may be used to decode (via inputs) and encode (via outputs) +full JSON messages. If the data being sent is a JSON array at its root, multiple events will be created (one per element). + +If you are streaming JSON messages delimited +by '\n', then see the `json_lines` codec. + +Encoding will result in a compact JSON representation (no line terminators or indentation). + +If this codec receives a payload from an input that is not valid JSON, then +it will fall back to plain text and add a tag `_jsonparsefailure`. Upon a JSON +failure, the payload will be stored in the `message` field.
+ +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Json Codec Configuration Options + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-charset>> |{logstash-ref}/configuration-file-structure.html#string[string], one of `["ASCII-8BIT", "UTF-8", "US-ASCII", "Big5", "Big5-HKSCS", "Big5-UAO", "CP949", "Emacs-Mule", "EUC-JP", "EUC-KR", "EUC-TW", "GB2312", "GB18030", "GBK", "ISO-8859-1", "ISO-8859-2", "ISO-8859-3", "ISO-8859-4", "ISO-8859-5", "ISO-8859-6", "ISO-8859-7", "ISO-8859-8", "ISO-8859-9", "ISO-8859-10", "ISO-8859-11", "ISO-8859-13", "ISO-8859-14", "ISO-8859-15", "ISO-8859-16", "KOI8-R", "KOI8-U", "Shift_JIS", "UTF-16BE", "UTF-16LE", "UTF-32BE", "UTF-32LE", "Windows-31J", "Windows-1250", "Windows-1251", "Windows-1252", "IBM437", "IBM737", "IBM775", "CP850", "IBM852", "CP852", "IBM855", "CP855", "IBM857", "IBM860", "IBM861", "IBM862", "IBM863", "IBM864", "IBM865", "IBM866", "IBM869", "Windows-1258", "GB1988", "macCentEuro", "macCroatian", "macCyrillic", "macGreek", "macIceland", "macRoman", "macRomania", "macThai", "macTurkish", "macUkraine", "CP950", "CP951", "IBM037", "stateless-ISO-2022-JP", "eucJP-ms", "CP51932", "EUC-JIS-2004", "GB12345", "ISO-2022-JP", "ISO-2022-JP-2", "CP50220", "CP50221", "Windows-1256", "Windows-1253", "Windows-1255", "Windows-1254", "TIS-620", "Windows-874", "Windows-1257", "MacJapanese", "UTF-7", "UTF8-MAC", "UTF-16", "UTF-32", "UTF8-DoCoMo", "SJIS-DoCoMo", "UTF8-KDDI", "SJIS-KDDI", "ISO-2022-JP-KDDI", "stateless-ISO-2022-JP-KDDI", "UTF8-SoftBank", "SJIS-SoftBank", "BINARY", "CP437", "CP737", "CP775", "IBM850", "CP857", "CP860", "CP861", "CP862", "CP863", "CP864", "CP865", "CP866", "CP869", "CP1258", "Big5-HKSCS:2008", "ebcdic-cp-us", "eucJP", "euc-jp-ms", "EUC-JISX0213", "eucKR", "eucTW", "EUC-CN", "eucCN", "CP936", "ISO2022-JP", "ISO2022-JP2", "ISO8859-1", "ISO8859-2", "ISO8859-3", "ISO8859-4", "ISO8859-5", "ISO8859-6", "CP1256", "ISO8859-7", "CP1253", "ISO8859-8", "CP1255", "ISO8859-9", "CP1254", "ISO8859-10", "ISO8859-11", "CP874", "ISO8859-13", "CP1257", "ISO8859-14", "ISO8859-15", "ISO8859-16", "CP878", "MacJapan", "ASCII", "ANSI_X3.4-1968", "646", "CP65000", "CP65001", "UTF-8-MAC", "UTF-8-HFS", "UCS-2BE", "UCS-4BE", "UCS-4LE", "CP932", "csWindows31J", "SJIS", "PCK", "CP1250", "CP1251", "CP1252", "external", "locale"]`|No +|======================================================================= + +  + +[id="{version}-plugins-{type}s-{plugin}-charset"] +===== `charset` + + * Value can be any of: `ASCII-8BIT`, `UTF-8`, `US-ASCII`, `Big5`, `Big5-HKSCS`, `Big5-UAO`, `CP949`, `Emacs-Mule`, `EUC-JP`, `EUC-KR`, `EUC-TW`, `GB2312`, `GB18030`, `GBK`, `ISO-8859-1`, `ISO-8859-2`, `ISO-8859-3`, `ISO-8859-4`, `ISO-8859-5`, `ISO-8859-6`, `ISO-8859-7`, `ISO-8859-8`, `ISO-8859-9`, `ISO-8859-10`, `ISO-8859-11`, `ISO-8859-13`, `ISO-8859-14`, `ISO-8859-15`, `ISO-8859-16`, `KOI8-R`, `KOI8-U`, `Shift_JIS`, `UTF-16BE`, `UTF-16LE`, `UTF-32BE`, `UTF-32LE`, `Windows-31J`, `Windows-1250`, `Windows-1251`, `Windows-1252`, `IBM437`, `IBM737`, `IBM775`, `CP850`, `IBM852`, `CP852`, `IBM855`, `CP855`, `IBM857`, `IBM860`, `IBM861`, `IBM862`, `IBM863`, `IBM864`, `IBM865`, `IBM866`, `IBM869`, `Windows-1258`, `GB1988`, `macCentEuro`, `macCroatian`, `macCyrillic`, `macGreek`, `macIceland`, `macRoman`, `macRomania`, `macThai`, `macTurkish`, `macUkraine`, `CP950`, `CP951`, `IBM037`, `stateless-ISO-2022-JP`, `eucJP-ms`, `CP51932`, `EUC-JIS-2004`, `GB12345`, 
`ISO-2022-JP`, `ISO-2022-JP-2`, `CP50220`, `CP50221`, `Windows-1256`, `Windows-1253`, `Windows-1255`, `Windows-1254`, `TIS-620`, `Windows-874`, `Windows-1257`, `MacJapanese`, `UTF-7`, `UTF8-MAC`, `UTF-16`, `UTF-32`, `UTF8-DoCoMo`, `SJIS-DoCoMo`, `UTF8-KDDI`, `SJIS-KDDI`, `ISO-2022-JP-KDDI`, `stateless-ISO-2022-JP-KDDI`, `UTF8-SoftBank`, `SJIS-SoftBank`, `BINARY`, `CP437`, `CP737`, `CP775`, `IBM850`, `CP857`, `CP860`, `CP861`, `CP862`, `CP863`, `CP864`, `CP865`, `CP866`, `CP869`, `CP1258`, `Big5-HKSCS:2008`, `ebcdic-cp-us`, `eucJP`, `euc-jp-ms`, `EUC-JISX0213`, `eucKR`, `eucTW`, `EUC-CN`, `eucCN`, `CP936`, `ISO2022-JP`, `ISO2022-JP2`, `ISO8859-1`, `ISO8859-2`, `ISO8859-3`, `ISO8859-4`, `ISO8859-5`, `ISO8859-6`, `CP1256`, `ISO8859-7`, `CP1253`, `ISO8859-8`, `CP1255`, `ISO8859-9`, `CP1254`, `ISO8859-10`, `ISO8859-11`, `CP874`, `ISO8859-13`, `CP1257`, `ISO8859-14`, `ISO8859-15`, `ISO8859-16`, `CP878`, `MacJapan`, `ASCII`, `ANSI_X3.4-1968`, `646`, `CP65000`, `CP65001`, `UTF-8-MAC`, `UTF-8-HFS`, `UCS-2BE`, `UCS-4BE`, `UCS-4LE`, `CP932`, `csWindows31J`, `SJIS`, `PCK`, `CP1250`, `CP1251`, `CP1252`, `external`, `locale` + * Default value is `"UTF-8"` + +The character encoding used in this codec. Examples include "UTF-8" and +"CP1252". + +JSON requires valid UTF-8 strings, but in some cases, software that +emits JSON does so in another encoding (nxlog, for example). In +weird cases like this, you can set the `charset` setting to the +actual encoding of the text and Logstash will convert it for you. + +For nxlog users, you may want to set this to "CP1252". + + diff --git a/docs/versioned-plugins/codecs/json-v3.0.5.asciidoc b/docs/versioned-plugins/codecs/json-v3.0.5.asciidoc new file mode 100644 index 000000000..96dc1ca76 --- /dev/null +++ b/docs/versioned-plugins/codecs/json-v3.0.5.asciidoc @@ -0,0 +1,62 @@ +:plugin: json +:type: codec + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.0.5 +:release_date: 2017-11-07 +:changelog_url: https://github.com/logstash-plugins/logstash-codec-json/blob/v3.0.5/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Json codec plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +This codec may be used to decode (via inputs) and encode (via outputs) +full JSON messages. If the data being sent is a JSON array at its root, multiple events will be created (one per element). + +If you are streaming JSON messages delimited +by '\n', then see the `json_lines` codec. + +Encoding will result in a compact JSON representation (no line terminators or indentation). + +If this codec receives a payload from an input that is not valid JSON, then +it will fall back to plain text and add a tag `_jsonparsefailure`. Upon a JSON +failure, the payload will be stored in the `message` field.
+ +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Json Codec Configuration Options + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-charset>> |{logstash-ref}/configuration-file-structure.html#string[string], one of `["ASCII-8BIT", "UTF-8", "US-ASCII", "Big5", "Big5-HKSCS", "Big5-UAO", "CP949", "Emacs-Mule", "EUC-JP", "EUC-KR", "EUC-TW", "GB2312", "GB18030", "GBK", "ISO-8859-1", "ISO-8859-2", "ISO-8859-3", "ISO-8859-4", "ISO-8859-5", "ISO-8859-6", "ISO-8859-7", "ISO-8859-8", "ISO-8859-9", "ISO-8859-10", "ISO-8859-11", "ISO-8859-13", "ISO-8859-14", "ISO-8859-15", "ISO-8859-16", "KOI8-R", "KOI8-U", "Shift_JIS", "UTF-16BE", "UTF-16LE", "UTF-32BE", "UTF-32LE", "Windows-31J", "Windows-1250", "Windows-1251", "Windows-1252", "IBM437", "IBM737", "IBM775", "CP850", "IBM852", "CP852", "IBM855", "CP855", "IBM857", "IBM860", "IBM861", "IBM862", "IBM863", "IBM864", "IBM865", "IBM866", "IBM869", "Windows-1258", "GB1988", "macCentEuro", "macCroatian", "macCyrillic", "macGreek", "macIceland", "macRoman", "macRomania", "macThai", "macTurkish", "macUkraine", "CP950", "CP951", "IBM037", "stateless-ISO-2022-JP", "eucJP-ms", "CP51932", "EUC-JIS-2004", "GB12345", "ISO-2022-JP", "ISO-2022-JP-2", "CP50220", "CP50221", "Windows-1256", "Windows-1253", "Windows-1255", "Windows-1254", "TIS-620", "Windows-874", "Windows-1257", "MacJapanese", "UTF-7", "UTF8-MAC", "UTF-16", "UTF-32", "UTF8-DoCoMo", "SJIS-DoCoMo", "UTF8-KDDI", "SJIS-KDDI", "ISO-2022-JP-KDDI", "stateless-ISO-2022-JP-KDDI", "UTF8-SoftBank", "SJIS-SoftBank", "BINARY", "CP437", "CP737", "CP775", "IBM850", "CP857", "CP860", "CP861", "CP862", "CP863", "CP864", "CP865", "CP866", "CP869", "CP1258", "Big5-HKSCS:2008", "ebcdic-cp-us", "eucJP", "euc-jp-ms", "EUC-JISX0213", "eucKR", "eucTW", "EUC-CN", "eucCN", "CP936", "ISO2022-JP", "ISO2022-JP2", "ISO8859-1", "ISO8859-2", "ISO8859-3", "ISO8859-4", "ISO8859-5", "ISO8859-6", "CP1256", "ISO8859-7", "CP1253", "ISO8859-8", "CP1255", "ISO8859-9", "CP1254", "ISO8859-10", "ISO8859-11", "CP874", "ISO8859-13", "CP1257", "ISO8859-14", "ISO8859-15", "ISO8859-16", "CP878", "MacJapan", "ASCII", "ANSI_X3.4-1968", "646", "CP65000", "CP65001", "UTF-8-MAC", "UTF-8-HFS", "UCS-2BE", "UCS-4BE", "UCS-4LE", "CP932", "csWindows31J", "SJIS", "PCK", "CP1250", "CP1251", "CP1252", "external", "locale"]`|No +|======================================================================= + +  + +[id="{version}-plugins-{type}s-{plugin}-charset"] +===== `charset` + + * Value can be any of: `ASCII-8BIT`, `UTF-8`, `US-ASCII`, `Big5`, `Big5-HKSCS`, `Big5-UAO`, `CP949`, `Emacs-Mule`, `EUC-JP`, `EUC-KR`, `EUC-TW`, `GB2312`, `GB18030`, `GBK`, `ISO-8859-1`, `ISO-8859-2`, `ISO-8859-3`, `ISO-8859-4`, `ISO-8859-5`, `ISO-8859-6`, `ISO-8859-7`, `ISO-8859-8`, `ISO-8859-9`, `ISO-8859-10`, `ISO-8859-11`, `ISO-8859-13`, `ISO-8859-14`, `ISO-8859-15`, `ISO-8859-16`, `KOI8-R`, `KOI8-U`, `Shift_JIS`, `UTF-16BE`, `UTF-16LE`, `UTF-32BE`, `UTF-32LE`, `Windows-31J`, `Windows-1250`, `Windows-1251`, `Windows-1252`, `IBM437`, `IBM737`, `IBM775`, `CP850`, `IBM852`, `CP852`, `IBM855`, `CP855`, `IBM857`, `IBM860`, `IBM861`, `IBM862`, `IBM863`, `IBM864`, `IBM865`, `IBM866`, `IBM869`, `Windows-1258`, `GB1988`, `macCentEuro`, `macCroatian`, `macCyrillic`, `macGreek`, `macIceland`, `macRoman`, `macRomania`, `macThai`, `macTurkish`, `macUkraine`, `CP950`, `CP951`, `IBM037`, `stateless-ISO-2022-JP`, `eucJP-ms`, `CP51932`, `EUC-JIS-2004`, `GB12345`, 
`ISO-2022-JP`, `ISO-2022-JP-2`, `CP50220`, `CP50221`, `Windows-1256`, `Windows-1253`, `Windows-1255`, `Windows-1254`, `TIS-620`, `Windows-874`, `Windows-1257`, `MacJapanese`, `UTF-7`, `UTF8-MAC`, `UTF-16`, `UTF-32`, `UTF8-DoCoMo`, `SJIS-DoCoMo`, `UTF8-KDDI`, `SJIS-KDDI`, `ISO-2022-JP-KDDI`, `stateless-ISO-2022-JP-KDDI`, `UTF8-SoftBank`, `SJIS-SoftBank`, `BINARY`, `CP437`, `CP737`, `CP775`, `IBM850`, `CP857`, `CP860`, `CP861`, `CP862`, `CP863`, `CP864`, `CP865`, `CP866`, `CP869`, `CP1258`, `Big5-HKSCS:2008`, `ebcdic-cp-us`, `eucJP`, `euc-jp-ms`, `EUC-JISX0213`, `eucKR`, `eucTW`, `EUC-CN`, `eucCN`, `CP936`, `ISO2022-JP`, `ISO2022-JP2`, `ISO8859-1`, `ISO8859-2`, `ISO8859-3`, `ISO8859-4`, `ISO8859-5`, `ISO8859-6`, `CP1256`, `ISO8859-7`, `CP1253`, `ISO8859-8`, `CP1255`, `ISO8859-9`, `CP1254`, `ISO8859-10`, `ISO8859-11`, `CP874`, `ISO8859-13`, `CP1257`, `ISO8859-14`, `ISO8859-15`, `ISO8859-16`, `CP878`, `MacJapan`, `ASCII`, `ANSI_X3.4-1968`, `646`, `CP65000`, `CP65001`, `UTF-8-MAC`, `UTF-8-HFS`, `UCS-2BE`, `UCS-4BE`, `UCS-4LE`, `CP932`, `csWindows31J`, `SJIS`, `PCK`, `CP1250`, `CP1251`, `CP1252`, `external`, `locale` + * Default value is `"UTF-8"` + +The character encoding used in this codec. Examples include "UTF-8" and +"CP1252". + +JSON requires valid UTF-8 strings, but in some cases, software that +emits JSON does so in another encoding (nxlog, for example). In +weird cases like this, you can set the `charset` setting to the +actual encoding of the text and Logstash will convert it for you. + +For nxlog users, you may want to set this to "CP1252". + + diff --git a/docs/versioned-plugins/codecs/rubydebug-index.asciidoc b/docs/versioned-plugins/codecs/rubydebug-index.asciidoc new file mode 100644 index 000000000..338dc8afd --- /dev/null +++ b/docs/versioned-plugins/codecs/rubydebug-index.asciidoc @@ -0,0 +1,16 @@ +:plugin: rubydebug +:type: codec + +include::{include_path}/version-list-intro.asciidoc[] + +|======================================================================= +| Version | Release Date +| <<v3.0.5-plugins-codecs-rubydebug,v3.0.5>> | 2017-11-14 +| <<v3.0.4-plugins-codecs-rubydebug,v3.0.4>> | 2017-08-21 +| <<v3.0.3-plugins-codecs-rubydebug,v3.0.3>> | 2017-06-23 +|======================================================================= + +include::rubydebug-v3.0.5.asciidoc[] +include::rubydebug-v3.0.4.asciidoc[] +include::rubydebug-v3.0.3.asciidoc[] + diff --git a/docs/versioned-plugins/codecs/rubydebug-v3.0.3.asciidoc b/docs/versioned-plugins/codecs/rubydebug-v3.0.3.asciidoc new file mode 100644 index 000000000..c0868bfd1 --- /dev/null +++ b/docs/versioned-plugins/codecs/rubydebug-v3.0.3.asciidoc @@ -0,0 +1,46 @@ +:plugin: rubydebug +:type: codec + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.0.3 +:release_date: 2017-06-23 +:changelog_url: https://github.com/logstash-plugins/logstash-codec-rubydebug/blob/v3.0.3/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Rubydebug codec plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +The rubydebug codec will output your Logstash event data using +the Ruby Awesome Print library.
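+ +A typical use is pretty-printing events on a stdout output while debugging a pipeline; a minimal sketch (enabling `metadata`, described below, is optional): + + output { + stdout { + codec => rubydebug { metadata => true } + } + }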
+ + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Rubydebug Codec Configuration Options + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-metadata>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +|======================================================================= + +  + +[id="{version}-plugins-{type}s-{plugin}-metadata"] +===== `metadata` + + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] + * Default value is `false` + +Should the event's metadata be included? + + diff --git a/docs/versioned-plugins/codecs/rubydebug-v3.0.4.asciidoc b/docs/versioned-plugins/codecs/rubydebug-v3.0.4.asciidoc new file mode 100644 index 000000000..32642161c --- /dev/null +++ b/docs/versioned-plugins/codecs/rubydebug-v3.0.4.asciidoc @@ -0,0 +1,46 @@ +:plugin: rubydebug +:type: codec + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.0.4 +:release_date: 2017-08-21 +:changelog_url: https://github.com/logstash-plugins/logstash-codec-rubydebug/blob/v3.0.4/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Rubydebug codec plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +The rubydebug codec will output your Logstash event data using +the Ruby Awesome Print library. + + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Rubydebug Codec Configuration Options + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-metadata>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +|======================================================================= + +  + +[id="{version}-plugins-{type}s-{plugin}-metadata"] +===== `metadata` + + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] + * Default value is `false` + +Should the event's metadata be included? + + diff --git a/docs/versioned-plugins/codecs/rubydebug-v3.0.5.asciidoc b/docs/versioned-plugins/codecs/rubydebug-v3.0.5.asciidoc new file mode 100644 index 000000000..2bd9155be --- /dev/null +++ b/docs/versioned-plugins/codecs/rubydebug-v3.0.5.asciidoc @@ -0,0 +1,46 @@ +:plugin: rubydebug +:type: codec + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.0.5 +:release_date: 2017-11-14 +:changelog_url: https://github.com/logstash-plugins/logstash-codec-rubydebug/blob/v3.0.5/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Rubydebug codec plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +The rubydebug codec will output your Logstash event data using +the Ruby Awesome Print library. 
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Rubydebug Codec Configuration Options
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-metadata>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+|=======================================================================
+
+ 
+
+[id="{version}-plugins-{type}s-{plugin}-metadata"]
+===== `metadata`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `false`
+
+Should the event's metadata be included?
+
+
diff --git a/docs/versioned-plugins/filters-index.asciidoc b/docs/versioned-plugins/filters-index.asciidoc
new file mode 100644
index 000000000..9158a052d
--- /dev/null
+++ b/docs/versioned-plugins/filters-index.asciidoc
@@ -0,0 +1,10 @@
+:type: filter
+:type_uc: Filter
+
+include::include/plugin-intro.asciidoc[]
+
+include::filters/grok-index.asciidoc[]
+include::filters/mutate-index.asciidoc[]
+include::filters/ruby-index.asciidoc[]
+include::filters/sleep-index.asciidoc[]
+
diff --git a/docs/versioned-plugins/filters/grok-index.asciidoc b/docs/versioned-plugins/filters/grok-index.asciidoc
new file mode 100644
index 000000000..7dcfee5c2
--- /dev/null
+++ b/docs/versioned-plugins/filters/grok-index.asciidoc
@@ -0,0 +1,22 @@
+:plugin: grok
+:type: filter
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+| <> | 2017-12-18
+| <> | 2017-11-27
+| <> | 2017-11-07
+| <> | 2017-08-15
+| <> | 2017-06-23
+| <> | 2017-05-10
+|=======================================================================
+
+include::grok-v4.0.1.asciidoc[]
+include::grok-v4.0.0.asciidoc[]
+include::grok-v3.4.4.asciidoc[]
+include::grok-v3.4.3.asciidoc[]
+include::grok-v3.4.2.asciidoc[]
+include::grok-v3.4.1.asciidoc[]
+
diff --git a/docs/versioned-plugins/filters/grok-v3.4.1.asciidoc b/docs/versioned-plugins/filters/grok-v3.4.1.asciidoc
new file mode 100644
index 000000000..ef95be7a3
--- /dev/null
+++ b/docs/versioned-plugins/filters/grok-v3.4.1.asciidoc
@@ -0,0 +1,332 @@
+:plugin: grok
+:type: filter
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.4.1
+:release_date: 2017-05-10
+:changelog_url: https://github.com/logstash-plugins/logstash-filter-grok/blob/v3.4.1/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Grok filter plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Parse arbitrary text and structure it.
+
+Grok is currently the best way in Logstash to parse unstructured log
+data into something structured and queryable.
+
+This tool is perfect for syslog logs, Apache and other webserver logs, MySQL
+logs, and in general, any log format that is generally written for humans
+and not computer consumption.
+
+Logstash ships with about 120 patterns by default. You can find them here:
+https://github.com/logstash-plugins/logstash-patterns-core/tree/master/patterns. You can add
+your own trivially. (See the `patterns_dir` setting.)
+
+If you need help building patterns to match your logs, you will find the
+http://grokdebug.herokuapp.com and http://grokconstructor.appspot.com/ applications quite useful!
+
+==== Grok Basics
+
+Grok works by combining text patterns into something that matches your
+logs.
+
+The syntax for a grok pattern is `%{SYNTAX:SEMANTIC}`.
+
+The `SYNTAX` is the name of the pattern that will match your text. For
+example, `3.44` will be matched by the `NUMBER` pattern and `55.3.244.1` will
+be matched by the `IP` pattern. The syntax is how you match.
+
+The `SEMANTIC` is the identifier you give to the piece of text being matched.
+For example, `3.44` could be the duration of an event, so you could call it
+simply `duration`. Further, a string `55.3.244.1` might identify the `client`
+making a request.
+
+For the above example, your grok filter would look something like this:
+[source,ruby]
+%{NUMBER:duration} %{IP:client}
+
+Optionally you can add a data type conversion to your grok pattern. By default
+all semantics are saved as strings. If you wish to convert a semantic's data type,
+for example to change a string to an integer, suffix it with the target data type.
+For example, `%{NUMBER:num:int}` converts the `num` semantic from a string to an
+integer. Currently the only supported conversions are `int` and `float`.
+
+.Examples:
+
+With that idea of a syntax and semantic, we can pull out useful fields from a
+sample log like this fictional HTTP request log:
+[source,ruby]
+    55.3.244.1 GET /index.html 15824 0.043
+
+The pattern for this could be:
+[source,ruby]
+    %{IP:client} %{WORD:method} %{URIPATHPARAM:request} %{NUMBER:bytes} %{NUMBER:duration}
+
+For a more realistic example, let's read these logs from a file:
+[source,ruby]
+    input {
+      file {
+        path => "/var/log/http.log"
+      }
+    }
+    filter {
+      grok {
+        match => { "message" => "%{IP:client} %{WORD:method} %{URIPATHPARAM:request} %{NUMBER:bytes} %{NUMBER:duration}" }
+      }
+    }
+
+After the grok filter, the event will have a few extra fields in it:
+
+* `client: 55.3.244.1`
+* `method: GET`
+* `request: /index.html`
+* `bytes: 15824`
+* `duration: 0.043`
+
+==== Regular Expressions
+
+Grok sits on top of regular expressions, so any regular expressions are valid
+in grok as well. The regular expression library is Oniguruma, and you can see
+the full supported regexp syntax https://github.com/kkos/oniguruma/blob/master/doc/RE[on the Oniguruma
+site].
+
+==== Custom Patterns
+
+Sometimes Logstash doesn't have a pattern you need. For this, you have
+a few options.
+
+First, you can use the Oniguruma syntax for named capture, which will
+let you match a piece of text and save it as a field:
+[source,ruby]
+    (?<field_name>the pattern here)
+
+For example, postfix logs have a `queue id` that is a 10- or 11-character
+hexadecimal value. I can capture that easily like this:
+[source,ruby]
+    (?<queue_id>[0-9A-F]{10,11})
+
+Alternately, you can create a custom patterns file.
+
+* Create a directory called `patterns` with a file in it called `extra`
+  (the file name doesn't matter, but name it meaningfully for yourself)
+* In that file, write the pattern you need as the pattern name, a space, then
+  the regexp for that pattern.
+
+For example, doing the postfix queue id example as above:
+[source,ruby]
+    # contents of ./patterns/postfix:
+    POSTFIX_QUEUEID [0-9A-F]{10,11}
+
+Then use the `patterns_dir` setting in this plugin to tell Logstash where
+your custom patterns directory is.
Here's a full example with a sample log:
+[source,ruby]
+    Jan 1 06:25:43 mailserver14 postfix/cleanup[21403]: BEF25A72965: message-id=<20130101142543.5828399CCAF@mailserver14.example.com>
+[source,ruby]
+    filter {
+      grok {
+        patterns_dir => ["./patterns"]
+        match => { "message" => "%{SYSLOGBASE} %{POSTFIX_QUEUEID:queue_id}: %{GREEDYDATA:syslog_message}" }
+      }
+    }
+
+The above will match and result in the following fields:
+
+* `timestamp: Jan 1 06:25:43`
+* `logsource: mailserver14`
+* `program: postfix/cleanup`
+* `pid: 21403`
+* `queue_id: BEF25A72965`
+* `syslog_message: message-id=<20130101142543.5828399CCAF@mailserver14.example.com>`
+
+The `timestamp`, `logsource`, `program`, and `pid` fields come from the
+`SYSLOGBASE` pattern which itself is defined by other patterns.
+
+Another option is to define patterns _inline_ in the filter using `pattern_definitions`.
+This is mostly for convenience and allows you to define a pattern that can be
+used just in that filter. Patterns defined via `pattern_definitions` are not
+available outside of that particular `grok` filter.
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Grok Filter Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-break_on_match>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-keep_empty_captures>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-match>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No
+| <<{version}-plugins-{type}s-{plugin}-named_captures_only>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-overwrite>> |{logstash-ref}/configuration-file-structure.html#array[array]|No
+| <<{version}-plugins-{type}s-{plugin}-pattern_definitions>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No
+| <<{version}-plugins-{type}s-{plugin}-patterns_dir>> |{logstash-ref}/configuration-file-structure.html#array[array]|No
+| <<{version}-plugins-{type}s-{plugin}-patterns_files_glob>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-tag_on_failure>> |{logstash-ref}/configuration-file-structure.html#array[array]|No
+| <<{version}-plugins-{type}s-{plugin}-tag_on_timeout>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-timeout_millis>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+filter plugins.
+
+ 
+
+[id="{version}-plugins-{type}s-{plugin}-break_on_match"]
+===== `break_on_match`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `true`
+
+Break on first match. The first successful match by grok will result in the
+filter being finished. If you want grok to try all patterns (maybe you are
+parsing different things), then set this to false.
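+
+For example, a sketch of `break_on_match => false`, reusing the duration and
+speed patterns shown under `match` below: both patterns are attempted against
+every message, so a line containing both values yields both fields:
+[source,ruby]
+    filter {
+      grok {
+        break_on_match => false
+        match => { "message" => [ "Duration: %{NUMBER:duration}", "Speed: %{NUMBER:speed}" ] }
+      }
+    }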
+
+[id="{version}-plugins-{type}s-{plugin}-keep_empty_captures"]
+===== `keep_empty_captures`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `false`
+
+If `true`, keep empty captures as event fields.
+
+[id="{version}-plugins-{type}s-{plugin}-match"]
+===== `match`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash]
+ * Default value is `{}`
+
+A hash of matches of `field => value`.
+
+For example:
+[source,ruby]
+    filter {
+      grok { match => { "message" => "Duration: %{NUMBER:duration}" } }
+    }
+
+If you need to match multiple patterns against a single field, the value can
+be an array of patterns:
+[source,ruby]
+    filter {
+      grok { match => { "message" => [ "Duration: %{NUMBER:duration}", "Speed: %{NUMBER:speed}" ] } }
+    }
+
+
+[id="{version}-plugins-{type}s-{plugin}-named_captures_only"]
+===== `named_captures_only`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `true`
+
+If `true`, only store named captures from grok.
+
+[id="{version}-plugins-{type}s-{plugin}-overwrite"]
+===== `overwrite`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#array[array]
+ * Default value is `[]`
+
+The fields to overwrite.
+
+This allows you to overwrite a value in a field that already exists.
+
+For example, if you have a syslog line in the `message` field, you can
+overwrite the `message` field with part of the match like so:
+[source,ruby]
+    filter {
+      grok {
+        match => { "message" => "%{SYSLOGBASE} %{DATA:message}" }
+        overwrite => [ "message" ]
+      }
+    }
+
+In this case, a line like `May 29 16:37:11 sadness logger: hello world`
+will be parsed and `hello world` will overwrite the original message.
+
+[id="{version}-plugins-{type}s-{plugin}-pattern_definitions"]
+===== `pattern_definitions`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash]
+ * Default value is `{}`
+
+A hash of pattern-name and pattern tuples defining custom patterns to be used by
+the current filter. Patterns matching existing names will override the pre-existing
+definition. Think of these as inline patterns, available just for this definition of
+grok.
+
+[id="{version}-plugins-{type}s-{plugin}-patterns_dir"]
+===== `patterns_dir`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#array[array]
+ * Default value is `[]`
+
+Logstash ships by default with a bunch of patterns, so you don't
+necessarily need to define this yourself unless you are adding additional
+patterns. You can point to multiple pattern directories using this setting.
+Note that Grok will read all files in the directory that match `patterns_files_glob`
+and assume they are pattern files (including any tilde backup files).
+[source,ruby]
+    patterns_dir => ["/opt/logstash/patterns", "/opt/logstash/extra_patterns"]
+
+Pattern files are plain text with the format:
+[source,ruby]
+    NAME PATTERN
+
+For example:
+[source,ruby]
+    NUMBER \d+
+
+The patterns are loaded when the pipeline is created.
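+
+To contrast the two approaches above, the postfix pattern from the Custom
+Patterns section can be shipped inline via `pattern_definitions` instead of as
+a file under `patterns_dir`; a minimal sketch:
+[source,ruby]
+    filter {
+      grok {
+        pattern_definitions => { "POSTFIX_QUEUEID" => "[0-9A-F]{10,11}" }
+        match => { "message" => "%{SYSLOGBASE} %{POSTFIX_QUEUEID:queue_id}: %{GREEDYDATA:syslog_message}" }
+      }
+    }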
+
+[id="{version}-plugins-{type}s-{plugin}-patterns_files_glob"]
+===== `patterns_files_glob`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `"*"`
+
+Glob pattern used to select the pattern files in the directories
+specified by `patterns_dir`.
+
+[id="{version}-plugins-{type}s-{plugin}-tag_on_failure"]
+===== `tag_on_failure`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#array[array]
+ * Default value is `["_grokparsefailure"]`
+
+Append values to the `tags` field when there has been no
+successful match.
+
+[id="{version}-plugins-{type}s-{plugin}-tag_on_timeout"]
+===== `tag_on_timeout`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `"_groktimeout"`
+
+Tag to apply if a grok regexp times out.
+
+[id="{version}-plugins-{type}s-{plugin}-timeout_millis"]
+===== `timeout_millis`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `30000`
+
+Attempt to terminate regexps after this amount of time.
+This applies per pattern if multiple patterns are applied.
+Matching will never time out early, but may take a little longer to time out.
+The actual timeout is approximate, based on a 250ms quantization.
+Set to 0 to disable timeouts.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
diff --git a/docs/versioned-plugins/filters/grok-v3.4.2.asciidoc b/docs/versioned-plugins/filters/grok-v3.4.2.asciidoc
new file mode 100644
index 000000000..d18dc9cea
--- /dev/null
+++ b/docs/versioned-plugins/filters/grok-v3.4.2.asciidoc
@@ -0,0 +1,333 @@
+:plugin: grok
+:type: filter
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.4.2
+:release_date: 2017-06-23
+:changelog_url: https://github.com/logstash-plugins/logstash-filter-grok/blob/v3.4.2/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Grok filter plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Parse arbitrary text and structure it.
+
+Grok is currently the best way in Logstash to parse unstructured log
+data into something structured and queryable.
+
+This tool is perfect for syslog logs, Apache and other webserver logs, MySQL
+logs, and in general, any log format that is generally written for humans
+and not computer consumption.
+
+Logstash ships with about 120 patterns by default. You can find them here:
+https://github.com/logstash-plugins/logstash-patterns-core/tree/master/patterns. You can add
+your own trivially. (See the `patterns_dir` setting.)
+
+If you need help building patterns to match your logs, you will find the
+http://grokdebug.herokuapp.com and http://grokconstructor.appspot.com/ applications quite useful!
+
+==== Grok Basics
+
+Grok works by combining text patterns into something that matches your
+logs.
+
+The syntax for a grok pattern is `%{SYNTAX:SEMANTIC}`.
+
+The `SYNTAX` is the name of the pattern that will match your text. For
+example, `3.44` will be matched by the `NUMBER` pattern and `55.3.244.1` will
+be matched by the `IP` pattern. The syntax is how you match.
+
+The `SEMANTIC` is the identifier you give to the piece of text being matched.
+For example, `3.44` could be the duration of an event, so you could call it
+simply `duration`. Further, a string `55.3.244.1` might identify the `client`
+making a request.
+
+For the above example, your grok filter would look something like this:
+[source,ruby]
+%{NUMBER:duration} %{IP:client}
+
+Optionally you can add a data type conversion to your grok pattern. By default
+all semantics are saved as strings. If you wish to convert a semantic's data type,
+for example to change a string to an integer, suffix it with the target data type.
+For example, `%{NUMBER:num:int}` converts the `num` semantic from a string to an
+integer. Currently the only supported conversions are `int` and `float`.
+
+.Examples:
+
+With that idea of a syntax and semantic, we can pull out useful fields from a
+sample log like this fictional HTTP request log:
+[source,ruby]
+    55.3.244.1 GET /index.html 15824 0.043
+
+The pattern for this could be:
+[source,ruby]
+    %{IP:client} %{WORD:method} %{URIPATHPARAM:request} %{NUMBER:bytes} %{NUMBER:duration}
+
+For a more realistic example, let's read these logs from a file:
+[source,ruby]
+    input {
+      file {
+        path => "/var/log/http.log"
+      }
+    }
+    filter {
+      grok {
+        match => { "message" => "%{IP:client} %{WORD:method} %{URIPATHPARAM:request} %{NUMBER:bytes} %{NUMBER:duration}" }
+      }
+    }
+
+After the grok filter, the event will have a few extra fields in it:
+
+* `client: 55.3.244.1`
+* `method: GET`
+* `request: /index.html`
+* `bytes: 15824`
+* `duration: 0.043`
+
+==== Regular Expressions
+
+Grok sits on top of regular expressions, so any regular expressions are valid
+in grok as well. The regular expression library is Oniguruma, and you can see
+the full supported regexp syntax https://github.com/kkos/oniguruma/blob/master/doc/RE[on the Oniguruma
+site].
+
+==== Custom Patterns
+
+Sometimes Logstash doesn't have a pattern you need. For this, you have
+a few options.
+
+First, you can use the Oniguruma syntax for named capture, which will
+let you match a piece of text and save it as a field:
+[source,ruby]
+    (?<field_name>the pattern here)
+
+For example, postfix logs have a `queue id` that is a 10- or 11-character
+hexadecimal value. I can capture that easily like this:
+[source,ruby]
+    (?<queue_id>[0-9A-F]{10,11})
+
+Alternately, you can create a custom patterns file.
+
+* Create a directory called `patterns` with a file in it called `extra`
+  (the file name doesn't matter, but name it meaningfully for yourself)
+* In that file, write the pattern you need as the pattern name, a space, then
+  the regexp for that pattern.
+
+For example, doing the postfix queue id example as above:
+[source,ruby]
+    # contents of ./patterns/postfix:
+    POSTFIX_QUEUEID [0-9A-F]{10,11}
+
+Then use the `patterns_dir` setting in this plugin to tell Logstash where
+your custom patterns directory is. Here's a full example with a sample log:
+[source,ruby]
+    Jan 1 06:25:43 mailserver14 postfix/cleanup[21403]: BEF25A72965: message-id=<20130101142543.5828399CCAF@mailserver14.example.com>
+[source,ruby]
+    filter {
+      grok {
+        patterns_dir => ["./patterns"]
+        match => { "message" => "%{SYSLOGBASE} %{POSTFIX_QUEUEID:queue_id}: %{GREEDYDATA:syslog_message}" }
+      }
+    }
+
+The above will match and result in the following fields:
+
+* `timestamp: Jan 1 06:25:43`
+* `logsource: mailserver14`
+* `program: postfix/cleanup`
+* `pid: 21403`
+* `queue_id: BEF25A72965`
+* `syslog_message: message-id=<20130101142543.5828399CCAF@mailserver14.example.com>`
+
+The `timestamp`, `logsource`, `program`, and `pid` fields come from the
+`SYSLOGBASE` pattern which itself is defined by other patterns.
+
+Another option is to define patterns _inline_ in the filter using `pattern_definitions`.
+This is mostly for convenience and allows you to define a pattern that can be
+used just in that filter. Patterns defined via `pattern_definitions` are not
+available outside of that particular `grok` filter.
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Grok Filter Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-break_on_match>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-keep_empty_captures>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-match>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No
+| <<{version}-plugins-{type}s-{plugin}-named_captures_only>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-overwrite>> |{logstash-ref}/configuration-file-structure.html#array[array]|No
+| <<{version}-plugins-{type}s-{plugin}-pattern_definitions>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No
+| <<{version}-plugins-{type}s-{plugin}-patterns_dir>> |{logstash-ref}/configuration-file-structure.html#array[array]|No
+| <<{version}-plugins-{type}s-{plugin}-patterns_files_glob>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-tag_on_failure>> |{logstash-ref}/configuration-file-structure.html#array[array]|No
+| <<{version}-plugins-{type}s-{plugin}-tag_on_timeout>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-timeout_millis>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+filter plugins.
+
+ 
+
+[id="{version}-plugins-{type}s-{plugin}-break_on_match"]
+===== `break_on_match`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `true`
+
+Break on first match. The first successful match by grok will result in the
+filter being finished. If you want grok to try all patterns (maybe you are
+parsing different things), then set this to false.
+
+[id="{version}-plugins-{type}s-{plugin}-keep_empty_captures"]
+===== `keep_empty_captures`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `false`
+
+If `true`, keep empty captures as event fields.
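+
+A brief sketch (the log format here is invented for illustration): with the
+default `false`, a `user=` with no value produces no `user` field on the event,
+while `keep_empty_captures => true` keeps the empty field:
+[source,ruby]
+    filter {
+      grok {
+        keep_empty_captures => true
+        match => { "message" => "user=%{DATA:user} action=%{WORD:action}" }
+      }
+    }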
+
+[id="{version}-plugins-{type}s-{plugin}-match"]
+===== `match`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash]
+ * Default value is `{}`
+
+A hash of matches of `field => value`.
+
+For example:
+[source,ruby]
+    filter {
+      grok { match => { "message" => "Duration: %{NUMBER:duration}" } }
+    }
+
+If you need to match multiple patterns against a single field, the value can
+be an array of patterns:
+[source,ruby]
+    filter {
+      grok { match => { "message" => [ "Duration: %{NUMBER:duration}", "Speed: %{NUMBER:speed}" ] } }
+    }
+
+
+[id="{version}-plugins-{type}s-{plugin}-named_captures_only"]
+===== `named_captures_only`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `true`
+
+If `true`, only store named captures from grok.
+
+[id="{version}-plugins-{type}s-{plugin}-overwrite"]
+===== `overwrite`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#array[array]
+ * Default value is `[]`
+
+The fields to overwrite.
+
+This allows you to overwrite a value in a field that already exists.
+
+For example, if you have a syslog line in the `message` field, you can
+overwrite the `message` field with part of the match like so:
+[source,ruby]
+    filter {
+      grok {
+        match => { "message" => "%{SYSLOGBASE} %{DATA:message}" }
+        overwrite => [ "message" ]
+      }
+    }
+
+In this case, a line like `May 29 16:37:11 sadness logger: hello world`
+will be parsed and `hello world` will overwrite the original message.
+
+[id="{version}-plugins-{type}s-{plugin}-pattern_definitions"]
+===== `pattern_definitions`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash]
+ * Default value is `{}`
+
+A hash of pattern-name and pattern tuples defining custom patterns to be used by
+the current filter. Patterns matching existing names will override the pre-existing
+definition. Think of these as inline patterns, available just for this definition of
+grok.
+
+[id="{version}-plugins-{type}s-{plugin}-patterns_dir"]
+===== `patterns_dir`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#array[array]
+ * Default value is `[]`
+
+Logstash ships by default with a bunch of patterns, so you don't
+necessarily need to define this yourself unless you are adding additional
+patterns. You can point to multiple pattern directories using this setting.
+Note that Grok will read all files in the directory that match `patterns_files_glob`
+and assume they are pattern files (including any tilde backup files).
+[source,ruby]
+    patterns_dir => ["/opt/logstash/patterns", "/opt/logstash/extra_patterns"]
+
+Pattern files are plain text with the format:
+[source,ruby]
+    NAME PATTERN
+
+For example:
+[source,ruby]
+    NUMBER \d+
+
+The patterns are loaded when the pipeline is created.
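+
+To contrast the two approaches above, the postfix pattern from the Custom
+Patterns section can be shipped inline via `pattern_definitions` instead of as
+a file under `patterns_dir`; a minimal sketch:
+[source,ruby]
+    filter {
+      grok {
+        pattern_definitions => { "POSTFIX_QUEUEID" => "[0-9A-F]{10,11}" }
+        match => { "message" => "%{SYSLOGBASE} %{POSTFIX_QUEUEID:queue_id}: %{GREEDYDATA:syslog_message}" }
+      }
+    }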
+
+[id="{version}-plugins-{type}s-{plugin}-patterns_files_glob"]
+===== `patterns_files_glob`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `"*"`
+
+Glob pattern used to select the pattern files in the directories
+specified by `patterns_dir`.
+
+[id="{version}-plugins-{type}s-{plugin}-tag_on_failure"]
+===== `tag_on_failure`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#array[array]
+ * Default value is `["_grokparsefailure"]`
+
+Append values to the `tags` field when there has been no
+successful match.
+
+[id="{version}-plugins-{type}s-{plugin}-tag_on_timeout"]
+===== `tag_on_timeout`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `"_groktimeout"`
+
+Tag to apply if a grok regexp times out.
+
+[id="{version}-plugins-{type}s-{plugin}-timeout_millis"]
+===== `timeout_millis`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `30000`
+
+Attempt to terminate regexps after this amount of time.
+This applies per pattern if multiple patterns are applied.
+Matching will never time out early, but may take a little longer to time out.
+The actual timeout is approximate, based on a 250ms quantization.
+Set to 0 to disable timeouts.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/filters/grok-v3.4.3.asciidoc b/docs/versioned-plugins/filters/grok-v3.4.3.asciidoc
new file mode 100644
index 000000000..2a3341ebc
--- /dev/null
+++ b/docs/versioned-plugins/filters/grok-v3.4.3.asciidoc
@@ -0,0 +1,332 @@
+:plugin: grok
+:type: filter
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.4.3
+:release_date: 2017-08-15
+:changelog_url: https://github.com/logstash-plugins/logstash-filter-grok/blob/v3.4.3/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Grok filter plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Parse arbitrary text and structure it.
+
+Grok is a great way to parse unstructured log data into something structured and queryable.
+
+This tool is perfect for syslog logs, Apache and other webserver logs, MySQL
+logs, and in general, any log format that is generally written for humans
+and not computer consumption.
+
+Logstash ships with about 120 patterns by default. You can find them here:
+https://github.com/logstash-plugins/logstash-patterns-core/tree/master/patterns. You can add
+your own trivially. (See the `patterns_dir` setting.)
+
+If you need help building patterns to match your logs, you will find the
+http://grokdebug.herokuapp.com and http://grokconstructor.appspot.com/ applications quite useful!
+
+==== Grok Basics
+
+Grok works by combining text patterns into something that matches your
+logs.
+
+The syntax for a grok pattern is `%{SYNTAX:SEMANTIC}`.
+
+The `SYNTAX` is the name of the pattern that will match your text. For
+example, `3.44` will be matched by the `NUMBER` pattern and `55.3.244.1` will
+be matched by the `IP` pattern. The syntax is how you match.
+
+The `SEMANTIC` is the identifier you give to the piece of text being matched.
+For example, `3.44` could be the duration of an event, so you could call it
+simply `duration`.
Further, a string `55.3.244.1` might identify the `client`
+making a request.
+
+For the above example, your grok filter would look something like this:
+[source,ruby]
+%{NUMBER:duration} %{IP:client}
+
+Optionally you can add a data type conversion to your grok pattern. By default
+all semantics are saved as strings. If you wish to convert a semantic's data type,
+for example to change a string to an integer, suffix it with the target data type.
+For example, `%{NUMBER:num:int}` converts the `num` semantic from a string to an
+integer. Currently the only supported conversions are `int` and `float`.
+
+.Examples:
+
+With that idea of a syntax and semantic, we can pull out useful fields from a
+sample log like this fictional HTTP request log:
+[source,ruby]
+    55.3.244.1 GET /index.html 15824 0.043
+
+The pattern for this could be:
+[source,ruby]
+    %{IP:client} %{WORD:method} %{URIPATHPARAM:request} %{NUMBER:bytes} %{NUMBER:duration}
+
+For a more realistic example, let's read these logs from a file:
+[source,ruby]
+    input {
+      file {
+        path => "/var/log/http.log"
+      }
+    }
+    filter {
+      grok {
+        match => { "message" => "%{IP:client} %{WORD:method} %{URIPATHPARAM:request} %{NUMBER:bytes} %{NUMBER:duration}" }
+      }
+    }
+
+After the grok filter, the event will have a few extra fields in it:
+
+* `client: 55.3.244.1`
+* `method: GET`
+* `request: /index.html`
+* `bytes: 15824`
+* `duration: 0.043`
+
+==== Regular Expressions
+
+Grok sits on top of regular expressions, so any regular expressions are valid
+in grok as well. The regular expression library is Oniguruma, and you can see
+the full supported regexp syntax https://github.com/kkos/oniguruma/blob/master/doc/RE[on the Oniguruma
+site].
+
+==== Custom Patterns
+
+Sometimes Logstash doesn't have a pattern you need. For this, you have
+a few options.
+
+First, you can use the Oniguruma syntax for named capture, which will
+let you match a piece of text and save it as a field:
+[source,ruby]
+    (?<field_name>the pattern here)
+
+For example, postfix logs have a `queue id` that is a 10- or 11-character
+hexadecimal value. I can capture that easily like this:
+[source,ruby]
+    (?<queue_id>[0-9A-F]{10,11})
+
+Alternately, you can create a custom patterns file.
+
+* Create a directory called `patterns` with a file in it called `extra`
+  (the file name doesn't matter, but name it meaningfully for yourself)
+* In that file, write the pattern you need as the pattern name, a space, then
+  the regexp for that pattern.
+
+For example, doing the postfix queue id example as above:
+[source,ruby]
+    # contents of ./patterns/postfix:
+    POSTFIX_QUEUEID [0-9A-F]{10,11}
+
+Then use the `patterns_dir` setting in this plugin to tell Logstash where
+your custom patterns directory is. Here's a full example with a sample log:
+[source,ruby]
+    Jan 1 06:25:43 mailserver14 postfix/cleanup[21403]: BEF25A72965: message-id=<20130101142543.5828399CCAF@mailserver14.example.com>
+[source,ruby]
+    filter {
+      grok {
+        patterns_dir => ["./patterns"]
+        match => { "message" => "%{SYSLOGBASE} %{POSTFIX_QUEUEID:queue_id}: %{GREEDYDATA:syslog_message}" }
+      }
+    }
+
+The above will match and result in the following fields:
+
+* `timestamp: Jan 1 06:25:43`
+* `logsource: mailserver14`
+* `program: postfix/cleanup`
+* `pid: 21403`
+* `queue_id: BEF25A72965`
+* `syslog_message: message-id=<20130101142543.5828399CCAF@mailserver14.example.com>`
+
+The `timestamp`, `logsource`, `program`, and `pid` fields come from the
+`SYSLOGBASE` pattern which itself is defined by other patterns.
+
+Another option is to define patterns _inline_ in the filter using `pattern_definitions`.
+This is mostly for convenience and allows you to define a pattern that can be
+used just in that filter. Patterns defined via `pattern_definitions` are not
+available outside of that particular `grok` filter.
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Grok Filter Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-break_on_match>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-keep_empty_captures>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-match>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No
+| <<{version}-plugins-{type}s-{plugin}-named_captures_only>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-overwrite>> |{logstash-ref}/configuration-file-structure.html#array[array]|No
+| <<{version}-plugins-{type}s-{plugin}-pattern_definitions>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No
+| <<{version}-plugins-{type}s-{plugin}-patterns_dir>> |{logstash-ref}/configuration-file-structure.html#array[array]|No
+| <<{version}-plugins-{type}s-{plugin}-patterns_files_glob>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-tag_on_failure>> |{logstash-ref}/configuration-file-structure.html#array[array]|No
+| <<{version}-plugins-{type}s-{plugin}-tag_on_timeout>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-timeout_millis>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+filter plugins.
+
+ 
+
+[id="{version}-plugins-{type}s-{plugin}-break_on_match"]
+===== `break_on_match`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `true`
+
+Break on first match. The first successful match by grok will result in the
+filter being finished. If you want grok to try all patterns (maybe you are
+parsing different things), then set this to false.
+
+[id="{version}-plugins-{type}s-{plugin}-keep_empty_captures"]
+===== `keep_empty_captures`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `false`
+
+If `true`, keep empty captures as event fields.
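+
+A brief sketch (the log format here is invented for illustration): with the
+default `false`, a `user=` with no value produces no `user` field on the event,
+while `keep_empty_captures => true` keeps the empty field:
+[source,ruby]
+    filter {
+      grok {
+        keep_empty_captures => true
+        match => { "message" => "user=%{DATA:user} action=%{WORD:action}" }
+      }
+    }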
+
+[id="{version}-plugins-{type}s-{plugin}-match"]
+===== `match`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash]
+ * Default value is `{}`
+
+A hash of matches of `field => value`.
+
+For example:
+[source,ruby]
+    filter {
+      grok { match => { "message" => "Duration: %{NUMBER:duration}" } }
+    }
+
+If you need to match multiple patterns against a single field, the value can
+be an array of patterns:
+[source,ruby]
+    filter {
+      grok { match => { "message" => [ "Duration: %{NUMBER:duration}", "Speed: %{NUMBER:speed}" ] } }
+    }
+
+
+[id="{version}-plugins-{type}s-{plugin}-named_captures_only"]
+===== `named_captures_only`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `true`
+
+If `true`, only store named captures from grok.
+
+[id="{version}-plugins-{type}s-{plugin}-overwrite"]
+===== `overwrite`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#array[array]
+ * Default value is `[]`
+
+The fields to overwrite.
+
+This allows you to overwrite a value in a field that already exists.
+
+For example, if you have a syslog line in the `message` field, you can
+overwrite the `message` field with part of the match like so:
+[source,ruby]
+    filter {
+      grok {
+        match => { "message" => "%{SYSLOGBASE} %{DATA:message}" }
+        overwrite => [ "message" ]
+      }
+    }
+
+In this case, a line like `May 29 16:37:11 sadness logger: hello world`
+will be parsed and `hello world` will overwrite the original message.
+
+[id="{version}-plugins-{type}s-{plugin}-pattern_definitions"]
+===== `pattern_definitions`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash]
+ * Default value is `{}`
+
+A hash of pattern-name and pattern tuples defining custom patterns to be used by
+the current filter. Patterns matching existing names will override the pre-existing
+definition. Think of these as inline patterns, available just for this definition of
+grok.
+
+[id="{version}-plugins-{type}s-{plugin}-patterns_dir"]
+===== `patterns_dir`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#array[array]
+ * Default value is `[]`
+
+Logstash ships by default with a bunch of patterns, so you don't
+necessarily need to define this yourself unless you are adding additional
+patterns. You can point to multiple pattern directories using this setting.
+Note that Grok will read all files in the directory that match `patterns_files_glob`
+and assume they are pattern files (including any tilde backup files).
+[source,ruby]
+    patterns_dir => ["/opt/logstash/patterns", "/opt/logstash/extra_patterns"]
+
+Pattern files are plain text with the format:
+[source,ruby]
+    NAME PATTERN
+
+For example:
+[source,ruby]
+    NUMBER \d+
+
+The patterns are loaded when the pipeline is created.
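+
+To contrast the two approaches above, the postfix pattern from the Custom
+Patterns section can be shipped inline via `pattern_definitions` instead of as
+a file under `patterns_dir`; a minimal sketch:
+[source,ruby]
+    filter {
+      grok {
+        pattern_definitions => { "POSTFIX_QUEUEID" => "[0-9A-F]{10,11}" }
+        match => { "message" => "%{SYSLOGBASE} %{POSTFIX_QUEUEID:queue_id}: %{GREEDYDATA:syslog_message}" }
+      }
+    }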
+
+[id="{version}-plugins-{type}s-{plugin}-patterns_files_glob"]
+===== `patterns_files_glob`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `"*"`
+
+Glob pattern used to select the pattern files in the directories
+specified by `patterns_dir`.
+
+[id="{version}-plugins-{type}s-{plugin}-tag_on_failure"]
+===== `tag_on_failure`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#array[array]
+ * Default value is `["_grokparsefailure"]`
+
+Append values to the `tags` field when there has been no
+successful match.
+
+[id="{version}-plugins-{type}s-{plugin}-tag_on_timeout"]
+===== `tag_on_timeout`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `"_groktimeout"`
+
+Tag to apply if a grok regexp times out.
+
+[id="{version}-plugins-{type}s-{plugin}-timeout_millis"]
+===== `timeout_millis`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `30000`
+
+Attempt to terminate regexps after this amount of time.
+This applies per pattern if multiple patterns are applied.
+Matching will never time out early, but may take a little longer to time out.
+The actual timeout is approximate, based on a 250ms quantization.
+Set to 0 to disable timeouts.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
diff --git a/docs/versioned-plugins/filters/grok-v3.4.4.asciidoc b/docs/versioned-plugins/filters/grok-v3.4.4.asciidoc
new file mode 100644
index 000000000..eb266e249
--- /dev/null
+++ b/docs/versioned-plugins/filters/grok-v3.4.4.asciidoc
@@ -0,0 +1,332 @@
+:plugin: grok
+:type: filter
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.4.4
+:release_date: 2017-11-07
+:changelog_url: https://github.com/logstash-plugins/logstash-filter-grok/blob/v3.4.4/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Grok filter plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Parse arbitrary text and structure it.
+
+Grok is a great way to parse unstructured log data into something structured and queryable.
+
+This tool is perfect for syslog logs, Apache and other webserver logs, MySQL
+logs, and in general, any log format that is generally written for humans
+and not computer consumption.
+
+Logstash ships with about 120 patterns by default. You can find them here:
+https://github.com/logstash-plugins/logstash-patterns-core/tree/master/patterns. You can add
+your own trivially. (See the `patterns_dir` setting.)
+
+If you need help building patterns to match your logs, you will find the
+http://grokdebug.herokuapp.com and http://grokconstructor.appspot.com/ applications quite useful!
+
+==== Grok Basics
+
+Grok works by combining text patterns into something that matches your
+logs.
+
+The syntax for a grok pattern is `%{SYNTAX:SEMANTIC}`.
+
+The `SYNTAX` is the name of the pattern that will match your text. For
+example, `3.44` will be matched by the `NUMBER` pattern and `55.3.244.1` will
+be matched by the `IP` pattern. The syntax is how you match.
+
+The `SEMANTIC` is the identifier you give to the piece of text being matched.
+For example, `3.44` could be the duration of an event, so you could call it
+simply `duration`. Further, a string `55.3.244.1` might identify the `client`
+making a request.
+
+For the above example, your grok filter would look something like this:
+[source,ruby]
+%{NUMBER:duration} %{IP:client}
+
+Optionally you can add a data type conversion to your grok pattern. By default
+all semantics are saved as strings. If you wish to convert a semantic's data type,
+for example to change a string to an integer, suffix it with the target data type.
+For example, `%{NUMBER:num:int}` converts the `num` semantic from a string to an
+integer. Currently the only supported conversions are `int` and `float`.
+
+.Examples:
+
+With that idea of a syntax and semantic, we can pull out useful fields from a
+sample log like this fictional HTTP request log:
+[source,ruby]
+    55.3.244.1 GET /index.html 15824 0.043
+
+The pattern for this could be:
+[source,ruby]
+    %{IP:client} %{WORD:method} %{URIPATHPARAM:request} %{NUMBER:bytes} %{NUMBER:duration}
+
+For a more realistic example, let's read these logs from a file:
+[source,ruby]
+    input {
+      file {
+        path => "/var/log/http.log"
+      }
+    }
+    filter {
+      grok {
+        match => { "message" => "%{IP:client} %{WORD:method} %{URIPATHPARAM:request} %{NUMBER:bytes} %{NUMBER:duration}" }
+      }
+    }
+
+After the grok filter, the event will have a few extra fields in it:
+
+* `client: 55.3.244.1`
+* `method: GET`
+* `request: /index.html`
+* `bytes: 15824`
+* `duration: 0.043`
+
+==== Regular Expressions
+
+Grok sits on top of regular expressions, so any regular expressions are valid
+in grok as well. The regular expression library is Oniguruma, and you can see
+the full supported regexp syntax https://github.com/kkos/oniguruma/blob/master/doc/RE[on the Oniguruma
+site].
+
+==== Custom Patterns
+
+Sometimes Logstash doesn't have a pattern you need. For this, you have
+a few options.
+
+First, you can use the Oniguruma syntax for named capture, which will
+let you match a piece of text and save it as a field:
+[source,ruby]
+    (?<field_name>the pattern here)
+
+For example, postfix logs have a `queue id` that is a 10- or 11-character
+hexadecimal value. I can capture that easily like this:
+[source,ruby]
+    (?<queue_id>[0-9A-F]{10,11})
+
+Alternately, you can create a custom patterns file.
+
+* Create a directory called `patterns` with a file in it called `extra`
+  (the file name doesn't matter, but name it meaningfully for yourself)
+* In that file, write the pattern you need as the pattern name, a space, then
+  the regexp for that pattern.
+
+For example, doing the postfix queue id example as above:
+[source,ruby]
+    # contents of ./patterns/postfix:
+    POSTFIX_QUEUEID [0-9A-F]{10,11}
+
+Then use the `patterns_dir` setting in this plugin to tell Logstash where
+your custom patterns directory is. Here's a full example with a sample log:
+[source,ruby]
+    Jan 1 06:25:43 mailserver14 postfix/cleanup[21403]: BEF25A72965: message-id=<20130101142543.5828399CCAF@mailserver14.example.com>
+[source,ruby]
+    filter {
+      grok {
+        patterns_dir => ["./patterns"]
+        match => { "message" => "%{SYSLOGBASE} %{POSTFIX_QUEUEID:queue_id}: %{GREEDYDATA:syslog_message}" }
+      }
+    }
+
+The above will match and result in the following fields:
+
+* `timestamp: Jan 1 06:25:43`
+* `logsource: mailserver14`
+* `program: postfix/cleanup`
+* `pid: 21403`
+* `queue_id: BEF25A72965`
+* `syslog_message: message-id=<20130101142543.5828399CCAF@mailserver14.example.com>`
+
+The `timestamp`, `logsource`, `program`, and `pid` fields come from the
+`SYSLOGBASE` pattern which itself is defined by other patterns.
+
+Another option is to define patterns _inline_ in the filter using `pattern_definitions`.
+This is mostly for convenience and allows you to define a pattern that can be
+used just in that filter. Patterns defined via `pattern_definitions` are not
+available outside of that particular `grok` filter.
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Grok Filter Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-break_on_match>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-keep_empty_captures>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-match>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No
+| <<{version}-plugins-{type}s-{plugin}-named_captures_only>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-overwrite>> |{logstash-ref}/configuration-file-structure.html#array[array]|No
+| <<{version}-plugins-{type}s-{plugin}-pattern_definitions>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No
+| <<{version}-plugins-{type}s-{plugin}-patterns_dir>> |{logstash-ref}/configuration-file-structure.html#array[array]|No
+| <<{version}-plugins-{type}s-{plugin}-patterns_files_glob>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-tag_on_failure>> |{logstash-ref}/configuration-file-structure.html#array[array]|No
+| <<{version}-plugins-{type}s-{plugin}-tag_on_timeout>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-timeout_millis>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+filter plugins.
+
+ 
+
+[id="{version}-plugins-{type}s-{plugin}-break_on_match"]
+===== `break_on_match`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `true`
+
+Break on first match. The first successful match by grok will result in the
+filter being finished. If you want grok to try all patterns (maybe you are
+parsing different things), then set this to false.
+
+[id="{version}-plugins-{type}s-{plugin}-keep_empty_captures"]
+===== `keep_empty_captures`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `false`
+
+If `true`, keep empty captures as event fields.
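+
+A brief sketch (the log format here is invented for illustration): with the
+default `false`, a `user=` with no value produces no `user` field on the event,
+while `keep_empty_captures => true` keeps the empty field:
+[source,ruby]
+    filter {
+      grok {
+        keep_empty_captures => true
+        match => { "message" => "user=%{DATA:user} action=%{WORD:action}" }
+      }
+    }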
+
+[id="{version}-plugins-{type}s-{plugin}-match"]
+===== `match`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash]
+ * Default value is `{}`
+
+A hash of matches of `field => value`.
+
+For example:
+[source,ruby]
+    filter {
+      grok { match => { "message" => "Duration: %{NUMBER:duration}" } }
+    }
+
+If you need to match multiple patterns against a single field, the value can
+be an array of patterns:
+[source,ruby]
+    filter {
+      grok { match => { "message" => [ "Duration: %{NUMBER:duration}", "Speed: %{NUMBER:speed}" ] } }
+    }
+
+
+[id="{version}-plugins-{type}s-{plugin}-named_captures_only"]
+===== `named_captures_only`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `true`
+
+If `true`, only store named captures from grok.
+
+[id="{version}-plugins-{type}s-{plugin}-overwrite"]
+===== `overwrite`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#array[array]
+ * Default value is `[]`
+
+The fields to overwrite.
+
+This allows you to overwrite a value in a field that already exists.
+
+For example, if you have a syslog line in the `message` field, you can
+overwrite the `message` field with part of the match like so:
+[source,ruby]
+    filter {
+      grok {
+        match => { "message" => "%{SYSLOGBASE} %{DATA:message}" }
+        overwrite => [ "message" ]
+      }
+    }
+
+In this case, a line like `May 29 16:37:11 sadness logger: hello world`
+will be parsed and `hello world` will overwrite the original message.
+
+[id="{version}-plugins-{type}s-{plugin}-pattern_definitions"]
+===== `pattern_definitions`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash]
+ * Default value is `{}`
+
+A hash of pattern-name and pattern tuples defining custom patterns to be used by
+the current filter. Patterns matching existing names will override the pre-existing
+definition. Think of these as inline patterns, available just for this definition of
+grok.
+
+[id="{version}-plugins-{type}s-{plugin}-patterns_dir"]
+===== `patterns_dir`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#array[array]
+ * Default value is `[]`
+
+Logstash ships by default with a bunch of patterns, so you don't
+necessarily need to define this yourself unless you are adding additional
+patterns. You can point to multiple pattern directories using this setting.
+Note that Grok will read all files in the directory that match `patterns_files_glob`
+and assume they are pattern files (including any tilde backup files).
+[source,ruby]
+    patterns_dir => ["/opt/logstash/patterns", "/opt/logstash/extra_patterns"]
+
+Pattern files are plain text with the format:
+[source,ruby]
+    NAME PATTERN
+
+For example:
+[source,ruby]
+    NUMBER \d+
+
+The patterns are loaded when the pipeline is created.
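+
+To contrast the two approaches above, the postfix pattern from the Custom
+Patterns section can be shipped inline via `pattern_definitions` instead of as
+a file under `patterns_dir`; a minimal sketch:
+[source,ruby]
+    filter {
+      grok {
+        pattern_definitions => { "POSTFIX_QUEUEID" => "[0-9A-F]{10,11}" }
+        match => { "message" => "%{SYSLOGBASE} %{POSTFIX_QUEUEID:queue_id}: %{GREEDYDATA:syslog_message}" }
+      }
+    }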
+
+[id="{version}-plugins-{type}s-{plugin}-patterns_files_glob"]
+===== `patterns_files_glob`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `"*"`
+
+Glob pattern used to select the pattern files in the directories
+specified by `patterns_dir`.
+
+[id="{version}-plugins-{type}s-{plugin}-tag_on_failure"]
+===== `tag_on_failure`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#array[array]
+ * Default value is `["_grokparsefailure"]`
+
+Append values to the `tags` field when there has been no
+successful match.
+
+[id="{version}-plugins-{type}s-{plugin}-tag_on_timeout"]
+===== `tag_on_timeout`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `"_groktimeout"`
+
+Tag to apply if a grok regexp times out.
+
+[id="{version}-plugins-{type}s-{plugin}-timeout_millis"]
+===== `timeout_millis`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `30000`
+
+Attempt to terminate regexps after this amount of time.
+This applies per pattern if multiple patterns are applied.
+Matching will never time out early, but may take a little longer to time out.
+The actual timeout is approximate, based on a 250ms quantization.
+Set to 0 to disable timeouts.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
diff --git a/docs/versioned-plugins/filters/grok-v4.0.0.asciidoc b/docs/versioned-plugins/filters/grok-v4.0.0.asciidoc
new file mode 100644
index 000000000..81b51548c
--- /dev/null
+++ b/docs/versioned-plugins/filters/grok-v4.0.0.asciidoc
@@ -0,0 +1,332 @@
+:plugin: grok
+:type: filter
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v4.0.0
+:release_date: 2017-11-27
+:changelog_url: https://github.com/logstash-plugins/logstash-filter-grok/blob/v4.0.0/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Grok filter plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Parse arbitrary text and structure it.
+
+Grok is a great way to parse unstructured log data into something structured and queryable.
+
+This tool is perfect for syslog logs, Apache and other webserver logs, MySQL
+logs, and in general, any log format that is generally written for humans
+and not computer consumption.
+
+Logstash ships with about 120 patterns by default. You can find them here:
+https://github.com/logstash-plugins/logstash-patterns-core/tree/master/patterns. You can add
+your own trivially. (See the `patterns_dir` setting.)
+
+If you need help building patterns to match your logs, you will find the
+http://grokdebug.herokuapp.com and http://grokconstructor.appspot.com/ applications quite useful!
+
+==== Grok Basics
+
+Grok works by combining text patterns into something that matches your
+logs.
+
+The syntax for a grok pattern is `%{SYNTAX:SEMANTIC}`.
+
+The `SYNTAX` is the name of the pattern that will match your text. For
+example, `3.44` will be matched by the `NUMBER` pattern and `55.3.244.1` will
+be matched by the `IP` pattern. The syntax is how you match.
+
+The `SEMANTIC` is the identifier you give to the piece of text being matched.
+For example, `3.44` could be the duration of an event, so you could call it
+simply `duration`. Further, a string `55.3.244.1` might identify the `client`
+making a request.
+
+For the above example, your grok filter would look something like this:
+[source,ruby]
+%{NUMBER:duration} %{IP:client}
+
+Optionally you can add a data type conversion to your grok pattern. By default
+all semantics are saved as strings. If you wish to convert a semantic's data type,
+for example to change a string to an integer, suffix it with the target data type.
+For example, `%{NUMBER:num:int}` converts the `num` semantic from a string to an
+integer. Currently the only supported conversions are `int` and `float`.
+
+.Examples:
+
+With that idea of a syntax and semantic, we can pull out useful fields from a
+sample log like this fictional HTTP request log:
+[source,ruby]
+    55.3.244.1 GET /index.html 15824 0.043
+
+The pattern for this could be:
+[source,ruby]
+    %{IP:client} %{WORD:method} %{URIPATHPARAM:request} %{NUMBER:bytes} %{NUMBER:duration}
+
+For a more realistic example, let's read these logs from a file:
+[source,ruby]
+    input {
+      file {
+        path => "/var/log/http.log"
+      }
+    }
+    filter {
+      grok {
+        match => { "message" => "%{IP:client} %{WORD:method} %{URIPATHPARAM:request} %{NUMBER:bytes} %{NUMBER:duration}" }
+      }
+    }
+
+After the grok filter, the event will have a few extra fields in it:
+
+* `client: 55.3.244.1`
+* `method: GET`
+* `request: /index.html`
+* `bytes: 15824`
+* `duration: 0.043`
+
+==== Regular Expressions
+
+Grok sits on top of regular expressions, so any regular expressions are valid
+in grok as well. The regular expression library is Oniguruma, and you can see
+the full supported regexp syntax https://github.com/kkos/oniguruma/blob/master/doc/RE[on the Oniguruma
+site].
+
+==== Custom Patterns
+
+Sometimes Logstash doesn't have a pattern you need. For this, you have
+a few options.
+
+First, you can use the Oniguruma syntax for named capture, which will
+let you match a piece of text and save it as a field:
+[source,ruby]
+    (?<field_name>the pattern here)
+
+For example, postfix logs have a `queue id` that is a 10- or 11-character
+hexadecimal value. I can capture that easily like this:
+[source,ruby]
+    (?<queue_id>[0-9A-F]{10,11})
+
+Alternately, you can create a custom patterns file.
+
+* Create a directory called `patterns` with a file in it called `extra`
+  (the file name doesn't matter, but name it meaningfully for yourself)
+* In that file, write the pattern you need as the pattern name, a space, then
+  the regexp for that pattern.
+
+For example, doing the postfix queue id example as above:
+[source,ruby]
+    # contents of ./patterns/postfix:
+    POSTFIX_QUEUEID [0-9A-F]{10,11}
+
+Then use the `patterns_dir` setting in this plugin to tell Logstash where
+your custom patterns directory is. Here's a full example with a sample log:
+[source,ruby]
+    Jan 1 06:25:43 mailserver14 postfix/cleanup[21403]: BEF25A72965: message-id=<20130101142543.5828399CCAF@mailserver14.example.com>
+[source,ruby]
+    filter {
+      grok {
+        patterns_dir => ["./patterns"]
+        match => { "message" => "%{SYSLOGBASE} %{POSTFIX_QUEUEID:queue_id}: %{GREEDYDATA:syslog_message}" }
+      }
+    }
+
+The above will match and result in the following fields:
+
+* `timestamp: Jan 1 06:25:43`
+* `logsource: mailserver14`
+* `program: postfix/cleanup`
+* `pid: 21403`
+* `queue_id: BEF25A72965`
+* `syslog_message: message-id=<20130101142543.5828399CCAF@mailserver14.example.com>`
+
+The `timestamp`, `logsource`, `program`, and `pid` fields come from the
+`SYSLOGBASE` pattern which itself is defined by other patterns.
+
+Another option is to define patterns _inline_ in the filter using `pattern_definitions`.
+
This is mostly for convenience and allows the user to define a pattern that applies
only to that filter. Patterns defined this way in `pattern_definitions` are not
available outside of that particular `grok` filter.


[id="{version}-plugins-{type}s-{plugin}-options"]
==== Grok Filter Configuration Options

This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.

[cols="<,<,<",options="header",]
|=======================================================================
|Setting |Input type|Required
| <<{version}-plugins-{type}s-{plugin}-break_on_match>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
| <<{version}-plugins-{type}s-{plugin}-keep_empty_captures>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
| <<{version}-plugins-{type}s-{plugin}-match>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No
| <<{version}-plugins-{type}s-{plugin}-named_captures_only>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
| <<{version}-plugins-{type}s-{plugin}-overwrite>> |{logstash-ref}/configuration-file-structure.html#array[array]|No
| <<{version}-plugins-{type}s-{plugin}-pattern_definitions>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No
| <<{version}-plugins-{type}s-{plugin}-patterns_dir>> |{logstash-ref}/configuration-file-structure.html#array[array]|No
| <<{version}-plugins-{type}s-{plugin}-patterns_files_glob>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
| <<{version}-plugins-{type}s-{plugin}-tag_on_failure>> |{logstash-ref}/configuration-file-structure.html#array[array]|No
| <<{version}-plugins-{type}s-{plugin}-tag_on_timeout>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
| <<{version}-plugins-{type}s-{plugin}-timeout_millis>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
|=======================================================================

Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
filter plugins.

 

[id="{version}-plugins-{type}s-{plugin}-break_on_match"]
===== `break_on_match`

 * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
 * Default value is `true`

Break on first match. The first successful match by grok will result in the
filter being finished. If you want grok to try all patterns (maybe you are
parsing different things), then set this to false.

[id="{version}-plugins-{type}s-{plugin}-keep_empty_captures"]
===== `keep_empty_captures`

 * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
 * Default value is `false`

If `true`, keep empty captures as event fields.
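For example, in this minimal sketch (the log format and field names are illustrative), a `%{DATA:user}` capture that matches the empty string is kept as an empty `user` field rather than being dropped:
[source,ruby]
    filter {
      grok {
        # keep captures that matched the empty string, e.g. "user= action=login"
        match => { "message" => "user=%{DATA:user} action=%{WORD:action}" }
        keep_empty_captures => true
      }
    }

With the default of `false`, the empty `user` capture would simply be absent from the event.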
+
[id="{version}-plugins-{type}s-{plugin}-match"]
===== `match`

 * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash]
 * Default value is `{}`

A hash of matches of `field => value`.

For example:
[source,ruby]
    filter {
      grok { match => { "message" => "Duration: %{NUMBER:duration}" } }
    }

If you need to match multiple patterns against a single field, the value can be an array of patterns:
[source,ruby]
    filter {
      grok { match => { "message" => [ "Duration: %{NUMBER:duration}", "Speed: %{NUMBER:speed}" ] } }
    }


[id="{version}-plugins-{type}s-{plugin}-named_captures_only"]
===== `named_captures_only`

 * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
 * Default value is `true`

If `true`, only store named captures from grok.

[id="{version}-plugins-{type}s-{plugin}-overwrite"]
===== `overwrite`

 * Value type is {logstash-ref}/configuration-file-structure.html#array[array]
 * Default value is `[]`

The fields to overwrite.

This allows you to overwrite a value in a field that already exists.

For example, if you have a syslog line in the `message` field, you can
overwrite the `message` field with part of the match like so:
[source,ruby]
    filter {
      grok {
        match => { "message" => "%{SYSLOGBASE} %{DATA:message}" }
        overwrite => [ "message" ]
      }
    }

In this case, a line like `May 29 16:37:11 sadness logger: hello world`
will be parsed and `hello world` will overwrite the original message.

[id="{version}-plugins-{type}s-{plugin}-pattern_definitions"]
===== `pattern_definitions`

 * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash]
 * Default value is `{}`

A hash of pattern-name and pattern tuples defining custom patterns to be used by
the current filter. Patterns matching existing names will override the pre-existing
definition. Think of this as inline patterns available just for this definition of
grok (a short sketch appears at the end of the `patterns_dir` section below).

[id="{version}-plugins-{type}s-{plugin}-patterns_dir"]
===== `patterns_dir`

 * Value type is {logstash-ref}/configuration-file-structure.html#array[array]
 * Default value is `[]`

Logstash ships by default with a bunch of patterns, so you don't
necessarily need to define this yourself unless you are adding additional
patterns. You can point to multiple pattern directories using this setting.
Note that Grok will read all files in the directory matching `patterns_files_glob`
and assume that every one of them is a pattern file (including any tilde backup files).
[source,ruby]
    patterns_dir => ["/opt/logstash/patterns", "/opt/logstash/extra_patterns"]

Pattern files are plain text with the format:
[source,ruby]
    NAME PATTERN

For example:
[source,ruby]
    NUMBER \d+

The patterns are loaded when the pipeline is created.
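As a sketch of the inline alternative, the postfix example from the Custom Patterns section can be expressed without any patterns file by moving the pattern into `pattern_definitions`:
[source,ruby]
    filter {
      grok {
        # define POSTFIX_QUEUEID inline instead of in ./patterns/postfix
        pattern_definitions => { "POSTFIX_QUEUEID" => "[0-9A-F]{10,11}" }
        match => { "message" => "%{SYSLOGBASE} %{POSTFIX_QUEUEID:queue_id}: %{GREEDYDATA:syslog_message}" }
      }
    }

A name defined this way is visible only inside this particular `grok` block.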
+
[id="{version}-plugins-{type}s-{plugin}-patterns_files_glob"]
===== `patterns_files_glob`

 * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
 * Default value is `"*"`

Glob pattern, used to select the pattern files in the directories
specified by `patterns_dir`.

[id="{version}-plugins-{type}s-{plugin}-tag_on_failure"]
===== `tag_on_failure`

 * Value type is {logstash-ref}/configuration-file-structure.html#array[array]
 * Default value is `["_grokparsefailure"]`

Append values to the `tags` field when there has been no
successful match.

[id="{version}-plugins-{type}s-{plugin}-tag_on_timeout"]
===== `tag_on_timeout`

 * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
 * Default value is `"_groktimeout"`

Tag to apply if a grok regexp times out.

[id="{version}-plugins-{type}s-{plugin}-timeout_millis"]
===== `timeout_millis`

 * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
 * Default value is `30000`

Attempt to terminate regexps after this amount of time.
This applies per pattern if multiple patterns are applied.
A match will never time out early, but may take a little longer than
this limit to be terminated. The actual timeout is approximate, based
on a 250ms quantization. Set to 0 to disable timeouts.



[id="{version}-plugins-{type}s-{plugin}-common-options"]
include::{include_path}/{type}.asciidoc[]
diff --git a/docs/versioned-plugins/filters/grok-v4.0.1.asciidoc b/docs/versioned-plugins/filters/grok-v4.0.1.asciidoc
new file mode 100644
index 000000000..43e802c99
--- /dev/null
+++ b/docs/versioned-plugins/filters/grok-v4.0.1.asciidoc
@@ -0,0 +1,332 @@
:plugin: grok
:type: filter

///////////////////////////////////////////
START - GENERATED VARIABLES, DO NOT EDIT!
///////////////////////////////////////////
:version: v4.0.1
:release_date: 2017-12-18
:changelog_url: https://github.com/logstash-plugins/logstash-filter-grok/blob/v4.0.1/CHANGELOG.md
:include_path: ../../../../logstash/docs/include
///////////////////////////////////////////
END - GENERATED VARIABLES, DO NOT EDIT!
///////////////////////////////////////////

[id="{version}-plugins-{type}s-{plugin}"]

=== Grok filter plugin {version}

include::{include_path}/plugin_header.asciidoc[]

==== Description

Parse arbitrary text and structure it.

Grok is a great way to parse unstructured log data into something structured and queryable.

This tool is perfect for syslog logs, apache and other webserver logs, mysql
logs, and in general, any log format that is generally written for humans
and not computer consumption.

Logstash ships with about 120 patterns by default. You can find them here:
https://github.com/logstash-plugins/logstash-patterns-core/tree/master/patterns. You can add
your own trivially. (See the `patterns_dir` setting.)

If you need help building patterns to match your logs, you will find the
http://grokdebug.herokuapp.com and http://grokconstructor.appspot.com/ applications quite useful!

==== Grok Basics

Grok works by combining text patterns into something that matches your
logs.

The syntax for a grok pattern is `%{SYNTAX:SEMANTIC}`.

The `SYNTAX` is the name of the pattern that will match your text. For
example, `3.44` will be matched by the `NUMBER` pattern and `55.3.244.1` will
be matched by the `IP` pattern. The syntax is how you match.

The `SEMANTIC` is the identifier you give to the piece of text being matched.
For example, `3.44` could be the duration of an event, so you could call it
simply `duration`. Further, a string `55.3.244.1` might identify the `client`
making a request.
+
For the above example, your grok filter would look something like this:
[source,ruby]
%{NUMBER:duration} %{IP:client}

Optionally you can add a data type conversion to your grok pattern. By default
all semantics are saved as strings. If you wish to convert a semantic's data type,
for example to change a string to an integer, suffix it with the target data type.
For example, `%{NUMBER:num:int}` converts the `num` semantic from a string to an
integer. Currently the only supported conversions are `int` and `float`.

.Examples:

With that idea of a syntax and semantic, we can pull out useful fields from a
sample log like this fictional http request log:
[source,ruby]
    55.3.244.1 GET /index.html 15824 0.043

The pattern for this could be:
[source,ruby]
    %{IP:client} %{WORD:method} %{URIPATHPARAM:request} %{NUMBER:bytes} %{NUMBER:duration}

For a more realistic example, let's read these logs from a file:
[source,ruby]
    input {
      file {
        path => "/var/log/http.log"
      }
    }
    filter {
      grok {
        match => { "message" => "%{IP:client} %{WORD:method} %{URIPATHPARAM:request} %{NUMBER:bytes} %{NUMBER:duration}" }
      }
    }

After the grok filter, the event will have a few extra fields in it:

* `client: 55.3.244.1`
* `method: GET`
* `request: /index.html`
* `bytes: 15824`
* `duration: 0.043`

==== Regular Expressions

Grok sits on top of regular expressions, so any regular expressions are valid
in grok as well. The regular expression library is Oniguruma, and you can see
the full supported regexp syntax https://github.com/kkos/oniguruma/blob/master/doc/RE[on the Oniguruma
site].

==== Custom Patterns

Sometimes logstash doesn't have a pattern you need. For this, you have
a few options.

First, you can use the Oniguruma syntax for named capture which will
let you match a piece of text and save it as a field:
[source,ruby]
    (?<field_name>the pattern here)

For example, postfix logs have a `queue id` that is a 10- or 11-character
hexadecimal value. I can capture that easily like this:
[source,ruby]
    (?<queue_id>[0-9A-F]{10,11})

Alternately, you can create a custom patterns file.

* Create a directory called `patterns` with a file in it called `extra`
  (the file name doesn't matter, but name it meaningfully for yourself)
* In that file, write the pattern you need as the pattern name, a space, then
  the regexp for that pattern.

For example, doing the postfix queue id example as above:
[source,ruby]
    # contents of ./patterns/postfix:
    POSTFIX_QUEUEID [0-9A-F]{10,11}

Then use the `patterns_dir` setting in this plugin to tell logstash where
your custom patterns directory is. Here's a full example with a sample log:
[source,ruby]
    Jan 1 06:25:43 mailserver14 postfix/cleanup[21403]: BEF25A72965: message-id=<20130101142543.5828399CCAF@mailserver14.example.com>
[source,ruby]
    filter {
      grok {
        patterns_dir => ["./patterns"]
        match => { "message" => "%{SYSLOGBASE} %{POSTFIX_QUEUEID:queue_id}: %{GREEDYDATA:syslog_message}" }
      }
    }

The above will match and result in the following fields:

* `timestamp: Jan 1 06:25:43`
* `logsource: mailserver14`
* `program: postfix/cleanup`
* `pid: 21403`
* `queue_id: BEF25A72965`
* `syslog_message: message-id=<20130101142543.5828399CCAF@mailserver14.example.com>`

The `timestamp`, `logsource`, `program`, and `pid` fields come from the
`SYSLOGBASE` pattern which itself is defined by other patterns.

Another option is to define patterns _inline_ in the filter using `pattern_definitions`.
+
This is mostly for convenience and allows the user to define a pattern that applies
only to that filter. Patterns defined this way in `pattern_definitions` are not
available outside of that particular `grok` filter.


[id="{version}-plugins-{type}s-{plugin}-options"]
==== Grok Filter Configuration Options

This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.

[cols="<,<,<",options="header",]
|=======================================================================
|Setting |Input type|Required
| <<{version}-plugins-{type}s-{plugin}-break_on_match>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
| <<{version}-plugins-{type}s-{plugin}-keep_empty_captures>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
| <<{version}-plugins-{type}s-{plugin}-match>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No
| <<{version}-plugins-{type}s-{plugin}-named_captures_only>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
| <<{version}-plugins-{type}s-{plugin}-overwrite>> |{logstash-ref}/configuration-file-structure.html#array[array]|No
| <<{version}-plugins-{type}s-{plugin}-pattern_definitions>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No
| <<{version}-plugins-{type}s-{plugin}-patterns_dir>> |{logstash-ref}/configuration-file-structure.html#array[array]|No
| <<{version}-plugins-{type}s-{plugin}-patterns_files_glob>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
| <<{version}-plugins-{type}s-{plugin}-tag_on_failure>> |{logstash-ref}/configuration-file-structure.html#array[array]|No
| <<{version}-plugins-{type}s-{plugin}-tag_on_timeout>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
| <<{version}-plugins-{type}s-{plugin}-timeout_millis>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
|=======================================================================

Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
filter plugins.

 

[id="{version}-plugins-{type}s-{plugin}-break_on_match"]
===== `break_on_match`

 * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
 * Default value is `true`

Break on first match. The first successful match by grok will result in the
filter being finished. If you want grok to try all patterns (maybe you are
parsing different things), then set this to false.

[id="{version}-plugins-{type}s-{plugin}-keep_empty_captures"]
===== `keep_empty_captures`

 * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
 * Default value is `false`

If `true`, keep empty captures as event fields.
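For example, in this minimal sketch (the log format and field names are illustrative), a `%{DATA:user}` capture that matches the empty string is kept as an empty `user` field rather than being dropped:
[source,ruby]
    filter {
      grok {
        # keep captures that matched the empty string, e.g. "user= action=login"
        match => { "message" => "user=%{DATA:user} action=%{WORD:action}" }
        keep_empty_captures => true
      }
    }

With the default of `false`, the empty `user` capture would simply be absent from the event.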
+
[id="{version}-plugins-{type}s-{plugin}-match"]
===== `match`

 * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash]
 * Default value is `{}`

A hash of matches of `field => value`.

For example:
[source,ruby]
    filter {
      grok { match => { "message" => "Duration: %{NUMBER:duration}" } }
    }

If you need to match multiple patterns against a single field, the value can be an array of patterns:
[source,ruby]
    filter {
      grok { match => { "message" => [ "Duration: %{NUMBER:duration}", "Speed: %{NUMBER:speed}" ] } }
    }


[id="{version}-plugins-{type}s-{plugin}-named_captures_only"]
===== `named_captures_only`

 * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
 * Default value is `true`

If `true`, only store named captures from grok.

[id="{version}-plugins-{type}s-{plugin}-overwrite"]
===== `overwrite`

 * Value type is {logstash-ref}/configuration-file-structure.html#array[array]
 * Default value is `[]`

The fields to overwrite.

This allows you to overwrite a value in a field that already exists.

For example, if you have a syslog line in the `message` field, you can
overwrite the `message` field with part of the match like so:
[source,ruby]
    filter {
      grok {
        match => { "message" => "%{SYSLOGBASE} %{DATA:message}" }
        overwrite => [ "message" ]
      }
    }

In this case, a line like `May 29 16:37:11 sadness logger: hello world`
will be parsed and `hello world` will overwrite the original message.

[id="{version}-plugins-{type}s-{plugin}-pattern_definitions"]
===== `pattern_definitions`

 * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash]
 * Default value is `{}`

A hash of pattern-name and pattern tuples defining custom patterns to be used by
the current filter. Patterns matching existing names will override the pre-existing
definition. Think of this as inline patterns available just for this definition of
grok (a short sketch appears at the end of the `patterns_dir` section below).

[id="{version}-plugins-{type}s-{plugin}-patterns_dir"]
===== `patterns_dir`

 * Value type is {logstash-ref}/configuration-file-structure.html#array[array]
 * Default value is `[]`

Logstash ships by default with a bunch of patterns, so you don't
necessarily need to define this yourself unless you are adding additional
patterns. You can point to multiple pattern directories using this setting.
Note that Grok will read all files in the directory matching `patterns_files_glob`
and assume that every one of them is a pattern file (including any tilde backup files).
[source,ruby]
    patterns_dir => ["/opt/logstash/patterns", "/opt/logstash/extra_patterns"]

Pattern files are plain text with the format:
[source,ruby]
    NAME PATTERN

For example:
[source,ruby]
    NUMBER \d+

The patterns are loaded when the pipeline is created.
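As a sketch of the inline alternative, the postfix example from the Custom Patterns section can be expressed without any patterns file by moving the pattern into `pattern_definitions`:
[source,ruby]
    filter {
      grok {
        # define POSTFIX_QUEUEID inline instead of in ./patterns/postfix
        pattern_definitions => { "POSTFIX_QUEUEID" => "[0-9A-F]{10,11}" }
        match => { "message" => "%{SYSLOGBASE} %{POSTFIX_QUEUEID:queue_id}: %{GREEDYDATA:syslog_message}" }
      }
    }

A name defined this way is visible only inside this particular `grok` block.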
+
[id="{version}-plugins-{type}s-{plugin}-patterns_files_glob"]
===== `patterns_files_glob`

 * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
 * Default value is `"*"`

Glob pattern, used to select the pattern files in the directories
specified by `patterns_dir`.

[id="{version}-plugins-{type}s-{plugin}-tag_on_failure"]
===== `tag_on_failure`

 * Value type is {logstash-ref}/configuration-file-structure.html#array[array]
 * Default value is `["_grokparsefailure"]`

Append values to the `tags` field when there has been no
successful match.

[id="{version}-plugins-{type}s-{plugin}-tag_on_timeout"]
===== `tag_on_timeout`

 * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
 * Default value is `"_groktimeout"`

Tag to apply if a grok regexp times out.

[id="{version}-plugins-{type}s-{plugin}-timeout_millis"]
===== `timeout_millis`

 * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
 * Default value is `30000`

Attempt to terminate regexps after this amount of time.
This applies per pattern if multiple patterns are applied.
A match will never time out early, but may take a little longer than
this limit to be terminated. The actual timeout is approximate, based
on a 250ms quantization. Set to 0 to disable timeouts.



[id="{version}-plugins-{type}s-{plugin}-common-options"]
include::{include_path}/{type}.asciidoc[]
diff --git a/docs/versioned-plugins/filters/mutate-index.asciidoc b/docs/versioned-plugins/filters/mutate-index.asciidoc
new file mode 100644
index 000000000..244760df3
--- /dev/null
+++ b/docs/versioned-plugins/filters/mutate-index.asciidoc
@@ -0,0 +1,18 @@
:plugin: mutate
:type: filter

include::{include_path}/version-list-intro.asciidoc[]

|=======================================================================
| Version | Release Date
| <<v3.2.0-plugins-filters-mutate,v3.2.0>> | 2017-11-28
| <<v3.1.7-plugins-filters-mutate,v3.1.7>> | 2017-11-07
| <<v3.1.6-plugins-filters-mutate,v3.1.6>> | 2017-08-15
| <<v3.1.5-plugins-filters-mutate,v3.1.5>> | 2017-06-23
|=======================================================================

include::mutate-v3.2.0.asciidoc[]
include::mutate-v3.1.7.asciidoc[]
include::mutate-v3.1.6.asciidoc[]
include::mutate-v3.1.5.asciidoc[]

diff --git a/docs/versioned-plugins/filters/mutate-v3.1.5.asciidoc b/docs/versioned-plugins/filters/mutate-v3.1.5.asciidoc
new file mode 100644
index 000000000..6d0a3ccc0
--- /dev/null
+++ b/docs/versioned-plugins/filters/mutate-v3.1.5.asciidoc
@@ -0,0 +1,282 @@
:plugin: mutate
:type: filter

///////////////////////////////////////////
START - GENERATED VARIABLES, DO NOT EDIT!
///////////////////////////////////////////
:version: v3.1.5
:release_date: 2017-06-23
:changelog_url: https://github.com/logstash-plugins/logstash-filter-mutate/blob/v3.1.5/CHANGELOG.md
:include_path: ../../../../logstash/docs/include
///////////////////////////////////////////
END - GENERATED VARIABLES, DO NOT EDIT!
///////////////////////////////////////////

[id="{version}-plugins-{type}s-{plugin}"]

=== Mutate filter plugin {version}

include::{include_path}/plugin_header.asciidoc[]

==== Description

The mutate filter allows you to perform general mutations on fields. You
can rename, remove, replace, and modify fields in your events.

[id="{version}-plugins-{type}s-{plugin}-options"]
==== Mutate Filter Configuration Options

This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
[cols="<,<,<",options="header",]
|=======================================================================
|Setting |Input type|Required
| <<{version}-plugins-{type}s-{plugin}-convert>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No
| <<{version}-plugins-{type}s-{plugin}-copy>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No
| <<{version}-plugins-{type}s-{plugin}-gsub>> |{logstash-ref}/configuration-file-structure.html#array[array]|No
| <<{version}-plugins-{type}s-{plugin}-join>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No
| <<{version}-plugins-{type}s-{plugin}-lowercase>> |{logstash-ref}/configuration-file-structure.html#array[array]|No
| <<{version}-plugins-{type}s-{plugin}-merge>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No
| <<{version}-plugins-{type}s-{plugin}-rename>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No
| <<{version}-plugins-{type}s-{plugin}-replace>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No
| <<{version}-plugins-{type}s-{plugin}-split>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No
| <<{version}-plugins-{type}s-{plugin}-strip>> |{logstash-ref}/configuration-file-structure.html#array[array]|No
| <<{version}-plugins-{type}s-{plugin}-update>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No
| <<{version}-plugins-{type}s-{plugin}-uppercase>> |{logstash-ref}/configuration-file-structure.html#array[array]|No
|=======================================================================

Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
filter plugins.

 

[id="{version}-plugins-{type}s-{plugin}-convert"]
===== `convert`

 * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash]
 * There is no default value for this setting.

Convert a field's value to a different type, like turning a string to an
integer. If the field value is an array, all members will be converted.
If the field is a hash, no action will be taken.

If the conversion type is `boolean`, the acceptable values are:

* **True:** `true`, `t`, `yes`, `y`, and `1`
* **False:** `false`, `f`, `no`, `n`, and `0`

If a value other than these is provided, it will pass straight through
and log a warning message.

Valid conversion targets are: integer, float, string, and boolean.

Example:
[source,ruby]
    filter {
      mutate {
        convert => { "fieldname" => "integer" }
      }
    }

[id="{version}-plugins-{type}s-{plugin}-copy"]
===== `copy`

 * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash]
 * There is no default value for this setting.

Copy an existing field to another field. The existing target field will be overridden.

Example:
[source,ruby]
    filter {
      mutate {
        copy => { "source_field" => "dest_field" }
      }
    }

[id="{version}-plugins-{type}s-{plugin}-gsub"]
===== `gsub`

 * Value type is {logstash-ref}/configuration-file-structure.html#array[array]
 * There is no default value for this setting.

Convert a string field by applying a regular expression and a replacement.
If the field is not a string, no action will be taken.

This configuration takes an array consisting of 3 elements per
field/substitution.

Be aware that backslashes in the config file need to be escaped.
+
Example:
[source,ruby]
    filter {
      mutate {
        gsub => [
          # replace all forward slashes with underscore
          "fieldname", "/", "_",
          # replace backslashes, question marks, hashes, and minuses
          # with a dot "."
          "fieldname2", "[\\?#-]", "."
        ]
      }
    }


[id="{version}-plugins-{type}s-{plugin}-join"]
===== `join`

 * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash]
 * There is no default value for this setting.

Join an array with a separator character. Does nothing on non-array fields.

Example:
[source,ruby]
    filter {
      mutate {
        join => { "fieldname" => "," }
      }
    }

[id="{version}-plugins-{type}s-{plugin}-lowercase"]
===== `lowercase`

 * Value type is {logstash-ref}/configuration-file-structure.html#array[array]
 * There is no default value for this setting.

Convert a string to its lowercase equivalent.

Example:
[source,ruby]
    filter {
      mutate {
        lowercase => [ "fieldname" ]
      }
    }

[id="{version}-plugins-{type}s-{plugin}-merge"]
===== `merge`

 * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash]
 * There is no default value for this setting.

Merge two fields of arrays or hashes.
String fields will automatically be converted into an array, so:
==========================
 `array` + `string` will work
 `string` + `string` will result in a 2-entry array in `dest_field`
 `array` and `hash` will not work
==========================
Example:
[source,ruby]
    filter {
      mutate {
        merge => { "dest_field" => "added_field" }
      }
    }

[id="{version}-plugins-{type}s-{plugin}-rename"]
===== `rename`

 * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash]
 * There is no default value for this setting.

Rename one or more fields.

Example:
[source,ruby]
    filter {
      mutate {
        # Renames the 'HOSTORIP' field to 'client_ip'
        rename => { "HOSTORIP" => "client_ip" }
      }
    }

[id="{version}-plugins-{type}s-{plugin}-replace"]
===== `replace`

 * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash]
 * There is no default value for this setting.

Replace a field with a new value. The new value can include `%{foo}` strings
to help you build a new value from other parts of the event.

Example:
[source,ruby]
    filter {
      mutate {
        replace => { "message" => "%{source_host}: My new message" }
      }
    }

[id="{version}-plugins-{type}s-{plugin}-split"]
===== `split`

 * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash]
 * There is no default value for this setting.

Split a field to an array using a separator character. Only works on string
fields.

Example:
[source,ruby]
    filter {
      mutate {
        split => { "fieldname" => "," }
      }
    }

[id="{version}-plugins-{type}s-{plugin}-strip"]
===== `strip`

 * Value type is {logstash-ref}/configuration-file-structure.html#array[array]
 * There is no default value for this setting.

Strip whitespace from field. NOTE: this only works on leading and trailing whitespace.

Example:
[source,ruby]
    filter {
      mutate {
        strip => ["field1", "field2"]
      }
    }

[id="{version}-plugins-{type}s-{plugin}-update"]
===== `update`

 * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash]
 * There is no default value for this setting.

Update an existing field with a new value. If the field does not exist,
then no action will be taken.
+ +Example: +[source,ruby] + filter { + mutate { + update => { "sample" => "My new message" } + } + } + +[id="{version}-plugins-{type}s-{plugin}-uppercase"] +===== `uppercase` + + * Value type is {logstash-ref}/configuration-file-structure.html#array[array] + * There is no default value for this setting. + +Convert a string to its uppercase equivalent. + +Example: +[source,ruby] + filter { + mutate { + uppercase => [ "fieldname" ] + } + } + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/filters/mutate-v3.1.6.asciidoc b/docs/versioned-plugins/filters/mutate-v3.1.6.asciidoc new file mode 100644 index 000000000..f8e360d43 --- /dev/null +++ b/docs/versioned-plugins/filters/mutate-v3.1.6.asciidoc @@ -0,0 +1,283 @@ +:plugin: mutate +:type: filter + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.1.6 +:release_date: 2017-08-15 +:changelog_url: https://github.com/logstash-plugins/logstash-filter-mutate/blob/v3.1.6/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Mutate filter plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +The mutate filter allows you to perform general mutations on fields. You +can rename, remove, replace, and modify fields in your events. + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Mutate Filter Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. 
+
[cols="<,<,<",options="header",]
|=======================================================================
|Setting |Input type|Required
| <<{version}-plugins-{type}s-{plugin}-convert>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No
| <<{version}-plugins-{type}s-{plugin}-copy>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No
| <<{version}-plugins-{type}s-{plugin}-gsub>> |{logstash-ref}/configuration-file-structure.html#array[array]|No
| <<{version}-plugins-{type}s-{plugin}-join>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No
| <<{version}-plugins-{type}s-{plugin}-lowercase>> |{logstash-ref}/configuration-file-structure.html#array[array]|No
| <<{version}-plugins-{type}s-{plugin}-merge>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No
| <<{version}-plugins-{type}s-{plugin}-rename>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No
| <<{version}-plugins-{type}s-{plugin}-replace>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No
| <<{version}-plugins-{type}s-{plugin}-split>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No
| <<{version}-plugins-{type}s-{plugin}-strip>> |{logstash-ref}/configuration-file-structure.html#array[array]|No
| <<{version}-plugins-{type}s-{plugin}-update>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No
| <<{version}-plugins-{type}s-{plugin}-uppercase>> |{logstash-ref}/configuration-file-structure.html#array[array]|No
|=======================================================================

Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
filter plugins.

 

[id="{version}-plugins-{type}s-{plugin}-convert"]
===== `convert`

 * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash]
 * There is no default value for this setting.

Convert a field's value to a different type, like turning a string to an
integer. If the field value is an array, all members will be converted.
If the field is a hash, no action will be taken.

If the conversion type is `boolean`, the acceptable values are:

* **True:** `true`, `t`, `yes`, `y`, and `1`
* **False:** `false`, `f`, `no`, `n`, and `0`

If a value other than these is provided, it will pass straight through
and log a warning message.

Valid conversion targets are: integer, float, string, and boolean.

Example:
[source,ruby]
    filter {
      mutate {
        convert => { "fieldname" => "integer" }
      }
    }

[id="{version}-plugins-{type}s-{plugin}-copy"]
===== `copy`

 * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash]
 * There is no default value for this setting.

Copy an existing field to another field. The existing target field will be overridden.

Example:
[source,ruby]
    filter {
      mutate {
        copy => { "source_field" => "dest_field" }
      }
    }

[id="{version}-plugins-{type}s-{plugin}-gsub"]
===== `gsub`

 * Value type is {logstash-ref}/configuration-file-structure.html#array[array]
 * There is no default value for this setting.

Match a regular expression against a field value and replace all matches
with a replacement string. Only fields that are strings or arrays of
strings are supported. For other kinds of fields, no action will be taken.

This configuration takes an array consisting of 3 elements per
field/substitution.

Be aware that backslashes in the config file need to be escaped.
+
Example:
[source,ruby]
    filter {
      mutate {
        gsub => [
          # replace all forward slashes with underscore
          "fieldname", "/", "_",
          # replace backslashes, question marks, hashes, and minuses
          # with a dot "."
          "fieldname2", "[\\?#-]", "."
        ]
      }
    }


[id="{version}-plugins-{type}s-{plugin}-join"]
===== `join`

 * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash]
 * There is no default value for this setting.

Join an array with a separator character. Does nothing on non-array fields.

Example:
[source,ruby]
    filter {
      mutate {
        join => { "fieldname" => "," }
      }
    }

[id="{version}-plugins-{type}s-{plugin}-lowercase"]
===== `lowercase`

 * Value type is {logstash-ref}/configuration-file-structure.html#array[array]
 * There is no default value for this setting.

Convert a string to its lowercase equivalent.

Example:
[source,ruby]
    filter {
      mutate {
        lowercase => [ "fieldname" ]
      }
    }

[id="{version}-plugins-{type}s-{plugin}-merge"]
===== `merge`

 * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash]
 * There is no default value for this setting.

Merge two fields of arrays or hashes.
String fields will automatically be converted into an array, so:
==========================
 `array` + `string` will work
 `string` + `string` will result in a 2-entry array in `dest_field`
 `array` and `hash` will not work
==========================
Example:
[source,ruby]
    filter {
      mutate {
        merge => { "dest_field" => "added_field" }
      }
    }

[id="{version}-plugins-{type}s-{plugin}-rename"]
===== `rename`

 * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash]
 * There is no default value for this setting.

Rename one or more fields.

Example:
[source,ruby]
    filter {
      mutate {
        # Renames the 'HOSTORIP' field to 'client_ip'
        rename => { "HOSTORIP" => "client_ip" }
      }
    }

[id="{version}-plugins-{type}s-{plugin}-replace"]
===== `replace`

 * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash]
 * There is no default value for this setting.

Replace a field with a new value. The new value can include `%{foo}` strings
to help you build a new value from other parts of the event.

Example:
[source,ruby]
    filter {
      mutate {
        replace => { "message" => "%{source_host}: My new message" }
      }
    }

[id="{version}-plugins-{type}s-{plugin}-split"]
===== `split`

 * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash]
 * There is no default value for this setting.

Split a field to an array using a separator character. Only works on string
fields.

Example:
[source,ruby]
    filter {
      mutate {
        split => { "fieldname" => "," }
      }
    }

[id="{version}-plugins-{type}s-{plugin}-strip"]
===== `strip`

 * Value type is {logstash-ref}/configuration-file-structure.html#array[array]
 * There is no default value for this setting.

Strip whitespace from field. NOTE: this only works on leading and trailing whitespace.

Example:
[source,ruby]
    filter {
      mutate {
        strip => ["field1", "field2"]
      }
    }

[id="{version}-plugins-{type}s-{plugin}-update"]
===== `update`

 * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash]
 * There is no default value for this setting.

Update an existing field with a new value. If the field does not exist,
then no action will be taken.
+ +Example: +[source,ruby] + filter { + mutate { + update => { "sample" => "My new message" } + } + } + +[id="{version}-plugins-{type}s-{plugin}-uppercase"] +===== `uppercase` + + * Value type is {logstash-ref}/configuration-file-structure.html#array[array] + * There is no default value for this setting. + +Convert a string to its uppercase equivalent. + +Example: +[source,ruby] + filter { + mutate { + uppercase => [ "fieldname" ] + } + } + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] diff --git a/docs/versioned-plugins/filters/mutate-v3.1.7.asciidoc b/docs/versioned-plugins/filters/mutate-v3.1.7.asciidoc new file mode 100644 index 000000000..0be673a5c --- /dev/null +++ b/docs/versioned-plugins/filters/mutate-v3.1.7.asciidoc @@ -0,0 +1,283 @@ +:plugin: mutate +:type: filter + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.1.7 +:release_date: 2017-11-07 +:changelog_url: https://github.com/logstash-plugins/logstash-filter-mutate/blob/v3.1.7/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Mutate filter plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +The mutate filter allows you to perform general mutations on fields. You +can rename, remove, replace, and modify fields in your events. + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Mutate Filter Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-convert>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No +| <<{version}-plugins-{type}s-{plugin}-copy>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No +| <<{version}-plugins-{type}s-{plugin}-gsub>> |{logstash-ref}/configuration-file-structure.html#array[array]|No +| <<{version}-plugins-{type}s-{plugin}-join>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No +| <<{version}-plugins-{type}s-{plugin}-lowercase>> |{logstash-ref}/configuration-file-structure.html#array[array]|No +| <<{version}-plugins-{type}s-{plugin}-merge>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No +| <<{version}-plugins-{type}s-{plugin}-rename>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No +| <<{version}-plugins-{type}s-{plugin}-replace>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No +| <<{version}-plugins-{type}s-{plugin}-split>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No +| <<{version}-plugins-{type}s-{plugin}-strip>> |{logstash-ref}/configuration-file-structure.html#array[array]|No +| <<{version}-plugins-{type}s-{plugin}-update>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No +| <<{version}-plugins-{type}s-{plugin}-uppercase>> |{logstash-ref}/configuration-file-structure.html#array[array]|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +filter plugins. 
+

 

[id="{version}-plugins-{type}s-{plugin}-convert"]
===== `convert`

 * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash]
 * There is no default value for this setting.

Convert a field's value to a different type, like turning a string to an
integer. If the field value is an array, all members will be converted.
If the field is a hash, no action will be taken.

If the conversion type is `boolean`, the acceptable values are:

* **True:** `true`, `t`, `yes`, `y`, and `1`
* **False:** `false`, `f`, `no`, `n`, and `0`

If a value other than these is provided, it will pass straight through
and log a warning message.

Valid conversion targets are: integer, float, string, and boolean.

Example:
[source,ruby]
    filter {
      mutate {
        convert => { "fieldname" => "integer" }
      }
    }

[id="{version}-plugins-{type}s-{plugin}-copy"]
===== `copy`

 * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash]
 * There is no default value for this setting.

Copy an existing field to another field. The existing target field will be overridden.

Example:
[source,ruby]
    filter {
      mutate {
        copy => { "source_field" => "dest_field" }
      }
    }

[id="{version}-plugins-{type}s-{plugin}-gsub"]
===== `gsub`

 * Value type is {logstash-ref}/configuration-file-structure.html#array[array]
 * There is no default value for this setting.

Match a regular expression against a field value and replace all matches
with a replacement string. Only fields that are strings or arrays of
strings are supported. For other kinds of fields, no action will be taken.

This configuration takes an array consisting of 3 elements per
field/substitution.

Be aware that backslashes in the config file need to be escaped.

Example:
[source,ruby]
    filter {
      mutate {
        gsub => [
          # replace all forward slashes with underscore
          "fieldname", "/", "_",
          # replace backslashes, question marks, hashes, and minuses
          # with a dot "."
          "fieldname2", "[\\?#-]", "."
        ]
      }
    }


[id="{version}-plugins-{type}s-{plugin}-join"]
===== `join`

 * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash]
 * There is no default value for this setting.

Join an array with a separator character. Does nothing on non-array fields.

Example:
[source,ruby]
    filter {
      mutate {
        join => { "fieldname" => "," }
      }
    }

[id="{version}-plugins-{type}s-{plugin}-lowercase"]
===== `lowercase`

 * Value type is {logstash-ref}/configuration-file-structure.html#array[array]
 * There is no default value for this setting.

Convert a string to its lowercase equivalent.

Example:
[source,ruby]
    filter {
      mutate {
        lowercase => [ "fieldname" ]
      }
    }

[id="{version}-plugins-{type}s-{plugin}-merge"]
===== `merge`

 * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash]
 * There is no default value for this setting.

Merge two fields of arrays or hashes.
String fields will automatically be converted into an array, so:
==========================
 `array` + `string` will work
 `string` + `string` will result in a 2-entry array in `dest_field`
 `array` and `hash` will not work
==========================
Example:
[source,ruby]
    filter {
      mutate {
        merge => { "dest_field" => "added_field" }
      }
    }

[id="{version}-plugins-{type}s-{plugin}-rename"]
===== `rename`

 * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash]
 * There is no default value for this setting.
+ +Rename one or more fields. + +Example: +[source,ruby] + filter { + mutate { + # Renames the 'HOSTORIP' field to 'client_ip' + rename => { "HOSTORIP" => "client_ip" } + } + } + +[id="{version}-plugins-{type}s-{plugin}-replace"] +===== `replace` + + * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash] + * There is no default value for this setting. + +Replace a field with a new value. The new value can include `%{foo}` strings +to help you build a new value from other parts of the event. + +Example: +[source,ruby] + filter { + mutate { + replace => { "message" => "%{source_host}: My new message" } + } + } + +[id="{version}-plugins-{type}s-{plugin}-split"] +===== `split` + + * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash] + * There is no default value for this setting. + +Split a field to an array using a separator character. Only works on string +fields. + +Example: +[source,ruby] + filter { + mutate { + split => { "fieldname" => "," } + } + } + +[id="{version}-plugins-{type}s-{plugin}-strip"] +===== `strip` + + * Value type is {logstash-ref}/configuration-file-structure.html#array[array] + * There is no default value for this setting. + +Strip whitespace from field. NOTE: this only works on leading and trailing whitespace. + +Example: +[source,ruby] + filter { + mutate { + strip => ["field1", "field2"] + } + } + +[id="{version}-plugins-{type}s-{plugin}-update"] +===== `update` + + * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash] + * There is no default value for this setting. + +Update an existing field with a new value. If the field does not exist, +then no action will be taken. + +Example: +[source,ruby] + filter { + mutate { + update => { "sample" => "My new message" } + } + } + +[id="{version}-plugins-{type}s-{plugin}-uppercase"] +===== `uppercase` + + * Value type is {logstash-ref}/configuration-file-structure.html#array[array] + * There is no default value for this setting. + +Convert a string to its uppercase equivalent. + +Example: +[source,ruby] + filter { + mutate { + uppercase => [ "fieldname" ] + } + } + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] diff --git a/docs/versioned-plugins/filters/mutate-v3.2.0.asciidoc b/docs/versioned-plugins/filters/mutate-v3.2.0.asciidoc new file mode 100644 index 000000000..b55d1273e --- /dev/null +++ b/docs/versioned-plugins/filters/mutate-v3.2.0.asciidoc @@ -0,0 +1,287 @@ +:plugin: mutate +:type: filter + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.2.0 +:release_date: 2017-11-28 +:changelog_url: https://github.com/logstash-plugins/logstash-filter-mutate/blob/v3.2.0/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Mutate filter plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +The mutate filter allows you to perform general mutations on fields. You +can rename, remove, replace, and modify fields in your events. + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Mutate Filter Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. 
+
[cols="<,<,<",options="header",]
|=======================================================================
|Setting |Input type|Required
| <<{version}-plugins-{type}s-{plugin}-convert>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No
| <<{version}-plugins-{type}s-{plugin}-copy>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No
| <<{version}-plugins-{type}s-{plugin}-gsub>> |{logstash-ref}/configuration-file-structure.html#array[array]|No
| <<{version}-plugins-{type}s-{plugin}-join>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No
| <<{version}-plugins-{type}s-{plugin}-lowercase>> |{logstash-ref}/configuration-file-structure.html#array[array]|No
| <<{version}-plugins-{type}s-{plugin}-merge>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No
| <<{version}-plugins-{type}s-{plugin}-rename>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No
| <<{version}-plugins-{type}s-{plugin}-replace>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No
| <<{version}-plugins-{type}s-{plugin}-split>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No
| <<{version}-plugins-{type}s-{plugin}-strip>> |{logstash-ref}/configuration-file-structure.html#array[array]|No
| <<{version}-plugins-{type}s-{plugin}-update>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No
| <<{version}-plugins-{type}s-{plugin}-uppercase>> |{logstash-ref}/configuration-file-structure.html#array[array]|No
|=======================================================================

Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
filter plugins.

 

[id="{version}-plugins-{type}s-{plugin}-convert"]
===== `convert`

 * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash]
 * There is no default value for this setting.

Convert a field's value to a different type, like turning a string to an
integer. If the field value is an array, all members will be converted.
If the field is a hash, no action will be taken.

If the conversion type is `boolean`, the acceptable values are:

* **True:** `true`, `t`, `yes`, `y`, and `1`
* **False:** `false`, `f`, `no`, `n`, and `0`

If a value other than these is provided, it will pass straight through
and log a warning message.

If the conversion type is `integer` and the value is a boolean, it will be converted as:

* **True:** `1`
* **False:** `0`

Valid conversion targets are: integer, float, string, and boolean.

Example:
[source,ruby]
    filter {
      mutate {
        convert => { "fieldname" => "integer" }
      }
    }

[id="{version}-plugins-{type}s-{plugin}-copy"]
===== `copy`

 * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash]
 * There is no default value for this setting.

Copy an existing field to another field. The existing target field will be overridden.

Example:
[source,ruby]
    filter {
      mutate {
        copy => { "source_field" => "dest_field" }
      }
    }

[id="{version}-plugins-{type}s-{plugin}-gsub"]
===== `gsub`

 * Value type is {logstash-ref}/configuration-file-structure.html#array[array]
 * There is no default value for this setting.

Match a regular expression against a field value and replace all matches
with a replacement string. Only fields that are strings or arrays of
strings are supported. For other kinds of fields, no action will be taken.

This configuration takes an array consisting of 3 elements per
field/substitution.
+
Be aware that backslashes in the config file need to be escaped.

Example:
[source,ruby]
    filter {
      mutate {
        gsub => [
          # replace all forward slashes with underscore
          "fieldname", "/", "_",
          # replace backslashes, question marks, hashes, and minuses
          # with a dot "."
          "fieldname2", "[\\?#-]", "."
        ]
      }
    }


[id="{version}-plugins-{type}s-{plugin}-join"]
===== `join`

 * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash]
 * There is no default value for this setting.

Join an array with a separator character. Does nothing on non-array fields.

Example:
[source,ruby]
    filter {
      mutate {
        join => { "fieldname" => "," }
      }
    }

[id="{version}-plugins-{type}s-{plugin}-lowercase"]
===== `lowercase`

 * Value type is {logstash-ref}/configuration-file-structure.html#array[array]
 * There is no default value for this setting.

Convert a string to its lowercase equivalent.

Example:
[source,ruby]
    filter {
      mutate {
        lowercase => [ "fieldname" ]
      }
    }

[id="{version}-plugins-{type}s-{plugin}-merge"]
===== `merge`

 * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash]
 * There is no default value for this setting.

Merge two fields of arrays or hashes.
String fields will automatically be converted into an array, so:
==========================
 `array` + `string` will work
 `string` + `string` will result in a 2-entry array in `dest_field`
 `array` and `hash` will not work
==========================
Example:
[source,ruby]
    filter {
      mutate {
        merge => { "dest_field" => "added_field" }
      }
    }

[id="{version}-plugins-{type}s-{plugin}-rename"]
===== `rename`

 * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash]
 * There is no default value for this setting.

Rename one or more fields.

Example:
[source,ruby]
    filter {
      mutate {
        # Renames the 'HOSTORIP' field to 'client_ip'
        rename => { "HOSTORIP" => "client_ip" }
      }
    }

[id="{version}-plugins-{type}s-{plugin}-replace"]
===== `replace`

 * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash]
 * There is no default value for this setting.

Replace a field with a new value. The new value can include `%{foo}` strings
to help you build a new value from other parts of the event.

Example:
[source,ruby]
    filter {
      mutate {
        replace => { "message" => "%{source_host}: My new message" }
      }
    }

[id="{version}-plugins-{type}s-{plugin}-split"]
===== `split`

 * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash]
 * There is no default value for this setting.

Split a field to an array using a separator character. Only works on string
fields.

Example:
[source,ruby]
    filter {
      mutate {
        split => { "fieldname" => "," }
      }
    }

[id="{version}-plugins-{type}s-{plugin}-strip"]
===== `strip`

 * Value type is {logstash-ref}/configuration-file-structure.html#array[array]
 * There is no default value for this setting.

Strip whitespace from field. NOTE: this only works on leading and trailing whitespace.

Example:
[source,ruby]
    filter {
      mutate {
        strip => ["field1", "field2"]
      }
    }

[id="{version}-plugins-{type}s-{plugin}-update"]
===== `update`

 * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash]
 * There is no default value for this setting.

Update an existing field with a new value. If the field does not exist,
then no action will be taken.
+
Example:
[source,ruby]
    filter {
      mutate {
        update => { "sample" => "My new message" }
      }
    }

[id="{version}-plugins-{type}s-{plugin}-uppercase"]
===== `uppercase`

 * Value type is {logstash-ref}/configuration-file-structure.html#array[array]
 * There is no default value for this setting.

Convert a string to its uppercase equivalent.

Example:
[source,ruby]
    filter {
      mutate {
        uppercase => [ "fieldname" ]
      }
    }



[id="{version}-plugins-{type}s-{plugin}-common-options"]
include::{include_path}/{type}.asciidoc[]
diff --git a/docs/versioned-plugins/filters/ruby-index.asciidoc b/docs/versioned-plugins/filters/ruby-index.asciidoc
new file mode 100644
index 000000000..b8c9bd579
--- /dev/null
+++ b/docs/versioned-plugins/filters/ruby-index.asciidoc
@@ -0,0 +1,22 @@
:plugin: ruby
:type: filter

include::{include_path}/version-list-intro.asciidoc[]

|=======================================================================
| Version | Release Date
| <<v3.1.3-plugins-filters-ruby,v3.1.3>> | 2017-12-06
| <<v3.1.2-plugins-filters-ruby,v3.1.2>> | 2017-11-28
| <<v3.1.1-plugins-filters-ruby,v3.1.1>> | 2017-11-07
| <<v3.1.0-plugins-filters-ruby,v3.1.0>> | 2017-11-07
| <<v3.0.4-plugins-filters-ruby,v3.0.4>> | 2017-08-15
| <<v3.0.3-plugins-filters-ruby,v3.0.3>> | 2017-06-23
|=======================================================================

include::ruby-v3.1.3.asciidoc[]
include::ruby-v3.1.2.asciidoc[]
include::ruby-v3.1.1.asciidoc[]
include::ruby-v3.1.0.asciidoc[]
include::ruby-v3.0.4.asciidoc[]
include::ruby-v3.0.3.asciidoc[]

diff --git a/docs/versioned-plugins/filters/ruby-v3.0.3.asciidoc b/docs/versioned-plugins/filters/ruby-v3.0.3.asciidoc
new file mode 100644
index 000000000..dc0347fe9
--- /dev/null
+++ b/docs/versioned-plugins/filters/ruby-v3.0.3.asciidoc
@@ -0,0 +1,82 @@
:plugin: ruby
:type: filter

///////////////////////////////////////////
START - GENERATED VARIABLES, DO NOT EDIT!
///////////////////////////////////////////
:version: v3.0.3
:release_date: 2017-06-23
:changelog_url: https://github.com/logstash-plugins/logstash-filter-ruby/blob/v3.0.3/CHANGELOG.md
:include_path: ../../../../logstash/docs/include
///////////////////////////////////////////
END - GENERATED VARIABLES, DO NOT EDIT!
///////////////////////////////////////////

[id="{version}-plugins-{type}s-{plugin}"]

=== Ruby filter plugin {version}

include::{include_path}/plugin_header.asciidoc[]

==== Description

Execute ruby code.

For example, to cancel 90% of events, you can do this:
[source,ruby]
    filter {
      ruby {
        # Cancel 90% of events
        code => "event.cancel if rand <= 0.90"
      }
    }

If you need to create additional events, this cannot be done as in other filters,
where you would use `yield`; you must instead use the specific syntax
`new_event_block.call(event)`, as in this example, which duplicates the input event:
[source,ruby]
    filter {
      ruby {
        code => "new_event_block.call(event.clone)"
      }
    }


[id="{version}-plugins-{type}s-{plugin}-options"]
==== Ruby Filter Configuration Options

This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+ +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-code>> |{logstash-ref}/configuration-file-structure.html#string[string]|Yes +| <<{version}-plugins-{type}s-{plugin}-init>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +filter plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-code"] +===== `code` + + * This is a required setting. + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * There is no default value for this setting. + +The code to execute for every event. +You will have an `event` variable available that is the event itself. See the {logstash-ref}/event-api.html[Event API] for more information. + +[id="{version}-plugins-{type}s-{plugin}-init"] +===== `init` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * There is no default value for this setting. + +Any code to execute at logstash startup-time + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/filters/ruby-v3.0.4.asciidoc b/docs/versioned-plugins/filters/ruby-v3.0.4.asciidoc new file mode 100644 index 000000000..caa33fe6a --- /dev/null +++ b/docs/versioned-plugins/filters/ruby-v3.0.4.asciidoc @@ -0,0 +1,82 @@ +:plugin: ruby +:type: filter + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.0.4 +:release_date: 2017-08-15 +:changelog_url: https://github.com/logstash-plugins/logstash-filter-ruby/blob/v3.0.4/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Ruby filter plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +Execute ruby code. + +For example, to cancel 90% of events, you can do this: +[source,ruby] + filter { + ruby { + # Cancel 90% of events + code => "event.cancel if rand <= 0.90" + } + } + +If you need to create additional events, it cannot be done as in other filters where you would use `yield`, +you must use a specific syntax `new_event_block.call(event)` like in this example duplicating the input event +[source,ruby] +filter { + ruby { + code => "new_event_block.call(event.clone)" + } +} + + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Ruby Filter Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. 
+ +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-code>> |{logstash-ref}/configuration-file-structure.html#string[string]|Yes +| <<{version}-plugins-{type}s-{plugin}-init>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +filter plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-code"] +===== `code` + + * This is a required setting. + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * There is no default value for this setting. + +The code to execute for every event. +You will have an `event` variable available that is the event itself. See the {logstash-ref}/event-api.html[Event API] for more information. + +[id="{version}-plugins-{type}s-{plugin}-init"] +===== `init` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * There is no default value for this setting. + +Any code to execute at logstash startup-time + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/filters/ruby-v3.1.0.asciidoc b/docs/versioned-plugins/filters/ruby-v3.1.0.asciidoc new file mode 100644 index 000000000..dd6681a06 --- /dev/null +++ b/docs/versioned-plugins/filters/ruby-v3.1.0.asciidoc @@ -0,0 +1,192 @@ +:plugin: ruby +:type: filter + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.1.0 +:release_date: 2017-11-07 +:changelog_url: https://github.com/logstash-plugins/logstash-filter-ruby/blob/v3.1.0/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Ruby filter plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +Execute ruby code. This filter accepts inline ruby code or a ruby file. +The two options are mutually exclusive and have slightly different ways of working, +which are described below. + +===== Inline ruby code + +To inline ruby in your filter, place all code in the `code` option. This code will be executed for every event the filter receives. You can also place ruby code in the `init` option - it will be executed only once during the plugin's register phase. + +For example, to cancel 90% of events, you can do this: +[source,ruby] + filter { + ruby { + # Cancel 90% of events + code => "event.cancel if rand <= 0.90" + } + } + +If you need to create additional events, you must use a specific syntax `new_event_block.call(event)` like in this example duplicating the input event +[source,ruby] +filter { + ruby { + code => "new_event_block.call(event.clone)" + } +} + +===== Using a Ruby script file + +As the inline code can become complex and hard to structure inside of a text string in `code`, it's then preferrable to place the Ruby code in a .rb file, using the `path` option. 
+ +[source,ruby] + filter { + ruby { + # Cancel 90% of events + path => "/etc/logstash/drop_percentage.rb" + script_params => { "percentage" => 0.9 } + } + } + +The ruby script file should define the following methods: + + * `register(params)`: An optional register method that receives the key/value hash passed in the `script_params` configuration option + * `filter(event)`: A mandatory Ruby method that accepts a Logstash event and must return an array of events + +Below is an example implementation of the `drop_percentage.rb` ruby script that drops a configurable percentage of events: + +[source,ruby] +---- +# the value of `params` is the value of the hash passed to `script_params` +# in the logstash configuration +def register(params) + @drop_percentage = params["percentage"] +end + +# the filter method receives an event and must return a list of events. +# Dropping an event means not including it in the return array, +# while creating new ones only requires you to add a new instance of +# LogStash::Event to the returned array +def filter(event) + if rand >= @drop_percentage + return [event] + else + return [] # return empty array to cancel event + end +end +---- + +===== Testing the ruby script + +To validate the behaviour of the `filter` method you implemented, +the Ruby filter plugin provides an inline test framework where you +can assert expectations. +The tests you define will run when the pipeline is created and will +prevent it from starting if a test fails. + +You can also verify if the tests pass using the logstash `-t` flag. + +For example above, you can write at the bottom of the `drop_percentage.rb` +ruby script the following test: + +[source,ruby] +---- +def register(params) + # .. +end + +def filter(event) + # .. +end + +test "drop percentage 100%" do + parameters do + { "percentage" => 1 } + end + + in_event { { "message" => "hello" } } + + expect("drops the event") do |events| + events.size == 0 + end +end +---- + +We can now test that the ruby script we're using is implemented correctly: + +[source,shell] +---- +% bin/logstash -e "filter { ruby { path => '/etc/logstash/drop_percentage.rb' script_params => { 'drop_percentage' => 0.5 } } }" -t +[2017-10-13T13:44:29,723][INFO ][logstash.filters.ruby.script] Test run complete {:script_path=>"/etc/logstash/drop_percentage.rb", :results=>{:passed=>1, :failed=>0, :errored=>0}} +Configuration OK +[2017-10-13T13:44:29,887][INFO ][logstash.runner ] Using config.test_and_exit mode. Config Validation Result: OK. Exiting Logstash +---- + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Ruby Filter Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. 
+ +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-code>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-init>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-script_params>> |{logstash-ref}/configuration-file-structure.html#hash[hash],{}|No +| <<{version}-plugins-{type}s-{plugin}-tag_on_exception>> |{logstash-ref}/configuration-file-structure.html#string[string],_rubyexception|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +filter plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-code"] +===== `code` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * There is no default value for this setting. + * This setting cannot be used together with `path`. + +The code to execute for every event. +You will have an `event` variable available that is the event itself. See the {logstash-ref}/event-api.html[Event API] for more information. + +[id="{version}-plugins-{type}s-{plugin}-init"] +===== `init` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * There is no default value for this setting. + +Any code to execute at logstash startup-time + +[id="{version}-plugins-{type}s-{plugin}-path"] +===== `path` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * There is no default value for this setting. + * This setting cannot be used together with `code`. + +The path of the ruby script file that implements the `filter` method. + +[id="{version}-plugins-{type}s-{plugin}-script_params"] +===== `script_params` + + * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash] + * Default value is `{}` + +A key/value hash with parameters that are passed to the register method +of your ruby script file defined in `path`. + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] diff --git a/docs/versioned-plugins/filters/ruby-v3.1.1.asciidoc b/docs/versioned-plugins/filters/ruby-v3.1.1.asciidoc new file mode 100644 index 000000000..6b7e44c55 --- /dev/null +++ b/docs/versioned-plugins/filters/ruby-v3.1.1.asciidoc @@ -0,0 +1,192 @@ +:plugin: ruby +:type: filter + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.1.1 +:release_date: 2017-11-07 +:changelog_url: https://github.com/logstash-plugins/logstash-filter-ruby/blob/v3.1.1/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Ruby filter plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +Execute ruby code. This filter accepts inline ruby code or a ruby file. +The two options are mutually exclusive and have slightly different ways of working, +which are described below. + +===== Inline ruby code + +To inline ruby in your filter, place all code in the `code` option. 
This code will be executed for every event the filter receives. You can also place ruby code in the `init` option - it will be executed only once during the plugin's register phase. + +For example, to cancel 90% of events, you can do this: +[source,ruby] + filter { + ruby { + # Cancel 90% of events + code => "event.cancel if rand <= 0.90" + } + } + +If you need to create additional events, you must use a specific syntax `new_event_block.call(event)` like in this example duplicating the input event +[source,ruby] +filter { + ruby { + code => "new_event_block.call(event.clone)" + } +} + +===== Using a Ruby script file + +As the inline code can become complex and hard to structure inside of a text string in `code`, it's then preferrable to place the Ruby code in a .rb file, using the `path` option. + +[source,ruby] + filter { + ruby { + # Cancel 90% of events + path => "/etc/logstash/drop_percentage.rb" + script_params => { "percentage" => 0.9 } + } + } + +The ruby script file should define the following methods: + + * `register(params)`: An optional register method that receives the key/value hash passed in the `script_params` configuration option + * `filter(event)`: A mandatory Ruby method that accepts a Logstash event and must return an array of events + +Below is an example implementation of the `drop_percentage.rb` ruby script that drops a configurable percentage of events: + +[source,ruby] +---- +# the value of `params` is the value of the hash passed to `script_params` +# in the logstash configuration +def register(params) + @drop_percentage = params["percentage"] +end + +# the filter method receives an event and must return a list of events. +# Dropping an event means not including it in the return array, +# while creating new ones only requires you to add a new instance of +# LogStash::Event to the returned array +def filter(event) + if rand >= @drop_percentage + return [event] + else + return [] # return empty array to cancel event + end +end +---- + +===== Testing the ruby script + +To validate the behaviour of the `filter` method you implemented, +the Ruby filter plugin provides an inline test framework where you +can assert expectations. +The tests you define will run when the pipeline is created and will +prevent it from starting if a test fails. + +You can also verify if the tests pass using the logstash `-t` flag. + +For example above, you can write at the bottom of the `drop_percentage.rb` +ruby script the following test: + +[source,ruby] +---- +def register(params) + # .. +end + +def filter(event) + # .. +end + +test "drop percentage 100%" do + parameters do + { "percentage" => 1 } + end + + in_event { { "message" => "hello" } } + + expect("drops the event") do |events| + events.size == 0 + end +end +---- + +We can now test that the ruby script we're using is implemented correctly: + +[source,shell] +---- +% bin/logstash -e "filter { ruby { path => '/etc/logstash/drop_percentage.rb' script_params => { 'drop_percentage' => 0.5 } } }" -t +[2017-10-13T13:44:29,723][INFO ][logstash.filters.ruby.script] Test run complete {:script_path=>"/etc/logstash/drop_percentage.rb", :results=>{:passed=>1, :failed=>0, :errored=>0}} +Configuration OK +[2017-10-13T13:44:29,887][INFO ][logstash.runner ] Using config.test_and_exit mode. Config Validation Result: OK. 
Exiting Logstash +---- + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Ruby Filter Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-code>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-init>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-script_params>> |{logstash-ref}/configuration-file-structure.html#hash[hash],{}|No +| <<{version}-plugins-{type}s-{plugin}-tag_on_exception>> |{logstash-ref}/configuration-file-structure.html#string[string],_rubyexception|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +filter plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-code"] +===== `code` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * There is no default value for this setting. + * This setting cannot be used together with `path`. + +The code to execute for every event. +You will have an `event` variable available that is the event itself. See the {logstash-ref}/event-api.html[Event API] for more information. + +[id="{version}-plugins-{type}s-{plugin}-init"] +===== `init` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * There is no default value for this setting. + +Any code to execute at logstash startup-time + +[id="{version}-plugins-{type}s-{plugin}-path"] +===== `path` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * There is no default value for this setting. + * This setting cannot be used together with `code`. + +The path of the ruby script file that implements the `filter` method. + +[id="{version}-plugins-{type}s-{plugin}-script_params"] +===== `script_params` + + * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash] + * Default value is `{}` + +A key/value hash with parameters that are passed to the register method +of your ruby script file defined in `path`. + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] diff --git a/docs/versioned-plugins/filters/ruby-v3.1.2.asciidoc b/docs/versioned-plugins/filters/ruby-v3.1.2.asciidoc new file mode 100644 index 000000000..12e93773d --- /dev/null +++ b/docs/versioned-plugins/filters/ruby-v3.1.2.asciidoc @@ -0,0 +1,192 @@ +:plugin: ruby +:type: filter + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.1.2 +:release_date: 2017-11-28 +:changelog_url: https://github.com/logstash-plugins/logstash-filter-ruby/blob/v3.1.2/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Ruby filter plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +Execute ruby code. 
This filter accepts inline ruby code or a ruby file. +The two options are mutually exclusive and have slightly different ways of working, +which are described below. + +===== Inline ruby code + +To inline ruby in your filter, place all code in the `code` option. This code will be executed for every event the filter receives. You can also place ruby code in the `init` option - it will be executed only once during the plugin's register phase. + +For example, to cancel 90% of events, you can do this: +[source,ruby] + filter { + ruby { + # Cancel 90% of events + code => "event.cancel if rand <= 0.90" + } + } + +If you need to create additional events, you must use a specific syntax `new_event_block.call(event)` like in this example duplicating the input event +[source,ruby] +filter { + ruby { + code => "new_event_block.call(event.clone)" + } +} + +===== Using a Ruby script file + +As the inline code can become complex and hard to structure inside of a text string in `code`, it's then preferrable to place the Ruby code in a .rb file, using the `path` option. + +[source,ruby] + filter { + ruby { + # Cancel 90% of events + path => "/etc/logstash/drop_percentage.rb" + script_params => { "percentage" => 0.9 } + } + } + +The ruby script file should define the following methods: + + * `register(params)`: An optional register method that receives the key/value hash passed in the `script_params` configuration option + * `filter(event)`: A mandatory Ruby method that accepts a Logstash event and must return an array of events + +Below is an example implementation of the `drop_percentage.rb` ruby script that drops a configurable percentage of events: + +[source,ruby] +---- +# the value of `params` is the value of the hash passed to `script_params` +# in the logstash configuration +def register(params) + @drop_percentage = params["percentage"] +end + +# the filter method receives an event and must return a list of events. +# Dropping an event means not including it in the return array, +# while creating new ones only requires you to add a new instance of +# LogStash::Event to the returned array +def filter(event) + if rand >= @drop_percentage + return [event] + else + return [] # return empty array to cancel event + end +end +---- + +===== Testing the ruby script + +To validate the behaviour of the `filter` method you implemented, +the Ruby filter plugin provides an inline test framework where you +can assert expectations. +The tests you define will run when the pipeline is created and will +prevent it from starting if a test fails. + +You can also verify if the tests pass using the logstash `-t` flag. + +For example above, you can write at the bottom of the `drop_percentage.rb` +ruby script the following test: + +[source,ruby] +---- +def register(params) + # .. +end + +def filter(event) + # .. +end + +test "drop percentage 100%" do + parameters do + { "percentage" => 1 } + end + + in_event { { "message" => "hello" } } + + expect("drops the event") do |events| + events.size == 0 + end +end +---- + +We can now test that the ruby script we're using is implemented correctly: + +[source,shell] +---- +% bin/logstash -e "filter { ruby { path => '/etc/logstash/drop_percentage.rb' script_params => { 'drop_percentage' => 0.5 } } }" -t +[2017-10-13T13:44:29,723][INFO ][logstash.filters.ruby.script] Test run complete {:script_path=>"/etc/logstash/drop_percentage.rb", :results=>{:passed=>1, :failed=>0, :errored=>0}} +Configuration OK +[2017-10-13T13:44:29,887][INFO ][logstash.runner ] Using config.test_and_exit mode. 
Config Validation Result: OK. Exiting Logstash +---- + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Ruby Filter Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-code>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-init>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-script_params>> |{logstash-ref}/configuration-file-structure.html#hash[hash],{}|No +| <<{version}-plugins-{type}s-{plugin}-tag_on_exception>> |{logstash-ref}/configuration-file-structure.html#string[string],_rubyexception|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +filter plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-code"] +===== `code` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * There is no default value for this setting. + * This setting cannot be used together with `path`. + +The code to execute for every event. +You will have an `event` variable available that is the event itself. See the {logstash-ref}/event-api.html[Event API] for more information. + +[id="{version}-plugins-{type}s-{plugin}-init"] +===== `init` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * There is no default value for this setting. + +Any code to execute at logstash startup-time + +[id="{version}-plugins-{type}s-{plugin}-path"] +===== `path` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * There is no default value for this setting. + * This setting cannot be used together with `code`. + +The path of the ruby script file that implements the `filter` method. + +[id="{version}-plugins-{type}s-{plugin}-script_params"] +===== `script_params` + + * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash] + * Default value is `{}` + +A key/value hash with parameters that are passed to the register method +of your ruby script file defined in `path`. + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] diff --git a/docs/versioned-plugins/filters/ruby-v3.1.3.asciidoc b/docs/versioned-plugins/filters/ruby-v3.1.3.asciidoc new file mode 100644 index 000000000..ac5f1972d --- /dev/null +++ b/docs/versioned-plugins/filters/ruby-v3.1.3.asciidoc @@ -0,0 +1,201 @@ +:plugin: ruby +:type: filter + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.1.3 +:release_date: 2017-12-06 +:changelog_url: https://github.com/logstash-plugins/logstash-filter-ruby/blob/v3.1.3/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! 
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Ruby filter plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Execute Ruby code. This filter accepts inline Ruby code or a Ruby file.
+The two options are mutually exclusive and work in slightly different ways,
+which are described below.
+
+===== Inline ruby code
+
+To inline Ruby in your filter, place all code in the `code` option. This code is executed for every event the filter receives. You can also place Ruby code in the `init` option; it is executed only once, during the plugin's register phase.
+
+For example, to cancel 90% of events, you can do this:
+[source,ruby]
+    filter {
+      ruby {
+        # Cancel 90% of events
+        code => "event.cancel if rand <= 0.90"
+      }
+    }
+
+If you need to create additional events, you must use the `new_event_block.call(event)` syntax, as in this example, which duplicates the input event:
+[source,ruby]
+filter {
+  ruby {
+    code => "new_event_block.call(event.clone)"
+  }
+}
+
+===== Using a Ruby script file
+
+Because inline code can become complex and hard to structure inside a text string in `code`, it is preferable to place the Ruby code in a `.rb` file and load it with the `path` option.
+
+[source,ruby]
+    filter {
+      ruby {
+        # Cancel 90% of events
+        path => "/etc/logstash/drop_percentage.rb"
+        script_params => { "percentage" => 0.9 }
+      }
+    }
+
+The Ruby script file should define the following methods:
+
+ * `register(params)`: An optional register method that receives the key/value hash passed in the `script_params` configuration option
+ * `filter(event)`: A mandatory Ruby method that accepts a Logstash event and must return an array of events
+
+Below is an example implementation of the `drop_percentage.rb` Ruby script that drops a configurable percentage of events:
+
+[source,ruby]
+----
+# the value of `params` is the value of the hash passed to `script_params`
+# in the logstash configuration
+def register(params)
+  @drop_percentage = params["percentage"]
+end
+
+# the filter method receives an event and must return a list of events.
+# Dropping an event means not including it in the return array,
+# while creating new ones only requires you to add a new instance of
+# LogStash::Event to the returned array
+def filter(event)
+  if rand >= @drop_percentage
+    return [event]
+  else
+    return [] # return empty array to cancel event
+  end
+end
+----
+
+===== Testing the ruby script
+
+To validate the behaviour of the `filter` method you implemented,
+the Ruby filter plugin provides an inline test framework where you
+can assert expectations.
+The tests you define will run when the pipeline is created and will
+prevent it from starting if a test fails.
+
+You can also verify that the tests pass using the Logstash `-t` flag.
+
+For the example above, you can add the following test at the bottom of
+the `drop_percentage.rb` script:
+
+[source,ruby]
+----
+def register(params)
+  # ..
+end
+
+def filter(event)
+  # ..
+end + +test "drop percentage 100%" do + parameters do + { "percentage" => 1 } + end + + in_event { { "message" => "hello" } } + + expect("drops the event") do |events| + events.size == 0 + end +end +---- + +We can now test that the ruby script we're using is implemented correctly: + +[source,shell] +---- +% bin/logstash -e "filter { ruby { path => '/etc/logstash/drop_percentage.rb' script_params => { 'drop_percentage' => 0.5 } } }" -t +[2017-10-13T13:44:29,723][INFO ][logstash.filters.ruby.script] Test run complete {:script_path=>"/etc/logstash/drop_percentage.rb", :results=>{:passed=>1, :failed=>0, :errored=>0}} +Configuration OK +[2017-10-13T13:44:29,887][INFO ][logstash.runner ] Using config.test_and_exit mode. Config Validation Result: OK. Exiting Logstash +---- + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Ruby Filter Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-code>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-init>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-script_params>> |{logstash-ref}/configuration-file-structure.html#hash[hash],{}|No +| <<{version}-plugins-{type}s-{plugin}-tag_on_exception>> |{logstash-ref}/configuration-file-structure.html#string[string],_rubyexception|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +filter plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-code"] +===== `code` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * There is no default value for this setting. + * This setting cannot be used together with `path`. + +The code to execute for every event. +You will have an `event` variable available that is the event itself. See the {logstash-ref}/event-api.html[Event API] for more information. + +[id="{version}-plugins-{type}s-{plugin}-init"] +===== `init` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * There is no default value for this setting. + +Any code to execute at logstash startup-time + +[id="{version}-plugins-{type}s-{plugin}-path"] +===== `path` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * There is no default value for this setting. + * This setting cannot be used together with `code`. + +The path of the ruby script file that implements the `filter` method. + +[id="{version}-plugins-{type}s-{plugin}-script_params"] +===== `script_params` + + * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash] + * Default value is `{}` + +A key/value hash with parameters that are passed to the register method +of your ruby script file defined in `path`. 
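+
+For example, to pass the percentage used by the `drop_percentage.rb` script
+shown earlier (the path and parameter name simply mirror that example):
+
+[source,ruby]
+    filter {
+      ruby {
+        path => "/etc/logstash/drop_percentage.rb"
+        script_params => { "percentage" => 0.9 }
+      }
+    }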
+ +[id="{version}-plugins-{type}s-{plugin}-tag_on_exception"] +===== `tag_on_exception` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * Default value is `_rubyexception` + +Tag to add to events in case the ruby code (either inline or file based) +causes an exception. + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] diff --git a/docs/versioned-plugins/filters/sleep-index.asciidoc b/docs/versioned-plugins/filters/sleep-index.asciidoc new file mode 100644 index 000000000..b5cf4bb8f --- /dev/null +++ b/docs/versioned-plugins/filters/sleep-index.asciidoc @@ -0,0 +1,16 @@ +:plugin: sleep +:type: filter + +include::{include_path}/version-list-intro.asciidoc[] + +|======================================================================= +| Version | Release Date +| <> | 2017-11-07 +| <> | 2017-08-15 +| <> | 2017-06-23 +|======================================================================= + +include::sleep-v3.0.6.asciidoc[] +include::sleep-v3.0.5.asciidoc[] +include::sleep-v3.0.4.asciidoc[] + diff --git a/docs/versioned-plugins/filters/sleep-v3.0.4.asciidoc b/docs/versioned-plugins/filters/sleep-v3.0.4.asciidoc new file mode 100644 index 000000000..c4b7829b7 --- /dev/null +++ b/docs/versioned-plugins/filters/sleep-v3.0.4.asciidoc @@ -0,0 +1,119 @@ +:plugin: sleep +:type: filter + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.0.4 +:release_date: 2017-06-23 +:changelog_url: https://github.com/logstash-plugins/logstash-filter-sleep/blob/v3.0.4/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Sleep filter plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +Sleep a given amount of time. This will cause logstash +to stall for the given amount of time. This is useful +for rate limiting, etc. + + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Sleep Filter Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-every>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-replay>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-time>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +filter plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-every"] +===== `every` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * Default value is `1` + +Sleep on every N'th. This option is ignored in replay mode. 
+ +Example: +[source,ruby] + filter { + sleep { + time => "1" # Sleep 1 second + every => 10 # on every 10th event + } + } + +[id="{version}-plugins-{type}s-{plugin}-replay"] +===== `replay` + + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] + * Default value is `false` + +Enable replay mode. + +Replay mode tries to sleep based on timestamps in each event. + +The amount of time to sleep is computed by subtracting the +previous event's timestamp from the current event's timestamp. +This helps you replay events in the same timeline as original. + +If you specify a `time` setting as well, this filter will +use the `time` value as a speed modifier. For example, +a `time` value of 2 will replay at double speed, while a +value of 0.25 will replay at 1/4th speed. + +For example: +[source,ruby] + filter { + sleep { + time => 2 + replay => true + } + } + +The above will sleep in such a way that it will perform +replay 2-times faster than the original time speed. + +[id="{version}-plugins-{type}s-{plugin}-time"] +===== `time` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * There is no default value for this setting. + +The length of time to sleep, in seconds, for every event. + +This can be a number (eg, 0.5), or a string (eg, `%{foo}`) +The second form (string with a field value) is useful if +you have an attribute of your event that you want to use +to indicate the amount of time to sleep. + +Example: +[source,ruby] + filter { + sleep { + # Sleep 1 second for every event. + time => "1" + } + } + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/filters/sleep-v3.0.5.asciidoc b/docs/versioned-plugins/filters/sleep-v3.0.5.asciidoc new file mode 100644 index 000000000..9e6cd3527 --- /dev/null +++ b/docs/versioned-plugins/filters/sleep-v3.0.5.asciidoc @@ -0,0 +1,119 @@ +:plugin: sleep +:type: filter + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.0.5 +:release_date: 2017-08-15 +:changelog_url: https://github.com/logstash-plugins/logstash-filter-sleep/blob/v3.0.5/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Sleep filter plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +Sleep a given amount of time. This will cause logstash +to stall for the given amount of time. This is useful +for rate limiting, etc. + + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Sleep Filter Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. 
+ +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-every>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-replay>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-time>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +filter plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-every"] +===== `every` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * Default value is `1` + +Sleep on every N'th. This option is ignored in replay mode. + +Example: +[source,ruby] + filter { + sleep { + time => "1" # Sleep 1 second + every => 10 # on every 10th event + } + } + +[id="{version}-plugins-{type}s-{plugin}-replay"] +===== `replay` + + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] + * Default value is `false` + +Enable replay mode. + +Replay mode tries to sleep based on timestamps in each event. + +The amount of time to sleep is computed by subtracting the +previous event's timestamp from the current event's timestamp. +This helps you replay events in the same timeline as original. + +If you specify a `time` setting as well, this filter will +use the `time` value as a speed modifier. For example, +a `time` value of 2 will replay at double speed, while a +value of 0.25 will replay at 1/4th speed. + +For example: +[source,ruby] + filter { + sleep { + time => 2 + replay => true + } + } + +The above will sleep in such a way that it will perform +replay 2-times faster than the original time speed. + +[id="{version}-plugins-{type}s-{plugin}-time"] +===== `time` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * There is no default value for this setting. + +The length of time to sleep, in seconds, for every event. + +This can be a number (eg, 0.5), or a string (eg, `%{foo}`) +The second form (string with a field value) is useful if +you have an attribute of your event that you want to use +to indicate the amount of time to sleep. + +Example: +[source,ruby] + filter { + sleep { + # Sleep 1 second for every event. + time => "1" + } + } + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/filters/sleep-v3.0.6.asciidoc b/docs/versioned-plugins/filters/sleep-v3.0.6.asciidoc new file mode 100644 index 000000000..f868a711d --- /dev/null +++ b/docs/versioned-plugins/filters/sleep-v3.0.6.asciidoc @@ -0,0 +1,119 @@ +:plugin: sleep +:type: filter + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.0.6 +:release_date: 2017-11-07 +:changelog_url: https://github.com/logstash-plugins/logstash-filter-sleep/blob/v3.0.6/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! 
+/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Sleep filter plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +Sleep a given amount of time. This will cause logstash +to stall for the given amount of time. This is useful +for rate limiting, etc. + + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Sleep Filter Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-every>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-replay>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-time>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +filter plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-every"] +===== `every` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * Default value is `1` + +Sleep on every N'th. This option is ignored in replay mode. + +Example: +[source,ruby] + filter { + sleep { + time => "1" # Sleep 1 second + every => 10 # on every 10th event + } + } + +[id="{version}-plugins-{type}s-{plugin}-replay"] +===== `replay` + + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] + * Default value is `false` + +Enable replay mode. + +Replay mode tries to sleep based on timestamps in each event. + +The amount of time to sleep is computed by subtracting the +previous event's timestamp from the current event's timestamp. +This helps you replay events in the same timeline as original. + +If you specify a `time` setting as well, this filter will +use the `time` value as a speed modifier. For example, +a `time` value of 2 will replay at double speed, while a +value of 0.25 will replay at 1/4th speed. + +For example: +[source,ruby] + filter { + sleep { + time => 2 + replay => true + } + } + +The above will sleep in such a way that it will perform +replay 2-times faster than the original time speed. + +[id="{version}-plugins-{type}s-{plugin}-time"] +===== `time` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * There is no default value for this setting. + +The length of time to sleep, in seconds, for every event. + +This can be a number (eg, 0.5), or a string (eg, `%{foo}`) +The second form (string with a field value) is useful if +you have an attribute of your event that you want to use +to indicate the amount of time to sleep. + +Example: +[source,ruby] + filter { + sleep { + # Sleep 1 second for every event. 
+ time => "1" + } + } + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/include/filter.asciidoc b/docs/versioned-plugins/include/filter.asciidoc new file mode 100644 index 000000000..cc01bba7c --- /dev/null +++ b/docs/versioned-plugins/include/filter.asciidoc @@ -0,0 +1,177 @@ +==== Common Options + +The following configuration options are supported by all filter plugins: + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-add_field>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No +| <<{version}-plugins-{type}s-{plugin}-add_tag>> |{logstash-ref}/configuration-file-structure.html#array[array]|No +| <<{version}-plugins-{type}s-{plugin}-enable_metric>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-id>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-periodic_flush>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-remove_field>> |{logstash-ref}/configuration-file-structure.html#array[array]|No +| <<{version}-plugins-{type}s-{plugin}-remove_tag>> |{logstash-ref}/configuration-file-structure.html#array[array]|No +|======================================================================= + +[id="{version}-plugins-{type}s-{plugin}-add_field"] +===== `add_field` + + * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash] + * Default value is `{}` + +If this filter is successful, add any arbitrary fields to this event. +Field names can be dynamic and include parts of the event using the `%{field}`. + +Example: + +["source","json",subs="attributes"] + filter { + {plugin} { + add_field => { "foo_%\{somefield\}" => "Hello world, from %\{host\}" } + } + } + +["source","json",subs="attributes"] + # You can also add multiple fields at once: + filter { + {plugin} { + add_field => { + "foo_%\{somefield\}" => "Hello world, from %\{host\}" + "new_field" => "new_static_value" + } + } + } + +If the event has field `"somefield" == "hello"` this filter, on success, +would add field `foo_hello` if it is present, with the +value above and the `%{host}` piece replaced with that value from the +event. The second example would also add a hardcoded field. + +[id="{version}-plugins-{type}s-{plugin}-add_tag"] +===== `add_tag` + + * Value type is {logstash-ref}/configuration-file-structure.html#array[array] + * Default value is `[]` + +If this filter is successful, add arbitrary tags to the event. +Tags can be dynamic and include parts of the event using the `%{field}` +syntax. + +Example: + +["source","json",subs="attributes"] + filter { + {plugin} { + add_tag => [ "foo_%\{somefield\}" ] + } + } + +["source","json",subs="attributes"] + # You can also add multiple tags at once: + filter { + {plugin} { + add_tag => [ "foo_%\{somefield\}", "taggedy_tag"] + } + } + +If the event has field `"somefield" == "hello"` this filter, on success, +would add a tag `foo_hello` (and the second example would of course add a `taggedy_tag` tag). 
+
+[id="{version}-plugins-{type}s-{plugin}-enable_metric"]
+===== `enable_metric`
+
+  * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+  * Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By
+default we record all the metrics we can, but you can disable metrics
+collection for a specific plugin.
+
+[id="{version}-plugins-{type}s-{plugin}-id"]
+===== `id`
+
+  * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+  * There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one.
+It is strongly recommended to set this ID in your configuration. This is particularly useful
+when you have two or more plugins of the same type, for example, if you have 2 {plugin} filters.
+Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+
+["source","json",subs="attributes"]
+    filter {
+      {plugin} {
+        id => "ABC"
+      }
+    }
+
+[id="{version}-plugins-{type}s-{plugin}-periodic_flush"]
+===== `periodic_flush`
+
+  * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+  * Default value is `false`
+
+Call the filter flush method at a regular interval.
+Optional.
+
+[id="{version}-plugins-{type}s-{plugin}-remove_field"]
+===== `remove_field`
+
+  * Value type is {logstash-ref}/configuration-file-structure.html#array[array]
+  * Default value is `[]`
+
+If this filter is successful, remove arbitrary fields from this event.
+Field names can be dynamic and include parts of the event using the
+`%{field}` syntax.
+
+Example:
+
+["source","json",subs="attributes"]
+    filter {
+      {plugin} {
+        remove_field => [ "foo_%\{somefield\}" ]
+      }
+    }
+
+["source","json",subs="attributes"]
+    # You can also remove multiple fields at once:
+    filter {
+      {plugin} {
+        remove_field => [ "foo_%\{somefield\}", "my_extraneous_field" ]
+      }
+    }
+
+If the event has field `"somefield" == "hello"` this filter, on success,
+would remove the field with name `foo_hello` if it is present. The second
+example would remove an additional, non-dynamic field.
+
+[id="{version}-plugins-{type}s-{plugin}-remove_tag"]
+===== `remove_tag`
+
+  * Value type is {logstash-ref}/configuration-file-structure.html#array[array]
+  * Default value is `[]`
+
+If this filter is successful, remove arbitrary tags from the event.
+Tags can be dynamic and include parts of the event using the `%{field}`
+syntax.
+
+Example:
+
+["source","json",subs="attributes"]
+    filter {
+      {plugin} {
+        remove_tag => [ "foo_%\{somefield\}" ]
+      }
+    }
+
+["source","json",subs="attributes"]
+    # You can also remove multiple tags at once:
+    filter {
+      {plugin} {
+        remove_tag => [ "foo_%\{somefield\}", "sad_unwanted_tag"]
+      }
+    }
+
+If the event has field `"somefield" == "hello"` this filter, on success,
+would remove the tag `foo_hello` if it is present. The second example
+would remove a sad, unwanted tag as well.
diff --git a/docs/versioned-plugins/include/input.asciidoc b/docs/versioned-plugins/include/input.asciidoc new file mode 100644 index 000000000..698864401 --- /dev/null +++ b/docs/versioned-plugins/include/input.asciidoc @@ -0,0 +1,104 @@ +==== Common Options + +The following configuration options are supported by all input plugins: +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-add_field>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No +| <<{version}-plugins-{type}s-{plugin}-codec>> |{logstash-ref}/configuration-file-structure.html#codec[codec]|No +| <<{version}-plugins-{type}s-{plugin}-enable_metric>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-id>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-tags>> |{logstash-ref}/configuration-file-structure.html#array[array]|No +| <<{version}-plugins-{type}s-{plugin}-type>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +|======================================================================= + + +==== Details + +  + +[id="{version}-plugins-{type}s-{plugin}-add_field"] +===== `add_field` + + * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash] + * Default value is `{}` + +Add a field to an event + +[id="{version}-plugins-{type}s-{plugin}-codec"] +===== `codec` + + * Value type is {logstash-ref}/configuration-file-structure.html#codec[codec] + * Default value is `"plain"` + +The codec used for input data. Input codecs are a convenient method for decoding your data before it enters the input, without needing a separate filter in your Logstash pipeline. + + +[id="{version}-plugins-{type}s-{plugin}-enable_metric"] +===== `enable_metric` + + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] + * Default value is `true` + +Disable or enable metric logging for this specific plugin instance +by default we record all the metrics we can, but you can disable metrics collection +for a specific plugin. + +[id="{version}-plugins-{type}s-{plugin}-id"] +===== `id` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * There is no default value for this setting. + +Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. +It is strongly recommended to set this ID in your configuration. This is particularly useful +when you have two or more plugins of the same type, for example, if you have 2 {plugin} inputs. +Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs. + +["source","json",subs="attributes"] +--------------------------------------------------------------------------------------------------- +input { + {plugin} { + id => "my_plugin_id" + } +} +--------------------------------------------------------------------------------------------------- + +[id="{version}-plugins-{type}s-{plugin}-tags"] +===== `tags` + + * Value type is {logstash-ref}/configuration-file-structure.html#array[array] + * There is no default value for this setting. + +Add any number of arbitrary tags to your event. + +This can help with processing later. 
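+
+A minimal sketch (the tag names here are only illustrative):
+
+["source","json",subs="attributes"]
+---------------------------------------------------------------------------------------------------
+input {
+  {plugin} {
+    tags => [ "staging", "raw_event" ]
+  }
+}
+---------------------------------------------------------------------------------------------------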
+ +[id="{version}-plugins-{type}s-{plugin}-type"] +===== `type` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * There is no default value for this setting. + +Add a `type` field to all events handled by this input. + +Types are used mainly for filter activation. + +The type is stored as part of the event itself, so you can +also use the type to search for it in Kibana. + +If you try to set a type on an event that already has one (for +example when you send an event from a shipper to an indexer) then +a new input will not override the existing type. A type set at +the shipper stays with that event for its life even +when sent to another Logstash server. + +ifeval::["{type}"=="input" and "{plugin}"=="beats"] + +NOTE: The Beats shipper automatically sets the `type` field on the event. +You cannot override this setting in the Logstash config. If you specify +a setting for the <<{version}-plugins-inputs-beats-type,`type`>> config option in +Logstash, it is ignored. + +endif::[] + diff --git a/docs/versioned-plugins/include/output.asciidoc b/docs/versioned-plugins/include/output.asciidoc new file mode 100644 index 000000000..9a5cd3d16 --- /dev/null +++ b/docs/versioned-plugins/include/output.asciidoc @@ -0,0 +1,51 @@ +==== Common Options + +The following configuration options are supported by all output plugins: + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-codec>> |{logstash-ref}/configuration-file-structure.html#codec[codec]|No +| <<{version}-plugins-{type}s-{plugin}-enable_metric>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-id>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +|======================================================================= + +[id="{version}-plugins-{type}s-{plugin}-codec"] +===== `codec` + + * Value type is {logstash-ref}/configuration-file-structure.html#codec[codec] + * Default value is `"plain"` + +The codec used for output data. Output codecs are a convenient method for encoding your data before it leaves the output, without needing a separate filter in your Logstash pipeline. + +[id="{version}-plugins-{type}s-{plugin}-enable_metric"] +===== `enable_metric` + + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] + * Default value is `true` + +Disable or enable metric logging for this specific plugin instance +by default we record all the metrics we can, but you can disable metrics collection +for a specific plugin. + +[id="{version}-plugins-{type}s-{plugin}-id"] +===== `id` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * There is no default value for this setting. + +Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. +It is strongly recommended to set this ID in your configuration. This is particularly useful +when you have two or more plugins of the same type, for example, if you have 2 {plugin} outputs. +Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs. 
+ +["source","json",subs="attributes"] +--------------------------------------------------------------------------------------------------- +output { + {plugin} { + id => "my_plugin_id" + } +} +--------------------------------------------------------------------------------------------------- + + diff --git a/docs/versioned-plugins/include/plugin-intro.asciidoc b/docs/versioned-plugins/include/plugin-intro.asciidoc new file mode 100644 index 000000000..2087fcb7b --- /dev/null +++ b/docs/versioned-plugins/include/plugin-intro.asciidoc @@ -0,0 +1,13 @@ +[id="{type}-plugins"] += {type_uc} plugins + +[partintro] +-- +Looking for a specific version of the Logstash plugin docs? You've come to the +right place. This section contains all available versions of the documentation +for the Logstash {type} plugins. + +Want to learn how to use Logstash? See the +{logstash-ref}/index.html[Logstash Reference]. + +-- diff --git a/docs/versioned-plugins/include/plugin_header.asciidoc b/docs/versioned-plugins/include/plugin_header.asciidoc new file mode 100644 index 000000000..2c4e168a6 --- /dev/null +++ b/docs/versioned-plugins/include/plugin_header.asciidoc @@ -0,0 +1,43 @@ +ifeval::["{versioned_docs}"!="true"] +++++ +{plugin} +++++ +endif::[] +ifeval::["{versioned_docs}"=="true"] +++++ +{plugin} {version} +++++ +endif::[] + +* Plugin version: {version} +* Released on: {release_date} +* {changelog_url}[Changelog] + +ifeval::["{versioned_docs}"!="true"] + +For other plugin versions, see the +<<{type}-{plugin}-index,Versioned {plugin} {type} plugin docs>>. + +endif::[] + +ifeval::["{versioned_docs}"=="true"] + +For other versions, see the <<{type}-{plugin}-index,overview list>>. + +To learn more about Logstash, see the {logstash-ref}/index.html[Logstash Reference]. + +endif::[] + +ifeval::[("{default_plugin}"=="0") and ("{versioned_docs}"!="true")] + +==== Installation + +For plugins not bundled by default, it is easy to install by running +bin/logstash-plugin install logstash-{type}-{plugin}+. See {logstash-ref}/working-with-plugins.html[Working with plugins] for more details. + +endif::[] + +==== Getting Help + +For questions about the plugin, open a topic in the http://discuss.elastic.co[Discuss] forums. For bugs or feature requests, open an issue in https://github.com/logstash-plugins/logstash-{type}-{plugin}[Github]. +For the list of Elastic supported plugins, please consult the https://www.elastic.co/support/matrix#show_logstash_plugins[Elastic Support Matrix]. + diff --git a/docs/versioned-plugins/include/version-list-intro.asciidoc b/docs/versioned-plugins/include/version-list-intro.asciidoc new file mode 100644 index 000000000..5ba89ed4b --- /dev/null +++ b/docs/versioned-plugins/include/version-list-intro.asciidoc @@ -0,0 +1,13 @@ +[id="{type}-{plugin}-index"] + +== Versioned {plugin} {type} plugin docs +++++ +{plugin} +++++ + +This page lists all available versions of the documentation for this plugin. +To see which version of the plugin you have installed, run `bin/logstash-plugin +list --verbose`. + +NOTE: Versioned plugin documentation is not available for plugins released prior +to Logstash 6.0. 
diff --git a/docs/versioned-plugins/index.asciidoc b/docs/versioned-plugins/index.asciidoc
new file mode 100644
index 000000000..e1a56df29
--- /dev/null
+++ b/docs/versioned-plugins/index.asciidoc
@@ -0,0 +1,20 @@
+:versioned_docs: true
+
+// Set include path for static files that live in the logstash repo
+:include_path: ../include
+
+include::{asciidoc-dir}/../../shared/attributes.asciidoc[]
+
+// Override logstash-ref setting imported from shared/attributes.asciidoc
+:logstash-ref: http://www.elastic.co/guide/en/logstash/current
+
+[[logstash-plugin-reference]]
+= Versioned Plugin Reference
+
+include::inputs-index.asciidoc[]
+
+include::outputs-index.asciidoc[]
+
+include::filters-index.asciidoc[]
+
+include::codecs-index.asciidoc[]
diff --git a/docs/versioned-plugins/inputs-index.asciidoc b/docs/versioned-plugins/inputs-index.asciidoc
new file mode 100644
index 000000000..2d56074ae
--- /dev/null
+++ b/docs/versioned-plugins/inputs-index.asciidoc
@@ -0,0 +1,10 @@
+:type: input
+:type_uc: Input
+
+include::include/plugin-intro.asciidoc[]
+
+include::inputs/beats-index.asciidoc[]
+include::inputs/http-index.asciidoc[]
+include::inputs/s3-index.asciidoc[]
+include::inputs/tcp-index.asciidoc[]
+
diff --git a/docs/versioned-plugins/inputs/beats-index.asciidoc b/docs/versioned-plugins/inputs/beats-index.asciidoc
new file mode 100644
index 000000000..7f4c37ff0
--- /dev/null
+++ b/docs/versioned-plugins/inputs/beats-index.asciidoc
@@ -0,0 +1,34 @@
+:plugin: beats
+:type: input
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+| <<v5.0.6-plugins-inputs-beats,v5.0.6>> | 2018-01-05
+| <<v5.0.5-plugins-inputs-beats,v5.0.5>> | 2017-12-19
+| <<v5.0.4-plugins-inputs-beats,v5.0.4>> | 2017-12-12
+| <<v5.0.3-plugins-inputs-beats,v5.0.3>> | 2017-11-07
+| <<v5.0.2-plugins-inputs-beats,v5.0.2>> | 2017-10-06
+| <<v5.0.1-plugins-inputs-beats,v5.0.1>> | 2017-08-15
+| <<v5.0.0-plugins-inputs-beats,v5.0.0>> | 2017-07-28
+| <<v4.0.5-plugins-inputs-beats,v4.0.5>> | 2017-07-12
+| <<v4.0.4-plugins-inputs-beats,v4.0.4>> | 2017-06-23
+| <<v4.0.3-plugins-inputs-beats,v4.0.3>> | 2017-06-22
+| <<v4.0.2-plugins-inputs-beats,v4.0.2>> | 2017-06-07
+| <<v4.0.1-plugins-inputs-beats,v4.0.1>> | 2017-06-03
+|=======================================================================
+
+include::beats-v5.0.6.asciidoc[]
+include::beats-v5.0.5.asciidoc[]
+include::beats-v5.0.4.asciidoc[]
+include::beats-v5.0.3.asciidoc[]
+include::beats-v5.0.2.asciidoc[]
+include::beats-v5.0.1.asciidoc[]
+include::beats-v5.0.0.asciidoc[]
+include::beats-v4.0.5.asciidoc[]
+include::beats-v4.0.4.asciidoc[]
+include::beats-v4.0.3.asciidoc[]
+include::beats-v4.0.2.asciidoc[]
+include::beats-v4.0.1.asciidoc[]
+
diff --git a/docs/versioned-plugins/inputs/beats-v4.0.1.asciidoc b/docs/versioned-plugins/inputs/beats-v4.0.1.asciidoc
new file mode 100644
index 000000000..29989870a
--- /dev/null
+++ b/docs/versioned-plugins/inputs/beats-v4.0.1.asciidoc
@@ -0,0 +1,240 @@
+:plugin: beats
+:type: input
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v4.0.1
+:release_date: 2017-06-03
+:changelog_url: https://github.com/logstash-plugins/logstash-input-beats/blob/v4.0.1/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+=== Beats input plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This input plugin enables Logstash to receive events from the
+https://www.elastic.co/products/beats[Elastic Beats] framework.
+
+The following example shows how to configure Logstash to listen on port
+5044 for incoming Beats connections and to index into Elasticsearch:
+
+[source,ruby]
+------------------------------------------------------------------------------
+input {
+  beats {
+    port => 5044
+  }
+}
+
+output {
+  elasticsearch {
+    hosts => "localhost:9200"
+    manage_template => false
+    index => "%{[@metadata][beat]}-%{+YYYY.MM.dd}"
+    document_type => "%{[@metadata][type]}"
+  }
+}
+------------------------------------------------------------------------------
+
+NOTE: The Beats shipper automatically sets the `type` field on the event.
+You cannot override this setting in the Logstash config. If you specify
+a setting for the <<{version}-plugins-{type}s-{plugin}-type,`type`>> config option in
+Logstash, it is ignored.
+
+IMPORTANT: If you are shipping events that span multiple lines, you need to
+use the https://www.elastic.co/guide/en/beats/filebeat/current/multiline-examples.html[configuration options available in Filebeat]
+to handle multiline events before sending the event data to Logstash. You cannot
+use the {logstash-ref}/plugins-codecs-multiline.html[multiline] codec to handle
+multiline events. Doing so will prevent Logstash from starting.
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Beats Input Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-cipher_suites>> |{logstash-ref}/configuration-file-structure.html#array[array]|No
+| <<{version}-plugins-{type}s-{plugin}-client_inactivity_timeout>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+| <<{version}-plugins-{type}s-{plugin}-host>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-include_codec_tag>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-port>> |{logstash-ref}/configuration-file-structure.html#number[number]|Yes
+| <<{version}-plugins-{type}s-{plugin}-ssl>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_certificate>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_authorities>> |{logstash-ref}/configuration-file-structure.html#array[array]|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_handshake_timeout>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_key>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_key_passphrase>> |{logstash-ref}/configuration-file-structure.html#password[password]|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_verify_mode>> |{logstash-ref}/configuration-file-structure.html#string[string], one of `["none", "peer", "force_peer"]`|No
+| <<{version}-plugins-{type}s-{plugin}-tls_max_version>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+| <<{version}-plugins-{type}s-{plugin}-tls_min_version>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+input plugins.
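+
+As a quick orientation before the per-option details, here is a minimal sketch
+of a TLS-enabled configuration using only the options documented below; the
+certificate and key paths are placeholders:
+
+[source,ruby]
+------------------------------------------------------------------------------
+input {
+  beats {
+    port => 5044
+    ssl  => true
+    ssl_certificate => "/etc/logstash/beats.crt" # placeholder path
+    ssl_key => "/etc/logstash/beats.key.p8"      # placeholder path, PKCS8 format
+  }
+}
+------------------------------------------------------------------------------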
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-cipher_suites"]
+===== `cipher_suites`
+
+  * Value type is {logstash-ref}/configuration-file-structure.html#array[array]
+  * Default value is `["TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384", "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384", "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256"]`
+
+The list of cipher suites to use, listed by priority.
+
+[id="{version}-plugins-{type}s-{plugin}-client_inactivity_timeout"]
+===== `client_inactivity_timeout`
+
+  * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+  * Default value is `60`
+
+Close idle clients after X seconds of inactivity.
+
+[id="{version}-plugins-{type}s-{plugin}-congestion_threshold"]
+===== `congestion_threshold` (DEPRECATED)
+
+  * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions.
+  * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+  * Default value is `5`
+
+The number of seconds before we raise a timeout.
+This option is useful for controlling how long to wait if something is blocking the pipeline.
+
+[id="{version}-plugins-{type}s-{plugin}-host"]
+===== `host`
+
+  * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+  * Default value is `"0.0.0.0"`
+
+The IP address to listen on.
+
+[id="{version}-plugins-{type}s-{plugin}-include_codec_tag"]
+===== `include_codec_tag`
+
+  * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+  * Default value is `true`
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-port"]
+===== `port`
+
+  * This is a required setting.
+  * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+  * There is no default value for this setting.
+
+The port to listen on.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl"]
+===== `ssl`
+
+  * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+  * Default value is `false`
+
+Events are by default sent in plain text. You can
+enable encryption by setting `ssl` to true and configuring
+the `ssl_certificate` and `ssl_key` options.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_certificate"]
+===== `ssl_certificate`
+
+  * Value type is {logstash-ref}/configuration-file-structure.html#path[path]
+  * There is no default value for this setting.
+
+SSL certificate to use.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_certificate_authorities"]
+===== `ssl_certificate_authorities`
+
+  * Value type is {logstash-ref}/configuration-file-structure.html#array[array]
+  * Default value is `[]`
+
+Validate client certificates against these authorities.
+You can define multiple files or paths. All the certificates will
+be read and added to the trust store. You need to configure the `ssl_verify_mode`
+to `peer` or `force_peer` to enable the verification.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_handshake_timeout"]
+===== `ssl_handshake_timeout`
+
+  * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+  * Default value is `10000`
+
+Time in milliseconds for an incomplete SSL handshake to time out.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_key"]
+===== `ssl_key`
+
+  * Value type is {logstash-ref}/configuration-file-structure.html#path[path]
+  * There is no default value for this setting.
+
+SSL key to use.
+
+NOTE: This key needs to be in the PKCS8 format. You can convert it with
+https://www.openssl.org/docs/man1.1.0/apps/pkcs8.html[OpenSSL]; see the linked
+`pkcs8` manual page for more information.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_key_passphrase"]
+===== `ssl_key_passphrase`
+
+  * Value type is {logstash-ref}/configuration-file-structure.html#password[password]
+  * There is no default value for this setting.
+
+SSL key passphrase to use.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_verify_mode"]
+===== `ssl_verify_mode`
+
+  * Value can be any of: `none`, `peer`, `force_peer`
+  * Default value is `"none"`
+
+By default the server doesn't do any client verification.
+
+`peer` will make the server ask the client to provide a certificate.
+If the client provides a certificate, it will be validated.
+
+`force_peer` will make the server ask the client to provide a certificate.
+If the client doesn't provide a certificate, the connection will be closed.
+
+This option needs to be used with `ssl_certificate_authorities` and a defined list of CAs.
+
+[id="{version}-plugins-{type}s-{plugin}-target_field_for_codec"]
+===== `target_field_for_codec` (DEPRECATED)
+
+  * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions.
+  * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+  * Default value is `"message"`
+
+This is the default field to which the specified codec will be applied.
+
+[id="{version}-plugins-{type}s-{plugin}-tls_max_version"]
+===== `tls_max_version`
+
+  * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+  * Default value is `1.2`
+
+The maximum TLS version allowed for the encrypted connections. The value must be
+one of the following: `1.0` for TLS 1.0, `1.1` for TLS 1.1, `1.2` for TLS 1.2.
+
+[id="{version}-plugins-{type}s-{plugin}-tls_min_version"]
+===== `tls_min_version`
+
+  * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+  * Default value is `1`
+
+The minimum TLS version allowed for the encrypted connections. The value must be
+one of the following: `1.0` for TLS 1.0, `1.1` for TLS 1.1, `1.2` for TLS 1.2.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
diff --git a/docs/versioned-plugins/inputs/beats-v4.0.2.asciidoc b/docs/versioned-plugins/inputs/beats-v4.0.2.asciidoc
new file mode 100644
index 000000000..f870f35dd
--- /dev/null
+++ b/docs/versioned-plugins/inputs/beats-v4.0.2.asciidoc
@@ -0,0 +1,240 @@
+:plugin: beats
+:type: input
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v4.0.2
+:release_date: 2017-06-07
+:changelog_url: https://github.com/logstash-plugins/logstash-input-beats/blob/v4.0.2/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+=== Beats input plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This input plugin enables Logstash to receive events from the
+https://www.elastic.co/products/beats[Elastic Beats] framework.
+
+The following example shows how to configure Logstash to listen on port
+5044 for incoming Beats connections and to index into Elasticsearch:
+
+[source,ruby]
+------------------------------------------------------------------------------
+input {
+  beats {
+    port => 5044
+  }
+}
+
+output {
+  elasticsearch {
+    hosts => "localhost:9200"
+    manage_template => false
+    index => "%{[@metadata][beat]}-%{+YYYY.MM.dd}"
+    document_type => "%{[@metadata][type]}"
+  }
+}
+------------------------------------------------------------------------------
+
+NOTE: The Beats shipper automatically sets the `type` field on the event.
+You cannot override this setting in the Logstash config. If you specify
+a setting for the <<{version}-plugins-{type}s-{plugin}-type,`type`>> config option in
+Logstash, it is ignored.
+
+IMPORTANT: If you are shipping events that span multiple lines, you need to
+use the https://www.elastic.co/guide/en/beats/filebeat/current/multiline-examples.html[configuration options available in Filebeat]
+to handle multiline events before sending the event data to Logstash. You cannot
+use the {logstash-ref}/plugins-codecs-multiline.html[multiline] codec to handle
+multiline events. Doing so will prevent Logstash from starting.
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Beats Input Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-cipher_suites>> |{logstash-ref}/configuration-file-structure.html#array[array]|No
+| <<{version}-plugins-{type}s-{plugin}-client_inactivity_timeout>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+| <<{version}-plugins-{type}s-{plugin}-host>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-include_codec_tag>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-port>> |{logstash-ref}/configuration-file-structure.html#number[number]|Yes
+| <<{version}-plugins-{type}s-{plugin}-ssl>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_certificate>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_authorities>> |{logstash-ref}/configuration-file-structure.html#array[array]|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_handshake_timeout>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_key>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_key_passphrase>> |{logstash-ref}/configuration-file-structure.html#password[password]|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_verify_mode>> |{logstash-ref}/configuration-file-structure.html#string[string], one of `["none", "peer", "force_peer"]`|No
+| <<{version}-plugins-{type}s-{plugin}-tls_max_version>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+| <<{version}-plugins-{type}s-{plugin}-tls_min_version>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+input plugins.
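+
+As a quick orientation, here is a sketch of mutual-TLS client verification using
+the options documented below; all filesystem paths are placeholders:
+
+[source,ruby]
+------------------------------------------------------------------------------
+input {
+  beats {
+    port => 5044
+    ssl  => true
+    ssl_certificate => "/etc/logstash/beats.crt"            # placeholder
+    ssl_key => "/etc/logstash/beats.key.p8"                 # placeholder, PKCS8
+    ssl_verify_mode => "force_peer"                         # require a client certificate
+    ssl_certificate_authorities => ["/etc/logstash/ca.crt"] # placeholder CA
+  }
+}
+------------------------------------------------------------------------------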
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-cipher_suites"]
+===== `cipher_suites`
+
+  * Value type is {logstash-ref}/configuration-file-structure.html#array[array]
+  * Default value is `["TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384", "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384", "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256"]`
+
+The list of cipher suites to use, listed by priority.
+
+[id="{version}-plugins-{type}s-{plugin}-client_inactivity_timeout"]
+===== `client_inactivity_timeout`
+
+  * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+  * Default value is `60`
+
+Close idle clients after X seconds of inactivity.
+
+[id="{version}-plugins-{type}s-{plugin}-congestion_threshold"]
+===== `congestion_threshold` (DEPRECATED)
+
+  * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions.
+  * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+  * Default value is `5`
+
+The number of seconds before we raise a timeout.
+This option is useful for controlling how long to wait if something is blocking the pipeline.
+
+[id="{version}-plugins-{type}s-{plugin}-host"]
+===== `host`
+
+  * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+  * Default value is `"0.0.0.0"`
+
+The IP address to listen on.
+
+[id="{version}-plugins-{type}s-{plugin}-include_codec_tag"]
+===== `include_codec_tag`
+
+  * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+  * Default value is `true`
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-port"]
+===== `port`
+
+  * This is a required setting.
+  * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+  * There is no default value for this setting.
+
+The port to listen on.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl"]
+===== `ssl`
+
+  * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+  * Default value is `false`
+
+Events are by default sent in plain text. You can
+enable encryption by setting `ssl` to true and configuring
+the `ssl_certificate` and `ssl_key` options.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_certificate"]
+===== `ssl_certificate`
+
+  * Value type is {logstash-ref}/configuration-file-structure.html#path[path]
+  * There is no default value for this setting.
+
+SSL certificate to use.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_certificate_authorities"]
+===== `ssl_certificate_authorities`
+
+  * Value type is {logstash-ref}/configuration-file-structure.html#array[array]
+  * Default value is `[]`
+
+Validate client certificates against these authorities.
+You can define multiple files or paths. All the certificates will
+be read and added to the trust store. You need to configure the `ssl_verify_mode`
+to `peer` or `force_peer` to enable the verification.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_handshake_timeout"]
+===== `ssl_handshake_timeout`
+
+  * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+  * Default value is `10000`
+
+Time in milliseconds for an incomplete SSL handshake to time out.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_key"]
+===== `ssl_key`
+
+  * Value type is {logstash-ref}/configuration-file-structure.html#path[path]
+  * There is no default value for this setting.
+
+SSL key to use.
+
+NOTE: This key needs to be in the PKCS8 format. You can convert it with
+https://www.openssl.org/docs/man1.1.0/apps/pkcs8.html[OpenSSL]; see the linked
+`pkcs8` manual page for more information.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_key_passphrase"]
+===== `ssl_key_passphrase`
+
+  * Value type is {logstash-ref}/configuration-file-structure.html#password[password]
+  * There is no default value for this setting.
+
+SSL key passphrase to use.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_verify_mode"]
+===== `ssl_verify_mode`
+
+  * Value can be any of: `none`, `peer`, `force_peer`
+  * Default value is `"none"`
+
+By default the server doesn't do any client verification.
+
+`peer` will make the server ask the client to provide a certificate.
+If the client provides a certificate, it will be validated.
+
+`force_peer` will make the server ask the client to provide a certificate.
+If the client doesn't provide a certificate, the connection will be closed.
+
+This option needs to be used with `ssl_certificate_authorities` and a defined list of CAs.
+
+[id="{version}-plugins-{type}s-{plugin}-target_field_for_codec"]
+===== `target_field_for_codec` (DEPRECATED)
+
+  * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions.
+  * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+  * Default value is `"message"`
+
+This is the default field to which the specified codec will be applied.
+
+[id="{version}-plugins-{type}s-{plugin}-tls_max_version"]
+===== `tls_max_version`
+
+  * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+  * Default value is `1.2`
+
+The maximum TLS version allowed for the encrypted connections. The value must be
+one of the following: `1.0` for TLS 1.0, `1.1` for TLS 1.1, `1.2` for TLS 1.2.
+
+[id="{version}-plugins-{type}s-{plugin}-tls_min_version"]
+===== `tls_min_version`
+
+  * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+  * Default value is `1`
+
+The minimum TLS version allowed for the encrypted connections. The value must be
+one of the following: `1.0` for TLS 1.0, `1.1` for TLS 1.1, `1.2` for TLS 1.2.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
diff --git a/docs/versioned-plugins/inputs/beats-v4.0.3.asciidoc b/docs/versioned-plugins/inputs/beats-v4.0.3.asciidoc
new file mode 100644
index 000000000..62c834b72
--- /dev/null
+++ b/docs/versioned-plugins/inputs/beats-v4.0.3.asciidoc
@@ -0,0 +1,240 @@
+:plugin: beats
+:type: input
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v4.0.3
+:release_date: 2017-06-22
+:changelog_url: https://github.com/logstash-plugins/logstash-input-beats/blob/v4.0.3/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+=== Beats input plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This input plugin enables Logstash to receive events from the
+https://www.elastic.co/products/beats[Elastic Beats] framework.
+
+The following example shows how to configure Logstash to listen on port
+5044 for incoming Beats connections and to index into Elasticsearch:
+
+[source,ruby]
+------------------------------------------------------------------------------
+input {
+  beats {
+    port => 5044
+  }
+}
+
+output {
+  elasticsearch {
+    hosts => "localhost:9200"
+    manage_template => false
+    index => "%{[@metadata][beat]}-%{+YYYY.MM.dd}"
+    document_type => "%{[@metadata][type]}"
+  }
+}
+------------------------------------------------------------------------------
+
+NOTE: The Beats shipper automatically sets the `type` field on the event.
+You cannot override this setting in the Logstash config. If you specify
+a setting for the <<{version}-plugins-{type}s-{plugin}-type,`type`>> config option in
+Logstash, it is ignored.
+
+IMPORTANT: If you are shipping events that span multiple lines, you need to
+use the https://www.elastic.co/guide/en/beats/filebeat/current/multiline-examples.html[configuration options available in Filebeat]
+to handle multiline events before sending the event data to Logstash. You cannot
+use the {logstash-ref}/plugins-codecs-multiline.html[multiline] codec to handle
+multiline events. Doing so will prevent Logstash from starting.
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Beats Input Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-cipher_suites>> |{logstash-ref}/configuration-file-structure.html#array[array]|No
+| <<{version}-plugins-{type}s-{plugin}-client_inactivity_timeout>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+| <<{version}-plugins-{type}s-{plugin}-host>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-include_codec_tag>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-port>> |{logstash-ref}/configuration-file-structure.html#number[number]|Yes
+| <<{version}-plugins-{type}s-{plugin}-ssl>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_certificate>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_authorities>> |{logstash-ref}/configuration-file-structure.html#array[array]|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_handshake_timeout>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_key>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_key_passphrase>> |{logstash-ref}/configuration-file-structure.html#password[password]|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_verify_mode>> |{logstash-ref}/configuration-file-structure.html#string[string], one of `["none", "peer", "force_peer"]`|No
+| <<{version}-plugins-{type}s-{plugin}-tls_max_version>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+| <<{version}-plugins-{type}s-{plugin}-tls_min_version>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+input plugins.
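+
+For instance, negotiated protocol versions can be restricted with the
+`tls_min_version` and `tls_max_version` options described below; this is a
+sketch, and the paths and values shown are illustrative:
+
+[source,ruby]
+------------------------------------------------------------------------------
+input {
+  beats {
+    port => 5044
+    ssl  => true
+    ssl_certificate => "/etc/logstash/beats.crt" # placeholder
+    ssl_key => "/etc/logstash/beats.key.p8"      # placeholder, PKCS8
+    tls_min_version => 1.2                       # refuse TLS 1.0 and 1.1 clients
+    tls_max_version => 1.2
+  }
+}
+------------------------------------------------------------------------------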
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-cipher_suites"]
+===== `cipher_suites`
+
+  * Value type is {logstash-ref}/configuration-file-structure.html#array[array]
+  * Default value is `["TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384", "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384", "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256"]`
+
+The list of cipher suites to use, listed by priority.
+
+[id="{version}-plugins-{type}s-{plugin}-client_inactivity_timeout"]
+===== `client_inactivity_timeout`
+
+  * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+  * Default value is `60`
+
+Close idle clients after X seconds of inactivity.
+
+[id="{version}-plugins-{type}s-{plugin}-congestion_threshold"]
+===== `congestion_threshold` (DEPRECATED)
+
+  * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions.
+  * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+  * Default value is `5`
+
+The number of seconds before we raise a timeout.
+This option is useful for controlling how long to wait if something is blocking the pipeline.
+
+[id="{version}-plugins-{type}s-{plugin}-host"]
+===== `host`
+
+  * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+  * Default value is `"0.0.0.0"`
+
+The IP address to listen on.
+
+[id="{version}-plugins-{type}s-{plugin}-include_codec_tag"]
+===== `include_codec_tag`
+
+  * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+  * Default value is `true`
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-port"]
+===== `port`
+
+  * This is a required setting.
+  * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+  * There is no default value for this setting.
+
+The port to listen on.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl"]
+===== `ssl`
+
+  * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+  * Default value is `false`
+
+Events are by default sent in plain text. You can
+enable encryption by setting `ssl` to true and configuring
+the `ssl_certificate` and `ssl_key` options.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_certificate"]
+===== `ssl_certificate`
+
+  * Value type is {logstash-ref}/configuration-file-structure.html#path[path]
+  * There is no default value for this setting.
+
+SSL certificate to use.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_certificate_authorities"]
+===== `ssl_certificate_authorities`
+
+  * Value type is {logstash-ref}/configuration-file-structure.html#array[array]
+  * Default value is `[]`
+
+Validate client certificates against these authorities.
+You can define multiple files or paths. All the certificates will
+be read and added to the trust store. You need to configure the `ssl_verify_mode`
+to `peer` or `force_peer` to enable the verification.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_handshake_timeout"]
+===== `ssl_handshake_timeout`
+
+  * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+  * Default value is `10000`
+
+Time in milliseconds for an incomplete SSL handshake to time out.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_key"]
+===== `ssl_key`
+
+  * Value type is {logstash-ref}/configuration-file-structure.html#path[path]
+  * There is no default value for this setting.
+
+SSL key to use.
+
+NOTE: This key needs to be in the PKCS8 format. You can convert it with
+https://www.openssl.org/docs/man1.1.0/apps/pkcs8.html[OpenSSL]; see the linked
+`pkcs8` manual page for more information.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_key_passphrase"]
+===== `ssl_key_passphrase`
+
+  * Value type is {logstash-ref}/configuration-file-structure.html#password[password]
+  * There is no default value for this setting.
+
+SSL key passphrase to use.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_verify_mode"]
+===== `ssl_verify_mode`
+
+  * Value can be any of: `none`, `peer`, `force_peer`
+  * Default value is `"none"`
+
+By default the server doesn't do any client verification.
+
+`peer` will make the server ask the client to provide a certificate.
+If the client provides a certificate, it will be validated.
+
+`force_peer` will make the server ask the client to provide a certificate.
+If the client doesn't provide a certificate, the connection will be closed.
+
+This option needs to be used with `ssl_certificate_authorities` and a defined list of CAs.
+
+[id="{version}-plugins-{type}s-{plugin}-target_field_for_codec"]
+===== `target_field_for_codec` (DEPRECATED)
+
+  * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions.
+  * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+  * Default value is `"message"`
+
+This is the default field to which the specified codec will be applied.
+
+[id="{version}-plugins-{type}s-{plugin}-tls_max_version"]
+===== `tls_max_version`
+
+  * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+  * Default value is `1.2`
+
+The maximum TLS version allowed for the encrypted connections. The value must be
+one of the following: `1.0` for TLS 1.0, `1.1` for TLS 1.1, `1.2` for TLS 1.2.
+
+[id="{version}-plugins-{type}s-{plugin}-tls_min_version"]
+===== `tls_min_version`
+
+  * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+  * Default value is `1`
+
+The minimum TLS version allowed for the encrypted connections. The value must be
+one of the following: `1.0` for TLS 1.0, `1.1` for TLS 1.1, `1.2` for TLS 1.2.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
diff --git a/docs/versioned-plugins/inputs/beats-v4.0.4.asciidoc b/docs/versioned-plugins/inputs/beats-v4.0.4.asciidoc
new file mode 100644
index 000000000..fef8fe7ac
--- /dev/null
+++ b/docs/versioned-plugins/inputs/beats-v4.0.4.asciidoc
@@ -0,0 +1,241 @@
+:plugin: beats
+:type: input
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v4.0.4
+:release_date: 2017-06-23
+:changelog_url: https://github.com/logstash-plugins/logstash-input-beats/blob/v4.0.4/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+=== Beats input plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This input plugin enables Logstash to receive events from the
+https://www.elastic.co/products/beats[Elastic Beats] framework.
+
+The following example shows how to configure Logstash to listen on port
+5044 for incoming Beats connections and to index into Elasticsearch:
+
+[source,ruby]
+------------------------------------------------------------------------------
+input {
+  beats {
+    port => 5044
+  }
+}
+
+output {
+  elasticsearch {
+    hosts => "localhost:9200"
+    manage_template => false
+    index => "%{[@metadata][beat]}-%{+YYYY.MM.dd}"
+    document_type => "%{[@metadata][type]}"
+  }
+}
+------------------------------------------------------------------------------
+
+NOTE: The Beats shipper automatically sets the `type` field on the event.
+You cannot override this setting in the Logstash config. If you specify
+a setting for the <<{version}-plugins-{type}s-{plugin}-type,`type`>> config option in
+Logstash, it is ignored.
+
+IMPORTANT: If you are shipping events that span multiple lines, you need to
+use the https://www.elastic.co/guide/en/beats/filebeat/current/multiline-examples.html[configuration options available in Filebeat]
+to handle multiline events before sending the event data to Logstash. You cannot
+use the {logstash-ref}/plugins-codecs-multiline.html[multiline] codec to handle
+multiline events. Doing so will prevent Logstash from starting.
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Beats Input Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-cipher_suites>> |{logstash-ref}/configuration-file-structure.html#array[array]|No
+| <<{version}-plugins-{type}s-{plugin}-client_inactivity_timeout>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+| <<{version}-plugins-{type}s-{plugin}-host>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-include_codec_tag>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-port>> |{logstash-ref}/configuration-file-structure.html#number[number]|Yes
+| <<{version}-plugins-{type}s-{plugin}-ssl>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_certificate>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_authorities>> |{logstash-ref}/configuration-file-structure.html#array[array]|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_handshake_timeout>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_key>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_key_passphrase>> |{logstash-ref}/configuration-file-structure.html#password[password]|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_verify_mode>> |{logstash-ref}/configuration-file-structure.html#string[string], one of `["none", "peer", "force_peer"]`|No
+| <<{version}-plugins-{type}s-{plugin}-tls_max_version>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+| <<{version}-plugins-{type}s-{plugin}-tls_min_version>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+input plugins.
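+
+For instance, a plain-text listener bound to a specific interface, with an `id`
+for the monitoring APIs; the address and id values are illustrative:
+
+[source,ruby]
+------------------------------------------------------------------------------
+input {
+  beats {
+    host => "10.1.2.3"   # listen on one interface instead of the 0.0.0.0 default
+    port => 5044
+    id   => "beats_ingest"
+  }
+}
+------------------------------------------------------------------------------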
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-cipher_suites"]
+===== `cipher_suites`
+
+  * Value type is {logstash-ref}/configuration-file-structure.html#array[array]
+  * Default value is `["TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384", "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384", "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256"]`
+
+The list of cipher suites to use, listed by priority.
+
+[id="{version}-plugins-{type}s-{plugin}-client_inactivity_timeout"]
+===== `client_inactivity_timeout`
+
+  * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+  * Default value is `60`
+
+Close idle clients after X seconds of inactivity.
+
+[id="{version}-plugins-{type}s-{plugin}-congestion_threshold"]
+===== `congestion_threshold` (DEPRECATED)
+
+  * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions.
+  * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+  * Default value is `5`
+
+The number of seconds before we raise a timeout.
+This option is useful for controlling how long to wait if something is blocking the pipeline.
+
+[id="{version}-plugins-{type}s-{plugin}-host"]
+===== `host`
+
+  * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+  * Default value is `"0.0.0.0"`
+
+The IP address to listen on.
+
+[id="{version}-plugins-{type}s-{plugin}-include_codec_tag"]
+===== `include_codec_tag`
+
+  * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+  * Default value is `true`
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-port"]
+===== `port`
+
+  * This is a required setting.
+  * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+  * There is no default value for this setting.
+
+The port to listen on.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl"]
+===== `ssl`
+
+  * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+  * Default value is `false`
+
+Events are by default sent in plain text. You can
+enable encryption by setting `ssl` to true and configuring
+the `ssl_certificate` and `ssl_key` options.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_certificate"]
+===== `ssl_certificate`
+
+  * Value type is {logstash-ref}/configuration-file-structure.html#path[path]
+  * There is no default value for this setting.
+
+SSL certificate to use.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_certificate_authorities"]
+===== `ssl_certificate_authorities`
+
+  * Value type is {logstash-ref}/configuration-file-structure.html#array[array]
+  * Default value is `[]`
+
+Validate client certificates against these authorities.
+You can define multiple files or paths. All the certificates will
+be read and added to the trust store. You need to configure the `ssl_verify_mode`
+to `peer` or `force_peer` to enable the verification.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_handshake_timeout"]
+===== `ssl_handshake_timeout`
+
+  * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+  * Default value is `10000`
+
+Time in milliseconds for an incomplete SSL handshake to time out.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_key"]
+===== `ssl_key`
+
+  * Value type is {logstash-ref}/configuration-file-structure.html#path[path]
+  * There is no default value for this setting.
+
+SSL key to use.
+
+NOTE: This key needs to be in the PKCS8 format. You can convert it with
+https://www.openssl.org/docs/man1.1.0/apps/pkcs8.html[OpenSSL]; see the linked
+`pkcs8` manual page for more information.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_key_passphrase"]
+===== `ssl_key_passphrase`
+
+  * Value type is {logstash-ref}/configuration-file-structure.html#password[password]
+  * There is no default value for this setting.
+
+SSL key passphrase to use.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_verify_mode"]
+===== `ssl_verify_mode`
+
+  * Value can be any of: `none`, `peer`, `force_peer`
+  * Default value is `"none"`
+
+By default the server doesn't do any client verification.
+
+`peer` will make the server ask the client to provide a certificate.
+If the client provides a certificate, it will be validated.
+
+`force_peer` will make the server ask the client to provide a certificate.
+If the client doesn't provide a certificate, the connection will be closed.
+
+This option needs to be used with `ssl_certificate_authorities` and a defined list of CAs.
+
+[id="{version}-plugins-{type}s-{plugin}-target_field_for_codec"]
+===== `target_field_for_codec` (DEPRECATED)
+
+  * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions.
+  * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+  * Default value is `"message"`
+
+This is the default field to which the specified codec will be applied.
+
+[id="{version}-plugins-{type}s-{plugin}-tls_max_version"]
+===== `tls_max_version`
+
+  * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+  * Default value is `1.2`
+
+The maximum TLS version allowed for the encrypted connections. The value must be
+one of the following: `1.0` for TLS 1.0, `1.1` for TLS 1.1, `1.2` for TLS 1.2.
+
+[id="{version}-plugins-{type}s-{plugin}-tls_min_version"]
+===== `tls_min_version`
+
+  * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+  * Default value is `1`
+
+The minimum TLS version allowed for the encrypted connections. The value must be
+one of the following: `1.0` for TLS 1.0, `1.1` for TLS 1.1, `1.2` for TLS 1.2.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/beats-v4.0.5.asciidoc b/docs/versioned-plugins/inputs/beats-v4.0.5.asciidoc
new file mode 100644
index 000000000..09a9d06d7
--- /dev/null
+++ b/docs/versioned-plugins/inputs/beats-v4.0.5.asciidoc
@@ -0,0 +1,241 @@
+:plugin: beats
+:type: input
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v4.0.5
+:release_date: 2017-07-12
+:changelog_url: https://github.com/logstash-plugins/logstash-input-beats/blob/v4.0.5/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+=== Beats input plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This input plugin enables Logstash to receive events from the
+https://www.elastic.co/products/beats[Elastic Beats] framework.
+
+The following example shows how to configure Logstash to listen on port
+5044 for incoming Beats connections and to index into Elasticsearch:
+
+[source,ruby]
+------------------------------------------------------------------------------
+input {
+  beats {
+    port => 5044
+  }
+}
+
+output {
+  elasticsearch {
+    hosts => "localhost:9200"
+    manage_template => false
+    index => "%{[@metadata][beat]}-%{+YYYY.MM.dd}"
+    document_type => "%{[@metadata][type]}"
+  }
+}
+------------------------------------------------------------------------------
+
+NOTE: The Beats shipper automatically sets the `type` field on the event.
+You cannot override this setting in the Logstash config. If you specify
+a setting for the <<{version}-plugins-{type}s-{plugin}-type,`type`>> config option in
+Logstash, it is ignored.
+
+IMPORTANT: If you are shipping events that span multiple lines, you need to
+use the https://www.elastic.co/guide/en/beats/filebeat/current/multiline-examples.html[configuration options available in Filebeat]
+to handle multiline events before sending the event data to Logstash. You cannot
+use the {logstash-ref}/plugins-codecs-multiline.html[multiline] codec to handle
+multiline events. Doing so will prevent Logstash from starting.
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Beats Input Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-cipher_suites>> |{logstash-ref}/configuration-file-structure.html#array[array]|No
+| <<{version}-plugins-{type}s-{plugin}-client_inactivity_timeout>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+| <<{version}-plugins-{type}s-{plugin}-host>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-include_codec_tag>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-port>> |{logstash-ref}/configuration-file-structure.html#number[number]|Yes
+| <<{version}-plugins-{type}s-{plugin}-ssl>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_certificate>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_authorities>> |{logstash-ref}/configuration-file-structure.html#array[array]|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_handshake_timeout>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_key>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_key_passphrase>> |{logstash-ref}/configuration-file-structure.html#password[password]|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_verify_mode>> |{logstash-ref}/configuration-file-structure.html#string[string], one of `["none", "peer", "force_peer"]`|No
+| <<{version}-plugins-{type}s-{plugin}-tls_max_version>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+| <<{version}-plugins-{type}s-{plugin}-tls_min_version>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+input plugins.
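+
+For instance, the negotiated ciphers can be narrowed with `cipher_suites`; this
+is a sketch, the paths are placeholders, and the two suites shown are taken from
+the default list documented under that option:
+
+[source,ruby]
+------------------------------------------------------------------------------
+input {
+  beats {
+    port => 5044
+    ssl  => true
+    ssl_certificate => "/etc/logstash/beats.crt" # placeholder
+    ssl_key => "/etc/logstash/beats.key.p8"      # placeholder, PKCS8
+    cipher_suites => ["TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384",
+                      "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384"]
+  }
+}
+------------------------------------------------------------------------------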
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-cipher_suites"]
+===== `cipher_suites`
+
+  * Value type is {logstash-ref}/configuration-file-structure.html#array[array]
+  * Default value is `["TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384", "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384", "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256", "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256"]`
+
+The list of cipher suites to use, listed by priority.
+
+[id="{version}-plugins-{type}s-{plugin}-client_inactivity_timeout"]
+===== `client_inactivity_timeout`
+
+  * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+  * Default value is `60`
+
+Close idle clients after X seconds of inactivity.
+
+[id="{version}-plugins-{type}s-{plugin}-congestion_threshold"]
+===== `congestion_threshold` (DEPRECATED)
+
+  * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions.
+  * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+  * Default value is `5`
+
+The number of seconds before we raise a timeout.
+This option is useful for controlling how long to wait if something is blocking the pipeline.
+
+[id="{version}-plugins-{type}s-{plugin}-host"]
+===== `host`
+
+  * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+  * Default value is `"0.0.0.0"`
+
+The IP address to listen on.
+
+[id="{version}-plugins-{type}s-{plugin}-include_codec_tag"]
+===== `include_codec_tag`
+
+  * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+  * Default value is `true`
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-port"]
+===== `port`
+
+  * This is a required setting.
+  * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+  * There is no default value for this setting.
+
+The port to listen on.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl"]
+===== `ssl`
+
+  * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+  * Default value is `false`
+
+Events are by default sent in plain text. You can
+enable encryption by setting `ssl` to true and configuring
+the `ssl_certificate` and `ssl_key` options.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_certificate"]
+===== `ssl_certificate`
+
+  * Value type is {logstash-ref}/configuration-file-structure.html#path[path]
+  * There is no default value for this setting.
+
+SSL certificate to use.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_certificate_authorities"]
+===== `ssl_certificate_authorities`
+
+  * Value type is {logstash-ref}/configuration-file-structure.html#array[array]
+  * Default value is `[]`
+
+Validate client certificates against these authorities.
+You can define multiple files or paths. All the certificates will
+be read and added to the trust store. You need to configure the `ssl_verify_mode`
+to `peer` or `force_peer` to enable the verification.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_handshake_timeout"]
+===== `ssl_handshake_timeout`
+
+  * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+  * Default value is `10000`
+
+Time in milliseconds for an incomplete SSL handshake to time out.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_key"]
+===== `ssl_key`
+
+  * Value type is {logstash-ref}/configuration-file-structure.html#path[path]
+  * There is no default value for this setting.
+
+SSL key to use.
+
+NOTE: This key needs to be in the PKCS8 format. You can convert it with
+https://www.openssl.org/docs/man1.1.0/apps/pkcs8.html[OpenSSL]; see the linked
+`pkcs8` manual page for more information.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_key_passphrase"]
+===== `ssl_key_passphrase`
+
+  * Value type is {logstash-ref}/configuration-file-structure.html#password[password]
+  * There is no default value for this setting.
+
+SSL key passphrase to use.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_verify_mode"]
+===== `ssl_verify_mode`
+
+  * Value can be any of: `none`, `peer`, `force_peer`
+  * Default value is `"none"`
+
+By default the server doesn't do any client verification.
+
+`peer` will make the server ask the client to provide a certificate.
+If the client provides a certificate, it will be validated.
+
+`force_peer` will make the server ask the client to provide a certificate.
+If the client doesn't provide a certificate, the connection will be closed.
+
+This option needs to be used with `ssl_certificate_authorities` and a defined list of CAs.
+
+[id="{version}-plugins-{type}s-{plugin}-target_field_for_codec"]
+===== `target_field_for_codec` (DEPRECATED)
+
+  * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions.
+  * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+  * Default value is `"message"`
+
+This is the default field to which the specified codec will be applied.
+
+[id="{version}-plugins-{type}s-{plugin}-tls_max_version"]
+===== `tls_max_version`
+
+  * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+  * Default value is `1.2`
+
+The maximum TLS version allowed for the encrypted connections. The value must be
+one of the following: `1.0` for TLS 1.0, `1.1` for TLS 1.1, `1.2` for TLS 1.2.
+
+[id="{version}-plugins-{type}s-{plugin}-tls_min_version"]
+===== `tls_min_version`
+
+  * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+  * Default value is `1`
+
+The minimum TLS version allowed for the encrypted connections. The value must be
+one of the following: `1.0` for TLS 1.0, `1.1` for TLS 1.1, `1.2` for TLS 1.2.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
diff --git a/docs/versioned-plugins/inputs/beats-v5.0.0.asciidoc b/docs/versioned-plugins/inputs/beats-v5.0.0.asciidoc
new file mode 100644
index 000000000..330491da8
--- /dev/null
+++ b/docs/versioned-plugins/inputs/beats-v5.0.0.asciidoc
@@ -0,0 +1,222 @@
+:plugin: beats
+:type: input
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v5.0.0
+:release_date: 2017-07-28
+:changelog_url: https://github.com/logstash-plugins/logstash-input-beats/blob/v5.0.0/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+=== Beats input plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This input plugin enables Logstash to receive events from the
+https://www.elastic.co/products/beats[Elastic Beats] framework.
+
+The following example shows how to configure Logstash to listen on port
+5044 for incoming Beats connections and to index into Elasticsearch:
+
+[source,ruby]
+------------------------------------------------------------------------------
+input {
+  beats {
+    port => 5044
+  }
+}
+
+output {
+  elasticsearch {
+    hosts => "localhost:9200"
+    manage_template => false
+    index => "%{[@metadata][beat]}-%{+YYYY.MM.dd}"
+    document_type => "%{[@metadata][type]}"
+  }
+}
+------------------------------------------------------------------------------
+
+NOTE: The Beats shipper automatically sets the `type` field on the event.
+You cannot override this setting in the Logstash config. If you specify
+a setting for the <<{version}-plugins-{type}s-{plugin}-type,`type`>> config option in
+Logstash, it is ignored.
+
+IMPORTANT: If you are shipping events that span multiple lines, you need to
+use the https://www.elastic.co/guide/en/beats/filebeat/current/multiline-examples.html[configuration options available in Filebeat] to handle multiline events
+before sending the event data to Logstash. You cannot use the
+multiline codec to handle multiline events. Doing so will
+cause Logstash to fail to start.
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Beats Input Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-cipher_suites>> |{logstash-ref}/configuration-file-structure.html#array[array]|No
+| <<{version}-plugins-{type}s-{plugin}-client_inactivity_timeout>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+| <<{version}-plugins-{type}s-{plugin}-host>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-include_codec_tag>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-port>> |{logstash-ref}/configuration-file-structure.html#number[number]|Yes
+| <<{version}-plugins-{type}s-{plugin}-ssl>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_certificate>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_authorities>> |{logstash-ref}/configuration-file-structure.html#array[array]|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_handshake_timeout>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_key>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_key_passphrase>> |{logstash-ref}/configuration-file-structure.html#password[password]|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_verify_mode>> |{logstash-ref}/configuration-file-structure.html#string[string], one of `["none", "peer", "force_peer"]`|No
+| <<{version}-plugins-{type}s-{plugin}-tls_max_version>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+| <<{version}-plugins-{type}s-{plugin}-tls_min_version>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+input plugins.
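+
+As a quick orientation before the per-option reference, here is a minimal
+sketch that combines several of these settings. The address and timeout are
+illustrative placeholders, not recommendations:
+
+[source,ruby]
+------------------------------------------------------------------------------
+input {
+  beats {
+    host => "10.0.0.5"                # assumed example address; defaults to "0.0.0.0"
+    port => 5044                      # required; no default
+    client_inactivity_timeout => 120  # close idle clients after 120s instead of the default 60s
+  }
+}
+------------------------------------------------------------------------------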
+
+ 
+
+[id="{version}-plugins-{type}s-{plugin}-cipher_suites"]
+===== `cipher_suites`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#array[array]
+ * Default value is `["TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384", "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384", "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256", "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256"]`
+
+The list of cipher suites to use, listed by priority.
+
+[id="{version}-plugins-{type}s-{plugin}-client_inactivity_timeout"]
+===== `client_inactivity_timeout`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `60`
+
+Close idle clients after `client_inactivity_timeout` seconds of inactivity.
+
+[id="{version}-plugins-{type}s-{plugin}-host"]
+===== `host`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `"0.0.0.0"`
+
+The IP address to listen on.
+
+[id="{version}-plugins-{type}s-{plugin}-include_codec_tag"]
+===== `include_codec_tag`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `true`
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-port"]
+===== `port`
+
+ * This is a required setting.
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * There is no default value for this setting.
+
+The port to listen on.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl"]
+===== `ssl`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `false`
+
+Events are sent in plain text by default. You can
+enable encryption by setting `ssl` to `true` and configuring
+the `ssl_certificate` and `ssl_key` options.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_certificate"]
+===== `ssl_certificate`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#path[path]
+ * There is no default value for this setting.
+
+SSL certificate to use.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_certificate_authorities"]
+===== `ssl_certificate_authorities`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#array[array]
+ * Default value is `[]`
+
+Validate client certificates against these authorities.
+You can define multiple files or paths. All the certificates will
+be read and added to the trust store. You need to set `ssl_verify_mode`
+to `peer` or `force_peer` to enable verification.
+
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_handshake_timeout"]
+===== `ssl_handshake_timeout`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `10000`
+
+Time in milliseconds for an incomplete SSL handshake to time out.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_key"]
+===== `ssl_key`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#path[path]
+ * There is no default value for this setting.
+
+SSL key to use.
+NOTE: This key needs to be in PKCS#8 format. You can convert it with
+https://www.openssl.org/docs/man1.1.0/apps/pkcs8.html[OpenSSL].
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_key_passphrase"]
+===== `ssl_key_passphrase`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#password[password]
+ * There is no default value for this setting.
+
+SSL key passphrase to use.
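+
+The SSL options above work together. As a hedged sketch (the paths and
+passphrase below are placeholders, not real files), a TLS-enabled listener
+might look like this:
+
+[source,ruby]
+------------------------------------------------------------------------------
+input {
+  beats {
+    port => 5044
+    ssl => true
+    ssl_certificate => "/etc/logstash/beats.crt"  # placeholder path
+    ssl_key => "/etc/logstash/beats.key.p8"       # placeholder path; key must be PKCS#8
+    ssl_key_passphrase => "changeme"              # placeholder passphrase
+  }
+}
+------------------------------------------------------------------------------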
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_verify_mode"]
+===== `ssl_verify_mode`
+
+ * Value can be any of: `none`, `peer`, `force_peer`
+ * Default value is `"none"`
+
+By default the server doesn't do any client verification.
+
+`peer` will make the server ask the client to provide a certificate.
+If the client provides a certificate, it will be validated.
+
+`force_peer` will make the server ask the client to provide a certificate.
+If the client doesn't provide a certificate, the connection will be closed.
+
+This option needs to be used with `ssl_certificate_authorities` and a defined list of CAs.
+
+[id="{version}-plugins-{type}s-{plugin}-tls_max_version"]
+===== `tls_max_version`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `1.2`
+
+The maximum TLS version allowed for the encrypted connections. The value must be one of the following:
+1.0 for TLS 1.0, 1.1 for TLS 1.1, 1.2 for TLS 1.2
+
+[id="{version}-plugins-{type}s-{plugin}-tls_min_version"]
+===== `tls_min_version`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `1`
+
+The minimum TLS version allowed for the encrypted connections. The value must be one of the following:
+1.0 for TLS 1.0, 1.1 for TLS 1.1, 1.2 for TLS 1.2
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
diff --git a/docs/versioned-plugins/inputs/beats-v5.0.1.asciidoc b/docs/versioned-plugins/inputs/beats-v5.0.1.asciidoc
new file mode 100644
index 000000000..8773d2362
--- /dev/null
+++ b/docs/versioned-plugins/inputs/beats-v5.0.1.asciidoc
@@ -0,0 +1,222 @@
+:plugin: beats
+:type: input
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v5.0.1
+:release_date: 2017-08-15
+:changelog_url: https://github.com/logstash-plugins/logstash-input-beats/blob/v5.0.1/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Beats input plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This input plugin enables Logstash to receive events from the
+https://www.elastic.co/products/beats[Elastic Beats] framework.
+
+The following example shows how to configure Logstash to listen on port
+5044 for incoming Beats connections and to index into Elasticsearch:
+
+[source,ruby]
+------------------------------------------------------------------------------
+input {
+  beats {
+    port => 5044
+  }
+}
+
+output {
+  elasticsearch {
+    hosts => "localhost:9200"
+    manage_template => false
+    index => "%{[@metadata][beat]}-%{+YYYY.MM.dd}"
+    document_type => "%{[@metadata][type]}"
+  }
+}
+------------------------------------------------------------------------------
+
+NOTE: The Beats shipper automatically sets the `type` field on the event.
+You cannot override this setting in the Logstash config. If you specify
+a setting for the <<{version}-plugins-{type}s-{plugin}-type,`type`>> config option in
+Logstash, it is ignored.
+
+IMPORTANT: If you are shipping events that span multiple lines, you need to
+use the https://www.elastic.co/guide/en/beats/filebeat/current/multiline-examples.html[configuration options available in Filebeat] to handle multiline events
+before sending the event data to Logstash.
 You cannot use the
+multiline codec to handle multiline events. Doing so will
+cause Logstash to fail to start.
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Beats Input Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-cipher_suites>> |{logstash-ref}/configuration-file-structure.html#array[array]|No
+| <<{version}-plugins-{type}s-{plugin}-client_inactivity_timeout>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+| <<{version}-plugins-{type}s-{plugin}-host>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-include_codec_tag>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-port>> |{logstash-ref}/configuration-file-structure.html#number[number]|Yes
+| <<{version}-plugins-{type}s-{plugin}-ssl>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_certificate>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_authorities>> |{logstash-ref}/configuration-file-structure.html#array[array]|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_handshake_timeout>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_key>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_key_passphrase>> |{logstash-ref}/configuration-file-structure.html#password[password]|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_verify_mode>> |{logstash-ref}/configuration-file-structure.html#string[string], one of `["none", "peer", "force_peer"]`|No
+| <<{version}-plugins-{type}s-{plugin}-tls_max_version>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+| <<{version}-plugins-{type}s-{plugin}-tls_min_version>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+input plugins.
+
+ 
+
+[id="{version}-plugins-{type}s-{plugin}-cipher_suites"]
+===== `cipher_suites`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#array[array]
+ * Default value is `["TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384", "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384", "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256", "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256"]`
+
+The list of cipher suites to use, listed by priority.
+
+[id="{version}-plugins-{type}s-{plugin}-client_inactivity_timeout"]
+===== `client_inactivity_timeout`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `60`
+
+Close idle clients after `client_inactivity_timeout` seconds of inactivity.
+
+[id="{version}-plugins-{type}s-{plugin}-host"]
+===== `host`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `"0.0.0.0"`
+
+The IP address to listen on.
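+
+For example, to accept Beats connections only from the local machine (an
+assumed hardening choice, not a requirement of the plugin), you can bind the
+listener to the loopback address:
+
+[source,ruby]
+------------------------------------------------------------------------------
+input {
+  beats {
+    host => "127.0.0.1"  # listen on loopback only, instead of all interfaces
+    port => 5044
+  }
+}
+------------------------------------------------------------------------------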
+
+[id="{version}-plugins-{type}s-{plugin}-include_codec_tag"]
+===== `include_codec_tag`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `true`
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-port"]
+===== `port`
+
+ * This is a required setting.
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * There is no default value for this setting.
+
+The port to listen on.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl"]
+===== `ssl`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `false`
+
+Events are sent in plain text by default. You can
+enable encryption by setting `ssl` to `true` and configuring
+the `ssl_certificate` and `ssl_key` options.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_certificate"]
+===== `ssl_certificate`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#path[path]
+ * There is no default value for this setting.
+
+SSL certificate to use.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_certificate_authorities"]
+===== `ssl_certificate_authorities`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#array[array]
+ * Default value is `[]`
+
+Validate client certificates against these authorities.
+You can define multiple files or paths. All the certificates will
+be read and added to the trust store. You need to set `ssl_verify_mode`
+to `peer` or `force_peer` to enable verification.
+
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_handshake_timeout"]
+===== `ssl_handshake_timeout`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `10000`
+
+Time in milliseconds for an incomplete SSL handshake to time out.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_key"]
+===== `ssl_key`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#path[path]
+ * There is no default value for this setting.
+
+SSL key to use.
+NOTE: This key needs to be in PKCS#8 format. You can convert it with
+https://www.openssl.org/docs/man1.1.0/apps/pkcs8.html[OpenSSL].
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_key_passphrase"]
+===== `ssl_key_passphrase`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#password[password]
+ * There is no default value for this setting.
+
+SSL key passphrase to use.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_verify_mode"]
+===== `ssl_verify_mode`
+
+ * Value can be any of: `none`, `peer`, `force_peer`
+ * Default value is `"none"`
+
+By default the server doesn't do any client verification.
+
+`peer` will make the server ask the client to provide a certificate.
+If the client provides a certificate, it will be validated.
+
+`force_peer` will make the server ask the client to provide a certificate.
+If the client doesn't provide a certificate, the connection will be closed.
+
+This option needs to be used with `ssl_certificate_authorities` and a defined list of CAs.
+
+[id="{version}-plugins-{type}s-{plugin}-tls_max_version"]
+===== `tls_max_version`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `1.2`
+
+The maximum TLS version allowed for the encrypted connections.
 The value must be one of the following:
+1.0 for TLS 1.0, 1.1 for TLS 1.1, 1.2 for TLS 1.2
+
+[id="{version}-plugins-{type}s-{plugin}-tls_min_version"]
+===== `tls_min_version`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `1`
+
+The minimum TLS version allowed for the encrypted connections. The value must be one of the following:
+1.0 for TLS 1.0, 1.1 for TLS 1.1, 1.2 for TLS 1.2
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
diff --git a/docs/versioned-plugins/inputs/beats-v5.0.2.asciidoc b/docs/versioned-plugins/inputs/beats-v5.0.2.asciidoc
new file mode 100644
index 000000000..50fd44a0c
--- /dev/null
+++ b/docs/versioned-plugins/inputs/beats-v5.0.2.asciidoc
@@ -0,0 +1,222 @@
+:plugin: beats
+:type: input
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v5.0.2
+:release_date: 2017-10-06
+:changelog_url: https://github.com/logstash-plugins/logstash-input-beats/blob/v5.0.2/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Beats input plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This input plugin enables Logstash to receive events from the
+https://www.elastic.co/products/beats[Elastic Beats] framework.
+
+The following example shows how to configure Logstash to listen on port
+5044 for incoming Beats connections and to index into Elasticsearch:
+
+[source,ruby]
+------------------------------------------------------------------------------
+input {
+  beats {
+    port => 5044
+  }
+}
+
+output {
+  elasticsearch {
+    hosts => "localhost:9200"
+    manage_template => false
+    index => "%{[@metadata][beat]}-%{+YYYY.MM.dd}"
+    document_type => "%{[@metadata][type]}"
+  }
+}
+------------------------------------------------------------------------------
+
+NOTE: The Beats shipper automatically sets the `type` field on the event.
+You cannot override this setting in the Logstash config. If you specify
+a setting for the <<{version}-plugins-{type}s-{plugin}-type,`type`>> config option in
+Logstash, it is ignored.
+
+IMPORTANT: If you are shipping events that span multiple lines, you need to
+use the https://www.elastic.co/guide/en/beats/filebeat/current/multiline-examples.html[configuration options available in Filebeat] to handle multiline events
+before sending the event data to Logstash. You cannot use the
+multiline codec to handle multiline events. Doing so will
+cause Logstash to fail to start.
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Beats Input Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-cipher_suites>> |{logstash-ref}/configuration-file-structure.html#array[array]|No
+| <<{version}-plugins-{type}s-{plugin}-client_inactivity_timeout>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+| <<{version}-plugins-{type}s-{plugin}-host>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-include_codec_tag>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-port>> |{logstash-ref}/configuration-file-structure.html#number[number]|Yes
+| <<{version}-plugins-{type}s-{plugin}-ssl>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_certificate>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_authorities>> |{logstash-ref}/configuration-file-structure.html#array[array]|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_handshake_timeout>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_key>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_key_passphrase>> |{logstash-ref}/configuration-file-structure.html#password[password]|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_verify_mode>> |{logstash-ref}/configuration-file-structure.html#string[string], one of `["none", "peer", "force_peer"]`|No
+| <<{version}-plugins-{type}s-{plugin}-tls_max_version>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+| <<{version}-plugins-{type}s-{plugin}-tls_min_version>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+input plugins.
+
+ 
+
+[id="{version}-plugins-{type}s-{plugin}-cipher_suites"]
+===== `cipher_suites`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#array[array]
+ * Default value is `["TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384", "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384", "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256", "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256"]`
+
+The list of cipher suites to use, listed by priority.
+
+[id="{version}-plugins-{type}s-{plugin}-client_inactivity_timeout"]
+===== `client_inactivity_timeout`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `60`
+
+Close idle clients after `client_inactivity_timeout` seconds of inactivity.
+
+[id="{version}-plugins-{type}s-{plugin}-host"]
+===== `host`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `"0.0.0.0"`
+
+The IP address to listen on.
+
+[id="{version}-plugins-{type}s-{plugin}-include_codec_tag"]
+===== `include_codec_tag`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `true`
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-port"]
+===== `port`
+
+ * This is a required setting.
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * There is no default value for this setting.
+
+The port to listen on.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl"]
+===== `ssl`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `false`
+
+Events are sent in plain text by default. You can
+enable encryption by setting `ssl` to `true` and configuring
+the `ssl_certificate` and `ssl_key` options.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_certificate"]
+===== `ssl_certificate`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#path[path]
+ * There is no default value for this setting.
+
+SSL certificate to use.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_certificate_authorities"]
+===== `ssl_certificate_authorities`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#array[array]
+ * Default value is `[]`
+
+Validate client certificates against these authorities.
+You can define multiple files or paths. All the certificates will
+be read and added to the trust store. You need to set `ssl_verify_mode`
+to `peer` or `force_peer` to enable verification.
+
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_handshake_timeout"]
+===== `ssl_handshake_timeout`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `10000`
+
+Time in milliseconds for an incomplete SSL handshake to time out.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_key"]
+===== `ssl_key`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#path[path]
+ * There is no default value for this setting.
+
+SSL key to use.
+NOTE: This key needs to be in PKCS#8 format. You can convert it with
+https://www.openssl.org/docs/man1.1.0/apps/pkcs8.html[OpenSSL].
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_key_passphrase"]
+===== `ssl_key_passphrase`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#password[password]
+ * There is no default value for this setting.
+
+SSL key passphrase to use.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_verify_mode"]
+===== `ssl_verify_mode`
+
+ * Value can be any of: `none`, `peer`, `force_peer`
+ * Default value is `"none"`
+
+By default the server doesn't do any client verification.
+
+`peer` will make the server ask the client to provide a certificate.
+If the client provides a certificate, it will be validated.
+
+`force_peer` will make the server ask the client to provide a certificate.
+If the client doesn't provide a certificate, the connection will be closed.
+
+This option needs to be used with `ssl_certificate_authorities` and a defined list of CAs.
+
+[id="{version}-plugins-{type}s-{plugin}-tls_max_version"]
+===== `tls_max_version`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `1.2`
+
+The maximum TLS version allowed for the encrypted connections. The value must be one of the following:
+1.0 for TLS 1.0, 1.1 for TLS 1.1, 1.2 for TLS 1.2
+
+[id="{version}-plugins-{type}s-{plugin}-tls_min_version"]
+===== `tls_min_version`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `1`
+
+The minimum TLS version allowed for the encrypted connections.
 The value must be one of the following:
+1.0 for TLS 1.0, 1.1 for TLS 1.1, 1.2 for TLS 1.2
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
diff --git a/docs/versioned-plugins/inputs/beats-v5.0.3.asciidoc b/docs/versioned-plugins/inputs/beats-v5.0.3.asciidoc
new file mode 100644
index 000000000..dba12f1d3
--- /dev/null
+++ b/docs/versioned-plugins/inputs/beats-v5.0.3.asciidoc
@@ -0,0 +1,222 @@
+:plugin: beats
+:type: input
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v5.0.3
+:release_date: 2017-11-07
+:changelog_url: https://github.com/logstash-plugins/logstash-input-beats/blob/v5.0.3/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Beats input plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This input plugin enables Logstash to receive events from the
+https://www.elastic.co/products/beats[Elastic Beats] framework.
+
+The following example shows how to configure Logstash to listen on port
+5044 for incoming Beats connections and to index into Elasticsearch:
+
+[source,ruby]
+------------------------------------------------------------------------------
+input {
+  beats {
+    port => 5044
+  }
+}
+
+output {
+  elasticsearch {
+    hosts => "localhost:9200"
+    manage_template => false
+    index => "%{[@metadata][beat]}-%{+YYYY.MM.dd}"
+    document_type => "%{[@metadata][type]}"
+  }
+}
+------------------------------------------------------------------------------
+
+NOTE: The Beats shipper automatically sets the `type` field on the event.
+You cannot override this setting in the Logstash config. If you specify
+a setting for the <<{version}-plugins-{type}s-{plugin}-type,`type`>> config option in
+Logstash, it is ignored.
+
+IMPORTANT: If you are shipping events that span multiple lines, you need to
+use the https://www.elastic.co/guide/en/beats/filebeat/current/multiline-examples.html[configuration options available in Filebeat] to handle multiline events
+before sending the event data to Logstash. You cannot use the
+multiline codec to handle multiline events. Doing so will
+cause Logstash to fail to start.
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Beats Input Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-cipher_suites>> |{logstash-ref}/configuration-file-structure.html#array[array]|No
+| <<{version}-plugins-{type}s-{plugin}-client_inactivity_timeout>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+| <<{version}-plugins-{type}s-{plugin}-host>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-include_codec_tag>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-port>> |{logstash-ref}/configuration-file-structure.html#number[number]|Yes
+| <<{version}-plugins-{type}s-{plugin}-ssl>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_certificate>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_authorities>> |{logstash-ref}/configuration-file-structure.html#array[array]|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_handshake_timeout>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_key>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_key_passphrase>> |{logstash-ref}/configuration-file-structure.html#password[password]|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_verify_mode>> |{logstash-ref}/configuration-file-structure.html#string[string], one of `["none", "peer", "force_peer"]`|No
+| <<{version}-plugins-{type}s-{plugin}-tls_max_version>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+| <<{version}-plugins-{type}s-{plugin}-tls_min_version>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+input plugins.
+
+ 
+
+[id="{version}-plugins-{type}s-{plugin}-cipher_suites"]
+===== `cipher_suites`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#array[array]
+ * Default value is `["TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384", "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384", "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256", "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256"]`
+
+The list of cipher suites to use, listed by priority.
+
+[id="{version}-plugins-{type}s-{plugin}-client_inactivity_timeout"]
+===== `client_inactivity_timeout`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `60`
+
+Close idle clients after `client_inactivity_timeout` seconds of inactivity.
+
+[id="{version}-plugins-{type}s-{plugin}-host"]
+===== `host`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `"0.0.0.0"`
+
+The IP address to listen on.
+
+[id="{version}-plugins-{type}s-{plugin}-include_codec_tag"]
+===== `include_codec_tag`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `true`
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-port"]
+===== `port`
+
+ * This is a required setting.
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * There is no default value for this setting.
+
+The port to listen on.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl"]
+===== `ssl`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `false`
+
+Events are sent in plain text by default. You can
+enable encryption by setting `ssl` to `true` and configuring
+the `ssl_certificate` and `ssl_key` options.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_certificate"]
+===== `ssl_certificate`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#path[path]
+ * There is no default value for this setting.
+
+SSL certificate to use.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_certificate_authorities"]
+===== `ssl_certificate_authorities`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#array[array]
+ * Default value is `[]`
+
+Validate client certificates against these authorities.
+You can define multiple files or paths. All the certificates will
+be read and added to the trust store. You need to set `ssl_verify_mode`
+to `peer` or `force_peer` to enable verification.
+
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_handshake_timeout"]
+===== `ssl_handshake_timeout`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `10000`
+
+Time in milliseconds for an incomplete SSL handshake to time out.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_key"]
+===== `ssl_key`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#path[path]
+ * There is no default value for this setting.
+
+SSL key to use.
+NOTE: This key needs to be in PKCS#8 format. You can convert it with
+https://www.openssl.org/docs/man1.1.0/apps/pkcs8.html[OpenSSL].
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_key_passphrase"]
+===== `ssl_key_passphrase`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#password[password]
+ * There is no default value for this setting.
+
+SSL key passphrase to use.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_verify_mode"]
+===== `ssl_verify_mode`
+
+ * Value can be any of: `none`, `peer`, `force_peer`
+ * Default value is `"none"`
+
+By default the server doesn't do any client verification.
+
+`peer` will make the server ask the client to provide a certificate.
+If the client provides a certificate, it will be validated.
+
+`force_peer` will make the server ask the client to provide a certificate.
+If the client doesn't provide a certificate, the connection will be closed.
+
+This option needs to be used with `ssl_certificate_authorities` and a defined list of CAs.
+
+[id="{version}-plugins-{type}s-{plugin}-tls_max_version"]
+===== `tls_max_version`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `1.2`
+
+The maximum TLS version allowed for the encrypted connections. The value must be one of the following:
+1.0 for TLS 1.0, 1.1 for TLS 1.1, 1.2 for TLS 1.2
+
+[id="{version}-plugins-{type}s-{plugin}-tls_min_version"]
+===== `tls_min_version`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `1`
+
+The minimum TLS version allowed for the encrypted connections.
 The value must be one of the following:
+1.0 for TLS 1.0, 1.1 for TLS 1.1, 1.2 for TLS 1.2
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
diff --git a/docs/versioned-plugins/inputs/beats-v5.0.4.asciidoc b/docs/versioned-plugins/inputs/beats-v5.0.4.asciidoc
new file mode 100644
index 000000000..7c0f91207
--- /dev/null
+++ b/docs/versioned-plugins/inputs/beats-v5.0.4.asciidoc
@@ -0,0 +1,222 @@
+:plugin: beats
+:type: input
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v5.0.4
+:release_date: 2017-12-12
+:changelog_url: https://github.com/logstash-plugins/logstash-input-beats/blob/v5.0.4/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Beats input plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This input plugin enables Logstash to receive events from the
+https://www.elastic.co/products/beats[Elastic Beats] framework.
+
+The following example shows how to configure Logstash to listen on port
+5044 for incoming Beats connections and to index into Elasticsearch:
+
+[source,ruby]
+------------------------------------------------------------------------------
+input {
+  beats {
+    port => 5044
+  }
+}
+
+output {
+  elasticsearch {
+    hosts => "localhost:9200"
+    manage_template => false
+    index => "%{[@metadata][beat]}-%{+YYYY.MM.dd}"
+    document_type => "%{[@metadata][type]}"
+  }
+}
+------------------------------------------------------------------------------
+
+NOTE: The Beats shipper automatically sets the `type` field on the event.
+You cannot override this setting in the Logstash config. If you specify
+a setting for the <<{version}-plugins-{type}s-{plugin}-type,`type`>> config option in
+Logstash, it is ignored.
+
+IMPORTANT: If you are shipping events that span multiple lines, you need to
+use the https://www.elastic.co/guide/en/beats/filebeat/current/multiline-examples.html[configuration options available in Filebeat] to handle multiline events
+before sending the event data to Logstash. You cannot use the
+multiline codec to handle multiline events. Doing so will
+cause Logstash to fail to start.
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Beats Input Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-cipher_suites>> |{logstash-ref}/configuration-file-structure.html#array[array]|No
+| <<{version}-plugins-{type}s-{plugin}-client_inactivity_timeout>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+| <<{version}-plugins-{type}s-{plugin}-host>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-include_codec_tag>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-port>> |{logstash-ref}/configuration-file-structure.html#number[number]|Yes
+| <<{version}-plugins-{type}s-{plugin}-ssl>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_certificate>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_authorities>> |{logstash-ref}/configuration-file-structure.html#array[array]|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_handshake_timeout>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_key>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_key_passphrase>> |{logstash-ref}/configuration-file-structure.html#password[password]|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_verify_mode>> |{logstash-ref}/configuration-file-structure.html#string[string], one of `["none", "peer", "force_peer"]`|No
+| <<{version}-plugins-{type}s-{plugin}-tls_max_version>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+| <<{version}-plugins-{type}s-{plugin}-tls_min_version>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+input plugins.
+
+ 
+
+[id="{version}-plugins-{type}s-{plugin}-cipher_suites"]
+===== `cipher_suites`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#array[array]
+ * Default value is `["TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384", "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384", "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256", "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256"]`
+
+The list of cipher suites to use, listed by priority.
+
+[id="{version}-plugins-{type}s-{plugin}-client_inactivity_timeout"]
+===== `client_inactivity_timeout`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `60`
+
+Close idle clients after `client_inactivity_timeout` seconds of inactivity.
+
+[id="{version}-plugins-{type}s-{plugin}-host"]
+===== `host`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `"0.0.0.0"`
+
+The IP address to listen on.
+
+[id="{version}-plugins-{type}s-{plugin}-include_codec_tag"]
+===== `include_codec_tag`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `true`
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-port"]
+===== `port`
+
+ * This is a required setting.
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * There is no default value for this setting.
+
+The port to listen on.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl"]
+===== `ssl`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `false`
+
+Events are sent in plain text by default. You can
+enable encryption by setting `ssl` to `true` and configuring
+the `ssl_certificate` and `ssl_key` options.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_certificate"]
+===== `ssl_certificate`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#path[path]
+ * There is no default value for this setting.
+
+SSL certificate to use.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_certificate_authorities"]
+===== `ssl_certificate_authorities`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#array[array]
+ * Default value is `[]`
+
+Validate client certificates against these authorities.
+You can define multiple files or paths. All the certificates will
+be read and added to the trust store. You need to set `ssl_verify_mode`
+to `peer` or `force_peer` to enable verification.
+
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_handshake_timeout"]
+===== `ssl_handshake_timeout`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `10000`
+
+Time in milliseconds for an incomplete SSL handshake to time out.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_key"]
+===== `ssl_key`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#path[path]
+ * There is no default value for this setting.
+
+SSL key to use.
+NOTE: This key needs to be in PKCS#8 format. You can convert it with
+https://www.openssl.org/docs/man1.1.0/apps/pkcs8.html[OpenSSL].
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_key_passphrase"]
+===== `ssl_key_passphrase`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#password[password]
+ * There is no default value for this setting.
+
+SSL key passphrase to use.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_verify_mode"]
+===== `ssl_verify_mode`
+
+ * Value can be any of: `none`, `peer`, `force_peer`
+ * Default value is `"none"`
+
+By default the server doesn't do any client verification.
+
+`peer` will make the server ask the client to provide a certificate.
+If the client provides a certificate, it will be validated.
+
+`force_peer` will make the server ask the client to provide a certificate.
+If the client doesn't provide a certificate, the connection will be closed.
+
+This option needs to be used with `ssl_certificate_authorities` and a defined list of CAs.
+
+[id="{version}-plugins-{type}s-{plugin}-tls_max_version"]
+===== `tls_max_version`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `1.2`
+
+The maximum TLS version allowed for the encrypted connections. The value must be one of the following:
+1.0 for TLS 1.0, 1.1 for TLS 1.1, 1.2 for TLS 1.2
+
+[id="{version}-plugins-{type}s-{plugin}-tls_min_version"]
+===== `tls_min_version`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `1`
+
+The minimum TLS version allowed for the encrypted connections.
 The value must be one of the following:
+1.0 for TLS 1.0, 1.1 for TLS 1.1, 1.2 for TLS 1.2
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
diff --git a/docs/versioned-plugins/inputs/beats-v5.0.5.asciidoc b/docs/versioned-plugins/inputs/beats-v5.0.5.asciidoc
new file mode 100644
index 000000000..ad517c548
--- /dev/null
+++ b/docs/versioned-plugins/inputs/beats-v5.0.5.asciidoc
@@ -0,0 +1,222 @@
+:plugin: beats
+:type: input
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v5.0.5
+:release_date: 2017-12-19
+:changelog_url: https://github.com/logstash-plugins/logstash-input-beats/blob/v5.0.5/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Beats input plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This input plugin enables Logstash to receive events from the
+https://www.elastic.co/products/beats[Elastic Beats] framework.
+
+The following example shows how to configure Logstash to listen on port
+5044 for incoming Beats connections and to index into Elasticsearch:
+
+[source,ruby]
+------------------------------------------------------------------------------
+input {
+  beats {
+    port => 5044
+  }
+}
+
+output {
+  elasticsearch {
+    hosts => "localhost:9200"
+    manage_template => false
+    index => "%{[@metadata][beat]}-%{+YYYY.MM.dd}"
+    document_type => "%{[@metadata][type]}"
+  }
+}
+------------------------------------------------------------------------------
+
+NOTE: The Beats shipper automatically sets the `type` field on the event.
+You cannot override this setting in the Logstash config. If you specify
+a setting for the <<{version}-plugins-{type}s-{plugin}-type,`type`>> config option in
+Logstash, it is ignored.
+
+IMPORTANT: If you are shipping events that span multiple lines, you need to
+use the https://www.elastic.co/guide/en/beats/filebeat/current/multiline-examples.html[configuration options available in Filebeat] to handle multiline events
+before sending the event data to Logstash. You cannot use the
+multiline codec to handle multiline events. Doing so will
+cause Logstash to fail to start.
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Beats Input Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-cipher_suites>> |{logstash-ref}/configuration-file-structure.html#array[array]|No
+| <<{version}-plugins-{type}s-{plugin}-client_inactivity_timeout>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+| <<{version}-plugins-{type}s-{plugin}-host>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-include_codec_tag>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-port>> |{logstash-ref}/configuration-file-structure.html#number[number]|Yes
+| <<{version}-plugins-{type}s-{plugin}-ssl>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_certificate>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_authorities>> |{logstash-ref}/configuration-file-structure.html#array[array]|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_handshake_timeout>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_key>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_key_passphrase>> |{logstash-ref}/configuration-file-structure.html#password[password]|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_verify_mode>> |{logstash-ref}/configuration-file-structure.html#string[string], one of `["none", "peer", "force_peer"]`|No
+| <<{version}-plugins-{type}s-{plugin}-tls_max_version>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+| <<{version}-plugins-{type}s-{plugin}-tls_min_version>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+input plugins.
+
+ 
+
+[id="{version}-plugins-{type}s-{plugin}-cipher_suites"]
+===== `cipher_suites`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#array[array]
+ * Default value is `["TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384", "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384", "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256", "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256"]`
+
+The list of cipher suites to use, listed by priority.
+
+[id="{version}-plugins-{type}s-{plugin}-client_inactivity_timeout"]
+===== `client_inactivity_timeout`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `60`
+
+Close idle clients after `client_inactivity_timeout` seconds of inactivity.
+
+[id="{version}-plugins-{type}s-{plugin}-host"]
+===== `host`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `"0.0.0.0"`
+
+The IP address to listen on.
+
+[id="{version}-plugins-{type}s-{plugin}-include_codec_tag"]
+===== `include_codec_tag`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `true`
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-port"]
+===== `port`
+
+ * This is a required setting.
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * There is no default value for this setting.
+
+The port to listen on.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl"]
+===== `ssl`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `false`
+
+Events are sent in plain text by default. You can
+enable encryption by setting `ssl` to `true` and configuring
+the `ssl_certificate` and `ssl_key` options.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_certificate"]
+===== `ssl_certificate`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#path[path]
+ * There is no default value for this setting.
+
+SSL certificate to use.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_certificate_authorities"]
+===== `ssl_certificate_authorities`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#array[array]
+ * Default value is `[]`
+
+Validate client certificates against these authorities.
+You can define multiple files or paths. All the certificates will
+be read and added to the trust store. You need to set `ssl_verify_mode`
+to `peer` or `force_peer` to enable verification.
+
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_handshake_timeout"]
+===== `ssl_handshake_timeout`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `10000`
+
+Time in milliseconds for an incomplete SSL handshake to time out.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_key"]
+===== `ssl_key`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#path[path]
+ * There is no default value for this setting.
+
+SSL key to use.
+NOTE: This key needs to be in PKCS#8 format. You can convert it with
+https://www.openssl.org/docs/man1.1.0/apps/pkcs8.html[OpenSSL].
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_key_passphrase"]
+===== `ssl_key_passphrase`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#password[password]
+ * There is no default value for this setting.
+
+SSL key passphrase to use.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_verify_mode"]
+===== `ssl_verify_mode`
+
+ * Value can be any of: `none`, `peer`, `force_peer`
+ * Default value is `"none"`
+
+By default the server doesn't do any client verification.
+
+`peer` will make the server ask the client to provide a certificate.
+If the client provides a certificate, it will be validated.
+
+`force_peer` will make the server ask the client to provide a certificate.
+If the client doesn't provide a certificate, the connection will be closed.
+
+This option needs to be used with `ssl_certificate_authorities` and a defined list of CAs.
+
+[id="{version}-plugins-{type}s-{plugin}-tls_max_version"]
+===== `tls_max_version`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `1.2`
+
+The maximum TLS version allowed for the encrypted connections. The value must be one of the following:
+1.0 for TLS 1.0, 1.1 for TLS 1.1, 1.2 for TLS 1.2
+
+[id="{version}-plugins-{type}s-{plugin}-tls_min_version"]
+===== `tls_min_version`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `1`
+
+The minimum TLS version allowed for the encrypted connections.
 The value must be one of the following:
+1.0 for TLS 1.0, 1.1 for TLS 1.1, 1.2 for TLS 1.2
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
diff --git a/docs/versioned-plugins/inputs/beats-v5.0.6.asciidoc b/docs/versioned-plugins/inputs/beats-v5.0.6.asciidoc
new file mode 100644
index 000000000..cb9b31826
--- /dev/null
+++ b/docs/versioned-plugins/inputs/beats-v5.0.6.asciidoc
@@ -0,0 +1,222 @@
+:plugin: beats
+:type: input
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v5.0.6
+:release_date: 2018-01-05
+:changelog_url: https://github.com/logstash-plugins/logstash-input-beats/blob/v5.0.6/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Beats input plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This input plugin enables Logstash to receive events from the
+https://www.elastic.co/products/beats[Elastic Beats] framework.
+
+The following example shows how to configure Logstash to listen on port
+5044 for incoming Beats connections and to index into Elasticsearch:
+
+[source,ruby]
+------------------------------------------------------------------------------
+input {
+  beats {
+    port => 5044
+  }
+}
+
+output {
+  elasticsearch {
+    hosts => "localhost:9200"
+    manage_template => false
+    index => "%{[@metadata][beat]}-%{+YYYY.MM.dd}"
+    document_type => "%{[@metadata][type]}"
+  }
+}
+------------------------------------------------------------------------------
+
+NOTE: The Beats shipper automatically sets the `type` field on the event.
+You cannot override this setting in the Logstash config. If you specify
+a setting for the <<{version}-plugins-{type}s-{plugin}-type,`type`>> config option in
+Logstash, it is ignored.
+
+IMPORTANT: If you are shipping events that span multiple lines, you need to
+use the https://www.elastic.co/guide/en/beats/filebeat/current/multiline-examples.html[configuration options available in Filebeat] to handle multiline events
+before sending the event data to Logstash. You cannot use the
+multiline codec to handle multiline events. Doing so will
+cause Logstash to fail to start.
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Beats Input Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-cipher_suites>> |{logstash-ref}/configuration-file-structure.html#array[array]|No
+| <<{version}-plugins-{type}s-{plugin}-client_inactivity_timeout>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+| <<{version}-plugins-{type}s-{plugin}-host>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-include_codec_tag>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-port>> |{logstash-ref}/configuration-file-structure.html#number[number]|Yes
+| <<{version}-plugins-{type}s-{plugin}-ssl>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_certificate>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_authorities>> |{logstash-ref}/configuration-file-structure.html#array[array]|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_handshake_timeout>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_key>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_key_passphrase>> |{logstash-ref}/configuration-file-structure.html#password[password]|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_verify_mode>> |{logstash-ref}/configuration-file-structure.html#string[string], one of `["none", "peer", "force_peer"]`|No
+| <<{version}-plugins-{type}s-{plugin}-tls_max_version>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+| <<{version}-plugins-{type}s-{plugin}-tls_min_version>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+input plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-cipher_suites"]
+===== `cipher_suites`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#array[array]
+ * Default value is `["TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384", "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384", "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256", "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256"]`
+
+The list of cipher suites to use, listed by priority.
+
+[id="{version}-plugins-{type}s-{plugin}-client_inactivity_timeout"]
+===== `client_inactivity_timeout`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `60`
+
+Close idle clients after the configured number of seconds of inactivity.
+
+[id="{version}-plugins-{type}s-{plugin}-host"]
+===== `host`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `"0.0.0.0"`
+
+The IP address to listen on.
+
+[id="{version}-plugins-{type}s-{plugin}-include_codec_tag"]
+===== `include_codec_tag`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `true`
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-port"]
+===== `port`
+
+ * This is a required setting. 
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * There is no default value for this setting.
+
+The port to listen on.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl"]
+===== `ssl`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `false`
+
+Events are sent in plain text by default. You can
+enable encryption by setting `ssl` to true and configuring
+the `ssl_certificate` and `ssl_key` options.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_certificate"]
+===== `ssl_certificate`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#path[path]
+ * There is no default value for this setting.
+
+SSL certificate to use.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_certificate_authorities"]
+===== `ssl_certificate_authorities`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#array[array]
+ * Default value is `[]`
+
+Validate client certificates against these authorities.
+You can define multiple files or paths. All the certificates will
+be read and added to the trust store. You need to set `ssl_verify_mode`
+to `peer` or `force_peer` to enable verification.
+
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_handshake_timeout"]
+===== `ssl_handshake_timeout`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `10000`
+
+Time in milliseconds for an incomplete SSL handshake to time out.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_key"]
+===== `ssl_key`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#path[path]
+ * There is no default value for this setting.
+
+SSL key to use.
+NOTE: This key needs to be in PKCS8 format. See the https://www.openssl.org/docs/man1.1.0/apps/pkcs8.html[OpenSSL pkcs8 documentation] for information on converting it.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_key_passphrase"]
+===== `ssl_key_passphrase`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#password[password]
+ * There is no default value for this setting.
+
+SSL key passphrase to use.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_verify_mode"]
+===== `ssl_verify_mode`
+
+ * Value can be any of: `none`, `peer`, `force_peer`
+ * Default value is `"none"`
+
+By default the server doesn't do any client verification.
+
+`peer` will make the server ask the client to provide a certificate.
+If the client provides a certificate, it will be validated.
+
+`force_peer` will make the server ask the client to provide a certificate.
+If the client doesn't provide a certificate, the connection will be closed.
+
+This option needs to be used with `ssl_certificate_authorities` and a defined list of CAs.
+
+[id="{version}-plugins-{type}s-{plugin}-tls_max_version"]
+===== `tls_max_version`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `1.2`
+
+The maximum TLS version allowed for the encrypted connections. The value must be one of the following:
+1.0 for TLS 1.0, 1.1 for TLS 1.1, 1.2 for TLS 1.2
+
+[id="{version}-plugins-{type}s-{plugin}-tls_min_version"]
+===== `tls_min_version`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `1`
+
+The minimum TLS version allowed for the encrypted connections. 
The value must be one of the following: +1.0 for TLS 1.0, 1.1 for TLS 1.1, 1.2 for TLS 1.2 + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] diff --git a/docs/versioned-plugins/inputs/http-index.asciidoc b/docs/versioned-plugins/inputs/http-index.asciidoc new file mode 100644 index 000000000..f2530b53b --- /dev/null +++ b/docs/versioned-plugins/inputs/http-index.asciidoc @@ -0,0 +1,18 @@ +:plugin: http +:type: input + +include::{include_path}/version-list-intro.asciidoc[] + +|======================================================================= +| Version | Release Date +| <> | 2017-12-09 +| <> | 2017-11-07 +| <> | 2017-08-15 +| <> | 2017-06-23 +|======================================================================= + +include::http-v3.0.8.asciidoc[] +include::http-v3.0.7.asciidoc[] +include::http-v3.0.6.asciidoc[] +include::http-v3.0.5.asciidoc[] + diff --git a/docs/versioned-plugins/inputs/http-v3.0.5.asciidoc b/docs/versioned-plugins/inputs/http-v3.0.5.asciidoc new file mode 100644 index 000000000..d7d84181e --- /dev/null +++ b/docs/versioned-plugins/inputs/http-v3.0.5.asciidoc @@ -0,0 +1,168 @@ +:plugin: http +:type: input + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.0.5 +:release_date: 2017-06-23 +:changelog_url: https://github.com/logstash-plugins/logstash-input-http/blob/v3.0.5/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Http input plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +Using this input you can receive single or multiline events over http(s). +Applications can send an HTTP POST request with a body to the endpoint started by this +input and Logstash will convert it into an event for subsequent processing. Users +can pass plain text, JSON, or any formatted data and use a corresponding codec with this +input. For Content-Type `application/json` the `json` codec is used, but for all other +data formats, `plain` codec is used. + +This input can also be used to receive webhook requests to integrate with other services +and applications. By taking advantage of the vast plugin ecosystem available in Logstash +you can trigger actionable events right from your application. + +==== Security +This plugin supports standard HTTP basic authentication headers to identify the requester. +You can pass in a username, password combination while sending data to this input + +You can also setup SSL and send data securely over https, with an option of validating +the client's certificate. Currently, the certificate setup is through +https://docs.oracle.com/cd/E19509-01/820-3503/ggfen/index.html[Java Keystore +format] + + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Http Input Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. 
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-additional_codecs>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No
+| <<{version}-plugins-{type}s-{plugin}-host>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-keystore>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-keystore_password>> |{logstash-ref}/configuration-file-structure.html#password[password]|No
+| <<{version}-plugins-{type}s-{plugin}-password>> |{logstash-ref}/configuration-file-structure.html#password[password]|No
+| <<{version}-plugins-{type}s-{plugin}-port>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+| <<{version}-plugins-{type}s-{plugin}-response_headers>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No
+| <<{version}-plugins-{type}s-{plugin}-ssl>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-threads>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+| <<{version}-plugins-{type}s-{plugin}-user>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-verify_mode>> |{logstash-ref}/configuration-file-structure.html#string[string], one of `["none", "peer", "force_peer"]`|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+input plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-additional_codecs"]
+===== `additional_codecs`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash]
+ * Default value is `{"application/json"=>"json"}`
+
+Apply specific codecs for specific content types.
+The default codec will be applied only after this list is checked
+and no codec for the request's content-type is found.
+
+[id="{version}-plugins-{type}s-{plugin}-host"]
+===== `host`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `"0.0.0.0"`
+
+The host or ip to bind.
+
+[id="{version}-plugins-{type}s-{plugin}-keystore"]
+===== `keystore`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#path[path]
+ * There is no default value for this setting.
+
+The JKS keystore to validate the client's certificates.
+
+[id="{version}-plugins-{type}s-{plugin}-keystore_password"]
+===== `keystore_password`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#password[password]
+ * There is no default value for this setting.
+
+Set the truststore password.
+
+[id="{version}-plugins-{type}s-{plugin}-password"]
+===== `password`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#password[password]
+ * There is no default value for this setting. 
+ +Password for basic authorization + +[id="{version}-plugins-{type}s-{plugin}-port"] +===== `port` + + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] + * Default value is `8080` + +The TCP port to bind to + +[id="{version}-plugins-{type}s-{plugin}-response_headers"] +===== `response_headers` + + * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash] + * Default value is `{"Content-Type"=>"text/plain"}` + +specify a custom set of response headers + +[id="{version}-plugins-{type}s-{plugin}-ssl"] +===== `ssl` + + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] + * Default value is `false` + +SSL Configurations + +Enable SSL + +[id="{version}-plugins-{type}s-{plugin}-threads"] +===== `threads` + + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] + * Default value is `4` + +Maximum number of threads to use + +[id="{version}-plugins-{type}s-{plugin}-user"] +===== `user` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * There is no default value for this setting. + +Username for basic authorization + +[id="{version}-plugins-{type}s-{plugin}-verify_mode"] +===== `verify_mode` + + * Value can be any of: `none`, `peer`, `force_peer` + * Default value is `"none"` + +Set the client certificate verification method. Valid methods: none, peer, force_peer + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/http-v3.0.6.asciidoc b/docs/versioned-plugins/inputs/http-v3.0.6.asciidoc new file mode 100644 index 000000000..bb40ccd95 --- /dev/null +++ b/docs/versioned-plugins/inputs/http-v3.0.6.asciidoc @@ -0,0 +1,168 @@ +:plugin: http +:type: input + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.0.6 +:release_date: 2017-08-15 +:changelog_url: https://github.com/logstash-plugins/logstash-input-http/blob/v3.0.6/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Http input plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +Using this input you can receive single or multiline events over http(s). +Applications can send an HTTP POST request with a body to the endpoint started by this +input and Logstash will convert it into an event for subsequent processing. Users +can pass plain text, JSON, or any formatted data and use a corresponding codec with this +input. For Content-Type `application/json` the `json` codec is used, but for all other +data formats, `plain` codec is used. + +This input can also be used to receive webhook requests to integrate with other services +and applications. By taking advantage of the vast plugin ecosystem available in Logstash +you can trigger actionable events right from your application. + +==== Security +This plugin supports standard HTTP basic authentication headers to identify the requester. +You can pass in a username, password combination while sending data to this input + +You can also setup SSL and send data securely over https, with an option of validating +the client's certificate. 
Currently, the certificate setup is through
+https://docs.oracle.com/cd/E19509-01/820-3503/ggfen/index.html[Java Keystore
+format].
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Http Input Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-additional_codecs>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No
+| <<{version}-plugins-{type}s-{plugin}-host>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-keystore>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-keystore_password>> |{logstash-ref}/configuration-file-structure.html#password[password]|No
+| <<{version}-plugins-{type}s-{plugin}-password>> |{logstash-ref}/configuration-file-structure.html#password[password]|No
+| <<{version}-plugins-{type}s-{plugin}-port>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+| <<{version}-plugins-{type}s-{plugin}-response_headers>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No
+| <<{version}-plugins-{type}s-{plugin}-ssl>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-threads>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+| <<{version}-plugins-{type}s-{plugin}-user>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-verify_mode>> |{logstash-ref}/configuration-file-structure.html#string[string], one of `["none", "peer", "force_peer"]`|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+input plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-additional_codecs"]
+===== `additional_codecs`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash]
+ * Default value is `{"application/json"=>"json"}`
+
+Apply specific codecs for specific content types.
+The default codec will be applied only after this list is checked
+and no codec for the request's content-type is found.
+
+[id="{version}-plugins-{type}s-{plugin}-host"]
+===== `host`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `"0.0.0.0"`
+
+The host or ip to bind.
+
+[id="{version}-plugins-{type}s-{plugin}-keystore"]
+===== `keystore`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#path[path]
+ * There is no default value for this setting.
+
+The JKS keystore to validate the client's certificates.
+
+[id="{version}-plugins-{type}s-{plugin}-keystore_password"]
+===== `keystore_password`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#password[password]
+ * There is no default value for this setting.
+
+Set the truststore password.
+
+[id="{version}-plugins-{type}s-{plugin}-password"]
+===== `password`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#password[password]
+ * There is no default value for this setting. 
+ +Password for basic authorization + +[id="{version}-plugins-{type}s-{plugin}-port"] +===== `port` + + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] + * Default value is `8080` + +The TCP port to bind to + +[id="{version}-plugins-{type}s-{plugin}-response_headers"] +===== `response_headers` + + * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash] + * Default value is `{"Content-Type"=>"text/plain"}` + +specify a custom set of response headers + +[id="{version}-plugins-{type}s-{plugin}-ssl"] +===== `ssl` + + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] + * Default value is `false` + +SSL Configurations + +Enable SSL + +[id="{version}-plugins-{type}s-{plugin}-threads"] +===== `threads` + + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] + * Default value is `4` + +Maximum number of threads to use + +[id="{version}-plugins-{type}s-{plugin}-user"] +===== `user` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * There is no default value for this setting. + +Username for basic authorization + +[id="{version}-plugins-{type}s-{plugin}-verify_mode"] +===== `verify_mode` + + * Value can be any of: `none`, `peer`, `force_peer` + * Default value is `"none"` + +Set the client certificate verification method. Valid methods: none, peer, force_peer + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/http-v3.0.7.asciidoc b/docs/versioned-plugins/inputs/http-v3.0.7.asciidoc new file mode 100644 index 000000000..cd4f7010b --- /dev/null +++ b/docs/versioned-plugins/inputs/http-v3.0.7.asciidoc @@ -0,0 +1,165 @@ +:plugin: http +:type: input + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.0.7 +:release_date: 2017-11-07 +:changelog_url: https://github.com/logstash-plugins/logstash-input-http/blob/v3.0.7/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Http input plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +Using this input you can receive single or multiline events over http(s). +Applications can send an HTTP POST request with a body to the endpoint started by this +input and Logstash will convert it into an event for subsequent processing. Users +can pass plain text, JSON, or any formatted data and use a corresponding codec with this +input. For Content-Type `application/json` the `json` codec is used, but for all other +data formats, `plain` codec is used. + +This input can also be used to receive webhook requests to integrate with other services +and applications. By taking advantage of the vast plugin ecosystem available in Logstash +you can trigger actionable events right from your application. + +==== Security +This plugin supports standard HTTP basic authentication headers to identify the requester. +You can pass in a username, password combination while sending data to this input + +You can also setup SSL and send data securely over https, with an option of validating +the client's certificate. 
Currently, the certificate setup is through +https://docs.oracle.com/cd/E19509-01/820-3503/ggfen/index.html[Java Keystore +format] + + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Http Input Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-additional_codecs>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No +| <<{version}-plugins-{type}s-{plugin}-host>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-keystore>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-keystore_password>> |{logstash-ref}/configuration-file-structure.html#password[password]|No +| <<{version}-plugins-{type}s-{plugin}-password>> |{logstash-ref}/configuration-file-structure.html#password[password]|No +| <<{version}-plugins-{type}s-{plugin}-port>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-response_headers>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No +| <<{version}-plugins-{type}s-{plugin}-ssl>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-threads>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-user>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-verify_mode>> |{logstash-ref}/configuration-file-structure.html#string[string], one of `["none", "peer", "force_peer"]`|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +input plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-additional_codecs"] +===== `additional_codecs` + + * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash] + * Default value is `{"application/json"=>"json"}` + +Apply specific codecs for specific content types. +The default codec will be applied only after this list is checked +and no codec for the request's content-type is found + +[id="{version}-plugins-{type}s-{plugin}-host"] +===== `host` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * Default value is `"0.0.0.0"` + +The host or ip to bind + +[id="{version}-plugins-{type}s-{plugin}-keystore"] +===== `keystore` + + * Value type is {logstash-ref}/configuration-file-structure.html#path[path] + * There is no default value for this setting. + +The JKS keystore to validate the client's certificates + +[id="{version}-plugins-{type}s-{plugin}-keystore_password"] +===== `keystore_password` + + * Value type is {logstash-ref}/configuration-file-structure.html#password[password] + * There is no default value for this setting. + +Set the truststore password + +[id="{version}-plugins-{type}s-{plugin}-password"] +===== `password` + + * Value type is {logstash-ref}/configuration-file-structure.html#password[password] + * There is no default value for this setting. 
+ +Password for basic authorization + +[id="{version}-plugins-{type}s-{plugin}-port"] +===== `port` + + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] + * Default value is `8080` + +The TCP port to bind to + +[id="{version}-plugins-{type}s-{plugin}-response_headers"] +===== `response_headers` + + * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash] + * Default value is `{"Content-Type"=>"text/plain"}` + +specify a custom set of response headers + +[id="{version}-plugins-{type}s-{plugin}-ssl"] +===== `ssl` + + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] + * Default value is `false` + +SSL Configurations + +Enable SSL + +[id="{version}-plugins-{type}s-{plugin}-threads"] +===== `threads` + + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] + * Default value is `4` + +Maximum number of threads to use + +[id="{version}-plugins-{type}s-{plugin}-user"] +===== `user` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * There is no default value for this setting. + +Username for basic authorization + +[id="{version}-plugins-{type}s-{plugin}-verify_mode"] +===== `verify_mode` + + * Value can be any of: `none`, `peer`, `force_peer` + * Default value is `"none"` + +Set the client certificate verification method. Valid methods: none, peer, force_peer + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/http-v3.0.8.asciidoc b/docs/versioned-plugins/inputs/http-v3.0.8.asciidoc new file mode 100644 index 000000000..3224ded6a --- /dev/null +++ b/docs/versioned-plugins/inputs/http-v3.0.8.asciidoc @@ -0,0 +1,178 @@ +:plugin: http +:type: input + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.0.8 +:release_date: 2017-12-09 +:changelog_url: https://github.com/logstash-plugins/logstash-input-http/blob/v3.0.8/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Http input plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +Using this input you can receive single or multiline events over http(s). +Applications can send an HTTP POST request with a body to the endpoint started by this +input and Logstash will convert it into an event for subsequent processing. Users +can pass plain text, JSON, or any formatted data and use a corresponding codec with this +input. For Content-Type `application/json` the `json` codec is used, but for all other +data formats, `plain` codec is used. + +This input can also be used to receive webhook requests to integrate with other services +and applications. By taking advantage of the vast plugin ecosystem available in Logstash +you can trigger actionable events right from your application. + +==== Blocking Behavior + +The HTTP protocol doesn't deal well with long running requests. This plugin will either return +a 429 (busy) error when Logstash is backlogged, or it will time out the request. + +If a 429 error is encountered clients should sleep, backing off exponentially with some random +jitter, then retry their request. 
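+
+For illustration, a client-side retry loop along those lines might look like
+the following Ruby sketch; the endpoint URL, payload, and retry bounds are
+placeholder values, not settings of this plugin:
+
+[source,ruby]
+----------------------------------
+require "net/http"
+require "uri"
+
+# POST with exponential backoff plus random jitter on HTTP 429.
+def post_with_backoff(url, body, max_retries: 5)
+  delay = 1.0
+  max_retries.times do
+    response = Net::HTTP.post(URI(url), body)
+    return response unless response.code == "429"
+    sleep(delay + rand * delay) # base delay plus random jitter
+    delay *= 2                  # double the base delay each attempt
+  end
+  raise "giving up after #{max_retries} attempts"
+end
+
+post_with_backoff("http://localhost:8080", '{"message":"hello"}')
+----------------------------------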
+ +This plugin will block if the Logstash queue is blocked and there are available HTTP input threads. +This will cause most HTTP clients to time out. Sent events will still be processed in this case. This +behavior is not optimal and will be changed in a future release. In the future, this plugin will always +return a 429 if the queue is busy, and will not time out in the event of a busy queue. + +==== Security +This plugin supports standard HTTP basic authentication headers to identify the requester. +You can pass in a username, password combination while sending data to this input + +You can also setup SSL and send data securely over https, with an option of validating +the client's certificate. Currently, the certificate setup is through +https://docs.oracle.com/cd/E19509-01/820-3503/ggfen/index.html[Java Keystore +format] + + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Http Input Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-additional_codecs>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No +| <<{version}-plugins-{type}s-{plugin}-host>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-keystore>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-keystore_password>> |{logstash-ref}/configuration-file-structure.html#password[password]|No +| <<{version}-plugins-{type}s-{plugin}-password>> |{logstash-ref}/configuration-file-structure.html#password[password]|No +| <<{version}-plugins-{type}s-{plugin}-port>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-response_headers>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No +| <<{version}-plugins-{type}s-{plugin}-ssl>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-threads>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-user>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-verify_mode>> |{logstash-ref}/configuration-file-structure.html#string[string], one of `["none", "peer", "force_peer"]`|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +input plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-additional_codecs"] +===== `additional_codecs` + + * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash] + * Default value is `{"application/json"=>"json"}` + +Apply specific codecs for specific content types. +The default codec will be applied only after this list is checked +and no codec for the request's content-type is found + +[id="{version}-plugins-{type}s-{plugin}-host"] +===== `host` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * Default value is `"0.0.0.0"` + +The host or ip to bind + +[id="{version}-plugins-{type}s-{plugin}-keystore"] +===== `keystore` + + * Value type is {logstash-ref}/configuration-file-structure.html#path[path] + * There is no default value for this setting. 
+ +The JKS keystore to validate the client's certificates + +[id="{version}-plugins-{type}s-{plugin}-keystore_password"] +===== `keystore_password` + + * Value type is {logstash-ref}/configuration-file-structure.html#password[password] + * There is no default value for this setting. + +Set the truststore password + +[id="{version}-plugins-{type}s-{plugin}-password"] +===== `password` + + * Value type is {logstash-ref}/configuration-file-structure.html#password[password] + * There is no default value for this setting. + +Password for basic authorization + +[id="{version}-plugins-{type}s-{plugin}-port"] +===== `port` + + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] + * Default value is `8080` + +The TCP port to bind to + +[id="{version}-plugins-{type}s-{plugin}-response_headers"] +===== `response_headers` + + * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash] + * Default value is `{"Content-Type"=>"text/plain"}` + +specify a custom set of response headers + +[id="{version}-plugins-{type}s-{plugin}-ssl"] +===== `ssl` + + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] + * Default value is `false` + +SSL Configurations + +Enable SSL + +[id="{version}-plugins-{type}s-{plugin}-threads"] +===== `threads` + + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] + * Default value is `4` + +Maximum number of threads to use + +[id="{version}-plugins-{type}s-{plugin}-user"] +===== `user` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * There is no default value for this setting. + +Username for basic authorization + +[id="{version}-plugins-{type}s-{plugin}-verify_mode"] +===== `verify_mode` + + * Value can be any of: `none`, `peer`, `force_peer` + * Default value is `"none"` + +Set the client certificate verification method. Valid methods: none, peer, force_peer + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/s3-index.asciidoc b/docs/versioned-plugins/inputs/s3-index.asciidoc new file mode 100644 index 000000000..f82e887ba --- /dev/null +++ b/docs/versioned-plugins/inputs/s3-index.asciidoc @@ -0,0 +1,22 @@ +:plugin: s3 +:type: input + +include::{include_path}/version-list-intro.asciidoc[] + +|======================================================================= +| Version | Release Date +| <> | 2018-01-09 +| <> | 2017-12-19 +| <> | 2017-11-07 +| <> | 2017-10-03 +| <> | 2017-08-16 +| <> | 2017-06-23 +|======================================================================= + +include::s3-v3.2.0.asciidoc[] +include::s3-v3.1.9.asciidoc[] +include::s3-v3.1.8.asciidoc[] +include::s3-v3.1.7.asciidoc[] +include::s3-v3.1.6.asciidoc[] +include::s3-v3.1.5.asciidoc[] + diff --git a/docs/versioned-plugins/inputs/s3-v3.1.5.asciidoc b/docs/versioned-plugins/inputs/s3-v3.1.5.asciidoc new file mode 100644 index 000000000..5814470ab --- /dev/null +++ b/docs/versioned-plugins/inputs/s3-v3.1.5.asciidoc @@ -0,0 +1,214 @@ +:plugin: s3 +:type: input + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! 
+/////////////////////////////////////////// +:version: v3.1.5 +:release_date: 2017-06-23 +:changelog_url: https://github.com/logstash-plugins/logstash-input-s3/blob/v3.1.5/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== S3 input plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +Stream events from files from a S3 bucket. + +Each line from each file generates an event. +Files ending in `.gz` are handled as gzip'ed files. + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== S3 Input Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-access_key_id>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-aws_credentials_file>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-backup_add_prefix>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-backup_to_bucket>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-backup_to_dir>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-bucket>> |{logstash-ref}/configuration-file-structure.html#string[string]|Yes +| <<{version}-plugins-{type}s-{plugin}-delete>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-exclude_pattern>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-interval>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-prefix>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-proxy_uri>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-region>> |{logstash-ref}/configuration-file-structure.html#string[string], one of `["us-east-1", "us-east-2", "us-west-1", "us-west-2", "eu-central-1", "eu-west-1", "eu-west-2", "ap-southeast-1", "ap-southeast-2", "ap-northeast-1", "ap-northeast-2", "sa-east-1", "us-gov-west-1", "cn-north-1", "ap-south-1", "ca-central-1"]`|No +| <<{version}-plugins-{type}s-{plugin}-secret_access_key>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-session_token>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-sincedb_path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-temporary_directory>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +input plugins. 
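+
+For orientation before the per-option details, here is a minimal sketch of
+this input; the bucket name and prefix are placeholder values:
+
+[source,ruby]
+----------------------------------
+input {
+  s3 {
+    bucket => "my-logs"     # placeholder bucket name
+    region => "us-east-1"
+    prefix => "production/" # only process keys starting with this prefix
+  }
+}
+----------------------------------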
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-access_key_id"]
+===== `access_key_id`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+This plugin uses the AWS SDK and supports several ways to get credentials, which will be tried in this order:
+
+1. Static configuration, using `access_key_id` and `secret_access_key` params in the logstash plugin config
+2. External credentials file specified by `aws_credentials_file`
+3. Environment variables `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY`
+4. Environment variables `AMAZON_ACCESS_KEY_ID` and `AMAZON_SECRET_ACCESS_KEY`
+5. IAM Instance Profile (available when running inside EC2)
+
+[id="{version}-plugins-{type}s-{plugin}-aws_credentials_file"]
+===== `aws_credentials_file`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+Path to a YAML file containing a hash of AWS credentials.
+This file will only be loaded if `access_key_id` and
+`secret_access_key` aren't set. The contents of the
+file should look like this:
+
+[source,ruby]
+----------------------------------
+    :access_key_id: "12345"
+    :secret_access_key: "54321"
+----------------------------------
+
+
+[id="{version}-plugins-{type}s-{plugin}-backup_add_prefix"]
+===== `backup_add_prefix`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `nil`
+
+Append a prefix to the key (the full path, including the file name, in S3) after processing.
+If backing up to another (or the same) bucket, this effectively lets you
+choose a new 'folder' to place the files in.
+
+[id="{version}-plugins-{type}s-{plugin}-backup_to_bucket"]
+===== `backup_to_bucket`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `nil`
+
+Name of an S3 bucket to back up processed files to.
+
+[id="{version}-plugins-{type}s-{plugin}-backup_to_dir"]
+===== `backup_to_dir`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `nil`
+
+Path of a local directory to back up processed files to.
+
+[id="{version}-plugins-{type}s-{plugin}-bucket"]
+===== `bucket`
+
+ * This is a required setting.
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+The name of the S3 bucket.
+
+[id="{version}-plugins-{type}s-{plugin}-delete"]
+===== `delete`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `false`
+
+Whether to delete processed files from the original bucket.
+
+[id="{version}-plugins-{type}s-{plugin}-exclude_pattern"]
+===== `exclude_pattern`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `nil`
+
+Ruby-style regexp of keys to exclude from the bucket.
+
+[id="{version}-plugins-{type}s-{plugin}-interval"]
+===== `interval`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `60`
+
+Interval to wait before checking the file list again after a run is finished.
+Value is in seconds. 
+
+[id="{version}-plugins-{type}s-{plugin}-prefix"]
+===== `prefix`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `nil`
+
+If specified, the prefix of filenames in the bucket must match (not a regexp).
+
+[id="{version}-plugins-{type}s-{plugin}-proxy_uri"]
+===== `proxy_uri`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+URI of the proxy server, if required.
+
+[id="{version}-plugins-{type}s-{plugin}-region"]
+===== `region`
+
+ * Value can be any of: `us-east-1`, `us-east-2`, `us-west-1`, `us-west-2`, `eu-central-1`, `eu-west-1`, `eu-west-2`, `ap-southeast-1`, `ap-southeast-2`, `ap-northeast-1`, `ap-northeast-2`, `sa-east-1`, `us-gov-west-1`, `cn-north-1`, `ap-south-1`, `ca-central-1`
+ * Default value is `"us-east-1"`
+
+The AWS Region.
+
+[id="{version}-plugins-{type}s-{plugin}-secret_access_key"]
+===== `secret_access_key`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+The AWS Secret Access Key.
+
+[id="{version}-plugins-{type}s-{plugin}-session_token"]
+===== `session_token`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+The AWS Session token for temporary credentials.
+
+[id="{version}-plugins-{type}s-{plugin}-sincedb_path"]
+===== `sincedb_path`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `nil`
+
+Where to write the since database (keeps track of the date
+the last handled file was added to S3). The default will write
+sincedb files to some path matching `$HOME/.sincedb*`.
+This should be a path with a filename, not just a directory.
+
+[id="{version}-plugins-{type}s-{plugin}-temporary_directory"]
+===== `temporary_directory`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `"/tmp/logstash"`
+
+Set the directory where Logstash will store the tmp files before processing them.
+Defaults to the OS temporary directory, which on Linux is `/tmp/logstash`.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/s3-v3.1.6.asciidoc b/docs/versioned-plugins/inputs/s3-v3.1.6.asciidoc
new file mode 100644
index 000000000..e6e62de76
--- /dev/null
+++ b/docs/versioned-plugins/inputs/s3-v3.1.6.asciidoc
@@ -0,0 +1,214 @@
+:plugin: s3
+:type: input
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.1.6
+:release_date: 2017-08-16
+:changelog_url: https://github.com/logstash-plugins/logstash-input-s3/blob/v3.1.6/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== S3 input plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Stream events from files in an S3 bucket.
+
+Each line from each file generates an event.
+Files ending in `.gz` are handled as gzip'ed files. 
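+
+As a sketch of a common arrangement (bucket names are placeholders), processed
+objects can be archived to a second bucket and deleted from the source:
+
+[source,ruby]
+----------------------------------
+input {
+  s3 {
+    bucket => "my-logs"                   # placeholder source bucket
+    backup_to_bucket => "my-logs-archive" # placeholder archive bucket
+    backup_add_prefix => "processed/"     # archived keys get this prefix
+    delete => true                        # remove objects from the source bucket
+  }
+}
+----------------------------------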
+ +[id="{version}-plugins-{type}s-{plugin}-options"] +==== S3 Input Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-access_key_id>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-aws_credentials_file>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-backup_add_prefix>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-backup_to_bucket>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-backup_to_dir>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-bucket>> |{logstash-ref}/configuration-file-structure.html#string[string]|Yes +| <<{version}-plugins-{type}s-{plugin}-delete>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-exclude_pattern>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-interval>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-prefix>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-proxy_uri>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-region>> |{logstash-ref}/configuration-file-structure.html#string[string], one of `["us-east-1", "us-east-2", "us-west-1", "us-west-2", "eu-central-1", "eu-west-1", "eu-west-2", "ap-southeast-1", "ap-southeast-2", "ap-northeast-1", "ap-northeast-2", "sa-east-1", "us-gov-west-1", "cn-north-1", "ap-south-1", "ca-central-1"]`|No +| <<{version}-plugins-{type}s-{plugin}-secret_access_key>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-session_token>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-sincedb_path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-temporary_directory>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +input plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-access_key_id"] +===== `access_key_id` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * There is no default value for this setting. + +This plugin uses the AWS SDK and supports several ways to get credentials, which will be tried in this order: + +1. Static configuration, using `access_key_id` and `secret_access_key` params in logstash plugin config +2. External credentials file specified by `aws_credentials_file` +3. Environment variables `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY` +4. Environment variables `AMAZON_ACCESS_KEY_ID` and `AMAZON_SECRET_ACCESS_KEY` +5. 
IAM Instance Profile (available when running inside EC2) + +[id="{version}-plugins-{type}s-{plugin}-aws_credentials_file"] +===== `aws_credentials_file` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * There is no default value for this setting. + +Path to YAML file containing a hash of AWS credentials. +This file will only be loaded if `access_key_id` and +`secret_access_key` aren't set. The contents of the +file should look like this: + +[source,ruby] +---------------------------------- + :access_key_id: "12345" + :secret_access_key: "54321" +---------------------------------- + + +[id="{version}-plugins-{type}s-{plugin}-backup_add_prefix"] +===== `backup_add_prefix` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * Default value is `nil` + +Append a prefix to the key (full path including file name in s3) after processing. +If backing up to another (or the same) bucket, this effectively lets you +choose a new 'folder' to place the files in + +[id="{version}-plugins-{type}s-{plugin}-backup_to_bucket"] +===== `backup_to_bucket` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * Default value is `nil` + +Name of a S3 bucket to backup processed files to. + +[id="{version}-plugins-{type}s-{plugin}-backup_to_dir"] +===== `backup_to_dir` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * Default value is `nil` + +Path of a local directory to backup processed files to. + +[id="{version}-plugins-{type}s-{plugin}-bucket"] +===== `bucket` + + * This is a required setting. + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * There is no default value for this setting. + +The name of the S3 bucket. + +[id="{version}-plugins-{type}s-{plugin}-delete"] +===== `delete` + + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] + * Default value is `false` + +Whether to delete processed files from the original bucket. + +[id="{version}-plugins-{type}s-{plugin}-exclude_pattern"] +===== `exclude_pattern` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * Default value is `nil` + +Ruby style regexp of keys to exclude from the bucket + +[id="{version}-plugins-{type}s-{plugin}-interval"] +===== `interval` + + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] + * Default value is `60` + +Interval to wait between to check the file list again after a run is finished. +Value is in seconds. + +[id="{version}-plugins-{type}s-{plugin}-prefix"] +===== `prefix` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * Default value is `nil` + +If specified, the prefix of filenames in the bucket must match (not a regexp) + +[id="{version}-plugins-{type}s-{plugin}-proxy_uri"] +===== `proxy_uri` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * There is no default value for this setting. 
+ +URI to proxy server if required + +[id="{version}-plugins-{type}s-{plugin}-region"] +===== `region` + + * Value can be any of: `us-east-1`, `us-east-2`, `us-west-1`, `us-west-2`, `eu-central-1`, `eu-west-1`, `eu-west-2`, `ap-southeast-1`, `ap-southeast-2`, `ap-northeast-1`, `ap-northeast-2`, `sa-east-1`, `us-gov-west-1`, `cn-north-1`, `ap-south-1`, `ca-central-1` + * Default value is `"us-east-1"` + +The AWS Region + +[id="{version}-plugins-{type}s-{plugin}-secret_access_key"] +===== `secret_access_key` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * There is no default value for this setting. + +The AWS Secret Access Key + +[id="{version}-plugins-{type}s-{plugin}-session_token"] +===== `session_token` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * There is no default value for this setting. + +The AWS Session token for temporary credential + +[id="{version}-plugins-{type}s-{plugin}-sincedb_path"] +===== `sincedb_path` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * Default value is `nil` + +Where to write the since database (keeps track of the date +the last handled file was added to S3). The default will write +sincedb files to some path matching "$HOME/.sincedb*" +Should be a path with filename not just a directory. + +[id="{version}-plugins-{type}s-{plugin}-temporary_directory"] +===== `temporary_directory` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * Default value is `"/tmp/logstash"` + +Set the directory where logstash will store the tmp files before processing them. +default to the current OS temporary directory in linux /tmp/logstash + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/s3-v3.1.7.asciidoc b/docs/versioned-plugins/inputs/s3-v3.1.7.asciidoc new file mode 100644 index 000000000..aeb66c47d --- /dev/null +++ b/docs/versioned-plugins/inputs/s3-v3.1.7.asciidoc @@ -0,0 +1,214 @@ +:plugin: s3 +:type: input + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.1.7 +:release_date: 2017-10-03 +:changelog_url: https://github.com/logstash-plugins/logstash-input-s3/blob/v3.1.7/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== S3 input plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +Stream events from files from a S3 bucket. + +Each line from each file generates an event. +Files ending in `.gz` are handled as gzip'ed files. + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== S3 Input Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. 
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-access_key_id>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-aws_credentials_file>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-backup_add_prefix>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-backup_to_bucket>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-backup_to_dir>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-bucket>> |{logstash-ref}/configuration-file-structure.html#string[string]|Yes
+| <<{version}-plugins-{type}s-{plugin}-delete>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-exclude_pattern>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-interval>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+| <<{version}-plugins-{type}s-{plugin}-prefix>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-proxy_uri>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-region>> |{logstash-ref}/configuration-file-structure.html#string[string], one of `["us-east-1", "us-east-2", "us-west-1", "us-west-2", "eu-central-1", "eu-west-1", "eu-west-2", "ap-southeast-1", "ap-southeast-2", "ap-northeast-1", "ap-northeast-2", "sa-east-1", "us-gov-west-1", "cn-north-1", "ap-south-1", "ca-central-1"]`|No
+| <<{version}-plugins-{type}s-{plugin}-secret_access_key>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-session_token>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-sincedb_path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-temporary_directory>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+input plugins.
+
+ 
+
+[id="{version}-plugins-{type}s-{plugin}-access_key_id"]
+===== `access_key_id`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+This plugin uses the AWS SDK and supports several ways to get credentials, which will be tried in this order (a configuration example follows the list):
+
+1. Static configuration, using `access_key_id` and `secret_access_key` params in the Logstash plugin config
+2. External credentials file specified by `aws_credentials_file`
+3. Environment variables `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY`
+4. Environment variables `AMAZON_ACCESS_KEY_ID` and `AMAZON_SECRET_ACCESS_KEY`
+5. IAM Instance Profile (available when running inside EC2)
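+
+For example, a minimal configuration that uses static credentials might look
+like this (the bucket name and keys below are placeholders, not working values):
+
+    input {
+      s3 {
+        bucket            => "my-logs"          # placeholder bucket name
+        region            => "us-east-1"
+        access_key_id     => "AKIAEXAMPLE"      # placeholder credentials
+        secret_access_key => "example-secret"
+      }
+    }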
+
+[id="{version}-plugins-{type}s-{plugin}-aws_credentials_file"]
+===== `aws_credentials_file`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+Path to a YAML file containing a hash of AWS credentials.
+This file will only be loaded if `access_key_id` and
+`secret_access_key` aren't set. The contents of the
+file should look like this:
+
+[source,ruby]
+----------------------------------
+    :access_key_id: "12345"
+    :secret_access_key: "54321"
+----------------------------------
+
+
+[id="{version}-plugins-{type}s-{plugin}-backup_add_prefix"]
+===== `backup_add_prefix`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `nil`
+
+Append a prefix to the key (the full path, including the file name, in S3) after processing.
+If backing up to another (or the same) bucket, this effectively lets you
+choose a new 'folder' to place the files in.
+
+[id="{version}-plugins-{type}s-{plugin}-backup_to_bucket"]
+===== `backup_to_bucket`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `nil`
+
+Name of an S3 bucket to back up processed files to.
+
+[id="{version}-plugins-{type}s-{plugin}-backup_to_dir"]
+===== `backup_to_dir`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `nil`
+
+Path of a local directory to back up processed files to.
+
+[id="{version}-plugins-{type}s-{plugin}-bucket"]
+===== `bucket`
+
+ * This is a required setting.
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+The name of the S3 bucket.
+
+[id="{version}-plugins-{type}s-{plugin}-delete"]
+===== `delete`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `false`
+
+Whether to delete processed files from the original bucket.
+
+[id="{version}-plugins-{type}s-{plugin}-exclude_pattern"]
+===== `exclude_pattern`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `nil`
+
+Ruby-style regexp of keys to exclude from the bucket.
+
+[id="{version}-plugins-{type}s-{plugin}-interval"]
+===== `interval`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `60`
+
+Interval, in seconds, to wait before checking the file list again after a run is finished.
+
+[id="{version}-plugins-{type}s-{plugin}-prefix"]
+===== `prefix`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `nil`
+
+If specified, only keys in the bucket whose prefix matches this string (not a regexp) are processed.
+
+[id="{version}-plugins-{type}s-{plugin}-proxy_uri"]
+===== `proxy_uri`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+URI of the proxy server, if required.
+
+[id="{version}-plugins-{type}s-{plugin}-region"]
+===== `region`
+
+ * Value can be any of: `us-east-1`, `us-east-2`, `us-west-1`, `us-west-2`, `eu-central-1`, `eu-west-1`, `eu-west-2`, `ap-southeast-1`, `ap-southeast-2`, `ap-northeast-1`, `ap-northeast-2`, `sa-east-1`, `us-gov-west-1`, `cn-north-1`, `ap-south-1`, `ca-central-1`
+ * Default value is `"us-east-1"`
+
+The AWS Region.
+
+[id="{version}-plugins-{type}s-{plugin}-secret_access_key"]
+===== `secret_access_key`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+The AWS Secret Access Key.
+
+[id="{version}-plugins-{type}s-{plugin}-session_token"]
+===== `session_token`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+The AWS session token for temporary credentials.
+
+[id="{version}-plugins-{type}s-{plugin}-sincedb_path"]
+===== `sincedb_path`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `nil`
+
+Where to write the since database (keeps track of the date
+the last handled file was added to S3). The default will write
+sincedb files to some path matching "$HOME/.sincedb*".
+Should be a path with a filename, not just a directory.
+
+[id="{version}-plugins-{type}s-{plugin}-temporary_directory"]
+===== `temporary_directory`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `"/tmp/logstash"`
+
+Set the directory where Logstash stores the temporary files before processing them.
+Defaults to the OS temporary directory, `/tmp/logstash` on Linux.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/s3-v3.1.8.asciidoc b/docs/versioned-plugins/inputs/s3-v3.1.8.asciidoc
new file mode 100644
index 000000000..9a5d4f7ee
--- /dev/null
+++ b/docs/versioned-plugins/inputs/s3-v3.1.8.asciidoc
@@ -0,0 +1,214 @@
+:plugin: s3
+:type: input
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.1.8
+:release_date: 2017-11-07
+:changelog_url: https://github.com/logstash-plugins/logstash-input-s3/blob/v3.1.8/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== S3 input plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Stream events from files in an S3 bucket.
+
+Each line from each file generates an event.
+Files ending in `.gz` are handled as gzipped files.
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== S3 Input Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-access_key_id>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-aws_credentials_file>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-backup_add_prefix>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-backup_to_bucket>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-backup_to_dir>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-bucket>> |{logstash-ref}/configuration-file-structure.html#string[string]|Yes
+| <<{version}-plugins-{type}s-{plugin}-delete>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-exclude_pattern>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-interval>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+| <<{version}-plugins-{type}s-{plugin}-prefix>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-proxy_uri>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-region>> |{logstash-ref}/configuration-file-structure.html#string[string], one of `["us-east-1", "us-east-2", "us-west-1", "us-west-2", "eu-central-1", "eu-west-1", "eu-west-2", "ap-southeast-1", "ap-southeast-2", "ap-northeast-1", "ap-northeast-2", "sa-east-1", "us-gov-west-1", "cn-north-1", "ap-south-1", "ca-central-1"]`|No
+| <<{version}-plugins-{type}s-{plugin}-secret_access_key>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-session_token>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-sincedb_path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-temporary_directory>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+input plugins.
+
+ 
+
+[id="{version}-plugins-{type}s-{plugin}-access_key_id"]
+===== `access_key_id`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+This plugin uses the AWS SDK and supports several ways to get credentials, which will be tried in this order:
+
+1. Static configuration, using `access_key_id` and `secret_access_key` params in the Logstash plugin config
+2. External credentials file specified by `aws_credentials_file`
+3. Environment variables `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY`
+4. Environment variables `AMAZON_ACCESS_KEY_ID` and `AMAZON_SECRET_ACCESS_KEY`
+5. IAM Instance Profile (available when running inside EC2)
+
+[id="{version}-plugins-{type}s-{plugin}-aws_credentials_file"]
+===== `aws_credentials_file`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+Path to a YAML file containing a hash of AWS credentials.
+This file will only be loaded if `access_key_id` and
+`secret_access_key` aren't set. The contents of the
+file should look like this:
+
+[source,ruby]
+----------------------------------
+    :access_key_id: "12345"
+    :secret_access_key: "54321"
+----------------------------------
+
+
+[id="{version}-plugins-{type}s-{plugin}-backup_add_prefix"]
+===== `backup_add_prefix`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `nil`
+
+Append a prefix to the key (the full path, including the file name, in S3) after processing.
+If backing up to another (or the same) bucket, this effectively lets you
+choose a new 'folder' to place the files in.
+
+[id="{version}-plugins-{type}s-{plugin}-backup_to_bucket"]
+===== `backup_to_bucket`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `nil`
+
+Name of an S3 bucket to back up processed files to.
+
+[id="{version}-plugins-{type}s-{plugin}-backup_to_dir"]
+===== `backup_to_dir`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `nil`
+
+Path of a local directory to back up processed files to.
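+
+For example, a sketch that keeps a copy of each processed file in a separate
+archive bucket under a `processed/` prefix (the bucket names are placeholders):
+
+    input {
+      s3 {
+        bucket            => "my-logs"           # placeholder names
+        backup_to_bucket  => "my-logs-archive"
+        backup_add_prefix => "processed/"
+      }
+    }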
+
+[id="{version}-plugins-{type}s-{plugin}-bucket"]
+===== `bucket`
+
+ * This is a required setting.
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+The name of the S3 bucket.
+
+[id="{version}-plugins-{type}s-{plugin}-delete"]
+===== `delete`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `false`
+
+Whether to delete processed files from the original bucket.
+
+[id="{version}-plugins-{type}s-{plugin}-exclude_pattern"]
+===== `exclude_pattern`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `nil`
+
+Ruby-style regexp of keys to exclude from the bucket.
+
+[id="{version}-plugins-{type}s-{plugin}-interval"]
+===== `interval`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `60`
+
+Interval, in seconds, to wait before checking the file list again after a run is finished.
+
+[id="{version}-plugins-{type}s-{plugin}-prefix"]
+===== `prefix`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `nil`
+
+If specified, only keys in the bucket whose prefix matches this string (not a regexp) are processed.
+
+[id="{version}-plugins-{type}s-{plugin}-proxy_uri"]
+===== `proxy_uri`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+URI of the proxy server, if required.
+
+[id="{version}-plugins-{type}s-{plugin}-region"]
+===== `region`
+
+ * Value can be any of: `us-east-1`, `us-east-2`, `us-west-1`, `us-west-2`, `eu-central-1`, `eu-west-1`, `eu-west-2`, `ap-southeast-1`, `ap-southeast-2`, `ap-northeast-1`, `ap-northeast-2`, `sa-east-1`, `us-gov-west-1`, `cn-north-1`, `ap-south-1`, `ca-central-1`
+ * Default value is `"us-east-1"`
+
+The AWS Region.
+
+[id="{version}-plugins-{type}s-{plugin}-secret_access_key"]
+===== `secret_access_key`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+The AWS Secret Access Key.
+
+[id="{version}-plugins-{type}s-{plugin}-session_token"]
+===== `session_token`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+The AWS session token for temporary credentials.
+
+[id="{version}-plugins-{type}s-{plugin}-sincedb_path"]
+===== `sincedb_path`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `nil`
+
+Where to write the since database (keeps track of the date
+the last handled file was added to S3). The default will write
+sincedb files to some path matching "$HOME/.sincedb*".
+Should be a path with a filename, not just a directory.
+
+[id="{version}-plugins-{type}s-{plugin}-temporary_directory"]
+===== `temporary_directory`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `"/tmp/logstash"`
+
+Set the directory where Logstash stores the temporary files before processing them.
+Defaults to the OS temporary directory, `/tmp/logstash` on Linux.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/s3-v3.1.9.asciidoc b/docs/versioned-plugins/inputs/s3-v3.1.9.asciidoc
new file mode 100644
index 000000000..2ceaaaf40
--- /dev/null
+++ b/docs/versioned-plugins/inputs/s3-v3.1.9.asciidoc
@@ -0,0 +1,215 @@
+:plugin: s3
+:type: input
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.1.9
+:release_date: 2017-12-19
+:changelog_url: https://github.com/logstash-plugins/logstash-input-s3/blob/v3.1.9/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== S3 input plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Stream events from files in an S3 bucket.
+
+Each line from each file generates an event.
+Files ending in `.gz` are handled as gzipped files.
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== S3 Input Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-access_key_id>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-aws_credentials_file>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-backup_add_prefix>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-backup_to_bucket>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-backup_to_dir>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-bucket>> |{logstash-ref}/configuration-file-structure.html#string[string]|Yes
+| <<{version}-plugins-{type}s-{plugin}-delete>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-exclude_pattern>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-interval>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+| <<{version}-plugins-{type}s-{plugin}-prefix>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-proxy_uri>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-region>> |{logstash-ref}/configuration-file-structure.html#string[string], one of `["us-east-1", "us-east-2", "us-west-1", "us-west-2", "eu-central-1", "eu-west-1", "eu-west-2", "ap-southeast-1", "ap-southeast-2", "ap-northeast-1", "ap-northeast-2", "sa-east-1", "us-gov-west-1", "cn-north-1", "ap-south-1", "ca-central-1"]`|No
+| <<{version}-plugins-{type}s-{plugin}-secret_access_key>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-session_token>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-sincedb_path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-temporary_directory>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+input plugins.
+
+ 
+
+[id="{version}-plugins-{type}s-{plugin}-access_key_id"]
+===== `access_key_id`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+This plugin uses the AWS SDK and supports several ways to get credentials, which will be tried in this order:
+
+1. Static configuration, using `access_key_id` and `secret_access_key` params in the Logstash plugin config
+2. External credentials file specified by `aws_credentials_file`
+3. Environment variables `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY`
+4. Environment variables `AMAZON_ACCESS_KEY_ID` and `AMAZON_SECRET_ACCESS_KEY`
+5. IAM Instance Profile (available when running inside EC2)
+
+[id="{version}-plugins-{type}s-{plugin}-aws_credentials_file"]
+===== `aws_credentials_file`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+Path to a YAML file containing a hash of AWS credentials.
+This file will only be loaded if `access_key_id` and
+`secret_access_key` aren't set. The contents of the
+file should look like this:
+
+[source,ruby]
+----------------------------------
+    :access_key_id: "12345"
+    :secret_access_key: "54321"
+----------------------------------
+
+
+[id="{version}-plugins-{type}s-{plugin}-backup_add_prefix"]
+===== `backup_add_prefix`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `nil`
+
+Append a prefix to the key (the full path, including the file name, in S3) after processing.
+If backing up to another (or the same) bucket, this effectively lets you
+choose a new 'folder' to place the files in.
+
+[id="{version}-plugins-{type}s-{plugin}-backup_to_bucket"]
+===== `backup_to_bucket`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `nil`
+
+Name of an S3 bucket to back up processed files to.
+
+[id="{version}-plugins-{type}s-{plugin}-backup_to_dir"]
+===== `backup_to_dir`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `nil`
+
+Path of a local directory to back up processed files to.
+
+[id="{version}-plugins-{type}s-{plugin}-bucket"]
+===== `bucket`
+
+ * This is a required setting.
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+The name of the S3 bucket.
+
+[id="{version}-plugins-{type}s-{plugin}-delete"]
+===== `delete`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `false`
+
+Whether to delete processed files from the original bucket.
+
+[id="{version}-plugins-{type}s-{plugin}-exclude_pattern"]
+===== `exclude_pattern`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `nil`
+
+Ruby-style regexp of keys to exclude from the bucket.
+
+[id="{version}-plugins-{type}s-{plugin}-interval"]
+===== `interval`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `60`
+
+Interval, in seconds, to wait before checking the file list again after a run is finished.
+
+[id="{version}-plugins-{type}s-{plugin}-prefix"]
+===== `prefix`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `nil`
+
+If specified, only keys in the bucket whose prefix matches this string (not a regexp) are processed.
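+
+For example, a sketch that reads only keys under `logs/2017/` while skipping
+temporary files (the bucket name and patterns are illustrative; the doubled
+backslash escapes the dot inside the quoted string):
+
+    input {
+      s3 {
+        bucket          => "my-logs"       # placeholder name
+        prefix          => "logs/2017/"    # only keys under this prefix
+        exclude_pattern => "\\.tmp$"       # regexp: skip *.tmp keys
+      }
+    }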
+
+[id="{version}-plugins-{type}s-{plugin}-proxy_uri"]
+===== `proxy_uri`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+URI of the proxy server, if required.
+
+[id="{version}-plugins-{type}s-{plugin}-region"]
+===== `region`
+
+ * Value can be any of: `us-east-1`, `us-east-2`, `us-west-1`, `us-west-2`, `eu-central-1`, `eu-west-1`, `eu-west-2`, `ap-southeast-1`, `ap-southeast-2`, `ap-northeast-1`, `ap-northeast-2`, `sa-east-1`, `us-gov-west-1`, `cn-north-1`, `ap-south-1`, `ca-central-1`
+ * Default value is `"us-east-1"`
+
+The AWS Region.
+
+[id="{version}-plugins-{type}s-{plugin}-secret_access_key"]
+===== `secret_access_key`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+The AWS Secret Access Key.
+
+[id="{version}-plugins-{type}s-{plugin}-session_token"]
+===== `session_token`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+The AWS session token for temporary credentials.
+
+[id="{version}-plugins-{type}s-{plugin}-sincedb_path"]
+===== `sincedb_path`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `nil`
+
+Where to write the since database (keeps track of the date
+the last handled file was added to S3). The default will write
+sincedb files to the directory '{path.data}/plugins/inputs/s3/'.
+
+If specified, this setting must be a filename path and not just a directory.
+
+[id="{version}-plugins-{type}s-{plugin}-temporary_directory"]
+===== `temporary_directory`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `"/tmp/logstash"`
+
+Set the directory where Logstash stores the temporary files before processing them.
+Defaults to the OS temporary directory, `/tmp/logstash` on Linux.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
diff --git a/docs/versioned-plugins/inputs/s3-v3.2.0.asciidoc b/docs/versioned-plugins/inputs/s3-v3.2.0.asciidoc
new file mode 100644
index 000000000..d2482e853
--- /dev/null
+++ b/docs/versioned-plugins/inputs/s3-v3.2.0.asciidoc
@@ -0,0 +1,215 @@
+:plugin: s3
+:type: input
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.2.0
+:release_date: 2018-01-09
+:changelog_url: https://github.com/logstash-plugins/logstash-input-s3/blob/v3.2.0/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== S3 input plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Stream events from files in an S3 bucket.
+
+Each line from each file generates an event.
+Files ending in `.gz` are handled as gzipped files.
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== S3 Input Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-access_key_id>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-aws_credentials_file>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-backup_add_prefix>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-backup_to_bucket>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-backup_to_dir>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-bucket>> |{logstash-ref}/configuration-file-structure.html#string[string]|Yes
+| <<{version}-plugins-{type}s-{plugin}-delete>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-exclude_pattern>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-interval>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+| <<{version}-plugins-{type}s-{plugin}-prefix>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-proxy_uri>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-region>> |{logstash-ref}/configuration-file-structure.html#string[string], one of `["us-east-1", "us-east-2", "us-west-1", "us-west-2", "eu-central-1", "eu-west-1", "eu-west-2", "ap-southeast-1", "ap-southeast-2", "ap-northeast-1", "ap-northeast-2", "sa-east-1", "us-gov-west-1", "cn-north-1", "ap-south-1", "ca-central-1"]`|No
+| <<{version}-plugins-{type}s-{plugin}-secret_access_key>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-session_token>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-sincedb_path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-temporary_directory>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+input plugins.
+
+ 
+
+[id="{version}-plugins-{type}s-{plugin}-access_key_id"]
+===== `access_key_id`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+This plugin uses the AWS SDK and supports several ways to get credentials, which will be tried in this order:
+
+1. Static configuration, using `access_key_id` and `secret_access_key` params in the Logstash plugin config
+2. External credentials file specified by `aws_credentials_file`
+3. Environment variables `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY`
+4. Environment variables `AMAZON_ACCESS_KEY_ID` and `AMAZON_SECRET_ACCESS_KEY`
+5. IAM Instance Profile (available when running inside EC2)
+
+[id="{version}-plugins-{type}s-{plugin}-aws_credentials_file"]
+===== `aws_credentials_file`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+Path to a YAML file containing a hash of AWS credentials.
+This file will only be loaded if `access_key_id` and
+`secret_access_key` aren't set. The contents of the
+file should look like this:
+
+[source,ruby]
+----------------------------------
+    :access_key_id: "12345"
+    :secret_access_key: "54321"
+----------------------------------
+
+
+[id="{version}-plugins-{type}s-{plugin}-backup_add_prefix"]
+===== `backup_add_prefix`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `nil`
+
+Append a prefix to the key (the full path, including the file name, in S3) after processing.
+If backing up to another (or the same) bucket, this effectively lets you
+choose a new 'folder' to place the files in.
+
+[id="{version}-plugins-{type}s-{plugin}-backup_to_bucket"]
+===== `backup_to_bucket`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `nil`
+
+Name of an S3 bucket to back up processed files to.
+
+[id="{version}-plugins-{type}s-{plugin}-backup_to_dir"]
+===== `backup_to_dir`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `nil`
+
+Path of a local directory to back up processed files to.
+
+[id="{version}-plugins-{type}s-{plugin}-bucket"]
+===== `bucket`
+
+ * This is a required setting.
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+The name of the S3 bucket.
+
+[id="{version}-plugins-{type}s-{plugin}-delete"]
+===== `delete`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `false`
+
+Whether to delete processed files from the original bucket.
+
+[id="{version}-plugins-{type}s-{plugin}-exclude_pattern"]
+===== `exclude_pattern`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `nil`
+
+Ruby-style regexp of keys to exclude from the bucket.
+
+[id="{version}-plugins-{type}s-{plugin}-interval"]
+===== `interval`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `60`
+
+Interval, in seconds, to wait before checking the file list again after a run is finished.
+
+[id="{version}-plugins-{type}s-{plugin}-prefix"]
+===== `prefix`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `nil`
+
+If specified, only keys in the bucket whose prefix matches this string (not a regexp) are processed.
+
+[id="{version}-plugins-{type}s-{plugin}-proxy_uri"]
+===== `proxy_uri`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+URI of the proxy server, if required.
+
+[id="{version}-plugins-{type}s-{plugin}-region"]
+===== `region`
+
+ * Value can be any of: `us-east-1`, `us-east-2`, `us-west-1`, `us-west-2`, `eu-central-1`, `eu-west-1`, `eu-west-2`, `ap-southeast-1`, `ap-southeast-2`, `ap-northeast-1`, `ap-northeast-2`, `sa-east-1`, `us-gov-west-1`, `cn-north-1`, `ap-south-1`, `ca-central-1`
+ * Default value is `"us-east-1"`
+
+The AWS Region.
+
+[id="{version}-plugins-{type}s-{plugin}-secret_access_key"]
+===== `secret_access_key`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+The AWS Secret Access Key.
+
+[id="{version}-plugins-{type}s-{plugin}-session_token"]
+===== `session_token`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+The AWS session token for temporary credentials.
+
+[id="{version}-plugins-{type}s-{plugin}-sincedb_path"]
+===== `sincedb_path`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `nil`
+
+Where to write the since database (keeps track of the date
+the last handled file was added to S3). The default will write
+sincedb files to the directory '{path.data}/plugins/inputs/s3/'.
+
+If specified, this setting must be a filename path and not just a directory.
+
+[id="{version}-plugins-{type}s-{plugin}-temporary_directory"]
+===== `temporary_directory`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `"/tmp/logstash"`
+
+Set the directory where Logstash stores the temporary files before processing them.
+Defaults to the OS temporary directory, `/tmp/logstash` on Linux.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
diff --git a/docs/versioned-plugins/inputs/tcp-index.asciidoc b/docs/versioned-plugins/inputs/tcp-index.asciidoc
new file mode 100644
index 000000000..270e51dd1
--- /dev/null
+++ b/docs/versioned-plugins/inputs/tcp-index.asciidoc
@@ -0,0 +1,26 @@
+:plugin: tcp
+:type: input
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+| <> | 2017-12-27
+| <> | 2017-08-30
+| <> | 2017-08-16
+| <> | 2017-08-04
+| <> | 2017-08-30
+| <> | 2017-08-18
+| <> | 2017-06-30
+| <> | 2017-06-23
+|=======================================================================
+
+include::tcp-v5.0.3.asciidoc[]
+include::tcp-v5.0.2.asciidoc[]
+include::tcp-v5.0.1.asciidoc[]
+include::tcp-v5.0.0.asciidoc[]
+include::tcp-v4.2.4.asciidoc[]
+include::tcp-v4.2.3.asciidoc[]
+include::tcp-v4.2.2.asciidoc[]
+include::tcp-v4.1.2.asciidoc[]
+
diff --git a/docs/versioned-plugins/inputs/tcp-v4.1.2.asciidoc b/docs/versioned-plugins/inputs/tcp-v4.1.2.asciidoc
new file mode 100644
index 000000000..21d7816aa
--- /dev/null
+++ b/docs/versioned-plugins/inputs/tcp-v4.1.2.asciidoc
@@ -0,0 +1,205 @@
+:plugin: tcp
+:type: input
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v4.1.2
+:release_date: 2017-06-23
+:changelog_url: https://github.com/logstash-plugins/logstash-input-tcp/blob/v4.1.2/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Tcp input plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Read events over a TCP socket.
+
+Like stdin and file inputs, each event is assumed to be one line of text.
+
+Can either accept connections from clients or connect to a server,
+depending on `mode`.
+
+===== Accepting log4j2 logs
+
+Log4j2 can send JSON over a socket, and we can use that combined with our tcp
+input to accept the logs.
+
+First, configure your application to send logs as JSON over a
+socket. The following log4j2.xml accomplishes this task.
+
+Note that you will want to change the `host` and `port` settings in this
+configuration to match your needs.
+
+    <?xml version="1.0" encoding="UTF-8"?>
+    <Configuration>
+      <Appenders>
+        <Socket name="Socket" host="localhost" port="12345">
+          <JsonLayout compact="true" eventEol="true" />
+        </Socket>
+      </Appenders>
+      <Loggers>
+        <Root level="info">
+          <AppenderRef ref="Socket"/>
+        </Root>
+      </Loggers>
+    </Configuration>
+
+To accept this in Logstash, you will want a tcp input and a date filter:
+
+    input {
+      tcp {
+        port => 12345
+        codec => json
+      }
+    }
+
+and add a date filter to take log4j2's `timeMillis` field and use it as the
+event timestamp:
+
+    filter {
+      date {
+        match => [ "timeMillis", "UNIX_MS" ]
+      }
+    }
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Tcp Input Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-host>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-mode>> |{logstash-ref}/configuration-file-structure.html#string[string], one of `["server", "client"]`|No
+| <<{version}-plugins-{type}s-{plugin}-port>> |{logstash-ref}/configuration-file-structure.html#number[number]|Yes
+| <<{version}-plugins-{type}s-{plugin}-proxy_protocol>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_cert>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_enable>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_extra_chain_certs>> |{logstash-ref}/configuration-file-structure.html#array[array]|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_key>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_key_passphrase>> |{logstash-ref}/configuration-file-structure.html#password[password]|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_verify>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+input plugins.
+
+ 
+
+[id="{version}-plugins-{type}s-{plugin}-data_timeout"]
+===== `data_timeout` (DEPRECATED)
+
+ * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions.
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `-1`
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-host"]
+===== `host`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `"0.0.0.0"`
+
+When mode is `server`, the address to listen on.
+When mode is `client`, the address to connect to.
+
+[id="{version}-plugins-{type}s-{plugin}-mode"]
+===== `mode`
+
+ * Value can be any of: `server`, `client`
+ * Default value is `"server"`
+
+Mode to operate in. `server` listens for client connections,
+`client` connects to a server.
+
+[id="{version}-plugins-{type}s-{plugin}-port"]
+===== `port`
+
+ * This is a required setting.
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * There is no default value for this setting.
+
+When mode is `server`, the port to listen on.
+When mode is `client`, the port to connect to.
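+
+For example, a sketch of a client-mode configuration that connects out to a
+remote collector (the address and port are placeholders):
+
+    input {
+      tcp {
+        mode => "client"
+        host => "logs.example.com"   # placeholder address
+        port => 3333                 # placeholder port
+      }
+    }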
+
+[id="{version}-plugins-{type}s-{plugin}-proxy_protocol"]
+===== `proxy_protocol`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `false`
+
+Proxy protocol support; only v1 is supported at this time.
+See http://www.haproxy.org/download/1.5/doc/proxy-protocol.txt for details.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_cacert"]
+===== `ssl_cacert` (DEPRECATED)
+
+ * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions.
+ * Value type is {logstash-ref}/configuration-file-structure.html#path[path]
+ * There is no default value for this setting.
+
+The SSL CA certificate, chainfile, or CA path. The system CA path is automatically included.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_cert"]
+===== `ssl_cert`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#path[path]
+ * There is no default value for this setting.
+
+SSL certificate path.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_enable"]
+===== `ssl_enable`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `false`
+
+Enable SSL (must be set for other `ssl_` options to take effect).
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_extra_chain_certs"]
+===== `ssl_extra_chain_certs`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#array[array]
+ * Default value is `[]`
+
+An array of extra X509 certificates to be added to the certificate chain.
+Useful when the CA chain is not necessary in the system store.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_key"]
+===== `ssl_key`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#path[path]
+ * There is no default value for this setting.
+
+SSL key path.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_key_passphrase"]
+===== `ssl_key_passphrase`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#password[password]
+ * Default value is `nil`
+
+SSL key passphrase.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_verify"]
+===== `ssl_verify`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `true`
+
+Verify the identity of the other end of the SSL connection against the CA.
+For input, sets the field `sslsubject` to that of the client certificate.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/tcp-v4.2.2.asciidoc b/docs/versioned-plugins/inputs/tcp-v4.2.2.asciidoc
new file mode 100644
index 000000000..c5729d0e4
--- /dev/null
+++ b/docs/versioned-plugins/inputs/tcp-v4.2.2.asciidoc
@@ -0,0 +1,205 @@
+:plugin: tcp
+:type: input
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v4.2.2
+:release_date: 2017-06-30
+:changelog_url: https://github.com/logstash-plugins/logstash-input-tcp/blob/v4.2.2/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Tcp input plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Read events over a TCP socket.
+
+Like stdin and file inputs, each event is assumed to be one line of text.
+
+Can either accept connections from clients or connect to a server,
+depending on `mode`.
+
+===== Accepting log4j2 logs
+
+Log4j2 can send JSON over a socket, and we can use that combined with our tcp
+input to accept the logs.
+
+First, configure your application to send logs as JSON over a
+socket. The following log4j2.xml accomplishes this task.
+
+Note that you will want to change the `host` and `port` settings in this
+configuration to match your needs.
+
+    <?xml version="1.0" encoding="UTF-8"?>
+    <Configuration>
+      <Appenders>
+        <Socket name="Socket" host="localhost" port="12345">
+          <JsonLayout compact="true" eventEol="true" />
+        </Socket>
+      </Appenders>
+      <Loggers>
+        <Root level="info">
+          <AppenderRef ref="Socket"/>
+        </Root>
+      </Loggers>
+    </Configuration>
+
+To accept this in Logstash, you will want a tcp input and a date filter:
+
+    input {
+      tcp {
+        port => 12345
+        codec => json
+      }
+    }
+
+and add a date filter to take log4j2's `timeMillis` field and use it as the
+event timestamp:
+
+    filter {
+      date {
+        match => [ "timeMillis", "UNIX_MS" ]
+      }
+    }
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Tcp Input Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-host>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-mode>> |{logstash-ref}/configuration-file-structure.html#string[string], one of `["server", "client"]`|No
+| <<{version}-plugins-{type}s-{plugin}-port>> |{logstash-ref}/configuration-file-structure.html#number[number]|Yes
+| <<{version}-plugins-{type}s-{plugin}-proxy_protocol>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_cert>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_enable>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_extra_chain_certs>> |{logstash-ref}/configuration-file-structure.html#array[array]|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_key>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_key_passphrase>> |{logstash-ref}/configuration-file-structure.html#password[password]|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_verify>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+input plugins.
+
+ 
+
+[id="{version}-plugins-{type}s-{plugin}-data_timeout"]
+===== `data_timeout` (DEPRECATED)
+
+ * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions.
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `-1`
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-host"]
+===== `host`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `"0.0.0.0"`
+
+When mode is `server`, the address to listen on.
+When mode is `client`, the address to connect to.
+
+[id="{version}-plugins-{type}s-{plugin}-mode"]
+===== `mode`
+
+ * Value can be any of: `server`, `client`
+ * Default value is `"server"`
+
+Mode to operate in. `server` listens for client connections,
+`client` connects to a server.
+
+[id="{version}-plugins-{type}s-{plugin}-port"]
+===== `port`
+
+ * This is a required setting.
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * There is no default value for this setting.
+
+When mode is `server`, the port to listen on.
+When mode is `client`, the port to connect to.
+
+[id="{version}-plugins-{type}s-{plugin}-proxy_protocol"]
+===== `proxy_protocol`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `false`
+
+Proxy protocol support; only v1 is supported at this time.
+See http://www.haproxy.org/download/1.5/doc/proxy-protocol.txt for details.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_cacert"]
+===== `ssl_cacert` (DEPRECATED)
+
+ * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions.
+ * Value type is {logstash-ref}/configuration-file-structure.html#path[path]
+ * There is no default value for this setting.
+
+The SSL CA certificate, chainfile, or CA path. The system CA path is automatically included.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_cert"]
+===== `ssl_cert`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#path[path]
+ * There is no default value for this setting.
+
+SSL certificate path.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_enable"]
+===== `ssl_enable`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `false`
+
+Enable SSL (must be set for other `ssl_` options to take effect).
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_extra_chain_certs"]
+===== `ssl_extra_chain_certs`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#array[array]
+ * Default value is `[]`
+
+An array of extra X509 certificates to be added to the certificate chain.
+Useful when the CA chain is not necessary in the system store.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_key"]
+===== `ssl_key`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#path[path]
+ * There is no default value for this setting.
+
+SSL key path.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_key_passphrase"]
+===== `ssl_key_passphrase`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#password[password]
+ * Default value is `nil`
+
+SSL key passphrase.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_verify"]
+===== `ssl_verify`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `true`
+
+Verify the identity of the other end of the SSL connection against the CA.
+For input, sets the field `sslsubject` to that of the client certificate.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/tcp-v4.2.3.asciidoc b/docs/versioned-plugins/inputs/tcp-v4.2.3.asciidoc
new file mode 100644
index 000000000..22e027905
--- /dev/null
+++ b/docs/versioned-plugins/inputs/tcp-v4.2.3.asciidoc
@@ -0,0 +1,205 @@
+:plugin: tcp
+:type: input
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v4.2.3
+:release_date: 2017-08-18
+:changelog_url: https://github.com/logstash-plugins/logstash-input-tcp/blob/v4.2.3/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Tcp input plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Read events over a TCP socket.
+
+Like stdin and file inputs, each event is assumed to be one line of text.
+
+Can either accept connections from clients or connect to a server,
+depending on `mode`.
+
+===== Accepting log4j2 logs
+
+Log4j2 can send JSON over a socket, and we can use that combined with our tcp
+input to accept the logs.
+
+First, configure your application to send logs as JSON over a
+socket. The following log4j2.xml accomplishes this task.
+
+Note that you will want to change the `host` and `port` settings in this
+configuration to match your needs.
+
+    <?xml version="1.0" encoding="UTF-8"?>
+    <Configuration>
+      <Appenders>
+        <Socket name="Socket" host="localhost" port="12345">
+          <JsonLayout compact="true" eventEol="true" />
+        </Socket>
+      </Appenders>
+      <Loggers>
+        <Root level="info">
+          <AppenderRef ref="Socket"/>
+        </Root>
+      </Loggers>
+    </Configuration>
+
+To accept this in Logstash, you will want a tcp input and a date filter:
+
+    input {
+      tcp {
+        port => 12345
+        codec => json
+      }
+    }
+
+and add a date filter to take log4j2's `timeMillis` field and use it as the
+event timestamp:
+
+    filter {
+      date {
+        match => [ "timeMillis", "UNIX_MS" ]
+      }
+    }
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Tcp Input Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-host>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-mode>> |{logstash-ref}/configuration-file-structure.html#string[string], one of `["server", "client"]`|No
+| <<{version}-plugins-{type}s-{plugin}-port>> |{logstash-ref}/configuration-file-structure.html#number[number]|Yes
+| <<{version}-plugins-{type}s-{plugin}-proxy_protocol>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_cert>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_enable>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_extra_chain_certs>> |{logstash-ref}/configuration-file-structure.html#array[array]|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_key>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_key_passphrase>> |{logstash-ref}/configuration-file-structure.html#password[password]|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_verify>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+input plugins.
+
+ 
+
+[id="{version}-plugins-{type}s-{plugin}-data_timeout"]
+===== `data_timeout` (DEPRECATED)
+
+ * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions.
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `-1`
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-host"]
+===== `host`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `"0.0.0.0"`
+
+When mode is `server`, the address to listen on.
+When mode is `client`, the address to connect to.
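+
+For example, to accept connections only from processes on the same machine, the
+server can be bound to the loopback address (the port is illustrative):
+
+    input {
+      tcp {
+        host => "127.0.0.1"   # listen on the loopback interface only
+        port => 9000          # placeholder port
+      }
+    }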
+
+[id="{version}-plugins-{type}s-{plugin}-mode"]
+===== `mode`
+
+ * Value can be any of: `server`, `client`
+ * Default value is `"server"`
+
+Mode to operate in. `server` listens for client connections,
+`client` connects to a server.
+
+[id="{version}-plugins-{type}s-{plugin}-port"]
+===== `port`
+
+ * This is a required setting.
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * There is no default value for this setting.
+
+When mode is `server`, the port to listen on.
+When mode is `client`, the port to connect to.
+
+[id="{version}-plugins-{type}s-{plugin}-proxy_protocol"]
+===== `proxy_protocol`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `false`
+
+Proxy protocol support; only v1 is supported at this time.
+See http://www.haproxy.org/download/1.5/doc/proxy-protocol.txt for details.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_cacert"]
+===== `ssl_cacert` (DEPRECATED)
+
+ * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions.
+ * Value type is {logstash-ref}/configuration-file-structure.html#path[path]
+ * There is no default value for this setting.
+
+The SSL CA certificate, chainfile, or CA path. The system CA path is automatically included.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_cert"]
+===== `ssl_cert`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#path[path]
+ * There is no default value for this setting.
+
+SSL certificate path.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_enable"]
+===== `ssl_enable`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `false`
+
+Enable SSL (must be set for other `ssl_` options to take effect).
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_extra_chain_certs"]
+===== `ssl_extra_chain_certs`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#array[array]
+ * Default value is `[]`
+
+An array of extra X509 certificates to be added to the certificate chain.
+Useful when the CA chain is not necessary in the system store.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_key"]
+===== `ssl_key`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#path[path]
+ * There is no default value for this setting.
+
+SSL key path.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_key_passphrase"]
+===== `ssl_key_passphrase`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#password[password]
+ * Default value is `nil`
+
+SSL key passphrase.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_verify"]
+===== `ssl_verify`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `true`
+
+Verify the identity of the other end of the SSL connection against the CA.
+For input, sets the field `sslsubject` to that of the client certificate.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/tcp-v4.2.4.asciidoc b/docs/versioned-plugins/inputs/tcp-v4.2.4.asciidoc
new file mode 100644
index 000000000..76d5809da
--- /dev/null
+++ b/docs/versioned-plugins/inputs/tcp-v4.2.4.asciidoc
@@ -0,0 +1,205 @@
+:plugin: tcp
+:type: input
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v4.2.4
+:release_date: 2017-08-30
+:changelog_url: https://github.com/logstash-plugins/logstash-input-tcp/blob/v4.2.4/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Tcp input plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Read events over a TCP socket.
+
+Like stdin and file inputs, each event is assumed to be one line of text.
+
+Can either accept connections from clients or connect to a server,
+depending on `mode`.
+
+==== Accepting log4j2 logs
+
+Log4j2 can send JSON over a socket, and we can use that combined with our tcp
+input to accept the logs.
+
+First, configure your application to send logs in JSON over a
+socket. A minimal log4j2.xml along these lines accomplishes this task.
+
+Note that you will want to change the `host` and `port` settings in this
+configuration to match your needs.
+
+    <Configuration>
+      <Appenders>
+        <Socket name="Socket" host="localhost" port="12345">
+          <JsonLayout compact="true" eventEol="true" />
+        </Socket>
+      </Appenders>
+      <Loggers>
+        <Root level="info">
+          <AppenderRef ref="Socket"/>
+        </Root>
+      </Loggers>
+    </Configuration>
+
+To accept this in Logstash, you will want a tcp input and a date filter:
+
+    input {
+      tcp {
+        port => 12345
+        codec => json
+      }
+    }
+
+and add a date filter to take log4j2's `timeMillis` field and use it as the
+event timestamp:
+
+    filter {
+      date {
+        match => [ "timeMillis", "UNIX_MS" ]
+      }
+    }
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Tcp Input Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-host>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-mode>> |{logstash-ref}/configuration-file-structure.html#string[string], one of `["server", "client"]`|No
+| <<{version}-plugins-{type}s-{plugin}-port>> |{logstash-ref}/configuration-file-structure.html#number[number]|Yes
+| <<{version}-plugins-{type}s-{plugin}-proxy_protocol>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_cert>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_enable>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_extra_chain_certs>> |{logstash-ref}/configuration-file-structure.html#array[array]|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_key>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_key_passphrase>> |{logstash-ref}/configuration-file-structure.html#password[password]|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_verify>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+input plugins.
+
+ 
+
+[id="{version}-plugins-{type}s-{plugin}-data_timeout"]
+===== `data_timeout` (DEPRECATED)
+
+ * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions.
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number] + * Default value is `-1` + + + +[id="{version}-plugins-{type}s-{plugin}-host"] +===== `host` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * Default value is `"0.0.0.0"` + +When mode is `server`, the address to listen on. +When mode is `client`, the address to connect to. + +[id="{version}-plugins-{type}s-{plugin}-mode"] +===== `mode` + + * Value can be any of: `server`, `client` + * Default value is `"server"` + +Mode to operate in. `server` listens for client connections, +`client` connects to a server. + +[id="{version}-plugins-{type}s-{plugin}-port"] +===== `port` + + * This is a required setting. + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] + * There is no default value for this setting. + +When mode is `server`, the port to listen on. +When mode is `client`, the port to connect to. + +[id="{version}-plugins-{type}s-{plugin}-proxy_protocol"] +===== `proxy_protocol` + + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] + * Default value is `false` + +Proxy protocol support, only v1 is supported at this time +http://www.haproxy.org/download/1.5/doc/proxy-protocol.txt + +[id="{version}-plugins-{type}s-{plugin}-ssl_cacert"] +===== `ssl_cacert` (DEPRECATED) + + * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions. + * Value type is {logstash-ref}/configuration-file-structure.html#path[path] + * There is no default value for this setting. + +The SSL CA certificate, chainfile or CA path. The system CA path is automatically included. + +[id="{version}-plugins-{type}s-{plugin}-ssl_cert"] +===== `ssl_cert` + + * Value type is {logstash-ref}/configuration-file-structure.html#path[path] + * There is no default value for this setting. + +SSL certificate path + +[id="{version}-plugins-{type}s-{plugin}-ssl_enable"] +===== `ssl_enable` + + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] + * Default value is `false` + +Enable SSL (must be set for other `ssl_` options to take effect). + +[id="{version}-plugins-{type}s-{plugin}-ssl_extra_chain_certs"] +===== `ssl_extra_chain_certs` + + * Value type is {logstash-ref}/configuration-file-structure.html#array[array] + * Default value is `[]` + +An Array of extra X509 certificates to be added to the certificate chain. +Useful when the CA chain is not necessary in the system store. + +[id="{version}-plugins-{type}s-{plugin}-ssl_key"] +===== `ssl_key` + + * Value type is {logstash-ref}/configuration-file-structure.html#path[path] + * There is no default value for this setting. + +SSL key path + +[id="{version}-plugins-{type}s-{plugin}-ssl_key_passphrase"] +===== `ssl_key_passphrase` + + * Value type is {logstash-ref}/configuration-file-structure.html#password[password] + * Default value is `nil` + +SSL key passphrase + +[id="{version}-plugins-{type}s-{plugin}-ssl_verify"] +===== `ssl_verify` + + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] + * Default value is `true` + +Verify the identity of the other end of the SSL connection against the CA. +For input, sets the field `sslsubject` to that of the client certificate. 
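+
+Putting the SSL options together, a minimal sketch of an SSL-enabled listener
+(the certificate and key paths are illustrative, not defaults):
+
+    input {
+      tcp {
+        port => 6514
+        ssl_enable => true
+        ssl_cert => "/etc/logstash/tls/server.crt"   # illustrative path
+        ssl_key => "/etc/logstash/tls/server.key"    # illustrative path
+        ssl_verify => false                          # skip client certificate verification
+      }
+    }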
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/tcp-v5.0.0.asciidoc b/docs/versioned-plugins/inputs/tcp-v5.0.0.asciidoc
new file mode 100644
index 000000000..a0746ae4d
--- /dev/null
+++ b/docs/versioned-plugins/inputs/tcp-v5.0.0.asciidoc
@@ -0,0 +1,187 @@
+:plugin: tcp
+:type: input
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v5.0.0
+:release_date: 2017-08-04
+:changelog_url: https://github.com/logstash-plugins/logstash-input-tcp/blob/v5.0.0/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Tcp input plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Read events over a TCP socket.
+
+Like stdin and file inputs, each event is assumed to be one line of text.
+
+Can either accept connections from clients or connect to a server,
+depending on `mode`.
+
+==== Accepting log4j2 logs
+
+Log4j2 can send JSON over a socket, and we can use that combined with our tcp
+input to accept the logs.
+
+First, configure your application to send logs in JSON over a
+socket. A minimal log4j2.xml along these lines accomplishes this task.
+
+Note that you will want to change the `host` and `port` settings in this
+configuration to match your needs.
+
+    <Configuration>
+      <Appenders>
+        <Socket name="Socket" host="localhost" port="12345">
+          <JsonLayout compact="true" eventEol="true" />
+        </Socket>
+      </Appenders>
+      <Loggers>
+        <Root level="info">
+          <AppenderRef ref="Socket"/>
+        </Root>
+      </Loggers>
+    </Configuration>
+
+To accept this in Logstash, you will want a tcp input and a date filter:
+
+    input {
+      tcp {
+        port => 12345
+        codec => json
+      }
+    }
+
+and add a date filter to take log4j2's `timeMillis` field and use it as the
+event timestamp:
+
+    filter {
+      date {
+        match => [ "timeMillis", "UNIX_MS" ]
+      }
+    }
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Tcp Input Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+ +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-host>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-mode>> |{logstash-ref}/configuration-file-structure.html#string[string], one of `["server", "client"]`|No +| <<{version}-plugins-{type}s-{plugin}-port>> |{logstash-ref}/configuration-file-structure.html#number[number]|Yes +| <<{version}-plugins-{type}s-{plugin}-proxy_protocol>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-ssl_cert>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-ssl_enable>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-ssl_extra_chain_certs>> |{logstash-ref}/configuration-file-structure.html#array[array]|No +| <<{version}-plugins-{type}s-{plugin}-ssl_key>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-ssl_key_passphrase>> |{logstash-ref}/configuration-file-structure.html#password[password]|No +| <<{version}-plugins-{type}s-{plugin}-ssl_verify>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +input plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-host"] +===== `host` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * Default value is `"0.0.0.0"` + +When mode is `server`, the address to listen on. +When mode is `client`, the address to connect to. + +[id="{version}-plugins-{type}s-{plugin}-mode"] +===== `mode` + + * Value can be any of: `server`, `client` + * Default value is `"server"` + +Mode to operate in. `server` listens for client connections, +`client` connects to a server. + +[id="{version}-plugins-{type}s-{plugin}-port"] +===== `port` + + * This is a required setting. + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] + * There is no default value for this setting. + +When mode is `server`, the port to listen on. +When mode is `client`, the port to connect to. + +[id="{version}-plugins-{type}s-{plugin}-proxy_protocol"] +===== `proxy_protocol` + + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] + * Default value is `false` + +Proxy protocol support, only v1 is supported at this time +http://www.haproxy.org/download/1.5/doc/proxy-protocol.txt + +[id="{version}-plugins-{type}s-{plugin}-ssl_cert"] +===== `ssl_cert` + + * Value type is {logstash-ref}/configuration-file-structure.html#path[path] + * There is no default value for this setting. + +SSL certificate path + +[id="{version}-plugins-{type}s-{plugin}-ssl_enable"] +===== `ssl_enable` + + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] + * Default value is `false` + +Enable SSL (must be set for other `ssl_` options to take effect). + +[id="{version}-plugins-{type}s-{plugin}-ssl_extra_chain_certs"] +===== `ssl_extra_chain_certs` + + * Value type is {logstash-ref}/configuration-file-structure.html#array[array] + * Default value is `[]` + +An Array of extra X509 certificates to be added to the certificate chain. +Useful when the CA chain is not necessary in the system store. 
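+
+As an illustrative sketch, an intermediate certificate can be supplied
+alongside the server certificate and key (all paths here are hypothetical):
+
+    input {
+      tcp {
+        port => 6514
+        ssl_enable => true
+        ssl_cert => "/etc/logstash/tls/server.crt"
+        ssl_key => "/etc/logstash/tls/server.key"
+        ssl_extra_chain_certs => ["/etc/logstash/tls/intermediate.crt"]
+      }
+    }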
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_key"]
+===== `ssl_key`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#path[path]
+ * There is no default value for this setting.
+
+SSL key path
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_key_passphrase"]
+===== `ssl_key_passphrase`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#password[password]
+ * Default value is `nil`
+
+SSL key passphrase
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_verify"]
+===== `ssl_verify`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `true`
+
+Verify the identity of the other end of the SSL connection against the CA.
+For input, sets the field `sslsubject` to that of the client certificate.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/tcp-v5.0.1.asciidoc b/docs/versioned-plugins/inputs/tcp-v5.0.1.asciidoc
new file mode 100644
index 000000000..ed31c2842
--- /dev/null
+++ b/docs/versioned-plugins/inputs/tcp-v5.0.1.asciidoc
@@ -0,0 +1,187 @@
+:plugin: tcp
+:type: input
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v5.0.1
+:release_date: 2017-08-16
+:changelog_url: https://github.com/logstash-plugins/logstash-input-tcp/blob/v5.0.1/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Tcp input plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Read events over a TCP socket.
+
+Like stdin and file inputs, each event is assumed to be one line of text.
+
+Can either accept connections from clients or connect to a server,
+depending on `mode`.
+
+==== Accepting log4j2 logs
+
+Log4j2 can send JSON over a socket, and we can use that combined with our tcp
+input to accept the logs.
+
+First, configure your application to send logs in JSON over a
+socket. A minimal log4j2.xml along these lines accomplishes this task.
+
+Note that you will want to change the `host` and `port` settings in this
+configuration to match your needs.
+
+    <Configuration>
+      <Appenders>
+        <Socket name="Socket" host="localhost" port="12345">
+          <JsonLayout compact="true" eventEol="true" />
+        </Socket>
+      </Appenders>
+      <Loggers>
+        <Root level="info">
+          <AppenderRef ref="Socket"/>
+        </Root>
+      </Loggers>
+    </Configuration>
+
+To accept this in Logstash, you will want a tcp input and a date filter:
+
+    input {
+      tcp {
+        port => 12345
+        codec => json
+      }
+    }
+
+and add a date filter to take log4j2's `timeMillis` field and use it as the
+event timestamp:
+
+    filter {
+      date {
+        match => [ "timeMillis", "UNIX_MS" ]
+      }
+    }
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Tcp Input Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+ +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-host>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-mode>> |{logstash-ref}/configuration-file-structure.html#string[string], one of `["server", "client"]`|No +| <<{version}-plugins-{type}s-{plugin}-port>> |{logstash-ref}/configuration-file-structure.html#number[number]|Yes +| <<{version}-plugins-{type}s-{plugin}-proxy_protocol>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-ssl_cert>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-ssl_enable>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-ssl_extra_chain_certs>> |{logstash-ref}/configuration-file-structure.html#array[array]|No +| <<{version}-plugins-{type}s-{plugin}-ssl_key>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-ssl_key_passphrase>> |{logstash-ref}/configuration-file-structure.html#password[password]|No +| <<{version}-plugins-{type}s-{plugin}-ssl_verify>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +input plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-host"] +===== `host` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * Default value is `"0.0.0.0"` + +When mode is `server`, the address to listen on. +When mode is `client`, the address to connect to. + +[id="{version}-plugins-{type}s-{plugin}-mode"] +===== `mode` + + * Value can be any of: `server`, `client` + * Default value is `"server"` + +Mode to operate in. `server` listens for client connections, +`client` connects to a server. + +[id="{version}-plugins-{type}s-{plugin}-port"] +===== `port` + + * This is a required setting. + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] + * There is no default value for this setting. + +When mode is `server`, the port to listen on. +When mode is `client`, the port to connect to. + +[id="{version}-plugins-{type}s-{plugin}-proxy_protocol"] +===== `proxy_protocol` + + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] + * Default value is `false` + +Proxy protocol support, only v1 is supported at this time +http://www.haproxy.org/download/1.5/doc/proxy-protocol.txt + +[id="{version}-plugins-{type}s-{plugin}-ssl_cert"] +===== `ssl_cert` + + * Value type is {logstash-ref}/configuration-file-structure.html#path[path] + * There is no default value for this setting. + +SSL certificate path + +[id="{version}-plugins-{type}s-{plugin}-ssl_enable"] +===== `ssl_enable` + + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] + * Default value is `false` + +Enable SSL (must be set for other `ssl_` options to take effect). + +[id="{version}-plugins-{type}s-{plugin}-ssl_extra_chain_certs"] +===== `ssl_extra_chain_certs` + + * Value type is {logstash-ref}/configuration-file-structure.html#array[array] + * Default value is `[]` + +An Array of extra X509 certificates to be added to the certificate chain. +Useful when the CA chain is not necessary in the system store. 
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_key"]
+===== `ssl_key`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#path[path]
+ * There is no default value for this setting.
+
+SSL key path
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_key_passphrase"]
+===== `ssl_key_passphrase`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#password[password]
+ * Default value is `nil`
+
+SSL key passphrase
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_verify"]
+===== `ssl_verify`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `true`
+
+Verify the identity of the other end of the SSL connection against the CA.
+For input, sets the field `sslsubject` to that of the client certificate.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/tcp-v5.0.2.asciidoc b/docs/versioned-plugins/inputs/tcp-v5.0.2.asciidoc
new file mode 100644
index 000000000..f73a5d5cc
--- /dev/null
+++ b/docs/versioned-plugins/inputs/tcp-v5.0.2.asciidoc
@@ -0,0 +1,187 @@
+:plugin: tcp
+:type: input
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v5.0.2
+:release_date: 2017-08-30
+:changelog_url: https://github.com/logstash-plugins/logstash-input-tcp/blob/v5.0.2/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Tcp input plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Read events over a TCP socket.
+
+Like stdin and file inputs, each event is assumed to be one line of text.
+
+Can either accept connections from clients or connect to a server,
+depending on `mode`.
+
+==== Accepting log4j2 logs
+
+Log4j2 can send JSON over a socket, and we can use that combined with our tcp
+input to accept the logs.
+
+First, configure your application to send logs in JSON over a
+socket. A minimal log4j2.xml along these lines accomplishes this task.
+
+Note that you will want to change the `host` and `port` settings in this
+configuration to match your needs.
+
+    <Configuration>
+      <Appenders>
+        <Socket name="Socket" host="localhost" port="12345">
+          <JsonLayout compact="true" eventEol="true" />
+        </Socket>
+      </Appenders>
+      <Loggers>
+        <Root level="info">
+          <AppenderRef ref="Socket"/>
+        </Root>
+      </Loggers>
+    </Configuration>
+
+To accept this in Logstash, you will want a tcp input and a date filter:
+
+    input {
+      tcp {
+        port => 12345
+        codec => json
+      }
+    }
+
+and add a date filter to take log4j2's `timeMillis` field and use it as the
+event timestamp:
+
+    filter {
+      date {
+        match => [ "timeMillis", "UNIX_MS" ]
+      }
+    }
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Tcp Input Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+ +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-host>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-mode>> |{logstash-ref}/configuration-file-structure.html#string[string], one of `["server", "client"]`|No +| <<{version}-plugins-{type}s-{plugin}-port>> |{logstash-ref}/configuration-file-structure.html#number[number]|Yes +| <<{version}-plugins-{type}s-{plugin}-proxy_protocol>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-ssl_cert>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-ssl_enable>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-ssl_extra_chain_certs>> |{logstash-ref}/configuration-file-structure.html#array[array]|No +| <<{version}-plugins-{type}s-{plugin}-ssl_key>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-ssl_key_passphrase>> |{logstash-ref}/configuration-file-structure.html#password[password]|No +| <<{version}-plugins-{type}s-{plugin}-ssl_verify>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +input plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-host"] +===== `host` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * Default value is `"0.0.0.0"` + +When mode is `server`, the address to listen on. +When mode is `client`, the address to connect to. + +[id="{version}-plugins-{type}s-{plugin}-mode"] +===== `mode` + + * Value can be any of: `server`, `client` + * Default value is `"server"` + +Mode to operate in. `server` listens for client connections, +`client` connects to a server. + +[id="{version}-plugins-{type}s-{plugin}-port"] +===== `port` + + * This is a required setting. + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] + * There is no default value for this setting. + +When mode is `server`, the port to listen on. +When mode is `client`, the port to connect to. + +[id="{version}-plugins-{type}s-{plugin}-proxy_protocol"] +===== `proxy_protocol` + + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] + * Default value is `false` + +Proxy protocol support, only v1 is supported at this time +http://www.haproxy.org/download/1.5/doc/proxy-protocol.txt + +[id="{version}-plugins-{type}s-{plugin}-ssl_cert"] +===== `ssl_cert` + + * Value type is {logstash-ref}/configuration-file-structure.html#path[path] + * There is no default value for this setting. + +SSL certificate path + +[id="{version}-plugins-{type}s-{plugin}-ssl_enable"] +===== `ssl_enable` + + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] + * Default value is `false` + +Enable SSL (must be set for other `ssl_` options to take effect). + +[id="{version}-plugins-{type}s-{plugin}-ssl_extra_chain_certs"] +===== `ssl_extra_chain_certs` + + * Value type is {logstash-ref}/configuration-file-structure.html#array[array] + * Default value is `[]` + +An Array of extra X509 certificates to be added to the certificate chain. +Useful when the CA chain is not necessary in the system store. 
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_key"]
+===== `ssl_key`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#path[path]
+ * There is no default value for this setting.
+
+SSL key path
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_key_passphrase"]
+===== `ssl_key_passphrase`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#password[password]
+ * Default value is `nil`
+
+SSL key passphrase
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_verify"]
+===== `ssl_verify`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `true`
+
+Verify the identity of the other end of the SSL connection against the CA.
+For input, sets the field `sslsubject` to that of the client certificate.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/tcp-v5.0.3.asciidoc b/docs/versioned-plugins/inputs/tcp-v5.0.3.asciidoc
new file mode 100644
index 000000000..f7a996644
--- /dev/null
+++ b/docs/versioned-plugins/inputs/tcp-v5.0.3.asciidoc
@@ -0,0 +1,187 @@
+:plugin: tcp
+:type: input
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v5.0.3
+:release_date: 2017-12-27
+:changelog_url: https://github.com/logstash-plugins/logstash-input-tcp/blob/v5.0.3/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Tcp input plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Read events over a TCP socket.
+
+Like stdin and file inputs, each event is assumed to be one line of text.
+
+Can either accept connections from clients or connect to a server,
+depending on `mode`.
+
+==== Accepting log4j2 logs
+
+Log4j2 can send JSON over a socket, and we can use that combined with our tcp
+input to accept the logs.
+
+First, configure your application to send logs in JSON over a
+socket. A minimal log4j2.xml along these lines accomplishes this task.
+
+Note that you will want to change the `host` and `port` settings in this
+configuration to match your needs.
+
+    <Configuration>
+      <Appenders>
+        <Socket name="Socket" host="localhost" port="12345">
+          <JsonLayout compact="true" eventEol="true" />
+        </Socket>
+      </Appenders>
+      <Loggers>
+        <Root level="info">
+          <AppenderRef ref="Socket"/>
+        </Root>
+      </Loggers>
+    </Configuration>
+
+To accept this in Logstash, you will want a tcp input and a date filter:
+
+    input {
+      tcp {
+        port => 12345
+        codec => json
+      }
+    }
+
+and add a date filter to take log4j2's `timeMillis` field and use it as the
+event timestamp:
+
+    filter {
+      date {
+        match => [ "timeMillis", "UNIX_MS" ]
+      }
+    }
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Tcp Input Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+ +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-host>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-mode>> |{logstash-ref}/configuration-file-structure.html#string[string], one of `["server", "client"]`|No +| <<{version}-plugins-{type}s-{plugin}-port>> |{logstash-ref}/configuration-file-structure.html#number[number]|Yes +| <<{version}-plugins-{type}s-{plugin}-proxy_protocol>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-ssl_cert>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-ssl_enable>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-ssl_extra_chain_certs>> |{logstash-ref}/configuration-file-structure.html#array[array]|No +| <<{version}-plugins-{type}s-{plugin}-ssl_key>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-ssl_key_passphrase>> |{logstash-ref}/configuration-file-structure.html#password[password]|No +| <<{version}-plugins-{type}s-{plugin}-ssl_verify>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +input plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-host"] +===== `host` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * Default value is `"0.0.0.0"` + +When mode is `server`, the address to listen on. +When mode is `client`, the address to connect to. + +[id="{version}-plugins-{type}s-{plugin}-mode"] +===== `mode` + + * Value can be any of: `server`, `client` + * Default value is `"server"` + +Mode to operate in. `server` listens for client connections, +`client` connects to a server. + +[id="{version}-plugins-{type}s-{plugin}-port"] +===== `port` + + * This is a required setting. + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] + * There is no default value for this setting. + +When mode is `server`, the port to listen on. +When mode is `client`, the port to connect to. + +[id="{version}-plugins-{type}s-{plugin}-proxy_protocol"] +===== `proxy_protocol` + + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] + * Default value is `false` + +Proxy protocol support, only v1 is supported at this time +http://www.haproxy.org/download/1.5/doc/proxy-protocol.txt + +[id="{version}-plugins-{type}s-{plugin}-ssl_cert"] +===== `ssl_cert` + + * Value type is {logstash-ref}/configuration-file-structure.html#path[path] + * There is no default value for this setting. + +SSL certificate path + +[id="{version}-plugins-{type}s-{plugin}-ssl_enable"] +===== `ssl_enable` + + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] + * Default value is `false` + +Enable SSL (must be set for other `ssl_` options to take effect). + +[id="{version}-plugins-{type}s-{plugin}-ssl_extra_chain_certs"] +===== `ssl_extra_chain_certs` + + * Value type is {logstash-ref}/configuration-file-structure.html#array[array] + * Default value is `[]` + +An Array of extra X509 certificates to be added to the certificate chain. +Useful when the CA chain is not necessary in the system store. 
+ +[id="{version}-plugins-{type}s-{plugin}-ssl_key"] +===== `ssl_key` + + * Value type is {logstash-ref}/configuration-file-structure.html#path[path] + * There is no default value for this setting. + +SSL key path + +[id="{version}-plugins-{type}s-{plugin}-ssl_key_passphrase"] +===== `ssl_key_passphrase` + + * Value type is {logstash-ref}/configuration-file-structure.html#password[password] + * Default value is `nil` + +SSL key passphrase + +[id="{version}-plugins-{type}s-{plugin}-ssl_verify"] +===== `ssl_verify` + + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] + * Default value is `true` + +Verify the identity of the other end of the SSL connection against the CA. +For input, sets the field `sslsubject` to that of the client certificate. + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs-index.asciidoc b/docs/versioned-plugins/outputs-index.asciidoc new file mode 100644 index 000000000..b1f495dc3 --- /dev/null +++ b/docs/versioned-plugins/outputs-index.asciidoc @@ -0,0 +1,10 @@ +:type: output +:type_uc: Output + +include::include/plugin-intro.asciidoc[] + +include::outputs/elasticsearch-index.asciidoc[] +include::outputs/email-index.asciidoc[] +include::outputs/file-index.asciidoc[] +include::outputs/graphite-index.asciidoc[] + diff --git a/docs/versioned-plugins/outputs/elasticsearch-index.asciidoc b/docs/versioned-plugins/outputs/elasticsearch-index.asciidoc new file mode 100644 index 000000000..ceb808ddf --- /dev/null +++ b/docs/versioned-plugins/outputs/elasticsearch-index.asciidoc @@ -0,0 +1,44 @@ +:plugin: elasticsearch +:type: output + +include::{include_path}/version-list-intro.asciidoc[] + +|======================================================================= +| Version | Release Date +| <> | 2017-11-30 +| <> | 2017-09-29 +| <> | 2017-09-21 +| <> | 2017-09-21 +| <> | 2017-08-21 +| <> | 2017-08-16 +| <> | 2017-08-01 +| <> | 2017-09-22 +| <> | 2017-09-21 +| <> | 2017-08-21 +| <> | 2017-07-20 +| <> | 2017-07-18 +| <> | 2017-06-23 +| <> | 2017-06-09 +| <> | 2017-06-06 +| <> | 2017-06-05 +| <> | 2017-05-26 +|======================================================================= + +include::elasticsearch-v9.0.2.asciidoc[] +include::elasticsearch-v9.0.0.asciidoc[] +include::elasticsearch-v8.2.2.asciidoc[] +include::elasticsearch-v8.2.0.asciidoc[] +include::elasticsearch-v8.1.1.asciidoc[] +include::elasticsearch-v8.0.1.asciidoc[] +include::elasticsearch-v8.0.0.asciidoc[] +include::elasticsearch-v7.4.2.asciidoc[] +include::elasticsearch-v7.4.1.asciidoc[] +include::elasticsearch-v7.4.0.asciidoc[] +include::elasticsearch-v7.3.8.asciidoc[] +include::elasticsearch-v7.3.7.asciidoc[] +include::elasticsearch-v7.3.6.asciidoc[] +include::elasticsearch-v7.3.5.asciidoc[] +include::elasticsearch-v7.3.4.asciidoc[] +include::elasticsearch-v7.3.3.asciidoc[] +include::elasticsearch-v7.3.2.asciidoc[] + diff --git a/docs/versioned-plugins/outputs/elasticsearch-v7.3.2.asciidoc b/docs/versioned-plugins/outputs/elasticsearch-v7.3.2.asciidoc new file mode 100644 index 000000000..65b44f827 --- /dev/null +++ b/docs/versioned-plugins/outputs/elasticsearch-v7.3.2.asciidoc @@ -0,0 +1,679 @@ +:plugin: elasticsearch +:type: output + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! 
+///////////////////////////////////////////
+:version: v7.3.2
+:release_date: 2017-05-26
+:changelog_url: https://github.com/logstash-plugins/logstash-output-elasticsearch/blob/v7.3.2/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Elasticsearch output plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+.Compatibility Note
+[NOTE]
+================================================================================
+Starting with Elasticsearch 5.3, there's an {ref}/modules-http.html[HTTP setting]
+called `http.content_type.required`. If this option is set to `true`, and you
+are using Logstash 2.4 through 5.2, you need to update the Elasticsearch output
+plugin to version 6.2.5 or higher.
+
+================================================================================
+
+This plugin is the recommended method of storing logs in Elasticsearch.
+If you plan on using the Kibana web interface, you'll want to use this output.
+
+This output only speaks the HTTP protocol. HTTP is the preferred protocol for interacting with Elasticsearch as of Logstash 2.0.
+We strongly encourage the use of HTTP over the node protocol for a number of reasons. HTTP is only marginally slower,
+yet far easier to administer and work with. When using the HTTP protocol one may upgrade Elasticsearch versions without having
+to upgrade Logstash in lock-step.
+
+You can learn more about Elasticsearch at https://www.elastic.co/products/elasticsearch
+
+==== Template management for Elasticsearch 5.x
+The index template for this version (Logstash 5.0) has been changed to reflect Elasticsearch's mapping changes in version 5.0.
+Most importantly, the subfield for string multi-fields has changed from `.raw` to `.keyword` to match ES default
+behavior.
+
+**Users installing ES 5.x and LS 5.x**
+This change will not affect you and you will continue to use the ES defaults.
+
+**Users upgrading from LS 2.x to LS 5.x with ES 5.x**
+LS will not force-upgrade the template if a `logstash` template already exists. This means you will still use
+`.raw` for sub-fields coming from 2.x. If you choose to use the new template, you will have to reindex your data after
+the new template is installed.
+
+==== Retry Policy
+
+The retry policy changed significantly in the 2.2.0 release.
+This plugin uses the Elasticsearch bulk API to optimize its imports into Elasticsearch. These requests may experience
+either partial or total failures.
+
+The following errors are retried infinitely:
+
+- Network errors (inability to connect)
+- 429 (Too Many Requests) errors
+- 503 (Service Unavailable) errors
+
+NOTE: 409 exceptions are no longer retried. Please set a higher `retry_on_conflict` value if you experience 409 exceptions.
+It is more performant for Elasticsearch to retry these exceptions than this plugin.
+
+==== Batch Sizes
+This plugin attempts to send batches of events as a single request. However, if
+a request exceeds 20MB we will break it up into multiple batch requests. If a single document exceeds 20MB it will be sent as a single request.
+
+==== DNS Caching
+
+This plugin uses the JVM to look up DNS entries and is subject to the value of https://docs.oracle.com/javase/7/docs/technotes/guides/net/properties.html[networkaddress.cache.ttl],
+a global setting for the JVM.
+
+As an example, to set your DNS TTL to 1 second you would set
+the `LS_JAVA_OPTS` environment variable to `-Dnetworkaddress.cache.ttl=1`.
+
+Keep in mind that a connection with keepalive enabled will
+not reevaluate its DNS value while the keepalive is in effect.
+
+==== HTTP Compression
+
+This plugin supports request and response compression. Response compression is enabled by default; for
+Elasticsearch 5.0 and later, no configuration is needed on the Elasticsearch side for
+it to send back a compressed response. For versions before 5.0, `http.compression` must be set to `true` in
+https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-http.html#modules-http[Elasticsearch] to take advantage of response compression when using this plugin.
+
+For request compression, regardless of the Elasticsearch version, enable the `http_compression`
+setting in your Logstash config file.
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Elasticsearch Output Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-action>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-bulk_path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-cacert>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-doc_as_upsert>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-document_id>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-document_type>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-failure_type_logging_whitelist>> |{logstash-ref}/configuration-file-structure.html#array[array]|No
+| <<{version}-plugins-{type}s-{plugin}-healthcheck_path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-hosts>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-http_compression>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-index>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-keystore>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-keystore_password>> |{logstash-ref}/configuration-file-structure.html#password[password]|No
+| <<{version}-plugins-{type}s-{plugin}-manage_template>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-parameters>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No
+| <<{version}-plugins-{type}s-{plugin}-parent>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-password>> |{logstash-ref}/configuration-file-structure.html#password[password]|No
+| <<{version}-plugins-{type}s-{plugin}-path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-pipeline>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-pool_max>> 
|{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-pool_max_per_route>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-proxy>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-resurrect_delay>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-retry_initial_interval>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-retry_max_interval>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-retry_on_conflict>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-routing>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-script>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-script_lang>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-script_type>> |{logstash-ref}/configuration-file-structure.html#string[string], one of `["inline", "indexed", "file"]`|No +| <<{version}-plugins-{type}s-{plugin}-script_var_name>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-scripted_upsert>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-sniffing>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-sniffing_delay>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-sniffing_path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-ssl>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_verification>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-template>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-template_name>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-template_overwrite>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-timeout>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-truststore>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-truststore_password>> |{logstash-ref}/configuration-file-structure.html#password[password]|No +| <<{version}-plugins-{type}s-{plugin}-upsert>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-user>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-validate_after_inactivity>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-version>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-version_type>> |{logstash-ref}/configuration-file-structure.html#string[string], one of `["internal", "external", "external_gt", "external_gte", "force"]`|No 
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+output plugins.
+
+ 
+
+[id="{version}-plugins-{type}s-{plugin}-action"]
+===== `action`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `"index"`
+
+The Elasticsearch action to perform. Valid actions are:
+
+- index: indexes a document (an event from Logstash).
+- delete: deletes a document by id (An id is required for this action)
+- create: indexes a document, fails if a document by that id already exists in the index.
+- update: updates a document by id. Update has a special case where you can upsert -- update a
+  document if not already present. See the `upsert` option. NOTE: This does not work and is not supported
+  in Elasticsearch 1.x. Please upgrade to ES 2.x or greater to use this feature with Logstash!
+- A sprintf style string to change the action based on the content of the event. The value `%{[foo]}`
+  would use the foo field for the action.
+
+For more details on actions, check out the http://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html[Elasticsearch bulk API documentation].
+
+[id="{version}-plugins-{type}s-{plugin}-bulk_path"]
+===== `bulk_path`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+HTTP Path to perform the _bulk requests to.
+This defaults to a concatenation of the `path` parameter and "_bulk".
+
+[id="{version}-plugins-{type}s-{plugin}-cacert"]
+===== `cacert`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#path[path]
+ * There is no default value for this setting.
+
+The .cer or .pem file to validate the server's certificate.
+
+[id="{version}-plugins-{type}s-{plugin}-doc_as_upsert"]
+===== `doc_as_upsert`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `false`
+
+Enable `doc_as_upsert` for update mode.
+Create a new document with source if `document_id` doesn't exist in Elasticsearch.
+
+[id="{version}-plugins-{type}s-{plugin}-document_id"]
+===== `document_id`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+The document ID for the index. Useful for overwriting existing entries in
+Elasticsearch with the same ID.
+
+[id="{version}-plugins-{type}s-{plugin}-document_type"]
+===== `document_type`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+The document type to write events to. Generally you should try to write only
+similar events to the same 'type'. String expansion `%{foo}` works here.
+Unless you set 'document_type', the event 'type' will be used if it exists;
+otherwise, the document type will be assigned the value of 'logs'.
+
+[id="{version}-plugins-{type}s-{plugin}-failure_type_logging_whitelist"]
+===== `failure_type_logging_whitelist`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#array[array]
+ * Default value is `[]`
+
+Set the Elasticsearch errors that you don't want to log.
+A useful example is when you want to skip all 409 errors,
+which are `document_already_exists_exception`.
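+
+For instance, a sketch that suppresses logging of those 409 conflicts (the
+hosts value is illustrative):
+
+    output {
+      elasticsearch {
+        hosts => ["127.0.0.1:9200"]
+        failure_type_logging_whitelist => ["document_already_exists_exception"]
+      }
+    }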
+
+[id="{version}-plugins-{type}s-{plugin}-flush_size"]
+===== `flush_size` (DEPRECATED)
+
+ * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions.
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * There is no default value for this setting.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-healthcheck_path"]
+===== `healthcheck_path`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+HTTP Path where a HEAD request is sent when a backend is marked down.
+The request is sent in the background to see if the backend has come back again
+before it is once again eligible to service requests.
+If you have custom firewall rules, you may need to change this.
+
+[id="{version}-plugins-{type}s-{plugin}-hosts"]
+===== `hosts`
+
+ * Value type is <>
+ * Default value is `[//127.0.0.1]`
+
+Sets the host(s) of the remote instance. If given an array it will load balance requests across the hosts specified in the `hosts` parameter.
+Remember the `http` protocol uses the http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-http.html#modules-http[http] address (e.g. 9200, not 9300).
+    `"127.0.0.1"`
+    `["127.0.0.1:9200","127.0.0.2:9200"]`
+    `["http://127.0.0.1"]`
+    `["https://127.0.0.1:9200"]`
+    `["https://127.0.0.1:9200/mypath"]` (If using a proxy on a subpath)
+It is important to exclude http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-node.html[dedicated master nodes] from the `hosts` list
+to prevent LS from sending bulk requests to the master nodes. So this parameter should only reference either data or client nodes in Elasticsearch.
+
+Any special characters present in the URLs here MUST be URL escaped! This means `#` should be put in as `%23` for instance.
+
+[id="{version}-plugins-{type}s-{plugin}-http_compression"]
+===== `http_compression`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `false`
+
+Enable gzip compression on requests. Note that response compression is on by default for Elasticsearch v5.0 and beyond.
+
+[id="{version}-plugins-{type}s-{plugin}-idle_flush_time"]
+===== `idle_flush_time` (DEPRECATED)
+
+ * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions.
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `1`
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-index"]
+===== `index`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `"logstash-%{+YYYY.MM.dd}"`
+
+The index to write events to. This can be dynamic using the `%{foo}` syntax.
+The default value will partition your indices by day so you can more easily
+delete old data or only search specific date ranges.
+Indexes may not contain uppercase characters.
+For weekly indexes ISO 8601 format is recommended, e.g. `logstash-%{+xxxx.ww}`.
+LS uses Joda to format the index pattern from the event timestamp.
+Joda formats are defined http://www.joda.org/joda-time/apidocs/org/joda/time/format/DateTimeFormat.html[here].
+
+[id="{version}-plugins-{type}s-{plugin}-keystore"]
+===== `keystore`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#path[path]
+ * There is no default value for this setting.
+
+The keystore used to present a certificate to the server.
+It can be either `.jks` or `.p12`.
+
+[id="{version}-plugins-{type}s-{plugin}-keystore_password"]
+===== `keystore_password`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#password[password]
+ * There is no default value for this setting.
+
+Set the keystore password.
+
+[id="{version}-plugins-{type}s-{plugin}-manage_template"]
+===== `manage_template`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `true`
+
+From Logstash 1.3 onwards, a template is applied to Elasticsearch during
+Logstash's startup if one with the name `template_name` does not already exist.
+By default, the contents of this template are the default template for
+`logstash-%{+YYYY.MM.dd}` which always matches indices based on the pattern
+`logstash-*`. Should you require support for other index names, or would like
+to change the mappings in the template in general, a custom template can be
+specified by setting `template` to the path of a template file.
+
+Setting `manage_template` to false disables this feature. If you require more
+control over template creation (e.g. creating indices dynamically based on
+field names) you should set `manage_template` to false and use the REST
+API to apply your templates manually.
+
+[id="{version}-plugins-{type}s-{plugin}-parameters"]
+===== `parameters`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash]
+ * There is no default value for this setting.
+
+Pass a set of key value pairs as the URL query string. This query string is added
+to every host listed in the 'hosts' configuration. If the 'hosts' list contains
+URLs that already have query strings, the one specified here will be appended.
+
+[id="{version}-plugins-{type}s-{plugin}-parent"]
+===== `parent`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `nil`
+
+For child documents, ID of the associated parent.
+This can be dynamic using the `%{foo}` syntax.
+
+[id="{version}-plugins-{type}s-{plugin}-password"]
+===== `password`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#password[password]
+ * There is no default value for this setting.
+
+Password to authenticate to a secure Elasticsearch cluster.
+
+[id="{version}-plugins-{type}s-{plugin}-path"]
+===== `path`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+HTTP Path at which the Elasticsearch server lives. Use this if you must run Elasticsearch behind a proxy that remaps
+the root path of the Elasticsearch HTTP API.
+Note that if you use paths as components of URLs in the 'hosts' field you may
+not also set this field. That will raise an error at startup.
+
+[id="{version}-plugins-{type}s-{plugin}-pipeline"]
+===== `pipeline`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `nil`
+
+Set which ingest pipeline you wish to execute for an event. You can also use event dependent configuration
+here like `pipeline => "%{INGEST_PIPELINE}"`.
+
+[id="{version}-plugins-{type}s-{plugin}-pool_max"]
+===== `pool_max`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `1000`
+
+While the output tries to reuse connections efficiently we have a maximum.
+This sets the maximum number of open connections the output will create.
+Setting this too low may mean frequently closing / opening connections,
+which is bad.
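+
+As an illustrative sketch, this limit is usually tuned together with
+`pool_max_per_route` (described next); the hosts and values here are
+hypothetical:
+
+    output {
+      elasticsearch {
+        hosts => ["es1:9200", "es2:9200"]
+        pool_max => 1000          # total open connections across all endpoints
+        pool_max_per_route => 100 # open connections per endpoint
+      }
+    }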
+
+[id="{version}-plugins-{type}s-{plugin}-pool_max_per_route"]
+===== `pool_max_per_route`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `100`
+
+While the output tries to reuse connections efficiently we have a maximum per endpoint.
+This sets the maximum number of open connections per endpoint the output will create.
+Setting this too low may mean frequently closing / opening connections,
+which is bad.
+
+[id="{version}-plugins-{type}s-{plugin}-proxy"]
+===== `proxy`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Set the address of a forward HTTP proxy.
+This used to accept hashes as arguments but now only accepts
+arguments of the URI type to prevent leaking credentials.
+
+[id="{version}-plugins-{type}s-{plugin}-resurrect_delay"]
+===== `resurrect_delay`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `5`
+
+How long, in seconds, to wait between resurrection attempts.
+Resurrection is the process by which backend endpoints marked 'down' are checked
+to see if they have come back to life.
+
+[id="{version}-plugins-{type}s-{plugin}-retry_initial_interval"]
+===== `retry_initial_interval`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `2`
+
+Set the initial interval in seconds between bulk retries. Doubled on each retry up to `retry_max_interval`.
+
+[id="{version}-plugins-{type}s-{plugin}-retry_max_interval"]
+===== `retry_max_interval`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `64`
+
+Set the max interval in seconds between bulk retries.
+
+[id="{version}-plugins-{type}s-{plugin}-retry_on_conflict"]
+===== `retry_on_conflict`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `1`
+
+The number of times Elasticsearch should internally retry an update/upserted document.
+See the https://www.elastic.co/guide/en/elasticsearch/guide/current/partial-updates.html[partial updates documentation]
+for more info.
+
+[id="{version}-plugins-{type}s-{plugin}-routing"]
+===== `routing`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+A routing override to be applied to all processed events.
+This can be dynamic using the `%{foo}` syntax.
+
+[id="{version}-plugins-{type}s-{plugin}-script"]
+===== `script`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `""`
+
+Set the script name for scripted update mode.
+
+[id="{version}-plugins-{type}s-{plugin}-script_lang"]
+===== `script_lang`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `"painless"`
+
+Set the language of the script to use.
+
+[id="{version}-plugins-{type}s-{plugin}-script_type"]
+===== `script_type`
+
+ * Value can be any of: `inline`, `indexed`, `file`
+ * Default value is `"inline"`
+
+Define the type of script referenced by the "script" variable:
+
+* `inline`: "script" contains an inline script
+* `indexed`: "script" contains the name of a script directly indexed in Elasticsearch
+* `file`: "script" contains the name of a script stored in Elasticsearch's config directory
+
+[id="{version}-plugins-{type}s-{plugin}-script_var_name"]
+===== `script_var_name`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `"event"`
+
+Set the variable name passed to the script (scripted update).
+
+[id="{version}-plugins-{type}s-{plugin}-scripted_upsert"]
+===== `scripted_upsert`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `false`
+
+If enabled, the script is in charge of creating a non-existent document (scripted update).
+
+[id="{version}-plugins-{type}s-{plugin}-sniffing"]
+===== `sniffing`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `false`
+
+This setting asks Elasticsearch for the list of all cluster nodes and adds them to the hosts list.
+Note: This will return ALL nodes with HTTP enabled (including master nodes!). If you use
+this with master nodes, you probably want to disable HTTP on them by setting
+`http.enabled` to false in their elasticsearch.yml. You can either use the `sniffing` option or
+manually enter multiple Elasticsearch hosts using the `hosts` parameter.
+
+[id="{version}-plugins-{type}s-{plugin}-sniffing_delay"]
+===== `sniffing_delay`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `5`
+
+How long to wait, in seconds, between sniffing attempts.
+
+[id="{version}-plugins-{type}s-{plugin}-sniffing_path"]
+===== `sniffing_path`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+HTTP path to be used for the sniffing requests. The default value is computed
+by concatenating the `path` value and "_nodes/http". If `sniffing_path` is set,
+it is used as an absolute path; do not use a full URL here, only a path,
+e.g. "/sniff/_nodes/http".
+
+[id="{version}-plugins-{type}s-{plugin}-ssl"]
+===== `ssl`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * There is no default value for this setting.
+
+Enable SSL/TLS secured communication to the Elasticsearch cluster. Leaving this unspecified will use whatever scheme
+is specified in the URLs listed in 'hosts'. If no explicit protocol is specified, plain HTTP will be used.
+If SSL is explicitly disabled here, the plugin will refuse to start if an HTTPS URL is given in 'hosts'.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_certificate_verification"]
+===== `ssl_certificate_verification`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `true`
+
+Option to validate the server's certificate. Disabling this severely compromises security.
+For more information on disabling certificate verification please read
+https://www.cs.utexas.edu/~shmat/shmat_ccs12.pdf
+
+[id="{version}-plugins-{type}s-{plugin}-template"]
+===== `template`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#path[path]
+ * There is no default value for this setting.
+
+You can set the path to your own template here, if you so desire.
+If not set, the included template will be used.
+
+[id="{version}-plugins-{type}s-{plugin}-template_name"]
+===== `template_name`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `"logstash"`
+
+This configuration option defines how the template is named inside Elasticsearch.
+Note that if you have used the template management features and subsequently
+change this, you will need to prune the old template manually, e.g.
+
+`curl -XDELETE http://localhost:9200/_template/OldTemplateName?pretty`
+
+where `OldTemplateName` is whatever the former setting was.
+
+[id="{version}-plugins-{type}s-{plugin}-template_overwrite"]
+===== `template_overwrite`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `false`
+
+The template_overwrite option will always overwrite the indicated template
+in Elasticsearch with either the one indicated by template or the included one.
+This option is set to false by default. If you always want to stay up to date
+with the template provided by Logstash, this option could be very useful to you.
+Likewise, if you have your own template file managed by Puppet, for example, and
+you wanted to be able to update it regularly, this option could help there as well.
+
+Please note that if you are using your own customized version of the Logstash
+template (logstash), setting this to true will make Logstash overwrite
+the "logstash" template (i.e. remove all customized settings).
+
+[id="{version}-plugins-{type}s-{plugin}-timeout"]
+===== `timeout`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `60`
+
+Set the timeout, in seconds, for network operations and requests sent to Elasticsearch. If
+a timeout occurs, the request will be retried.
+
+[id="{version}-plugins-{type}s-{plugin}-truststore"]
+===== `truststore`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#path[path]
+ * There is no default value for this setting.
+
+The JKS truststore to validate the server's certificate.
+Use either `:truststore` or `:cacert`.
+
+[id="{version}-plugins-{type}s-{plugin}-truststore_password"]
+===== `truststore_password`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#password[password]
+ * There is no default value for this setting.
+
+Set the truststore password.
+
+[id="{version}-plugins-{type}s-{plugin}-upsert"]
+===== `upsert`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `""`
+
+Set upsert content for update mode.
+Create a new document with this parameter as a JSON string if `document_id` doesn't exist.
+
+[id="{version}-plugins-{type}s-{plugin}-user"]
+===== `user`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+Username to authenticate to a secure Elasticsearch cluster.
+
+[id="{version}-plugins-{type}s-{plugin}-validate_after_inactivity"]
+===== `validate_after_inactivity`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `10000`
+
+How long to wait, in milliseconds, before checking whether a keepalive connection
+is stale before executing a request on it.
+You may want to set this lower if you get connection errors regularly.
+Quoting the Apache Commons docs (this client is based on Apache Commons):
+'Defines period of inactivity in milliseconds after which persistent connections must
+be re-validated prior to being leased to the consumer. Non-positive value passed to
+this method disables connection validation. This check helps detect connections that
+have become stale (half-closed) while kept inactive in the pool.'
+See https://hc.apache.org/httpcomponents-client-ga/httpclient/apidocs/org/apache/http/impl/conn/PoolingHttpClientConnectionManager.html#setValidateAfterInactivity(int)[these docs for more info]
+
+[id="{version}-plugins-{type}s-{plugin}-version"]
+===== `version`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+The version to use for indexing. Use sprintf syntax like `%{my_version}` to use a field value here.
+See https://www.elastic.co/blog/elasticsearch-versioning-support.
+
+[id="{version}-plugins-{type}s-{plugin}-version_type"]
+===== `version_type`
+
+ * Value can be any of: `internal`, `external`, `external_gt`, `external_gte`, `force`
+ * There is no default value for this setting.
+
+The version_type to use for indexing.
+See https://www.elastic.co/blog/elasticsearch-versioning-support.
+See also https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-index_.html#_version_types
+
+
+
+include::{include_path}/{type}.asciidoc[]
diff --git a/docs/versioned-plugins/outputs/elasticsearch-v7.3.3.asciidoc b/docs/versioned-plugins/outputs/elasticsearch-v7.3.3.asciidoc
new file mode 100644
index 000000000..6ddc5e069
--- /dev/null
+++ b/docs/versioned-plugins/outputs/elasticsearch-v7.3.3.asciidoc
@@ -0,0 +1,679 @@
+:plugin: elasticsearch
+:type: output
+
+///////////////////////////////////////////
START - GENERATED VARIABLES, DO NOT EDIT!
///////////////////////////////////////////
+:version: v7.3.3
+:release_date: 2017-06-05
+:changelog_url: https://github.com/logstash-plugins/logstash-output-elasticsearch/blob/v7.3.3/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
END - GENERATED VARIABLES, DO NOT EDIT!
///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Elasticsearch output plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+.Compatibility Note
+[NOTE]
+================================================================================
+Starting with Elasticsearch 5.3, there's an {ref}/modules-http.html[HTTP setting]
+called `http.content_type.required`. If this option is set to `true`, and you
+are using Logstash 2.4 through 5.2, you need to update the Elasticsearch output
+plugin to version 6.2.5 or higher.
+
+================================================================================
+
+This plugin is the recommended method of storing logs in Elasticsearch.
+If you plan on using the Kibana web interface, you'll want to use this output.
+
+This output only speaks the HTTP protocol. HTTP is the preferred protocol for interacting with Elasticsearch as of Logstash 2.0.
+We strongly encourage the use of HTTP over the node protocol for a number of reasons. HTTP is only marginally slower,
+yet far easier to administer and work with. When using the HTTP protocol one may upgrade Elasticsearch versions without having
+to upgrade Logstash in lock-step.
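+
+As a minimal, hypothetical starting point, an output section like the following
+sketch sends events over HTTP to a local node (the host is a placeholder, and
+`index` is shown with its default value for clarity):
+
+    output {
+      elasticsearch {
+        hosts => ["localhost:9200"]
+        index => "logstash-%{+YYYY.MM.dd}"   # default index pattern, shown explicitly
+      }
+    }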
+
+You can learn more about Elasticsearch at https://www.elastic.co/products/elasticsearch
+
+==== Template management for Elasticsearch 5.x
+The index template for this version (Logstash 5.0) has been changed to reflect Elasticsearch's mapping changes in version 5.0.
+Most importantly, the subfield for string multi-fields has changed from `.raw` to `.keyword` to match ES default
+behavior.
+
+**Users installing ES 5.x and LS 5.x**
+This change will not affect you and you will continue to use the ES defaults.
+
+**Users upgrading from LS 2.x to LS 5.x with ES 5.x**
+LS will not force-upgrade the template if a `logstash` template already exists. This means you will still use
+`.raw` for sub-fields coming from 2.x. If you choose to use the new template, you will have to reindex your data after
+the new template is installed.
+
+==== Retry Policy
+
+The retry policy has changed significantly in the 2.2.0 release.
+This plugin uses the Elasticsearch bulk API to optimize its imports into Elasticsearch. These requests may experience
+either partial or total failures.
+
+The following errors are retried infinitely:
+
+- Network errors (inability to connect)
+- 429 (Too Many Requests) errors
+- 503 (Service Unavailable) errors
+
+NOTE: 409 exceptions are no longer retried. Please set a higher `retry_on_conflict` value if you experience 409 exceptions.
+It is more performant for Elasticsearch to retry these exceptions than this plugin.
+
+==== Batch Sizes
+This plugin attempts to send batches of events as a single request. However, if
+a request exceeds 20MB we will break it up into multiple batch requests. If a single document exceeds 20MB it will be sent as a single request.
+
+==== DNS Caching
+
+This plugin uses the JVM to look up DNS entries and is subject to the value of https://docs.oracle.com/javase/7/docs/technotes/guides/net/properties.html[networkaddress.cache.ttl],
+a global setting for the JVM.
+
+As an example, to set your DNS TTL to 1 second you would set
+the `LS_JAVA_OPTS` environment variable to `-Dnetworkaddress.cache.ttl=1`.
+
+Keep in mind that a connection with keepalive enabled will
+not reevaluate its DNS value while the keepalive is in effect.
+
+==== HTTP Compression
+
+This plugin supports request and response compression. Response compression is enabled by default, and
+for Elasticsearch versions 5.0 and later, the user doesn't have to set any configs in Elasticsearch for
+it to send back compressed responses. For versions before 5.0, `http.compression` must be set to `true` in
+https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-http.html#modules-http[Elasticsearch] to take advantage of response compression when using this plugin.
+
+For request compression, regardless of the Elasticsearch version, users have to enable the `http_compression`
+setting in their Logstash config file.
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Elasticsearch Output Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-action>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-bulk_path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-cacert>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-doc_as_upsert>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-document_id>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-document_type>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-failure_type_logging_whitelist>> |{logstash-ref}/configuration-file-structure.html#array[array]|No
+| <<{version}-plugins-{type}s-{plugin}-healthcheck_path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-hosts>> |{logstash-ref}/configuration-file-structure.html#uri[uri]|No
+| <<{version}-plugins-{type}s-{plugin}-http_compression>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-index>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-keystore>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-keystore_password>> |{logstash-ref}/configuration-file-structure.html#password[password]|No
+| <<{version}-plugins-{type}s-{plugin}-manage_template>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-parameters>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No
+| <<{version}-plugins-{type}s-{plugin}-parent>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-password>> |{logstash-ref}/configuration-file-structure.html#password[password]|No
+| <<{version}-plugins-{type}s-{plugin}-path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-pipeline>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-pool_max>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+| <<{version}-plugins-{type}s-{plugin}-pool_max_per_route>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+| <<{version}-plugins-{type}s-{plugin}-proxy>> |{logstash-ref}/configuration-file-structure.html#uri[uri]|No
+| <<{version}-plugins-{type}s-{plugin}-resurrect_delay>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+| <<{version}-plugins-{type}s-{plugin}-retry_initial_interval>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+| <<{version}-plugins-{type}s-{plugin}-retry_max_interval>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+| <<{version}-plugins-{type}s-{plugin}-retry_on_conflict>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+| <<{version}-plugins-{type}s-{plugin}-routing>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-script>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-script_lang>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-script_type>> |{logstash-ref}/configuration-file-structure.html#string[string], one of `["inline", "indexed", "file"]`|No
+| <<{version}-plugins-{type}s-{plugin}-script_var_name>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-scripted_upsert>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-sniffing>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-sniffing_delay>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+| <<{version}-plugins-{type}s-{plugin}-sniffing_path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-ssl>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_verification>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-template>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-template_name>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-template_overwrite>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-timeout>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+| <<{version}-plugins-{type}s-{plugin}-truststore>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-truststore_password>> |{logstash-ref}/configuration-file-structure.html#password[password]|No
+| <<{version}-plugins-{type}s-{plugin}-upsert>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-user>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-validate_after_inactivity>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+| <<{version}-plugins-{type}s-{plugin}-version>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-version_type>> |{logstash-ref}/configuration-file-structure.html#string[string], one of `["internal", "external", "external_gt", "external_gte", "force"]`|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+output plugins.
+
+&#160;
+
+[id="{version}-plugins-{type}s-{plugin}-action"]
+===== `action`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `"index"`
+
+The Elasticsearch action to perform. Valid actions are:
+
+- index: indexes a document (an event from Logstash).
+- delete: deletes a document by id (an id is required for this action).
+- create: indexes a document, fails if a document by that id already exists in the index.
+- update: updates a document by id. Update has a special case where you can upsert -- update a
+  document if not already present. See the `upsert` option. NOTE: This does not work and is not supported
+  in Elasticsearch 1.x. Please upgrade to ES 2.x or greater to use this feature with Logstash!
+- A sprintf style string to change the action based on the content of the event. The value `%{[foo]}`
+  would use the foo field for the action.
+
+For more details on actions, check out the http://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html[Elasticsearch bulk API documentation]
+
+[id="{version}-plugins-{type}s-{plugin}-bulk_path"]
+===== `bulk_path`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+HTTP path to perform the _bulk requests to.
+This defaults to a concatenation of the `path` parameter and "_bulk".
+
+[id="{version}-plugins-{type}s-{plugin}-cacert"]
+===== `cacert`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#path[path]
+ * There is no default value for this setting.
+
+The .cer or .pem file to validate the server's certificate.
+
+[id="{version}-plugins-{type}s-{plugin}-doc_as_upsert"]
+===== `doc_as_upsert`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `false`
+
+Enable `doc_as_upsert` for update mode.
+Create a new document with source if `document_id` doesn't exist in Elasticsearch.
+
+[id="{version}-plugins-{type}s-{plugin}-document_id"]
+===== `document_id`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+The document ID for the index. Useful for overwriting existing entries in
+Elasticsearch with the same ID.
+
+[id="{version}-plugins-{type}s-{plugin}-document_type"]
+===== `document_type`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+The document type to write events to. Generally you should try to write only
+similar events to the same 'type'. String expansion `%{foo}` works here.
+Unless you set 'document_type', the event 'type' will be used if it exists;
+otherwise, the document type will be assigned the value of 'logs'.
+
+[id="{version}-plugins-{type}s-{plugin}-failure_type_logging_whitelist"]
+===== `failure_type_logging_whitelist`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#array[array]
+ * Default value is `[]`
+
+Set the Elasticsearch errors in the whitelist that you don't want to log.
+A useful example is when you want to skip all 409 errors,
+which are `document_already_exists_exception`.
+
+[id="{version}-plugins-{type}s-{plugin}-flush_size"]
+===== `flush_size` (DEPRECATED)
+
+ * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions.
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * There is no default value for this setting.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-healthcheck_path"]
+===== `healthcheck_path`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+HTTP path where a HEAD request is sent when a backend is marked down.
+The request is sent in the background to see if the backend has come back again
+before it is once again eligible to service requests.
+If you have custom firewall rules you may need to change this.
+
+[id="{version}-plugins-{type}s-{plugin}-hosts"]
+===== `hosts`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#uri[uri]
+ * Default value is `[//127.0.0.1]`
+
+Sets the host(s) of the remote instance. If given an array it will load balance requests across the hosts specified in the `hosts` parameter.
+Remember the `http` protocol uses the http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-http.html#modules-http[http] address (e.g. 9200, not 9300).
+     `"127.0.0.1"`
+     `["127.0.0.1:9200","127.0.0.2:9200"]`
+     `["http://127.0.0.1"]`
+     `["https://127.0.0.1:9200"]`
+     `["https://127.0.0.1:9200/mypath"]` (If using a proxy on a subpath)
+It is important to exclude http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-node.html[dedicated master nodes] from the `hosts` list
+to prevent LS from sending bulk requests to the master nodes. So this parameter should only reference either data or client nodes in Elasticsearch.
+
+Any special characters present in the URLs here MUST be URL escaped! This means `#` should be put in as `%23` for instance.
+
+[id="{version}-plugins-{type}s-{plugin}-http_compression"]
+===== `http_compression`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `false`
+
+Enable gzip compression on requests. Note that response compression is on by default for Elasticsearch v5.0 and beyond.
+
+[id="{version}-plugins-{type}s-{plugin}-idle_flush_time"]
+===== `idle_flush_time` (DEPRECATED)
+
+ * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions.
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `1`
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-index"]
+===== `index`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `"logstash-%{+YYYY.MM.dd}"`
+
+The index to write events to. This can be dynamic using the `%{foo}` syntax.
+The default value will partition your indices by day so you can more easily
+delete old data or only search specific date ranges.
+Indexes may not contain uppercase characters.
+For weekly indexes ISO 8601 format is recommended, e.g. logstash-%{+xxxx.ww}.
+LS uses Joda to format the index pattern from event timestamp.
+Joda formats are defined http://www.joda.org/joda-time/apidocs/org/joda/time/format/DateTimeFormat.html[here].
+
+[id="{version}-plugins-{type}s-{plugin}-keystore"]
+===== `keystore`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#path[path]
+ * There is no default value for this setting.
+
+The keystore used to present a certificate to the server.
+It can be either .jks or .p12
+
+[id="{version}-plugins-{type}s-{plugin}-keystore_password"]
+===== `keystore_password`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#password[password]
+ * There is no default value for this setting.
+
+Set the keystore password.
+
+[id="{version}-plugins-{type}s-{plugin}-manage_template"]
+===== `manage_template`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `true`
+
+From Logstash 1.3 onwards, a template is applied to Elasticsearch during
+Logstash's startup if one with the name `template_name` does not already exist.
+By default, the contents of this template are the default template for
+`logstash-%{+YYYY.MM.dd}`, which always matches indices based on the pattern
+`logstash-*`. Should you require support for other index names, or would like
+to change the mappings in the template in general, a custom template can be
+specified by setting `template` to the path of a template file.
+
+Setting `manage_template` to false disables this feature. If you require more
+control over template creation (e.g. creating indices dynamically based on
+field names), you should set `manage_template` to false and use the REST
+API to apply your templates manually.
+
+[id="{version}-plugins-{type}s-{plugin}-parameters"]
+===== `parameters`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash]
+ * There is no default value for this setting.
+
+Pass a set of key-value pairs as the URL query string. This query string is added
+to every host listed in the 'hosts' configuration. If the 'hosts' list contains
+URLs that already have query strings, the one specified here will be appended.
+
+[id="{version}-plugins-{type}s-{plugin}-parent"]
+===== `parent`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `nil`
+
+For child documents, ID of the associated parent.
+This can be dynamic using the `%{foo}` syntax.
+
+[id="{version}-plugins-{type}s-{plugin}-password"]
+===== `password`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#password[password]
+ * There is no default value for this setting.
+
+Password to authenticate to a secure Elasticsearch cluster.
+
+[id="{version}-plugins-{type}s-{plugin}-path"]
+===== `path`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+HTTP path at which the Elasticsearch server lives. Use this if you must run
+Elasticsearch behind a proxy that remaps the root path of the Elasticsearch
+HTTP API. Note that if you use paths as components of URLs in the 'hosts'
+field, you may not also set this field; doing both will raise an error at startup.
+
+[id="{version}-plugins-{type}s-{plugin}-pipeline"]
+===== `pipeline`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `nil`
+
+Set which ingest pipeline you wish to execute for an event. You can also use event dependent configuration
+here, like `pipeline => "%{INGEST_PIPELINE}"`.
+
+[id="{version}-plugins-{type}s-{plugin}-pool_max"]
+===== `pool_max`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `1000`
+
+While the output tries to reuse connections efficiently, there is a maximum.
+This sets the maximum number of open connections the output will create.
+Setting this too low may mean frequently closing/opening connections,
+which is bad.
+
+[id="{version}-plugins-{type}s-{plugin}-pool_max_per_route"]
+===== `pool_max_per_route`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `100`
+
+While the output tries to reuse connections efficiently, there is a maximum per endpoint.
+This sets the maximum number of open connections per endpoint the output will create.
+Setting this too low may mean frequently closing/opening connections,
+which is bad.
+
+[id="{version}-plugins-{type}s-{plugin}-proxy"]
+===== `proxy`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#uri[uri]
+ * There is no default value for this setting.
+
+Set the address of a forward HTTP proxy.
+This used to accept hashes as arguments but now only accepts
+arguments of the URI type to prevent leaking credentials.
+
+[id="{version}-plugins-{type}s-{plugin}-resurrect_delay"]
+===== `resurrect_delay`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `5`
+
+How long, in seconds, to wait between resurrection attempts.
+Resurrection is the process by which backend endpoints marked 'down' are checked
+to see if they have come back to life.
+
+[id="{version}-plugins-{type}s-{plugin}-retry_initial_interval"]
+===== `retry_initial_interval`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `2`
+
+Set the initial interval, in seconds, between bulk retries. Doubled on each retry up to `retry_max_interval`.
+
+[id="{version}-plugins-{type}s-{plugin}-retry_max_interval"]
+===== `retry_max_interval`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `64`
+
+Set the maximum interval, in seconds, between bulk retries.
+
+[id="{version}-plugins-{type}s-{plugin}-retry_on_conflict"]
+===== `retry_on_conflict`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `1`
+
+The number of times Elasticsearch should internally retry an update/upsert on a document.
+See the https://www.elastic.co/guide/en/elasticsearch/guide/current/partial-updates.html[partial updates]
+documentation for more info.
+
+[id="{version}-plugins-{type}s-{plugin}-routing"]
+===== `routing`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+A routing override to be applied to all processed events.
+This can be dynamic using the `%{foo}` syntax.
+
+[id="{version}-plugins-{type}s-{plugin}-script"]
+===== `script`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `""`
+
+Set the script name for scripted update mode.
+
+[id="{version}-plugins-{type}s-{plugin}-script_lang"]
+===== `script_lang`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `"painless"`
+
+Set the language of the used script. If not set, this defaults to painless in ES 5.0.
+
+[id="{version}-plugins-{type}s-{plugin}-script_type"]
+===== `script_type`
+
+ * Value can be any of: `inline`, `indexed`, `file`
+ * Default value is `"inline"`
+
+Define the type of script referenced by the "script" variable:
+
+* `inline`: "script" contains an inline script
+* `indexed`: "script" contains the name of a script directly indexed in Elasticsearch
+* `file`: "script" contains the name of a script stored in Elasticsearch's config directory
+
+[id="{version}-plugins-{type}s-{plugin}-script_var_name"]
+===== `script_var_name`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `"event"`
+
+Set the variable name passed to the script (scripted update).
+
+[id="{version}-plugins-{type}s-{plugin}-scripted_upsert"]
+===== `scripted_upsert`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `false`
+
+If enabled, the script is in charge of creating a non-existent document (scripted update).
+
+[id="{version}-plugins-{type}s-{plugin}-sniffing"]
+===== `sniffing`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `false`
+
+This setting asks Elasticsearch for the list of all cluster nodes and adds them to the hosts list.
+Note: This will return ALL nodes with HTTP enabled (including master nodes!). If you use
+this with master nodes, you probably want to disable HTTP on them by setting
+`http.enabled` to false in their elasticsearch.yml. You can either use the `sniffing` option or
+manually enter multiple Elasticsearch hosts using the `hosts` parameter.
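+
+For illustration, a hypothetical configuration that enables sniffing against a
+non-master node might look like this sketch (the host name and delay are
+placeholders):
+
+    output {
+      elasticsearch {
+        hosts => ["data-node-1:9200"]   # hypothetical data node; exclude master nodes
+        sniffing => true
+        sniffing_delay => 10            # seconds between sniffing attempts
+      }
+    }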
+
+[id="{version}-plugins-{type}s-{plugin}-sniffing_delay"]
+===== `sniffing_delay`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `5`
+
+How long to wait, in seconds, between sniffing attempts.
+
+[id="{version}-plugins-{type}s-{plugin}-sniffing_path"]
+===== `sniffing_path`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+HTTP path to be used for the sniffing requests. The default value is computed
+by concatenating the `path` value and "_nodes/http". If `sniffing_path` is set,
+it is used as an absolute path; do not use a full URL here, only a path,
+e.g. "/sniff/_nodes/http".
+
+[id="{version}-plugins-{type}s-{plugin}-ssl"]
+===== `ssl`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * There is no default value for this setting.
+
+Enable SSL/TLS secured communication to the Elasticsearch cluster. Leaving this unspecified will use whatever scheme
+is specified in the URLs listed in 'hosts'. If no explicit protocol is specified, plain HTTP will be used.
+If SSL is explicitly disabled here, the plugin will refuse to start if an HTTPS URL is given in 'hosts'.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_certificate_verification"]
+===== `ssl_certificate_verification`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `true`
+
+Option to validate the server's certificate. Disabling this severely compromises security.
+For more information on disabling certificate verification please read
+https://www.cs.utexas.edu/~shmat/shmat_ccs12.pdf
+
+[id="{version}-plugins-{type}s-{plugin}-template"]
+===== `template`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#path[path]
+ * There is no default value for this setting.
+
+You can set the path to your own template here, if you so desire.
+If not set, the included template will be used.
+
+[id="{version}-plugins-{type}s-{plugin}-template_name"]
+===== `template_name`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `"logstash"`
+
+This configuration option defines how the template is named inside Elasticsearch.
+Note that if you have used the template management features and subsequently
+change this, you will need to prune the old template manually, e.g.
+
+`curl -XDELETE http://localhost:9200/_template/OldTemplateName?pretty`
+
+where `OldTemplateName` is whatever the former setting was.
+
+[id="{version}-plugins-{type}s-{plugin}-template_overwrite"]
+===== `template_overwrite`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `false`
+
+The template_overwrite option will always overwrite the indicated template
+in Elasticsearch with either the one indicated by template or the included one.
+This option is set to false by default. If you always want to stay up to date
+with the template provided by Logstash, this option could be very useful to you.
+Likewise, if you have your own template file managed by Puppet, for example, and
+you wanted to be able to update it regularly, this option could help there as well.
+
+Please note that if you are using your own customized version of the Logstash
+template (logstash), setting this to true will make Logstash overwrite
+the "logstash" template (i.e. remove all customized settings).
+
+[id="{version}-plugins-{type}s-{plugin}-timeout"]
+===== `timeout`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `60`
+
+Set the timeout, in seconds, for network operations and requests sent to Elasticsearch. If
+a timeout occurs, the request will be retried.
+
+[id="{version}-plugins-{type}s-{plugin}-truststore"]
+===== `truststore`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#path[path]
+ * There is no default value for this setting.
+
+The JKS truststore to validate the server's certificate.
+Use either `:truststore` or `:cacert`.
+
+[id="{version}-plugins-{type}s-{plugin}-truststore_password"]
+===== `truststore_password`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#password[password]
+ * There is no default value for this setting.
+
+Set the truststore password.
+
+[id="{version}-plugins-{type}s-{plugin}-upsert"]
+===== `upsert`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `""`
+
+Set upsert content for update mode.
+Create a new document with this parameter as a JSON string if `document_id` doesn't exist.
+
+[id="{version}-plugins-{type}s-{plugin}-user"]
+===== `user`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+Username to authenticate to a secure Elasticsearch cluster.
+
+[id="{version}-plugins-{type}s-{plugin}-validate_after_inactivity"]
+===== `validate_after_inactivity`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `10000`
+
+How long to wait, in milliseconds, before checking whether a keepalive connection
+is stale before executing a request on it.
+You may want to set this lower if you get connection errors regularly.
+Quoting the Apache Commons docs (this client is based on Apache Commons):
+'Defines period of inactivity in milliseconds after which persistent connections must
+be re-validated prior to being leased to the consumer. Non-positive value passed to
+this method disables connection validation. This check helps detect connections that
+have become stale (half-closed) while kept inactive in the pool.'
+See https://hc.apache.org/httpcomponents-client-ga/httpclient/apidocs/org/apache/http/impl/conn/PoolingHttpClientConnectionManager.html#setValidateAfterInactivity(int)[these docs for more info]
+
+[id="{version}-plugins-{type}s-{plugin}-version"]
+===== `version`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+The version to use for indexing. Use sprintf syntax like `%{my_version}` to use a field value here.
+See https://www.elastic.co/blog/elasticsearch-versioning-support.
+
+[id="{version}-plugins-{type}s-{plugin}-version_type"]
+===== `version_type`
+
+ * Value can be any of: `internal`, `external`, `external_gt`, `external_gte`, `force`
+ * There is no default value for this setting.
+
+The version_type to use for indexing.
+See https://www.elastic.co/blog/elasticsearch-versioning-support.
+See also https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-index_.html#_version_types
+
+
+
+include::{include_path}/{type}.asciidoc[]
diff --git a/docs/versioned-plugins/outputs/elasticsearch-v7.3.4.asciidoc b/docs/versioned-plugins/outputs/elasticsearch-v7.3.4.asciidoc
new file mode 100644
index 000000000..0a4460ac3
--- /dev/null
+++ b/docs/versioned-plugins/outputs/elasticsearch-v7.3.4.asciidoc
@@ -0,0 +1,679 @@
+:plugin: elasticsearch
+:type: output
+
+///////////////////////////////////////////
START - GENERATED VARIABLES, DO NOT EDIT!
///////////////////////////////////////////
+:version: v7.3.4
+:release_date: 2017-06-06
+:changelog_url: https://github.com/logstash-plugins/logstash-output-elasticsearch/blob/v7.3.4/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
END - GENERATED VARIABLES, DO NOT EDIT!
///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Elasticsearch output plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+.Compatibility Note
+[NOTE]
+================================================================================
+Starting with Elasticsearch 5.3, there's an {ref}/modules-http.html[HTTP setting]
+called `http.content_type.required`. If this option is set to `true`, and you
+are using Logstash 2.4 through 5.2, you need to update the Elasticsearch output
+plugin to version 6.2.5 or higher.
+
+================================================================================
+
+This plugin is the recommended method of storing logs in Elasticsearch.
+If you plan on using the Kibana web interface, you'll want to use this output.
+
+This output only speaks the HTTP protocol. HTTP is the preferred protocol for interacting with Elasticsearch as of Logstash 2.0.
+We strongly encourage the use of HTTP over the node protocol for a number of reasons. HTTP is only marginally slower,
+yet far easier to administer and work with. When using the HTTP protocol one may upgrade Elasticsearch versions without having
+to upgrade Logstash in lock-step.
+
+You can learn more about Elasticsearch at https://www.elastic.co/products/elasticsearch
+
+==== Template management for Elasticsearch 5.x
+The index template for this version (Logstash 5.0) has been changed to reflect Elasticsearch's mapping changes in version 5.0.
+Most importantly, the subfield for string multi-fields has changed from `.raw` to `.keyword` to match ES default
+behavior.
+
+**Users installing ES 5.x and LS 5.x**
+This change will not affect you and you will continue to use the ES defaults.
+
+**Users upgrading from LS 2.x to LS 5.x with ES 5.x**
+LS will not force-upgrade the template if a `logstash` template already exists. This means you will still use
+`.raw` for sub-fields coming from 2.x. If you choose to use the new template, you will have to reindex your data after
+the new template is installed.
+
+==== Retry Policy
+
+The retry policy has changed significantly in the 2.2.0 release.
+This plugin uses the Elasticsearch bulk API to optimize its imports into Elasticsearch. These requests may experience
+either partial or total failures.
+
+The following errors are retried infinitely:
+
+- Network errors (inability to connect)
+- 429 (Too Many Requests) errors
+- 503 (Service Unavailable) errors
+
+NOTE: 409 exceptions are no longer retried. Please set a higher `retry_on_conflict` value if you experience 409 exceptions.
+It is more performant for Elasticsearch to retry these exceptions than this plugin.
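+
+As a hedged illustration of that advice, an update-mode configuration that
+raises `retry_on_conflict` instead of relying on 409 retries might look like
+the following sketch (the `document_id` field is a hypothetical placeholder):
+
+    output {
+      elasticsearch {
+        hosts => ["localhost:9200"]
+        action => "update"
+        document_id => "%{fingerprint}"   # hypothetical id field on the event
+        retry_on_conflict => 5            # let Elasticsearch retry version conflicts internally
+      }
+    }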
+
+==== Batch Sizes
+This plugin attempts to send batches of events as a single request. However, if
+a request exceeds 20MB we will break it up into multiple batch requests. If a single document exceeds 20MB it will be sent as a single request.
+
+==== DNS Caching
+
+This plugin uses the JVM to look up DNS entries and is subject to the value of https://docs.oracle.com/javase/7/docs/technotes/guides/net/properties.html[networkaddress.cache.ttl],
+a global setting for the JVM.
+
+As an example, to set your DNS TTL to 1 second you would set
+the `LS_JAVA_OPTS` environment variable to `-Dnetworkaddress.cache.ttl=1`.
+
+Keep in mind that a connection with keepalive enabled will
+not reevaluate its DNS value while the keepalive is in effect.
+
+==== HTTP Compression
+
+This plugin supports request and response compression. Response compression is enabled by default, and
+for Elasticsearch versions 5.0 and later, the user doesn't have to set any configs in Elasticsearch for
+it to send back compressed responses. For versions before 5.0, `http.compression` must be set to `true` in
+https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-http.html#modules-http[Elasticsearch] to take advantage of response compression when using this plugin.
+
+For request compression, regardless of the Elasticsearch version, users have to enable the `http_compression`
+setting in their Logstash config file.
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Elasticsearch Output Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-action>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-bulk_path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-cacert>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-doc_as_upsert>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-document_id>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-document_type>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-failure_type_logging_whitelist>> |{logstash-ref}/configuration-file-structure.html#array[array]|No
+| <<{version}-plugins-{type}s-{plugin}-healthcheck_path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-hosts>> |{logstash-ref}/configuration-file-structure.html#uri[uri]|No
+| <<{version}-plugins-{type}s-{plugin}-http_compression>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-index>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-keystore>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-keystore_password>> |{logstash-ref}/configuration-file-structure.html#password[password]|No
+| <<{version}-plugins-{type}s-{plugin}-manage_template>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-parameters>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No
+| <<{version}-plugins-{type}s-{plugin}-parent>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-password>> |{logstash-ref}/configuration-file-structure.html#password[password]|No
+| <<{version}-plugins-{type}s-{plugin}-path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-pipeline>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-pool_max>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+| <<{version}-plugins-{type}s-{plugin}-pool_max_per_route>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+| <<{version}-plugins-{type}s-{plugin}-proxy>> |{logstash-ref}/configuration-file-structure.html#uri[uri]|No
+| <<{version}-plugins-{type}s-{plugin}-resurrect_delay>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+| <<{version}-plugins-{type}s-{plugin}-retry_initial_interval>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+| <<{version}-plugins-{type}s-{plugin}-retry_max_interval>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+| <<{version}-plugins-{type}s-{plugin}-retry_on_conflict>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+| <<{version}-plugins-{type}s-{plugin}-routing>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-script>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-script_lang>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-script_type>> |{logstash-ref}/configuration-file-structure.html#string[string], one of `["inline", "indexed", "file"]`|No
+| <<{version}-plugins-{type}s-{plugin}-script_var_name>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-scripted_upsert>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-sniffing>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-sniffing_delay>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+| <<{version}-plugins-{type}s-{plugin}-sniffing_path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-ssl>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_verification>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-template>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-template_name>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-template_overwrite>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-timeout>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+| <<{version}-plugins-{type}s-{plugin}-truststore>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-truststore_password>> |{logstash-ref}/configuration-file-structure.html#password[password]|No
+| <<{version}-plugins-{type}s-{plugin}-upsert>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-user>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-validate_after_inactivity>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+| <<{version}-plugins-{type}s-{plugin}-version>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-version_type>> |{logstash-ref}/configuration-file-structure.html#string[string], one of `["internal", "external", "external_gt", "external_gte", "force"]`|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+output plugins.
+
+&#160;
+
+[id="{version}-plugins-{type}s-{plugin}-action"]
+===== `action`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `"index"`
+
+The Elasticsearch action to perform. Valid actions are:
+
+- index: indexes a document (an event from Logstash).
+- delete: deletes a document by id (an id is required for this action).
+- create: indexes a document, fails if a document by that id already exists in the index.
+- update: updates a document by id. Update has a special case where you can upsert -- update a
+  document if not already present. See the `upsert` option. NOTE: This does not work and is not supported
+  in Elasticsearch 1.x. Please upgrade to ES 2.x or greater to use this feature with Logstash!
+- A sprintf style string to change the action based on the content of the event. The value `%{[foo]}`
+  would use the foo field for the action.
+
+For more details on actions, check out the http://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html[Elasticsearch bulk API documentation]
+
+[id="{version}-plugins-{type}s-{plugin}-bulk_path"]
+===== `bulk_path`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+HTTP path to perform the _bulk requests to.
+This defaults to a concatenation of the `path` parameter and "_bulk".
+
+[id="{version}-plugins-{type}s-{plugin}-cacert"]
+===== `cacert`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#path[path]
+ * There is no default value for this setting.
+
+The .cer or .pem file to validate the server's certificate.
+
+[id="{version}-plugins-{type}s-{plugin}-doc_as_upsert"]
+===== `doc_as_upsert`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `false`
+
+Enable `doc_as_upsert` for update mode.
+Create a new document with source if `document_id` doesn't exist in Elasticsearch.
+
+[id="{version}-plugins-{type}s-{plugin}-document_id"]
+===== `document_id`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+The document ID for the index. Useful for overwriting existing entries in
+Elasticsearch with the same ID.
+
+[id="{version}-plugins-{type}s-{plugin}-document_type"]
+===== `document_type`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+The document type to write events to. Generally you should try to write only
+similar events to the same 'type'. String expansion `%{foo}` works here.
+Unless you set 'document_type', the event 'type' will be used if it exists;
+otherwise, the document type will be assigned the value of 'logs'.
+
+[id="{version}-plugins-{type}s-{plugin}-failure_type_logging_whitelist"]
+===== `failure_type_logging_whitelist`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#array[array]
+ * Default value is `[]`
+
+Set the Elasticsearch errors in the whitelist that you don't want to log.
+A useful example is when you want to skip all 409 errors,
+which are `document_already_exists_exception`.
+
+[id="{version}-plugins-{type}s-{plugin}-flush_size"]
+===== `flush_size` (DEPRECATED)
+
+ * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions.
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * There is no default value for this setting.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-healthcheck_path"]
+===== `healthcheck_path`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+HTTP path where a HEAD request is sent when a backend is marked down.
+The request is sent in the background to see if the backend has come back again
+before it is once again eligible to service requests.
+If you have custom firewall rules you may need to change this.
+
+[id="{version}-plugins-{type}s-{plugin}-hosts"]
+===== `hosts`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#uri[uri]
+ * Default value is `[//127.0.0.1]`
+
+Sets the host(s) of the remote instance. If given an array it will load balance requests across the hosts specified in the `hosts` parameter.
+Remember the `http` protocol uses the http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-http.html#modules-http[http] address (e.g. 9200, not 9300).
+     `"127.0.0.1"`
+     `["127.0.0.1:9200","127.0.0.2:9200"]`
+     `["http://127.0.0.1"]`
+     `["https://127.0.0.1:9200"]`
+     `["https://127.0.0.1:9200/mypath"]` (If using a proxy on a subpath)
+It is important to exclude http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-node.html[dedicated master nodes] from the `hosts` list
+to prevent LS from sending bulk requests to the master nodes. So this parameter should only reference either data or client nodes in Elasticsearch.
+
+Any special characters present in the URLs here MUST be URL escaped! This means `#` should be put in as `%23` for instance.
+
+[id="{version}-plugins-{type}s-{plugin}-http_compression"]
+===== `http_compression`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `false`
+
+Enable gzip compression on requests. Note that response compression is on by default for Elasticsearch v5.0 and beyond.
+
+[id="{version}-plugins-{type}s-{plugin}-idle_flush_time"]
+===== `idle_flush_time` (DEPRECATED)
+
+ * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions.
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `1`
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-index"]
+===== `index`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `"logstash-%{+YYYY.MM.dd}"`
+
+The index to write events to. This can be dynamic using the `%{foo}` syntax.
+The default value will partition your indices by day so you can more easily
+delete old data or only search specific date ranges.
+Indexes may not contain uppercase characters.
+
+[id="{version}-plugins-{type}s-{plugin}-keystore"]
+===== `keystore`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#path[path]
+ * There is no default value for this setting.
+
+The keystore used to present a certificate to the server.
+It can be either .jks or .p12.
+
+[id="{version}-plugins-{type}s-{plugin}-keystore_password"]
+===== `keystore_password`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#password[password]
+ * There is no default value for this setting.
+
+Set the keystore password.
+
+[id="{version}-plugins-{type}s-{plugin}-manage_template"]
+===== `manage_template`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `true`
+
+From Logstash 1.3 onwards, a template is applied to Elasticsearch during
+Logstash's startup if one with the name `template_name` does not already exist.
+By default, the contents of this template is the default template for
+`logstash-%{+YYYY.MM.dd}` which always matches indices based on the pattern
+`logstash-*`. Should you require support for other index names, or would like
+to change the mappings in the template in general, a custom template can be
+specified by setting `template` to the path of a template file.
+
+Setting `manage_template` to false disables this feature. If you require more
+control over template creation (e.g. creating indices dynamically based on
+field names) you should set `manage_template` to false and use the REST
+API to apply your templates manually.
+
+[id="{version}-plugins-{type}s-{plugin}-parameters"]
+===== `parameters`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash]
+ * There is no default value for this setting.
+
+Pass a set of key value pairs as the URL query string. This query string is added
+to every host listed in the 'hosts' configuration. If the 'hosts' list contains
+urls that already have query strings, the one specified here will be appended.
+
+[id="{version}-plugins-{type}s-{plugin}-parent"]
+===== `parent`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `nil`
+
+For child documents, ID of the associated parent.
+This can be dynamic using the `%{foo}` syntax.
+
+[id="{version}-plugins-{type}s-{plugin}-password"]
+===== `password`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#password[password]
+ * There is no default value for this setting.
+
+Password to authenticate to a secure Elasticsearch cluster.
+
+[id="{version}-plugins-{type}s-{plugin}-path"]
+===== `path`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+HTTP Path at which the Elasticsearch server lives. Use this if you must run Elasticsearch behind a proxy that remaps
+the root path of the Elasticsearch HTTP API.
+Note that if you use paths as components of URLs in the 'hosts' field you may
+not also set this field. That will raise an error at startup.
+
+[id="{version}-plugins-{type}s-{plugin}-pipeline"]
+===== `pipeline`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `nil`
+
+Set which ingest pipeline you wish to execute for an event. You can also use event dependent configuration
+here like `pipeline => "%{INGEST_PIPELINE}"`.
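+
+For example, a minimal sketch (the pipeline name is hypothetical) that runs every event through a fixed ingest pipeline:
+
+    output {
+      elasticsearch {
+        pipeline => "apache-log-pipeline"
+      }
+    }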
+
+[id="{version}-plugins-{type}s-{plugin}-pool_max"]
+===== `pool_max`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `1000`
+
+While the output tries to reuse connections efficiently we have a maximum.
+This sets the maximum number of open connections the output will create.
+Setting this too low may mean frequently closing / opening connections
+which is bad.
+
+[id="{version}-plugins-{type}s-{plugin}-pool_max_per_route"]
+===== `pool_max_per_route`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `100`
+
+While the output tries to reuse connections efficiently we have a maximum per endpoint.
+This sets the maximum number of open connections per endpoint the output will create.
+Setting this too low may mean frequently closing / opening connections
+which is bad.
+
+[id="{version}-plugins-{type}s-{plugin}-proxy"]
+===== `proxy`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#uri[uri]
+ * There is no default value for this setting.
+
+Set the address of a forward HTTP proxy.
+This used to accept hashes as arguments but now only accepts
+arguments of the URI type to prevent leaking credentials.
+
+[id="{version}-plugins-{type}s-{plugin}-resurrect_delay"]
+===== `resurrect_delay`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `5`
+
+How long, in seconds, to wait between resurrection attempts.
+Resurrection is the process by which backend endpoints marked 'down' are checked
+to see if they have come back to life.
+
+[id="{version}-plugins-{type}s-{plugin}-retry_initial_interval"]
+===== `retry_initial_interval`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `2`
+
+Set initial interval in seconds between bulk retries. Doubled on each retry up to `retry_max_interval`.
+
+[id="{version}-plugins-{type}s-{plugin}-retry_max_interval"]
+===== `retry_max_interval`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `64`
+
+Set max interval in seconds between bulk retries.
+
+[id="{version}-plugins-{type}s-{plugin}-retry_on_conflict"]
+===== `retry_on_conflict`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `1`
+
+The number of times Elasticsearch should internally retry an update/upserted document.
+See the https://www.elastic.co/guide/en/elasticsearch/guide/current/partial-updates.html[partial updates] documentation
+for more info.
+
+[id="{version}-plugins-{type}s-{plugin}-routing"]
+===== `routing`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+A routing override to be applied to all processed events.
+This can be dynamic using the `%{foo}` syntax.
+
+[id="{version}-plugins-{type}s-{plugin}-script"]
+===== `script`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `""`
+
+Set script name for scripted update mode.
+
+[id="{version}-plugins-{type}s-{plugin}-script_lang"]
+===== `script_lang`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `"painless"`
+
+Set the language of the used script. If not set, this defaults to painless in ES 5.0.
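+
+As a sketch of scripted updates using this option together with the `script_type`, `script_var_name`, and `scripted_upsert` options described below (the script name and ID field are hypothetical):
+
+    output {
+      elasticsearch {
+        action      => "update"
+        document_id => "%{[user_id]}"
+        script      => "counter-increment"   # name of a script indexed in Elasticsearch
+        script_type => "indexed"
+        script_lang => "painless"
+      }
+    }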
+
+[id="{version}-plugins-{type}s-{plugin}-script_type"]
+===== `script_type`
+
+ * Value can be any of: `inline`, `indexed`, `file`
+ * Default value is `["inline"]`
+
+Define the type of script referenced by the "script" variable:
+
+* `inline`: "script" contains an inline script
+* `indexed`: "script" contains the name of a script directly indexed in Elasticsearch
+* `file`: "script" contains the name of a script stored in Elasticsearch's config directory
+
+[id="{version}-plugins-{type}s-{plugin}-script_var_name"]
+===== `script_var_name`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `"event"`
+
+Set variable name passed to script (scripted update).
+
+[id="{version}-plugins-{type}s-{plugin}-scripted_upsert"]
+===== `scripted_upsert`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `false`
+
+If enabled, the script is in charge of creating non-existent documents (scripted update).
+
+[id="{version}-plugins-{type}s-{plugin}-sniffing"]
+===== `sniffing`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `false`
+
+This setting asks Elasticsearch for the list of all cluster nodes and adds them to the hosts list.
+Note: This will return ALL nodes with HTTP enabled (including master nodes!). If you use
+this with master nodes, you probably want to disable HTTP on them by setting
+`http.enabled` to false in their elasticsearch.yml. You can either use the `sniffing` option or
+manually enter multiple Elasticsearch hosts using the `hosts` parameter.
+
+[id="{version}-plugins-{type}s-{plugin}-sniffing_delay"]
+===== `sniffing_delay`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `5`
+
+How long to wait, in seconds, between sniffing attempts.
+
+[id="{version}-plugins-{type}s-{plugin}-sniffing_path"]
+===== `sniffing_path`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+HTTP Path to be used for the sniffing requests.
+The default value is computed by concatenating the path value and "_nodes/http".
+If `sniffing_path` is set it will be used as an absolute path.
+Do not use a full URL here, only paths, e.g. "/sniff/_nodes/http".
+
+[id="{version}-plugins-{type}s-{plugin}-ssl"]
+===== `ssl`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * There is no default value for this setting.
+
+Enable SSL/TLS secured communication to the Elasticsearch cluster. Leaving this unspecified will use whatever scheme
+is specified in the URLs listed in 'hosts'. If no explicit protocol is specified plain HTTP will be used.
+If SSL is explicitly disabled here the plugin will refuse to start if an HTTPS URL is given in 'hosts'.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_certificate_verification"]
+===== `ssl_certificate_verification`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `true`
+
+Option to validate the server's certificate. Disabling this severely compromises security.
+For more information on disabling certificate verification please read
+https://www.cs.utexas.edu/~shmat/shmat_ccs12.pdf
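+
+Putting the TLS and authentication options together, an illustrative sketch (the host, certificate path, and credentials are placeholders):
+
+    output {
+      elasticsearch {
+        hosts    => ["https://es.example.org:9200"]
+        ssl      => true
+        cacert   => "/etc/logstash/certs/ca.pem"
+        user     => "logstash_writer"
+        password => "changeme"
+      }
+    }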
+
+[id="{version}-plugins-{type}s-{plugin}-template"]
+===== `template`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#path[path]
+ * There is no default value for this setting.
+
+You can set the path to your own template here, if you so desire.
+If not set, the included template will be used.
+
+[id="{version}-plugins-{type}s-{plugin}-template_name"]
+===== `template_name`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `"logstash"`
+
+This configuration option defines how the template is named inside Elasticsearch.
+Note that if you have used the template management features and subsequently
+change this, you will need to prune the old template manually, e.g.
+
+`curl -XDELETE `
+
+where `OldTemplateName` is whatever the former setting was.
+
+[id="{version}-plugins-{type}s-{plugin}-template_overwrite"]
+===== `template_overwrite`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `false`
+
+The template_overwrite option will always overwrite the indicated template
+in Elasticsearch with either the one indicated by template or the included one.
+This option is set to false by default. If you always want to stay up to date
+with the template provided by Logstash, this option could be very useful to you.
+Likewise, if you have your own template file managed by puppet, for example, and
+you wanted to be able to update it regularly, this option could help there as well.
+
+Please note that if you are using your own customized version of the Logstash
+template (logstash), setting this to true will make Logstash overwrite
+the "logstash" template (i.e. removing all customized settings).
+
+[id="{version}-plugins-{type}s-{plugin}-timeout"]
+===== `timeout`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `60`
+
+Set the timeout, in seconds, for network operations and requests sent to Elasticsearch. If
+a timeout occurs, the request will be retried.
+
+[id="{version}-plugins-{type}s-{plugin}-truststore"]
+===== `truststore`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#path[path]
+ * There is no default value for this setting.
+
+The JKS truststore to validate the server's certificate.
+Use either `:truststore` or `:cacert`.
+
+[id="{version}-plugins-{type}s-{plugin}-truststore_password"]
+===== `truststore_password`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#password[password]
+ * There is no default value for this setting.
+
+Set the truststore password.
+
+[id="{version}-plugins-{type}s-{plugin}-upsert"]
+===== `upsert`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `""`
+
+Set upsert content for update mode.
+Create a new document with this parameter as its JSON string if `document_id` doesn't exist.
+
+[id="{version}-plugins-{type}s-{plugin}-user"]
+===== `user`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+Username to authenticate to a secure Elasticsearch cluster.
+
+[id="{version}-plugins-{type}s-{plugin}-validate_after_inactivity"]
+===== `validate_after_inactivity`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `10000`
+
+How long to wait before checking if the connection is stale before executing a request on a connection using keepalive.
+You may want to set this lower if you get connection errors regularly.
+Quoting the Apache commons docs (this client is based on Apache Commons):
+'Defines period of inactivity in milliseconds after which persistent connections must
+be re-validated prior to being leased to the consumer. Non-positive value passed to
+this method disables connection validation. This check helps detect connections that
+have become stale (half-closed) while kept inactive in the pool.'
+See https://hc.apache.org/httpcomponents-client-ga/httpclient/apidocs/org/apache/http/impl/conn/PoolingHttpClientConnectionManager.html#setValidateAfterInactivity(int)[these docs for more info]
+
+[id="{version}-plugins-{type}s-{plugin}-version"]
+===== `version`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+The version to use for indexing. Use sprintf syntax like `%{my_version}` to use a field value here.
+See https://www.elastic.co/blog/elasticsearch-versioning-support.
+
+[id="{version}-plugins-{type}s-{plugin}-version_type"]
+===== `version_type`
+
+ * Value can be any of: `internal`, `external`, `external_gt`, `external_gte`, `force`
+ * There is no default value for this setting.
+
+The version_type to use for indexing.
+See https://www.elastic.co/blog/elasticsearch-versioning-support.
+See also https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-index_.html#_version_types
+
+
+include::{include_path}/{type}.asciidoc[]
diff --git a/docs/versioned-plugins/outputs/elasticsearch-v7.3.5.asciidoc b/docs/versioned-plugins/outputs/elasticsearch-v7.3.5.asciidoc
new file mode 100644
index 000000000..8e73d5923
--- /dev/null
+++ b/docs/versioned-plugins/outputs/elasticsearch-v7.3.5.asciidoc
@@ -0,0 +1,679 @@
+:plugin: elasticsearch
+:type: output
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v7.3.5
+:release_date: 2017-06-09
+:changelog_url: https://github.com/logstash-plugins/logstash-output-elasticsearch/blob/v7.3.5/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Elasticsearch output plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+.Compatibility Note
+[NOTE]
+================================================================================
+Starting with Elasticsearch 5.3, there's an {ref}modules-http.html[HTTP setting]
+called `http.content_type.required`. If this option is set to `true`, and you
+are using Logstash 2.4 through 5.2, you need to update the Elasticsearch output
+plugin to version 6.2.5 or higher.
+
+================================================================================
+
+This plugin is the recommended method of storing logs in Elasticsearch.
+If you plan on using the Kibana web interface, you'll want to use this output.
+
+This output only speaks the HTTP protocol. HTTP is the preferred protocol for interacting with Elasticsearch as of Logstash 2.0.
+We strongly encourage the use of HTTP over the node protocol for a number of reasons. HTTP is only marginally slower,
+yet far easier to administer and work with. When using the HTTP protocol one may upgrade Elasticsearch versions without having
+to upgrade Logstash in lock-step.
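+
+As a point of reference, a minimal configuration sketch (the host and index values are placeholders, not required defaults):
+
+    output {
+      elasticsearch {
+        hosts => ["127.0.0.1:9200"]
+        index => "myapp-%{+YYYY.MM.dd}"
+      }
+    }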
+
+You can learn more about Elasticsearch at
+
+==== Template management for Elasticsearch 5.x
+Index template for this version (Logstash 5.0) has been changed to reflect Elasticsearch's mapping changes in version 5.0.
+Most importantly, the subfield for string multi-fields has changed from `.raw` to `.keyword` to match ES default
+behavior.
+
+**Users installing ES 5.x and LS 5.x**
+This change will not affect you and you will continue to use the ES defaults.
+
+**Users upgrading from LS 2.x to LS 5.x with ES 5.x**
+LS will not force-upgrade the template if the `logstash` template already exists. This means you will still use
+`.raw` for sub-fields coming from 2.x. If you choose to use the new template, you will have to reindex your data after
+the new template is installed.
+
+==== Retry Policy
+
+The retry policy has changed significantly in the 2.2.0 release.
+This plugin uses the Elasticsearch bulk API to optimize its imports into Elasticsearch. These requests may experience
+either partial or total failures.
+
+The following errors are retried infinitely:
+
+- Network errors (inability to connect)
+- 429 (Too many requests) and
+- 503 (Service unavailable) errors
+
+NOTE: 409 exceptions are no longer retried. Please set a higher `retry_on_conflict` value if you experience 409 exceptions.
+It is more performant for Elasticsearch to retry these exceptions than this plugin.
+
+==== Batch Sizes
+This plugin attempts to send batches of events as a single request. However, if
+a request exceeds 20MB we will break it up into multiple batch requests. If a single document exceeds 20MB it will be sent as a single request.
+
+==== DNS Caching
+
+This plugin uses the JVM to look up DNS entries and is subject to the value of https://docs.oracle.com/javase/7/docs/technotes/guides/net/properties.html[networkaddress.cache.ttl],
+a global setting for the JVM.
+
+As an example, to set your DNS TTL to 1 second you would set
+the `LS_JAVA_OPTS` environment variable to `-Dnetworkaddress.cache.ttl=1`.
+
+Keep in mind that a connection with keepalive enabled will
+not reevaluate its DNS value while the keepalive is in effect.
+
+==== HTTP Compression
+
+This plugin supports request and response compression. Response compression is enabled by default and
+for Elasticsearch versions 5.0 and later, the user doesn't have to set any configs in Elasticsearch for
+it to send back a compressed response. For versions before 5.0, `http.compression` must be set to `true` in
+https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-http.html#modules-http[Elasticsearch] to take advantage of response compression when using this plugin.
+
+For request compression, regardless of the Elasticsearch version, users have to enable the `http_compression`
+setting in their Logstash config file.
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Elasticsearch Output Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+ +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-action>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-bulk_path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-cacert>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-doc_as_upsert>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-document_id>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-document_type>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-failure_type_logging_whitelist>> |{logstash-ref}/configuration-file-structure.html#array[array]|No +| <<{version}-plugins-{type}s-{plugin}-healthcheck_path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-hosts>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-http_compression>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-index>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-keystore>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-keystore_password>> |{logstash-ref}/configuration-file-structure.html#password[password]|No +| <<{version}-plugins-{type}s-{plugin}-manage_template>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-parameters>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No +| <<{version}-plugins-{type}s-{plugin}-parent>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-password>> |{logstash-ref}/configuration-file-structure.html#password[password]|No +| <<{version}-plugins-{type}s-{plugin}-path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-pipeline>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-pool_max>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-pool_max_per_route>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-proxy>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-resurrect_delay>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-retry_initial_interval>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-retry_max_interval>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-retry_on_conflict>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-routing>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-script>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-script_lang>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-script_type>> 
|{logstash-ref}/configuration-file-structure.html#string[string], one of `["inline", "indexed", "file"]`|No
+| <<{version}-plugins-{type}s-{plugin}-script_var_name>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-scripted_upsert>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-sniffing>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-sniffing_delay>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+| <<{version}-plugins-{type}s-{plugin}-sniffing_path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-ssl>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_verification>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-template>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-template_name>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-template_overwrite>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-timeout>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+| <<{version}-plugins-{type}s-{plugin}-truststore>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-truststore_password>> |{logstash-ref}/configuration-file-structure.html#password[password]|No
+| <<{version}-plugins-{type}s-{plugin}-upsert>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-user>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-validate_after_inactivity>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+| <<{version}-plugins-{type}s-{plugin}-version>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-version_type>> |{logstash-ref}/configuration-file-structure.html#string[string], one of `["internal", "external", "external_gt", "external_gte", "force"]`|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+output plugins.
+
+ 
+
+[id="{version}-plugins-{type}s-{plugin}-action"]
+===== `action`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `"index"`
+
+The Elasticsearch action to perform. Valid actions are:
+
+- index: indexes a document (an event from Logstash).
+- delete: deletes a document by id (an id is required for this action).
+- create: indexes a document; fails if a document by that id already exists in the index.
+- update: updates a document by id. Update has a special case where you can upsert -- update a
+ document if not already present. See the `upsert` option. NOTE: This does not work and is not supported
+ in Elasticsearch 1.x. Please upgrade to ES 2.x or greater to use this feature with Logstash!
+- A sprintf style string to change the action based on the content of the event. The value `%{[foo]}`
+ would use the foo field for the action.
+
+For more details on actions, check out the http://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html[Elasticsearch bulk API documentation].
+
+[id="{version}-plugins-{type}s-{plugin}-bulk_path"]
+===== `bulk_path`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+HTTP Path to perform the _bulk requests to.
+This defaults to a concatenation of the `path` parameter and "_bulk".
+
+[id="{version}-plugins-{type}s-{plugin}-cacert"]
+===== `cacert`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#path[path]
+ * There is no default value for this setting.
+
+The .cer or .pem file to validate the server's certificate.
+
+[id="{version}-plugins-{type}s-{plugin}-doc_as_upsert"]
+===== `doc_as_upsert`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `false`
+
+Enable `doc_as_upsert` for update mode.
+Create a new document with source if `document_id` doesn't exist in Elasticsearch.
+
+[id="{version}-plugins-{type}s-{plugin}-document_id"]
+===== `document_id`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+The document ID for the index. Useful for overwriting existing entries in
+Elasticsearch with the same ID.
+
+[id="{version}-plugins-{type}s-{plugin}-document_type"]
+===== `document_type`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+The document type to write events to. Generally you should try to write only
+similar events to the same 'type'. String expansion `%{foo}` works here.
+Unless you set 'document_type', the event 'type' will be used if it exists;
+otherwise the document type will be assigned the value of 'logs'.
+
+[id="{version}-plugins-{type}s-{plugin}-failure_type_logging_whitelist"]
+===== `failure_type_logging_whitelist`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#array[array]
+ * Default value is `[]`
+
+Set the Elasticsearch errors in the whitelist that you don't want to log.
+A useful example is when you want to skip all 409 errors,
+which are `document_already_exists_exception`.
+
+[id="{version}-plugins-{type}s-{plugin}-flush_size"]
+===== `flush_size`  (DEPRECATED)
+
+ * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions.
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * There is no default value for this setting.
+
+
+[id="{version}-plugins-{type}s-{plugin}-healthcheck_path"]
+===== `healthcheck_path`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+HTTP Path where a HEAD request is sent when a backend is marked down.
+The request is sent in the background to see if it has come back again
+before it is once again eligible to service requests.
+If you have custom firewall rules you may need to change this.
+
+[id="{version}-plugins-{type}s-{plugin}-hosts"]
+===== `hosts`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#uri[uri]
+ * Default value is `[//127.0.0.1]`
+
+Sets the host(s) of the remote instance. If given an array it will load balance requests across the hosts specified in the `hosts` parameter.
+Remember the `http` protocol uses the http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-http.html#modules-http[http] address (e.g. 9200, not 9300).
+ `"127.0.0.1"`
+ `["127.0.0.1:9200","127.0.0.2:9200"]`
+ `["http://127.0.0.1"]`
+ `["https://127.0.0.1:9200"]`
+ `["https://127.0.0.1:9200/mypath"]` (If using a proxy on a subpath)
+It is important to exclude http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-node.html[dedicated master nodes] from the `hosts` list
+to prevent LS from sending bulk requests to the master nodes. So this parameter should only reference either data or client nodes in Elasticsearch.
+
+Any special characters present in the URLs here MUST be URL escaped! This means `#` should be put in as `%23` for instance.
+
+[id="{version}-plugins-{type}s-{plugin}-http_compression"]
+===== `http_compression`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `false`
+
+Enable gzip compression on requests. Note that response compression is on by default for Elasticsearch v5.0 and beyond.
+
+[id="{version}-plugins-{type}s-{plugin}-idle_flush_time"]
+===== `idle_flush_time`  (DEPRECATED)
+
+ * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions.
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `1`
+
+
+[id="{version}-plugins-{type}s-{plugin}-index"]
+===== `index`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `"logstash-%{+YYYY.MM.dd}"`
+
+The index to write events to. This can be dynamic using the `%{foo}` syntax.
+The default value will partition your indices by day so you can more easily
+delete old data or only search specific date ranges.
+Indexes may not contain uppercase characters.
+For weekly indexes ISO 8601 format is recommended, e.g. `logstash-%{+xxxx.ww}`.
+LS uses Joda to format the index pattern from event timestamp.
+Joda formats are defined http://www.joda.org/joda-time/apidocs/org/joda/time/format/DateTimeFormat.html[here].
+
+[id="{version}-plugins-{type}s-{plugin}-keystore"]
+===== `keystore`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#path[path]
+ * There is no default value for this setting.
+
+The keystore used to present a certificate to the server.
+It can be either .jks or .p12.
+
+[id="{version}-plugins-{type}s-{plugin}-keystore_password"]
+===== `keystore_password`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#password[password]
+ * There is no default value for this setting.
+
+Set the keystore password.
+
+[id="{version}-plugins-{type}s-{plugin}-manage_template"]
+===== `manage_template`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `true`
+
+From Logstash 1.3 onwards, a template is applied to Elasticsearch during
+Logstash's startup if one with the name `template_name` does not already exist.
+By default, the contents of this template is the default template for
+`logstash-%{+YYYY.MM.dd}` which always matches indices based on the pattern
+`logstash-*`. Should you require support for other index names, or would like
+to change the mappings in the template in general, a custom template can be
+specified by setting `template` to the path of a template file.
+
+Setting `manage_template` to false disables this feature. If you require more
+control over template creation (e.g.
creating indices dynamically based on
+field names) you should set `manage_template` to false and use the REST
+API to apply your templates manually.
+
+[id="{version}-plugins-{type}s-{plugin}-parameters"]
+===== `parameters`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash]
+ * There is no default value for this setting.
+
+Pass a set of key value pairs as the URL query string. This query string is added
+to every host listed in the 'hosts' configuration. If the 'hosts' list contains
+urls that already have query strings, the one specified here will be appended.
+
+[id="{version}-plugins-{type}s-{plugin}-parent"]
+===== `parent`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `nil`
+
+For child documents, ID of the associated parent.
+This can be dynamic using the `%{foo}` syntax.
+
+[id="{version}-plugins-{type}s-{plugin}-password"]
+===== `password`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#password[password]
+ * There is no default value for this setting.
+
+Password to authenticate to a secure Elasticsearch cluster.
+
+[id="{version}-plugins-{type}s-{plugin}-path"]
+===== `path`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+HTTP Path at which the Elasticsearch server lives. Use this if you must run Elasticsearch behind a proxy that remaps
+the root path of the Elasticsearch HTTP API.
+Note that if you use paths as components of URLs in the 'hosts' field you may
+not also set this field. That will raise an error at startup.
+
+[id="{version}-plugins-{type}s-{plugin}-pipeline"]
+===== `pipeline`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `nil`
+
+Set which ingest pipeline you wish to execute for an event. You can also use event dependent configuration
+here like `pipeline => "%{INGEST_PIPELINE}"`.
+
+[id="{version}-plugins-{type}s-{plugin}-pool_max"]
+===== `pool_max`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `1000`
+
+While the output tries to reuse connections efficiently we have a maximum.
+This sets the maximum number of open connections the output will create.
+Setting this too low may mean frequently closing / opening connections
+which is bad.
+
+[id="{version}-plugins-{type}s-{plugin}-pool_max_per_route"]
+===== `pool_max_per_route`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `100`
+
+While the output tries to reuse connections efficiently we have a maximum per endpoint.
+This sets the maximum number of open connections per endpoint the output will create.
+Setting this too low may mean frequently closing / opening connections
+which is bad.
+
+[id="{version}-plugins-{type}s-{plugin}-proxy"]
+===== `proxy`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#uri[uri]
+ * There is no default value for this setting.
+
+Set the address of a forward HTTP proxy.
+This used to accept hashes as arguments but now only accepts
+arguments of the URI type to prevent leaking credentials.
+
+[id="{version}-plugins-{type}s-{plugin}-resurrect_delay"]
+===== `resurrect_delay`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `5`
+
+How long, in seconds, to wait between resurrection attempts.
+Resurrection is the process by which backend endpoints marked 'down' are checked
+to see if they have come back to life.
+
+[id="{version}-plugins-{type}s-{plugin}-retry_initial_interval"]
+===== `retry_initial_interval`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `2`
+
+Set initial interval in seconds between bulk retries. Doubled on each retry up to `retry_max_interval`.
+
+[id="{version}-plugins-{type}s-{plugin}-retry_max_interval"]
+===== `retry_max_interval`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `64`
+
+Set max interval in seconds between bulk retries.
+
+[id="{version}-plugins-{type}s-{plugin}-retry_on_conflict"]
+===== `retry_on_conflict`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `1`
+
+The number of times Elasticsearch should internally retry an update/upserted document.
+See the https://www.elastic.co/guide/en/elasticsearch/guide/current/partial-updates.html[partial updates] documentation
+for more info.
+
+[id="{version}-plugins-{type}s-{plugin}-routing"]
+===== `routing`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+A routing override to be applied to all processed events.
+This can be dynamic using the `%{foo}` syntax.
+
+[id="{version}-plugins-{type}s-{plugin}-script"]
+===== `script`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `""`
+
+Set script name for scripted update mode.
+
+[id="{version}-plugins-{type}s-{plugin}-script_lang"]
+===== `script_lang`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `"painless"`
+
+Set the language of the used script. If not set, this defaults to painless in ES 5.0.
+
+[id="{version}-plugins-{type}s-{plugin}-script_type"]
+===== `script_type`
+
+ * Value can be any of: `inline`, `indexed`, `file`
+ * Default value is `["inline"]`
+
+Define the type of script referenced by the "script" variable:
+
+* `inline`: "script" contains an inline script
+* `indexed`: "script" contains the name of a script directly indexed in Elasticsearch
+* `file`: "script" contains the name of a script stored in Elasticsearch's config directory
+
+[id="{version}-plugins-{type}s-{plugin}-script_var_name"]
+===== `script_var_name`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `"event"`
+
+Set variable name passed to script (scripted update).
+
+[id="{version}-plugins-{type}s-{plugin}-scripted_upsert"]
+===== `scripted_upsert`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `false`
+
+If enabled, the script is in charge of creating non-existent documents (scripted update).
+
+[id="{version}-plugins-{type}s-{plugin}-sniffing"]
+===== `sniffing`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `false`
+
+This setting asks Elasticsearch for the list of all cluster nodes and adds them to the hosts list.
+Note: This will return ALL nodes with HTTP enabled (including master nodes!). If you use
+this with master nodes, you probably want to disable HTTP on them by setting
+`http.enabled` to false in their elasticsearch.yml. You can either use the `sniffing` option or
+manually enter multiple Elasticsearch hosts using the `hosts` parameter.
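+
+For example, an illustrative sketch (the seed host name is a placeholder) that lets the plugin discover the remaining nodes instead of listing them all in `hosts`:
+
+    output {
+      elasticsearch {
+        hosts    => ["es-data-1:9200"]   # seed host; sniffed nodes are added to this list
+        sniffing => true
+      }
+    }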
+ +[id="{version}-plugins-{type}s-{plugin}-sniffing_delay"] +===== `sniffing_delay` + + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] + * Default value is `5` + +How long to wait, in seconds, between sniffing attempts + +[id="{version}-plugins-{type}s-{plugin}-sniffing_path"] +===== `sniffing_path` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * There is no default value for this setting. + +HTTP Path to be used for the sniffing requests +the default value is computed by concatenating the path value and "_nodes/http" +if sniffing_path is set it will be used as an absolute path +do not use full URL here, only paths, e.g. "/sniff/_nodes/http" + +[id="{version}-plugins-{type}s-{plugin}-ssl"] +===== `ssl` + + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] + * There is no default value for this setting. + +Enable SSL/TLS secured communication to Elasticsearch cluster. Leaving this unspecified will use whatever scheme +is specified in the URLs listed in 'hosts'. If no explicit protocol is specified plain HTTP will be used. +If SSL is explicitly disabled here the plugin will refuse to start if an HTTPS URL is given in 'hosts' + +[id="{version}-plugins-{type}s-{plugin}-ssl_certificate_verification"] +===== `ssl_certificate_verification` + + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] + * Default value is `true` + +Option to validate the server's certificate. Disabling this severely compromises security. +For more information on disabling certificate verification please read +https://www.cs.utexas.edu/~shmat/shmat_ccs12.pdf + +[id="{version}-plugins-{type}s-{plugin}-template"] +===== `template` + + * Value type is {logstash-ref}/configuration-file-structure.html#path[path] + * There is no default value for this setting. + +You can set the path to your own template here, if you so desire. +If not set, the included template will be used. + +[id="{version}-plugins-{type}s-{plugin}-template_name"] +===== `template_name` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * Default value is `"logstash"` + +This configuration option defines how the template is named inside Elasticsearch. +Note that if you have used the template management features and subsequently +change this, you will need to prune the old template manually, e.g. + +`curl -XDELETE ` + +where `OldTemplateName` is whatever the former setting was. + +[id="{version}-plugins-{type}s-{plugin}-template_overwrite"] +===== `template_overwrite` + + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] + * Default value is `false` + +The template_overwrite option will always overwrite the indicated template +in Elasticsearch with either the one indicated by template or the included one. +This option is set to false by default. If you always want to stay up to date +with the template provided by Logstash, this option could be very useful to you. +Likewise, if you have your own template file managed by puppet, for example, and +you wanted to be able to update it regularly, this option could help there as well. + +Please note that if you are using your own customized version of the Logstash +template (logstash), setting this to true will make Logstash to overwrite +the "logstash" template (i.e. 
removing all customized settings).
+
+[id="{version}-plugins-{type}s-{plugin}-timeout"]
+===== `timeout`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `60`
+
+Set the timeout, in seconds, for network operations and requests sent to Elasticsearch. If
+a timeout occurs, the request will be retried.
+
+[id="{version}-plugins-{type}s-{plugin}-truststore"]
+===== `truststore`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#path[path]
+ * There is no default value for this setting.
+
+The JKS truststore to validate the server's certificate.
+Use either `:truststore` or `:cacert`.
+
+[id="{version}-plugins-{type}s-{plugin}-truststore_password"]
+===== `truststore_password`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#password[password]
+ * There is no default value for this setting.
+
+Set the truststore password.
+
+[id="{version}-plugins-{type}s-{plugin}-upsert"]
+===== `upsert`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `""`
+
+Set upsert content for update mode.
+Create a new document with this parameter as its JSON string if `document_id` doesn't exist.
+
+[id="{version}-plugins-{type}s-{plugin}-user"]
+===== `user`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+Username to authenticate to a secure Elasticsearch cluster.
+
+[id="{version}-plugins-{type}s-{plugin}-validate_after_inactivity"]
+===== `validate_after_inactivity`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `10000`
+
+How long to wait before checking if the connection is stale before executing a request on a connection using keepalive.
+You may want to set this lower if you get connection errors regularly.
+Quoting the Apache commons docs (this client is based on Apache Commons):
+'Defines period of inactivity in milliseconds after which persistent connections must
+be re-validated prior to being leased to the consumer. Non-positive value passed to
+this method disables connection validation. This check helps detect connections that
+have become stale (half-closed) while kept inactive in the pool.'
+See https://hc.apache.org/httpcomponents-client-ga/httpclient/apidocs/org/apache/http/impl/conn/PoolingHttpClientConnectionManager.html#setValidateAfterInactivity(int)[these docs for more info]
+
+[id="{version}-plugins-{type}s-{plugin}-version"]
+===== `version`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+The version to use for indexing. Use sprintf syntax like `%{my_version}` to use a field value here.
+See https://www.elastic.co/blog/elasticsearch-versioning-support.
+
+[id="{version}-plugins-{type}s-{plugin}-version_type"]
+===== `version_type`
+
+ * Value can be any of: `internal`, `external`, `external_gt`, `external_gte`, `force`
+ * There is no default value for this setting.
+
+The version_type to use for indexing.
+See https://www.elastic.co/blog/elasticsearch-versioning-support.
+See also https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-index_.html#_version_types
+
+
+include::{include_path}/{type}.asciidoc[]
diff --git a/docs/versioned-plugins/outputs/elasticsearch-v7.3.6.asciidoc b/docs/versioned-plugins/outputs/elasticsearch-v7.3.6.asciidoc
new file mode 100644
index 000000000..81ae429de
--- /dev/null
+++ b/docs/versioned-plugins/outputs/elasticsearch-v7.3.6.asciidoc
@@ -0,0 +1,680 @@
+:plugin: elasticsearch
+:type: output
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v7.3.6
+:release_date: 2017-06-23
+:changelog_url: https://github.com/logstash-plugins/logstash-output-elasticsearch/blob/v7.3.6/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Elasticsearch output plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+.Compatibility Note
+[NOTE]
+================================================================================
+Starting with Elasticsearch 5.3, there's an {ref}modules-http.html[HTTP setting]
+called `http.content_type.required`. If this option is set to `true`, and you
+are using Logstash 2.4 through 5.2, you need to update the Elasticsearch output
+plugin to version 6.2.5 or higher.
+
+================================================================================
+
+This plugin is the recommended method of storing logs in Elasticsearch.
+If you plan on using the Kibana web interface, you'll want to use this output.
+
+This output only speaks the HTTP protocol. HTTP is the preferred protocol for interacting with Elasticsearch as of Logstash 2.0.
+We strongly encourage the use of HTTP over the node protocol for a number of reasons. HTTP is only marginally slower,
+yet far easier to administer and work with. When using the HTTP protocol one may upgrade Elasticsearch versions without having
+to upgrade Logstash in lock-step.
+
+You can learn more about Elasticsearch at
+
+==== Template management for Elasticsearch 5.x
+Index template for this version (Logstash 5.0) has been changed to reflect Elasticsearch's mapping changes in version 5.0.
+Most importantly, the subfield for string multi-fields has changed from `.raw` to `.keyword` to match ES default
+behavior.
+
+**Users installing ES 5.x and LS 5.x**
+This change will not affect you and you will continue to use the ES defaults.
+
+**Users upgrading from LS 2.x to LS 5.x with ES 5.x**
+LS will not force-upgrade the template if the `logstash` template already exists. This means you will still use
+`.raw` for sub-fields coming from 2.x. If you choose to use the new template, you will have to reindex your data after
+the new template is installed.
+
+==== Retry Policy
+
+The retry policy has changed significantly in the 2.2.0 release.
+This plugin uses the Elasticsearch bulk API to optimize its imports into Elasticsearch. These requests may experience
+either partial or total failures.
+
+The following errors are retried infinitely:
+
+- Network errors (inability to connect)
+- 429 (Too many requests) and
+- 503 (Service unavailable) errors
+
+NOTE: 409 exceptions are no longer retried. Please set a higher `retry_on_conflict` value if you experience 409 exceptions.
+It is more performant for Elasticsearch to retry these exceptions than this plugin.
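+
+For illustration, a sketch of the retry-related settings described later in this document (the values are arbitrary examples, not recommendations):
+
+    output {
+      elasticsearch {
+        retry_initial_interval => 2    # first retry after 2 seconds, doubling each attempt...
+        retry_max_interval     => 64   # ...up to a 64 second ceiling
+        retry_on_conflict      => 3    # let Elasticsearch retry conflicting (409) updates internally
+      }
+    }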
+
+==== Batch Sizes
+This plugin attempts to send batches of events as a single request. However, if
+a request exceeds 20MB we will break it up into multiple batch requests. If a single document exceeds 20MB it will be sent as a single request.
+
+==== DNS Caching
+
+This plugin uses the JVM to look up DNS entries and is subject to the value of https://docs.oracle.com/javase/7/docs/technotes/guides/net/properties.html[networkaddress.cache.ttl],
+a global setting for the JVM.
+
+As an example, to set your DNS TTL to 1 second you would set
+the `LS_JAVA_OPTS` environment variable to `-Dnetworkaddress.cache.ttl=1`.
+
+Keep in mind that a connection with keepalive enabled will
+not reevaluate its DNS value while the keepalive is in effect.
+
+==== HTTP Compression
+
+This plugin supports request and response compression. Response compression is enabled by default and
+for Elasticsearch versions 5.0 and later, the user doesn't have to set any configs in Elasticsearch for
+it to send back a compressed response. For versions before 5.0, `http.compression` must be set to `true` in
+https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-http.html#modules-http[Elasticsearch] to take advantage of response compression when using this plugin.
+
+For request compression, regardless of the Elasticsearch version, users have to enable the `http_compression`
+setting in their Logstash config file.
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Elasticsearch Output Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-action>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-bulk_path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-cacert>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-doc_as_upsert>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-document_id>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-document_type>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-failure_type_logging_whitelist>> |{logstash-ref}/configuration-file-structure.html#array[array]|No
+| <<{version}-plugins-{type}s-{plugin}-healthcheck_path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-hosts>> |{logstash-ref}/configuration-file-structure.html#uri[uri]|No
+| <<{version}-plugins-{type}s-{plugin}-http_compression>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-index>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-keystore>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-keystore_password>> |{logstash-ref}/configuration-file-structure.html#password[password]|No
+| <<{version}-plugins-{type}s-{plugin}-manage_template>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-parameters>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No
+| 
<<{version}-plugins-{type}s-{plugin}-parent>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-password>> |{logstash-ref}/configuration-file-structure.html#password[password]|No
+| <<{version}-plugins-{type}s-{plugin}-path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-pipeline>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-pool_max>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+| <<{version}-plugins-{type}s-{plugin}-pool_max_per_route>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+| <<{version}-plugins-{type}s-{plugin}-proxy>> |{logstash-ref}/configuration-file-structure.html#uri[uri]|No
+| <<{version}-plugins-{type}s-{plugin}-resurrect_delay>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+| <<{version}-plugins-{type}s-{plugin}-retry_initial_interval>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+| <<{version}-plugins-{type}s-{plugin}-retry_max_interval>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+| <<{version}-plugins-{type}s-{plugin}-retry_on_conflict>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+| <<{version}-plugins-{type}s-{plugin}-routing>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-script>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-script_lang>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-script_type>> |{logstash-ref}/configuration-file-structure.html#string[string], one of `["inline", "indexed", "file"]`|No
+| <<{version}-plugins-{type}s-{plugin}-script_var_name>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-scripted_upsert>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-sniffing>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-sniffing_delay>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+| <<{version}-plugins-{type}s-{plugin}-sniffing_path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-ssl>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_verification>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-template>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-template_name>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-template_overwrite>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-timeout>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+| <<{version}-plugins-{type}s-{plugin}-truststore>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-truststore_password>> |{logstash-ref}/configuration-file-structure.html#password[password]|No
+| <<{version}-plugins-{type}s-{plugin}-upsert>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-user>> 
+| <<{version}-plugins-{type}s-{plugin}-validate_after_inactivity>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+| <<{version}-plugins-{type}s-{plugin}-version>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-version_type>> |{logstash-ref}/configuration-file-structure.html#string[string], one of `["internal", "external", "external_gt", "external_gte", "force"]`|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+output plugins.
+
+ 
+
+[id="{version}-plugins-{type}s-{plugin}-action"]
+===== `action`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `"index"`
+
+The Elasticsearch action to perform. Valid actions are:
+
+- index: indexes a document (an event from Logstash).
+- delete: deletes a document by id (an id is required for this action).
+- create: indexes a document, fails if a document by that id already exists in the index.
+- update: updates a document by id. Update has a special case where you can upsert -- update a
+  document if not already present. See the `upsert` option. NOTE: This does not work and is not supported
+  in Elasticsearch 1.x. Please upgrade to ES 2.x or greater to use this feature with Logstash!
+- A sprintf style string to change the action based on the content of the event. The value `%{[foo]}`
+  would use the foo field for the action.
+
+For more details on actions, check out the http://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html[Elasticsearch bulk API documentation].
+
+[id="{version}-plugins-{type}s-{plugin}-bulk_path"]
+===== `bulk_path`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+HTTP Path to perform the _bulk requests to.
+This defaults to a concatenation of the `path` parameter and "_bulk".
+
+[id="{version}-plugins-{type}s-{plugin}-cacert"]
+===== `cacert`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#path[path]
+ * There is no default value for this setting.
+
+The .cer or .pem file to validate the server's certificate.
+
+[id="{version}-plugins-{type}s-{plugin}-doc_as_upsert"]
+===== `doc_as_upsert`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `false`
+
+Enable `doc_as_upsert` for update mode.
+Create a new document with source if `document_id` doesn't exist in Elasticsearch.
+
+[id="{version}-plugins-{type}s-{plugin}-document_id"]
+===== `document_id`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+The document ID for the index. Useful for overwriting existing entries in
+Elasticsearch with the same ID.
+
+[id="{version}-plugins-{type}s-{plugin}-document_type"]
+===== `document_type`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+The document type to write events to. Generally you should try to write only
+similar events to the same 'type'. String expansion `%{foo}` works here.
+Unless you set 'document_type', the event 'type' will be used if it exists;
+otherwise the document type will be assigned the value of 'logs'.
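+
+For instance, a hedged sketch of an update-with-upsert configuration combining the
+options above (the `fingerprint` field is an assumed event field used as the document ID):
+
+    output {
+      elasticsearch {
+        hosts         => ["127.0.0.1:9200"]
+        action        => "update"
+        document_id   => "%{fingerprint}"
+        doc_as_upsert => true
+      }
+    }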
+
+[id="{version}-plugins-{type}s-{plugin}-failure_type_logging_whitelist"]
+===== `failure_type_logging_whitelist`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#array[array]
+ * Default value is `[]`
+
+Set the Elasticsearch errors in the whitelist that you don't want to log.
+A useful example is when you want to skip all 409 errors,
+which are `document_already_exists_exception`.
+
+[id="{version}-plugins-{type}s-{plugin}-flush_size"]
+===== `flush_size`  (DEPRECATED)
+
+ * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions.
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * There is no default value for this setting.
+
+[id="{version}-plugins-{type}s-{plugin}-healthcheck_path"]
+===== `healthcheck_path`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+HTTP Path where a HEAD request is sent when a backend is marked down.
+The request is sent in the background to see if the backend has come back again
+before it is once again eligible to service requests.
+If you have custom firewall rules you may need to change this.
+
+[id="{version}-plugins-{type}s-{plugin}-hosts"]
+===== `hosts`
+
+ * Value type is <>
+ * Default value is `[//127.0.0.1]`
+
+Sets the host(s) of the remote instance. If given an array it will load balance requests across the hosts specified in the `hosts` parameter.
+Remember the `http` protocol uses the http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-http.html#modules-http[http] address (e.g. 9200, not 9300).
+
+     `"127.0.0.1"`
+     `["127.0.0.1:9200","127.0.0.2:9200"]`
+     `["http://127.0.0.1"]`
+     `["https://127.0.0.1:9200"]`
+     `["https://127.0.0.1:9200/mypath"]` (If using a proxy on a subpath)
+
+It is important to exclude http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-node.html[dedicated master nodes] from the `hosts` list
+to prevent LS from sending bulk requests to the master nodes. So this parameter should only reference either data or client nodes in Elasticsearch.
+
+Any special characters present in the URLs here MUST be URL escaped! This means `#` should be put in as `%23` for instance.
+
+[id="{version}-plugins-{type}s-{plugin}-http_compression"]
+===== `http_compression`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `false`
+
+Enable gzip compression on requests. Note that response compression is on by default for Elasticsearch v5.0 and beyond.
+
+[id="{version}-plugins-{type}s-{plugin}-idle_flush_time"]
+===== `idle_flush_time`  (DEPRECATED)
+
+ * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions.
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `1`
+
+[id="{version}-plugins-{type}s-{plugin}-index"]
+===== `index`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `"logstash-%{+YYYY.MM.dd}"`
+
+The index to write events to. This can be dynamic using the `%{foo}` syntax.
+The default value will partition your indices by day so you can more easily
+delete old data or only search specific date ranges.
+Indexes may not contain uppercase characters.
+For weekly indexes ISO 8601 format is recommended, e.g. logstash-%{+xxxx.ww}.
+LS uses Joda to format the index pattern from event timestamp.
+Joda formats are defined http://www.joda.org/joda-time/apidocs/org/joda/time/format/DateTimeFormat.html[here].
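+
+For example, a sketch of the weekly pattern suggested above:
+
+    output {
+      elasticsearch {
+        index => "logstash-%{+xxxx.ww}"
+      }
+    }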
+
+[id="{version}-plugins-{type}s-{plugin}-keystore"]
+===== `keystore`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#path[path]
+ * There is no default value for this setting.
+
+The keystore used to present a certificate to the server.
+It can be either .jks or .p12.
+
+[id="{version}-plugins-{type}s-{plugin}-keystore_password"]
+===== `keystore_password`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#password[password]
+ * There is no default value for this setting.
+
+Set the keystore password.
+
+[id="{version}-plugins-{type}s-{plugin}-manage_template"]
+===== `manage_template`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `true`
+
+From Logstash 1.3 onwards, a template is applied to Elasticsearch during
+Logstash's startup if one with the name `template_name` does not already exist.
+By default, the contents of this template are the default template for
+`logstash-%{+YYYY.MM.dd}` which always matches indices based on the pattern
+`logstash-*`. Should you require support for other index names, or would like
+to change the mappings in the template in general, a custom template can be
+specified by setting `template` to the path of a template file.
+
+Setting `manage_template` to false disables this feature. If you require more
+control over template creation (e.g. creating indices dynamically based on
+field names), you should set `manage_template` to false and use the REST
+API to apply your templates manually.
+
+[id="{version}-plugins-{type}s-{plugin}-parameters"]
+===== `parameters`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash]
+ * There is no default value for this setting.
+
+Pass a set of key value pairs as the URL query string. This query string is added
+to every host listed in the 'hosts' configuration. If the 'hosts' list contains
+urls that already have query strings, the one specified here will be appended.
+
+[id="{version}-plugins-{type}s-{plugin}-parent"]
+===== `parent`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `nil`
+
+For child documents, ID of the associated parent.
+This can be dynamic using the `%{foo}` syntax.
+
+[id="{version}-plugins-{type}s-{plugin}-password"]
+===== `password`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#password[password]
+ * There is no default value for this setting.
+
+Password to authenticate to a secure Elasticsearch cluster.
+
+[id="{version}-plugins-{type}s-{plugin}-path"]
+===== `path`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+HTTP Path at which the Elasticsearch server lives. Use this if you must run Elasticsearch behind a proxy that remaps
+the root path of the Elasticsearch HTTP API.
+Note that if you use paths as components of URLs in the 'hosts' field you may
+not also set this field. That will raise an error at startup.
+
+[id="{version}-plugins-{type}s-{plugin}-pipeline"]
+===== `pipeline`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `nil`
+
+Set which ingest pipeline you wish to execute for an event. You can also use event dependent configuration
+here like `pipeline => "%{INGEST_PIPELINE}"`.
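+
+For example, a sketch of an event-dependent pipeline (this assumes each event carries an
+`INGEST_PIPELINE` field naming an ingest pipeline that already exists in Elasticsearch):
+
+    output {
+      elasticsearch {
+        pipeline => "%{INGEST_PIPELINE}"
+      }
+    }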
+
+[id="{version}-plugins-{type}s-{plugin}-pool_max"]
+===== `pool_max`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `1000`
+
+While the output tries to reuse connections efficiently we have a maximum.
+This sets the maximum number of open connections the output will create.
+Setting this too low may mean frequently closing / opening connections,
+which is bad.
+
+[id="{version}-plugins-{type}s-{plugin}-pool_max_per_route"]
+===== `pool_max_per_route`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `100`
+
+While the output tries to reuse connections efficiently we have a maximum per endpoint.
+This sets the maximum number of open connections per endpoint the output will create.
+Setting this too low may mean frequently closing / opening connections,
+which is bad.
+
+[id="{version}-plugins-{type}s-{plugin}-proxy"]
+===== `proxy`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Set the address of a forward HTTP proxy.
+This used to accept hashes as arguments but now only accepts
+arguments of the URI type to prevent leaking credentials.
+
+[id="{version}-plugins-{type}s-{plugin}-resurrect_delay"]
+===== `resurrect_delay`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `5`
+
+How frequently, in seconds, to wait between resurrection attempts.
+Resurrection is the process by which backend endpoints marked 'down' are checked
+to see if they have come back to life.
+
+[id="{version}-plugins-{type}s-{plugin}-retry_initial_interval"]
+===== `retry_initial_interval`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `2`
+
+Set initial interval in seconds between bulk retries. Doubled on each retry up to `retry_max_interval`.
+
+[id="{version}-plugins-{type}s-{plugin}-retry_max_interval"]
+===== `retry_max_interval`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `64`
+
+Set max interval in seconds between bulk retries.
+
+[id="{version}-plugins-{type}s-{plugin}-retry_on_conflict"]
+===== `retry_on_conflict`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `1`
+
+The number of times Elasticsearch should internally retry an update/upserted document.
+See the https://www.elastic.co/guide/en/elasticsearch/guide/current/partial-updates.html[partial updates documentation]
+for more info.
+
+[id="{version}-plugins-{type}s-{plugin}-routing"]
+===== `routing`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+A routing override to be applied to all processed events.
+This can be dynamic using the `%{foo}` syntax.
+
+[id="{version}-plugins-{type}s-{plugin}-script"]
+===== `script`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `""`
+
+Set script name for scripted update mode.
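+
+As a hedged sketch of scripted update mode, using `script` together with the related
+options documented below (the script name `add_tags` and the `fingerprint` field are
+illustrative assumptions):
+
+    output {
+      elasticsearch {
+        action      => "update"
+        document_id => "%{fingerprint}"
+        script      => "add_tags"
+        script_type => "indexed"
+      }
+    }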
+
+[id="{version}-plugins-{type}s-{plugin}-script_lang"]
+===== `script_lang`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `"painless"`
+
+Set the language of the used script. If not set, this defaults to painless in ES 5.0.
+
+[id="{version}-plugins-{type}s-{plugin}-script_type"]
+===== `script_type`
+
+ * Value can be any of: `inline`, `indexed`, `file`
+ * Default value is `["inline"]`
+
+Define the type of script referenced by "script" variable:
+
+ inline : "script" contains inline script
+ indexed : "script" contains the name of script directly indexed in elasticsearch
+ file : "script" contains the name of script stored in elasticsearch's config directory
+
+[id="{version}-plugins-{type}s-{plugin}-script_var_name"]
+===== `script_var_name`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `"event"`
+
+Set variable name passed to script (scripted update).
+
+[id="{version}-plugins-{type}s-{plugin}-scripted_upsert"]
+===== `scripted_upsert`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `false`
+
+If enabled, the script is in charge of creating a non-existent document (scripted update).
+
+[id="{version}-plugins-{type}s-{plugin}-sniffing"]
+===== `sniffing`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `false`
+
+This setting asks Elasticsearch for the list of all cluster nodes and adds them to the hosts list.
+Note: This will return ALL nodes with HTTP enabled (including master nodes!). If you use
+this with master nodes, you probably want to disable HTTP on them by setting
+`http.enabled` to false in their elasticsearch.yml. You can either use the `sniffing` option or
+manually enter multiple Elasticsearch hosts using the `hosts` parameter.
+
+[id="{version}-plugins-{type}s-{plugin}-sniffing_delay"]
+===== `sniffing_delay`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `5`
+
+How long to wait, in seconds, between sniffing attempts.
+
+[id="{version}-plugins-{type}s-{plugin}-sniffing_path"]
+===== `sniffing_path`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+HTTP Path to be used for the sniffing requests.
+The default value is computed by concatenating the path value and "_nodes/http".
+If sniffing_path is set it will be used as an absolute path.
+Do not use a full URL here, only paths, e.g. "/sniff/_nodes/http".
+
+[id="{version}-plugins-{type}s-{plugin}-ssl"]
+===== `ssl`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * There is no default value for this setting.
+
+Enable SSL/TLS secured communication to Elasticsearch cluster. Leaving this unspecified will use whatever scheme
+is specified in the URLs listed in 'hosts'. If no explicit protocol is specified plain HTTP will be used.
+If SSL is explicitly disabled here the plugin will refuse to start if an HTTPS URL is given in 'hosts'.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_certificate_verification"]
+===== `ssl_certificate_verification`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `true`
+
+Option to validate the server's certificate. Disabling this severely compromises security.
+For more information on disabling certificate verification please read
+https://www.cs.utexas.edu/~shmat/shmat_ccs12.pdf
+
+[id="{version}-plugins-{type}s-{plugin}-template"]
+===== `template`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#path[path]
+ * There is no default value for this setting.
+
+You can set the path to your own template here, if you so desire.
+If not set, the included template will be used.
+
+[id="{version}-plugins-{type}s-{plugin}-template_name"]
+===== `template_name`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `"logstash"`
+
+This configuration option defines how the template is named inside Elasticsearch.
+Note that if you have used the template management features and subsequently
+change this, you will need to prune the old template manually, e.g.
+
+`curl -XDELETE `
+
+where `OldTemplateName` is whatever the former setting was.
+
+[id="{version}-plugins-{type}s-{plugin}-template_overwrite"]
+===== `template_overwrite`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `false`
+
+The template_overwrite option will always overwrite the indicated template
+in Elasticsearch with either the one indicated by template or the included one.
+This option is set to false by default. If you always want to stay up to date
+with the template provided by Logstash, this option could be very useful to you.
+Likewise, if you have your own template file managed by puppet, for example, and
+you wanted to be able to update it regularly, this option could help there as well.
+
+Please note that if you are using your own customized version of the Logstash
+template (logstash), setting this to true will make Logstash overwrite
+the "logstash" template (i.e. remove all customized settings).
+
+[id="{version}-plugins-{type}s-{plugin}-timeout"]
+===== `timeout`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `60`
+
+Set the timeout, in seconds, for network operations and requests sent to Elasticsearch. If
+a timeout occurs, the request will be retried.
+
+[id="{version}-plugins-{type}s-{plugin}-truststore"]
+===== `truststore`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#path[path]
+ * There is no default value for this setting.
+
+The JKS truststore to validate the server's certificate.
+Use either `:truststore` or `:cacert`.
+
+[id="{version}-plugins-{type}s-{plugin}-truststore_password"]
+===== `truststore_password`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#password[password]
+ * There is no default value for this setting.
+
+Set the truststore password.
+
+[id="{version}-plugins-{type}s-{plugin}-upsert"]
+===== `upsert`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `""`
+
+Set upsert content for update mode.
+Create a new document with this parameter as json string if `document_id` doesn't exist.
+
+[id="{version}-plugins-{type}s-{plugin}-user"]
+===== `user`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+Username to authenticate to a secure Elasticsearch cluster.
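+
+For instance, a sketch of authenticating against a secured cluster (the credentials are
+placeholders, not real defaults):
+
+    output {
+      elasticsearch {
+        hosts    => ["https://127.0.0.1:9200"]
+        ssl      => true
+        user     => "logstash_writer"
+        password => "changeme"
+      }
+    }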
+
+[id="{version}-plugins-{type}s-{plugin}-validate_after_inactivity"]
+===== `validate_after_inactivity`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `10000`
+
+How long to wait, in milliseconds, before checking whether a keepalive connection
+is stale before executing a request on it.
+You may want to set this lower if you get connection errors regularly.
+Quoting the Apache commons docs (this client is based on Apache Commons):
+'Defines period of inactivity in milliseconds after which persistent connections must
+be re-validated prior to being leased to the consumer. Non-positive value passed to
+this method disables connection validation. This check helps detect connections that
+have become stale (half-closed) while kept inactive in the pool.'
+See https://hc.apache.org/httpcomponents-client-ga/httpclient/apidocs/org/apache/http/impl/conn/PoolingHttpClientConnectionManager.html#setValidateAfterInactivity(int)[these docs for more info]
+
+[id="{version}-plugins-{type}s-{plugin}-version"]
+===== `version`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+The version to use for indexing. Use sprintf syntax like `%{my_version}` to use a field value here.
+See https://www.elastic.co/blog/elasticsearch-versioning-support.
+
+[id="{version}-plugins-{type}s-{plugin}-version_type"]
+===== `version_type`
+
+ * Value can be any of: `internal`, `external`, `external_gt`, `external_gte`, `force`
+ * There is no default value for this setting.
+
+The version_type to use for indexing.
+See https://www.elastic.co/blog/elasticsearch-versioning-support.
+See also https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-index_.html#_version_types
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/outputs/elasticsearch-v7.3.7.asciidoc b/docs/versioned-plugins/outputs/elasticsearch-v7.3.7.asciidoc
new file mode 100644
index 000000000..67f6679fc
--- /dev/null
+++ b/docs/versioned-plugins/outputs/elasticsearch-v7.3.7.asciidoc
@@ -0,0 +1,680 @@
+:plugin: elasticsearch
+:type: output
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v7.3.7
+:release_date: 2017-07-18
+:changelog_url: https://github.com/logstash-plugins/logstash-output-elasticsearch/blob/v7.3.7/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Elasticsearch output plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+.Compatibility Note
+[NOTE]
+================================================================================
+Starting with Elasticsearch 5.3, there's an {ref}/modules-http.html[HTTP setting]
+called `http.content_type.required`. If this option is set to `true`, and you
+are using Logstash 2.4 through 5.2, you need to update the Elasticsearch output
+plugin to version 6.2.5 or higher.
+
+================================================================================
+
+This plugin is the recommended method of storing logs in Elasticsearch.
+If you plan on using the Kibana web interface, you'll want to use this output.
+
+This output only speaks the HTTP protocol. HTTP is the preferred protocol for interacting with Elasticsearch as of Logstash 2.0.
+We strongly encourage the use of HTTP over the node protocol for a number of reasons. HTTP is only marginally slower,
+yet far easier to administer and work with.
+When using the HTTP protocol one may upgrade Elasticsearch versions without having
+to upgrade Logstash in lock-step.
+
+You can learn more about Elasticsearch at
+
+==== Template management for Elasticsearch 5.x
+
+The index template for this version (Logstash 5.0) has been changed to reflect Elasticsearch's mapping changes in version 5.0.
+Most importantly, the subfield for string multi-fields has changed from `.raw` to `.keyword` to match ES default
+behavior.
+
+** Users installing ES 5.x and LS 5.x **
+This change will not affect you and you will continue to use the ES defaults.
+
+** Users upgrading from LS 2.x to LS 5.x with ES 5.x **
+LS will not force-upgrade the template if a `logstash` template already exists. This means you will still use
+`.raw` for sub-fields coming from 2.x. If you choose to use the new template, you will have to reindex your data after
+the new template is installed.
+
+==== Retry Policy
+
+The retry policy has changed significantly in the 2.2.0 release.
+This plugin uses the Elasticsearch bulk API to optimize its imports into Elasticsearch. These requests may experience
+either partial or total failures.
+
+The following errors are retried infinitely:
+
+- Network errors (inability to connect)
+- 429 (Too many requests) and
+- 503 (Service unavailable) errors
+
+NOTE: 409 exceptions are no longer retried. Please set a higher `retry_on_conflict` value if you experience 409 exceptions.
+It is more performant for Elasticsearch to retry these exceptions than this plugin.
+
+==== Batch Sizes
+
+This plugin attempts to send batches of events as a single request. However, if
+a request exceeds 20MB we will break it up into multiple batch requests. If a single document exceeds 20MB it will be sent as a single request.
+
+==== DNS Caching
+
+This plugin uses the JVM to look up DNS entries and is subject to the value of https://docs.oracle.com/javase/7/docs/technotes/guides/net/properties.html[networkaddress.cache.ttl],
+a global setting for the JVM.
+
+As an example, to set your DNS TTL to 1 second you would set
+the `LS_JAVA_OPTS` environment variable to `-Dnetworkaddress.cache.ttl=1`.
+
+Keep in mind that a connection with keepalive enabled will
+not reevaluate its DNS value while the keepalive is in effect.
+
+==== HTTP Compression
+
+This plugin supports request and response compression. Response compression is enabled by default;
+for Elasticsearch versions 5.0 and later, you do not have to set anything in Elasticsearch for
+it to send back a compressed response. For versions before 5.0, `http.compression` must be set to `true` in
+https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-http.html#modules-http[Elasticsearch] to take advantage of response compression when using this plugin.
+
+For request compression, regardless of the Elasticsearch version, enable the `http_compression`
+setting in your Logstash config file.
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Elasticsearch Output Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+ +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-action>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-bulk_path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-cacert>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-doc_as_upsert>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-document_id>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-document_type>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-failure_type_logging_whitelist>> |{logstash-ref}/configuration-file-structure.html#array[array]|No +| <<{version}-plugins-{type}s-{plugin}-healthcheck_path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-hosts>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-http_compression>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-index>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-keystore>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-keystore_password>> |{logstash-ref}/configuration-file-structure.html#password[password]|No +| <<{version}-plugins-{type}s-{plugin}-manage_template>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-parameters>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No +| <<{version}-plugins-{type}s-{plugin}-parent>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-password>> |{logstash-ref}/configuration-file-structure.html#password[password]|No +| <<{version}-plugins-{type}s-{plugin}-path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-pipeline>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-pool_max>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-pool_max_per_route>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-proxy>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-resurrect_delay>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-retry_initial_interval>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-retry_max_interval>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-retry_on_conflict>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-routing>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-script>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-script_lang>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-script_type>> 
|{logstash-ref}/configuration-file-structure.html#string[string], one of `["inline", "indexed", "file"]`|No
+| <<{version}-plugins-{type}s-{plugin}-script_var_name>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-scripted_upsert>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-sniffing>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-sniffing_delay>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+| <<{version}-plugins-{type}s-{plugin}-sniffing_path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-ssl>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_verification>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-template>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-template_name>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-template_overwrite>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-timeout>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+| <<{version}-plugins-{type}s-{plugin}-truststore>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-truststore_password>> |{logstash-ref}/configuration-file-structure.html#password[password]|No
+| <<{version}-plugins-{type}s-{plugin}-upsert>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-user>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-validate_after_inactivity>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+| <<{version}-plugins-{type}s-{plugin}-version>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-version_type>> |{logstash-ref}/configuration-file-structure.html#string[string], one of `["internal", "external", "external_gt", "external_gte", "force"]`|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+output plugins.
+
+ 
+
+[id="{version}-plugins-{type}s-{plugin}-action"]
+===== `action`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `"index"`
+
+The Elasticsearch action to perform. Valid actions are:
+
+- index: indexes a document (an event from Logstash).
+- delete: deletes a document by id (an id is required for this action).
+- create: indexes a document, fails if a document by that id already exists in the index.
+- update: updates a document by id. Update has a special case where you can upsert -- update a
+  document if not already present. See the `upsert` option. NOTE: This does not work and is not supported
+  in Elasticsearch 1.x. Please upgrade to ES 2.x or greater to use this feature with Logstash!
+- A sprintf style string to change the action based on the content of the event. The value `%{[foo]}`
+  would use the foo field for the action.
+
+For more details on actions, check out the http://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html[Elasticsearch bulk API documentation].
+
+[id="{version}-plugins-{type}s-{plugin}-bulk_path"]
+===== `bulk_path`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+HTTP Path to perform the _bulk requests to.
+This defaults to a concatenation of the `path` parameter and "_bulk".
+
+[id="{version}-plugins-{type}s-{plugin}-cacert"]
+===== `cacert`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#path[path]
+ * There is no default value for this setting.
+
+The .cer or .pem file to validate the server's certificate.
+
+[id="{version}-plugins-{type}s-{plugin}-doc_as_upsert"]
+===== `doc_as_upsert`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `false`
+
+Enable `doc_as_upsert` for update mode.
+Create a new document with source if `document_id` doesn't exist in Elasticsearch.
+
+[id="{version}-plugins-{type}s-{plugin}-document_id"]
+===== `document_id`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+The document ID for the index. Useful for overwriting existing entries in
+Elasticsearch with the same ID.
+
+[id="{version}-plugins-{type}s-{plugin}-document_type"]
+===== `document_type`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+The document type to write events to. Generally you should try to write only
+similar events to the same 'type'. String expansion `%{foo}` works here.
+Unless you set 'document_type', the event 'type' will be used if it exists;
+otherwise the document type will be assigned the value of 'logs'.
+Remember the `http` protocol uses the http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-http.html#modules-http[http] address (eg. 9200, not 9300). + `"127.0.0.1"` + `["127.0.0.1:9200","127.0.0.2:9200"]` + `["http://127.0.0.1"]` + `["https://127.0.0.1:9200"]` + `["https://127.0.0.1:9200/mypath"]` (If using a proxy on a subpath) +It is important to exclude http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-node.html[dedicated master nodes] from the `hosts` list +to prevent LS from sending bulk requests to the master nodes. So this parameter should only reference either data or client nodes in Elasticsearch. + +Any special characters present in the URLs here MUST be URL escaped! This means `#` should be put in as `%23` for instance. + +[id="{version}-plugins-{type}s-{plugin}-http_compression"] +===== `http_compression` + + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] + * Default value is `false` + +Enable gzip compression on requests. Note that response compression is on by default for Elasticsearch v5.0 and beyond + +[id="{version}-plugins-{type}s-{plugin}-idle_flush_time"] +===== `idle_flush_time` (DEPRECATED) + + * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions. + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] + * Default value is `1` + + + +[id="{version}-plugins-{type}s-{plugin}-index"] +===== `index` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * Default value is `"logstash-%{+YYYY.MM.dd}"` + +The index to write events to. This can be dynamic using the `%{foo}` syntax. +The default value will partition your indices by day so you can more easily +delete old data or only search specific date ranges. +Indexes may not contain uppercase characters. +For weekly indexes ISO 8601 format is recommended, eg. logstash-%{+xxxx.ww}. +LS uses Joda to format the index pattern from event timestamp. +Joda formats are defined http://www.joda.org/joda-time/apidocs/org/joda/time/format/DateTimeFormat.html[here]. + +[id="{version}-plugins-{type}s-{plugin}-keystore"] +===== `keystore` + + * Value type is {logstash-ref}/configuration-file-structure.html#path[path] + * There is no default value for this setting. + +The keystore used to present a certificate to the server. +It can be either .jks or .p12 + +[id="{version}-plugins-{type}s-{plugin}-keystore_password"] +===== `keystore_password` + + * Value type is {logstash-ref}/configuration-file-structure.html#password[password] + * There is no default value for this setting. + +Set the keystore password + +[id="{version}-plugins-{type}s-{plugin}-manage_template"] +===== `manage_template` + + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] + * Default value is `true` + +From Logstash 1.3 onwards, a template is applied to Elasticsearch during +Logstash's startup if one with the name `template_name` does not already exist. +By default, the contents of this template is the default template for +`logstash-%{+YYYY.MM.dd}` which always matches indices based on the pattern +`logstash-*`. Should you require support for other index names, or would like +to change the mappings in the template in general, a custom template can be +specified by setting `template` to the path of a template file. + +Setting `manage_template` to false disables this feature. If you require more +control over template creation, (e.g. 
+
+[id="{version}-plugins-{type}s-{plugin}-parameters"]
+===== `parameters`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash]
+ * There is no default value for this setting.
+
+Pass a set of key value pairs as the URL query string. This query string is added
+to every host listed in the 'hosts' configuration. If the 'hosts' list contains
+urls that already have query strings, the one specified here will be appended.
+
+[id="{version}-plugins-{type}s-{plugin}-parent"]
+===== `parent`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `nil`
+
+For child documents, ID of the associated parent.
+This can be dynamic using the `%{foo}` syntax.
+
+[id="{version}-plugins-{type}s-{plugin}-password"]
+===== `password`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#password[password]
+ * There is no default value for this setting.
+
+Password to authenticate to a secure Elasticsearch cluster.
+
+[id="{version}-plugins-{type}s-{plugin}-path"]
+===== `path`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+HTTP Path at which the Elasticsearch server lives. Use this if you must run Elasticsearch behind a proxy that remaps
+the root path of the Elasticsearch HTTP API.
+Note that if you use paths as components of URLs in the 'hosts' field you may
+not also set this field. That will raise an error at startup.
+
+[id="{version}-plugins-{type}s-{plugin}-pipeline"]
+===== `pipeline`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `nil`
+
+Set which ingest pipeline you wish to execute for an event. You can also use event dependent configuration
+here like `pipeline => "%{INGEST_PIPELINE}"`.
+
+[id="{version}-plugins-{type}s-{plugin}-pool_max"]
+===== `pool_max`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `1000`
+
+While the output tries to reuse connections efficiently we have a maximum.
+This sets the maximum number of open connections the output will create.
+Setting this too low may mean frequently closing / opening connections,
+which is bad.
+
+[id="{version}-plugins-{type}s-{plugin}-pool_max_per_route"]
+===== `pool_max_per_route`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `100`
+
+While the output tries to reuse connections efficiently we have a maximum per endpoint.
+This sets the maximum number of open connections per endpoint the output will create.
+Setting this too low may mean frequently closing / opening connections,
+which is bad.
+
+[id="{version}-plugins-{type}s-{plugin}-proxy"]
+===== `proxy`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Set the address of a forward HTTP proxy.
+This used to accept hashes as arguments but now only accepts
+arguments of the URI type to prevent leaking credentials.
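+
+For example, a sketch of routing plugin traffic through a forward proxy (the proxy
+address is a placeholder):
+
+    output {
+      elasticsearch {
+        hosts => ["127.0.0.1:9200"]
+        proxy => "http://proxy.example.com:3128"
+      }
+    }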
+
+[id="{version}-plugins-{type}s-{plugin}-resurrect_delay"]
+===== `resurrect_delay`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `5`
+
+How frequently, in seconds, to wait between resurrection attempts.
+Resurrection is the process by which backend endpoints marked 'down' are checked
+to see if they have come back to life.
+
+[id="{version}-plugins-{type}s-{plugin}-retry_initial_interval"]
+===== `retry_initial_interval`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `2`
+
+Set initial interval in seconds between bulk retries. Doubled on each retry up to `retry_max_interval`.
+
+[id="{version}-plugins-{type}s-{plugin}-retry_max_interval"]
+===== `retry_max_interval`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `64`
+
+Set max interval in seconds between bulk retries.
+
+[id="{version}-plugins-{type}s-{plugin}-retry_on_conflict"]
+===== `retry_on_conflict`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `1`
+
+The number of times Elasticsearch should internally retry an update/upserted document.
+See the https://www.elastic.co/guide/en/elasticsearch/guide/current/partial-updates.html[partial updates documentation]
+for more info.
+
+[id="{version}-plugins-{type}s-{plugin}-routing"]
+===== `routing`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+A routing override to be applied to all processed events.
+This can be dynamic using the `%{foo}` syntax.
+
+[id="{version}-plugins-{type}s-{plugin}-script"]
+===== `script`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `""`
+
+Set script name for scripted update mode.
+
+[id="{version}-plugins-{type}s-{plugin}-script_lang"]
+===== `script_lang`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `"painless"`
+
+Set the language of the used script. If not set, this defaults to painless in ES 5.0.
+
+[id="{version}-plugins-{type}s-{plugin}-script_type"]
+===== `script_type`
+
+ * Value can be any of: `inline`, `indexed`, `file`
+ * Default value is `["inline"]`
+
+Define the type of script referenced by "script" variable:
+
+ inline : "script" contains inline script
+ indexed : "script" contains the name of script directly indexed in elasticsearch
+ file : "script" contains the name of script stored in elasticsearch's config directory
+
+[id="{version}-plugins-{type}s-{plugin}-script_var_name"]
+===== `script_var_name`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `"event"`
+
+Set variable name passed to script (scripted update).
+
+[id="{version}-plugins-{type}s-{plugin}-scripted_upsert"]
+===== `scripted_upsert`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `false`
+
+If enabled, the script is in charge of creating a non-existent document (scripted update).
+
+[id="{version}-plugins-{type}s-{plugin}-sniffing"]
+===== `sniffing`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `false`
+
+This setting asks Elasticsearch for the list of all cluster nodes and adds them to the hosts list.
+Note: This will return ALL nodes with HTTP enabled (including master nodes!). If you use
+this with master nodes, you probably want to disable HTTP on them by setting
+`http.enabled` to false in their elasticsearch.yml. You can either use the `sniffing` option or
+manually enter multiple Elasticsearch hosts using the `hosts` parameter.
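+
+As an illustration, a minimal sketch of enabling sniffing against a non-master node
+(the host address and delay are placeholders):
+
+    output {
+      elasticsearch {
+        hosts          => ["127.0.0.1:9200"]
+        sniffing       => true
+        sniffing_delay => 10
+      }
+    }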
+
+[id="{version}-plugins-{type}s-{plugin}-sniffing_delay"]
+===== `sniffing_delay`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `5`
+
+How long to wait, in seconds, between sniffing attempts.
+
+[id="{version}-plugins-{type}s-{plugin}-sniffing_path"]
+===== `sniffing_path`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+HTTP Path to be used for the sniffing requests.
+The default value is computed by concatenating the path value and "_nodes/http".
+If sniffing_path is set it will be used as an absolute path.
+Do not use a full URL here, only paths, e.g. "/sniff/_nodes/http".
+
+[id="{version}-plugins-{type}s-{plugin}-ssl"]
+===== `ssl`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * There is no default value for this setting.
+
+Enable SSL/TLS secured communication to Elasticsearch cluster. Leaving this unspecified will use whatever scheme
+is specified in the URLs listed in 'hosts'. If no explicit protocol is specified plain HTTP will be used.
+If SSL is explicitly disabled here the plugin will refuse to start if an HTTPS URL is given in 'hosts'.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_certificate_verification"]
+===== `ssl_certificate_verification`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `true`
+
+Option to validate the server's certificate. Disabling this severely compromises security.
+For more information on disabling certificate verification please read
+https://www.cs.utexas.edu/~shmat/shmat_ccs12.pdf
+
+[id="{version}-plugins-{type}s-{plugin}-template"]
+===== `template`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#path[path]
+ * There is no default value for this setting.
+
+You can set the path to your own template here, if you so desire.
+If not set, the included template will be used.
+
+[id="{version}-plugins-{type}s-{plugin}-template_name"]
+===== `template_name`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `"logstash"`
+
+This configuration option defines how the template is named inside Elasticsearch.
+Note that if you have used the template management features and subsequently
+change this, you will need to prune the old template manually, e.g.
+
+`curl -XDELETE `
+
+where `OldTemplateName` is whatever the former setting was.
+
+[id="{version}-plugins-{type}s-{plugin}-template_overwrite"]
+===== `template_overwrite`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `false`
+
+The template_overwrite option will always overwrite the indicated template
+in Elasticsearch with either the one indicated by template or the included one.
+This option is set to false by default. If you always want to stay up to date
+with the template provided by Logstash, this option could be very useful to you.
+Likewise, if you have your own template file managed by puppet, for example, and
+you wanted to be able to update it regularly, this option could help there as well.
+
+Please note that if you are using your own customized version of the Logstash
+template (logstash), setting this to true will make Logstash overwrite
+the "logstash" template (i.e. remove all customized settings).
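+
+For instance, a sketch of shipping a custom template (the file path and template name
+are assumptions for illustration):
+
+    output {
+      elasticsearch {
+        manage_template    => true
+        template           => "/etc/logstash/templates/my-template.json"
+        template_name      => "my-template"
+        template_overwrite => true
+      }
+    }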
+
+[id="{version}-plugins-{type}s-{plugin}-timeout"]
+===== `timeout`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `60`
+
+Set the timeout, in seconds, for network operations and requests sent to Elasticsearch. If
+a timeout occurs, the request will be retried.
+
+[id="{version}-plugins-{type}s-{plugin}-truststore"]
+===== `truststore`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#path[path]
+ * There is no default value for this setting.
+
+The JKS truststore to validate the server's certificate.
+Use either `:truststore` or `:cacert`.
+
+[id="{version}-plugins-{type}s-{plugin}-truststore_password"]
+===== `truststore_password`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#password[password]
+ * There is no default value for this setting.
+
+Set the truststore password.
+
+[id="{version}-plugins-{type}s-{plugin}-upsert"]
+===== `upsert`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `""`
+
+Set upsert content for update mode.
+Create a new document with this parameter as json string if `document_id` doesn't exist.
+
+[id="{version}-plugins-{type}s-{plugin}-user"]
+===== `user`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+Username to authenticate to a secure Elasticsearch cluster.
+
+[id="{version}-plugins-{type}s-{plugin}-validate_after_inactivity"]
+===== `validate_after_inactivity`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `10000`
+
+How long to wait, in milliseconds, before checking whether a keepalive connection
+is stale before executing a request on it.
+You may want to set this lower if you get connection errors regularly.
+Quoting the Apache commons docs (this client is based on Apache Commons):
+'Defines period of inactivity in milliseconds after which persistent connections must
+be re-validated prior to being leased to the consumer. Non-positive value passed to
+this method disables connection validation. This check helps detect connections that
+have become stale (half-closed) while kept inactive in the pool.'
+See https://hc.apache.org/httpcomponents-client-ga/httpclient/apidocs/org/apache/http/impl/conn/PoolingHttpClientConnectionManager.html#setValidateAfterInactivity(int)[these docs for more info]
+
+[id="{version}-plugins-{type}s-{plugin}-version"]
+===== `version`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+The version to use for indexing. Use sprintf syntax like `%{my_version}` to use a field value here.
+See https://www.elastic.co/blog/elasticsearch-versioning-support.
+
+[id="{version}-plugins-{type}s-{plugin}-version_type"]
+===== `version_type`
+
+ * Value can be any of: `internal`, `external`, `external_gt`, `external_gte`, `force`
+ * There is no default value for this setting.
+
+The version_type to use for indexing.
+See https://www.elastic.co/blog/elasticsearch-versioning-support.
+See also https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-index_.html#_version_types + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/elasticsearch-v7.3.8.asciidoc b/docs/versioned-plugins/outputs/elasticsearch-v7.3.8.asciidoc new file mode 100644 index 000000000..212a65006 --- /dev/null +++ b/docs/versioned-plugins/outputs/elasticsearch-v7.3.8.asciidoc @@ -0,0 +1,680 @@ +:plugin: elasticsearch +:type: output + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v7.3.8 +:release_date: 2017-07-20 +:changelog_url: https://github.com/logstash-plugins/logstash-output-elasticsearch/blob/v7.3.8/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Elasticsearch output plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +.Compatibility Note +[NOTE] +================================================================================ +Starting with Elasticsearch 5.3, there's an {ref}modules-http.html[HTTP setting] +called `http.content_type.required`. If this option is set to `true`, and you +are using Logstash 2.4 through 5.2, you need to update the Elasticsearch output +plugin to version 6.2.5 or higher. + +================================================================================ + +This plugin is the recommended method of storing logs in Elasticsearch. +If you plan on using the Kibana web interface, you'll want to use this output. + +This output only speaks the HTTP protocol. HTTP is the preferred protocol for interacting with Elasticsearch as of Logstash 2.0. +We strongly encourage the use of HTTP over the node protocol for a number of reasons. HTTP is only marginally slower, +yet far easier to administer and work with. When using the HTTP protocol one may upgrade Elasticsearch versions without having +to upgrade Logstash in lock-step. + +You can learn more about Elasticsearch at + +==== Template management for Elasticsearch 5.x +Index template for this version (Logstash 5.0) has been changed to reflect Elasticsearch's mapping changes in version 5.0. +Most importantly, the subfield for string multi-fields has changed from `.raw` to `.keyword` to match ES default +behavior. + +** Users installing ES 5.x and LS 5.x ** +This change will not affect you and you will continue to use the ES defaults. + +** Users upgrading from LS 2.x to LS 5.x with ES 5.x ** +LS will not force upgrade the template, if `logstash` template already exists. This means you will still use +`.raw` for sub-fields coming from 2.x. If you choose to use the new template, you will have to reindex your data after +the new template is installed. + +==== Retry Policy + +The retry policy has changed significantly in the 2.2.0 release. +This plugin uses the Elasticsearch bulk API to optimize its imports into Elasticsearch. These requests may experience +either partial or total failures. + +The following errors are retried infinitely: + +- Network errors (inability to connect) +- 429 (Too many requests) and +- 503 (Service unavailable) errors + +NOTE: 409 exceptions are no longer retried. Please set a higher `retry_on_conflict` value if you experience 409 exceptions. 
+
+==== Batch Sizes
+This plugin attempts to send batches of events as a single request. However, if
+a request exceeds 20MB we will break it up into multiple batch requests. If a single document exceeds 20MB it will be sent as a single request.
+
+==== DNS Caching
+
+This plugin uses the JVM to look up DNS entries and is subject to the value of https://docs.oracle.com/javase/7/docs/technotes/guides/net/properties.html[networkaddress.cache.ttl],
+a global setting for the JVM.
+
+As an example, to set your DNS TTL to 1 second you would set
+the `LS_JAVA_OPTS` environment variable to `-Dnetworkaddress.cache.ttl=1`.
+
+Keep in mind that a connection with keepalive enabled will
+not reevaluate its DNS value while the keepalive is in effect.
+
+==== HTTP Compression
+
+This plugin supports request and response compression. Response compression is enabled by default and,
+for Elasticsearch versions 5.0 and later, the user doesn't have to set any configs in Elasticsearch for
+it to send back a compressed response. For versions before 5.0, `http.compression` must be set to `true` in
+https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-http.html#modules-http[Elasticsearch] to take advantage of response compression when using this plugin.
+
+For request compression, regardless of the Elasticsearch version, users have to enable the `http_compression`
+setting in their Logstash config file.
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Elasticsearch Output Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-action>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-bulk_path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-cacert>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-doc_as_upsert>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-document_id>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-document_type>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-failure_type_logging_whitelist>> |{logstash-ref}/configuration-file-structure.html#array[array]|No
+| <<{version}-plugins-{type}s-{plugin}-healthcheck_path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-hosts>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-http_compression>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-index>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-keystore>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-keystore_password>> |{logstash-ref}/configuration-file-structure.html#password[password]|No
+| <<{version}-plugins-{type}s-{plugin}-manage_template>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-parameters>> 
|{logstash-ref}/configuration-file-structure.html#hash[hash]|No +| <<{version}-plugins-{type}s-{plugin}-parent>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-password>> |{logstash-ref}/configuration-file-structure.html#password[password]|No +| <<{version}-plugins-{type}s-{plugin}-path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-pipeline>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-pool_max>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-pool_max_per_route>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-proxy>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-resurrect_delay>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-retry_initial_interval>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-retry_max_interval>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-retry_on_conflict>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-routing>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-script>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-script_lang>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-script_type>> |{logstash-ref}/configuration-file-structure.html#string[string], one of `["inline", "indexed", "file"]`|No +| <<{version}-plugins-{type}s-{plugin}-script_var_name>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-scripted_upsert>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-sniffing>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-sniffing_delay>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-sniffing_path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-ssl>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_verification>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-template>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-template_name>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-template_overwrite>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-timeout>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-truststore>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-truststore_password>> |{logstash-ref}/configuration-file-structure.html#password[password]|No +| <<{version}-plugins-{type}s-{plugin}-upsert>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| 
<<{version}-plugins-{type}s-{plugin}-user>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-validate_after_inactivity>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+| <<{version}-plugins-{type}s-{plugin}-version>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-version_type>> |{logstash-ref}/configuration-file-structure.html#string[string], one of `["internal", "external", "external_gt", "external_gte", "force"]`|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+output plugins.
+
+ 
+
+[id="{version}-plugins-{type}s-{plugin}-action"]
+===== `action`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `"index"`
+
+The Elasticsearch action to perform. Valid actions are:
+
+- index: indexes a document (an event from Logstash).
+- delete: deletes a document by id (an id is required for this action)
+- create: indexes a document, fails if a document by that id already exists in the index.
+- update: updates a document by id. Update has a special case where you can upsert -- update a
+  document if not already present. See the `upsert` option. NOTE: This does not work and is not supported
+  in Elasticsearch 1.x. Please upgrade to ES 2.x or greater to use this feature with Logstash!
+- A sprintf style string to change the action based on the content of the event. The value `%{[foo]}`
+  would use the foo field for the action
+
+For more details on actions, check out the http://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html[Elasticsearch bulk API documentation]
+
+[id="{version}-plugins-{type}s-{plugin}-bulk_path"]
+===== `bulk_path`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+HTTP Path to perform the _bulk requests to.
+This defaults to a concatenation of the path parameter and "_bulk".
+
+[id="{version}-plugins-{type}s-{plugin}-cacert"]
+===== `cacert`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#path[path]
+ * There is no default value for this setting.
+
+The .cer or .pem file to validate the server's certificate
+
+[id="{version}-plugins-{type}s-{plugin}-doc_as_upsert"]
+===== `doc_as_upsert`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `false`
+
+Enable `doc_as_upsert` for update mode.
+Create a new document with source if `document_id` doesn't exist in Elasticsearch
+
+[id="{version}-plugins-{type}s-{plugin}-document_id"]
+===== `document_id`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+The document ID for the index. Useful for overwriting existing entries in
+Elasticsearch with the same ID.
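+
+As a hedged sketch, combining `document_id` with `doc_as_upsert` gives
+idempotent writes; the `fingerprint` field here is hypothetical and would be
+set by an earlier filter:
+
+    output {
+      elasticsearch {
+        hosts => ["127.0.0.1:9200"]
+        action => "update"
+        doc_as_upsert => true
+        document_id => "%{fingerprint}"
+      }
+    }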
+
+[id="{version}-plugins-{type}s-{plugin}-document_type"]
+===== `document_type`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+The document type to write events to. Generally you should try to write only
+similar events to the same 'type'. String expansion `%{foo}` works here.
+Unless you set 'document_type', the event 'type' will be used if it exists;
+otherwise, the document type will be assigned the value of 'logs'.
+
+[id="{version}-plugins-{type}s-{plugin}-failure_type_logging_whitelist"]
+===== `failure_type_logging_whitelist`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#array[array]
+ * Default value is `[]`
+
+Set the Elasticsearch errors in the whitelist that you don't want to log.
+A useful example is when you want to skip all 409 errors
+which are `document_already_exists_exception`.
+
+[id="{version}-plugins-{type}s-{plugin}-flush_size"]
+===== `flush_size` (DEPRECATED)
+
+ * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions.
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * There is no default value for this setting.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-healthcheck_path"]
+===== `healthcheck_path`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+HTTP Path where a HEAD request is sent when a backend is marked down.
+The request is sent in the background to see if it has come back again
+before it is once again eligible to service requests.
+If you have custom firewall rules you may need to change this.
+
+[id="{version}-plugins-{type}s-{plugin}-hosts"]
+===== `hosts`
+
+ * Value type is <>
+ * Default value is `[//127.0.0.1]`
+
+Sets the host(s) of the remote instance. If given an array it will load balance requests across the hosts specified in the `hosts` parameter.
+Remember the `http` protocol uses the http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-http.html#modules-http[http] address (e.g. 9200, not 9300).
+    `"127.0.0.1"`
+    `["127.0.0.1:9200","127.0.0.2:9200"]`
+    `["http://127.0.0.1"]`
+    `["https://127.0.0.1:9200"]`
+    `["https://127.0.0.1:9200/mypath"]` (If using a proxy on a subpath)
+It is important to exclude http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-node.html[dedicated master nodes] from the `hosts` list
+to prevent LS from sending bulk requests to the master nodes. So this parameter should only reference either data or client nodes in Elasticsearch.
+
+Any special characters present in the URLs here MUST be URL escaped! This means `#` should be put in as `%23` for instance.
+
+[id="{version}-plugins-{type}s-{plugin}-http_compression"]
+===== `http_compression`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `false`
+
+Enable gzip compression on requests. Note that response compression is on by default for Elasticsearch v5.0 and beyond.
+
+[id="{version}-plugins-{type}s-{plugin}-idle_flush_time"]
+===== `idle_flush_time` (DEPRECATED)
+
+ * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions.
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `1`
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-index"]
+===== `index`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `"logstash-%{+YYYY.MM.dd}"`
+
+The index to write events to. This can be dynamic using the `%{foo}` syntax.
+The default value will partition your indices by day so you can more easily
+delete old data or only search specific date ranges.
+Indexes may not contain uppercase characters.
+For weekly indexes ISO 8601 format is recommended, e.g. `logstash-%{+xxxx.ww}`.
+LS uses Joda to format the index pattern from event timestamp.
+Joda formats are defined http://www.joda.org/joda-time/apidocs/org/joda/time/format/DateTimeFormat.html[here].
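+
+For example, a hedged sketch of a custom daily index (the `myapp-` prefix is
+hypothetical):
+
+    output {
+      elasticsearch {
+        index => "myapp-%{+YYYY.MM.dd}"
+      }
+    }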
+
+[id="{version}-plugins-{type}s-{plugin}-keystore"]
+===== `keystore`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#path[path]
+ * There is no default value for this setting.
+
+The keystore used to present a certificate to the server.
+It can be either .jks or .p12
+
+[id="{version}-plugins-{type}s-{plugin}-keystore_password"]
+===== `keystore_password`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#password[password]
+ * There is no default value for this setting.
+
+Set the keystore password
+
+[id="{version}-plugins-{type}s-{plugin}-manage_template"]
+===== `manage_template`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `true`
+
+From Logstash 1.3 onwards, a template is applied to Elasticsearch during
+Logstash's startup if one with the name `template_name` does not already exist.
+By default, the contents of this template is the default template for
+`logstash-%{+YYYY.MM.dd}` which always matches indices based on the pattern
+`logstash-*`. Should you require support for other index names, or would like
+to change the mappings in the template in general, a custom template can be
+specified by setting `template` to the path of a template file.
+
+Setting `manage_template` to false disables this feature. If you require more
+control over template creation (e.g. creating indices dynamically based on
+field names), you should set `manage_template` to false and use the REST
+API to apply your templates manually.
+
+[id="{version}-plugins-{type}s-{plugin}-parameters"]
+===== `parameters`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash]
+ * There is no default value for this setting.
+
+Pass a set of key value pairs as the URL query string. This query string is added
+to every host listed in the 'hosts' configuration. If the 'hosts' list contains
+urls that already have query strings, the one specified here will be appended.
+
+[id="{version}-plugins-{type}s-{plugin}-parent"]
+===== `parent`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `nil`
+
+For child documents, ID of the associated parent.
+This can be dynamic using the `%{foo}` syntax.
+
+[id="{version}-plugins-{type}s-{plugin}-password"]
+===== `password`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#password[password]
+ * There is no default value for this setting.
+
+Password to authenticate to a secure Elasticsearch cluster
+
+[id="{version}-plugins-{type}s-{plugin}-path"]
+===== `path`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+HTTP Path at which the Elasticsearch server lives. Use this if you must run Elasticsearch behind a proxy that remaps
+the root path of the Elasticsearch HTTP API.
+Note that if you use paths as components of URLs in the 'hosts' field you may
+not also set this field. That will raise an error at startup.
+
+[id="{version}-plugins-{type}s-{plugin}-pipeline"]
+===== `pipeline`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `nil`
+
+Set which ingest pipeline you wish to execute for an event. You can also use event dependent configuration
+here like `pipeline => "%{INGEST_PIPELINE}"`.
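+
+A minimal sketch with a static pipeline name (the pipeline `apache-enrich` is
+hypothetical and must already exist in Elasticsearch):
+
+    output {
+      elasticsearch {
+        pipeline => "apache-enrich"
+      }
+    }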
+
+[id="{version}-plugins-{type}s-{plugin}-pool_max"]
+===== `pool_max`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `1000`
+
+While the output tries to reuse connections efficiently we have a maximum.
+This sets the maximum number of open connections the output will create.
+Setting this too low may mean frequently closing / opening connections,
+which is bad.
+
+[id="{version}-plugins-{type}s-{plugin}-pool_max_per_route"]
+===== `pool_max_per_route`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `100`
+
+While the output tries to reuse connections efficiently we have a maximum per endpoint.
+This sets the maximum number of open connections per endpoint the output will create.
+Setting this too low may mean frequently closing / opening connections,
+which is bad.
+
+[id="{version}-plugins-{type}s-{plugin}-proxy"]
+===== `proxy`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Set the address of a forward HTTP proxy.
+This used to accept hashes as arguments but now only accepts
+arguments of the URI type to prevent leaking credentials.
+
+[id="{version}-plugins-{type}s-{plugin}-resurrect_delay"]
+===== `resurrect_delay`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `5`
+
+How long, in seconds, to wait between resurrection attempts.
+Resurrection is the process by which backend endpoints marked 'down' are checked
+to see if they have come back to life.
+
+[id="{version}-plugins-{type}s-{plugin}-retry_initial_interval"]
+===== `retry_initial_interval`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `2`
+
+Set the initial interval, in seconds, between bulk retries. Doubled on each retry up to `retry_max_interval`.
+
+[id="{version}-plugins-{type}s-{plugin}-retry_max_interval"]
+===== `retry_max_interval`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `64`
+
+Set the maximum interval, in seconds, between bulk retries.
+
+[id="{version}-plugins-{type}s-{plugin}-retry_on_conflict"]
+===== `retry_on_conflict`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `1`
+
+The number of times Elasticsearch should internally retry an update/upserted document.
+See the https://www.elastic.co/guide/en/elasticsearch/guide/current/partial-updates.html[partial updates]
+documentation for more info.
+
+[id="{version}-plugins-{type}s-{plugin}-routing"]
+===== `routing`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+A routing override to be applied to all processed events.
+This can be dynamic using the `%{foo}` syntax.
+
+[id="{version}-plugins-{type}s-{plugin}-script"]
+===== `script`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `""`
+
+Set the script name for scripted update mode.
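+
+A hedged sketch of a scripted update (the stored script `update-counter` and
+the `doc_id` field are hypothetical; the `script_*` options are described
+below):
+
+    output {
+      elasticsearch {
+        action => "update"
+        document_id => "%{doc_id}"
+        script => "update-counter"
+        script_type => "indexed"
+      }
+    }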
+
+[id="{version}-plugins-{type}s-{plugin}-script_lang"]
+===== `script_lang`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `"painless"`
+
+Set the language of the used script. If not set, this defaults to painless in ES 5.0.
+
+[id="{version}-plugins-{type}s-{plugin}-script_type"]
+===== `script_type`
+
+ * Value can be any of: `inline`, `indexed`, `file`
+ * Default value is `["inline"]`
+
+Define the type of script referenced by the "script" variable:
+   inline : "script" contains inline script
+   indexed : "script" contains the name of script directly indexed in elasticsearch
+   file : "script" contains the name of script stored in elasticsearch's config directory
+
+[id="{version}-plugins-{type}s-{plugin}-script_var_name"]
+===== `script_var_name`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `"event"`
+
+Set the variable name passed to the script (scripted update).
+
+[id="{version}-plugins-{type}s-{plugin}-scripted_upsert"]
+===== `scripted_upsert`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `false`
+
+If enabled, the script is in charge of creating a non-existent document (scripted update).
+
+[id="{version}-plugins-{type}s-{plugin}-sniffing"]
+===== `sniffing`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `false`
+
+This setting asks Elasticsearch for the list of all cluster nodes and adds them to the hosts list.
+Note: This will return ALL nodes with HTTP enabled (including master nodes!). If you use
+this with master nodes, you probably want to disable HTTP on them by setting
+`http.enabled` to false in their elasticsearch.yml. You can either use the `sniffing` option or
+manually enter multiple Elasticsearch hosts using the `hosts` parameter.
+
+[id="{version}-plugins-{type}s-{plugin}-sniffing_delay"]
+===== `sniffing_delay`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `5`
+
+How long to wait, in seconds, between sniffing attempts.
+
+[id="{version}-plugins-{type}s-{plugin}-sniffing_path"]
+===== `sniffing_path`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+HTTP Path to be used for the sniffing requests.
+The default value is computed by concatenating the path value and "_nodes/http".
+If sniffing_path is set it will be used as an absolute path.
+Do not use a full URL here, only paths, e.g. "/sniff/_nodes/http".
+
+[id="{version}-plugins-{type}s-{plugin}-ssl"]
+===== `ssl`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * There is no default value for this setting.
+
+Enable SSL/TLS secured communication to the Elasticsearch cluster. Leaving this unspecified will use whatever scheme
+is specified in the URLs listed in 'hosts'. If no explicit protocol is specified plain HTTP will be used.
+If SSL is explicitly disabled here the plugin will refuse to start if an HTTPS URL is given in 'hosts'.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_certificate_verification"]
+===== `ssl_certificate_verification`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `true`
+
+Option to validate the server's certificate. Disabling this severely compromises security.
+For more information on disabling certificate verification please read
+https://www.cs.utexas.edu/~shmat/shmat_ccs12.pdf
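+
+A hedged sketch of a TLS-secured connection with basic auth (the hostname,
+file path, and credentials are hypothetical):
+
+    output {
+      elasticsearch {
+        hosts => ["https://es.example.com:9200"]
+        ssl => true
+        cacert => "/etc/logstash/certs/ca.pem"
+        user => "logstash_writer"
+        password => "changeme"
+      }
+    }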
+
+[id="{version}-plugins-{type}s-{plugin}-template"]
+===== `template`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#path[path]
+ * There is no default value for this setting.
+
+You can set the path to your own template here, if you so desire.
+If not set, the included template will be used.
+
+[id="{version}-plugins-{type}s-{plugin}-template_name"]
+===== `template_name`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `"logstash"`
+
+This configuration option defines how the template is named inside Elasticsearch.
+Note that if you have used the template management features and subsequently
+change this, you will need to prune the old template manually, e.g.
+
+`curl -XDELETE `
+
+where `OldTemplateName` is whatever the former setting was.
+
+[id="{version}-plugins-{type}s-{plugin}-template_overwrite"]
+===== `template_overwrite`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `false`
+
+The template_overwrite option will always overwrite the indicated template
+in Elasticsearch with either the one indicated by template or the included one.
+This option is set to false by default. If you always want to stay up to date
+with the template provided by Logstash, this option could be very useful to you.
+Likewise, if you have your own template file managed by puppet, for example, and
+you wanted to be able to update it regularly, this option could help there as well.
+
+Please note that if you are using your own customized version of the Logstash
+template (logstash), setting this to true will make Logstash overwrite
+the "logstash" template (i.e. remove all customized settings).
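+
+A hedged sketch of managing a custom template (the file path and template
+name are hypothetical):
+
+    output {
+      elasticsearch {
+        template => "/etc/logstash/templates/myapp.json"
+        template_name => "myapp"
+        template_overwrite => true
+      }
+    }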
+
+[id="{version}-plugins-{type}s-{plugin}-timeout"]
+===== `timeout`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `60`
+
+Set the timeout, in seconds, for network operations and requests sent to Elasticsearch. If
+a timeout occurs, the request will be retried.
+
+[id="{version}-plugins-{type}s-{plugin}-truststore"]
+===== `truststore`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#path[path]
+ * There is no default value for this setting.
+
+The JKS truststore to validate the server's certificate.
+Use either `:truststore` or `:cacert`
+
+[id="{version}-plugins-{type}s-{plugin}-truststore_password"]
+===== `truststore_password`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#password[password]
+ * There is no default value for this setting.
+
+Set the truststore password
+
+[id="{version}-plugins-{type}s-{plugin}-upsert"]
+===== `upsert`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `""`
+
+Set upsert content for update mode.
+Create a new document with this parameter as a JSON string if `document_id` doesn't exist.
+
+[id="{version}-plugins-{type}s-{plugin}-user"]
+===== `user`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+Username to authenticate to a secure Elasticsearch cluster
+
+[id="{version}-plugins-{type}s-{plugin}-validate_after_inactivity"]
+===== `validate_after_inactivity`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `10000`
+
+How long to wait before checking if the connection is stale before executing a request on a connection using keepalive.
+You may want to set this lower if you get connection errors regularly.
+Quoting the Apache commons docs (this client is based on Apache Commons):
+'Defines period of inactivity in milliseconds after which persistent connections must
+be re-validated prior to being leased to the consumer. Non-positive value passed to
+this method disables connection validation. This check helps detect connections that
+have become stale (half-closed) while kept inactive in the pool.'
+See https://hc.apache.org/httpcomponents-client-ga/httpclient/apidocs/org/apache/http/impl/conn/PoolingHttpClientConnectionManager.html#setValidateAfterInactivity(int)[these docs for more info]
+
+[id="{version}-plugins-{type}s-{plugin}-version"]
+===== `version`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+The version to use for indexing. Use sprintf syntax like `%{my_version}` to use a field value here.
+See https://www.elastic.co/blog/elasticsearch-versioning-support.
+
+[id="{version}-plugins-{type}s-{plugin}-version_type"]
+===== `version_type`
+
+ * Value can be any of: `internal`, `external`, `external_gt`, `external_gte`, `force`
+ * There is no default value for this setting.
+
+The version_type to use for indexing.
+See https://www.elastic.co/blog/elasticsearch-versioning-support.
+See also https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-index_.html#_version_types
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/outputs/elasticsearch-v7.4.0.asciidoc b/docs/versioned-plugins/outputs/elasticsearch-v7.4.0.asciidoc
new file mode 100644
index 000000000..13905161d
--- /dev/null
+++ b/docs/versioned-plugins/outputs/elasticsearch-v7.4.0.asciidoc
@@ -0,0 +1,681 @@
+:plugin: elasticsearch
+:type: output
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v7.4.0
+:release_date: 2017-08-21
+:changelog_url: https://github.com/logstash-plugins/logstash-output-elasticsearch/blob/v7.4.0/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Elasticsearch output plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+.Compatibility Note
+[NOTE]
+================================================================================
+Starting with Elasticsearch 5.3, there's an {ref}modules-http.html[HTTP setting]
+called `http.content_type.required`. If this option is set to `true`, and you
+are using Logstash 2.4 through 5.2, you need to update the Elasticsearch output
+plugin to version 6.2.5 or higher.
+
+================================================================================
+
+This plugin is the recommended method of storing logs in Elasticsearch.
+If you plan on using the Kibana web interface, you'll want to use this output.
+
+This output only speaks the HTTP protocol. HTTP is the preferred protocol for interacting with Elasticsearch as of Logstash 2.0.
+We strongly encourage the use of HTTP over the node protocol for a number of reasons. HTTP is only marginally slower,
+yet far easier to administer and work with.
When using the HTTP protocol one may upgrade Elasticsearch versions without having
+to upgrade Logstash in lock-step.
+
+You can learn more about Elasticsearch at
+
+==== Template management for Elasticsearch 5.x
+The index template for this version (Logstash 5.0) has been changed to reflect Elasticsearch's mapping changes in version 5.0.
+Most importantly, the subfield for string multi-fields has changed from `.raw` to `.keyword` to match ES default
+behavior.
+
+**Users installing ES 5.x and LS 5.x**
+This change will not affect you and you will continue to use the ES defaults.
+
+**Users upgrading from LS 2.x to LS 5.x with ES 5.x**
+LS will not force-upgrade the template if a `logstash` template already exists. This means you will still use
+`.raw` for sub-fields coming from 2.x. If you choose to use the new template, you will have to reindex your data after
+the new template is installed.
+
+==== Retry Policy
+
+The retry policy has changed significantly in the 7.4.0 release.
+This plugin uses the Elasticsearch bulk API to optimize its imports into Elasticsearch. These requests may experience
+either partial or total failures. The bulk API sends batches of requests to an HTTP endpoint. Error codes for the HTTP
+request are handled differently than error codes for individual documents.
+
+HTTP requests to the bulk API are expected to return a 200 response code. All other response codes are retried indefinitely.
+
+The following document errors are handled as follows:
+
+- 400 and 404 errors are sent to the DLQ if enabled. If a DLQ is not enabled, a log message will be emitted and the event will be dropped.
+- 409 errors (conflict) are logged as a warning and dropped.
+
+Note that 409 exceptions are no longer retried. Please set a higher `retry_on_conflict` value if you experience 409 exceptions.
+It is more performant for Elasticsearch to retry these exceptions internally than for this plugin to retry them.
+
+==== Batch Sizes
+This plugin attempts to send batches of events as a single request. However, if
+a request exceeds 20MB we will break it up into multiple batch requests. If a single document exceeds 20MB it will be sent as a single request.
+
+==== DNS Caching
+
+This plugin uses the JVM to look up DNS entries and is subject to the value of https://docs.oracle.com/javase/7/docs/technotes/guides/net/properties.html[networkaddress.cache.ttl],
+a global setting for the JVM.
+
+As an example, to set your DNS TTL to 1 second you would set
+the `LS_JAVA_OPTS` environment variable to `-Dnetworkaddress.cache.ttl=1`.
+
+Keep in mind that a connection with keepalive enabled will
+not reevaluate its DNS value while the keepalive is in effect.
+
+==== HTTP Compression
+
+This plugin supports request and response compression. Response compression is enabled by default and,
+for Elasticsearch versions 5.0 and later, the user doesn't have to set any configs in Elasticsearch for
+it to send back a compressed response. For versions before 5.0, `http.compression` must be set to `true` in
+https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-http.html#modules-http[Elasticsearch] to take advantage of response compression when using this plugin.
+
+For request compression, regardless of the Elasticsearch version, users have to enable the `http_compression`
+setting in their Logstash config file.
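+
+A minimal sketch of enabling request compression (all other settings left at
+their defaults):
+
+    output {
+      elasticsearch {
+        hosts => ["127.0.0.1:9200"]
+        http_compression => true
+      }
+    }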
+ + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Elasticsearch Output Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-action>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-bulk_path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-cacert>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-doc_as_upsert>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-document_id>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-document_type>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-failure_type_logging_whitelist>> |{logstash-ref}/configuration-file-structure.html#array[array]|No +| <<{version}-plugins-{type}s-{plugin}-healthcheck_path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-hosts>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-http_compression>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-index>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-keystore>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-keystore_password>> |{logstash-ref}/configuration-file-structure.html#password[password]|No +| <<{version}-plugins-{type}s-{plugin}-manage_template>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-parameters>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No +| <<{version}-plugins-{type}s-{plugin}-parent>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-password>> |{logstash-ref}/configuration-file-structure.html#password[password]|No +| <<{version}-plugins-{type}s-{plugin}-path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-pipeline>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-pool_max>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-pool_max_per_route>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-proxy>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-resurrect_delay>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-retry_initial_interval>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-retry_max_interval>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-retry_on_conflict>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-routing>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-script>> 
|{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-script_lang>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-script_type>> |{logstash-ref}/configuration-file-structure.html#string[string], one of `["inline", "indexed", "file"]`|No
+| <<{version}-plugins-{type}s-{plugin}-script_var_name>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-scripted_upsert>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-sniffing>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-sniffing_delay>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+| <<{version}-plugins-{type}s-{plugin}-sniffing_path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-ssl>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_verification>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-template>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-template_name>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-template_overwrite>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-timeout>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+| <<{version}-plugins-{type}s-{plugin}-truststore>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-truststore_password>> |{logstash-ref}/configuration-file-structure.html#password[password]|No
+| <<{version}-plugins-{type}s-{plugin}-upsert>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-user>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-validate_after_inactivity>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+| <<{version}-plugins-{type}s-{plugin}-version>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-version_type>> |{logstash-ref}/configuration-file-structure.html#string[string], one of `["internal", "external", "external_gt", "external_gte", "force"]`|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+output plugins.
+
+ 
+
+[id="{version}-plugins-{type}s-{plugin}-action"]
+===== `action`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `"index"`
+
+The Elasticsearch action to perform. Valid actions are:
+
+- index: indexes a document (an event from Logstash).
+- delete: deletes a document by id (an id is required for this action)
+- create: indexes a document, fails if a document by that id already exists in the index.
+- update: updates a document by id. Update has a special case where you can upsert -- update a
+  document if not already present. See the `upsert` option. NOTE: This does not work and is not supported
+  in Elasticsearch 1.x. Please upgrade to ES 2.x or greater to use this feature with Logstash!
+- A sprintf style string to change the action based on the content of the event. The value `%{[foo]}`
+  would use the foo field for the action
+
+For more details on actions, check out the http://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html[Elasticsearch bulk API documentation]
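+
+A hedged sketch of an event-dependent action (the `es_action` field is
+hypothetical and would be set by an earlier filter to index, update, etc.):
+
+    output {
+      elasticsearch {
+        action => "%{[es_action]}"
+      }
+    }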
+
+[id="{version}-plugins-{type}s-{plugin}-bulk_path"]
+===== `bulk_path`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+HTTP Path to perform the _bulk requests to.
+This defaults to a concatenation of the path parameter and "_bulk".
+
+[id="{version}-plugins-{type}s-{plugin}-cacert"]
+===== `cacert`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#path[path]
+ * There is no default value for this setting.
+
+The .cer or .pem file to validate the server's certificate
+
+[id="{version}-plugins-{type}s-{plugin}-doc_as_upsert"]
+===== `doc_as_upsert`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `false`
+
+Enable `doc_as_upsert` for update mode.
+Create a new document with source if `document_id` doesn't exist in Elasticsearch
+
+[id="{version}-plugins-{type}s-{plugin}-document_id"]
+===== `document_id`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+The document ID for the index. Useful for overwriting existing entries in
+Elasticsearch with the same ID.
+
+[id="{version}-plugins-{type}s-{plugin}-document_type"]
+===== `document_type`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+The document type to write events to. Generally you should try to write only
+similar events to the same 'type'. String expansion `%{foo}` works here.
+Unless you set 'document_type', the event 'type' will be used if it exists;
+otherwise, the document type will be assigned the value of 'logs'.
+
+[id="{version}-plugins-{type}s-{plugin}-failure_type_logging_whitelist"]
+===== `failure_type_logging_whitelist`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#array[array]
+ * Default value is `[]`
+
+Set the Elasticsearch errors in the whitelist that you don't want to log.
+A useful example is when you want to skip all 409 errors
+which are `document_already_exists_exception`.
+
+[id="{version}-plugins-{type}s-{plugin}-flush_size"]
+===== `flush_size` (DEPRECATED)
+
+ * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions.
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * There is no default value for this setting.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-healthcheck_path"]
+===== `healthcheck_path`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+HTTP Path where a HEAD request is sent when a backend is marked down.
+The request is sent in the background to see if it has come back again
+before it is once again eligible to service requests.
+If you have custom firewall rules you may need to change this + +[id="{version}-plugins-{type}s-{plugin}-hosts"] +===== `hosts` + + * Value type is <> + * Default value is `[//127.0.0.1]` + +Sets the host(s) of the remote instance. If given an array it will load balance requests across the hosts specified in the `hosts` parameter. +Remember the `http` protocol uses the http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-http.html#modules-http[http] address (eg. 9200, not 9300). + `"127.0.0.1"` + `["127.0.0.1:9200","127.0.0.2:9200"]` + `["http://127.0.0.1"]` + `["https://127.0.0.1:9200"]` + `["https://127.0.0.1:9200/mypath"]` (If using a proxy on a subpath) +It is important to exclude http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-node.html[dedicated master nodes] from the `hosts` list +to prevent LS from sending bulk requests to the master nodes. So this parameter should only reference either data or client nodes in Elasticsearch. + +Any special characters present in the URLs here MUST be URL escaped! This means `#` should be put in as `%23` for instance. + +[id="{version}-plugins-{type}s-{plugin}-http_compression"] +===== `http_compression` + + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] + * Default value is `false` + +Enable gzip compression on requests. Note that response compression is on by default for Elasticsearch v5.0 and beyond + +[id="{version}-plugins-{type}s-{plugin}-idle_flush_time"] +===== `idle_flush_time` (DEPRECATED) + + * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions. + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] + * Default value is `1` + + + +[id="{version}-plugins-{type}s-{plugin}-index"] +===== `index` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * Default value is `"logstash-%{+YYYY.MM.dd}"` + +The index to write events to. This can be dynamic using the `%{foo}` syntax. +The default value will partition your indices by day so you can more easily +delete old data or only search specific date ranges. +Indexes may not contain uppercase characters. +For weekly indexes ISO 8601 format is recommended, eg. logstash-%{+xxxx.ww}. +LS uses Joda to format the index pattern from event timestamp. +Joda formats are defined http://www.joda.org/joda-time/apidocs/org/joda/time/format/DateTimeFormat.html[here]. + +[id="{version}-plugins-{type}s-{plugin}-keystore"] +===== `keystore` + + * Value type is {logstash-ref}/configuration-file-structure.html#path[path] + * There is no default value for this setting. + +The keystore used to present a certificate to the server. +It can be either .jks or .p12 + +[id="{version}-plugins-{type}s-{plugin}-keystore_password"] +===== `keystore_password` + + * Value type is {logstash-ref}/configuration-file-structure.html#password[password] + * There is no default value for this setting. + +Set the keystore password + +[id="{version}-plugins-{type}s-{plugin}-manage_template"] +===== `manage_template` + + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] + * Default value is `true` + +From Logstash 1.3 onwards, a template is applied to Elasticsearch during +Logstash's startup if one with the name `template_name` does not already exist. +By default, the contents of this template is the default template for +`logstash-%{+YYYY.MM.dd}` which always matches indices based on the pattern +`logstash-*`. 
Should you require support for other index names, or would like
+to change the mappings in the template in general, a custom template can be
+specified by setting `template` to the path of a template file.
+
+Setting `manage_template` to false disables this feature. If you require more
+control over template creation (e.g. creating indices dynamically based on
+field names), you should set `manage_template` to false and use the REST
+API to apply your templates manually.
+
+[id="{version}-plugins-{type}s-{plugin}-parameters"]
+===== `parameters`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash]
+ * There is no default value for this setting.
+
+Pass a set of key value pairs as the URL query string. This query string is added
+to every host listed in the 'hosts' configuration. If the 'hosts' list contains
+urls that already have query strings, the one specified here will be appended.
+
+[id="{version}-plugins-{type}s-{plugin}-parent"]
+===== `parent`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `nil`
+
+For child documents, ID of the associated parent.
+This can be dynamic using the `%{foo}` syntax.
+
+[id="{version}-plugins-{type}s-{plugin}-password"]
+===== `password`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#password[password]
+ * There is no default value for this setting.
+
+Password to authenticate to a secure Elasticsearch cluster
+
+[id="{version}-plugins-{type}s-{plugin}-path"]
+===== `path`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+HTTP Path at which the Elasticsearch server lives. Use this if you must run Elasticsearch behind a proxy that remaps
+the root path of the Elasticsearch HTTP API.
+Note that if you use paths as components of URLs in the 'hosts' field you may
+not also set this field. That will raise an error at startup.
+
+[id="{version}-plugins-{type}s-{plugin}-pipeline"]
+===== `pipeline`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `nil`
+
+Set which ingest pipeline you wish to execute for an event. You can also use event dependent configuration
+here like `pipeline => "%{INGEST_PIPELINE}"`.
+
+[id="{version}-plugins-{type}s-{plugin}-pool_max"]
+===== `pool_max`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `1000`
+
+While the output tries to reuse connections efficiently we have a maximum.
+This sets the maximum number of open connections the output will create.
+Setting this too low may mean frequently closing / opening connections,
+which is bad.
+
+[id="{version}-plugins-{type}s-{plugin}-pool_max_per_route"]
+===== `pool_max_per_route`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `100`
+
+While the output tries to reuse connections efficiently we have a maximum per endpoint.
+This sets the maximum number of open connections per endpoint the output will create.
+Setting this too low may mean frequently closing / opening connections,
+which is bad.
+
+[id="{version}-plugins-{type}s-{plugin}-proxy"]
+===== `proxy`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Set the address of a forward HTTP proxy.
+This used to accept hashes as arguments but now only accepts
+arguments of the URI type to prevent leaking credentials.
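+
+A minimal sketch of routing traffic through a forward proxy (the proxy URI is
+hypothetical):
+
+    output {
+      elasticsearch {
+        proxy => "http://proxy.example.com:3128"
+      }
+    }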
+
+[id="{version}-plugins-{type}s-{plugin}-resurrect_delay"]
+===== `resurrect_delay`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `5`
+
+How long, in seconds, to wait between resurrection attempts.
+Resurrection is the process by which backend endpoints marked 'down' are checked
+to see if they have come back to life.
+
+[id="{version}-plugins-{type}s-{plugin}-retry_initial_interval"]
+===== `retry_initial_interval`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `2`
+
+Set the initial interval, in seconds, between bulk retries. Doubled on each retry up to `retry_max_interval`.
+
+[id="{version}-plugins-{type}s-{plugin}-retry_max_interval"]
+===== `retry_max_interval`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `64`
+
+Set the maximum interval, in seconds, between bulk retries.
+
+[id="{version}-plugins-{type}s-{plugin}-retry_on_conflict"]
+===== `retry_on_conflict`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `1`
+
+The number of times Elasticsearch should internally retry an update/upserted document.
+See the https://www.elastic.co/guide/en/elasticsearch/guide/current/partial-updates.html[partial updates]
+documentation for more info.
+
+[id="{version}-plugins-{type}s-{plugin}-routing"]
+===== `routing`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+A routing override to be applied to all processed events.
+This can be dynamic using the `%{foo}` syntax.
+
+[id="{version}-plugins-{type}s-{plugin}-script"]
+===== `script`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `""`
+
+Set the script name for scripted update mode.
+
+[id="{version}-plugins-{type}s-{plugin}-script_lang"]
+===== `script_lang`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `"painless"`
+
+Set the language of the used script. If not set, this defaults to painless in ES 5.0.
+
+[id="{version}-plugins-{type}s-{plugin}-script_type"]
+===== `script_type`
+
+ * Value can be any of: `inline`, `indexed`, `file`
+ * Default value is `["inline"]`
+
+Define the type of script referenced by the "script" variable:
+   inline : "script" contains inline script
+   indexed : "script" contains the name of script directly indexed in elasticsearch
+   file : "script" contains the name of script stored in elasticsearch's config directory
+
+[id="{version}-plugins-{type}s-{plugin}-script_var_name"]
+===== `script_var_name`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `"event"`
+
+Set the variable name passed to the script (scripted update).
+
+[id="{version}-plugins-{type}s-{plugin}-scripted_upsert"]
+===== `scripted_upsert`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `false`
+
+If enabled, the script is in charge of creating a non-existent document (scripted update).
+
+[id="{version}-plugins-{type}s-{plugin}-sniffing"]
+===== `sniffing`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `false`
+
+This setting asks Elasticsearch for the list of all cluster nodes and adds them to the hosts list.
+Note: This will return ALL nodes with HTTP enabled (including master nodes!). If you use
+this with master nodes, you probably want to disable HTTP on them by setting
+`http.enabled` to false in their elasticsearch.yml. You can either use the `sniffing` option or
+manually enter multiple Elasticsearch hosts using the `hosts` parameter.
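+
+A minimal sketch of enabling sniffing with a slower refresh (the 30-second
+delay is illustrative):
+
+    output {
+      elasticsearch {
+        hosts => ["127.0.0.1:9200"]
+        sniffing => true
+        sniffing_delay => 30
+      }
+    }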
+
+[id="{version}-plugins-{type}s-{plugin}-sniffing_delay"]
+===== `sniffing_delay`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `5`
+
+How long to wait, in seconds, between sniffing attempts.
+
+[id="{version}-plugins-{type}s-{plugin}-sniffing_path"]
+===== `sniffing_path`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+HTTP Path to be used for the sniffing requests.
+The default value is computed by concatenating the path value and "_nodes/http".
+If sniffing_path is set it will be used as an absolute path.
+Do not use a full URL here, only paths, e.g. "/sniff/_nodes/http".
+
+[id="{version}-plugins-{type}s-{plugin}-ssl"]
+===== `ssl`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * There is no default value for this setting.
+
+Enable SSL/TLS secured communication to the Elasticsearch cluster. Leaving this unspecified will use whatever scheme
+is specified in the URLs listed in 'hosts'. If no explicit protocol is specified plain HTTP will be used.
+If SSL is explicitly disabled here the plugin will refuse to start if an HTTPS URL is given in 'hosts'.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_certificate_verification"]
+===== `ssl_certificate_verification`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `true`
+
+Option to validate the server's certificate. Disabling this severely compromises security.
+For more information on disabling certificate verification please read
+https://www.cs.utexas.edu/~shmat/shmat_ccs12.pdf
+
+[id="{version}-plugins-{type}s-{plugin}-template"]
+===== `template`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#path[path]
+ * There is no default value for this setting.
+
+You can set the path to your own template here, if you so desire.
+If not set, the included template will be used.
+
+[id="{version}-plugins-{type}s-{plugin}-template_name"]
+===== `template_name`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `"logstash"`
+
+This configuration option defines how the template is named inside Elasticsearch.
+Note that if you have used the template management features and subsequently
+change this, you will need to prune the old template manually, e.g.
+
+`curl -XDELETE `
+
+where `OldTemplateName` is whatever the former setting was.
+
+[id="{version}-plugins-{type}s-{plugin}-template_overwrite"]
+===== `template_overwrite`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `false`
+
+The template_overwrite option will always overwrite the indicated template
+in Elasticsearch with either the one indicated by template or the included one.
+This option is set to false by default. If you always want to stay up to date
+with the template provided by Logstash, this option could be very useful to you.
+Likewise, if you have your own template file managed by puppet, for example, and
+you wanted to be able to update it regularly, this option could help there as well.
+
+Please note that if you are using your own customized version of the Logstash
+template (logstash), setting this to true will make Logstash overwrite
+the "logstash" template (i.e. removing all customized settings).
+
+[id="{version}-plugins-{type}s-{plugin}-timeout"]
+===== `timeout`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `60`
+
+Set the timeout, in seconds, for network operations and requests sent to Elasticsearch. If
+a timeout occurs, the request will be retried.
+
+[id="{version}-plugins-{type}s-{plugin}-truststore"]
+===== `truststore`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#path[path]
+ * There is no default value for this setting.
+
+The JKS truststore to validate the server's certificate.
+Use either `:truststore` or `:cacert`.
+
+[id="{version}-plugins-{type}s-{plugin}-truststore_password"]
+===== `truststore_password`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#password[password]
+ * There is no default value for this setting.
+
+Set the truststore password.
+
+[id="{version}-plugins-{type}s-{plugin}-upsert"]
+===== `upsert`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `""`
+
+Set upsert content for update mode.
+Create a new document with this parameter as a JSON string if `document_id` doesn't exist.
+
+[id="{version}-plugins-{type}s-{plugin}-user"]
+===== `user`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+Username to authenticate to a secure Elasticsearch cluster.
+
+[id="{version}-plugins-{type}s-{plugin}-validate_after_inactivity"]
+===== `validate_after_inactivity`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `10000`
+
+How long to wait before checking whether the connection is stale when executing a request on a connection that uses keepalive.
+You may want to set this lower if you get connection errors regularly.
+Quoting the Apache Commons docs (this client is based on Apache Commons):
+'Defines period of inactivity in milliseconds after which persistent connections must
+be re-validated prior to being leased to the consumer. Non-positive value passed to
+this method disables connection validation. This check helps detect connections that
+have become stale (half-closed) while kept inactive in the pool.'
+See https://hc.apache.org/httpcomponents-client-ga/httpclient/apidocs/org/apache/http/impl/conn/PoolingHttpClientConnectionManager.html#setValidateAfterInactivity(int)[these docs for more info]
+
+[id="{version}-plugins-{type}s-{plugin}-version"]
+===== `version`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+The version to use for indexing. Use sprintf syntax like `%{my_version}` to use a field value here.
+See https://www.elastic.co/blog/elasticsearch-versioning-support.
+
+[id="{version}-plugins-{type}s-{plugin}-version_type"]
+===== `version_type`
+
+ * Value can be any of: `internal`, `external`, `external_gt`, `external_gte`, `force`
+ * There is no default value for this setting.
+
+The version_type to use for indexing.
+See https://www.elastic.co/blog/elasticsearch-versioning-support.
+See also https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-index_.html#_version_types
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/outputs/elasticsearch-v7.4.1.asciidoc b/docs/versioned-plugins/outputs/elasticsearch-v7.4.1.asciidoc
new file mode 100644
index 000000000..cb7a3c2bb
--- /dev/null
+++ b/docs/versioned-plugins/outputs/elasticsearch-v7.4.1.asciidoc
@@ -0,0 +1,681 @@
+:plugin: elasticsearch
+:type: output
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v7.4.1
+:release_date: 2017-09-21
+:changelog_url: https://github.com/logstash-plugins/logstash-output-elasticsearch/blob/v7.4.1/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Elasticsearch output plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+.Compatibility Note
+[NOTE]
+================================================================================
+Starting with Elasticsearch 5.3, there's an {ref}/modules-http.html[HTTP setting]
+called `http.content_type.required`. If this option is set to `true`, and you
+are using Logstash 2.4 through 5.2, you need to update the Elasticsearch output
+plugin to version 6.2.5 or higher.
+
+================================================================================
+
+This plugin is the recommended method of storing logs in Elasticsearch.
+If you plan on using the Kibana web interface, you'll want to use this output.
+
+This output only speaks the HTTP protocol. HTTP is the preferred protocol for interacting with Elasticsearch as of Logstash 2.0.
+We strongly encourage the use of HTTP over the node protocol for a number of reasons. HTTP is only marginally slower,
+yet far easier to administer and work with. When using the HTTP protocol one may upgrade Elasticsearch versions without having
+to upgrade Logstash in lock-step.
+
+You can learn more about Elasticsearch at
+
+==== Template management for Elasticsearch 5.x
+Index template for this version (Logstash 5.0) has been changed to reflect Elasticsearch's mapping changes in version 5.0.
+Most importantly, the subfield for string multi-fields has changed from `.raw` to `.keyword` to match ES default
+behavior.
+
+**Users installing ES 5.x and LS 5.x**
+This change will not affect you and you will continue to use the ES defaults.
+
+**Users upgrading from LS 2.x to LS 5.x with ES 5.x**
+LS will not force-upgrade the template if the `logstash` template already exists. This means you will still use
+`.raw` for sub-fields coming from 2.x. If you choose to use the new template, you will have to reindex your data after
+the new template is installed.
+
+==== Retry Policy
+
+The retry policy has changed significantly in the 7.4.0 release.
+This plugin uses the Elasticsearch bulk API to optimize its imports into Elasticsearch. These requests may experience
+either partial or total failures. The bulk API sends batches of requests to an HTTP endpoint. Error codes for the HTTP
+request are handled differently than error codes for individual documents.
+
+HTTP requests to the bulk API are expected to return a 200 response code.
All other response codes are retried indefinitely.
+
+The following document errors are handled as follows:
+
+- 400 and 404 errors are sent to the DLQ if enabled. If a DLQ is not enabled, a log message will be emitted, and the event will be dropped.
+- 409 errors (conflict) are logged as a warning and dropped.
+
+Note that 409 exceptions are no longer retried. Please set a higher `retry_on_conflict` value if you experience 409 exceptions.
+It is more performant for Elasticsearch to retry these exceptions than this plugin.
+
+==== Batch Sizes
+
+This plugin attempts to send batches of events as a single request. However, if
+a request exceeds 20MB we will break it up into multiple batch requests. If a single document exceeds 20MB it will be sent as a single request.
+
+==== DNS Caching
+
+This plugin uses the JVM to look up DNS entries and is subject to the value of https://docs.oracle.com/javase/7/docs/technotes/guides/net/properties.html[networkaddress.cache.ttl],
+a global setting for the JVM.
+
+As an example, to set your DNS TTL to 1 second you would set
+the `LS_JAVA_OPTS` environment variable to `-Dnetworkaddress.cache.ttl=1`.
+
+Keep in mind that a connection with keepalive enabled will
+not reevaluate its DNS value while the keepalive is in effect.
+
+==== HTTP Compression
+
+This plugin supports request and response compression. Response compression is enabled by default and,
+for Elasticsearch versions 5.0 and later, the user doesn't have to set any configs in Elasticsearch for
+it to send back compressed responses. For versions before 5.0, `http.compression` must be set to `true` in
+https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-http.html#modules-http[Elasticsearch] to take advantage of response compression when using this plugin.
+
+For request compression, regardless of the Elasticsearch version, users have to enable the `http_compression`
+setting in their Logstash config file.
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Elasticsearch Output Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+ +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-action>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-bulk_path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-cacert>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-doc_as_upsert>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-document_id>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-document_type>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-failure_type_logging_whitelist>> |{logstash-ref}/configuration-file-structure.html#array[array]|No +| <<{version}-plugins-{type}s-{plugin}-healthcheck_path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-hosts>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-http_compression>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-index>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-keystore>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-keystore_password>> |{logstash-ref}/configuration-file-structure.html#password[password]|No +| <<{version}-plugins-{type}s-{plugin}-manage_template>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-parameters>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No +| <<{version}-plugins-{type}s-{plugin}-parent>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-password>> |{logstash-ref}/configuration-file-structure.html#password[password]|No +| <<{version}-plugins-{type}s-{plugin}-path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-pipeline>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-pool_max>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-pool_max_per_route>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-proxy>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-resurrect_delay>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-retry_initial_interval>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-retry_max_interval>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-retry_on_conflict>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-routing>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-script>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-script_lang>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-script_type>> 
|{logstash-ref}/configuration-file-structure.html#string[string], one of `["inline", "indexed", "file"]`|No
+| <<{version}-plugins-{type}s-{plugin}-script_var_name>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-scripted_upsert>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-sniffing>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-sniffing_delay>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+| <<{version}-plugins-{type}s-{plugin}-sniffing_path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-ssl>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_verification>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-template>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-template_name>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-template_overwrite>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-timeout>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+| <<{version}-plugins-{type}s-{plugin}-truststore>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-truststore_password>> |{logstash-ref}/configuration-file-structure.html#password[password]|No
+| <<{version}-plugins-{type}s-{plugin}-upsert>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-user>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-validate_after_inactivity>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+| <<{version}-plugins-{type}s-{plugin}-version>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-version_type>> |{logstash-ref}/configuration-file-structure.html#string[string], one of `["internal", "external", "external_gt", "external_gte", "force"]`|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+output plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-action"]
+===== `action`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `"index"`
+
+The Elasticsearch action to perform. Valid actions are:
+
+- index: indexes a document (an event from Logstash).
+- delete: deletes a document by id (an id is required for this action).
+- create: indexes a document, fails if a document by that id already exists in the index.
+- update: updates a document by id. Update has a special case where you can upsert -- update a
+  document if not already present. See the `upsert` option. NOTE: This does not work and is not supported
+  in Elasticsearch 1.x. Please upgrade to ES 2.x or greater to use this feature with Logstash!
+- A sprintf style string to change the action based on the content of the event.
The value `%{[foo]}`
+  would use the foo field for the action.
+
+For more details on actions, check out the http://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html[Elasticsearch bulk API documentation].
+
+[id="{version}-plugins-{type}s-{plugin}-bulk_path"]
+===== `bulk_path`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+HTTP Path to perform the _bulk requests to.
+This defaults to a concatenation of the path parameter and "_bulk".
+
+[id="{version}-plugins-{type}s-{plugin}-cacert"]
+===== `cacert`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#path[path]
+ * There is no default value for this setting.
+
+The .cer or .pem file to validate the server's certificate.
+
+[id="{version}-plugins-{type}s-{plugin}-doc_as_upsert"]
+===== `doc_as_upsert`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `false`
+
+Enable `doc_as_upsert` for update mode.
+Create a new document with source if `document_id` doesn't exist in Elasticsearch.
+
+[id="{version}-plugins-{type}s-{plugin}-document_id"]
+===== `document_id`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+The document ID for the index. Useful for overwriting existing entries in
+Elasticsearch with the same ID.
+
+[id="{version}-plugins-{type}s-{plugin}-document_type"]
+===== `document_type`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+The document type to write events to. Generally you should try to write only
+similar events to the same 'type'. String expansion `%{foo}` works here.
+Unless you set 'document_type', the event 'type' will be used if it exists;
+otherwise, the document type will be assigned the value of 'logs'.
+
+[id="{version}-plugins-{type}s-{plugin}-failure_type_logging_whitelist"]
+===== `failure_type_logging_whitelist`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#array[array]
+ * Default value is `[]`
+
+Set the Elasticsearch errors in the whitelist that you don't want to log.
+A useful example is when you want to skip all 409 errors
+which are `document_already_exists_exception`.
+
+[id="{version}-plugins-{type}s-{plugin}-flush_size"]
+===== `flush_size` (DEPRECATED)
+
+ * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions.
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * There is no default value for this setting.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-healthcheck_path"]
+===== `healthcheck_path`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+HTTP Path where a HEAD request is sent when a backend is marked down.
+The request is sent in the background to see if it has come back again
+before it is once again eligible to service requests.
+If you have custom firewall rules you may need to change this.
+
+[id="{version}-plugins-{type}s-{plugin}-hosts"]
+===== `hosts`
+
+ * Value type is <>
+ * Default value is `[//127.0.0.1]`
+
+Sets the host(s) of the remote instance. If given an array, it will load balance requests across the hosts specified in the `hosts` parameter.
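+
+As a minimal illustration (the addresses below are placeholders), an output that load
+balances bulk requests across two nodes might look like:
+
+    output {
+      elasticsearch {
+        hosts => ["es-data-1:9200", "es-data-2:9200"]
+      }
+    }
+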
+Remember the `http` protocol uses the http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-http.html#modules-http[http] address (e.g. 9200, not 9300).
+     `"127.0.0.1"`
+     `["127.0.0.1:9200","127.0.0.2:9200"]`
+     `["http://127.0.0.1"]`
+     `["https://127.0.0.1:9200"]`
+     `["https://127.0.0.1:9200/mypath"]` (If using a proxy on a subpath)
+It is important to exclude http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-node.html[dedicated master nodes] from the `hosts` list
+to prevent LS from sending bulk requests to the master nodes. So this parameter should only reference either data or client nodes in Elasticsearch.
+
+Any special characters present in the URLs here MUST be URL escaped! This means `#` should be put in as `%23` for instance.
+
+[id="{version}-plugins-{type}s-{plugin}-http_compression"]
+===== `http_compression`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `false`
+
+Enable gzip compression on requests. Note that response compression is on by default for Elasticsearch v5.0 and beyond.
+
+[id="{version}-plugins-{type}s-{plugin}-idle_flush_time"]
+===== `idle_flush_time` (DEPRECATED)
+
+ * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions.
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `1`
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-index"]
+===== `index`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `"logstash-%{+YYYY.MM.dd}"`
+
+The index to write events to. This can be dynamic using the `%{foo}` syntax.
+The default value will partition your indices by day so you can more easily
+delete old data or only search specific date ranges.
+Indexes may not contain uppercase characters.
+For weekly indexes ISO 8601 format is recommended, e.g. logstash-%{+xxxx.ww}.
+LS uses Joda to format the index pattern from the event timestamp.
+Joda formats are defined http://www.joda.org/joda-time/apidocs/org/joda/time/format/DateTimeFormat.html[here].
+
+[id="{version}-plugins-{type}s-{plugin}-keystore"]
+===== `keystore`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#path[path]
+ * There is no default value for this setting.
+
+The keystore used to present a certificate to the server.
+It can be either .jks or .p12.
+
+[id="{version}-plugins-{type}s-{plugin}-keystore_password"]
+===== `keystore_password`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#password[password]
+ * There is no default value for this setting.
+
+Set the keystore password.
+
+[id="{version}-plugins-{type}s-{plugin}-manage_template"]
+===== `manage_template`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `true`
+
+From Logstash 1.3 onwards, a template is applied to Elasticsearch during
+Logstash's startup if one with the name `template_name` does not already exist.
+By default, the contents of this template are the default template for
+`logstash-%{+YYYY.MM.dd}` which always matches indices based on the pattern
+`logstash-*`. Should you require support for other index names, or would like
+to change the mappings in the template in general, a custom template can be
+specified by setting `template` to the path of a template file.
+
+Setting `manage_template` to false disables this feature. If you require more
+control over template creation (e.g.
creating indices dynamically based on
+field names) you should set `manage_template` to false and use the REST
+API to apply your templates manually.
+
+[id="{version}-plugins-{type}s-{plugin}-parameters"]
+===== `parameters`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash]
+ * There is no default value for this setting.
+
+Pass a set of key value pairs as the URL query string. This query string is added
+to every host listed in the 'hosts' configuration. If the 'hosts' list contains
+URLs that already have query strings, the one specified here will be appended.
+
+[id="{version}-plugins-{type}s-{plugin}-parent"]
+===== `parent`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `nil`
+
+For child documents, ID of the associated parent.
+This can be dynamic using the `%{foo}` syntax.
+
+[id="{version}-plugins-{type}s-{plugin}-password"]
+===== `password`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#password[password]
+ * There is no default value for this setting.
+
+Password to authenticate to a secure Elasticsearch cluster.
+
+[id="{version}-plugins-{type}s-{plugin}-path"]
+===== `path`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+HTTP Path at which the Elasticsearch server lives. Use this if you must run Elasticsearch behind a proxy that remaps
+the root path where the Elasticsearch HTTP API lives.
+Note that if you use paths as components of URLs in the 'hosts' field you may
+not also set this field. That will raise an error at startup.
+
+[id="{version}-plugins-{type}s-{plugin}-pipeline"]
+===== `pipeline`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `nil`
+
+Set which ingest pipeline you wish to execute for an event. You can also use event dependent configuration
+here like `pipeline => "%{INGEST_PIPELINE}"`.
+
+[id="{version}-plugins-{type}s-{plugin}-pool_max"]
+===== `pool_max`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `1000`
+
+While the output tries to reuse connections efficiently we have a maximum.
+This sets the maximum number of open connections the output will create.
+Setting this too low may mean frequently closing / opening connections
+which is bad.
+
+[id="{version}-plugins-{type}s-{plugin}-pool_max_per_route"]
+===== `pool_max_per_route`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `100`
+
+While the output tries to reuse connections efficiently we have a maximum per endpoint.
+This sets the maximum number of open connections per endpoint the output will create.
+Setting this too low may mean frequently closing / opening connections
+which is bad.
+
+[id="{version}-plugins-{type}s-{plugin}-proxy"]
+===== `proxy`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Set the address of a forward HTTP proxy.
+This used to accept hashes as arguments but now only accepts
+arguments of the URI type to prevent leaking credentials.
+
+[id="{version}-plugins-{type}s-{plugin}-resurrect_delay"]
+===== `resurrect_delay`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `5`
+
+How long, in seconds, to wait between resurrection attempts.
+Resurrection is the process by which backend endpoints marked 'down' are checked
+to see if they have come back to life.
+
+[id="{version}-plugins-{type}s-{plugin}-retry_initial_interval"]
+===== `retry_initial_interval`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `2`
+
+Set the initial interval, in seconds, between bulk retries. Doubled on each retry up to `retry_max_interval`.
+
+[id="{version}-plugins-{type}s-{plugin}-retry_max_interval"]
+===== `retry_max_interval`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `64`
+
+Set the maximum interval, in seconds, between bulk retries.
+
+[id="{version}-plugins-{type}s-{plugin}-retry_on_conflict"]
+===== `retry_on_conflict`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `1`
+
+The number of times Elasticsearch should internally retry an updated/upserted document.
+See the https://www.elastic.co/guide/en/elasticsearch/guide/current/partial-updates.html[partial updates]
+documentation for more info.
+
+[id="{version}-plugins-{type}s-{plugin}-routing"]
+===== `routing`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+A routing override to be applied to all processed events.
+This can be dynamic using the `%{foo}` syntax.
+
+[id="{version}-plugins-{type}s-{plugin}-script"]
+===== `script`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `""`
+
+Set the script name for scripted update mode.
+
+[id="{version}-plugins-{type}s-{plugin}-script_lang"]
+===== `script_lang`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `"painless"`
+
+Set the language of the used script. If not set, this defaults to painless in ES 5.0.
+
+[id="{version}-plugins-{type}s-{plugin}-script_type"]
+===== `script_type`
+
+ * Value can be any of: `inline`, `indexed`, `file`
+ * Default value is `["inline"]`
+
+Define the type of script referenced by the "script" variable:
+
+* `inline`: "script" contains an inline script.
+* `indexed`: "script" contains the name of a script directly indexed in Elasticsearch.
+* `file`: "script" contains the name of a script stored in Elasticsearch's config directory.
+
+[id="{version}-plugins-{type}s-{plugin}-script_var_name"]
+===== `script_var_name`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `"event"`
+
+Set the variable name passed to the script (scripted update).
+
+[id="{version}-plugins-{type}s-{plugin}-scripted_upsert"]
+===== `scripted_upsert`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `false`
+
+If enabled, the script is in charge of creating a non-existent document (scripted update).
+
+[id="{version}-plugins-{type}s-{plugin}-sniffing"]
+===== `sniffing`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `false`
+
+This setting asks Elasticsearch for the list of all cluster nodes and adds them to the hosts list.
+Note: This will return ALL nodes with HTTP enabled (including master nodes!). If you use
+this with master nodes, you probably want to disable HTTP on them by setting
+`http.enabled` to false in their elasticsearch.yml. You can either use the `sniffing` option or
+manually enter multiple Elasticsearch hosts using the `hosts` parameter.
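+
+As a rough sketch (the seed host below is a placeholder), sniffing needs only one
+reachable node to discover the rest of the cluster:
+
+    output {
+      elasticsearch {
+        hosts    => ["es-seed-1:9200"]  # placeholder seed node
+        sniffing => true
+      }
+    }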
+
+[id="{version}-plugins-{type}s-{plugin}-sniffing_delay"]
+===== `sniffing_delay`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `5`
+
+How long to wait, in seconds, between sniffing attempts.
+
+[id="{version}-plugins-{type}s-{plugin}-sniffing_path"]
+===== `sniffing_path`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+HTTP Path to be used for the sniffing requests.
+The default value is computed by concatenating the path value and "_nodes/http".
+If sniffing_path is set, it will be used as an absolute path.
+Do not use a full URL here, only paths, e.g. "/sniff/_nodes/http".
+
+[id="{version}-plugins-{type}s-{plugin}-ssl"]
+===== `ssl`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * There is no default value for this setting.
+
+Enable SSL/TLS secured communication to the Elasticsearch cluster. Leaving this unspecified will use whatever scheme
+is specified in the URLs listed in 'hosts'. If no explicit protocol is specified, plain HTTP will be used.
+If SSL is explicitly disabled here, the plugin will refuse to start if an HTTPS URL is given in 'hosts'.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_certificate_verification"]
+===== `ssl_certificate_verification`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `true`
+
+Option to validate the server's certificate. Disabling this severely compromises security.
+For more information on disabling certificate verification please read
+https://www.cs.utexas.edu/~shmat/shmat_ccs12.pdf
+
+[id="{version}-plugins-{type}s-{plugin}-template"]
+===== `template`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#path[path]
+ * There is no default value for this setting.
+
+You can set the path to your own template here, if you so desire.
+If not set, the included template will be used.
+
+[id="{version}-plugins-{type}s-{plugin}-template_name"]
+===== `template_name`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `"logstash"`
+
+This configuration option defines how the template is named inside Elasticsearch.
+Note that if you have used the template management features and subsequently
+change this, you will need to prune the old template manually, e.g.
+
+`curl -XDELETE `
+
+where `OldTemplateName` is whatever the former setting was.
+
+[id="{version}-plugins-{type}s-{plugin}-template_overwrite"]
+===== `template_overwrite`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `false`
+
+The template_overwrite option will always overwrite the indicated template
+in Elasticsearch with either the one indicated by template or the included one.
+This option is set to false by default. If you always want to stay up to date
+with the template provided by Logstash, this option could be very useful to you.
+Likewise, if you have your own template file managed by puppet, for example, and
+you want to be able to update it regularly, this option could help there as well.
+
+Please note that if you are using your own customized version of the Logstash
+template (logstash), setting this to true will make Logstash overwrite
+the "logstash" template (i.e.
removing all customized settings).
+
+[id="{version}-plugins-{type}s-{plugin}-timeout"]
+===== `timeout`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `60`
+
+Set the timeout, in seconds, for network operations and requests sent to Elasticsearch. If
+a timeout occurs, the request will be retried.
+
+[id="{version}-plugins-{type}s-{plugin}-truststore"]
+===== `truststore`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#path[path]
+ * There is no default value for this setting.
+
+The JKS truststore to validate the server's certificate.
+Use either `:truststore` or `:cacert`.
+
+[id="{version}-plugins-{type}s-{plugin}-truststore_password"]
+===== `truststore_password`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#password[password]
+ * There is no default value for this setting.
+
+Set the truststore password.
+
+[id="{version}-plugins-{type}s-{plugin}-upsert"]
+===== `upsert`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `""`
+
+Set upsert content for update mode.
+Create a new document with this parameter as a JSON string if `document_id` doesn't exist.
+
+[id="{version}-plugins-{type}s-{plugin}-user"]
+===== `user`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+Username to authenticate to a secure Elasticsearch cluster.
+
+[id="{version}-plugins-{type}s-{plugin}-validate_after_inactivity"]
+===== `validate_after_inactivity`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `10000`
+
+How long to wait before checking whether the connection is stale when executing a request on a connection that uses keepalive.
+You may want to set this lower if you get connection errors regularly.
+Quoting the Apache Commons docs (this client is based on Apache Commons):
+'Defines period of inactivity in milliseconds after which persistent connections must
+be re-validated prior to being leased to the consumer. Non-positive value passed to
+this method disables connection validation. This check helps detect connections that
+have become stale (half-closed) while kept inactive in the pool.'
+See https://hc.apache.org/httpcomponents-client-ga/httpclient/apidocs/org/apache/http/impl/conn/PoolingHttpClientConnectionManager.html#setValidateAfterInactivity(int)[these docs for more info]
+
+[id="{version}-plugins-{type}s-{plugin}-version"]
+===== `version`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+The version to use for indexing. Use sprintf syntax like `%{my_version}` to use a field value here.
+See https://www.elastic.co/blog/elasticsearch-versioning-support.
+
+[id="{version}-plugins-{type}s-{plugin}-version_type"]
+===== `version_type`
+
+ * Value can be any of: `internal`, `external`, `external_gt`, `external_gte`, `force`
+ * There is no default value for this setting.
+
+The version_type to use for indexing.
+See https://www.elastic.co/blog/elasticsearch-versioning-support.
+See also https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-index_.html#_version_types + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/elasticsearch-v7.4.2.asciidoc b/docs/versioned-plugins/outputs/elasticsearch-v7.4.2.asciidoc new file mode 100644 index 000000000..0bca71d1e --- /dev/null +++ b/docs/versioned-plugins/outputs/elasticsearch-v7.4.2.asciidoc @@ -0,0 +1,698 @@ +:plugin: elasticsearch +:type: output + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v7.4.2 +:release_date: 2017-09-22 +:changelog_url: https://github.com/logstash-plugins/logstash-output-elasticsearch/blob/v7.4.2/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Elasticsearch output plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +.Compatibility Note +[NOTE] +================================================================================ +Starting with Elasticsearch 5.3, there's an {ref}/modules-http.html[HTTP setting] +called `http.content_type.required`. If this option is set to `true`, and you +are using Logstash 2.4 through 5.2, you need to update the Elasticsearch output +plugin to version 6.2.5 or higher. + +================================================================================ + +This plugin is the recommended method of storing logs in Elasticsearch. +If you plan on using the Kibana web interface, you'll want to use this output. + +This output only speaks the HTTP protocol. HTTP is the preferred protocol for interacting with Elasticsearch as of Logstash 2.0. +We strongly encourage the use of HTTP over the node protocol for a number of reasons. HTTP is only marginally slower, +yet far easier to administer and work with. When using the HTTP protocol one may upgrade Elasticsearch versions without having +to upgrade Logstash in lock-step. + +You can learn more about Elasticsearch at + +==== Template management for Elasticsearch 5.x + +Index template for this version (Logstash 5.0) has been changed to reflect Elasticsearch's mapping changes in version 5.0. +Most importantly, the subfield for string multi-fields has changed from `.raw` to `.keyword` to match ES default +behavior. + +**Users installing ES 5.x and LS 5.x** + +This change will not affect you and you will continue to use the ES defaults. + +**Users upgrading from LS 2.x to LS 5.x with ES 5.x** + +LS will not force upgrade the template, if `logstash` template already exists. This means you will still use +`.raw` for sub-fields coming from 2.x. If you choose to use the new template, you will have to reindex your data after +the new template is installed. + +==== Retry Policy + +The retry policy has changed significantly in the 7.4.0 release. +This plugin uses the Elasticsearch bulk API to optimize its imports into Elasticsearch. These requests may experience +either partial or total failures. The bulk API sends batches of requests to an HTTP endpoint. Error codes for the HTTP +request are handled differently than error codes for individual documents. + +HTTP requests to the bulk API are expected to return a 200 response code. 
All other response codes are retried indefinitely.
+
+The following document errors are handled as follows:
+
+* 400 and 404 errors are sent to the dead letter queue (DLQ), if enabled. If a DLQ is not enabled, a log message will be emitted, and the event will be dropped. See <> for more info.
+* 409 errors (conflict) are logged as a warning and dropped.
+
+Note that 409 exceptions are no longer retried. Please set a higher `retry_on_conflict` value if you experience 409 exceptions.
+It is more performant for Elasticsearch to retry these exceptions than this plugin.
+
+[[dlq-policy]]
+==== DLQ Policy
+
+Mapping (404) errors from Elasticsearch can lead to data loss. Unfortunately,
+mapping errors cannot be handled without human intervention and without looking
+at the field that caused the mapping mismatch. If the DLQ is enabled, the
+original events causing the mapping errors are stored in a file that can be
+processed at a later time. Often, the offending field can be removed and
+the event re-indexed to Elasticsearch. If the DLQ is not enabled, and a mapping error
+happens, the problem is logged as a warning, and the event is dropped. See
+<> for more information about processing events in the DLQ.
+
+==== Batch Sizes
+
+This plugin attempts to send batches of events as a single request. However, if
+a request exceeds 20MB we will break it up into multiple batch requests. If a single document exceeds 20MB it will be sent as a single request.
+
+==== DNS Caching
+
+This plugin uses the JVM to look up DNS entries and is subject to the value of https://docs.oracle.com/javase/7/docs/technotes/guides/net/properties.html[networkaddress.cache.ttl],
+a global setting for the JVM.
+
+As an example, to set your DNS TTL to 1 second you would set
+the `LS_JAVA_OPTS` environment variable to `-Dnetworkaddress.cache.ttl=1`.
+
+Keep in mind that a connection with keepalive enabled will
+not reevaluate its DNS value while the keepalive is in effect.
+
+==== HTTP Compression
+
+This plugin supports request and response compression. Response compression is enabled by default and,
+for Elasticsearch versions 5.0 and later, the user doesn't have to set any configs in Elasticsearch for
+it to send back compressed responses. For versions before 5.0, `http.compression` must be set to `true` in
+https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-http.html#modules-http[Elasticsearch] to take advantage of response compression when using this plugin.
+
+For request compression, regardless of the Elasticsearch version, users have to enable the `http_compression`
+setting in their Logstash config file.
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Elasticsearch Output Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+ +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-action>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-bulk_path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-cacert>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-doc_as_upsert>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-document_id>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-document_type>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-failure_type_logging_whitelist>> |{logstash-ref}/configuration-file-structure.html#array[array]|No +| <<{version}-plugins-{type}s-{plugin}-healthcheck_path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-hosts>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-http_compression>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-index>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-keystore>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-keystore_password>> |{logstash-ref}/configuration-file-structure.html#password[password]|No +| <<{version}-plugins-{type}s-{plugin}-manage_template>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-parameters>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No +| <<{version}-plugins-{type}s-{plugin}-parent>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-password>> |{logstash-ref}/configuration-file-structure.html#password[password]|No +| <<{version}-plugins-{type}s-{plugin}-path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-pipeline>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-pool_max>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-pool_max_per_route>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-proxy>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-resurrect_delay>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-retry_initial_interval>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-retry_max_interval>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-retry_on_conflict>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-routing>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-script>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-script_lang>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-script_type>> 
|{logstash-ref}/configuration-file-structure.html#string[string], one of `["inline", "indexed", "file"]`|No
+| <<{version}-plugins-{type}s-{plugin}-script_var_name>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-scripted_upsert>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-sniffing>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-sniffing_delay>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+| <<{version}-plugins-{type}s-{plugin}-sniffing_path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-ssl>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_verification>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-template>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-template_name>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-template_overwrite>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-timeout>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+| <<{version}-plugins-{type}s-{plugin}-truststore>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-truststore_password>> |{logstash-ref}/configuration-file-structure.html#password[password]|No
+| <<{version}-plugins-{type}s-{plugin}-upsert>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-user>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-validate_after_inactivity>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+| <<{version}-plugins-{type}s-{plugin}-version>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-version_type>> |{logstash-ref}/configuration-file-structure.html#string[string], one of `["internal", "external", "external_gt", "external_gte", "force"]`|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+output plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-action"]
+===== `action`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `"index"`
+
+The Elasticsearch action to perform. Valid actions are:
+
+- index: indexes a document (an event from Logstash).
+- delete: deletes a document by id (an id is required for this action).
+- create: indexes a document, fails if a document by that id already exists in the index.
+- update: updates a document by id. Update has a special case where you can upsert -- update a
+  document if not already present. See the `upsert` option. NOTE: This does not work and is not supported
+  in Elasticsearch 1.x. Please upgrade to ES 2.x or greater to use this feature with Logstash!
+- A sprintf style string to change the action based on the content of the event.
The value `%{[foo]}`
+  would use the foo field for the action.
+
+For more details on actions, check out the http://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html[Elasticsearch bulk API documentation].
+
+[id="{version}-plugins-{type}s-{plugin}-bulk_path"]
+===== `bulk_path`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+HTTP Path to perform the _bulk requests to.
+This defaults to a concatenation of the path parameter and "_bulk".
+
+[id="{version}-plugins-{type}s-{plugin}-cacert"]
+===== `cacert`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#path[path]
+ * There is no default value for this setting.
+
+The .cer or .pem file to validate the server's certificate.
+
+[id="{version}-plugins-{type}s-{plugin}-doc_as_upsert"]
+===== `doc_as_upsert`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `false`
+
+Enable `doc_as_upsert` for update mode.
+Create a new document with source if `document_id` doesn't exist in Elasticsearch.
+
+[id="{version}-plugins-{type}s-{plugin}-document_id"]
+===== `document_id`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+The document ID for the index. Useful for overwriting existing entries in
+Elasticsearch with the same ID.
+
+[id="{version}-plugins-{type}s-{plugin}-document_type"]
+===== `document_type`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+The document type to write events to. Generally you should try to write only
+similar events to the same 'type'. String expansion `%{foo}` works here.
+Unless you set 'document_type', the event 'type' will be used if it exists;
+otherwise, the document type will be assigned the value of 'logs'.
+
+[id="{version}-plugins-{type}s-{plugin}-failure_type_logging_whitelist"]
+===== `failure_type_logging_whitelist`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#array[array]
+ * Default value is `[]`
+
+Set the Elasticsearch errors in the whitelist that you don't want to log.
+A useful example is when you want to skip all 409 errors
+which are `document_already_exists_exception`.
+
+[id="{version}-plugins-{type}s-{plugin}-flush_size"]
+===== `flush_size` (DEPRECATED)
+
+ * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions.
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * There is no default value for this setting.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-healthcheck_path"]
+===== `healthcheck_path`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+HTTP Path where a HEAD request is sent when a backend is marked down.
+The request is sent in the background to see if it has come back again
+before it is once again eligible to service requests.
+If you have custom firewall rules you may need to change this.
+
+[id="{version}-plugins-{type}s-{plugin}-hosts"]
+===== `hosts`
+
+ * Value type is <>
+ * Default value is `[//127.0.0.1]`
+
+Sets the host(s) of the remote instance. If given an array, it will load balance requests across the hosts specified in the `hosts` parameter.
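+
+As a minimal illustration (the address and subpath below are placeholders), a single
+HTTPS endpoint reached through a proxy on a subpath might be configured as:
+
+    output {
+      elasticsearch {
+        hosts => ["https://es-proxy.example.com:9200/mypath"]
+      }
+    }
+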
+Remember the `http` protocol uses the http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-http.html#modules-http[http] address (e.g. 9200, not 9300).
+     `"127.0.0.1"`
+     `["127.0.0.1:9200","127.0.0.2:9200"]`
+     `["http://127.0.0.1"]`
+     `["https://127.0.0.1:9200"]`
+     `["https://127.0.0.1:9200/mypath"]` (If using a proxy on a subpath)
+It is important to exclude http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-node.html[dedicated master nodes] from the `hosts` list
+to prevent LS from sending bulk requests to the master nodes. So this parameter should only reference either data or client nodes in Elasticsearch.
+
+Any special characters present in the URLs here MUST be URL escaped! This means `#` should be put in as `%23` for instance.
+
+[id="{version}-plugins-{type}s-{plugin}-http_compression"]
+===== `http_compression`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `false`
+
+Enable gzip compression on requests. Note that response compression is on by default for Elasticsearch v5.0 and beyond.
+
+[id="{version}-plugins-{type}s-{plugin}-idle_flush_time"]
+===== `idle_flush_time` (DEPRECATED)
+
+ * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions.
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `1`
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-index"]
+===== `index`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `"logstash-%{+YYYY.MM.dd}"`
+
+The index to write events to. This can be dynamic using the `%{foo}` syntax.
+The default value will partition your indices by day so you can more easily
+delete old data or only search specific date ranges.
+Indexes may not contain uppercase characters.
+For weekly indexes ISO 8601 format is recommended, e.g. logstash-%{+xxxx.ww}.
+LS uses Joda to format the index pattern from the event timestamp.
+Joda formats are defined http://www.joda.org/joda-time/apidocs/org/joda/time/format/DateTimeFormat.html[here].
+
+[id="{version}-plugins-{type}s-{plugin}-keystore"]
+===== `keystore`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#path[path]
+ * There is no default value for this setting.
+
+The keystore used to present a certificate to the server.
+It can be either .jks or .p12.
+
+[id="{version}-plugins-{type}s-{plugin}-keystore_password"]
+===== `keystore_password`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#password[password]
+ * There is no default value for this setting.
+
+Set the keystore password.
+
+[id="{version}-plugins-{type}s-{plugin}-manage_template"]
+===== `manage_template`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `true`
+
+From Logstash 1.3 onwards, a template is applied to Elasticsearch during
+Logstash's startup if one with the name `template_name` does not already exist.
+By default, the contents of this template are the default template for
+`logstash-%{+YYYY.MM.dd}` which always matches indices based on the pattern
+`logstash-*`. Should you require support for other index names, or would like
+to change the mappings in the template in general, a custom template can be
+specified by setting `template` to the path of a template file.
+
+Setting `manage_template` to false disables this feature. If you require more
+control over template creation (e.g.
+field names) you should set `manage_template` to false and use the REST
+API to apply your templates manually.
+
+[id="{version}-plugins-{type}s-{plugin}-parameters"]
+===== `parameters`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash]
+ * There is no default value for this setting.
+
+Pass a set of key-value pairs as the URL query string. This query string is added
+to every host listed in the 'hosts' configuration. If the 'hosts' list contains
+URLs that already have query strings, the one specified here will be appended.
+
+[id="{version}-plugins-{type}s-{plugin}-parent"]
+===== `parent`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `nil`
+
+For child documents, the ID of the associated parent.
+This can be dynamic using the `%{foo}` syntax.
+
+[id="{version}-plugins-{type}s-{plugin}-password"]
+===== `password`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#password[password]
+ * There is no default value for this setting.
+
+Password to authenticate to a secure Elasticsearch cluster.
+
+[id="{version}-plugins-{type}s-{plugin}-path"]
+===== `path`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+HTTP path at which the Elasticsearch server lives. Use this if you must run Elasticsearch behind a proxy that remaps
+the root path of the Elasticsearch HTTP API.
+Note that if you use paths as components of URLs in the 'hosts' field you may
+not also set this field. That will raise an error at startup.
+
+[id="{version}-plugins-{type}s-{plugin}-pipeline"]
+===== `pipeline`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `nil`
+
+Set which ingest pipeline you wish to execute for an event. You can also use event dependent configuration
+here, like `pipeline => "%{INGEST_PIPELINE}"`.
+
+[id="{version}-plugins-{type}s-{plugin}-pool_max"]
+===== `pool_max`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `1000`
+
+While the output tries to reuse connections efficiently, we have a maximum.
+This sets the maximum number of open connections the output will create.
+Setting this too low may mean frequently closing / opening connections,
+which is bad.
+
+[id="{version}-plugins-{type}s-{plugin}-pool_max_per_route"]
+===== `pool_max_per_route`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `100`
+
+While the output tries to reuse connections efficiently, we have a maximum per endpoint.
+This sets the maximum number of open connections per endpoint the output will create.
+Setting this too low may mean frequently closing / opening connections,
+which is bad.
+
+[id="{version}-plugins-{type}s-{plugin}-proxy"]
+===== `proxy`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#uri[uri]
+ * There is no default value for this setting.
+
+Set the address of a forward HTTP proxy.
+This option used to accept hashes as arguments, but now only accepts
+arguments of the URI type, to prevent leaking credentials.
+
+[id="{version}-plugins-{type}s-{plugin}-resurrect_delay"]
+===== `resurrect_delay`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `5`
+
+How long to wait, in seconds, between resurrection attempts.
+Resurrection is the process by which backend endpoints marked 'down' are checked
+to see if they have come back to life.
+
+[id="{version}-plugins-{type}s-{plugin}-retry_initial_interval"]
+===== `retry_initial_interval`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `2`
+
+Set the initial interval, in seconds, between bulk retries. This is doubled on each retry, up to `retry_max_interval`.
+
+[id="{version}-plugins-{type}s-{plugin}-retry_max_interval"]
+===== `retry_max_interval`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `64`
+
+Set the maximum interval, in seconds, between bulk retries.
+
+[id="{version}-plugins-{type}s-{plugin}-retry_on_conflict"]
+===== `retry_on_conflict`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `1`
+
+The number of times Elasticsearch should internally retry an update/upserted document.
+See the https://www.elastic.co/guide/en/elasticsearch/guide/current/partial-updates.html[partial updates] documentation
+for more info.
+
+[id="{version}-plugins-{type}s-{plugin}-routing"]
+===== `routing`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+A routing override to be applied to all processed events.
+This can be dynamic using the `%{foo}` syntax.
+
+[id="{version}-plugins-{type}s-{plugin}-script"]
+===== `script`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `""`
+
+Set the script name for scripted update mode.
+
+[id="{version}-plugins-{type}s-{plugin}-script_lang"]
+===== `script_lang`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `"painless"`
+
+Set the language of the used script. If not set, this defaults to painless in ES 5.0.
+
+[id="{version}-plugins-{type}s-{plugin}-script_type"]
+===== `script_type`
+
+ * Value can be any of: `inline`, `indexed`, `file`
+ * Default value is `["inline"]`
+
+Define the type of script referenced by the "script" variable:
+
+* `inline`: "script" contains an inline script
+* `indexed`: "script" contains the name of a script directly indexed in Elasticsearch
+* `file`: "script" contains the name of a script stored in Elasticsearch's config directory
+
+[id="{version}-plugins-{type}s-{plugin}-script_var_name"]
+===== `script_var_name`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `"event"`
+
+Set the variable name passed to the script (scripted update).
+
+[id="{version}-plugins-{type}s-{plugin}-scripted_upsert"]
+===== `scripted_upsert`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `false`
+
+If enabled, the script is in charge of creating a non-existent document (scripted update).
+
+[id="{version}-plugins-{type}s-{plugin}-sniffing"]
+===== `sniffing`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `false`
+
+This setting asks Elasticsearch for the list of all cluster nodes and adds them to the hosts list.
+Note: This will return ALL nodes with HTTP enabled (including master nodes!). If you use
+this with master nodes, you probably want to disable HTTP on them by setting
+`http.enabled` to false in their elasticsearch.yml. You can either use the `sniffing` option or
+manually enter multiple Elasticsearch hosts using the `hosts` parameter.
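+
+For example, the two host-discovery approaches just described would look roughly like this. This is a minimal sketch, not a recommended configuration; the addresses are placeholders:
+
+    output {
+      elasticsearch {
+        # Option A: enumerate the data/client nodes explicitly
+        hosts => ["10.0.0.1:9200", "10.0.0.2:9200"]
+
+        # Option B: give one seed node and let sniffing discover the rest
+        # hosts    => ["10.0.0.1:9200"]
+        # sniffing => true
+      }
+    }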
+
+[id="{version}-plugins-{type}s-{plugin}-sniffing_delay"]
+===== `sniffing_delay`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `5`
+
+How long to wait, in seconds, between sniffing attempts.
+
+[id="{version}-plugins-{type}s-{plugin}-sniffing_path"]
+===== `sniffing_path`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+HTTP path to be used for the sniffing requests.
+The default value is computed by concatenating the `path` value and "_nodes/http".
+If `sniffing_path` is set, it will be used as an absolute path.
+Do not use a full URL here, only paths, e.g. "/sniff/_nodes/http".
+
+[id="{version}-plugins-{type}s-{plugin}-ssl"]
+===== `ssl`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * There is no default value for this setting.
+
+Enable SSL/TLS secured communication to the Elasticsearch cluster. Leaving this unspecified will use whatever scheme
+is specified in the URLs listed in 'hosts'. If no explicit protocol is specified, plain HTTP will be used.
+If SSL is explicitly disabled here, the plugin will refuse to start if an HTTPS URL is given in 'hosts'.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_certificate_verification"]
+===== `ssl_certificate_verification`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `true`
+
+Option to validate the server's certificate. Disabling this severely compromises security.
+For more information on disabling certificate verification please read
+https://www.cs.utexas.edu/~shmat/shmat_ccs12.pdf
+
+[id="{version}-plugins-{type}s-{plugin}-template"]
+===== `template`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#path[path]
+ * There is no default value for this setting.
+
+You can set the path to your own template here, if you so desire.
+If not set, the included template will be used.
+
+[id="{version}-plugins-{type}s-{plugin}-template_name"]
+===== `template_name`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `"logstash"`
+
+This configuration option defines how the template is named inside Elasticsearch.
+Note that if you have used the template management features and subsequently
+change this, you will need to prune the old template manually, e.g.
+
+`curl -XDELETE `
+
+where `OldTemplateName` is whatever the former setting was.
+
+[id="{version}-plugins-{type}s-{plugin}-template_overwrite"]
+===== `template_overwrite`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `false`
+
+The template_overwrite option will always overwrite the indicated template
+in Elasticsearch with either the one indicated by template or the included one.
+This option is set to false by default. If you always want to stay up to date
+with the template provided by Logstash, this option could be very useful to you.
+Likewise, if you have your own template file managed by puppet, for example, and
+you wanted to be able to update it regularly, this option could help there as well.
+
+Please note that if you are using your own customized version of the Logstash
+template (logstash), setting this to true will make Logstash overwrite
+the "logstash" template (i.e. removing all customized settings).
+
+[id="{version}-plugins-{type}s-{plugin}-timeout"]
+===== `timeout`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `60`
+
+Set the timeout, in seconds, for network operations and requests sent to Elasticsearch. If
+a timeout occurs, the request will be retried.
+
+[id="{version}-plugins-{type}s-{plugin}-truststore"]
+===== `truststore`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#path[path]
+ * There is no default value for this setting.
+
+The JKS truststore to validate the server's certificate.
+Use either `:truststore` or `:cacert`.
+
+[id="{version}-plugins-{type}s-{plugin}-truststore_password"]
+===== `truststore_password`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#password[password]
+ * There is no default value for this setting.
+
+Set the truststore password.
+
+[id="{version}-plugins-{type}s-{plugin}-upsert"]
+===== `upsert`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `""`
+
+Set upsert content for update mode.
+Create a new document with this parameter as a JSON string if `document_id` doesn't exist.
+
+[id="{version}-plugins-{type}s-{plugin}-user"]
+===== `user`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+Username to authenticate to a secure Elasticsearch cluster.
+
+[id="{version}-plugins-{type}s-{plugin}-validate_after_inactivity"]
+===== `validate_after_inactivity`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `10000`
+
+How long to wait before checking whether a keepalive connection has gone stale before executing a request on it.
+You may want to set this lower if you get connection errors regularly.
+Quoting the Apache Commons docs (this client is based on Apache Commons):
+'Defines period of inactivity in milliseconds after which persistent connections must
+be re-validated prior to being leased to the consumer. Non-positive value passed to
+this method disables connection validation. This check helps detect connections that
+have become stale (half-closed) while kept inactive in the pool.'
+See https://hc.apache.org/httpcomponents-client-ga/httpclient/apidocs/org/apache/http/impl/conn/PoolingHttpClientConnectionManager.html#setValidateAfterInactivity(int)[these docs for more info]
+
+[id="{version}-plugins-{type}s-{plugin}-version"]
+===== `version`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+The version to use for indexing. Use sprintf syntax like `%{my_version}` to use a field value here.
+See https://www.elastic.co/blog/elasticsearch-versioning-support.
+
+[id="{version}-plugins-{type}s-{plugin}-version_type"]
+===== `version_type`
+
+ * Value can be any of: `internal`, `external`, `external_gt`, `external_gte`, `force`
+ * There is no default value for this setting.
+
+The version_type to use for indexing.
+See https://www.elastic.co/blog/elasticsearch-versioning-support.
+See also https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-index_.html#_version_types + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/elasticsearch-v8.0.0.asciidoc b/docs/versioned-plugins/outputs/elasticsearch-v8.0.0.asciidoc new file mode 100644 index 000000000..8c61f8211 --- /dev/null +++ b/docs/versioned-plugins/outputs/elasticsearch-v8.0.0.asciidoc @@ -0,0 +1,662 @@ +:plugin: elasticsearch +:type: output + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v8.0.0 +:release_date: 2017-08-01 +:changelog_url: https://github.com/logstash-plugins/logstash-output-elasticsearch/blob/v8.0.0/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Elasticsearch output plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +.Compatibility Note +[NOTE] +================================================================================ +Starting with Elasticsearch 5.3, there's an {ref}modules-http.html[HTTP setting] +called `http.content_type.required`. If this option is set to `true`, and you +are using Logstash 2.4 through 5.2, you need to update the Elasticsearch output +plugin to version 6.2.5 or higher. + +================================================================================ + +This plugin is the recommended method of storing logs in Elasticsearch. +If you plan on using the Kibana web interface, you'll want to use this output. + +This output only speaks the HTTP protocol. HTTP is the preferred protocol for interacting with Elasticsearch as of Logstash 2.0. +We strongly encourage the use of HTTP over the node protocol for a number of reasons. HTTP is only marginally slower, +yet far easier to administer and work with. When using the HTTP protocol one may upgrade Elasticsearch versions without having +to upgrade Logstash in lock-step. + +You can learn more about Elasticsearch at + +==== Template management for Elasticsearch 5.x +Index template for this version (Logstash 5.0) has been changed to reflect Elasticsearch's mapping changes in version 5.0. +Most importantly, the subfield for string multi-fields has changed from `.raw` to `.keyword` to match ES default +behavior. + +** Users installing ES 5.x and LS 5.x ** +This change will not affect you and you will continue to use the ES defaults. + +** Users upgrading from LS 2.x to LS 5.x with ES 5.x ** +LS will not force upgrade the template, if `logstash` template already exists. This means you will still use +`.raw` for sub-fields coming from 2.x. If you choose to use the new template, you will have to reindex your data after +the new template is installed. + +==== Retry Policy + +The retry policy has changed significantly in the 2.2.0 release. +This plugin uses the Elasticsearch bulk API to optimize its imports into Elasticsearch. These requests may experience +either partial or total failures. + +The following errors are retried infinitely: + +- Network errors (inability to connect) +- 429 (Too many requests) and +- 503 (Service unavailable) errors + +NOTE: 409 exceptions are no longer retried. Please set a higher `retry_on_conflict` value if you experience 409 exceptions. 
+It is more performant for Elasticsearch to retry these exceptions internally than for this plugin to retry them.
+
+==== Batch Sizes
+This plugin attempts to send batches of events as a single request. However, if
+a request exceeds 20MB we will break it up into multiple batch requests. If a single document exceeds 20MB it will be sent as a single request.
+
+==== DNS Caching
+
+This plugin uses the JVM to look up DNS entries and is subject to the value of https://docs.oracle.com/javase/7/docs/technotes/guides/net/properties.html[networkaddress.cache.ttl],
+a global setting for the JVM.
+
+As an example, to set your DNS TTL to 1 second you would set
+the `LS_JAVA_OPTS` environment variable to `-Dnetworkaddress.cache.ttl=1`.
+
+Keep in mind that a connection with keepalive enabled will
+not reevaluate its DNS value while the keepalive is in effect.
+
+==== HTTP Compression
+
+This plugin supports request and response compression. Response compression is enabled by default and,
+for Elasticsearch versions 5.0 and later, the user doesn't have to set any configs in Elasticsearch for
+it to send back a compressed response. For versions before 5.0, `http.compression` must be set to `true` in
+https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-http.html#modules-http[Elasticsearch] to take advantage of response compression when using this plugin.
+
+For request compression, regardless of the Elasticsearch version, users have to enable the `http_compression`
+setting in their Logstash config file.
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Elasticsearch Output Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-action>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-bulk_path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-cacert>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-doc_as_upsert>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-document_id>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-document_type>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-failure_type_logging_whitelist>> |{logstash-ref}/configuration-file-structure.html#array[array]|No
+| <<{version}-plugins-{type}s-{plugin}-healthcheck_path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-hosts>> |{logstash-ref}/configuration-file-structure.html#uri[uri]|No
+| <<{version}-plugins-{type}s-{plugin}-http_compression>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-index>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-keystore>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-keystore_password>> |{logstash-ref}/configuration-file-structure.html#password[password]|No
+| <<{version}-plugins-{type}s-{plugin}-manage_template>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-parameters>>
|{logstash-ref}/configuration-file-structure.html#hash[hash]|No +| <<{version}-plugins-{type}s-{plugin}-parent>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-password>> |{logstash-ref}/configuration-file-structure.html#password[password]|No +| <<{version}-plugins-{type}s-{plugin}-path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-pipeline>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-pool_max>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-pool_max_per_route>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-proxy>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-resurrect_delay>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-retry_initial_interval>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-retry_max_interval>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-retry_on_conflict>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-routing>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-script>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-script_lang>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-script_type>> |{logstash-ref}/configuration-file-structure.html#string[string], one of `["inline", "indexed", "file"]`|No +| <<{version}-plugins-{type}s-{plugin}-script_var_name>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-scripted_upsert>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-sniffing>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-sniffing_delay>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-sniffing_path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-ssl>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_verification>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-template>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-template_name>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-template_overwrite>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-timeout>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-truststore>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-truststore_password>> |{logstash-ref}/configuration-file-structure.html#password[password]|No +| <<{version}-plugins-{type}s-{plugin}-upsert>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| 
<<{version}-plugins-{type}s-{plugin}-user>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-validate_after_inactivity>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-version>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-version_type>> |{logstash-ref}/configuration-file-structure.html#string[string], one of `["internal", "external", "external_gt", "external_gte", "force"]`|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +output plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-action"] +===== `action` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * Default value is `"index"` + +Protocol agnostic (i.e. non-http, non-java specific) configs go here +Protocol agnostic methods +The Elasticsearch action to perform. Valid actions are: + +- index: indexes a document (an event from Logstash). +- delete: deletes a document by id (An id is required for this action) +- create: indexes a document, fails if a document by that id already exists in the index. +- update: updates a document by id. Update has a special case where you can upsert -- update a + document if not already present. See the `upsert` option. NOTE: This does not work and is not supported + in Elasticsearch 1.x. Please upgrade to ES 2.x or greater to use this feature with Logstash! +- A sprintf style string to change the action based on the content of the event. The value `%{[foo]}` + would use the foo field for the action + +For more details on actions, check out the http://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html[Elasticsearch bulk API documentation] + +[id="{version}-plugins-{type}s-{plugin}-bulk_path"] +===== `bulk_path` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * There is no default value for this setting. + +HTTP Path to perform the _bulk requests to +this defaults to a concatenation of the path parameter and "_bulk" + +[id="{version}-plugins-{type}s-{plugin}-cacert"] +===== `cacert` + + * Value type is {logstash-ref}/configuration-file-structure.html#path[path] + * There is no default value for this setting. + +The .cer or .pem file to validate the server's certificate + +[id="{version}-plugins-{type}s-{plugin}-doc_as_upsert"] +===== `doc_as_upsert` + + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] + * Default value is `false` + +Enable `doc_as_upsert` for update mode. +Create a new document with source if `document_id` doesn't exist in Elasticsearch + +[id="{version}-plugins-{type}s-{plugin}-document_id"] +===== `document_id` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * There is no default value for this setting. + +The document ID for the index. Useful for overwriting existing entries in +Elasticsearch with the same ID. + +[id="{version}-plugins-{type}s-{plugin}-document_type"] +===== `document_type` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * There is no default value for this setting. + +The document type to write events to. Generally you should try to write only +similar events to the same 'type'. String expansion `%{foo}` works here. 
+Unless you set 'document_type', the event 'type' will be used if it exists +otherwise the document type will be assigned the value of 'logs' + +[id="{version}-plugins-{type}s-{plugin}-failure_type_logging_whitelist"] +===== `failure_type_logging_whitelist` + + * Value type is {logstash-ref}/configuration-file-structure.html#array[array] + * Default value is `[]` + +Set the Elasticsearch errors in the whitelist that you don't want to log. +A useful example is when you want to skip all 409 errors +which are `document_already_exists_exception`. + +[id="{version}-plugins-{type}s-{plugin}-healthcheck_path"] +===== `healthcheck_path` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * There is no default value for this setting. + +HTTP Path where a HEAD request is sent when a backend is marked down +the request is sent in the background to see if it has come back again +before it is once again eligible to service requests. +If you have custom firewall rules you may need to change this + +[id="{version}-plugins-{type}s-{plugin}-hosts"] +===== `hosts` + + * Value type is <> + * Default value is `[//127.0.0.1]` + +Sets the host(s) of the remote instance. If given an array it will load balance requests across the hosts specified in the `hosts` parameter. +Remember the `http` protocol uses the http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-http.html#modules-http[http] address (eg. 9200, not 9300). + `"127.0.0.1"` + `["127.0.0.1:9200","127.0.0.2:9200"]` + `["http://127.0.0.1"]` + `["https://127.0.0.1:9200"]` + `["https://127.0.0.1:9200/mypath"]` (If using a proxy on a subpath) +It is important to exclude http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-node.html[dedicated master nodes] from the `hosts` list +to prevent LS from sending bulk requests to the master nodes. So this parameter should only reference either data or client nodes in Elasticsearch. + +Any special characters present in the URLs here MUST be URL escaped! This means `#` should be put in as `%23` for instance. + +[id="{version}-plugins-{type}s-{plugin}-http_compression"] +===== `http_compression` + + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] + * Default value is `false` + +Enable gzip compression on requests. Note that response compression is on by default for Elasticsearch v5.0 and beyond + +[id="{version}-plugins-{type}s-{plugin}-index"] +===== `index` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * Default value is `"logstash-%{+YYYY.MM.dd}"` + +The index to write events to. This can be dynamic using the `%{foo}` syntax. +The default value will partition your indices by day so you can more easily +delete old data or only search specific date ranges. +Indexes may not contain uppercase characters. +For weekly indexes ISO 8601 format is recommended, eg. logstash-%{+xxxx.ww}. +LS uses Joda to format the index pattern from event timestamp. +Joda formats are defined http://www.joda.org/joda-time/apidocs/org/joda/time/format/DateTimeFormat.html[here]. + +[id="{version}-plugins-{type}s-{plugin}-keystore"] +===== `keystore` + + * Value type is {logstash-ref}/configuration-file-structure.html#path[path] + * There is no default value for this setting. + +The keystore used to present a certificate to the server. 
+It can be either .jks or .p12 + +[id="{version}-plugins-{type}s-{plugin}-keystore_password"] +===== `keystore_password` + + * Value type is {logstash-ref}/configuration-file-structure.html#password[password] + * There is no default value for this setting. + +Set the keystore password + +[id="{version}-plugins-{type}s-{plugin}-manage_template"] +===== `manage_template` + + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] + * Default value is `true` + +From Logstash 1.3 onwards, a template is applied to Elasticsearch during +Logstash's startup if one with the name `template_name` does not already exist. +By default, the contents of this template is the default template for +`logstash-%{+YYYY.MM.dd}` which always matches indices based on the pattern +`logstash-*`. Should you require support for other index names, or would like +to change the mappings in the template in general, a custom template can be +specified by setting `template` to the path of a template file. + +Setting `manage_template` to false disables this feature. If you require more +control over template creation, (e.g. creating indices dynamically based on +field names) you should set `manage_template` to false and use the REST +API to apply your templates manually. + +[id="{version}-plugins-{type}s-{plugin}-parameters"] +===== `parameters` + + * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash] + * There is no default value for this setting. + +Pass a set of key value pairs as the URL query string. This query string is added +to every host listed in the 'hosts' configuration. If the 'hosts' list contains +urls that already have query strings, the one specified here will be appended. + +[id="{version}-plugins-{type}s-{plugin}-parent"] +===== `parent` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * Default value is `nil` + +For child documents, ID of the associated parent. +This can be dynamic using the `%{foo}` syntax. + +[id="{version}-plugins-{type}s-{plugin}-password"] +===== `password` + + * Value type is {logstash-ref}/configuration-file-structure.html#password[password] + * There is no default value for this setting. + +Password to authenticate to a secure Elasticsearch cluster + +[id="{version}-plugins-{type}s-{plugin}-path"] +===== `path` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * There is no default value for this setting. + +HTTP Path at which the Elasticsearch server lives. Use this if you must run Elasticsearch behind a proxy that remaps +the root path for the Elasticsearch HTTP API lives. +Note that if you use paths as components of URLs in the 'hosts' field you may +not also set this field. That will raise an error at startup + +[id="{version}-plugins-{type}s-{plugin}-pipeline"] +===== `pipeline` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * Default value is `nil` + +Set which ingest pipeline you wish to execute for an event. You can also use event dependent configuration +here like `pipeline => "%{INGEST_PIPELINE}"` + +[id="{version}-plugins-{type}s-{plugin}-pool_max"] +===== `pool_max` + + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] + * Default value is `1000` + +While the output tries to reuse connections efficiently we have a maximum. +This sets the maximum number of open connections the output will create. +Setting this too low may mean frequently closing / opening connections +which is bad. 
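+
+Several of the connection options above typically travel together. As a hedged sketch, assuming a password-protected cluster reached through a proxy subpath (the URL, subpath, and credentials below are placeholders, not defaults):
+
+    output {
+      elasticsearch {
+        hosts    => ["https://es.example.org:9200"]  # placeholder URL
+        path     => "/es"                            # Elasticsearch is proxied under a subpath
+        user     => "logstash_writer"                # placeholder credentials; `user` is documented below
+        password => "changeme"
+        pipeline => "%{INGEST_PIPELINE}"             # event-dependent ingest pipeline, as noted above
+      }
+    }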
+ +[id="{version}-plugins-{type}s-{plugin}-pool_max_per_route"] +===== `pool_max_per_route` + + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] + * Default value is `100` + +While the output tries to reuse connections efficiently we have a maximum per endpoint. +This sets the maximum number of open connections per endpoint the output will create. +Setting this too low may mean frequently closing / opening connections +which is bad. + +[id="{version}-plugins-{type}s-{plugin}-proxy"] +===== `proxy` + + * Value type is <> + * There is no default value for this setting. + +Set the address of a forward HTTP proxy. +This used to accept hashes as arguments but now only accepts +arguments of the URI type to prevent leaking credentials. + +[id="{version}-plugins-{type}s-{plugin}-resurrect_delay"] +===== `resurrect_delay` + + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] + * Default value is `5` + +How frequently, in seconds, to wait between resurrection attempts. +Resurrection is the process by which backend endpoints marked 'down' are checked +to see if they have come back to life + +[id="{version}-plugins-{type}s-{plugin}-retry_initial_interval"] +===== `retry_initial_interval` + + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] + * Default value is `2` + +Set initial interval in seconds between bulk retries. Doubled on each retry up to `retry_max_interval` + +[id="{version}-plugins-{type}s-{plugin}-retry_max_interval"] +===== `retry_max_interval` + + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] + * Default value is `64` + +Set max interval in seconds between bulk retries. + +[id="{version}-plugins-{type}s-{plugin}-retry_on_conflict"] +===== `retry_on_conflict` + + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] + * Default value is `1` + +The number of times Elasticsearch should internally retry an update/upserted document +See the https://www.elastic.co/guide/en/elasticsearch/guide/current/partial-updates.html[partial updates] +for more info + +[id="{version}-plugins-{type}s-{plugin}-routing"] +===== `routing` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * There is no default value for this setting. + +A routing override to be applied to all processed events. +This can be dynamic using the `%{foo}` syntax. + +[id="{version}-plugins-{type}s-{plugin}-script"] +===== `script` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * Default value is `""` + +Set script name for scripted update mode + +[id="{version}-plugins-{type}s-{plugin}-script_lang"] +===== `script_lang` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * Default value is `"painless"` + +Set the language of the used script. 
If not set, this defaults to painless in ES 5.0 + +[id="{version}-plugins-{type}s-{plugin}-script_type"] +===== `script_type` + + * Value can be any of: `inline`, `indexed`, `file` + * Default value is `["inline"]` + +Define the type of script referenced by "script" variable + inline : "script" contains inline script + indexed : "script" contains the name of script directly indexed in elasticsearch + file : "script" contains the name of script stored in elasticseach's config directory + +[id="{version}-plugins-{type}s-{plugin}-script_var_name"] +===== `script_var_name` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * Default value is `"event"` + +Set variable name passed to script (scripted update) + +[id="{version}-plugins-{type}s-{plugin}-scripted_upsert"] +===== `scripted_upsert` + + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] + * Default value is `false` + +if enabled, script is in charge of creating non-existent document (scripted update) + +[id="{version}-plugins-{type}s-{plugin}-sniffing"] +===== `sniffing` + + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] + * Default value is `false` + +This setting asks Elasticsearch for the list of all cluster nodes and adds them to the hosts list. +Note: This will return ALL nodes with HTTP enabled (including master nodes!). If you use +this with master nodes, you probably want to disable HTTP on them by setting +`http.enabled` to false in their elasticsearch.yml. You can either use the `sniffing` option or +manually enter multiple Elasticsearch hosts using the `hosts` parameter. + +[id="{version}-plugins-{type}s-{plugin}-sniffing_delay"] +===== `sniffing_delay` + + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] + * Default value is `5` + +How long to wait, in seconds, between sniffing attempts + +[id="{version}-plugins-{type}s-{plugin}-sniffing_path"] +===== `sniffing_path` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * There is no default value for this setting. + +HTTP Path to be used for the sniffing requests +the default value is computed by concatenating the path value and "_nodes/http" +if sniffing_path is set it will be used as an absolute path +do not use full URL here, only paths, e.g. "/sniff/_nodes/http" + +[id="{version}-plugins-{type}s-{plugin}-ssl"] +===== `ssl` + + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] + * There is no default value for this setting. + +Enable SSL/TLS secured communication to Elasticsearch cluster. Leaving this unspecified will use whatever scheme +is specified in the URLs listed in 'hosts'. If no explicit protocol is specified plain HTTP will be used. +If SSL is explicitly disabled here the plugin will refuse to start if an HTTPS URL is given in 'hosts' + +[id="{version}-plugins-{type}s-{plugin}-ssl_certificate_verification"] +===== `ssl_certificate_verification` + + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] + * Default value is `true` + +Option to validate the server's certificate. Disabling this severely compromises security. +For more information on disabling certificate verification please read +https://www.cs.utexas.edu/~shmat/shmat_ccs12.pdf + +[id="{version}-plugins-{type}s-{plugin}-template"] +===== `template` + + * Value type is {logstash-ref}/configuration-file-structure.html#path[path] + * There is no default value for this setting. 
+
+You can set the path to your own template here, if you so desire.
+If not set, the included template will be used.
+
+[id="{version}-plugins-{type}s-{plugin}-template_name"]
+===== `template_name`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `"logstash"`
+
+This configuration option defines how the template is named inside Elasticsearch.
+Note that if you have used the template management features and subsequently
+change this, you will need to prune the old template manually, e.g.
+
+`curl -XDELETE `
+
+where `OldTemplateName` is whatever the former setting was.
+
+[id="{version}-plugins-{type}s-{plugin}-template_overwrite"]
+===== `template_overwrite`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `false`
+
+The template_overwrite option will always overwrite the indicated template
+in Elasticsearch with either the one indicated by template or the included one.
+This option is set to false by default. If you always want to stay up to date
+with the template provided by Logstash, this option could be very useful to you.
+Likewise, if you have your own template file managed by puppet, for example, and
+you wanted to be able to update it regularly, this option could help there as well.
+
+Please note that if you are using your own customized version of the Logstash
+template (logstash), setting this to true will make Logstash overwrite
+the "logstash" template (i.e. removing all customized settings).
+
+[id="{version}-plugins-{type}s-{plugin}-timeout"]
+===== `timeout`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `60`
+
+Set the timeout, in seconds, for network operations and requests sent to Elasticsearch. If
+a timeout occurs, the request will be retried.
+
+[id="{version}-plugins-{type}s-{plugin}-truststore"]
+===== `truststore`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#path[path]
+ * There is no default value for this setting.
+
+The JKS truststore to validate the server's certificate.
+Use either `:truststore` or `:cacert`.
+
+[id="{version}-plugins-{type}s-{plugin}-truststore_password"]
+===== `truststore_password`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#password[password]
+ * There is no default value for this setting.
+
+Set the truststore password.
+
+[id="{version}-plugins-{type}s-{plugin}-upsert"]
+===== `upsert`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `""`
+
+Set upsert content for update mode.
+Create a new document with this parameter as a JSON string if `document_id` doesn't exist.
+
+[id="{version}-plugins-{type}s-{plugin}-user"]
+===== `user`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+Username to authenticate to a secure Elasticsearch cluster.
+
+[id="{version}-plugins-{type}s-{plugin}-validate_after_inactivity"]
+===== `validate_after_inactivity`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `10000`
+
+How long to wait before checking whether a keepalive connection has gone stale before executing a request on it.
+You may want to set this lower, if you get connection errors regularly +Quoting the Apache commons docs (this client is based Apache Commmons): +'Defines period of inactivity in milliseconds after which persistent connections must +be re-validated prior to being leased to the consumer. Non-positive value passed to +this method disables connection validation. This check helps detect connections that +have become stale (half-closed) while kept inactive in the pool.' +See https://hc.apache.org/httpcomponents-client-ga/httpclient/apidocs/org/apache/http/impl/conn/PoolingHttpClientConnectionManager.html#setValidateAfterInactivity(int)[these docs for more info] + +[id="{version}-plugins-{type}s-{plugin}-version"] +===== `version` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * There is no default value for this setting. + +The version to use for indexing. Use sprintf syntax like `%{my_version}` to use a field value here. +See https://www.elastic.co/blog/elasticsearch-versioning-support. + +[id="{version}-plugins-{type}s-{plugin}-version_type"] +===== `version_type` + + * Value can be any of: `internal`, `external`, `external_gt`, `external_gte`, `force` + * There is no default value for this setting. + +The version_type to use for indexing. +See https://www.elastic.co/blog/elasticsearch-versioning-support. +See also https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-index_.html#_version_types + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/elasticsearch-v8.0.1.asciidoc b/docs/versioned-plugins/outputs/elasticsearch-v8.0.1.asciidoc new file mode 100644 index 000000000..9c07dd9dd --- /dev/null +++ b/docs/versioned-plugins/outputs/elasticsearch-v8.0.1.asciidoc @@ -0,0 +1,662 @@ +:plugin: elasticsearch +:type: output + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v8.0.1 +:release_date: 2017-08-16 +:changelog_url: https://github.com/logstash-plugins/logstash-output-elasticsearch/blob/v8.0.1/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Elasticsearch output plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +.Compatibility Note +[NOTE] +================================================================================ +Starting with Elasticsearch 5.3, there's an {ref}modules-http.html[HTTP setting] +called `http.content_type.required`. If this option is set to `true`, and you +are using Logstash 2.4 through 5.2, you need to update the Elasticsearch output +plugin to version 6.2.5 or higher. + +================================================================================ + +This plugin is the recommended method of storing logs in Elasticsearch. +If you plan on using the Kibana web interface, you'll want to use this output. + +This output only speaks the HTTP protocol. HTTP is the preferred protocol for interacting with Elasticsearch as of Logstash 2.0. +We strongly encourage the use of HTTP over the node protocol for a number of reasons. HTTP is only marginally slower, +yet far easier to administer and work with. 
When using the HTTP protocol one may upgrade Elasticsearch versions without having
+to upgrade Logstash in lock-step.
+
+You can learn more about Elasticsearch at
+
+==== Template management for Elasticsearch 5.x
+The index template for this version (Logstash 5.0) has been changed to reflect Elasticsearch's mapping changes in version 5.0.
+Most importantly, the subfield for string multi-fields has changed from `.raw` to `.keyword` to match ES default
+behavior.
+
+**Users installing ES 5.x and LS 5.x**
+This change will not affect you and you will continue to use the ES defaults.
+
+**Users upgrading from LS 2.x to LS 5.x with ES 5.x**
+LS will not force-upgrade the template if a `logstash` template already exists. This means you will still use
+`.raw` for sub-fields coming from 2.x. If you choose to use the new template, you will have to reindex your data after
+the new template is installed.
+
+==== Retry Policy
+
+The retry policy has changed significantly in the 2.2.0 release.
+This plugin uses the Elasticsearch bulk API to optimize its imports into Elasticsearch. These requests may experience
+either partial or total failures.
+
+The following errors are retried infinitely:
+
+- Network errors (inability to connect)
+- 429 (Too many requests) and
+- 503 (Service unavailable) errors
+
+NOTE: 409 exceptions are no longer retried. Please set a higher `retry_on_conflict` value if you experience 409 exceptions.
+It is more performant for Elasticsearch to retry these exceptions internally than for this plugin to retry them.
+
+==== Batch Sizes
+This plugin attempts to send batches of events as a single request. However, if
+a request exceeds 20MB we will break it up into multiple batch requests. If a single document exceeds 20MB it will be sent as a single request.
+
+==== DNS Caching
+
+This plugin uses the JVM to look up DNS entries and is subject to the value of https://docs.oracle.com/javase/7/docs/technotes/guides/net/properties.html[networkaddress.cache.ttl],
+a global setting for the JVM.
+
+As an example, to set your DNS TTL to 1 second you would set
+the `LS_JAVA_OPTS` environment variable to `-Dnetworkaddress.cache.ttl=1`.
+
+Keep in mind that a connection with keepalive enabled will
+not reevaluate its DNS value while the keepalive is in effect.
+
+==== HTTP Compression
+
+This plugin supports request and response compression. Response compression is enabled by default and,
+for Elasticsearch versions 5.0 and later, the user doesn't have to set any configs in Elasticsearch for
+it to send back a compressed response. For versions before 5.0, `http.compression` must be set to `true` in
+https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-http.html#modules-http[Elasticsearch] to take advantage of response compression when using this plugin.
+
+For request compression, regardless of the Elasticsearch version, users have to enable the `http_compression`
+setting in their Logstash config file.
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Elasticsearch Output Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
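+
+As a quick orientation before the reference table, here is a minimal, hypothetical configuration exercising a few of the options documented below (the host address and index name are placeholders):
+
+    output {
+      elasticsearch {
+        hosts            => ["127.0.0.1:9200"]
+        index            => "app-logs-%{+YYYY.MM.dd}"  # sprintf-style date formatting, as described below
+        http_compression => true                       # gzip request bodies (see HTTP Compression above)
+      }
+    }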
+ +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-action>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-bulk_path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-cacert>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-doc_as_upsert>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-document_id>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-document_type>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-failure_type_logging_whitelist>> |{logstash-ref}/configuration-file-structure.html#array[array]|No +| <<{version}-plugins-{type}s-{plugin}-healthcheck_path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-hosts>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-http_compression>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-index>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-keystore>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-keystore_password>> |{logstash-ref}/configuration-file-structure.html#password[password]|No +| <<{version}-plugins-{type}s-{plugin}-manage_template>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-parameters>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No +| <<{version}-plugins-{type}s-{plugin}-parent>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-password>> |{logstash-ref}/configuration-file-structure.html#password[password]|No +| <<{version}-plugins-{type}s-{plugin}-path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-pipeline>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-pool_max>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-pool_max_per_route>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-proxy>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-resurrect_delay>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-retry_initial_interval>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-retry_max_interval>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-retry_on_conflict>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-routing>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-script>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-script_lang>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-script_type>> 
|{logstash-ref}/configuration-file-structure.html#string[string], one of `["inline", "indexed", "file"]`|No +| <<{version}-plugins-{type}s-{plugin}-script_var_name>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-scripted_upsert>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-sniffing>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-sniffing_delay>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-sniffing_path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-ssl>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_verification>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-template>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-template_name>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-template_overwrite>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-timeout>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-truststore>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-truststore_password>> |{logstash-ref}/configuration-file-structure.html#password[password]|No +| <<{version}-plugins-{type}s-{plugin}-upsert>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-user>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-validate_after_inactivity>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-version>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-version_type>> |{logstash-ref}/configuration-file-structure.html#string[string], one of `["internal", "external", "external_gt", "external_gte", "force"]`|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +output plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-action"] +===== `action` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * Default value is `"index"` + +Protocol agnostic (i.e. non-http, non-java specific) configs go here +Protocol agnostic methods +The Elasticsearch action to perform. Valid actions are: + +- index: indexes a document (an event from Logstash). +- delete: deletes a document by id (An id is required for this action) +- create: indexes a document, fails if a document by that id already exists in the index. +- update: updates a document by id. Update has a special case where you can upsert -- update a + document if not already present. See the `upsert` option. NOTE: This does not work and is not supported + in Elasticsearch 1.x. Please upgrade to ES 2.x or greater to use this feature with Logstash! +- A sprintf style string to change the action based on the content of the event. 
+
+[id="{version}-plugins-{type}s-{plugin}-bulk_path"]
+===== `bulk_path`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+HTTP path at which to send the _bulk requests. This defaults to a concatenation
+of the `path` parameter and "_bulk".
+
+[id="{version}-plugins-{type}s-{plugin}-cacert"]
+===== `cacert`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#path[path]
+ * There is no default value for this setting.
+
+The .cer or .pem file to validate the server's certificate.
+
+[id="{version}-plugins-{type}s-{plugin}-doc_as_upsert"]
+===== `doc_as_upsert`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `false`
+
+Enable `doc_as_upsert` for update mode.
+Creates a new document from the event source if `document_id` does not exist in Elasticsearch.
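+
+For example, a minimal sketch of an update-with-upsert configuration (the
+`[fingerprint]` field used as the document id is hypothetical):
+
+    output {
+      elasticsearch {
+        action        => "update"
+        document_id   => "%{[fingerprint]}"  # hypothetical unique-id field
+        doc_as_upsert => true                # index the event if the id is absent
+      }
+    }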
+
+[id="{version}-plugins-{type}s-{plugin}-document_id"]
+===== `document_id`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+The document ID for the index. Useful for overwriting existing entries in
+Elasticsearch with the same ID.
+
+[id="{version}-plugins-{type}s-{plugin}-document_type"]
+===== `document_type`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+The document type to write events to. Generally you should try to write only
+similar events to the same 'type'. String expansion `%{foo}` works here.
+If you do not set 'document_type', the event's 'type' field is used if it exists;
+otherwise the document type is assigned the value 'logs'.
+
+[id="{version}-plugins-{type}s-{plugin}-failure_type_logging_whitelist"]
+===== `failure_type_logging_whitelist`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#array[array]
+ * Default value is `[]`
+
+Set the Elasticsearch error types that you do not want to be logged.
+A useful example is when you want to skip all 409 errors,
+which are `document_already_exists_exception`.
+
+[id="{version}-plugins-{type}s-{plugin}-healthcheck_path"]
+===== `healthcheck_path`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+HTTP path at which a HEAD request is sent when a backend is marked down.
+The request is sent in the background to see whether the backend has come back,
+before it is once again eligible to service requests.
+If you have custom firewall rules you may need to change this.
+
+[id="{version}-plugins-{type}s-{plugin}-hosts"]
+===== `hosts`
+
+ * Value type is <>
+ * Default value is `[//127.0.0.1]`
+
+Sets the host(s) of the remote instance. If given an array it will load balance requests across the hosts specified in the `hosts` parameter.
+Remember the `http` protocol uses the http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-http.html#modules-http[http] address (e.g. 9200, not 9300). Examples:
+
+    `"127.0.0.1"`
+    `["127.0.0.1:9200","127.0.0.2:9200"]`
+    `["http://127.0.0.1"]`
+    `["https://127.0.0.1:9200"]`
+    `["https://127.0.0.1:9200/mypath"]` (if using a proxy on a subpath)
+
+It is important to exclude http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-node.html[dedicated master nodes] from the `hosts` list
+to prevent LS from sending bulk requests to the master nodes, so this parameter should only reference data or client nodes in Elasticsearch.
+
+Any special characters present in the URLs here MUST be URL escaped! For instance, `#` should be entered as `%23`.
+
+[id="{version}-plugins-{type}s-{plugin}-http_compression"]
+===== `http_compression`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `false`
+
+Enable gzip compression on requests. Note that response compression is on by default for Elasticsearch v5.0 and beyond.
+
+[id="{version}-plugins-{type}s-{plugin}-index"]
+===== `index`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `"logstash-%{+YYYY.MM.dd}"`
+
+The index to write events to. This can be dynamic using the `%{foo}` syntax.
+The default value will partition your indices by day so you can more easily
+delete old data or only search specific date ranges.
+Index names may not contain uppercase characters.
+For weekly indices the ISO 8601 week-date format is recommended, e.g. `logstash-%{+xxxx.ww}`.
+LS uses Joda to format the index pattern from the event timestamp.
+Joda formats are defined http://www.joda.org/joda-time/apidocs/org/joda/time/format/DateTimeFormat.html[here].
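+
+For example, a minimal sketch of the weekly ISO week-date pattern mentioned above:
+
+    output {
+      elasticsearch {
+        index => "logstash-%{+xxxx.ww}"  # e.g. logstash-2017.38 (ISO week of year)
+      }
+    }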
+
+[id="{version}-plugins-{type}s-{plugin}-keystore"]
+===== `keystore`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#path[path]
+ * There is no default value for this setting.
+
+The keystore used to present a certificate to the server.
+It can be either .jks or .p12.
+
+[id="{version}-plugins-{type}s-{plugin}-keystore_password"]
+===== `keystore_password`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#password[password]
+ * There is no default value for this setting.
+
+Set the keystore password.
+
+[id="{version}-plugins-{type}s-{plugin}-manage_template"]
+===== `manage_template`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `true`
+
+From Logstash 1.3 onwards, a template is applied to Elasticsearch during
+Logstash's startup if one with the name `template_name` does not already exist.
+By default, the contents of this template are the default template for
+`logstash-%{+YYYY.MM.dd}`, which always matches indices based on the pattern
+`logstash-*`. Should you require support for other index names, or would like
+to change the mappings in the template in general, a custom template can be
+specified by setting `template` to the path of a template file.
+
+Setting `manage_template` to false disables this feature. If you require more
+control over template creation (e.g. creating indices dynamically based on
+field names), you should set `manage_template` to false and use the REST
+API to apply your templates manually.
+
+[id="{version}-plugins-{type}s-{plugin}-parameters"]
+===== `parameters`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash]
+ * There is no default value for this setting.
+
+Pass a set of key/value pairs as the URL query string. This query string is added
+to every host listed in the 'hosts' configuration. If the 'hosts' list contains
+URLs that already have query strings, the one specified here will be appended.
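+
+For example, a minimal sketch that appends a fixed query string to every request
+(the parameter name and value are purely illustrative):
+
+    output {
+      elasticsearch {
+        hosts      => ["127.0.0.1:9200"]
+        parameters => { "team" => "ops" }  # requests get ?team=ops appended
+      }
+    }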
+
+[id="{version}-plugins-{type}s-{plugin}-parent"]
+===== `parent`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `nil`
+
+For child documents, ID of the associated parent.
+This can be dynamic using the `%{foo}` syntax.
+
+[id="{version}-plugins-{type}s-{plugin}-password"]
+===== `password`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#password[password]
+ * There is no default value for this setting.
+
+Password to authenticate to a secure Elasticsearch cluster.
+
+[id="{version}-plugins-{type}s-{plugin}-path"]
+===== `path`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+HTTP path at which the Elasticsearch server lives. Use this if you must run Elasticsearch
+behind a proxy that remaps the root path of the Elasticsearch HTTP API.
+Note that if you use paths as components of URLs in the 'hosts' field you may
+not also set this field; doing so will raise an error at startup.
+
+[id="{version}-plugins-{type}s-{plugin}-pipeline"]
+===== `pipeline`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `nil`
+
+Set which ingest pipeline you wish to execute for an event. You can also use event dependent configuration
+here, like `pipeline => "%{INGEST_PIPELINE}"`.
+
+[id="{version}-plugins-{type}s-{plugin}-pool_max"]
+===== `pool_max`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `1000`
+
+While the output tries to reuse connections efficiently, there is a maximum.
+This sets the maximum number of open connections the output will create.
+Setting this too low may cause connections to be closed and reopened frequently,
+which hurts performance.
+
+[id="{version}-plugins-{type}s-{plugin}-pool_max_per_route"]
+===== `pool_max_per_route`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `100`
+
+While the output tries to reuse connections efficiently, there is a maximum per endpoint.
+This sets the maximum number of open connections per endpoint the output will create.
+Setting this too low may cause connections to be closed and reopened frequently,
+which hurts performance.
+
+[id="{version}-plugins-{type}s-{plugin}-proxy"]
+===== `proxy`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Set the address of a forward HTTP proxy.
+This used to accept hashes as arguments but now only accepts
+arguments of the URI type to prevent leaking credentials.
+
+[id="{version}-plugins-{type}s-{plugin}-resurrect_delay"]
+===== `resurrect_delay`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `5`
+
+How long, in seconds, to wait between resurrection attempts.
+Resurrection is the process by which backend endpoints marked 'down' are checked
+to see whether they have come back to life.
+
+[id="{version}-plugins-{type}s-{plugin}-retry_initial_interval"]
+===== `retry_initial_interval`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `2`
+
+Set the initial interval, in seconds, between bulk retries.
+The interval doubles on each retry, up to `retry_max_interval`.
+
+[id="{version}-plugins-{type}s-{plugin}-retry_max_interval"]
+===== `retry_max_interval`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `64`
+
+Set the maximum interval, in seconds, between bulk retries.
+
+[id="{version}-plugins-{type}s-{plugin}-retry_on_conflict"]
+===== `retry_on_conflict`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `1`
+
+The number of times Elasticsearch should internally retry an update/upserted document.
+See the https://www.elastic.co/guide/en/elasticsearch/guide/current/partial-updates.html[partial updates documentation]
+for more information.
+
+[id="{version}-plugins-{type}s-{plugin}-routing"]
+===== `routing`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+A routing override to be applied to all processed events.
+This can be dynamic using the `%{foo}` syntax.
+
+[id="{version}-plugins-{type}s-{plugin}-script"]
+===== `script`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `""`
+
+Set the script name for scripted update mode.
+
+[id="{version}-plugins-{type}s-{plugin}-script_lang"]
+===== `script_lang`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `"painless"`
+
+Set the language of the script to use. As of ES 5.0 this defaults to `painless`.
+
+[id="{version}-plugins-{type}s-{plugin}-script_type"]
+===== `script_type`
+
+ * Value can be any of: `inline`, `indexed`, `file`
+ * Default value is `"inline"`
+
+Define the type of script referenced by the "script" variable:
+
+* `inline`: "script" contains an inline script.
+* `indexed`: "script" contains the name of a script directly indexed in Elasticsearch.
+* `file`: "script" contains the name of a script stored in Elasticsearch's config directory.
+
+[id="{version}-plugins-{type}s-{plugin}-script_var_name"]
+===== `script_var_name`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `"event"`
+
+Set the variable name passed to the script (scripted update mode).
+
+[id="{version}-plugins-{type}s-{plugin}-scripted_upsert"]
+===== `scripted_upsert`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `false`
+
+If enabled, the script is in charge of creating a non-existent document (scripted update mode).
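+
+For example, a minimal sketch of a scripted update (the script name `add-tag` and
+the `[id]` field are hypothetical; the script must already be indexed in Elasticsearch):
+
+    output {
+      elasticsearch {
+        action          => "update"
+        document_id     => "%{[id]}"   # hypothetical id field
+        script          => "add-tag"   # hypothetical indexed script
+        script_type     => "indexed"
+        scripted_upsert => true        # the script also handles missing documents
+      }
+    }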
+
+[id="{version}-plugins-{type}s-{plugin}-sniffing"]
+===== `sniffing`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `false`
+
+This setting asks Elasticsearch for the list of all cluster nodes and adds them to the hosts list.
+Note: This will return ALL nodes with HTTP enabled (including master nodes!). If you use
+this with master nodes, you probably want to disable HTTP on them by setting
+`http.enabled` to false in their elasticsearch.yml. You can either use the `sniffing` option or
+manually enter multiple Elasticsearch hosts using the `hosts` parameter.
+
+[id="{version}-plugins-{type}s-{plugin}-sniffing_delay"]
+===== `sniffing_delay`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `5`
+
+How long to wait, in seconds, between sniffing attempts.
+
+[id="{version}-plugins-{type}s-{plugin}-sniffing_path"]
+===== `sniffing_path`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+HTTP path to be used for the sniffing requests. The default value is computed
+by concatenating the `path` value and "_nodes/http". If `sniffing_path` is set,
+it is used as an absolute path; do not use a full URL here, only a path,
+e.g. "/sniff/_nodes/http".
+
+[id="{version}-plugins-{type}s-{plugin}-ssl"]
+===== `ssl`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * There is no default value for this setting.
+
+Enable SSL/TLS secured communication to the Elasticsearch cluster. Leaving this unspecified will use whatever scheme
+is specified in the URLs listed in 'hosts'. If no explicit protocol is specified, plain HTTP will be used.
+If SSL is explicitly disabled here, the plugin will refuse to start if an HTTPS URL is given in 'hosts'.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_certificate_verification"]
+===== `ssl_certificate_verification`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `true`
+
+Option to validate the server's certificate. Disabling this severely compromises security.
+For more information on disabling certificate verification please read
+https://www.cs.utexas.edu/~shmat/shmat_ccs12.pdf
+
+[id="{version}-plugins-{type}s-{plugin}-template"]
+===== `template`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#path[path]
+ * There is no default value for this setting.
+
+You can set the path to your own template here, if you so desire.
+If not set, the included template will be used.
+
+[id="{version}-plugins-{type}s-{plugin}-template_name"]
+===== `template_name`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `"logstash"`
+
+This configuration option defines how the template is named inside Elasticsearch.
+Note that if you have used the template management features and subsequently
+change this, you will need to prune the old template manually, e.g.
+
+`curl -XDELETE <http://localhost:9200/_template/OldTemplateName>`
+
+where `OldTemplateName` is whatever the former setting was.
+
+[id="{version}-plugins-{type}s-{plugin}-template_overwrite"]
+===== `template_overwrite`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `false`
+
+The template_overwrite option will always overwrite the indicated template
+in Elasticsearch with either the one indicated by template or the included one.
+This option is set to false by default. If you always want to stay up to date
+with the template provided by Logstash, this option could be very useful to you.
+Likewise, if you have your own template file managed by puppet, for example, and
+you wanted to be able to update it regularly, this option could help there as well.
+
+Please note that if you are using your own customized version of the Logstash
+template (logstash), setting this to true will make Logstash overwrite
+the "logstash" template (i.e. removing all customized settings).
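+
+For example, a minimal sketch of managing a custom template (the file path and
+template name are hypothetical):
+
+    output {
+      elasticsearch {
+        template           => "/etc/logstash/templates/my_template.json"  # hypothetical path
+        template_name      => "my_template"
+        template_overwrite => true  # keep the installed template in sync with the file
+      }
+    }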
+
+[id="{version}-plugins-{type}s-{plugin}-timeout"]
+===== `timeout`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `60`
+
+Set the timeout, in seconds, for network operations and requests sent to Elasticsearch. If
+a timeout occurs, the request will be retried.
+
+[id="{version}-plugins-{type}s-{plugin}-truststore"]
+===== `truststore`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#path[path]
+ * There is no default value for this setting.
+
+The JKS truststore to validate the server's certificate.
+Use either `:truststore` or `:cacert`.
+
+[id="{version}-plugins-{type}s-{plugin}-truststore_password"]
+===== `truststore_password`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#password[password]
+ * There is no default value for this setting.
+
+Set the truststore password.
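+
+For example, a minimal sketch of a TLS-secured output using a JKS truststore
+(host, path, and password are placeholders):
+
+    output {
+      elasticsearch {
+        hosts               => ["https://es.example.org:9200"]
+        ssl                 => true
+        truststore          => "/etc/logstash/truststore.jks"
+        truststore_password => "changeme"
+      }
+    }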
+
+[id="{version}-plugins-{type}s-{plugin}-upsert"]
+===== `upsert`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `""`
+
+Set the upsert content for update mode.
+Creates a new document with this parameter as its JSON string if `document_id` does not exist.
+
+[id="{version}-plugins-{type}s-{plugin}-user"]
+===== `user`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+Username to authenticate to a secure Elasticsearch cluster.
+
+[id="{version}-plugins-{type}s-{plugin}-validate_after_inactivity"]
+===== `validate_after_inactivity`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `10000`
+
+How long, in milliseconds, to wait before checking whether a keepalive connection is stale
+before executing a request on it.
+You may want to set this lower if you get connection errors regularly.
+Quoting the Apache Commons docs (this client is based on Apache Commons):
+'Defines period of inactivity in milliseconds after which persistent connections must
+be re-validated prior to being leased to the consumer. Non-positive value passed to
+this method disables connection validation. This check helps detect connections that
+have become stale (half-closed) while kept inactive in the pool.'
+See https://hc.apache.org/httpcomponents-client-ga/httpclient/apidocs/org/apache/http/impl/conn/PoolingHttpClientConnectionManager.html#setValidateAfterInactivity(int)[these docs for more info]
+
+[id="{version}-plugins-{type}s-{plugin}-version"]
+===== `version`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+The version to use for indexing. Use sprintf syntax like `%{my_version}` to use a field value here.
+See https://www.elastic.co/blog/elasticsearch-versioning-support.
+
+[id="{version}-plugins-{type}s-{plugin}-version_type"]
+===== `version_type`
+
+ * Value can be any of: `internal`, `external`, `external_gt`, `external_gte`, `force`
+ * There is no default value for this setting.
+
+The version_type to use for indexing.
+See https://www.elastic.co/blog/elasticsearch-versioning-support.
+See also https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-index_.html#_version_types
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/outputs/elasticsearch-v8.1.1.asciidoc b/docs/versioned-plugins/outputs/elasticsearch-v8.1.1.asciidoc
new file mode 100644
index 000000000..7ec9129ac
--- /dev/null
+++ b/docs/versioned-plugins/outputs/elasticsearch-v8.1.1.asciidoc
@@ -0,0 +1,663 @@
+:plugin: elasticsearch
+:type: output
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v8.1.1
+:release_date: 2017-08-21
+:changelog_url: https://github.com/logstash-plugins/logstash-output-elasticsearch/blob/v8.1.1/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Elasticsearch output plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+.Compatibility Note
+[NOTE]
+================================================================================
+Starting with Elasticsearch 5.3, there's an {ref}modules-http.html[HTTP setting]
+called `http.content_type.required`. If this option is set to `true`, and you
+are using Logstash 2.4 through 5.2, you need to update the Elasticsearch output
+plugin to version 6.2.5 or higher.
+
+================================================================================
+
+This plugin is the recommended method of storing logs in Elasticsearch.
+If you plan on using the Kibana web interface, you'll want to use this output.
+
+This output only speaks the HTTP protocol. HTTP is the preferred protocol for interacting with Elasticsearch as of Logstash 2.0.
+We strongly encourage the use of HTTP over the node protocol for a number of reasons. HTTP is only marginally slower,
+yet far easier to administer and work with. When using the HTTP protocol one may upgrade Elasticsearch versions without having
+to upgrade Logstash in lock-step.
+
+You can learn more about Elasticsearch at https://www.elastic.co/products/elasticsearch
+
+==== Template management for Elasticsearch 5.x
+The index template for this version (Logstash 5.0) has been changed to reflect Elasticsearch's mapping changes in version 5.0.
+Most importantly, the subfield for string multi-fields has changed from `.raw` to `.keyword` to match ES default
+behavior.
+
+**Users installing ES 5.x and LS 5.x**
+This change will not affect you and you will continue to use the ES defaults.
+
+**Users upgrading from LS 2.x to LS 5.x with ES 5.x**
+LS will not force-upgrade the template if the `logstash` template already exists. This means you will still use
+`.raw` for sub-fields coming from 2.x. If you choose to use the new template, you will have to reindex your data after
+the new template is installed.
+
+==== Retry Policy
+
+The retry policy has changed significantly in the 8.1.1 release.
+This plugin uses the Elasticsearch bulk API to optimize its imports into Elasticsearch. These requests may experience
+either partial or total failures. The bulk API sends batches of requests to an HTTP endpoint. Error codes for the HTTP
+request are handled differently than error codes for individual documents.
+
+HTTP requests to the bulk API are expected to return a 200 response code.
All other response codes are retried indefinitely.
+
+Document-level errors are handled as follows:
+
+- 400 and 404 errors are sent to the DLQ, if enabled. If a DLQ is not enabled, a log message will be emitted and the event will be dropped.
+- 409 errors (conflict) are logged as a warning and dropped.
+
+Note that 409 exceptions are no longer retried. Please set a higher `retry_on_conflict` value if you experience 409 exceptions.
+It is more performant for Elasticsearch to retry these exceptions than for this plugin to do so.
+
+==== Batch Sizes
+This plugin attempts to send batches of events as a single request. However, if
+a request exceeds 20MB we will break it up into multiple batch requests. If a single document exceeds 20MB it will be sent as a single request.
+
+==== DNS Caching
+
+This plugin uses the JVM to look up DNS entries and is subject to the value of https://docs.oracle.com/javase/7/docs/technotes/guides/net/properties.html[networkaddress.cache.ttl],
+a global setting for the JVM.
+
+As an example, to set your DNS TTL to 1 second you would set
+the `LS_JAVA_OPTS` environment variable to `-Dnetworkaddress.cache.ttl=1`.
+
+Keep in mind that a connection with keepalive enabled will
+not reevaluate its DNS value while the keepalive is in effect.
+
+==== HTTP Compression
+
+This plugin supports request and response compression. Response compression is enabled by default, and
+for Elasticsearch versions 5.0 and later the user doesn't have to set any configs in Elasticsearch for
+it to send back a compressed response. For versions before 5.0, `http.compression` must be set to `true` in
+https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-http.html#modules-http[Elasticsearch] to take advantage of response compression when using this plugin.
+
+For request compression, regardless of the Elasticsearch version, users have to enable the `http_compression`
+setting in their Logstash config file.
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Elasticsearch Output Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+ +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-action>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-bulk_path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-cacert>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-doc_as_upsert>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-document_id>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-document_type>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-failure_type_logging_whitelist>> |{logstash-ref}/configuration-file-structure.html#array[array]|No +| <<{version}-plugins-{type}s-{plugin}-healthcheck_path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-hosts>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-http_compression>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-index>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-keystore>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-keystore_password>> |{logstash-ref}/configuration-file-structure.html#password[password]|No +| <<{version}-plugins-{type}s-{plugin}-manage_template>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-parameters>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No +| <<{version}-plugins-{type}s-{plugin}-parent>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-password>> |{logstash-ref}/configuration-file-structure.html#password[password]|No +| <<{version}-plugins-{type}s-{plugin}-path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-pipeline>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-pool_max>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-pool_max_per_route>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-proxy>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-resurrect_delay>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-retry_initial_interval>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-retry_max_interval>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-retry_on_conflict>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-routing>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-script>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-script_lang>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-script_type>> 
|{logstash-ref}/configuration-file-structure.html#string[string], one of `["inline", "indexed", "file"]`|No
+| <<{version}-plugins-{type}s-{plugin}-script_var_name>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-scripted_upsert>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-sniffing>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-sniffing_delay>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+| <<{version}-plugins-{type}s-{plugin}-sniffing_path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-ssl>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_verification>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-template>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-template_name>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-template_overwrite>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-timeout>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+| <<{version}-plugins-{type}s-{plugin}-truststore>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-truststore_password>> |{logstash-ref}/configuration-file-structure.html#password[password]|No
+| <<{version}-plugins-{type}s-{plugin}-upsert>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-user>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-validate_after_inactivity>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+| <<{version}-plugins-{type}s-{plugin}-version>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-version_type>> |{logstash-ref}/configuration-file-structure.html#string[string], one of `["internal", "external", "external_gt", "external_gte", "force"]`|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+output plugins.
+
+ 
+
+[id="{version}-plugins-{type}s-{plugin}-action"]
+===== `action`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `"index"`
+
+The Elasticsearch action to perform. Valid actions are:
+
+- index: indexes a document (an event from Logstash).
+- delete: deletes a document by id (an id is required for this action).
+- create: indexes a document; fails if a document with that id already exists in the index.
+- update: updates a document by id. Update has a special case where you can upsert -- update a
+  document if it is not already present. See the `upsert` option. NOTE: This is not supported in
+  Elasticsearch 1.x. Please upgrade to ES 2.x or greater to use this feature with Logstash!
+- A sprintf style string to change the action based on the content of the event. The value `%{[foo]}`
+  would use the foo field for the action.
+
+For more details on actions, see the http://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html[Elasticsearch bulk API documentation].
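+
+For example, a minimal sketch of choosing the action per event (the `[es_action]`
+field name is hypothetical and would be set earlier in the pipeline):
+
+    output {
+      elasticsearch {
+        hosts  => ["127.0.0.1:9200"]
+        action => "%{[es_action]}"  # resolves to "index", "update", etc. per event
+      }
+    }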
+
+[id="{version}-plugins-{type}s-{plugin}-bulk_path"]
+===== `bulk_path`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+HTTP path at which to send the _bulk requests. This defaults to a concatenation
+of the `path` parameter and "_bulk".
+
+[id="{version}-plugins-{type}s-{plugin}-cacert"]
+===== `cacert`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#path[path]
+ * There is no default value for this setting.
+
+The .cer or .pem file to validate the server's certificate.
+
+[id="{version}-plugins-{type}s-{plugin}-doc_as_upsert"]
+===== `doc_as_upsert`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `false`
+
+Enable `doc_as_upsert` for update mode.
+Creates a new document from the event source if `document_id` does not exist in Elasticsearch.
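+
+For example, a minimal sketch of an update-with-upsert configuration (the
+`[fingerprint]` field used as the document id is hypothetical):
+
+    output {
+      elasticsearch {
+        action        => "update"
+        document_id   => "%{[fingerprint]}"  # hypothetical unique-id field
+        doc_as_upsert => true                # index the event if the id is absent
+      }
+    }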
+
+[id="{version}-plugins-{type}s-{plugin}-document_id"]
+===== `document_id`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+The document ID for the index. Useful for overwriting existing entries in
+Elasticsearch with the same ID.
+
+[id="{version}-plugins-{type}s-{plugin}-document_type"]
+===== `document_type`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+The document type to write events to. Generally you should try to write only
+similar events to the same 'type'. String expansion `%{foo}` works here.
+If you do not set 'document_type', the event's 'type' field is used if it exists;
+otherwise the document type is assigned the value 'logs'.
+
+[id="{version}-plugins-{type}s-{plugin}-failure_type_logging_whitelist"]
+===== `failure_type_logging_whitelist`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#array[array]
+ * Default value is `[]`
+
+Set the Elasticsearch error types that you do not want to be logged.
+A useful example is when you want to skip all 409 errors,
+which are `document_already_exists_exception`.
+
+[id="{version}-plugins-{type}s-{plugin}-healthcheck_path"]
+===== `healthcheck_path`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+HTTP path at which a HEAD request is sent when a backend is marked down.
+The request is sent in the background to see whether the backend has come back,
+before it is once again eligible to service requests.
+If you have custom firewall rules you may need to change this.
+
+[id="{version}-plugins-{type}s-{plugin}-hosts"]
+===== `hosts`
+
+ * Value type is <>
+ * Default value is `[//127.0.0.1]`
+
+Sets the host(s) of the remote instance. If given an array it will load balance requests across the hosts specified in the `hosts` parameter.
+Remember the `http` protocol uses the http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-http.html#modules-http[http] address (e.g. 9200, not 9300). Examples:
+
+    `"127.0.0.1"`
+    `["127.0.0.1:9200","127.0.0.2:9200"]`
+    `["http://127.0.0.1"]`
+    `["https://127.0.0.1:9200"]`
+    `["https://127.0.0.1:9200/mypath"]` (if using a proxy on a subpath)
+
+It is important to exclude http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-node.html[dedicated master nodes] from the `hosts` list
+to prevent LS from sending bulk requests to the master nodes, so this parameter should only reference data or client nodes in Elasticsearch.
+
+Any special characters present in the URLs here MUST be URL escaped! For instance, `#` should be entered as `%23`.
+
+[id="{version}-plugins-{type}s-{plugin}-http_compression"]
+===== `http_compression`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `false`
+
+Enable gzip compression on requests. Note that response compression is on by default for Elasticsearch v5.0 and beyond.
+
+[id="{version}-plugins-{type}s-{plugin}-index"]
+===== `index`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `"logstash-%{+YYYY.MM.dd}"`
+
+The index to write events to. This can be dynamic using the `%{foo}` syntax.
+The default value will partition your indices by day so you can more easily
+delete old data or only search specific date ranges.
+Index names may not contain uppercase characters.
+For weekly indices the ISO 8601 week-date format is recommended, e.g. `logstash-%{+xxxx.ww}`.
+LS uses Joda to format the index pattern from the event timestamp.
+Joda formats are defined http://www.joda.org/joda-time/apidocs/org/joda/time/format/DateTimeFormat.html[here].
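+
+For example, a minimal sketch of the weekly ISO week-date pattern mentioned above:
+
+    output {
+      elasticsearch {
+        index => "logstash-%{+xxxx.ww}"  # e.g. logstash-2017.38 (ISO week of year)
+      }
+    }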
+
+[id="{version}-plugins-{type}s-{plugin}-keystore"]
+===== `keystore`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#path[path]
+ * There is no default value for this setting.
+
+The keystore used to present a certificate to the server.
+It can be either .jks or .p12.
+
+[id="{version}-plugins-{type}s-{plugin}-keystore_password"]
+===== `keystore_password`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#password[password]
+ * There is no default value for this setting.
+
+Set the keystore password.
+
+[id="{version}-plugins-{type}s-{plugin}-manage_template"]
+===== `manage_template`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `true`
+
+From Logstash 1.3 onwards, a template is applied to Elasticsearch during
+Logstash's startup if one with the name `template_name` does not already exist.
+By default, the contents of this template are the default template for
+`logstash-%{+YYYY.MM.dd}`, which always matches indices based on the pattern
+`logstash-*`. Should you require support for other index names, or would like
+to change the mappings in the template in general, a custom template can be
+specified by setting `template` to the path of a template file.
+
+Setting `manage_template` to false disables this feature. If you require more
+control over template creation (e.g. creating indices dynamically based on
+field names), you should set `manage_template` to false and use the REST
+API to apply your templates manually.
+
+[id="{version}-plugins-{type}s-{plugin}-parameters"]
+===== `parameters`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash]
+ * There is no default value for this setting.
+
+Pass a set of key/value pairs as the URL query string. This query string is added
+to every host listed in the 'hosts' configuration. If the 'hosts' list contains
+URLs that already have query strings, the one specified here will be appended.
+
+[id="{version}-plugins-{type}s-{plugin}-parent"]
+===== `parent`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `nil`
+
+For child documents, ID of the associated parent.
+This can be dynamic using the `%{foo}` syntax.
+
+[id="{version}-plugins-{type}s-{plugin}-password"]
+===== `password`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#password[password]
+ * There is no default value for this setting.
+
+Password to authenticate to a secure Elasticsearch cluster.
+
+[id="{version}-plugins-{type}s-{plugin}-path"]
+===== `path`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+HTTP path at which the Elasticsearch server lives. Use this if you must run Elasticsearch
+behind a proxy that remaps the root path of the Elasticsearch HTTP API.
+Note that if you use paths as components of URLs in the 'hosts' field you may
+not also set this field; doing so will raise an error at startup.
+
+[id="{version}-plugins-{type}s-{plugin}-pipeline"]
+===== `pipeline`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `nil`
+
+Set which ingest pipeline you wish to execute for an event. You can also use event dependent configuration
+here, like `pipeline => "%{INGEST_PIPELINE}"`.
+
+[id="{version}-plugins-{type}s-{plugin}-pool_max"]
+===== `pool_max`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `1000`
+
+While the output tries to reuse connections efficiently, there is a maximum.
+This sets the maximum number of open connections the output will create.
+Setting this too low may cause connections to be closed and reopened frequently,
+which hurts performance.
+
+[id="{version}-plugins-{type}s-{plugin}-pool_max_per_route"]
+===== `pool_max_per_route`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `100`
+
+While the output tries to reuse connections efficiently, there is a maximum per endpoint.
+This sets the maximum number of open connections per endpoint the output will create.
+Setting this too low may cause connections to be closed and reopened frequently,
+which hurts performance.
+
+[id="{version}-plugins-{type}s-{plugin}-proxy"]
+===== `proxy`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Set the address of a forward HTTP proxy.
+This used to accept hashes as arguments but now only accepts
+arguments of the URI type to prevent leaking credentials.
+
+[id="{version}-plugins-{type}s-{plugin}-resurrect_delay"]
+===== `resurrect_delay`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `5`
+
+How long, in seconds, to wait between resurrection attempts.
+Resurrection is the process by which backend endpoints marked 'down' are checked
+to see whether they have come back to life.
+
+[id="{version}-plugins-{type}s-{plugin}-retry_initial_interval"]
+===== `retry_initial_interval`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `2`
+
+Set the initial interval, in seconds, between bulk retries.
+The interval doubles on each retry, up to `retry_max_interval`.
+
+[id="{version}-plugins-{type}s-{plugin}-retry_max_interval"]
+===== `retry_max_interval`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `64`
+
+Set the maximum interval, in seconds, between bulk retries.
+
+[id="{version}-plugins-{type}s-{plugin}-retry_on_conflict"]
+===== `retry_on_conflict`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `1`
+
+The number of times Elasticsearch should internally retry an update/upserted document.
+See the https://www.elastic.co/guide/en/elasticsearch/guide/current/partial-updates.html[partial updates documentation]
+for more information.
+
+[id="{version}-plugins-{type}s-{plugin}-routing"]
+===== `routing`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+A routing override to be applied to all processed events.
+This can be dynamic using the `%{foo}` syntax.
+
+[id="{version}-plugins-{type}s-{plugin}-script"]
+===== `script`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `""`
+
+Set the script name for scripted update mode.
+
+[id="{version}-plugins-{type}s-{plugin}-script_lang"]
+===== `script_lang`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `"painless"`
+
+Set the language of the script to use. As of ES 5.0 this defaults to `painless`.
+
+[id="{version}-plugins-{type}s-{plugin}-script_type"]
+===== `script_type`
+
+ * Value can be any of: `inline`, `indexed`, `file`
+ * Default value is `"inline"`
+
+Define the type of script referenced by the "script" variable:
+
+* `inline`: "script" contains an inline script.
+* `indexed`: "script" contains the name of a script directly indexed in Elasticsearch.
+* `file`: "script" contains the name of a script stored in Elasticsearch's config directory.
+
+[id="{version}-plugins-{type}s-{plugin}-script_var_name"]
+===== `script_var_name`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `"event"`
+
+Set the variable name passed to the script (scripted update mode).
+
+[id="{version}-plugins-{type}s-{plugin}-scripted_upsert"]
+===== `scripted_upsert`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `false`
+
+If enabled, the script is in charge of creating a non-existent document (scripted update mode).
+
+[id="{version}-plugins-{type}s-{plugin}-sniffing"]
+===== `sniffing`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `false`
+
+This setting asks Elasticsearch for the list of all cluster nodes and adds them to the hosts list.
+Note: This will return ALL nodes with HTTP enabled (including master nodes!). If you use
+this with master nodes, you probably want to disable HTTP on them by setting
+`http.enabled` to false in their elasticsearch.yml. You can either use the `sniffing` option or
+manually enter multiple Elasticsearch hosts using the `hosts` parameter.
+
+[id="{version}-plugins-{type}s-{plugin}-sniffing_delay"]
+===== `sniffing_delay`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `5`
+
+How long to wait, in seconds, between sniffing attempts.
+
+[id="{version}-plugins-{type}s-{plugin}-sniffing_path"]
+===== `sniffing_path`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+HTTP path to be used for the sniffing requests. The default value is computed
+by concatenating the `path` value and "_nodes/http". If `sniffing_path` is set,
+it is used as an absolute path; do not use a full URL here, only a path,
+e.g. "/sniff/_nodes/http".
+
+[id="{version}-plugins-{type}s-{plugin}-ssl"]
+===== `ssl`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * There is no default value for this setting.
+
+Enable SSL/TLS secured communication to the Elasticsearch cluster. Leaving this unspecified will use whatever scheme
+is specified in the URLs listed in 'hosts'. If no explicit protocol is specified, plain HTTP will be used.
+If SSL is explicitly disabled here, the plugin will refuse to start if an HTTPS URL is given in 'hosts'.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_certificate_verification"]
+===== `ssl_certificate_verification`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `true`
+
+Option to validate the server's certificate. Disabling this severely compromises security.
+For more information on disabling certificate verification please read
+https://www.cs.utexas.edu/~shmat/shmat_ccs12.pdf
+
+[id="{version}-plugins-{type}s-{plugin}-template"]
+===== `template`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#path[path]
+ * There is no default value for this setting.
+
+You can set the path to your own template here, if you so desire.
+If not set, the included template will be used.
+
+[id="{version}-plugins-{type}s-{plugin}-template_name"]
+===== `template_name`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `"logstash"`
+
+This configuration option defines how the template is named inside Elasticsearch.
+Note that if you have used the template management features and subsequently
+change this, you will need to prune the old template manually, e.g.
+
+`curl -XDELETE <http://localhost:9200/_template/OldTemplateName>`
+
+where `OldTemplateName` is whatever the former setting was.
+
+[id="{version}-plugins-{type}s-{plugin}-template_overwrite"]
+===== `template_overwrite`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `false`
+
+The template_overwrite option will always overwrite the indicated template
+in Elasticsearch with either the one indicated by template or the included one.
+This option is set to false by default. If you always want to stay up to date
+with the template provided by Logstash, this option could be very useful to you.
+Likewise, if you have your own template file managed by puppet, for example, and
+you wanted to be able to update it regularly, this option could help there as well.
+
+Please note that if you are using your own customized version of the Logstash
+template (logstash), setting this to true will make Logstash overwrite
+the "logstash" template (i.e. removing all customized settings).
+
+[id="{version}-plugins-{type}s-{plugin}-timeout"]
+===== `timeout`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `60`
+
+Set the timeout, in seconds, for network operations and requests sent to Elasticsearch. If
+a timeout occurs, the request will be retried.
+
+[id="{version}-plugins-{type}s-{plugin}-truststore"]
+===== `truststore`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#path[path]
+ * There is no default value for this setting.
+
+The JKS truststore to validate the server's certificate.
+Use either `:truststore` or `:cacert`.
+
+[id="{version}-plugins-{type}s-{plugin}-truststore_password"]
+===== `truststore_password`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#password[password]
+ * There is no default value for this setting.
+
+Set the truststore password.
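+
+For example, a minimal sketch of a TLS-secured output using a JKS truststore
+(host, path, and password are placeholders):
+
+    output {
+      elasticsearch {
+        hosts               => ["https://es.example.org:9200"]
+        ssl                 => true
+        truststore          => "/etc/logstash/truststore.jks"
+        truststore_password => "changeme"
+      }
+    }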
+
+[id="{version}-plugins-{type}s-{plugin}-upsert"]
+===== `upsert`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `""`
+
+Set the upsert content for update mode.
+Creates a new document with this parameter as its JSON string if `document_id` does not exist.
+
+[id="{version}-plugins-{type}s-{plugin}-user"]
+===== `user`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+Username to authenticate to a secure Elasticsearch cluster.
+
+[id="{version}-plugins-{type}s-{plugin}-validate_after_inactivity"]
+===== `validate_after_inactivity`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `10000`
+
+How long, in milliseconds, to wait before checking whether a keepalive connection is stale
+before executing a request on it.
+You may want to set this lower if you get connection errors regularly.
+Quoting the Apache Commons docs (this client is based on Apache Commons):
+'Defines period of inactivity in milliseconds after which persistent connections must
+be re-validated prior to being leased to the consumer. Non-positive value passed to
+this method disables connection validation. This check helps detect connections that
+have become stale (half-closed) while kept inactive in the pool.'
+See https://hc.apache.org/httpcomponents-client-ga/httpclient/apidocs/org/apache/http/impl/conn/PoolingHttpClientConnectionManager.html#setValidateAfterInactivity(int)[these docs for more info]
+
+[id="{version}-plugins-{type}s-{plugin}-version"]
+===== `version`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+The version to use for indexing. Use sprintf syntax like `%{my_version}` to use a field value here.
+See https://www.elastic.co/blog/elasticsearch-versioning-support.
+
+[id="{version}-plugins-{type}s-{plugin}-version_type"]
+===== `version_type`
+
+ * Value can be any of: `internal`, `external`, `external_gt`, `external_gte`, `force`
+ * There is no default value for this setting.
+
+The version_type to use for indexing.
+See https://www.elastic.co/blog/elasticsearch-versioning-support.
+See also https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-index_.html#_version_types
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/outputs/elasticsearch-v8.2.0.asciidoc b/docs/versioned-plugins/outputs/elasticsearch-v8.2.0.asciidoc
new file mode 100644
index 000000000..1a233d788
--- /dev/null
+++ b/docs/versioned-plugins/outputs/elasticsearch-v8.2.0.asciidoc
@@ -0,0 +1,663 @@
+:plugin: elasticsearch
+:type: output
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v8.2.0
+:release_date: 2017-09-21
+:changelog_url: https://github.com/logstash-plugins/logstash-output-elasticsearch/blob/v8.2.0/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Elasticsearch output plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+.Compatibility Note
+[NOTE]
+================================================================================
+Starting with Elasticsearch 5.3, there's an {ref}modules-http.html[HTTP setting]
+called `http.content_type.required`. If this option is set to `true`, and you
+are using Logstash 2.4 through 5.2, you need to update the Elasticsearch output
+plugin to version 6.2.5 or higher.
+
+================================================================================
+
+This plugin is the recommended method of storing logs in Elasticsearch.
+If you plan on using the Kibana web interface, you'll want to use this output.
+
+This output only speaks the HTTP protocol. HTTP is the preferred protocol for interacting with Elasticsearch as of Logstash 2.0.
+We strongly encourage the use of HTTP over the node protocol for a number of reasons. HTTP is only marginally slower,
+yet far easier to administer and work with. When using the HTTP protocol one may upgrade Elasticsearch versions without having
+to upgrade Logstash in lock-step.
+
+You can learn more about Elasticsearch at https://www.elastic.co/products/elasticsearch
+
+==== Template management for Elasticsearch 5.x
+The index template for this version (Logstash 5.0) has been changed to reflect Elasticsearch's mapping changes in version 5.0.
+Most importantly, the subfield for string multi-fields has changed from `.raw` to `.keyword` to match ES default
+behavior.
+
+**Users installing ES 5.x and LS 5.x**
+This change will not affect you and you will continue to use the ES defaults.
+
+**Users upgrading from LS 2.x to LS 5.x with ES 5.x**
+LS will not force-upgrade the template if the `logstash` template already exists. This means you will still use
+`.raw` for sub-fields coming from 2.x. If you choose to use the new template, you will have to reindex your data after
+the new template is installed.
+
+==== Retry Policy
+
+The retry policy has changed significantly in the 8.1.1 release.
+This plugin uses the Elasticsearch bulk API to optimize its imports into Elasticsearch. These requests may experience
+either partial or total failures. The bulk API sends batches of requests to an HTTP endpoint. Error codes for the HTTP
+request are handled differently than error codes for individual documents.
+
+HTTP requests to the bulk API are expected to return a 200 response code.
All other response codes are retried indefinitely.
+
+Document-level errors are handled as follows:
+
+- 400 and 404 errors are sent to the DLQ, if enabled. If a DLQ is not enabled, a log message will be emitted and the event will be dropped.
+- 409 errors (conflict) are logged as a warning and dropped.
+
+Note that 409 exceptions are no longer retried. Please set a higher `retry_on_conflict` value if you experience 409 exceptions.
+It is more performant for Elasticsearch to retry these exceptions than for this plugin to do so.
+
+==== Batch Sizes
+This plugin attempts to send batches of events as a single request. However, if
+a request exceeds 20MB we will break it up into multiple batch requests. If a single document exceeds 20MB it will be sent as a single request.
+
+==== DNS Caching
+
+This plugin uses the JVM to look up DNS entries and is subject to the value of https://docs.oracle.com/javase/7/docs/technotes/guides/net/properties.html[networkaddress.cache.ttl],
+a global setting for the JVM.
+
+As an example, to set your DNS TTL to 1 second you would set
+the `LS_JAVA_OPTS` environment variable to `-Dnetworkaddress.cache.ttl=1`.
+
+Keep in mind that a connection with keepalive enabled will
+not reevaluate its DNS value while the keepalive is in effect.
+
+==== HTTP Compression
+
+This plugin supports request and response compression. Response compression is enabled by default, and
+for Elasticsearch versions 5.0 and later the user doesn't have to set any configs in Elasticsearch for
+it to send back a compressed response. For versions before 5.0, `http.compression` must be set to `true` in
+https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-http.html#modules-http[Elasticsearch] to take advantage of response compression when using this plugin.
+
+For request compression, regardless of the Elasticsearch version, users have to enable the `http_compression`
+setting in their Logstash config file.
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Elasticsearch Output Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Elasticsearch Output Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-action>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-bulk_path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-cacert>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-doc_as_upsert>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-document_id>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-document_type>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-failure_type_logging_whitelist>> |{logstash-ref}/configuration-file-structure.html#array[array]|No
+| <<{version}-plugins-{type}s-{plugin}-healthcheck_path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-hosts>> |{logstash-ref}/configuration-file-structure.html#uri[uri]|No
+| <<{version}-plugins-{type}s-{plugin}-http_compression>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-index>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-keystore>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-keystore_password>> |{logstash-ref}/configuration-file-structure.html#password[password]|No
+| <<{version}-plugins-{type}s-{plugin}-manage_template>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-parameters>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No
+| <<{version}-plugins-{type}s-{plugin}-parent>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-password>> |{logstash-ref}/configuration-file-structure.html#password[password]|No
+| <<{version}-plugins-{type}s-{plugin}-path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-pipeline>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-pool_max>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+| <<{version}-plugins-{type}s-{plugin}-pool_max_per_route>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+| <<{version}-plugins-{type}s-{plugin}-proxy>> |{logstash-ref}/configuration-file-structure.html#uri[uri]|No
+| <<{version}-plugins-{type}s-{plugin}-resurrect_delay>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+| <<{version}-plugins-{type}s-{plugin}-retry_initial_interval>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+| <<{version}-plugins-{type}s-{plugin}-retry_max_interval>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+| <<{version}-plugins-{type}s-{plugin}-retry_on_conflict>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+| <<{version}-plugins-{type}s-{plugin}-routing>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-script>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-script_lang>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-script_type>> |{logstash-ref}/configuration-file-structure.html#string[string], one of `["inline", "indexed", "file"]`|No
+| <<{version}-plugins-{type}s-{plugin}-script_var_name>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-scripted_upsert>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-sniffing>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-sniffing_delay>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+| <<{version}-plugins-{type}s-{plugin}-sniffing_path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-ssl>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_verification>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-template>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-template_name>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-template_overwrite>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-timeout>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+| <<{version}-plugins-{type}s-{plugin}-truststore>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-truststore_password>> |{logstash-ref}/configuration-file-structure.html#password[password]|No
+| <<{version}-plugins-{type}s-{plugin}-upsert>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-user>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-validate_after_inactivity>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+| <<{version}-plugins-{type}s-{plugin}-version>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-version_type>> |{logstash-ref}/configuration-file-structure.html#string[string], one of `["internal", "external", "external_gt", "external_gte", "force"]`|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+output plugins.
+
+[id="{version}-plugins-{type}s-{plugin}-action"]
+===== `action`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `"index"`
+
+The Elasticsearch action to perform. Valid actions are:
+
+- index: indexes a document (an event from Logstash).
+- delete: deletes a document by id (an id is required for this action).
+- create: indexes a document, fails if a document by that id already exists in the index.
+- update: updates a document by id. Update has a special case where you can upsert -- update a
+  document if not already present. See the `upsert` option. NOTE: This does not work and is not supported
+  in Elasticsearch 1.x. Please upgrade to ES 2.x or greater to use this feature with Logstash!
+- A sprintf style string to change the action based on the content of the event. The value `%{[foo]}`
+  would use the foo field for the action.
+
+For more details on actions, check out the http://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html[Elasticsearch bulk API documentation].
+
+[id="{version}-plugins-{type}s-{plugin}-bulk_path"]
+===== `bulk_path`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+HTTP path at which to perform the `_bulk` requests.
+This defaults to a concatenation of the `path` parameter and `_bulk`.
+
+[id="{version}-plugins-{type}s-{plugin}-cacert"]
+===== `cacert`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#path[path]
+ * There is no default value for this setting.
+
+The .cer or .pem file to validate the server's certificate.
+
+[id="{version}-plugins-{type}s-{plugin}-doc_as_upsert"]
+===== `doc_as_upsert`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `false`
+
+Enable `doc_as_upsert` for update mode.
+Create a new document with source if `document_id` doesn't exist in Elasticsearch.
+
+[id="{version}-plugins-{type}s-{plugin}-document_id"]
+===== `document_id`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+The document ID for the index. Useful for overwriting existing entries in
+Elasticsearch with the same ID.
+
+[id="{version}-plugins-{type}s-{plugin}-document_type"]
+===== `document_type`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+The document type to write events to. Generally you should try to write only
+similar events to the same 'type'. String expansion `%{foo}` works here.
+Unless you set 'document_type', the event 'type' will be used if it exists;
+otherwise, the document type will be assigned the value of 'logs'.
+
+[id="{version}-plugins-{type}s-{plugin}-failure_type_logging_whitelist"]
+===== `failure_type_logging_whitelist`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#array[array]
+ * Default value is `[]`
+
+Set the Elasticsearch errors that you don't want to log; errors whose type appears
+in this whitelist are dropped from the log. A useful example is when you want to
+skip all 409 errors, which are `document_already_exists_exception`.
+
+[id="{version}-plugins-{type}s-{plugin}-healthcheck_path"]
+===== `healthcheck_path`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+HTTP path at which a HEAD request is sent when a backend is marked down. The request is
+sent in the background to see if the backend has come back to life before it is once
+again eligible to service requests.
+If you have custom firewall rules you may need to change this.
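+
+As a sketch of how the update-related options described above fit together, the following
+hypothetical configuration updates documents in place by ID, creating them when they don't
+yet exist (the `my_id` event field is an assumption for illustration):
+
+    output {
+      elasticsearch {
+        action => "update"
+        # Use an existing event field as the document ID
+        document_id => "%{my_id}"
+        # Index the event source as a new document when the ID is missing
+        doc_as_upsert => true
+      }
+    }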
+ `"127.0.0.1"` + `["127.0.0.1:9200","127.0.0.2:9200"]` + `["http://127.0.0.1"]` + `["https://127.0.0.1:9200"]` + `["https://127.0.0.1:9200/mypath"]` (If using a proxy on a subpath) +It is important to exclude http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-node.html[dedicated master nodes] from the `hosts` list +to prevent LS from sending bulk requests to the master nodes. So this parameter should only reference either data or client nodes in Elasticsearch. + +Any special characters present in the URLs here MUST be URL escaped! This means `#` should be put in as `%23` for instance. + +[id="{version}-plugins-{type}s-{plugin}-http_compression"] +===== `http_compression` + + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] + * Default value is `false` + +Enable gzip compression on requests. Note that response compression is on by default for Elasticsearch v5.0 and beyond + +[id="{version}-plugins-{type}s-{plugin}-index"] +===== `index` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * Default value is `"logstash-%{+YYYY.MM.dd}"` + +The index to write events to. This can be dynamic using the `%{foo}` syntax. +The default value will partition your indices by day so you can more easily +delete old data or only search specific date ranges. +Indexes may not contain uppercase characters. +For weekly indexes ISO 8601 format is recommended, eg. logstash-%{+xxxx.ww}. +LS uses Joda to format the index pattern from event timestamp. +Joda formats are defined http://www.joda.org/joda-time/apidocs/org/joda/time/format/DateTimeFormat.html[here]. + +[id="{version}-plugins-{type}s-{plugin}-keystore"] +===== `keystore` + + * Value type is {logstash-ref}/configuration-file-structure.html#path[path] + * There is no default value for this setting. + +The keystore used to present a certificate to the server. +It can be either .jks or .p12 + +[id="{version}-plugins-{type}s-{plugin}-keystore_password"] +===== `keystore_password` + + * Value type is {logstash-ref}/configuration-file-structure.html#password[password] + * There is no default value for this setting. + +Set the keystore password + +[id="{version}-plugins-{type}s-{plugin}-manage_template"] +===== `manage_template` + + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] + * Default value is `true` + +From Logstash 1.3 onwards, a template is applied to Elasticsearch during +Logstash's startup if one with the name `template_name` does not already exist. +By default, the contents of this template is the default template for +`logstash-%{+YYYY.MM.dd}` which always matches indices based on the pattern +`logstash-*`. Should you require support for other index names, or would like +to change the mappings in the template in general, a custom template can be +specified by setting `template` to the path of a template file. + +Setting `manage_template` to false disables this feature. If you require more +control over template creation, (e.g. creating indices dynamically based on +field names) you should set `manage_template` to false and use the REST +API to apply your templates manually. + +[id="{version}-plugins-{type}s-{plugin}-parameters"] +===== `parameters` + + * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash] + * There is no default value for this setting. + +Pass a set of key value pairs as the URL query string. This query string is added +to every host listed in the 'hosts' configuration. 
If the 'hosts' list contains +urls that already have query strings, the one specified here will be appended. + +[id="{version}-plugins-{type}s-{plugin}-parent"] +===== `parent` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * Default value is `nil` + +For child documents, ID of the associated parent. +This can be dynamic using the `%{foo}` syntax. + +[id="{version}-plugins-{type}s-{plugin}-password"] +===== `password` + + * Value type is {logstash-ref}/configuration-file-structure.html#password[password] + * There is no default value for this setting. + +Password to authenticate to a secure Elasticsearch cluster + +[id="{version}-plugins-{type}s-{plugin}-path"] +===== `path` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * There is no default value for this setting. + +HTTP Path at which the Elasticsearch server lives. Use this if you must run Elasticsearch behind a proxy that remaps +the root path for the Elasticsearch HTTP API lives. +Note that if you use paths as components of URLs in the 'hosts' field you may +not also set this field. That will raise an error at startup + +[id="{version}-plugins-{type}s-{plugin}-pipeline"] +===== `pipeline` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * Default value is `nil` + +Set which ingest pipeline you wish to execute for an event. You can also use event dependent configuration +here like `pipeline => "%{INGEST_PIPELINE}"` + +[id="{version}-plugins-{type}s-{plugin}-pool_max"] +===== `pool_max` + + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] + * Default value is `1000` + +While the output tries to reuse connections efficiently we have a maximum. +This sets the maximum number of open connections the output will create. +Setting this too low may mean frequently closing / opening connections +which is bad. + +[id="{version}-plugins-{type}s-{plugin}-pool_max_per_route"] +===== `pool_max_per_route` + + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] + * Default value is `100` + +While the output tries to reuse connections efficiently we have a maximum per endpoint. +This sets the maximum number of open connections per endpoint the output will create. +Setting this too low may mean frequently closing / opening connections +which is bad. + +[id="{version}-plugins-{type}s-{plugin}-proxy"] +===== `proxy` + + * Value type is <> + * There is no default value for this setting. + +Set the address of a forward HTTP proxy. +This used to accept hashes as arguments but now only accepts +arguments of the URI type to prevent leaking credentials. + +[id="{version}-plugins-{type}s-{plugin}-resurrect_delay"] +===== `resurrect_delay` + + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] + * Default value is `5` + +How frequently, in seconds, to wait between resurrection attempts. +Resurrection is the process by which backend endpoints marked 'down' are checked +to see if they have come back to life + +[id="{version}-plugins-{type}s-{plugin}-retry_initial_interval"] +===== `retry_initial_interval` + + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] + * Default value is `2` + +Set initial interval in seconds between bulk retries. 
Doubled on each retry up to `retry_max_interval` + +[id="{version}-plugins-{type}s-{plugin}-retry_max_interval"] +===== `retry_max_interval` + + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] + * Default value is `64` + +Set max interval in seconds between bulk retries. + +[id="{version}-plugins-{type}s-{plugin}-retry_on_conflict"] +===== `retry_on_conflict` + + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] + * Default value is `1` + +The number of times Elasticsearch should internally retry an update/upserted document +See the https://www.elastic.co/guide/en/elasticsearch/guide/current/partial-updates.html[partial updates] +for more info + +[id="{version}-plugins-{type}s-{plugin}-routing"] +===== `routing` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * There is no default value for this setting. + +A routing override to be applied to all processed events. +This can be dynamic using the `%{foo}` syntax. + +[id="{version}-plugins-{type}s-{plugin}-script"] +===== `script` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * Default value is `""` + +Set script name for scripted update mode + +[id="{version}-plugins-{type}s-{plugin}-script_lang"] +===== `script_lang` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * Default value is `"painless"` + +Set the language of the used script. If not set, this defaults to painless in ES 5.0 + +[id="{version}-plugins-{type}s-{plugin}-script_type"] +===== `script_type` + + * Value can be any of: `inline`, `indexed`, `file` + * Default value is `["inline"]` + +Define the type of script referenced by "script" variable + inline : "script" contains inline script + indexed : "script" contains the name of script directly indexed in elasticsearch + file : "script" contains the name of script stored in elasticseach's config directory + +[id="{version}-plugins-{type}s-{plugin}-script_var_name"] +===== `script_var_name` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * Default value is `"event"` + +Set variable name passed to script (scripted update) + +[id="{version}-plugins-{type}s-{plugin}-scripted_upsert"] +===== `scripted_upsert` + + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] + * Default value is `false` + +if enabled, script is in charge of creating non-existent document (scripted update) + +[id="{version}-plugins-{type}s-{plugin}-sniffing"] +===== `sniffing` + + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] + * Default value is `false` + +This setting asks Elasticsearch for the list of all cluster nodes and adds them to the hosts list. +Note: This will return ALL nodes with HTTP enabled (including master nodes!). If you use +this with master nodes, you probably want to disable HTTP on them by setting +`http.enabled` to false in their elasticsearch.yml. You can either use the `sniffing` option or +manually enter multiple Elasticsearch hosts using the `hosts` parameter. 
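+
+For instance, a hypothetical configuration that seeds the connection pool with a single
+node and lets sniffing discover the rest of the cluster might look like this (the host
+name is illustrative):
+
+    output {
+      elasticsearch {
+        # Seed host; sniffing adds the other HTTP-enabled nodes it finds
+        hosts => ["es-node-1:9200"]
+        sniffing => true
+      }
+    }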
+
+[id="{version}-plugins-{type}s-{plugin}-sniffing_delay"]
+===== `sniffing_delay`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `5`
+
+How long to wait, in seconds, between sniffing attempts.
+
+[id="{version}-plugins-{type}s-{plugin}-sniffing_path"]
+===== `sniffing_path`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+HTTP path to be used for the sniffing requests. The default value is computed by
+concatenating the `path` value and `_nodes/http`. If `sniffing_path` is set it will
+be used as an absolute path. Do not use a full URL here, only paths, e.g. `/sniff/_nodes/http`.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl"]
+===== `ssl`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * There is no default value for this setting.
+
+Enable SSL/TLS secured communication to the Elasticsearch cluster. Leaving this unspecified will use whatever scheme
+is specified in the URLs listed in 'hosts'. If no explicit protocol is specified plain HTTP will be used.
+If SSL is explicitly disabled here the plugin will refuse to start if an HTTPS URL is given in 'hosts'.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_certificate_verification"]
+===== `ssl_certificate_verification`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `true`
+
+Option to validate the server's certificate. Disabling this severely compromises security.
+For more information on disabling certificate verification please read
+https://www.cs.utexas.edu/~shmat/shmat_ccs12.pdf
+
+[id="{version}-plugins-{type}s-{plugin}-template"]
+===== `template`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#path[path]
+ * There is no default value for this setting.
+
+You can set the path to your own template here, if you so desire.
+If not set, the included template will be used.
+
+[id="{version}-plugins-{type}s-{plugin}-template_name"]
+===== `template_name`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `"logstash"`
+
+This configuration option defines how the template is named inside Elasticsearch.
+Note that if you have used the template management features and subsequently
+change this, you will need to prune the old template manually, e.g.
+
+`curl -XDELETE http://localhost:9200/_template/OldTemplateName`
+
+where `OldTemplateName` is whatever the former setting was.
+
+[id="{version}-plugins-{type}s-{plugin}-template_overwrite"]
+===== `template_overwrite`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `false`
+
+The template_overwrite option will always overwrite the indicated template
+in Elasticsearch with either the one indicated by template or the included one.
+This option is set to false by default. If you always want to stay up to date
+with the template provided by Logstash, this option could be very useful to you.
+Likewise, if you have your own template file managed by puppet, for example, and
+you wanted to be able to update it regularly, this option could help there as well.
+
+Please note that if you are using your own customized version of the Logstash
+template (logstash), setting this to true will make Logstash overwrite
+the "logstash" template (i.e. remove all customized settings).
+
+[id="{version}-plugins-{type}s-{plugin}-timeout"]
+===== `timeout`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `60`
+
+Set the timeout, in seconds, for network operations and requests sent to Elasticsearch. If
+a timeout occurs, the request will be retried.
+
+[id="{version}-plugins-{type}s-{plugin}-truststore"]
+===== `truststore`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#path[path]
+ * There is no default value for this setting.
+
+The JKS truststore to validate the server's certificate.
+Use either `:truststore` or `:cacert`.
+
+[id="{version}-plugins-{type}s-{plugin}-truststore_password"]
+===== `truststore_password`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#password[password]
+ * There is no default value for this setting.
+
+Set the truststore password.
+
+[id="{version}-plugins-{type}s-{plugin}-upsert"]
+===== `upsert`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `""`
+
+Set upsert content for update mode.
+Create a new document with this parameter as a JSON string if `document_id` doesn't exist.
+
+[id="{version}-plugins-{type}s-{plugin}-user"]
+===== `user`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+Username to authenticate to a secure Elasticsearch cluster.
+
+[id="{version}-plugins-{type}s-{plugin}-validate_after_inactivity"]
+===== `validate_after_inactivity`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `10000`
+
+How long, in milliseconds, to wait before checking whether a connection is stale when
+executing a request on a connection that uses keepalive.
+You may want to set this lower if you get connection errors regularly.
+Quoting the Apache Commons docs (this client is based on Apache Commons):
+'Defines period of inactivity in milliseconds after which persistent connections must
+be re-validated prior to being leased to the consumer. Non-positive value passed to
+this method disables connection validation. This check helps detect connections that
+have become stale (half-closed) while kept inactive in the pool.'
+See https://hc.apache.org/httpcomponents-client-ga/httpclient/apidocs/org/apache/http/impl/conn/PoolingHttpClientConnectionManager.html#setValidateAfterInactivity(int)[these docs for more info].
+
+[id="{version}-plugins-{type}s-{plugin}-version"]
+===== `version`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+The version to use for indexing. Use sprintf syntax like `%{my_version}` to use a field value here.
+See https://www.elastic.co/blog/elasticsearch-versioning-support.
+
+[id="{version}-plugins-{type}s-{plugin}-version_type"]
+===== `version_type`
+
+ * Value can be any of: `internal`, `external`, `external_gt`, `external_gte`, `force`
+ * There is no default value for this setting.
+
+The version_type to use for indexing.
+See https://www.elastic.co/blog/elasticsearch-versioning-support.
+See also https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-index_.html#_version_types
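+
+As an illustrative sketch of external versioning, the following hypothetical configuration
+takes both the document ID and the version number from event fields (`my_id` and
+`doc_version` are assumptions for the example):
+
+    output {
+      elasticsearch {
+        document_id => "%{my_id}"
+        # Use the event's version number instead of letting Elasticsearch assign one
+        version => "%{doc_version}"
+        version_type => "external"
+      }
+    }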
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/outputs/elasticsearch-v8.2.2.asciidoc b/docs/versioned-plugins/outputs/elasticsearch-v8.2.2.asciidoc
new file mode 100644
index 000000000..715386282
--- /dev/null
+++ b/docs/versioned-plugins/outputs/elasticsearch-v8.2.2.asciidoc
@@ -0,0 +1,680 @@
+:plugin: elasticsearch
+:type: output
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v8.2.2
+:release_date: 2017-09-21
+:changelog_url: https://github.com/logstash-plugins/logstash-output-elasticsearch/blob/v8.2.2/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Elasticsearch output plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+.Compatibility Note
+[NOTE]
+================================================================================
+Starting with Elasticsearch 5.3, there's an {ref}/modules-http.html[HTTP setting]
+called `http.content_type.required`. If this option is set to `true`, and you
+are using Logstash 2.4 through 5.2, you need to update the Elasticsearch output
+plugin to version 6.2.5 or higher.
+
+================================================================================
+
+This plugin is the recommended method of storing logs in Elasticsearch.
+If you plan on using the Kibana web interface, you'll want to use this output.
+
+This output only speaks the HTTP protocol. HTTP is the preferred protocol for interacting with Elasticsearch as of Logstash 2.0.
+We strongly encourage the use of HTTP over the node protocol for a number of reasons. HTTP is only marginally slower,
+yet far easier to administer and work with. When using the HTTP protocol one may upgrade Elasticsearch versions without having
+to upgrade Logstash in lock-step.
+
+You can learn more about Elasticsearch at https://www.elastic.co/products/elasticsearch
+
+==== Template management for Elasticsearch 5.x
+
+The index template for this version (Logstash 5.0) has been changed to reflect Elasticsearch's mapping changes in version 5.0.
+Most importantly, the subfield for string multi-fields has changed from `.raw` to `.keyword` to match ES default
+behavior.
+
+**Users installing ES 5.x and LS 5.x**
+
+This change will not affect you and you will continue to use the ES defaults.
+
+**Users upgrading from LS 2.x to LS 5.x with ES 5.x**
+
+LS will not force-upgrade the template if a `logstash` template already exists. This means you will still use
+`.raw` for sub-fields coming from 2.x. If you choose to use the new template, you will have to reindex your data after
+the new template is installed.
+
+==== Retry Policy
+
+The retry policy has changed significantly in the 8.1.1 release.
+This plugin uses the Elasticsearch bulk API to optimize its imports into Elasticsearch. These requests may experience
+either partial or total failures. The bulk API sends batches of requests to an HTTP endpoint. Error codes for the HTTP
+request are handled differently than error codes for individual documents.
+
+HTTP requests to the bulk API are expected to return a 200 response code. All other response codes are retried indefinitely.
+
+Document-level errors are handled as follows:
+
+* 400 and 404 errors are sent to the dead letter queue (DLQ), if enabled. If a DLQ is not enabled, a log message will be emitted, and the event will be dropped. See <<dlq-policy>> for more info.
+* 409 errors (conflict) are logged as a warning and dropped.
+
+Note that 409 exceptions are no longer retried. Please set a higher `retry_on_conflict` value if you experience 409 exceptions.
+It is more performant for Elasticsearch to retry these exceptions internally than for this plugin to retry them.
+
+[[dlq-policy]]
+==== DLQ Policy
+
+Mapping (404) errors from Elasticsearch can lead to data loss. Unfortunately
+mapping errors cannot be handled without human intervention and without looking
+at the field that caused the mapping mismatch. If the DLQ is enabled, the
+original events causing the mapping errors are stored in a file that can be
+processed at a later time. Often, the offending field can be removed and
+the event re-indexed to Elasticsearch. If the DLQ is not enabled, and a mapping error
+happens, the problem is logged as a warning, and the event is dropped. See
+{logstash-ref}/dead-letter-queues.html[dead letter queues] for more information about processing events in the DLQ.
+
+==== Batch Sizes
+
+This plugin attempts to send batches of events as a single request. However, if
+a request exceeds 20MB we will break it up into multiple batch requests. If a single document exceeds 20MB it will be sent as a single request.
+
+==== DNS Caching
+
+This plugin uses the JVM to look up DNS entries and is subject to the value of https://docs.oracle.com/javase/7/docs/technotes/guides/net/properties.html[networkaddress.cache.ttl],
+a global setting for the JVM.
+
+As an example, to set your DNS TTL to 1 second you would set
+the `LS_JAVA_OPTS` environment variable to `-Dnetworkaddress.cache.ttl=1`.
+
+Keep in mind that a connection with keepalive enabled will
+not reevaluate its DNS value while the keepalive is in effect.
+
+==== HTTP Compression
+
+This plugin supports request and response compression. Response compression is enabled by default, and
+for Elasticsearch versions 5.0 and later the user doesn't have to set any configs in Elasticsearch for
+it to send back compressed responses. For versions before 5.0, `http.compression` must be set to `true` in
+https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-http.html#modules-http[Elasticsearch] to take advantage of response compression when using this plugin.
+
+For request compression, regardless of the Elasticsearch version, users have to enable the `http_compression`
+setting in their Logstash config file.
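+
+Tying the retry policy described above back to configuration: because 409 conflicts are
+dropped rather than retried by the plugin, an update-heavy pipeline can ask Elasticsearch
+itself to retry conflicting updates. A minimal sketch (the `my_id` field is illustrative):
+
+    output {
+      elasticsearch {
+        action => "update"
+        document_id => "%{my_id}"
+        # Let Elasticsearch retry conflicting updates internally
+        retry_on_conflict => 5
+      }
+    }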
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Elasticsearch Output Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-action>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-bulk_path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-cacert>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-doc_as_upsert>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-document_id>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-document_type>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-failure_type_logging_whitelist>> |{logstash-ref}/configuration-file-structure.html#array[array]|No
+| <<{version}-plugins-{type}s-{plugin}-healthcheck_path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-hosts>> |{logstash-ref}/configuration-file-structure.html#uri[uri]|No
+| <<{version}-plugins-{type}s-{plugin}-http_compression>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-index>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-keystore>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-keystore_password>> |{logstash-ref}/configuration-file-structure.html#password[password]|No
+| <<{version}-plugins-{type}s-{plugin}-manage_template>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-parameters>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No
+| <<{version}-plugins-{type}s-{plugin}-parent>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-password>> |{logstash-ref}/configuration-file-structure.html#password[password]|No
+| <<{version}-plugins-{type}s-{plugin}-path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-pipeline>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-pool_max>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+| <<{version}-plugins-{type}s-{plugin}-pool_max_per_route>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+| <<{version}-plugins-{type}s-{plugin}-proxy>> |{logstash-ref}/configuration-file-structure.html#uri[uri]|No
+| <<{version}-plugins-{type}s-{plugin}-resurrect_delay>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+| <<{version}-plugins-{type}s-{plugin}-retry_initial_interval>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+| <<{version}-plugins-{type}s-{plugin}-retry_max_interval>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+| <<{version}-plugins-{type}s-{plugin}-retry_on_conflict>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+| <<{version}-plugins-{type}s-{plugin}-routing>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-script>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-script_lang>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-script_type>> |{logstash-ref}/configuration-file-structure.html#string[string], one of `["inline", "indexed", "file"]`|No
+| <<{version}-plugins-{type}s-{plugin}-script_var_name>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-scripted_upsert>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-sniffing>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-sniffing_delay>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+| <<{version}-plugins-{type}s-{plugin}-sniffing_path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-ssl>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_verification>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-template>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-template_name>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-template_overwrite>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-timeout>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+| <<{version}-plugins-{type}s-{plugin}-truststore>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-truststore_password>> |{logstash-ref}/configuration-file-structure.html#password[password]|No
+| <<{version}-plugins-{type}s-{plugin}-upsert>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-user>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-validate_after_inactivity>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+| <<{version}-plugins-{type}s-{plugin}-version>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-version_type>> |{logstash-ref}/configuration-file-structure.html#string[string], one of `["internal", "external", "external_gt", "external_gte", "force"]`|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+output plugins.
+
+[id="{version}-plugins-{type}s-{plugin}-action"]
+===== `action`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `"index"`
+
+The Elasticsearch action to perform. Valid actions are:
+
+- index: indexes a document (an event from Logstash).
+- delete: deletes a document by id (an id is required for this action).
+- create: indexes a document, fails if a document by that id already exists in the index.
+- update: updates a document by id. Update has a special case where you can upsert -- update a
+  document if not already present. See the `upsert` option. NOTE: This does not work and is not supported
+  in Elasticsearch 1.x. Please upgrade to ES 2.x or greater to use this feature with Logstash!
+- A sprintf style string to change the action based on the content of the event. The value `%{[foo]}`
+  would use the foo field for the action.
+
+For more details on actions, check out the http://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html[Elasticsearch bulk API documentation].
+
+[id="{version}-plugins-{type}s-{plugin}-bulk_path"]
+===== `bulk_path`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+HTTP path at which to perform the `_bulk` requests.
+This defaults to a concatenation of the `path` parameter and `_bulk`.
+
+[id="{version}-plugins-{type}s-{plugin}-cacert"]
+===== `cacert`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#path[path]
+ * There is no default value for this setting.
+
+The .cer or .pem file to validate the server's certificate.
+
+[id="{version}-plugins-{type}s-{plugin}-doc_as_upsert"]
+===== `doc_as_upsert`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `false`
+
+Enable `doc_as_upsert` for update mode.
+Create a new document with source if `document_id` doesn't exist in Elasticsearch.
+
+[id="{version}-plugins-{type}s-{plugin}-document_id"]
+===== `document_id`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+The document ID for the index. Useful for overwriting existing entries in
+Elasticsearch with the same ID.
+
+[id="{version}-plugins-{type}s-{plugin}-document_type"]
+===== `document_type`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+The document type to write events to. Generally you should try to write only
+similar events to the same 'type'. String expansion `%{foo}` works here.
+Unless you set 'document_type', the event 'type' will be used if it exists;
+otherwise, the document type will be assigned the value of 'logs'.
+
+[id="{version}-plugins-{type}s-{plugin}-failure_type_logging_whitelist"]
+===== `failure_type_logging_whitelist`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#array[array]
+ * Default value is `[]`
+
+Set the Elasticsearch errors that you don't want to log; errors whose type appears
+in this whitelist are dropped from the log. A useful example is when you want to
+skip all 409 errors, which are `document_already_exists_exception`.
+
+[id="{version}-plugins-{type}s-{plugin}-healthcheck_path"]
+===== `healthcheck_path`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+HTTP path at which a HEAD request is sent when a backend is marked down. The request is
+sent in the background to see if the backend has come back to life before it is once
+again eligible to service requests.
+If you have custom firewall rules you may need to change this.
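+
+For example, to stop logging the 409-style errors mentioned under
+`failure_type_logging_whitelist` above, a configuration along these lines could be used:
+
+    output {
+      elasticsearch {
+        # Don't log conflicts caused by duplicate document IDs
+        failure_type_logging_whitelist => ["document_already_exists_exception"]
+      }
+    }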
+ `"127.0.0.1"` + `["127.0.0.1:9200","127.0.0.2:9200"]` + `["http://127.0.0.1"]` + `["https://127.0.0.1:9200"]` + `["https://127.0.0.1:9200/mypath"]` (If using a proxy on a subpath) +It is important to exclude http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-node.html[dedicated master nodes] from the `hosts` list +to prevent LS from sending bulk requests to the master nodes. So this parameter should only reference either data or client nodes in Elasticsearch. + +Any special characters present in the URLs here MUST be URL escaped! This means `#` should be put in as `%23` for instance. + +[id="{version}-plugins-{type}s-{plugin}-http_compression"] +===== `http_compression` + + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] + * Default value is `false` + +Enable gzip compression on requests. Note that response compression is on by default for Elasticsearch v5.0 and beyond + +[id="{version}-plugins-{type}s-{plugin}-index"] +===== `index` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * Default value is `"logstash-%{+YYYY.MM.dd}"` + +The index to write events to. This can be dynamic using the `%{foo}` syntax. +The default value will partition your indices by day so you can more easily +delete old data or only search specific date ranges. +Indexes may not contain uppercase characters. +For weekly indexes ISO 8601 format is recommended, eg. logstash-%{+xxxx.ww}. +LS uses Joda to format the index pattern from event timestamp. +Joda formats are defined http://www.joda.org/joda-time/apidocs/org/joda/time/format/DateTimeFormat.html[here]. + +[id="{version}-plugins-{type}s-{plugin}-keystore"] +===== `keystore` + + * Value type is {logstash-ref}/configuration-file-structure.html#path[path] + * There is no default value for this setting. + +The keystore used to present a certificate to the server. +It can be either .jks or .p12 + +[id="{version}-plugins-{type}s-{plugin}-keystore_password"] +===== `keystore_password` + + * Value type is {logstash-ref}/configuration-file-structure.html#password[password] + * There is no default value for this setting. + +Set the keystore password + +[id="{version}-plugins-{type}s-{plugin}-manage_template"] +===== `manage_template` + + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] + * Default value is `true` + +From Logstash 1.3 onwards, a template is applied to Elasticsearch during +Logstash's startup if one with the name `template_name` does not already exist. +By default, the contents of this template is the default template for +`logstash-%{+YYYY.MM.dd}` which always matches indices based on the pattern +`logstash-*`. Should you require support for other index names, or would like +to change the mappings in the template in general, a custom template can be +specified by setting `template` to the path of a template file. + +Setting `manage_template` to false disables this feature. If you require more +control over template creation, (e.g. creating indices dynamically based on +field names) you should set `manage_template` to false and use the REST +API to apply your templates manually. + +[id="{version}-plugins-{type}s-{plugin}-parameters"] +===== `parameters` + + * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash] + * There is no default value for this setting. + +Pass a set of key value pairs as the URL query string. This query string is added +to every host listed in the 'hosts' configuration. 
If the 'hosts' list contains +urls that already have query strings, the one specified here will be appended. + +[id="{version}-plugins-{type}s-{plugin}-parent"] +===== `parent` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * Default value is `nil` + +For child documents, ID of the associated parent. +This can be dynamic using the `%{foo}` syntax. + +[id="{version}-plugins-{type}s-{plugin}-password"] +===== `password` + + * Value type is {logstash-ref}/configuration-file-structure.html#password[password] + * There is no default value for this setting. + +Password to authenticate to a secure Elasticsearch cluster + +[id="{version}-plugins-{type}s-{plugin}-path"] +===== `path` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * There is no default value for this setting. + +HTTP Path at which the Elasticsearch server lives. Use this if you must run Elasticsearch behind a proxy that remaps +the root path for the Elasticsearch HTTP API lives. +Note that if you use paths as components of URLs in the 'hosts' field you may +not also set this field. That will raise an error at startup + +[id="{version}-plugins-{type}s-{plugin}-pipeline"] +===== `pipeline` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * Default value is `nil` + +Set which ingest pipeline you wish to execute for an event. You can also use event dependent configuration +here like `pipeline => "%{INGEST_PIPELINE}"` + +[id="{version}-plugins-{type}s-{plugin}-pool_max"] +===== `pool_max` + + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] + * Default value is `1000` + +While the output tries to reuse connections efficiently we have a maximum. +This sets the maximum number of open connections the output will create. +Setting this too low may mean frequently closing / opening connections +which is bad. + +[id="{version}-plugins-{type}s-{plugin}-pool_max_per_route"] +===== `pool_max_per_route` + + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] + * Default value is `100` + +While the output tries to reuse connections efficiently we have a maximum per endpoint. +This sets the maximum number of open connections per endpoint the output will create. +Setting this too low may mean frequently closing / opening connections +which is bad. + +[id="{version}-plugins-{type}s-{plugin}-proxy"] +===== `proxy` + + * Value type is <> + * There is no default value for this setting. + +Set the address of a forward HTTP proxy. +This used to accept hashes as arguments but now only accepts +arguments of the URI type to prevent leaking credentials. + +[id="{version}-plugins-{type}s-{plugin}-resurrect_delay"] +===== `resurrect_delay` + + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] + * Default value is `5` + +How frequently, in seconds, to wait between resurrection attempts. +Resurrection is the process by which backend endpoints marked 'down' are checked +to see if they have come back to life + +[id="{version}-plugins-{type}s-{plugin}-retry_initial_interval"] +===== `retry_initial_interval` + + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] + * Default value is `2` + +Set initial interval in seconds between bulk retries. 
Doubled on each retry up to `retry_max_interval` + +[id="{version}-plugins-{type}s-{plugin}-retry_max_interval"] +===== `retry_max_interval` + + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] + * Default value is `64` + +Set max interval in seconds between bulk retries. + +[id="{version}-plugins-{type}s-{plugin}-retry_on_conflict"] +===== `retry_on_conflict` + + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] + * Default value is `1` + +The number of times Elasticsearch should internally retry an update/upserted document +See the https://www.elastic.co/guide/en/elasticsearch/guide/current/partial-updates.html[partial updates] +for more info + +[id="{version}-plugins-{type}s-{plugin}-routing"] +===== `routing` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * There is no default value for this setting. + +A routing override to be applied to all processed events. +This can be dynamic using the `%{foo}` syntax. + +[id="{version}-plugins-{type}s-{plugin}-script"] +===== `script` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * Default value is `""` + +Set script name for scripted update mode + +[id="{version}-plugins-{type}s-{plugin}-script_lang"] +===== `script_lang` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * Default value is `"painless"` + +Set the language of the used script. If not set, this defaults to painless in ES 5.0 + +[id="{version}-plugins-{type}s-{plugin}-script_type"] +===== `script_type` + + * Value can be any of: `inline`, `indexed`, `file` + * Default value is `["inline"]` + +Define the type of script referenced by "script" variable + inline : "script" contains inline script + indexed : "script" contains the name of script directly indexed in elasticsearch + file : "script" contains the name of script stored in elasticseach's config directory + +[id="{version}-plugins-{type}s-{plugin}-script_var_name"] +===== `script_var_name` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * Default value is `"event"` + +Set variable name passed to script (scripted update) + +[id="{version}-plugins-{type}s-{plugin}-scripted_upsert"] +===== `scripted_upsert` + + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] + * Default value is `false` + +if enabled, script is in charge of creating non-existent document (scripted update) + +[id="{version}-plugins-{type}s-{plugin}-sniffing"] +===== `sniffing` + + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] + * Default value is `false` + +This setting asks Elasticsearch for the list of all cluster nodes and adds them to the hosts list. +Note: This will return ALL nodes with HTTP enabled (including master nodes!). If you use +this with master nodes, you probably want to disable HTTP on them by setting +`http.enabled` to false in their elasticsearch.yml. You can either use the `sniffing` option or +manually enter multiple Elasticsearch hosts using the `hosts` parameter. 
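+
+As a sketch of the scripted-update options described above, the following hypothetical
+configuration runs an inline script against each matched document (the script body and
+the `my_id` field are illustrative; `event` is the default `script_var_name`):
+
+    output {
+      elasticsearch {
+        action => "update"
+        document_id => "%{my_id}"
+        script_type => "inline"
+        # Inline script; the event is exposed to the script as a parameter
+        script => "ctx._source.counter += params.event.counter"
+      }
+    }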
+ +[id="{version}-plugins-{type}s-{plugin}-sniffing_delay"] +===== `sniffing_delay` + + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] + * Default value is `5` + +How long to wait, in seconds, between sniffing attempts + +[id="{version}-plugins-{type}s-{plugin}-sniffing_path"] +===== `sniffing_path` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * There is no default value for this setting. + +HTTP Path to be used for the sniffing requests +the default value is computed by concatenating the path value and "_nodes/http" +if sniffing_path is set it will be used as an absolute path +do not use full URL here, only paths, e.g. "/sniff/_nodes/http" + +[id="{version}-plugins-{type}s-{plugin}-ssl"] +===== `ssl` + + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] + * There is no default value for this setting. + +Enable SSL/TLS secured communication to Elasticsearch cluster. Leaving this unspecified will use whatever scheme +is specified in the URLs listed in 'hosts'. If no explicit protocol is specified plain HTTP will be used. +If SSL is explicitly disabled here the plugin will refuse to start if an HTTPS URL is given in 'hosts' + +[id="{version}-plugins-{type}s-{plugin}-ssl_certificate_verification"] +===== `ssl_certificate_verification` + + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] + * Default value is `true` + +Option to validate the server's certificate. Disabling this severely compromises security. +For more information on disabling certificate verification please read +https://www.cs.utexas.edu/~shmat/shmat_ccs12.pdf + +[id="{version}-plugins-{type}s-{plugin}-template"] +===== `template` + + * Value type is {logstash-ref}/configuration-file-structure.html#path[path] + * There is no default value for this setting. + +You can set the path to your own template here, if you so desire. +If not set, the included template will be used. + +[id="{version}-plugins-{type}s-{plugin}-template_name"] +===== `template_name` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * Default value is `"logstash"` + +This configuration option defines how the template is named inside Elasticsearch. +Note that if you have used the template management features and subsequently +change this, you will need to prune the old template manually, e.g. + +`curl -XDELETE ` + +where `OldTemplateName` is whatever the former setting was. + +[id="{version}-plugins-{type}s-{plugin}-template_overwrite"] +===== `template_overwrite` + + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] + * Default value is `false` + +The template_overwrite option will always overwrite the indicated template +in Elasticsearch with either the one indicated by template or the included one. +This option is set to false by default. If you always want to stay up to date +with the template provided by Logstash, this option could be very useful to you. +Likewise, if you have your own template file managed by puppet, for example, and +you wanted to be able to update it regularly, this option could help there as well. + +Please note that if you are using your own customized version of the Logstash +template (logstash), setting this to true will make Logstash to overwrite +the "logstash" template (i.e. 
+
+[id="{version}-plugins-{type}s-{plugin}-template_overwrite"]
+===== `template_overwrite`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `false`
+
+The template_overwrite option will always overwrite the indicated template
+in Elasticsearch with either the one indicated by template or the included one.
+This option is set to false by default. If you always want to stay up to date
+with the template provided by Logstash, this option could be very useful to you.
+Likewise, if you have your own template file managed by puppet, for example, and
+you wanted to be able to update it regularly, this option could help there as well.
+
+Please note that if you are using your own customized version of the Logstash
+template (logstash), setting this to true will make Logstash overwrite
+the "logstash" template (i.e. remove all customized settings).
+
+[id="{version}-plugins-{type}s-{plugin}-timeout"]
+===== `timeout`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `60`
+
+Set the timeout, in seconds, for network operations and requests sent to Elasticsearch. If
+a timeout occurs, the request will be retried.
+
+[id="{version}-plugins-{type}s-{plugin}-truststore"]
+===== `truststore`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#path[path]
+ * There is no default value for this setting.
+
+The JKS truststore to validate the server's certificate.
+Use either `:truststore` or `:cacert`.
+
+[id="{version}-plugins-{type}s-{plugin}-truststore_password"]
+===== `truststore_password`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#password[password]
+ * There is no default value for this setting.
+
+Set the truststore password.
+
+[id="{version}-plugins-{type}s-{plugin}-upsert"]
+===== `upsert`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `""`
+
+Set upsert content for update mode.
+Create a new document with this parameter as a JSON string if `document_id` doesn't exist.
+
+[id="{version}-plugins-{type}s-{plugin}-user"]
+===== `user`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+Username to authenticate to a secure Elasticsearch cluster.
+
+[id="{version}-plugins-{type}s-{plugin}-validate_after_inactivity"]
+===== `validate_after_inactivity`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `10000`
+
+How long to wait before checking for a stale connection when executing a request on a connection using keepalive.
+You may want to set this lower if you get connection errors regularly.
+Quoting the Apache Commons docs (this client is based on Apache Commons):
+'Defines period of inactivity in milliseconds after which persistent connections must
+be re-validated prior to being leased to the consumer. Non-positive value passed to
+this method disables connection validation. This check helps detect connections that
+have become stale (half-closed) while kept inactive in the pool.'
+See https://hc.apache.org/httpcomponents-client-ga/httpclient/apidocs/org/apache/http/impl/conn/PoolingHttpClientConnectionManager.html#setValidateAfterInactivity(int)[these docs for more info]
+
+[id="{version}-plugins-{type}s-{plugin}-version"]
+===== `version`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+The version to use for indexing. Use sprintf syntax like `%{my_version}` to use a field value here.
+See https://www.elastic.co/blog/elasticsearch-versioning-support.
+
+[id="{version}-plugins-{type}s-{plugin}-version_type"]
+===== `version_type`
+
+ * Value can be any of: `internal`, `external`, `external_gt`, `external_gte`, `force`
+ * There is no default value for this setting.
+
+The version_type to use for indexing.
+See https://www.elastic.co/blog/elasticsearch-versioning-support.
+See also https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-index_.html#_version_types + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/elasticsearch-v9.0.0.asciidoc b/docs/versioned-plugins/outputs/elasticsearch-v9.0.0.asciidoc new file mode 100644 index 000000000..1973c5180 --- /dev/null +++ b/docs/versioned-plugins/outputs/elasticsearch-v9.0.0.asciidoc @@ -0,0 +1,684 @@ +:plugin: elasticsearch +:type: output + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v9.0.0 +:release_date: 2017-09-29 +:changelog_url: https://github.com/logstash-plugins/logstash-output-elasticsearch/blob/v9.0.0/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Elasticsearch output plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +.Compatibility Note +[NOTE] +================================================================================ +Starting with Elasticsearch 5.3, there's an {ref}/modules-http.html[HTTP setting] +called `http.content_type.required`. If this option is set to `true`, and you +are using Logstash 2.4 through 5.2, you need to update the Elasticsearch output +plugin to version 6.2.5 or higher. + +================================================================================ + +This plugin is the recommended method of storing logs in Elasticsearch. +If you plan on using the Kibana web interface, you'll want to use this output. + +This output only speaks the HTTP protocol. HTTP is the preferred protocol for interacting with Elasticsearch as of Logstash 2.0. +We strongly encourage the use of HTTP over the node protocol for a number of reasons. HTTP is only marginally slower, +yet far easier to administer and work with. When using the HTTP protocol one may upgrade Elasticsearch versions without having +to upgrade Logstash in lock-step. + +You can learn more about Elasticsearch at + +==== Template management for Elasticsearch 5.x + +Index template for this version (Logstash 5.0) has been changed to reflect Elasticsearch's mapping changes in version 5.0. +Most importantly, the subfield for string multi-fields has changed from `.raw` to `.keyword` to match ES default +behavior. + +**Users installing ES 5.x and LS 5.x** + +This change will not affect you and you will continue to use the ES defaults. + +**Users upgrading from LS 2.x to LS 5.x with ES 5.x** + +LS will not force upgrade the template, if `logstash` template already exists. This means you will still use +`.raw` for sub-fields coming from 2.x. If you choose to use the new template, you will have to reindex your data after +the new template is installed. + +==== Retry Policy + +The retry policy has changed significantly in the 8.1.1 release. +This plugin uses the Elasticsearch bulk API to optimize its imports into Elasticsearch. These requests may experience +either partial or total failures. The bulk API sends batches of requests to an HTTP endpoint. Error codes for the HTTP +request are handled differently than error codes for individual documents. + +HTTP requests to the bulk API are expected to return a 200 response code. 
All other response codes are retried indefinitely.
+
+The following document errors are handled as follows:
+
+* 400 and 404 errors are sent to the dead letter queue (DLQ), if enabled. If a DLQ is not enabled, a log message will be emitted, and the event will be dropped. See <> for more info.
+* 409 errors (conflict) are logged as a warning and dropped.
+
+Note that 409 exceptions are no longer retried. Please set a higher `retry_on_conflict` value if you experience 409 exceptions.
+It is more performant for Elasticsearch to retry these exceptions than this plugin.
+
+[[dlq-policy]]
+==== DLQ Policy
+
+Mapping (404) errors from Elasticsearch can lead to data loss. Unfortunately
+mapping errors cannot be handled without human intervention and without looking
+at the field that caused the mapping mismatch. If the DLQ is enabled, the
+original events causing the mapping errors are stored in a file that can be
+processed at a later time. Often, the offending field can be removed and
+the event re-indexed to Elasticsearch. If the DLQ is not enabled, and a mapping error
+happens, the problem is logged as a warning, and the event is dropped. See
+<> for more information about processing events in the DLQ.
+
+==== Batch Sizes
+
+This plugin attempts to send batches of events as a single request. However, if
+a request exceeds 20MB we will break it up into multiple batch requests. If a single document exceeds 20MB it will be sent as a single request.
+
+==== DNS Caching
+
+This plugin uses the JVM to lookup DNS entries and is subject to the value of https://docs.oracle.com/javase/7/docs/technotes/guides/net/properties.html[networkaddress.cache.ttl],
+a global setting for the JVM.
+
+As an example, to set your DNS TTL to 1 second you would set
+the `LS_JAVA_OPTS` environment variable to `-Dnetworkaddress.cache.ttl=1`.
+
+Keep in mind that a connection with keepalive enabled will
+not reevaluate its DNS value while the keepalive is in effect.
+
+==== HTTP Compression
+
+This plugin supports request and response compression. Response compression is enabled by default; for
+Elasticsearch versions 5.0 and later, the user doesn't have to set any configs in
+https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-http.html#modules-http[Elasticsearch] for
+it to send back compressed responses. For versions before 5.0, `http.compression` must be set to `true` in
+Elasticsearch to take advantage of response compression when using this plugin.
+
+For request compression, regardless of the Elasticsearch version, users have to enable the `http_compression`
+setting in their Logstash config file.
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Elasticsearch Output Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+ +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-action>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-bulk_path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-cacert>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-doc_as_upsert>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-document_id>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-document_type>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-failure_type_logging_whitelist>> |{logstash-ref}/configuration-file-structure.html#array[array]|No +| <<{version}-plugins-{type}s-{plugin}-healthcheck_path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-hosts>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-http_compression>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-index>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-keystore>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-keystore_password>> |{logstash-ref}/configuration-file-structure.html#password[password]|No +| <<{version}-plugins-{type}s-{plugin}-manage_template>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-parameters>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No +| <<{version}-plugins-{type}s-{plugin}-parent>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-password>> |{logstash-ref}/configuration-file-structure.html#password[password]|No +| <<{version}-plugins-{type}s-{plugin}-path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-pipeline>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-pool_max>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-pool_max_per_route>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-proxy>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-resurrect_delay>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-retry_initial_interval>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-retry_max_interval>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-retry_on_conflict>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-routing>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-script>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-script_lang>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-script_type>> 
|{logstash-ref}/configuration-file-structure.html#string[string], one of `["inline", "indexed", "file"]`|No
+| <<{version}-plugins-{type}s-{plugin}-script_var_name>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-scripted_upsert>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-sniffing>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-sniffing_delay>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+| <<{version}-plugins-{type}s-{plugin}-sniffing_path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-ssl>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_verification>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-template>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-template_name>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-template_overwrite>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-timeout>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+| <<{version}-plugins-{type}s-{plugin}-truststore>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-truststore_password>> |{logstash-ref}/configuration-file-structure.html#password[password]|No
+| <<{version}-plugins-{type}s-{plugin}-upsert>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-user>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-validate_after_inactivity>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+| <<{version}-plugins-{type}s-{plugin}-version>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-version_type>> |{logstash-ref}/configuration-file-structure.html#string[string], one of `["internal", "external", "external_gt", "external_gte", "force"]`|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+output plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-action"]
+===== `action`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `"index"`
+
+The Elasticsearch action to perform. Valid actions are:
+
+- index: indexes a document (an event from Logstash).
+- delete: deletes a document by id (an id is required for this action).
+- create: indexes a document, fails if a document by that id already exists in the index.
+- update: updates a document by id. Update has a special case where you can upsert -- update a
+  document if not already present. See the `upsert` option. NOTE: This does not work and is not supported
+  in Elasticsearch 1.x. Please upgrade to ES 2.x or greater to use this feature with Logstash!
+- A sprintf style string to change the action based on the content of the event. The value `%{[foo]}`
+  would use the foo field for the action.
+
+For more details on actions, check out the http://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html[Elasticsearch bulk API documentation]
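+
+As a sketch, assuming each event carries its target document id in a `doc_id`
+field (a hypothetical name), deleting the corresponding documents would look like:
+
+    output {
+      elasticsearch {
+        hosts => ["127.0.0.1:9200"]
+        document_id => "%{[doc_id]}"
+        action => "delete"
+      }
+    }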
+
+[id="{version}-plugins-{type}s-{plugin}-bulk_path"]
+===== `bulk_path`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+HTTP path to perform the _bulk requests to.
+This defaults to a concatenation of the `path` parameter and "_bulk".
+
+[id="{version}-plugins-{type}s-{plugin}-cacert"]
+===== `cacert`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#path[path]
+ * There is no default value for this setting.
+
+The .cer or .pem file to validate the server's certificate.
+
+[id="{version}-plugins-{type}s-{plugin}-doc_as_upsert"]
+===== `doc_as_upsert`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `false`
+
+Enable `doc_as_upsert` for update mode.
+Create a new document with source if `document_id` doesn't exist in Elasticsearch.
+
+[id="{version}-plugins-{type}s-{plugin}-document_id"]
+===== `document_id`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+The document ID for the index. Useful for overwriting existing entries in
+Elasticsearch with the same ID.
+
+[id="{version}-plugins-{type}s-{plugin}-document_type"]
+===== `document_type`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+ * This option is deprecated
+
+Note: This option is deprecated due to the https://www.elastic.co/guide/en/elasticsearch/reference/6.0/removal-of-types.html[removal of types in Elasticsearch 6.0].
+It will be removed in the next major version of Logstash.
+This sets the document type to write events to. Generally you should try to write only
+similar events to the same 'type'. String expansion `%{foo}` works here.
+Unless you set 'document_type', the event 'type' will be used if it exists;
+otherwise the document type will be assigned the value of 'doc'.
+
+[id="{version}-plugins-{type}s-{plugin}-failure_type_logging_whitelist"]
+===== `failure_type_logging_whitelist`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#array[array]
+ * Default value is `[]`
+
+Set the Elasticsearch error types that you don't want to log, as a whitelist.
+A useful example is when you want to skip all 409 errors,
+which are `document_already_exists_exception`.
+
+[id="{version}-plugins-{type}s-{plugin}-healthcheck_path"]
+===== `healthcheck_path`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+HTTP path where a HEAD request is sent when a backend is marked down.
+The request is sent in the background to see if the backend has come back again,
+before it is once again eligible to service requests.
+If you have custom firewall rules you may need to change this.
+
+[id="{version}-plugins-{type}s-{plugin}-hosts"]
+===== `hosts`
+
+ * Value type is <>
+ * Default value is `[//127.0.0.1]`
+
+Sets the host(s) of the remote instance. If given an array it will load balance requests across the hosts specified in the `hosts` parameter.
+Remember the `http` protocol uses the http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-http.html#modules-http[http] address (e.g. 9200, not 9300).
+ `"127.0.0.1"` + `["127.0.0.1:9200","127.0.0.2:9200"]` + `["http://127.0.0.1"]` + `["https://127.0.0.1:9200"]` + `["https://127.0.0.1:9200/mypath"]` (If using a proxy on a subpath) +It is important to exclude http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-node.html[dedicated master nodes] from the `hosts` list +to prevent LS from sending bulk requests to the master nodes. So this parameter should only reference either data or client nodes in Elasticsearch. + +Any special characters present in the URLs here MUST be URL escaped! This means `#` should be put in as `%23` for instance. + +[id="{version}-plugins-{type}s-{plugin}-http_compression"] +===== `http_compression` + + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] + * Default value is `false` + +Enable gzip compression on requests. Note that response compression is on by default for Elasticsearch v5.0 and beyond + +[id="{version}-plugins-{type}s-{plugin}-index"] +===== `index` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * Default value is `"logstash-%{+YYYY.MM.dd}"` + +The index to write events to. This can be dynamic using the `%{foo}` syntax. +The default value will partition your indices by day so you can more easily +delete old data or only search specific date ranges. +Indexes may not contain uppercase characters. +For weekly indexes ISO 8601 format is recommended, eg. logstash-%{+xxxx.ww}. +LS uses Joda to format the index pattern from event timestamp. +Joda formats are defined http://www.joda.org/joda-time/apidocs/org/joda/time/format/DateTimeFormat.html[here]. + +[id="{version}-plugins-{type}s-{plugin}-keystore"] +===== `keystore` + + * Value type is {logstash-ref}/configuration-file-structure.html#path[path] + * There is no default value for this setting. + +The keystore used to present a certificate to the server. +It can be either .jks or .p12 + +[id="{version}-plugins-{type}s-{plugin}-keystore_password"] +===== `keystore_password` + + * Value type is {logstash-ref}/configuration-file-structure.html#password[password] + * There is no default value for this setting. + +Set the keystore password + +[id="{version}-plugins-{type}s-{plugin}-manage_template"] +===== `manage_template` + + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] + * Default value is `true` + +From Logstash 1.3 onwards, a template is applied to Elasticsearch during +Logstash's startup if one with the name `template_name` does not already exist. +By default, the contents of this template is the default template for +`logstash-%{+YYYY.MM.dd}` which always matches indices based on the pattern +`logstash-*`. Should you require support for other index names, or would like +to change the mappings in the template in general, a custom template can be +specified by setting `template` to the path of a template file. + +Setting `manage_template` to false disables this feature. If you require more +control over template creation, (e.g. creating indices dynamically based on +field names) you should set `manage_template` to false and use the REST +API to apply your templates manually. + +[id="{version}-plugins-{type}s-{plugin}-parameters"] +===== `parameters` + + * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash] + * There is no default value for this setting. + +Pass a set of key value pairs as the URL query string. This query string is added +to every host listed in the 'hosts' configuration. 
If the 'hosts' list contains +urls that already have query strings, the one specified here will be appended. + +[id="{version}-plugins-{type}s-{plugin}-parent"] +===== `parent` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * Default value is `nil` + +For child documents, ID of the associated parent. +This can be dynamic using the `%{foo}` syntax. + +[id="{version}-plugins-{type}s-{plugin}-password"] +===== `password` + + * Value type is {logstash-ref}/configuration-file-structure.html#password[password] + * There is no default value for this setting. + +Password to authenticate to a secure Elasticsearch cluster + +[id="{version}-plugins-{type}s-{plugin}-path"] +===== `path` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * There is no default value for this setting. + +HTTP Path at which the Elasticsearch server lives. Use this if you must run Elasticsearch behind a proxy that remaps +the root path for the Elasticsearch HTTP API lives. +Note that if you use paths as components of URLs in the 'hosts' field you may +not also set this field. That will raise an error at startup + +[id="{version}-plugins-{type}s-{plugin}-pipeline"] +===== `pipeline` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * Default value is `nil` + +Set which ingest pipeline you wish to execute for an event. You can also use event dependent configuration +here like `pipeline => "%{INGEST_PIPELINE}"` + +[id="{version}-plugins-{type}s-{plugin}-pool_max"] +===== `pool_max` + + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] + * Default value is `1000` + +While the output tries to reuse connections efficiently we have a maximum. +This sets the maximum number of open connections the output will create. +Setting this too low may mean frequently closing / opening connections +which is bad. + +[id="{version}-plugins-{type}s-{plugin}-pool_max_per_route"] +===== `pool_max_per_route` + + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] + * Default value is `100` + +While the output tries to reuse connections efficiently we have a maximum per endpoint. +This sets the maximum number of open connections per endpoint the output will create. +Setting this too low may mean frequently closing / opening connections +which is bad. + +[id="{version}-plugins-{type}s-{plugin}-proxy"] +===== `proxy` + + * Value type is <> + * There is no default value for this setting. + +Set the address of a forward HTTP proxy. +This used to accept hashes as arguments but now only accepts +arguments of the URI type to prevent leaking credentials. + +[id="{version}-plugins-{type}s-{plugin}-resurrect_delay"] +===== `resurrect_delay` + + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] + * Default value is `5` + +How frequently, in seconds, to wait between resurrection attempts. +Resurrection is the process by which backend endpoints marked 'down' are checked +to see if they have come back to life + +[id="{version}-plugins-{type}s-{plugin}-retry_initial_interval"] +===== `retry_initial_interval` + + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] + * Default value is `2` + +Set initial interval in seconds between bulk retries. 
+
+[id="{version}-plugins-{type}s-{plugin}-pool_max"]
+===== `pool_max`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `1000`
+
+While the output tries to reuse connections efficiently we have a maximum.
+This sets the maximum number of open connections the output will create.
+Setting this too low may mean frequently closing / opening connections
+which is bad.
+
+[id="{version}-plugins-{type}s-{plugin}-pool_max_per_route"]
+===== `pool_max_per_route`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `100`
+
+While the output tries to reuse connections efficiently we have a maximum per endpoint.
+This sets the maximum number of open connections per endpoint the output will create.
+Setting this too low may mean frequently closing / opening connections
+which is bad.
+
+[id="{version}-plugins-{type}s-{plugin}-proxy"]
+===== `proxy`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Set the address of a forward HTTP proxy.
+This used to accept hashes as arguments but now only accepts
+arguments of the URI type to prevent leaking credentials.
+
+[id="{version}-plugins-{type}s-{plugin}-resurrect_delay"]
+===== `resurrect_delay`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `5`
+
+How frequently, in seconds, to wait between resurrection attempts.
+Resurrection is the process by which backend endpoints marked 'down' are checked
+to see if they have come back to life.
+
+[id="{version}-plugins-{type}s-{plugin}-retry_initial_interval"]
+===== `retry_initial_interval`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `2`
+
+Set the initial interval, in seconds, between bulk retries. Doubled on each retry up to `retry_max_interval`.
+
+[id="{version}-plugins-{type}s-{plugin}-retry_max_interval"]
+===== `retry_max_interval`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `64`
+
+Set the max interval, in seconds, between bulk retries.
+
+[id="{version}-plugins-{type}s-{plugin}-retry_on_conflict"]
+===== `retry_on_conflict`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `1`
+
+The number of times Elasticsearch should internally retry an updated/upserted document.
+See the https://www.elastic.co/guide/en/elasticsearch/guide/current/partial-updates.html[partial updates documentation]
+for more information.
+
+[id="{version}-plugins-{type}s-{plugin}-routing"]
+===== `routing`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+A routing override to be applied to all processed events.
+This can be dynamic using the `%{foo}` syntax.
+
+[id="{version}-plugins-{type}s-{plugin}-script"]
+===== `script`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `""`
+
+Set the script name for scripted update mode.
+
+[id="{version}-plugins-{type}s-{plugin}-script_lang"]
+===== `script_lang`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `"painless"`
+
+Set the language of the used script. If not set, this defaults to painless in ES 5.0.
+When using indexed (stored) scripts on Elasticsearch 6 and higher, you must set this parameter to `""` (empty string).
+
+[id="{version}-plugins-{type}s-{plugin}-script_type"]
+===== `script_type`
+
+ * Value can be any of: `inline`, `indexed`, `file`
+ * Default value is `["inline"]`
+
+Define the type of script referenced by the `script` variable:
+
+* `inline`: `script` contains an inline script.
+* `indexed`: `script` contains the name of a script directly indexed in Elasticsearch.
+* `file`: `script` contains the name of a script stored in Elasticsearch's config directory.
+
+[id="{version}-plugins-{type}s-{plugin}-script_var_name"]
+===== `script_var_name`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `"event"`
+
+Set the variable name passed to the script (scripted update).
+
+[id="{version}-plugins-{type}s-{plugin}-scripted_upsert"]
+===== `scripted_upsert`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `false`
+
+If enabled, the script is in charge of creating a non-existent document (scripted update).
+
+[id="{version}-plugins-{type}s-{plugin}-sniffing"]
+===== `sniffing`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `false`
+
+This setting asks Elasticsearch for the list of all cluster nodes and adds them to the hosts list.
+Note: This will return ALL nodes with HTTP enabled (including master nodes!). If you use
+this with master nodes, you probably want to disable HTTP on them by setting
+`http.enabled` to false in their elasticsearch.yml. You can either use the `sniffing` option or
+manually enter multiple Elasticsearch hosts using the `hosts` parameter.
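+
+As an illustrative sketch of scripted update mode (the `doc_id` field and the
+counter logic are hypothetical, not part of the plugin):
+
+    output {
+      elasticsearch {
+        hosts => ["127.0.0.1:9200"]
+        action => "update"
+        document_id => "%{[doc_id]}"
+        script_type => "inline"
+        script_lang => "painless"
+        script => "ctx._source.counter = (ctx._source.counter ?: 0) + 1"
+      }
+    }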
+
+[id="{version}-plugins-{type}s-{plugin}-sniffing_delay"]
+===== `sniffing_delay`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `5`
+
+How long to wait, in seconds, between sniffing attempts.
+
+[id="{version}-plugins-{type}s-{plugin}-sniffing_path"]
+===== `sniffing_path`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+HTTP path to be used for the sniffing requests.
+The default value is computed by concatenating the `path` value and "_nodes/http".
+If `sniffing_path` is set, it will be used as an absolute path;
+do not use a full URL here, only a path, e.g. "/sniff/_nodes/http".
+
+[id="{version}-plugins-{type}s-{plugin}-ssl"]
+===== `ssl`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * There is no default value for this setting.
+
+Enable SSL/TLS secured communication to the Elasticsearch cluster. Leaving this unspecified will use whatever scheme
+is specified in the URLs listed in 'hosts'. If no explicit protocol is specified, plain HTTP will be used.
+If SSL is explicitly disabled here, the plugin will refuse to start if an HTTPS URL is given in 'hosts'.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_certificate_verification"]
+===== `ssl_certificate_verification`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `true`
+
+Option to validate the server's certificate. Disabling this severely compromises security.
+For more information on disabling certificate verification please read
+https://www.cs.utexas.edu/~shmat/shmat_ccs12.pdf
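+
+A sketch of TLS-secured output that keeps certificate verification enabled (the
+CA certificate path is a placeholder):
+
+    output {
+      elasticsearch {
+        hosts => ["https://127.0.0.1:9200"]
+        ssl => true
+        ssl_certificate_verification => true
+        cacert => "/path/to/ca.pem"
+      }
+    }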
+
+[id="{version}-plugins-{type}s-{plugin}-template"]
+===== `template`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#path[path]
+ * There is no default value for this setting.
+
+You can set the path to your own template here, if you so desire.
+If not set, the included template will be used.
+
+[id="{version}-plugins-{type}s-{plugin}-template_name"]
+===== `template_name`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `"logstash"`
+
+This configuration option defines how the template is named inside Elasticsearch.
+Note that if you have used the template management features and subsequently
+change this, you will need to prune the old template manually, e.g.
+
+`curl -XDELETE `
+
+where `OldTemplateName` is whatever the former setting was.
+
+[id="{version}-plugins-{type}s-{plugin}-template_overwrite"]
+===== `template_overwrite`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `false`
+
+The template_overwrite option will always overwrite the indicated template
+in Elasticsearch with either the one indicated by template or the included one.
+This option is set to false by default. If you always want to stay up to date
+with the template provided by Logstash, this option could be very useful to you.
+Likewise, if you have your own template file managed by puppet, for example, and
+you wanted to be able to update it regularly, this option could help there as well.
+
+Please note that if you are using your own customized version of the Logstash
+template (logstash), setting this to true will make Logstash overwrite
+the "logstash" template (i.e. remove all customized settings).
+
+[id="{version}-plugins-{type}s-{plugin}-timeout"]
+===== `timeout`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `60`
+
+Set the timeout, in seconds, for network operations and requests sent to Elasticsearch. If
+a timeout occurs, the request will be retried.
+
+[id="{version}-plugins-{type}s-{plugin}-truststore"]
+===== `truststore`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#path[path]
+ * There is no default value for this setting.
+
+The JKS truststore to validate the server's certificate.
+Use either `:truststore` or `:cacert`.
+
+[id="{version}-plugins-{type}s-{plugin}-truststore_password"]
+===== `truststore_password`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#password[password]
+ * There is no default value for this setting.
+
+Set the truststore password.
+
+[id="{version}-plugins-{type}s-{plugin}-upsert"]
+===== `upsert`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `""`
+
+Set upsert content for update mode.
+Create a new document with this parameter as a JSON string if `document_id` doesn't exist.
+
+[id="{version}-plugins-{type}s-{plugin}-user"]
+===== `user`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+Username to authenticate to a secure Elasticsearch cluster.
+
+[id="{version}-plugins-{type}s-{plugin}-validate_after_inactivity"]
+===== `validate_after_inactivity`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `10000`
+
+How long to wait before checking for a stale connection when executing a request on a connection using keepalive.
+You may want to set this lower if you get connection errors regularly.
+Quoting the Apache Commons docs (this client is based on Apache Commons):
+'Defines period of inactivity in milliseconds after which persistent connections must
+be re-validated prior to being leased to the consumer. Non-positive value passed to
+this method disables connection validation. This check helps detect connections that
+have become stale (half-closed) while kept inactive in the pool.'
+See https://hc.apache.org/httpcomponents-client-ga/httpclient/apidocs/org/apache/http/impl/conn/PoolingHttpClientConnectionManager.html#setValidateAfterInactivity(int)[these docs for more info]
+
+[id="{version}-plugins-{type}s-{plugin}-version"]
+===== `version`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+The version to use for indexing. Use sprintf syntax like `%{my_version}` to use a field value here.
+See https://www.elastic.co/blog/elasticsearch-versioning-support.
+
+[id="{version}-plugins-{type}s-{plugin}-version_type"]
+===== `version_type`
+
+ * Value can be any of: `internal`, `external`, `external_gt`, `external_gte`, `force`
+ * There is no default value for this setting.
+
+The version_type to use for indexing.
+See https://www.elastic.co/blog/elasticsearch-versioning-support.
+See also https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-index_.html#_version_types + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/elasticsearch-v9.0.2.asciidoc b/docs/versioned-plugins/outputs/elasticsearch-v9.0.2.asciidoc new file mode 100644 index 000000000..1ef847fb7 --- /dev/null +++ b/docs/versioned-plugins/outputs/elasticsearch-v9.0.2.asciidoc @@ -0,0 +1,686 @@ +:plugin: elasticsearch +:type: output + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v9.0.2 +:release_date: 2017-11-30 +:changelog_url: https://github.com/logstash-plugins/logstash-output-elasticsearch/blob/v9.0.2/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Elasticsearch output plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +.Compatibility Note +[NOTE] +================================================================================ +Starting with Elasticsearch 5.3, there's an {ref}/modules-http.html[HTTP setting] +called `http.content_type.required`. If this option is set to `true`, and you +are using Logstash 2.4 through 5.2, you need to update the Elasticsearch output +plugin to version 6.2.5 or higher. + +================================================================================ + +This plugin is the recommended method of storing logs in Elasticsearch. +If you plan on using the Kibana web interface, you'll want to use this output. + +This output only speaks the HTTP protocol. HTTP is the preferred protocol for interacting with Elasticsearch as of Logstash 2.0. +We strongly encourage the use of HTTP over the node protocol for a number of reasons. HTTP is only marginally slower, +yet far easier to administer and work with. When using the HTTP protocol one may upgrade Elasticsearch versions without having +to upgrade Logstash in lock-step. + +You can learn more about Elasticsearch at + +==== Template management for Elasticsearch 5.x + +Index template for this version (Logstash 5.0) has been changed to reflect Elasticsearch's mapping changes in version 5.0. +Most importantly, the subfield for string multi-fields has changed from `.raw` to `.keyword` to match ES default +behavior. + +**Users installing ES 5.x and LS 5.x** + +This change will not affect you and you will continue to use the ES defaults. + +**Users upgrading from LS 2.x to LS 5.x with ES 5.x** + +LS will not force upgrade the template, if `logstash` template already exists. This means you will still use +`.raw` for sub-fields coming from 2.x. If you choose to use the new template, you will have to reindex your data after +the new template is installed. + +==== Retry Policy + +The retry policy has changed significantly in the 8.1.1 release. +This plugin uses the Elasticsearch bulk API to optimize its imports into Elasticsearch. These requests may experience +either partial or total failures. The bulk API sends batches of requests to an HTTP endpoint. Error codes for the HTTP +request are handled differently than error codes for individual documents. + +HTTP requests to the bulk API are expected to return a 200 response code. 
All other response codes are retried indefinitely.
+
+The following document errors are handled as follows:
+
+ * 400 and 404 errors are sent to the dead letter queue (DLQ), if enabled. If a DLQ is not enabled, a log message will be emitted, and the event will be dropped. See <> for more info.
+ * 409 errors (conflict) are logged as a warning and dropped.
+
+Note that 409 exceptions are no longer retried. Please set a higher `retry_on_conflict` value if you experience 409 exceptions.
+It is more performant for Elasticsearch to retry these exceptions than this plugin.
+
+[[dlq-policy]]
+==== DLQ Policy
+
+Mapping (404) errors from Elasticsearch can lead to data loss. Unfortunately
+mapping errors cannot be handled without human intervention and without looking
+at the field that caused the mapping mismatch. If the DLQ is enabled, the
+original events causing the mapping errors are stored in a file that can be
+processed at a later time. Often, the offending field can be removed and
+the event re-indexed to Elasticsearch. If the DLQ is not enabled, and a mapping error
+happens, the problem is logged as a warning, and the event is dropped. See
+<> for more information about processing events in the DLQ.
+
+==== Batch Sizes
+
+This plugin attempts to send batches of events as a single request. However, if
+a request exceeds 20MB we will break it up into multiple batch requests. If a single document exceeds 20MB it will be sent as a single request.
+
+==== DNS Caching
+
+This plugin uses the JVM to lookup DNS entries and is subject to the value of https://docs.oracle.com/javase/7/docs/technotes/guides/net/properties.html[networkaddress.cache.ttl],
+a global setting for the JVM.
+
+As an example, to set your DNS TTL to 1 second you would set
+the `LS_JAVA_OPTS` environment variable to `-Dnetworkaddress.cache.ttl=1`.
+
+Keep in mind that a connection with keepalive enabled will
+not reevaluate its DNS value while the keepalive is in effect.
+
+==== HTTP Compression
+
+This plugin supports request and response compression. Response compression is enabled by default; for
+Elasticsearch versions 5.0 and later, the user doesn't have to set any configs in
+https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-http.html#modules-http[Elasticsearch] for
+it to send back compressed responses. For versions before 5.0, `http.compression` must be set to `true` in
+Elasticsearch to take advantage of response compression when using this plugin.
+
+For request compression, regardless of the Elasticsearch version, users have to enable the `http_compression`
+setting in their Logstash config file.
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Elasticsearch Output Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+ +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-action>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-bulk_path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-cacert>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-doc_as_upsert>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-document_id>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-document_type>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-failure_type_logging_whitelist>> |{logstash-ref}/configuration-file-structure.html#array[array]|No +| <<{version}-plugins-{type}s-{plugin}-healthcheck_path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-hosts>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-http_compression>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-index>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-keystore>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-keystore_password>> |{logstash-ref}/configuration-file-structure.html#password[password]|No +| <<{version}-plugins-{type}s-{plugin}-manage_template>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-parameters>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No +| <<{version}-plugins-{type}s-{plugin}-parent>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-password>> |{logstash-ref}/configuration-file-structure.html#password[password]|No +| <<{version}-plugins-{type}s-{plugin}-path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-pipeline>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-pool_max>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-pool_max_per_route>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-proxy>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-resurrect_delay>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-retry_initial_interval>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-retry_max_interval>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-retry_on_conflict>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-routing>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-script>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-script_lang>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-script_type>> 
|{logstash-ref}/configuration-file-structure.html#string[string], one of `["inline", "indexed", "file"]`|No
+| <<{version}-plugins-{type}s-{plugin}-script_var_name>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-scripted_upsert>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-sniffing>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-sniffing_delay>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+| <<{version}-plugins-{type}s-{plugin}-sniffing_path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-ssl>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_verification>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-template>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-template_name>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-template_overwrite>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-timeout>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+| <<{version}-plugins-{type}s-{plugin}-truststore>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-truststore_password>> |{logstash-ref}/configuration-file-structure.html#password[password]|No
+| <<{version}-plugins-{type}s-{plugin}-upsert>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-user>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-validate_after_inactivity>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+| <<{version}-plugins-{type}s-{plugin}-version>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-version_type>> |{logstash-ref}/configuration-file-structure.html#string[string], one of `["internal", "external", "external_gt", "external_gte", "force"]`|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+output plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-action"]
+===== `action`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `"index"`
+
+The Elasticsearch action to perform. Valid actions are:
+
+- index: indexes a document (an event from Logstash).
+- delete: deletes a document by id (an id is required for this action).
+- create: indexes a document, fails if a document by that id already exists in the index.
+- update: updates a document by id. Update has a special case where you can upsert -- update a
+  document if not already present. See the `upsert` option. NOTE: This does not work and is not supported
+  in Elasticsearch 1.x. Please upgrade to ES 2.x or greater to use this feature with Logstash!
+- A sprintf style string to change the action based on the content of the event. The value `%{[foo]}`
+  would use the foo field for the action.
+
+For more details on actions, check out the http://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html[Elasticsearch bulk API documentation]
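+
+As a sketch of the sprintf form, assuming an upstream filter stores the desired
+action in `[@metadata][action]` and the target id in `doc_id` (both hypothetical
+field names):
+
+    output {
+      elasticsearch {
+        hosts => ["127.0.0.1:9200"]
+        document_id => "%{[doc_id]}"
+        action => "%{[@metadata][action]}"
+      }
+    }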
+
+[id="{version}-plugins-{type}s-{plugin}-bulk_path"]
+===== `bulk_path`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+HTTP path to perform the _bulk requests to.
+This defaults to a concatenation of the `path` parameter and "_bulk".
+
+[id="{version}-plugins-{type}s-{plugin}-cacert"]
+===== `cacert`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#path[path]
+ * There is no default value for this setting.
+
+The .cer or .pem file to validate the server's certificate.
+
+[id="{version}-plugins-{type}s-{plugin}-doc_as_upsert"]
+===== `doc_as_upsert`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `false`
+
+Enable `doc_as_upsert` for update mode.
+Create a new document with source if `document_id` doesn't exist in Elasticsearch.
+
+[id="{version}-plugins-{type}s-{plugin}-document_id"]
+===== `document_id`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+The document ID for the index. Useful for overwriting existing entries in
+Elasticsearch with the same ID.
+
+[id="{version}-plugins-{type}s-{plugin}-document_type"]
+===== `document_type`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+ * This option is deprecated
+
+Note: This option is deprecated due to the https://www.elastic.co/guide/en/elasticsearch/reference/6.0/removal-of-types.html[removal of types in Elasticsearch 6.0].
+It will be removed in the next major version of Logstash.
+This sets the document type to write events to. Generally you should try to write only
+similar events to the same 'type'. String expansion `%{foo}` works here.
+If you don't set a value for this option:
+
+- for elasticsearch clusters 6.x and above: the value of 'doc' will be used;
+- for elasticsearch clusters 5.x and below: the event's 'type' field will be used, if the field is not present the value of 'doc' will be used.
+
+[id="{version}-plugins-{type}s-{plugin}-failure_type_logging_whitelist"]
+===== `failure_type_logging_whitelist`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#array[array]
+ * Default value is `[]`
+
+Set the Elasticsearch error types that you don't want to log, as a whitelist.
+A useful example is when you want to skip all 409 errors,
+which are `document_already_exists_exception`.
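+
+For instance, a sketch that suppresses logging for those 409 conflicts:
+
+    output {
+      elasticsearch {
+        hosts => ["127.0.0.1:9200"]
+        failure_type_logging_whitelist => ["document_already_exists_exception"]
+      }
+    }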
+
+[id="{version}-plugins-{type}s-{plugin}-healthcheck_path"]
+===== `healthcheck_path`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+HTTP path where a HEAD request is sent when a backend is marked down.
+The request is sent in the background to see if the backend has come back again,
+before it is once again eligible to service requests.
+If you have custom firewall rules you may need to change this.
+
+[id="{version}-plugins-{type}s-{plugin}-hosts"]
+===== `hosts`
+
+ * Value type is <>
+ * Default value is `[//127.0.0.1]`
+
+Sets the host(s) of the remote instance. If given an array it will load balance requests across the hosts specified in the `hosts` parameter.
+Remember the `http` protocol uses the http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-http.html#modules-http[http] address (e.g. 9200, not 9300).
+
+ * `"127.0.0.1"`
+ * `["127.0.0.1:9200","127.0.0.2:9200"]`
+ * `["http://127.0.0.1"]`
+ * `["https://127.0.0.1:9200"]`
+ * `["https://127.0.0.1:9200/mypath"]` (if using a proxy on a subpath)
+
+It is important to exclude http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-node.html[dedicated master nodes] from the `hosts` list
+to prevent LS from sending bulk requests to the master nodes. So this parameter should only reference either data or client nodes in Elasticsearch.
+
+Any special characters present in the URLs here MUST be URL escaped! This means `#` should be put in as `%23` for instance.
+
+[id="{version}-plugins-{type}s-{plugin}-http_compression"]
+===== `http_compression`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `false`
+
+Enable gzip compression on requests. Note that response compression is on by default for Elasticsearch v5.0 and beyond.
+
+[id="{version}-plugins-{type}s-{plugin}-index"]
+===== `index`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `"logstash-%{+YYYY.MM.dd}"`
+
+The index to write events to. This can be dynamic using the `%{foo}` syntax.
+The default value will partition your indices by day so you can more easily
+delete old data or only search specific date ranges.
+Indexes may not contain uppercase characters.
+For weekly indexes ISO 8601 format is recommended, e.g. `logstash-%{+xxxx.ww}`.
+LS uses Joda to format the index pattern from the event timestamp.
+Joda formats are defined http://www.joda.org/joda-time/apidocs/org/joda/time/format/DateTimeFormat.html[here].
+
+[id="{version}-plugins-{type}s-{plugin}-keystore"]
+===== `keystore`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#path[path]
+ * There is no default value for this setting.
+
+The keystore used to present a certificate to the server.
+It can be either .jks or .p12.
+
+[id="{version}-plugins-{type}s-{plugin}-keystore_password"]
+===== `keystore_password`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#password[password]
+ * There is no default value for this setting.
+
+Set the keystore password.
+
+[id="{version}-plugins-{type}s-{plugin}-manage_template"]
+===== `manage_template`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `true`
+
+From Logstash 1.3 onwards, a template is applied to Elasticsearch during
+Logstash's startup if one with the name `template_name` does not already exist.
+By default, the contents of this template is the default template for
+`logstash-%{+YYYY.MM.dd}` which always matches indices based on the pattern
+`logstash-*`. Should you require support for other index names, or would like
+to change the mappings in the template in general, a custom template can be
+specified by setting `template` to the path of a template file.
+
+Setting `manage_template` to false disables this feature. If you require more
+control over template creation, (e.g. creating indices dynamically based on
+field names) you should set `manage_template` to false and use the REST
+API to apply your templates manually.
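+
+Where a custom template is needed, a sketch (the template path and names are
+placeholders; `template_name` and `template_overwrite` are described with the
+other template options):
+
+    output {
+      elasticsearch {
+        hosts => ["127.0.0.1:9200"]
+        template => "/path/to/my-template.json"
+        template_name => "myapp"
+        template_overwrite => true
+      }
+    }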
+
+[id="{version}-plugins-{type}s-{plugin}-parameters"]
+===== `parameters`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash]
+ * There is no default value for this setting.
+
+Pass a set of key/value pairs as the URL query string. This query string is added
+to every host listed in the 'hosts' configuration. If the 'hosts' list contains
+URLs that already have query strings, the one specified here will be appended.
+
+[id="{version}-plugins-{type}s-{plugin}-parent"]
+===== `parent`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `nil`
+
+For child documents, ID of the associated parent.
+This can be dynamic using the `%{foo}` syntax.
+
+[id="{version}-plugins-{type}s-{plugin}-password"]
+===== `password`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#password[password]
+ * There is no default value for this setting.
+
+Password to authenticate to a secure Elasticsearch cluster.
+
+[id="{version}-plugins-{type}s-{plugin}-path"]
+===== `path`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+HTTP path at which the Elasticsearch server lives. Use this if you must run
+Elasticsearch behind a proxy that remaps the root path of the Elasticsearch HTTP API.
+Note that if you use paths as components of URLs in the 'hosts' field you may
+not also set this field. Doing so will raise an error at startup.
+
+[id="{version}-plugins-{type}s-{plugin}-pipeline"]
+===== `pipeline`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `nil`
+
+Set which ingest pipeline you wish to execute for an event. You can also use event dependent configuration
+here, like `pipeline => "%{INGEST_PIPELINE}"`.
+
+[id="{version}-plugins-{type}s-{plugin}-pool_max"]
+===== `pool_max`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `1000`
+
+While the output tries to reuse connections efficiently, we have a maximum.
+This sets the maximum number of open connections the output will create.
+Setting this too low may mean frequently closing/opening connections, which is bad.
+
+[id="{version}-plugins-{type}s-{plugin}-pool_max_per_route"]
+===== `pool_max_per_route`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `100`
+
+While the output tries to reuse connections efficiently, we have a maximum per endpoint.
+This sets the maximum number of open connections per endpoint the output will create.
+Setting this too low may mean frequently closing/opening connections, which is bad.
+
+[id="{version}-plugins-{type}s-{plugin}-proxy"]
+===== `proxy`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#uri[uri]
+ * There is no default value for this setting.
+
+Set the address of a forward HTTP proxy.
+This used to accept hashes as arguments, but now only accepts
+arguments of the URI type, to prevent leaking credentials.
+
+[id="{version}-plugins-{type}s-{plugin}-resurrect_delay"]
+===== `resurrect_delay`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `5`
+
+How frequently, in seconds, to wait between resurrection attempts.
+Resurrection is the process by which backend endpoints marked 'down' are checked
+to see if they have come back to life.
+
+[id="{version}-plugins-{type}s-{plugin}-retry_initial_interval"]
+===== `retry_initial_interval`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `2`
+
+Set the initial interval, in seconds, between bulk retries. Doubled on each retry up to `retry_max_interval`.
+
+[id="{version}-plugins-{type}s-{plugin}-retry_max_interval"]
+===== `retry_max_interval`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `64`
+
+Set the maximum interval, in seconds, between bulk retries.
+
+[id="{version}-plugins-{type}s-{plugin}-retry_on_conflict"]
+===== `retry_on_conflict`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `1`
+
+The number of times Elasticsearch should internally retry an update/upserted document.
+See the https://www.elastic.co/guide/en/elasticsearch/guide/current/partial-updates.html[partial updates documentation]
+for more info.
+
+[id="{version}-plugins-{type}s-{plugin}-routing"]
+===== `routing`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+A routing override to be applied to all processed events.
+This can be dynamic using the `%{foo}` syntax.
+
+[id="{version}-plugins-{type}s-{plugin}-script"]
+===== `script`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `""`
+
+Set the script name for scripted update mode.
+
+[id="{version}-plugins-{type}s-{plugin}-script_lang"]
+===== `script_lang`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `"painless"`
+
+Set the language of the used script. If not set, this defaults to painless in ES 5.0.
+When using indexed (stored) scripts on Elasticsearch 6 and higher, you must set this parameter to `""` (empty string).
+
+[id="{version}-plugins-{type}s-{plugin}-script_type"]
+===== `script_type`
+
+ * Value can be any of: `inline`, `indexed`, `file`
+ * Default value is `["inline"]`
+
+Define the type of script referenced by the "script" variable:
+
+* `inline`: "script" contains an inline script
+* `indexed`: "script" contains the name of a script directly indexed in Elasticsearch
+* `file`: "script" contains the name of a script stored in Elasticsearch's config directory
+
+[id="{version}-plugins-{type}s-{plugin}-script_var_name"]
+===== `script_var_name`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `"event"`
+
+Set the variable name passed to the script (scripted update).
+
+[id="{version}-plugins-{type}s-{plugin}-scripted_upsert"]
+===== `scripted_upsert`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `false`
+
+If enabled, the script is in charge of creating a non-existent document (scripted update).
+
+[id="{version}-plugins-{type}s-{plugin}-sniffing"]
+===== `sniffing`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `false`
+
+This setting asks Elasticsearch for the list of all cluster nodes and adds them to the hosts list.
+Note: This will return ALL nodes with HTTP enabled (including master nodes!). If you use
+this with master nodes, you probably want to disable HTTP on them by setting
+`http.enabled` to false in their elasticsearch.yml.
You can either use the `sniffing` option or
+manually enter multiple Elasticsearch hosts using the `hosts` parameter.
+
+[id="{version}-plugins-{type}s-{plugin}-sniffing_delay"]
+===== `sniffing_delay`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `5`
+
+How long to wait, in seconds, between sniffing attempts.
+
+[id="{version}-plugins-{type}s-{plugin}-sniffing_path"]
+===== `sniffing_path`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+HTTP path to be used for the sniffing requests.
+The default value is computed by concatenating the `path` value and "_nodes/http".
+If `sniffing_path` is set, it will be used as an absolute path.
+Do not use a full URL here, only paths, e.g. "/sniff/_nodes/http".
+
+[id="{version}-plugins-{type}s-{plugin}-ssl"]
+===== `ssl`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * There is no default value for this setting.
+
+Enable SSL/TLS secured communication to the Elasticsearch cluster. Leaving this unspecified will use whatever scheme
+is specified in the URLs listed in 'hosts'. If no explicit protocol is specified, plain HTTP will be used.
+If SSL is explicitly disabled here, the plugin will refuse to start if an HTTPS URL is given in 'hosts'.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_certificate_verification"]
+===== `ssl_certificate_verification`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `true`
+
+Option to validate the server's certificate. Disabling this severely compromises security.
+For more information on disabling certificate verification please read
+https://www.cs.utexas.edu/~shmat/shmat_ccs12.pdf
+
+[id="{version}-plugins-{type}s-{plugin}-template"]
+===== `template`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#path[path]
+ * There is no default value for this setting.
+
+You can set the path to your own template here, if you so desire.
+If not set, the included template will be used.
+
+[id="{version}-plugins-{type}s-{plugin}-template_name"]
+===== `template_name`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `"logstash"`
+
+This configuration option defines how the template is named inside Elasticsearch.
+Note that if you have used the template management features and subsequently
+change this, you will need to prune the old template manually, e.g.
+
+`curl -XDELETE `
+
+where `OldTemplateName` is whatever the former setting was.
+
+[id="{version}-plugins-{type}s-{plugin}-template_overwrite"]
+===== `template_overwrite`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `false`
+
+The template_overwrite option will always overwrite the indicated template
+in Elasticsearch with either the one indicated by `template` or the included one.
+This option is set to false by default. If you always want to stay up to date
+with the template provided by Logstash, this option could be very useful to you.
+Likewise, if you have your own template file managed by puppet, for example, and
+you wanted to be able to update it regularly, this option could help there as well.
+
+Please note that if you are using your own customized version of the Logstash
+template (logstash), setting this to true will make Logstash overwrite
+the "logstash" template (i.e. removing all customized settings).
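+
+For example, a minimal sketch of shipping a customized template and keeping it
+current across Logstash restarts (the template path and name below are illustrative):
+
+[source,ruby]
+----------------------------------
+output {
+  elasticsearch {
+    hosts => ["http://127.0.0.1:9200"]
+    template => "/etc/logstash/templates/my_template.json"
+    template_name => "my_template"
+    # Re-push the template on startup even if one with this name exists
+    template_overwrite => true
+  }
+}
+----------------------------------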
+
+[id="{version}-plugins-{type}s-{plugin}-timeout"]
+===== `timeout`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `60`
+
+Set the timeout, in seconds, for network operations and requests sent to Elasticsearch. If
+a timeout occurs, the request will be retried.
+
+[id="{version}-plugins-{type}s-{plugin}-truststore"]
+===== `truststore`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#path[path]
+ * There is no default value for this setting.
+
+The JKS truststore to validate the server's certificate.
+Use either `:truststore` or `:cacert`.
+
+[id="{version}-plugins-{type}s-{plugin}-truststore_password"]
+===== `truststore_password`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#password[password]
+ * There is no default value for this setting.
+
+Set the truststore password.
+
+[id="{version}-plugins-{type}s-{plugin}-upsert"]
+===== `upsert`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `""`
+
+Set the upsert content for update mode.
+Creates a new document with this parameter as a JSON string if `document_id` doesn't exist.
+
+[id="{version}-plugins-{type}s-{plugin}-user"]
+===== `user`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+Username to authenticate to a secure Elasticsearch cluster.
+
+[id="{version}-plugins-{type}s-{plugin}-validate_after_inactivity"]
+===== `validate_after_inactivity`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `10000`
+
+How long to wait before checking whether the connection is stale before executing a request on a connection using keepalive.
+You may want to set this lower if you get connection errors regularly.
+Quoting the Apache Commons docs (this client is based on Apache Commons):
+'Defines period of inactivity in milliseconds after which persistent connections must
+be re-validated prior to being leased to the consumer. Non-positive value passed to
+this method disables connection validation. This check helps detect connections that
+have become stale (half-closed) while kept inactive in the pool.'
+See https://hc.apache.org/httpcomponents-client-ga/httpclient/apidocs/org/apache/http/impl/conn/PoolingHttpClientConnectionManager.html#setValidateAfterInactivity(int)[these docs for more info]
+
+[id="{version}-plugins-{type}s-{plugin}-version"]
+===== `version`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+The version to use for indexing. Use sprintf syntax like `%{my_version}` to use a field value here.
+See https://www.elastic.co/blog/elasticsearch-versioning-support.
+
+[id="{version}-plugins-{type}s-{plugin}-version_type"]
+===== `version_type`
+
+ * Value can be any of: `internal`, `external`, `external_gt`, `external_gte`, `force`
+ * There is no default value for this setting.
+
+The version_type to use for indexing.
+See https://www.elastic.co/blog/elasticsearch-versioning-support.
+See also https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-index_.html#_version_types
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
diff --git a/docs/versioned-plugins/outputs/email-index.asciidoc b/docs/versioned-plugins/outputs/email-index.asciidoc
new file mode 100644
index 000000000..b3b9b73ee
--- /dev/null
+++ b/docs/versioned-plugins/outputs/email-index.asciidoc
@@ -0,0 +1,16 @@
+:plugin: email
+:type: output
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+| <> | 2018-01-15
+| <> | 2017-08-16
+| <> | 2017-06-23
+|=======================================================================
+
+include::email-v4.1.0.asciidoc[]
+include::email-v4.0.6.asciidoc[]
+include::email-v4.0.4.asciidoc[]
+
diff --git a/docs/versioned-plugins/outputs/email-v4.0.4.asciidoc b/docs/versioned-plugins/outputs/email-v4.0.4.asciidoc
new file mode 100644
index 000000000..c9b5d9c67
--- /dev/null
+++ b/docs/versioned-plugins/outputs/email-v4.0.4.asciidoc
@@ -0,0 +1,233 @@
+:plugin: email
+:type: output
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v4.0.4
+:release_date: 2017-06-23
+:changelog_url: https://github.com/logstash-plugins/logstash-output-email/blob/v4.0.4/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Email output plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Sends email when an output is received. Alternatively, you may include or
+exclude the email output execution using conditionals.
+
+==== Usage Example
+[source,ruby]
+----------------------------------
+output {
+  if "shouldmail" in [tags] {
+    email {
+      to => 'technical@logstash.net'
+      from => 'monitor@logstash.net'
+      subject => 'Alert - %{title}'
+      body => "Tags: %{tags}\\n\\Content:\\n%{message}"
+      domain => 'mail.logstash.net'
+      port => 25
+    }
+  }
+}
+----------------------------------
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Email Output Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+ +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-address>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-attachments>> |{logstash-ref}/configuration-file-structure.html#array[array]|No +| <<{version}-plugins-{type}s-{plugin}-authentication>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-body>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-cc>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-contenttype>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-debug>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-domain>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-from>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-htmlbody>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-password>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-port>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-replyto>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-subject>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-to>> |{logstash-ref}/configuration-file-structure.html#string[string]|Yes +| <<{version}-plugins-{type}s-{plugin}-use_tls>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-username>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-via>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +output plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-address"] +===== `address` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * Default value is `"localhost"` + +The address used to connect to the mail server + +[id="{version}-plugins-{type}s-{plugin}-attachments"] +===== `attachments` + + * Value type is {logstash-ref}/configuration-file-structure.html#array[array] + * Default value is `[]` + +Attachments - specify the name(s) and location(s) of the files. + +[id="{version}-plugins-{type}s-{plugin}-authentication"] +===== `authentication` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * There is no default value for this setting. + +Authentication method used when identifying with the server + +[id="{version}-plugins-{type}s-{plugin}-body"] +===== `body` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * Default value is `""` + +Body for the email - plain text only. 
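+
+To tie the connection settings above together, here is a minimal sketch of an
+authenticated, TLS-enabled SMTP configuration (the host name and credentials are
+illustrative placeholders):
+
+[source,ruby]
+----------------------------------
+output {
+  email {
+    to => 'oncall@example.com'
+    address => 'smtp.example.com'    # mail server to connect to
+    port => 587                      # submission port; the default is 25
+    use_tls => true
+    authentication => 'plain'        # mechanism depends on your server
+    username => 'alerts@example.com'
+    password => '${SMTP_PASSWORD}'   # assumes an environment/keystore variable
+  }
+}
+----------------------------------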
+ +[id="{version}-plugins-{type}s-{plugin}-cc"] +===== `cc` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * There is no default value for this setting. + +The fully-qualified email address(es) to include as cc: address(es). + +This field also accepts a comma-separated string of addresses, for example: +`"me@host.com, you@host.com"` + +[id="{version}-plugins-{type}s-{plugin}-contenttype"] +===== `contenttype` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * Default value is `"text/html; charset=UTF-8"` + +contenttype : for multipart messages, set the content-type and/or charset of the HTML part. +NOTE: this may not be functional (KH) + +[id="{version}-plugins-{type}s-{plugin}-debug"] +===== `debug` + + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] + * Default value is `false` + +Run the mail relay in debug mode + +[id="{version}-plugins-{type}s-{plugin}-domain"] +===== `domain` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * Default value is `"localhost"` + +Domain used to send the email messages + +[id="{version}-plugins-{type}s-{plugin}-from"] +===== `from` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * Default value is `"logstash.alert@nowhere.com"` + +The fully-qualified email address for the From: field in the email. + +[id="{version}-plugins-{type}s-{plugin}-htmlbody"] +===== `htmlbody` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * Default value is `""` + +HTML Body for the email, which may contain HTML markup. + +[id="{version}-plugins-{type}s-{plugin}-password"] +===== `password` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * There is no default value for this setting. + +Password to authenticate with the server + +[id="{version}-plugins-{type}s-{plugin}-port"] +===== `port` + + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] + * Default value is `25` + +Port used to communicate with the mail server + +[id="{version}-plugins-{type}s-{plugin}-replyto"] +===== `replyto` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * There is no default value for this setting. + +The fully qualified email address for the Reply-To: field. + +[id="{version}-plugins-{type}s-{plugin}-subject"] +===== `subject` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * Default value is `""` + +Subject: for the email. + +[id="{version}-plugins-{type}s-{plugin}-to"] +===== `to` + + * This is a required setting. + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * There is no default value for this setting. + +The fully-qualified email address to send the email to. + +This field also accepts a comma-separated string of addresses, for example: +`"me@host.com, you@host.com"` + +You can also use dynamic fields from the event with the `%{fieldname}` syntax. + +[id="{version}-plugins-{type}s-{plugin}-use_tls"] +===== `use_tls` + + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] + * Default value is `false` + +Enables TLS when communicating with the server + +[id="{version}-plugins-{type}s-{plugin}-username"] +===== `username` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * There is no default value for this setting. 
+ +Username to authenticate with the server + +[id="{version}-plugins-{type}s-{plugin}-via"] +===== `via` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * Default value is `"smtp"` + +How Logstash should send the email, either via SMTP or by invoking sendmail. + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/email-v4.0.6.asciidoc b/docs/versioned-plugins/outputs/email-v4.0.6.asciidoc new file mode 100644 index 000000000..065849e2a --- /dev/null +++ b/docs/versioned-plugins/outputs/email-v4.0.6.asciidoc @@ -0,0 +1,235 @@ +:plugin: email +:type: output + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v4.0.6 +:release_date: 2017-08-16 +:changelog_url: https://github.com/logstash-plugins/logstash-output-email/blob/v4.0.6/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Email output plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +Sends email when an output is received. Alternatively, you may include or +exclude the email output execution using conditionals. + +==== Usage Example +[source,ruby] +---------------------------------- +output { + if "shouldmail" in [tags] { + email { + to => 'technical@example.com' + from => 'monitor@example.com' + subject => 'Alert - %{title}' + body => "Tags: %{tags}\\n\\Content:\\n%{message}" + domain => 'mail.example.com' + port => 25 + } + } +} +---------------------------------- + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Email Output Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. 
+ +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-address>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-attachments>> |{logstash-ref}/configuration-file-structure.html#array[array]|No +| <<{version}-plugins-{type}s-{plugin}-authentication>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-body>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-cc>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-contenttype>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-debug>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-domain>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-from>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-htmlbody>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-password>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-port>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-replyto>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-subject>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-to>> |{logstash-ref}/configuration-file-structure.html#string[string]|Yes +| <<{version}-plugins-{type}s-{plugin}-use_tls>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-username>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-via>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +output plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-address"] +===== `address` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * Default value is `"localhost"` + +The address used to connect to the mail server + +[id="{version}-plugins-{type}s-{plugin}-attachments"] +===== `attachments` + + * Value type is {logstash-ref}/configuration-file-structure.html#array[array] + * Default value is `[]` + +Attachments - specify the name(s) and location(s) of the files. + +[id="{version}-plugins-{type}s-{plugin}-authentication"] +===== `authentication` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * There is no default value for this setting. + +Authentication method used when identifying with the server + +[id="{version}-plugins-{type}s-{plugin}-body"] +===== `body` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * Default value is `""` + +Body for the email - plain text only. 
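+
+For example, a sketch of a message that supplies both a plain-text and an HTML
+body (assuming the plugin combines the two into a multipart message when both
+are set, as the `contenttype` note below suggests):
+
+[source,ruby]
+----------------------------------
+output {
+  email {
+    to => 'oncall@example.com'
+    subject => 'Alert - %{title}'
+    body => "Plain text fallback: %{message}"
+    htmlbody => "<h1>%{title}</h1><p>%{message}</p>"
+  }
+}
+----------------------------------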
+ +[id="{version}-plugins-{type}s-{plugin}-cc"] +===== `cc` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * There is no default value for this setting. + +The fully-qualified email address(es) to include as cc: address(es). + +This field also accepts a comma-separated string of addresses, for example: +`"me@example.com, you@example.com"` + +[id="{version}-plugins-{type}s-{plugin}-contenttype"] +===== `contenttype` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * Default value is `"text/html; charset=UTF-8"` + +contenttype : for multipart messages, set the content-type and/or charset of the HTML part. +NOTE: this may not be functional (KH) + +[id="{version}-plugins-{type}s-{plugin}-debug"] +===== `debug` + + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] + * Default value is `false` + +Run the mail relay in debug mode + +[id="{version}-plugins-{type}s-{plugin}-domain"] +===== `domain` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * Default value is `"localhost"` + +The HELO/EHLO domain name used in the greeting message when connecting +to a remote SMTP server. Some servers require this name to match the +actual hostname of the connecting client. + +[id="{version}-plugins-{type}s-{plugin}-from"] +===== `from` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * Default value is `"logstash.alert@example.com"` + +The fully-qualified email address for the From: field in the email. + +[id="{version}-plugins-{type}s-{plugin}-htmlbody"] +===== `htmlbody` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * Default value is `""` + +HTML Body for the email, which may contain HTML markup. + +[id="{version}-plugins-{type}s-{plugin}-password"] +===== `password` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * There is no default value for this setting. + +Password to authenticate with the server + +[id="{version}-plugins-{type}s-{plugin}-port"] +===== `port` + + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] + * Default value is `25` + +Port used to communicate with the mail server + +[id="{version}-plugins-{type}s-{plugin}-replyto"] +===== `replyto` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * There is no default value for this setting. + +The fully qualified email address for the Reply-To: field. + +[id="{version}-plugins-{type}s-{plugin}-subject"] +===== `subject` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * Default value is `""` + +Subject: for the email. + +[id="{version}-plugins-{type}s-{plugin}-to"] +===== `to` + + * This is a required setting. + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * There is no default value for this setting. + +The fully-qualified email address to send the email to. + +This field also accepts a comma-separated string of addresses, for example: +`"me@example.com, you@example.com"` + +You can also use dynamic fields from the event with the `%{fieldname}` syntax. 
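+
+For instance, a sketch that routes each alert to a recipient taken from the
+event itself (the `[alert][recipient]` field name is hypothetical):
+
+[source,ruby]
+----------------------------------
+output {
+  email {
+    # Resolved per event; the field must contain a valid address
+    to => "%{[alert][recipient]}"
+    subject => 'Alert - %{title}'
+  }
+}
+----------------------------------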
+ +[id="{version}-plugins-{type}s-{plugin}-use_tls"] +===== `use_tls` + + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] + * Default value is `false` + +Enables TLS when communicating with the server + +[id="{version}-plugins-{type}s-{plugin}-username"] +===== `username` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * There is no default value for this setting. + +Username to authenticate with the server + +[id="{version}-plugins-{type}s-{plugin}-via"] +===== `via` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * Default value is `"smtp"` + +How Logstash should send the email, either via SMTP or by invoking sendmail. + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] diff --git a/docs/versioned-plugins/outputs/email-v4.1.0.asciidoc b/docs/versioned-plugins/outputs/email-v4.1.0.asciidoc new file mode 100644 index 000000000..51c123e94 --- /dev/null +++ b/docs/versioned-plugins/outputs/email-v4.1.0.asciidoc @@ -0,0 +1,256 @@ +:plugin: email +:type: output + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v4.1.0 +:release_date: 2018-01-15 +:changelog_url: https://github.com/logstash-plugins/logstash-output-email/blob/v4.1.0/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Email output plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +Sends email when an output is received. Alternatively, you may include or +exclude the email output execution using conditionals. + +==== Usage Example +[source,ruby] +---------------------------------- +output { + if "shouldmail" in [tags] { + email { + to => 'technical@example.com' + from => 'monitor@example.com' + subject => 'Alert - %{title}' + body => "Tags: %{tags}\\n\\Content:\\n%{message}" + template_file => "/tmp/email_template.mustache" + domain => 'mail.example.com' + port => 25 + } + } +} +---------------------------------- + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Email Output Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. 
+ +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-address>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-attachments>> |{logstash-ref}/configuration-file-structure.html#array[array]|No +| <<{version}-plugins-{type}s-{plugin}-authentication>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-body>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-cc>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-bcc>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-contenttype>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-debug>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-domain>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-from>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-htmlbody>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-password>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-port>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-replyto>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-subject>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-to>> |{logstash-ref}/configuration-file-structure.html#string[string]|Yes +| <<{version}-plugins-{type}s-{plugin}-use_tls>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-username>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-via>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-template_file>> |{logstash-ref}/configuration-file-structure.html#path[path]|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +output plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-address"] +===== `address` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * Default value is `"localhost"` + +The address used to connect to the mail server + +[id="{version}-plugins-{type}s-{plugin}-attachments"] +===== `attachments` + + * Value type is {logstash-ref}/configuration-file-structure.html#array[array] + * Default value is `[]` + +Attachments - specify the name(s) and location(s) of the files. + +[id="{version}-plugins-{type}s-{plugin}-authentication"] +===== `authentication` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * There is no default value for this setting. 
+ +Authentication method used when identifying with the server + +[id="{version}-plugins-{type}s-{plugin}-body"] +===== `body` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * Default value is `""` + +Body for the email - plain text only. + +[id="{version}-plugins-{type}s-{plugin}-cc"] +===== `cc` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * There is no default value for this setting. + +The fully-qualified email address(es) to include as cc: address(es). + +This field also accepts a comma-separated string of addresses, for example: +`"me@example.com, you@example.com"` + +[id="{version}-plugins-{type}s-{plugin}-bcc"] +===== `bcc` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * There is no default value for this setting. + +The fully-qualified email address(es) to include as bcc: address(es). + +This field accepts several addresses like cc. + +[id="{version}-plugins-{type}s-{plugin}-contenttype"] +===== `contenttype` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * Default value is `"text/html; charset=UTF-8"` + +contenttype : for multipart messages, set the content-type and/or charset of the HTML part. +NOTE: this may not be functional (KH) + +[id="{version}-plugins-{type}s-{plugin}-debug"] +===== `debug` + + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] + * Default value is `false` + +Run the mail relay in debug mode + +[id="{version}-plugins-{type}s-{plugin}-domain"] +===== `domain` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * Default value is `"localhost"` + +The HELO/EHLO domain name used in the greeting message when connecting +to a remote SMTP server. Some servers require this name to match the +actual hostname of the connecting client. + +[id="{version}-plugins-{type}s-{plugin}-from"] +===== `from` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * Default value is `"logstash.alert@example.com"` + +The fully-qualified email address for the From: field in the email. + +[id="{version}-plugins-{type}s-{plugin}-htmlbody"] +===== `htmlbody` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * Default value is `""` + +HTML Body for the email, which may contain HTML markup. + +[id="{version}-plugins-{type}s-{plugin}-password"] +===== `password` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * There is no default value for this setting. + +Password to authenticate with the server + +[id="{version}-plugins-{type}s-{plugin}-port"] +===== `port` + + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] + * Default value is `25` + +Port used to communicate with the mail server + +[id="{version}-plugins-{type}s-{plugin}-replyto"] +===== `replyto` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * There is no default value for this setting. + +The fully qualified email address for the Reply-To: field. + +[id="{version}-plugins-{type}s-{plugin}-subject"] +===== `subject` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * Default value is `""` + +Subject: for the email. + +[id="{version}-plugins-{type}s-{plugin}-to"] +===== `to` + + * This is a required setting. 
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+The fully-qualified email address to send the email to.
+
+This field also accepts a comma-separated string of addresses, for example:
+`"me@example.com, you@example.com"`
+
+You can also use dynamic fields from the event with the `%{fieldname}` syntax.
+
+[id="{version}-plugins-{type}s-{plugin}-use_tls"]
+===== `use_tls`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `false`
+
+Enables TLS when communicating with the server.
+
+[id="{version}-plugins-{type}s-{plugin}-username"]
+===== `username`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+Username to authenticate with the server.
+
+[id="{version}-plugins-{type}s-{plugin}-via"]
+===== `via`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `"smtp"`
+
+How Logstash should send the email, either via SMTP or by invoking sendmail.
+
+[id="{version}-plugins-{type}s-{plugin}-template_file"]
+===== `template_file`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#path[path]
+ * There is no default value for this setting.
+
+Path of a https://mustache.github.io/[Mustache templating] file used for email templating. See the example in the test fixture.
+Can be used with `body` to send multi-part emails. Takes precedence over `htmlbody`.
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
diff --git a/docs/versioned-plugins/outputs/file-index.asciidoc b/docs/versioned-plugins/outputs/file-index.asciidoc
new file mode 100644
index 000000000..b63d73c5b
--- /dev/null
+++ b/docs/versioned-plugins/outputs/file-index.asciidoc
@@ -0,0 +1,22 @@
+:plugin: file
+:type: output
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+| <> | 2017-11-24
+| <> | 2017-11-23
+| <> | 2017-11-07
+| <> | 2017-08-16
+| <> | 2017-08-01
+| <> | 2017-06-23
+|=======================================================================
+
+include::file-v4.2.1.asciidoc[]
+include::file-v4.2.0.asciidoc[]
+include::file-v4.1.2.asciidoc[]
+include::file-v4.1.1.asciidoc[]
+include::file-v4.1.0.asciidoc[]
+include::file-v4.0.2.asciidoc[]
+
diff --git a/docs/versioned-plugins/outputs/file-v4.0.2.asciidoc b/docs/versioned-plugins/outputs/file-v4.0.2.asciidoc
new file mode 100644
index 000000000..d81d33861
--- /dev/null
+++ b/docs/versioned-plugins/outputs/file-v4.0.2.asciidoc
@@ -0,0 +1,136 @@
+:plugin: file
+:type: output
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v4.0.2
+:release_date: 2017-06-23
+:changelog_url: https://github.com/logstash-plugins/logstash-output-file/blob/v4.0.2/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== File output plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This output writes events to files on disk. You can use fields
+from the event as parts of the filename and/or path.
+ +By default, this output writes one event per line in **json** format. +You can customise the line format using the `line` codec like +[source,ruby] +output { + file { + path => ... + codec => line { format => "custom format: %{message}"} + } +} + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== File Output Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-create_if_deleted>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-dir_mode>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-file_mode>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-filename_failure>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-flush_interval>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-gzip>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-path>> |{logstash-ref}/configuration-file-structure.html#string[string]|Yes +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +output plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-create_if_deleted"] +===== `create_if_deleted` + + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] + * Default value is `true` + +If the configured file is deleted, but an event is handled by the plugin, +the plugin will recreate the file. Default => true + +[id="{version}-plugins-{type}s-{plugin}-dir_mode"] +===== `dir_mode` + + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] + * Default value is `-1` + +Dir access mode to use. Note that due to the bug in jruby system umask +is ignored on linux: https://github.com/jruby/jruby/issues/3426 +Setting it to -1 uses default OS value. +Example: `"dir_mode" => 0750` + +[id="{version}-plugins-{type}s-{plugin}-file_mode"] +===== `file_mode` + + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] + * Default value is `-1` + +File access mode to use. Note that due to the bug in jruby system umask +is ignored on linux: https://github.com/jruby/jruby/issues/3426 +Setting it to -1 uses default OS value. +Example: `"file_mode" => 0640` + +[id="{version}-plugins-{type}s-{plugin}-filename_failure"] +===== `filename_failure` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * Default value is `"_filepath_failures"` + +If the generated path is invalid, the events will be saved +into this file and inside the defined path. + +[id="{version}-plugins-{type}s-{plugin}-flush_interval"] +===== `flush_interval` + + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] + * Default value is `2` + +Flush interval (in seconds) for flushing writes to log files. +0 will flush on every message. 
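+
+As a sketch, trading throughput for durability by flushing on every event
+(the path is illustrative):
+
+[source,ruby]
+----------------------------------
+output {
+  file {
+    path => "/var/log/logstash/events.log"
+    # 0 flushes after every message instead of the default 2-second interval
+    flush_interval => 0
+  }
+}
+----------------------------------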
+ +[id="{version}-plugins-{type}s-{plugin}-gzip"] +===== `gzip` + + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] + * Default value is `false` + +Gzip the output stream before writing to disk. + +[id="{version}-plugins-{type}s-{plugin}-path"] +===== `path` + + * This is a required setting. + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * There is no default value for this setting. + +The path to the file to write. Event fields can be used here, +like `/var/log/logstash/%{host}/%{application}` +One may also utilize the path option for date-based log +rotation via the joda time format. This will use the event +timestamp. +E.g.: `path => "./test-%{+YYYY-MM-dd}.txt"` to create +`./test-2013-05-29.txt` + +If you use an absolute path you cannot start with a dynamic string. +E.g: `/%{myfield}/`, `/test-%{myfield}/` are not valid paths + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/file-v4.1.0.asciidoc b/docs/versioned-plugins/outputs/file-v4.1.0.asciidoc new file mode 100644 index 000000000..286f07dea --- /dev/null +++ b/docs/versioned-plugins/outputs/file-v4.1.0.asciidoc @@ -0,0 +1,136 @@ +:plugin: file +:type: output + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v4.1.0 +:release_date: 2017-08-01 +:changelog_url: https://github.com/logstash-plugins/logstash-output-file/blob/v4.1.0/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== File output plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +This output writes events to files on disk. You can use fields +from the event as parts of the filename and/or path. + +By default, this output writes one event per line in **json** format. +You can customise the line format using the `line` codec like +[source,ruby] +output { + file { + path => ... + codec => line { format => "custom format: %{message}"} + } +} + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== File Output Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. 
+ +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-create_if_deleted>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-dir_mode>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-file_mode>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-filename_failure>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-flush_interval>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-gzip>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-path>> |{logstash-ref}/configuration-file-structure.html#string[string]|Yes +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +output plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-create_if_deleted"] +===== `create_if_deleted` + + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] + * Default value is `true` + +If the configured file is deleted, but an event is handled by the plugin, +the plugin will recreate the file. Default => true + +[id="{version}-plugins-{type}s-{plugin}-dir_mode"] +===== `dir_mode` + + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] + * Default value is `-1` + +Dir access mode to use. Note that due to the bug in jruby system umask +is ignored on linux: https://github.com/jruby/jruby/issues/3426 +Setting it to -1 uses default OS value. +Example: `"dir_mode" => 0750` + +[id="{version}-plugins-{type}s-{plugin}-file_mode"] +===== `file_mode` + + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] + * Default value is `-1` + +File access mode to use. Note that due to the bug in jruby system umask +is ignored on linux: https://github.com/jruby/jruby/issues/3426 +Setting it to -1 uses default OS value. +Example: `"file_mode" => 0640` + +[id="{version}-plugins-{type}s-{plugin}-filename_failure"] +===== `filename_failure` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * Default value is `"_filepath_failures"` + +If the generated path is invalid, the events will be saved +into this file and inside the defined path. + +[id="{version}-plugins-{type}s-{plugin}-flush_interval"] +===== `flush_interval` + + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] + * Default value is `2` + +Flush interval (in seconds) for flushing writes to log files. +0 will flush on every message. + +[id="{version}-plugins-{type}s-{plugin}-gzip"] +===== `gzip` + + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] + * Default value is `false` + +Gzip the output stream before writing to disk. + +[id="{version}-plugins-{type}s-{plugin}-path"] +===== `path` + + * This is a required setting. + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * There is no default value for this setting. + +The path to the file to write. 
Event fields can be used here, +like `/var/log/logstash/%{host}/%{application}` +One may also utilize the path option for date-based log +rotation via the joda time format. This will use the event +timestamp. +E.g.: `path => "./test-%{+YYYY-MM-dd}.txt"` to create +`./test-2013-05-29.txt` + +If you use an absolute path you cannot start with a dynamic string. +E.g: `/%{myfield}/`, `/test-%{myfield}/` are not valid paths + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/file-v4.1.1.asciidoc b/docs/versioned-plugins/outputs/file-v4.1.1.asciidoc new file mode 100644 index 000000000..6244718ab --- /dev/null +++ b/docs/versioned-plugins/outputs/file-v4.1.1.asciidoc @@ -0,0 +1,136 @@ +:plugin: file +:type: output + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v4.1.1 +:release_date: 2017-08-16 +:changelog_url: https://github.com/logstash-plugins/logstash-output-file/blob/v4.1.1/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== File output plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +This output writes events to files on disk. You can use fields +from the event as parts of the filename and/or path. + +By default, this output writes one event per line in **json** format. +You can customise the line format using the `line` codec like +[source,ruby] +output { + file { + path => ... + codec => line { format => "custom format: %{message}"} + } +} + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== File Output Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-create_if_deleted>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-dir_mode>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-file_mode>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-filename_failure>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-flush_interval>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-gzip>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-path>> |{logstash-ref}/configuration-file-structure.html#string[string]|Yes +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +output plugins. 
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-create_if_deleted"]
+===== `create_if_deleted`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `true`
+
+If the configured file is deleted, but an event is handled by the plugin,
+the plugin will recreate the file.
+
+[id="{version}-plugins-{type}s-{plugin}-dir_mode"]
+===== `dir_mode`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `-1`
+
+Directory access mode to use. Note that, due to a bug in JRuby, the system
+umask is ignored on Linux: https://github.com/jruby/jruby/issues/3426
+Setting this to `-1` uses the default OS value.
+Example: `"dir_mode" => 0750`
+
+[id="{version}-plugins-{type}s-{plugin}-file_mode"]
+===== `file_mode`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `-1`
+
+File access mode to use. Note that, due to a bug in JRuby, the system
+umask is ignored on Linux: https://github.com/jruby/jruby/issues/3426
+Setting this to `-1` uses the default OS value.
+Example: `"file_mode" => 0640`
+
+[id="{version}-plugins-{type}s-{plugin}-filename_failure"]
+===== `filename_failure`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `"_filepath_failures"`
+
+If the generated path is invalid, the events will be saved
+into this file, inside the defined path.
+
+[id="{version}-plugins-{type}s-{plugin}-flush_interval"]
+===== `flush_interval`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `2`
+
+Flush interval (in seconds) for flushing writes to log files.
+A value of `0` flushes on every message.
+
+[id="{version}-plugins-{type}s-{plugin}-gzip"]
+===== `gzip`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `false`
+
+Gzip the output stream before writing to disk.
+
+[id="{version}-plugins-{type}s-{plugin}-path"]
+===== `path`
+
+ * This is a required setting.
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+The path to the file to write. Event fields can be used here,
+like `/var/log/logstash/%{host}/%{application}`.
+You can also use the path option for date-based log
+rotation via the Joda time format; the event timestamp
+will be used.
+For example, `path => "./test-%{+YYYY-MM-dd}.txt"` creates
+`./test-2013-05-29.txt`.
+
+If you use an absolute path, it cannot start with a dynamic string.
+For example, `/%{myfield}/` and `/test-%{myfield}/` are not valid paths.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/outputs/file-v4.1.2.asciidoc b/docs/versioned-plugins/outputs/file-v4.1.2.asciidoc
new file mode 100644
index 000000000..a56ecad98
--- /dev/null
+++ b/docs/versioned-plugins/outputs/file-v4.1.2.asciidoc
@@ -0,0 +1,136 @@
+:plugin: file
+:type: output
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v4.1.2
+:release_date: 2017-11-07
+:changelog_url: https://github.com/logstash-plugins/logstash-output-file/blob/v4.1.2/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== File output plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This output writes events to files on disk. You can use fields
+from the event as parts of the filename and/or path.
+
+By default, this output writes one event per line in **json** format.
+You can customise the line format using the `line` codec, like this:
+[source,ruby]
+output {
+  file {
+    path => ...
+    codec => line { format => "custom format: %{message}"}
+  }
+}
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== File Output Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-create_if_deleted>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-dir_mode>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+| <<{version}-plugins-{type}s-{plugin}-file_mode>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+| <<{version}-plugins-{type}s-{plugin}-filename_failure>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-flush_interval>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+| <<{version}-plugins-{type}s-{plugin}-gzip>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-path>> |{logstash-ref}/configuration-file-structure.html#string[string]|Yes
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+output plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-create_if_deleted"]
+===== `create_if_deleted`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `true`
+
+If the configured file is deleted, but an event is handled by the plugin,
+the plugin will recreate the file.
+
+[id="{version}-plugins-{type}s-{plugin}-dir_mode"]
+===== `dir_mode`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `-1`
+
+Directory access mode to use. Note that, due to a bug in JRuby, the system
+umask is ignored on Linux: https://github.com/jruby/jruby/issues/3426
+Setting this to `-1` uses the default OS value.
+Example: `"dir_mode" => 0750`
+
+[id="{version}-plugins-{type}s-{plugin}-file_mode"]
+===== `file_mode`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `-1`
+
+File access mode to use. Note that, due to a bug in JRuby, the system
+umask is ignored on Linux: https://github.com/jruby/jruby/issues/3426
+Setting this to `-1` uses the default OS value.
+Example: `"file_mode" => 0640`
+
+[id="{version}-plugins-{type}s-{plugin}-filename_failure"]
+===== `filename_failure`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `"_filepath_failures"`
+
+If the generated path is invalid, the events will be saved
+into this file, inside the defined path.
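+
+As a hedged sketch (the `%{appname}` field and the directory are assumptions
+for illustration), events whose field interpolation yields an invalid path
+are diverted to the failure file:
+[source,ruby]
+output {
+  file {
+    path => "/var/log/logstash/%{appname}/events.log"
+    # events with an unresolvable or invalid path are collected in a
+    # "_filepath_failures" file under the configured path
+    filename_failure => "_filepath_failures"
+  }
+}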
+
+[id="{version}-plugins-{type}s-{plugin}-flush_interval"]
+===== `flush_interval`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `2`
+
+Flush interval (in seconds) for flushing writes to log files.
+A value of `0` flushes on every message.
+
+[id="{version}-plugins-{type}s-{plugin}-gzip"]
+===== `gzip`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `false`
+
+Gzip the output stream before writing to disk.
+
+[id="{version}-plugins-{type}s-{plugin}-path"]
+===== `path`
+
+ * This is a required setting.
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+The path to the file to write. Event fields can be used here,
+like `/var/log/logstash/%{host}/%{application}`.
+You can also use the path option for date-based log
+rotation via the Joda time format; the event timestamp
+will be used.
+For example, `path => "./test-%{+YYYY-MM-dd}.txt"` creates
+`./test-2013-05-29.txt`.
+
+If you use an absolute path, it cannot start with a dynamic string.
+For example, `/%{myfield}/` and `/test-%{myfield}/` are not valid paths.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/outputs/file-v4.2.0.asciidoc b/docs/versioned-plugins/outputs/file-v4.2.0.asciidoc
new file mode 100644
index 000000000..578cecaf0
--- /dev/null
+++ b/docs/versioned-plugins/outputs/file-v4.2.0.asciidoc
@@ -0,0 +1,136 @@
+:plugin: file
+:type: output
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v4.2.0
+:release_date: 2017-11-23
+:changelog_url: https://github.com/logstash-plugins/logstash-output-file/blob/v4.2.0/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== File output plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This output writes events to files on disk. You can use fields
+from the event as parts of the filename and/or path.
+
+By default, this output writes one event per line in **json** format.
+You can customise the line format using the `line` codec, like this:
+[source,ruby]
+output {
+  file {
+    path => ...
+    codec => line { format => "custom format: %{message}"}
+  }
+}
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== File Output Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-create_if_deleted>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-dir_mode>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+| <<{version}-plugins-{type}s-{plugin}-file_mode>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+| <<{version}-plugins-{type}s-{plugin}-filename_failure>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-flush_interval>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+| <<{version}-plugins-{type}s-{plugin}-gzip>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-path>> |{logstash-ref}/configuration-file-structure.html#string[string]|Yes
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+output plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-create_if_deleted"]
+===== `create_if_deleted`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `true`
+
+If the configured file is deleted, but an event is handled by the plugin,
+the plugin will recreate the file.
+
+[id="{version}-plugins-{type}s-{plugin}-dir_mode"]
+===== `dir_mode`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `-1`
+
+Directory access mode to use. Note that, due to a bug in JRuby, the system
+umask is ignored on Linux: https://github.com/jruby/jruby/issues/3426
+Setting this to `-1` uses the default OS value.
+Example: `"dir_mode" => 0750`
+
+[id="{version}-plugins-{type}s-{plugin}-file_mode"]
+===== `file_mode`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `-1`
+
+File access mode to use. Note that, due to a bug in JRuby, the system
+umask is ignored on Linux: https://github.com/jruby/jruby/issues/3426
+Setting this to `-1` uses the default OS value.
+Example: `"file_mode" => 0640`
+
+[id="{version}-plugins-{type}s-{plugin}-filename_failure"]
+===== `filename_failure`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `"_filepath_failures"`
+
+If the generated path is invalid, the events will be saved
+into this file, inside the defined path.
+
+[id="{version}-plugins-{type}s-{plugin}-flush_interval"]
+===== `flush_interval`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `2`
+
+Flush interval (in seconds) for flushing writes to log files.
+A value of `0` flushes on every message.
+
+[id="{version}-plugins-{type}s-{plugin}-gzip"]
+===== `gzip`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `false`
+
+Gzip the output stream before writing to disk.
+
+[id="{version}-plugins-{type}s-{plugin}-path"]
+===== `path`
+
+ * This is a required setting.
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+The path to the file to write. Event fields can be used here,
+like `/var/log/logstash/%{host}/%{application}`.
+You can also use the path option for date-based log
+rotation via the Joda time format; the event timestamp
+will be used.
+For example, `path => "./test-%{+YYYY-MM-dd}.txt"` creates
+`./test-2013-05-29.txt`.
+
+If you use an absolute path, it cannot start with a dynamic string.
+For example, `/%{myfield}/` and `/test-%{myfield}/` are not valid paths.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/outputs/file-v4.2.1.asciidoc b/docs/versioned-plugins/outputs/file-v4.2.1.asciidoc
new file mode 100644
index 000000000..1da9c8f14
--- /dev/null
+++ b/docs/versioned-plugins/outputs/file-v4.2.1.asciidoc
@@ -0,0 +1,136 @@
+:plugin: file
+:type: output
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v4.2.1
+:release_date: 2017-11-24
+:changelog_url: https://github.com/logstash-plugins/logstash-output-file/blob/v4.2.1/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== File output plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This output writes events to files on disk. You can use fields
+from the event as parts of the filename and/or path.
+
+By default, this output writes one event per line in **json** format.
+You can customise the line format using the `line` codec, like this:
+[source,ruby]
+output {
+  file {
+    path => ...
+    codec => line { format => "custom format: %{message}"}
+  }
+}
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== File Output Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-create_if_deleted>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-dir_mode>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+| <<{version}-plugins-{type}s-{plugin}-file_mode>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+| <<{version}-plugins-{type}s-{plugin}-filename_failure>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-flush_interval>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+| <<{version}-plugins-{type}s-{plugin}-gzip>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-path>> |{logstash-ref}/configuration-file-structure.html#string[string]|Yes
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+output plugins.
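+
+A minimal sketch of the permission-related options described below (the
+octal modes are examples only):
+[source,ruby]
+output {
+  file {
+    path => "/var/log/logstash/out.log"
+    dir_mode => 0750   # created directories get rwxr-x---
+    file_mode => 0640  # created files get rw-r-----
+  }
+}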
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-create_if_deleted"]
+===== `create_if_deleted`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `true`
+
+If the configured file is deleted, but an event is handled by the plugin,
+the plugin will recreate the file.
+
+[id="{version}-plugins-{type}s-{plugin}-dir_mode"]
+===== `dir_mode`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `-1`
+
+Directory access mode to use. Note that, due to a bug in JRuby, the system
+umask is ignored on Linux: https://github.com/jruby/jruby/issues/3426
+Setting this to `-1` uses the default OS value.
+Example: `"dir_mode" => 0750`
+
+[id="{version}-plugins-{type}s-{plugin}-file_mode"]
+===== `file_mode`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `-1`
+
+File access mode to use. Note that, due to a bug in JRuby, the system
+umask is ignored on Linux: https://github.com/jruby/jruby/issues/3426
+Setting this to `-1` uses the default OS value.
+Example: `"file_mode" => 0640`
+
+[id="{version}-plugins-{type}s-{plugin}-filename_failure"]
+===== `filename_failure`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `"_filepath_failures"`
+
+If the generated path is invalid, the events will be saved
+into this file, inside the defined path.
+
+[id="{version}-plugins-{type}s-{plugin}-flush_interval"]
+===== `flush_interval`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `2`
+
+Flush interval (in seconds) for flushing writes to log files.
+A value of `0` flushes on every message.
+
+[id="{version}-plugins-{type}s-{plugin}-gzip"]
+===== `gzip`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `false`
+
+Gzip the output stream before writing to disk.
+
+[id="{version}-plugins-{type}s-{plugin}-path"]
+===== `path`
+
+ * This is a required setting.
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+The path to the file to write. Event fields can be used here,
+like `/var/log/logstash/%{host}/%{application}`.
+You can also use the path option for date-based log
+rotation via the Joda time format; the event timestamp
+will be used.
+For example, `path => "./test-%{+YYYY-MM-dd}.txt"` creates
+`./test-2013-05-29.txt`.
+
+If you use an absolute path, it cannot start with a dynamic string.
+For example, `/%{myfield}/` and `/test-%{myfield}/` are not valid paths.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/outputs/graphite-index.asciidoc b/docs/versioned-plugins/outputs/graphite-index.asciidoc
new file mode 100644
index 000000000..a9048c6d4
--- /dev/null
+++ b/docs/versioned-plugins/outputs/graphite-index.asciidoc
@@ -0,0 +1,16 @@
+:plugin: graphite
+:type: output
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+| <> | 2017-11-07
+| <> | 2017-08-16
+| <> | 2017-06-23
+|=======================================================================
+
+include::graphite-v3.1.4.asciidoc[]
+include::graphite-v3.1.3.asciidoc[]
+include::graphite-v3.1.2.asciidoc[]
+
diff --git a/docs/versioned-plugins/outputs/graphite-v3.1.2.asciidoc b/docs/versioned-plugins/outputs/graphite-v3.1.2.asciidoc
new file mode 100644
index 000000000..bce20b8e2
--- /dev/null
+++ b/docs/versioned-plugins/outputs/graphite-v3.1.2.asciidoc
@@ -0,0 +1,173 @@
+:plugin: graphite
+:type: output
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.1.2
+:release_date: 2017-06-23
+:changelog_url: https://github.com/logstash-plugins/logstash-output-graphite/blob/v3.1.2/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Graphite output plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This output allows you to pull metrics from your logs and ship them to
+Graphite. Graphite is an open source tool for storing and graphing metrics.
+
+An example use case: Some applications emit aggregated stats in the logs
+every 10 seconds. Using the grok filter and this output, it is possible to
+capture the metric values from the logs and emit them to Graphite.
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Graphite Output Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-exclude_metrics>> |{logstash-ref}/configuration-file-structure.html#array[array]|No
+| <<{version}-plugins-{type}s-{plugin}-fields_are_metrics>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-host>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-include_metrics>> |{logstash-ref}/configuration-file-structure.html#array[array]|No
+| <<{version}-plugins-{type}s-{plugin}-metrics>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No
+| <<{version}-plugins-{type}s-{plugin}-metrics_format>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-nested_object_separator>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-port>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+| <<{version}-plugins-{type}s-{plugin}-reconnect_interval>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+| <<{version}-plugins-{type}s-{plugin}-resend_on_failure>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-timestamp_field>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+output plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-exclude_metrics"]
+===== `exclude_metrics`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#array[array]
+ * Default value is `["%{[^}]+}"]`
+
+Exclude regex-matched metric names; by default, unresolved `%{field}` strings are excluded.
+
+[id="{version}-plugins-{type}s-{plugin}-fields_are_metrics"]
+===== `fields_are_metrics`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `false`
+
+When set to `true`, the event's fields are treated as metrics
+and are sent verbatim to Graphite. You may use either `fields_are_metrics`
+or `metrics`, but not both.
+
+[id="{version}-plugins-{type}s-{plugin}-host"]
+===== `host`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `"localhost"`
+
+The hostname or IP address of the Graphite server.
+
+[id="{version}-plugins-{type}s-{plugin}-include_metrics"]
+===== `include_metrics`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#array[array]
+ * Default value is `[".*"]`
+
+Include only regex-matched metric names.
+
+[id="{version}-plugins-{type}s-{plugin}-metrics"]
+===== `metrics`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash]
+ * Default value is `{}`
+
+The metric(s) to use. This supports dynamic strings like `%{host}`
+for both metric names and values. This is a hash, with each key
+being a metric name and each value being the metric value. Example:
+[source,ruby]
+    metrics => { "%{host}/uptime" => "%{uptime_1m}" }
+
+The value will be coerced to a floating-point value. Values which cannot be
+coerced will be set to zero (0). You may use either `metrics` or `fields_are_metrics`,
+but not both.
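+
+As an end-to-end sketch (the `load_avg` field and the hostname are
+assumptions for illustration), a value captured by an upstream filter such
+as grok can be shipped as a metric like this:
+[source,ruby]
+output {
+  graphite {
+    host => "graphite.example.com"
+    port => 2003
+    # metric name and value are both built from event fields
+    metrics => { "servers.%{host}.load" => "%{load_avg}" }
+  }
+}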
+
+[id="{version}-plugins-{type}s-{plugin}-metrics_format"]
+===== `metrics_format`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `"*"`
+
+Defines the format of the metric string. The placeholder `*` will be
+replaced with the name of the actual metric. Example:
+[source,ruby]
+    metrics_format => "foo.bar.*.sum"
+
+NOTE: If no `metrics_format` is defined, the name of the metric will be used as a fallback.
+
+[id="{version}-plugins-{type}s-{plugin}-nested_object_separator"]
+===== `nested_object_separator`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `"."`
+
+When hashes are passed in as values, they are broken out into dotted notation.
+For instance, if you configure this plugin with
+[source,ruby]
+    metrics => "mymetrics"
+
+and "mymetrics" is a nested hash of `{a => 1, b => { c => 2 }}`,
+this plugin will generate two metrics: `a => 1` and `b.c => 2`.
+If you've specified a `metrics_format`, it will be respected,
+but you may still want control over the separator within these nested key names.
+This setting changes the separator from the default `.`.
+
+[id="{version}-plugins-{type}s-{plugin}-port"]
+===== `port`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `2003`
+
+The port to connect to on the Graphite server.
+
+[id="{version}-plugins-{type}s-{plugin}-reconnect_interval"]
+===== `reconnect_interval`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `2`
+
+Interval between reconnect attempts to Carbon.
+
+[id="{version}-plugins-{type}s-{plugin}-resend_on_failure"]
+===== `resend_on_failure`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `false`
+
+Should metrics be resent on failure?
+
+[id="{version}-plugins-{type}s-{plugin}-timestamp_field"]
+===== `timestamp_field`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `"@timestamp"`
+
+Use this field for the timestamp instead of `@timestamp`, which is the
+default. Useful when backfilling, or just getting more accurate data into
+Graphite, since you probably have a cache layer in front of Logstash.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/outputs/graphite-v3.1.3.asciidoc b/docs/versioned-plugins/outputs/graphite-v3.1.3.asciidoc
new file mode 100644
index 000000000..cc5d3c33f
--- /dev/null
+++ b/docs/versioned-plugins/outputs/graphite-v3.1.3.asciidoc
@@ -0,0 +1,173 @@
+:plugin: graphite
+:type: output
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.1.3
+:release_date: 2017-08-16
+:changelog_url: https://github.com/logstash-plugins/logstash-output-graphite/blob/v3.1.3/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Graphite output plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This output allows you to pull metrics from your logs and ship them to
+Graphite. Graphite is an open source tool for storing and graphing metrics.
+
+An example use case: Some applications emit aggregated stats in the logs
+every 10 seconds. Using the grok filter and this output, it is possible to
+capture the metric values from the logs and emit them to Graphite.
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Graphite Output Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-exclude_metrics>> |{logstash-ref}/configuration-file-structure.html#array[array]|No
+| <<{version}-plugins-{type}s-{plugin}-fields_are_metrics>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-host>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-include_metrics>> |{logstash-ref}/configuration-file-structure.html#array[array]|No
+| <<{version}-plugins-{type}s-{plugin}-metrics>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No
+| <<{version}-plugins-{type}s-{plugin}-metrics_format>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-nested_object_separator>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-port>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+| <<{version}-plugins-{type}s-{plugin}-reconnect_interval>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+| <<{version}-plugins-{type}s-{plugin}-resend_on_failure>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-timestamp_field>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+output plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-exclude_metrics"]
+===== `exclude_metrics`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#array[array]
+ * Default value is `["%{[^}]+}"]`
+
+Exclude regex-matched metric names; by default, unresolved `%{field}` strings are excluded.
+
+[id="{version}-plugins-{type}s-{plugin}-fields_are_metrics"]
+===== `fields_are_metrics`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `false`
+
+When set to `true`, the event's fields are treated as metrics
+and are sent verbatim to Graphite. You may use either `fields_are_metrics`
+or `metrics`, but not both.
+
+[id="{version}-plugins-{type}s-{plugin}-host"]
+===== `host`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `"localhost"`
+
+The hostname or IP address of the Graphite server.
+
+[id="{version}-plugins-{type}s-{plugin}-include_metrics"]
+===== `include_metrics`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#array[array]
+ * Default value is `[".*"]`
+
+Include only regex-matched metric names.
+
+[id="{version}-plugins-{type}s-{plugin}-metrics"]
+===== `metrics`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash]
+ * Default value is `{}`
+
+The metric(s) to use. This supports dynamic strings like `%{host}`
+for both metric names and values. This is a hash, with each key
+being a metric name and each value being the metric value. Example:
+[source,ruby]
+    metrics => { "%{host}/uptime" => "%{uptime_1m}" }
+
+The value will be coerced to a floating-point value. Values which cannot be
+coerced will be set to zero (0). You may use either `metrics` or `fields_are_metrics`,
+but not both.
+
+[id="{version}-plugins-{type}s-{plugin}-metrics_format"]
+===== `metrics_format`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `"*"`
+
+Defines the format of the metric string. The placeholder `*` will be
+replaced with the name of the actual metric. Example:
+[source,ruby]
+    metrics_format => "foo.bar.*.sum"
+
+NOTE: If no `metrics_format` is defined, the name of the metric will be used as a fallback.
+
+[id="{version}-plugins-{type}s-{plugin}-nested_object_separator"]
+===== `nested_object_separator`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `"."`
+
+When hashes are passed in as values, they are broken out into dotted notation.
+For instance, if you configure this plugin with
+[source,ruby]
+    metrics => "mymetrics"
+
+and "mymetrics" is a nested hash of `{a => 1, b => { c => 2 }}`,
+this plugin will generate two metrics: `a => 1` and `b.c => 2`.
+If you've specified a `metrics_format`, it will be respected,
+but you may still want control over the separator within these nested key names.
+This setting changes the separator from the default `.`.
+
+[id="{version}-plugins-{type}s-{plugin}-port"]
+===== `port`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `2003`
+
+The port to connect to on the Graphite server.
+
+[id="{version}-plugins-{type}s-{plugin}-reconnect_interval"]
+===== `reconnect_interval`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `2`
+
+Interval between reconnect attempts to Carbon.
+
+[id="{version}-plugins-{type}s-{plugin}-resend_on_failure"]
+===== `resend_on_failure`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `false`
+
+Should metrics be resent on failure?
+
+[id="{version}-plugins-{type}s-{plugin}-timestamp_field"]
+===== `timestamp_field`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `"@timestamp"`
+
+Use this field for the timestamp instead of `@timestamp`, which is the
+default. Useful when backfilling, or just getting more accurate data into
+Graphite, since you probably have a cache layer in front of Logstash.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/outputs/graphite-v3.1.4.asciidoc b/docs/versioned-plugins/outputs/graphite-v3.1.4.asciidoc
new file mode 100644
index 000000000..972083f6b
--- /dev/null
+++ b/docs/versioned-plugins/outputs/graphite-v3.1.4.asciidoc
@@ -0,0 +1,173 @@
+:plugin: graphite
+:type: output
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.1.4
+:release_date: 2017-11-07
+:changelog_url: https://github.com/logstash-plugins/logstash-output-graphite/blob/v3.1.4/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Graphite output plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This output allows you to pull metrics from your logs and ship them to
+Graphite. Graphite is an open source tool for storing and graphing metrics.
+
+An example use case: Some applications emit aggregated stats in the logs
+every 10 seconds. Using the grok filter and this output, it is possible to
+capture the metric values from the logs and emit them to Graphite.
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Graphite Output Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-exclude_metrics>> |{logstash-ref}/configuration-file-structure.html#array[array]|No
+| <<{version}-plugins-{type}s-{plugin}-fields_are_metrics>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-host>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-include_metrics>> |{logstash-ref}/configuration-file-structure.html#array[array]|No
+| <<{version}-plugins-{type}s-{plugin}-metrics>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No
+| <<{version}-plugins-{type}s-{plugin}-metrics_format>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-nested_object_separator>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-port>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+| <<{version}-plugins-{type}s-{plugin}-reconnect_interval>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+| <<{version}-plugins-{type}s-{plugin}-resend_on_failure>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-timestamp_field>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+output plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-exclude_metrics"]
+===== `exclude_metrics`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#array[array]
+ * Default value is `["%{[^}]+}"]`
+
+Exclude regex-matched metric names; by default, unresolved `%{field}` strings are excluded.
+
+[id="{version}-plugins-{type}s-{plugin}-fields_are_metrics"]
+===== `fields_are_metrics`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `false`
+
+When set to `true`, the event's fields are treated as metrics
+and are sent verbatim to Graphite. You may use either `fields_are_metrics`
+or `metrics`, but not both.
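+
+A brief sketch of this mode (the regular expression is an illustrative
+assumption): with `fields_are_metrics` enabled, `include_metrics` can
+narrow which event fields are shipped:
+[source,ruby]
+output {
+  graphite {
+    fields_are_metrics => true
+    # only ship fields whose names match this pattern
+    include_metrics => ["^(uptime|load)_.*"]
+  }
+}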
+
+[id="{version}-plugins-{type}s-{plugin}-host"]
+===== `host`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `"localhost"`
+
+The hostname or IP address of the Graphite server.
+
+[id="{version}-plugins-{type}s-{plugin}-include_metrics"]
+===== `include_metrics`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#array[array]
+ * Default value is `[".*"]`
+
+Include only regex-matched metric names.
+
+[id="{version}-plugins-{type}s-{plugin}-metrics"]
+===== `metrics`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash]
+ * Default value is `{}`
+
+The metric(s) to use. This supports dynamic strings like `%{host}`
+for both metric names and values. This is a hash, with each key
+being a metric name and each value being the metric value. Example:
+[source,ruby]
+    metrics => { "%{host}/uptime" => "%{uptime_1m}" }
+
+The value will be coerced to a floating-point value. Values which cannot be
+coerced will be set to zero (0). You may use either `metrics` or `fields_are_metrics`,
+but not both.
+
+[id="{version}-plugins-{type}s-{plugin}-metrics_format"]
+===== `metrics_format`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `"*"`
+
+Defines the format of the metric string. The placeholder `*` will be
+replaced with the name of the actual metric. Example:
+[source,ruby]
+    metrics_format => "foo.bar.*.sum"
+
+NOTE: If no `metrics_format` is defined, the name of the metric will be used as a fallback.
+
+[id="{version}-plugins-{type}s-{plugin}-nested_object_separator"]
+===== `nested_object_separator`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `"."`
+
+When hashes are passed in as values, they are broken out into dotted notation.
+For instance, if you configure this plugin with
+[source,ruby]
+    metrics => "mymetrics"
+
+and "mymetrics" is a nested hash of `{a => 1, b => { c => 2 }}`,
+this plugin will generate two metrics: `a => 1` and `b.c => 2`.
+If you've specified a `metrics_format`, it will be respected,
+but you may still want control over the separator within these nested key names.
+This setting changes the separator from the default `.`.
+
+[id="{version}-plugins-{type}s-{plugin}-port"]
+===== `port`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `2003`
+
+The port to connect to on the Graphite server.
+
+[id="{version}-plugins-{type}s-{plugin}-reconnect_interval"]
+===== `reconnect_interval`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `2`
+
+Interval between reconnect attempts to Carbon.
+
+[id="{version}-plugins-{type}s-{plugin}-resend_on_failure"]
+===== `resend_on_failure`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `false`
+
+Should metrics be resent on failure?
+
+[id="{version}-plugins-{type}s-{plugin}-timestamp_field"]
+===== `timestamp_field`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `"@timestamp"`
+
+Use this field for the timestamp instead of `@timestamp`, which is the
+default. Useful when backfilling, or just getting more accurate data into
+Graphite, since you probably have a cache layer in front of Logstash.
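+
+For example (a sketch; `log_time` is an assumed field populated by an
+upstream date filter), backfilled events can be reported at their original
+time rather than at ingestion time:
+[source,ruby]
+output {
+  graphite {
+    timestamp_field => "log_time"
+  }
+}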
+ + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file