From c59fbb3358d003640e80ba9bda52fca91063e4f2 Mon Sep 17 00:00:00 2001 From: Jack Conradson Date: Tue, 21 May 2019 13:47:47 -0400 Subject: [PATCH 001/224] Reorganize Painless doc structure (#42303) --- docs/painless/index.asciidoc | 2 +- docs/painless/painless-contexts.asciidoc | 2 - .../painless/painless-contexts/index.asciidoc | 2 + ...ption.asciidoc => painless-guide.asciidoc} | 19 +++++--- docs/painless/painless-guide/index.asciidoc | 7 +++ .../painless-debugging.asciidoc | 0 .../painless-execute-script.asciidoc | 0 .../painless-method-dispatch.asciidoc | 30 ++++++++++++ .../painless-walkthrough.asciidoc} | 48 ++----------------- docs/painless/painless-lang-spec.asciidoc | 36 +------------- .../painless-lang-spec/index.asciidoc | 35 ++++++++++++++ .../painless-casting.asciidoc | 0 .../painless-comments.asciidoc | 0 .../painless-functions.asciidoc | 0 .../painless-identifiers.asciidoc | 0 .../painless-keywords.asciidoc | 0 .../painless-lambdas.asciidoc | 0 .../painless-literals.asciidoc | 0 .../painless-operators-array.asciidoc | 0 .../painless-operators-boolean.asciidoc | 0 .../painless-operators-general.asciidoc | 0 .../painless-operators-numeric.asciidoc | 0 .../painless-operators-reference.asciidoc | 0 .../painless-operators.asciidoc | 0 .../painless-regexes.asciidoc | 0 .../painless-scripts.asciidoc | 0 .../painless-statements.asciidoc | 0 .../painless-types.asciidoc | 0 .../painless-variables.asciidoc | 0 docs/painless/painless-xref.asciidoc | 2 - docs/reference/ingest/ingest-node.asciidoc | 2 +- .../modules/scripting/painless.asciidoc | 29 ++++++++++- 32 files changed, 120 insertions(+), 94 deletions(-) rename docs/painless/{painless-description.asciidoc => painless-guide.asciidoc} (56%) create mode 100644 docs/painless/painless-guide/index.asciidoc rename docs/painless/{ => painless-guide}/painless-debugging.asciidoc (100%) rename docs/painless/{ => painless-guide}/painless-execute-script.asciidoc (100%) create mode 100644 docs/painless/painless-guide/painless-method-dispatch.asciidoc rename docs/painless/{painless-getting-started.asciidoc => painless-guide/painless-walkthrough.asciidoc} (83%) create mode 100644 docs/painless/painless-lang-spec/index.asciidoc rename docs/painless/{ => painless-lang-spec}/painless-casting.asciidoc (100%) rename docs/painless/{ => painless-lang-spec}/painless-comments.asciidoc (100%) rename docs/painless/{ => painless-lang-spec}/painless-functions.asciidoc (100%) rename docs/painless/{ => painless-lang-spec}/painless-identifiers.asciidoc (100%) rename docs/painless/{ => painless-lang-spec}/painless-keywords.asciidoc (100%) rename docs/painless/{ => painless-lang-spec}/painless-lambdas.asciidoc (100%) rename docs/painless/{ => painless-lang-spec}/painless-literals.asciidoc (100%) rename docs/painless/{ => painless-lang-spec}/painless-operators-array.asciidoc (100%) rename docs/painless/{ => painless-lang-spec}/painless-operators-boolean.asciidoc (100%) rename docs/painless/{ => painless-lang-spec}/painless-operators-general.asciidoc (100%) rename docs/painless/{ => painless-lang-spec}/painless-operators-numeric.asciidoc (100%) rename docs/painless/{ => painless-lang-spec}/painless-operators-reference.asciidoc (100%) rename docs/painless/{ => painless-lang-spec}/painless-operators.asciidoc (100%) rename docs/painless/{ => painless-lang-spec}/painless-regexes.asciidoc (100%) rename docs/painless/{ => painless-lang-spec}/painless-scripts.asciidoc (100%) rename docs/painless/{ => painless-lang-spec}/painless-statements.asciidoc (100%) rename docs/painless/{ => painless-lang-spec}/painless-types.asciidoc (100%) rename docs/painless/{ => painless-lang-spec}/painless-variables.asciidoc (100%) delete mode 100644 docs/painless/painless-xref.asciidoc diff --git a/docs/painless/index.asciidoc b/docs/painless/index.asciidoc index 92e0a33bf1347..c41899bbd98da 100644 --- a/docs/painless/index.asciidoc +++ b/docs/painless/index.asciidoc @@ -3,7 +3,7 @@ include::../Versions.asciidoc[] -include::painless-getting-started.asciidoc[] +include::painless-guide.asciidoc[] include::painless-lang-spec.asciidoc[] diff --git a/docs/painless/painless-contexts.asciidoc b/docs/painless/painless-contexts.asciidoc index 7c342a3da7a5a..ccc9e3ac4db24 100644 --- a/docs/painless/painless-contexts.asciidoc +++ b/docs/painless/painless-contexts.asciidoc @@ -54,6 +54,4 @@ specialized code may define new ways to use a Painless script. | {xpack-ref}/transform-script.html[Elasticsearch Documentation] |==== -include::painless-contexts/painless-context-examples.asciidoc[] - include::painless-contexts/index.asciidoc[] diff --git a/docs/painless/painless-contexts/index.asciidoc b/docs/painless/painless-contexts/index.asciidoc index 0c8c21c06a9be..11b4c9993374e 100644 --- a/docs/painless/painless-contexts/index.asciidoc +++ b/docs/painless/painless-contexts/index.asciidoc @@ -1,3 +1,5 @@ +include::painless-context-examples.asciidoc[] + include::painless-ingest-processor-context.asciidoc[] include::painless-update-context.asciidoc[] diff --git a/docs/painless/painless-description.asciidoc b/docs/painless/painless-guide.asciidoc similarity index 56% rename from docs/painless/painless-description.asciidoc rename to docs/painless/painless-guide.asciidoc index dfaf66ca26d4b..5e926498088ab 100644 --- a/docs/painless/painless-description.asciidoc +++ b/docs/painless/painless-guide.asciidoc @@ -1,11 +1,14 @@ +[[painless-guide]] +== Painless Guide + _Painless_ is a simple, secure scripting language designed specifically for use with Elasticsearch. It is the default scripting language for Elasticsearch and -can safely be used for inline and stored scripts. For a detailed description of -the Painless syntax and language features, see the -{painless}/painless-lang-spec.html[Painless Language Specification]. +can safely be used for inline and stored scripts. For a jump start into +Painless, see <>. For a +detailed description of the Painless syntax and language features, see the +<>. -[[painless-features]] -You can use Painless anywhere scripts can be used in Elasticsearch. Painless +You can use Painless anywhere scripts are used in Elasticsearch. Painless provides: * Fast performance: Painless scripts https://benchmarks.elastic.co/index.html#search_qps_scripts[ @@ -18,7 +21,9 @@ complete list of available classes and methods. * Optional typing: Variables and parameters can use explicit types or the dynamic `def` type. -* Syntax: Extends Java's syntax to provide http://groovy-lang.org/index.html[ -Groovy-style] scripting language features that make scripts easier to write. +* Syntax: Extends a subset of Java's syntax to provide additional scripting +language features. * Optimizations: Designed specifically for Elasticsearch scripting. + +include::painless-guide/index.asciidoc[] \ No newline at end of file diff --git a/docs/painless/painless-guide/index.asciidoc b/docs/painless/painless-guide/index.asciidoc new file mode 100644 index 0000000000000..b45406a4e7273 --- /dev/null +++ b/docs/painless/painless-guide/index.asciidoc @@ -0,0 +1,7 @@ +include::painless-walkthrough.asciidoc[] + +include::painless-method-dispatch.asciidoc[] + +include::painless-debugging.asciidoc[] + +include::painless-execute-script.asciidoc[] diff --git a/docs/painless/painless-debugging.asciidoc b/docs/painless/painless-guide/painless-debugging.asciidoc similarity index 100% rename from docs/painless/painless-debugging.asciidoc rename to docs/painless/painless-guide/painless-debugging.asciidoc diff --git a/docs/painless/painless-execute-script.asciidoc b/docs/painless/painless-guide/painless-execute-script.asciidoc similarity index 100% rename from docs/painless/painless-execute-script.asciidoc rename to docs/painless/painless-guide/painless-execute-script.asciidoc diff --git a/docs/painless/painless-guide/painless-method-dispatch.asciidoc b/docs/painless/painless-guide/painless-method-dispatch.asciidoc new file mode 100644 index 0000000000000..0f7d0423174b5 --- /dev/null +++ b/docs/painless/painless-guide/painless-method-dispatch.asciidoc @@ -0,0 +1,30 @@ +[[modules-scripting-painless-dispatch]] +=== How painless dispatches functions + +Painless uses receiver, name, and https://en.wikipedia.org/wiki/Arity[arity] +for method dispatch. For example, `s.foo(a, b)` is resolved by first getting +the class of `s` and then looking up the method `foo` with two parameters. This +is different from Groovy which uses the +https://en.wikipedia.org/wiki/Multiple_dispatch[runtime types] of the +parameters and Java which uses the compile time types of the parameters. + +The consequence of this that Painless doesn't support overloaded methods like +Java, leading to some trouble when it whitelists classes from the Java +standard library. For example, in Java and Groovy, `Matcher` has two methods: +`group(int)` and `group(String)`. Painless can't whitelist both of these methods +because they have the same name and the same number of parameters. So instead it +has `group(int)` and `namedGroup(String)`. + +We have a few justifications for this different way of dispatching methods: + +1. It makes operating on `def` types simpler and, presumably, faster. Using +receiver, name, and arity means that when Painless sees a call on a `def` object it +can dispatch the appropriate method without having to do expensive comparisons +of the types of the parameters. The same is true for invocations with `def` +typed parameters. +2. It keeps things consistent. It would be genuinely weird for Painless to +behave like Groovy if any `def` typed parameters were involved and Java +otherwise. It'd be slow for it to behave like Groovy all the time. +3. It keeps Painless maintainable. Adding the Java or Groovy like method +dispatch *feels* like it'd add a ton of complexity which'd make maintenance and +other improvements much more difficult. diff --git a/docs/painless/painless-getting-started.asciidoc b/docs/painless/painless-guide/painless-walkthrough.asciidoc similarity index 83% rename from docs/painless/painless-getting-started.asciidoc rename to docs/painless/painless-guide/painless-walkthrough.asciidoc index f562033471e31..70089a08726d2 100644 --- a/docs/painless/painless-getting-started.asciidoc +++ b/docs/painless/painless-guide/painless-walkthrough.asciidoc @@ -1,10 +1,5 @@ -[[painless-getting-started]] -== Getting Started with Painless - -include::painless-description.asciidoc[] - -[[painless-examples]] -=== Painless Examples +[[painless-walkthrough]] +=== A Brief Painless Walkthrough To illustrate how Painless works, let's load some hockey stats into an Elasticsearch index: @@ -121,7 +116,7 @@ GET hockey/_search [float] -===== Missing values +==== Missing values `doc['field'].value` throws an exception if the field is missing in a document. @@ -198,7 +193,7 @@ POST hockey/_update/1 ==== Dates Date fields are exposed as -`ReadableDateTime`, so they support methods like `getYear`, `getDayOfWeek` +`ZonedDateTime`, so they support methods like `getYear`, `getDayOfWeek` or e.g. getting milliseconds since epoch with `getMillis`. To use these in a script, leave out the `get` prefix and continue with lowercasing the rest of the method name. For example, the following returns every hockey @@ -365,38 +360,3 @@ Note: all of the `_update_by_query` examples above could really do with a {ref}/query-dsl-script-query.html[script query] it wouldn't be as efficient as using any other query because script queries aren't able to use the inverted index to limit the documents that they have to check. - -[[modules-scripting-painless-dispatch]] -=== How painless dispatches functions - -Painless uses receiver, name, and https://en.wikipedia.org/wiki/Arity[arity] -for method dispatch. For example, `s.foo(a, b)` is resolved by first getting -the class of `s` and then looking up the method `foo` with two parameters. This -is different from Groovy which uses the -https://en.wikipedia.org/wiki/Multiple_dispatch[runtime types] of the -parameters and Java which uses the compile time types of the parameters. - -The consequence of this that Painless doesn't support overloaded methods like -Java, leading to some trouble when it whitelists classes from the Java -standard library. For example, in Java and Groovy, `Matcher` has two methods: -`group(int)` and `group(String)`. Painless can't whitelist both of these methods -because they have the same name and the same number of parameters. So instead it -has `group(int)` and `namedGroup(String)`. - -We have a few justifications for this different way of dispatching methods: - -1. It makes operating on `def` types simpler and, presumably, faster. Using -receiver, name, and arity means that when Painless sees a call on a `def` object it -can dispatch the appropriate method without having to do expensive comparisons -of the types of the parameters. The same is true for invocations with `def` -typed parameters. -2. It keeps things consistent. It would be genuinely weird for Painless to -behave like Groovy if any `def` typed parameters were involved and Java -otherwise. It'd be slow for it to behave like Groovy all the time. -3. It keeps Painless maintainable. Adding the Java or Groovy like method -dispatch *feels* like it'd add a ton of complexity which'd make maintenance and -other improvements much more difficult. - -include::painless-debugging.asciidoc[] - -include::painless-execute-script.asciidoc[] diff --git a/docs/painless/painless-lang-spec.asciidoc b/docs/painless/painless-lang-spec.asciidoc index d50f3db2dc0d3..2f108c73732eb 100644 --- a/docs/painless/painless-lang-spec.asciidoc +++ b/docs/painless/painless-lang-spec.asciidoc @@ -17,38 +17,4 @@ into Java Virtual Machine (JVM) byte code and executed against a standard JVM. This specification uses ANTLR4 grammar notation to describe the allowed syntax. However, the actual Painless grammar is more compact than what is shown here. -include::painless-comments.asciidoc[] - -include::painless-keywords.asciidoc[] - -include::painless-literals.asciidoc[] - -include::painless-identifiers.asciidoc[] - -include::painless-variables.asciidoc[] - -include::painless-types.asciidoc[] - -include::painless-casting.asciidoc[] - -include::painless-operators.asciidoc[] - -include::painless-operators-general.asciidoc[] - -include::painless-operators-numeric.asciidoc[] - -include::painless-operators-boolean.asciidoc[] - -include::painless-operators-reference.asciidoc[] - -include::painless-operators-array.asciidoc[] - -include::painless-statements.asciidoc[] - -include::painless-scripts.asciidoc[] - -include::painless-functions.asciidoc[] - -include::painless-lambdas.asciidoc[] - -include::painless-regexes.asciidoc[] +include::painless-lang-spec/index.asciidoc[] \ No newline at end of file diff --git a/docs/painless/painless-lang-spec/index.asciidoc b/docs/painless/painless-lang-spec/index.asciidoc new file mode 100644 index 0000000000000..e75264ff3e4e1 --- /dev/null +++ b/docs/painless/painless-lang-spec/index.asciidoc @@ -0,0 +1,35 @@ +include::painless-comments.asciidoc[] + +include::painless-keywords.asciidoc[] + +include::painless-literals.asciidoc[] + +include::painless-identifiers.asciidoc[] + +include::painless-variables.asciidoc[] + +include::painless-types.asciidoc[] + +include::painless-casting.asciidoc[] + +include::painless-operators.asciidoc[] + +include::painless-operators-general.asciidoc[] + +include::painless-operators-numeric.asciidoc[] + +include::painless-operators-boolean.asciidoc[] + +include::painless-operators-reference.asciidoc[] + +include::painless-operators-array.asciidoc[] + +include::painless-statements.asciidoc[] + +include::painless-scripts.asciidoc[] + +include::painless-functions.asciidoc[] + +include::painless-lambdas.asciidoc[] + +include::painless-regexes.asciidoc[] diff --git a/docs/painless/painless-casting.asciidoc b/docs/painless/painless-lang-spec/painless-casting.asciidoc similarity index 100% rename from docs/painless/painless-casting.asciidoc rename to docs/painless/painless-lang-spec/painless-casting.asciidoc diff --git a/docs/painless/painless-comments.asciidoc b/docs/painless/painless-lang-spec/painless-comments.asciidoc similarity index 100% rename from docs/painless/painless-comments.asciidoc rename to docs/painless/painless-lang-spec/painless-comments.asciidoc diff --git a/docs/painless/painless-functions.asciidoc b/docs/painless/painless-lang-spec/painless-functions.asciidoc similarity index 100% rename from docs/painless/painless-functions.asciidoc rename to docs/painless/painless-lang-spec/painless-functions.asciidoc diff --git a/docs/painless/painless-identifiers.asciidoc b/docs/painless/painless-lang-spec/painless-identifiers.asciidoc similarity index 100% rename from docs/painless/painless-identifiers.asciidoc rename to docs/painless/painless-lang-spec/painless-identifiers.asciidoc diff --git a/docs/painless/painless-keywords.asciidoc b/docs/painless/painless-lang-spec/painless-keywords.asciidoc similarity index 100% rename from docs/painless/painless-keywords.asciidoc rename to docs/painless/painless-lang-spec/painless-keywords.asciidoc diff --git a/docs/painless/painless-lambdas.asciidoc b/docs/painless/painless-lang-spec/painless-lambdas.asciidoc similarity index 100% rename from docs/painless/painless-lambdas.asciidoc rename to docs/painless/painless-lang-spec/painless-lambdas.asciidoc diff --git a/docs/painless/painless-literals.asciidoc b/docs/painless/painless-lang-spec/painless-literals.asciidoc similarity index 100% rename from docs/painless/painless-literals.asciidoc rename to docs/painless/painless-lang-spec/painless-literals.asciidoc diff --git a/docs/painless/painless-operators-array.asciidoc b/docs/painless/painless-lang-spec/painless-operators-array.asciidoc similarity index 100% rename from docs/painless/painless-operators-array.asciidoc rename to docs/painless/painless-lang-spec/painless-operators-array.asciidoc diff --git a/docs/painless/painless-operators-boolean.asciidoc b/docs/painless/painless-lang-spec/painless-operators-boolean.asciidoc similarity index 100% rename from docs/painless/painless-operators-boolean.asciidoc rename to docs/painless/painless-lang-spec/painless-operators-boolean.asciidoc diff --git a/docs/painless/painless-operators-general.asciidoc b/docs/painless/painless-lang-spec/painless-operators-general.asciidoc similarity index 100% rename from docs/painless/painless-operators-general.asciidoc rename to docs/painless/painless-lang-spec/painless-operators-general.asciidoc diff --git a/docs/painless/painless-operators-numeric.asciidoc b/docs/painless/painless-lang-spec/painless-operators-numeric.asciidoc similarity index 100% rename from docs/painless/painless-operators-numeric.asciidoc rename to docs/painless/painless-lang-spec/painless-operators-numeric.asciidoc diff --git a/docs/painless/painless-operators-reference.asciidoc b/docs/painless/painless-lang-spec/painless-operators-reference.asciidoc similarity index 100% rename from docs/painless/painless-operators-reference.asciidoc rename to docs/painless/painless-lang-spec/painless-operators-reference.asciidoc diff --git a/docs/painless/painless-operators.asciidoc b/docs/painless/painless-lang-spec/painless-operators.asciidoc similarity index 100% rename from docs/painless/painless-operators.asciidoc rename to docs/painless/painless-lang-spec/painless-operators.asciidoc diff --git a/docs/painless/painless-regexes.asciidoc b/docs/painless/painless-lang-spec/painless-regexes.asciidoc similarity index 100% rename from docs/painless/painless-regexes.asciidoc rename to docs/painless/painless-lang-spec/painless-regexes.asciidoc diff --git a/docs/painless/painless-scripts.asciidoc b/docs/painless/painless-lang-spec/painless-scripts.asciidoc similarity index 100% rename from docs/painless/painless-scripts.asciidoc rename to docs/painless/painless-lang-spec/painless-scripts.asciidoc diff --git a/docs/painless/painless-statements.asciidoc b/docs/painless/painless-lang-spec/painless-statements.asciidoc similarity index 100% rename from docs/painless/painless-statements.asciidoc rename to docs/painless/painless-lang-spec/painless-statements.asciidoc diff --git a/docs/painless/painless-types.asciidoc b/docs/painless/painless-lang-spec/painless-types.asciidoc similarity index 100% rename from docs/painless/painless-types.asciidoc rename to docs/painless/painless-lang-spec/painless-types.asciidoc diff --git a/docs/painless/painless-variables.asciidoc b/docs/painless/painless-lang-spec/painless-variables.asciidoc similarity index 100% rename from docs/painless/painless-variables.asciidoc rename to docs/painless/painless-lang-spec/painless-variables.asciidoc diff --git a/docs/painless/painless-xref.asciidoc b/docs/painless/painless-xref.asciidoc deleted file mode 100644 index 86407b3e697d6..0000000000000 --- a/docs/painless/painless-xref.asciidoc +++ /dev/null @@ -1,2 +0,0 @@ -Ready to start scripting with Painless? See {painless}/painless-getting-started.html[Getting Started with Painless] in the guide to the -{painless}/painless.html[Painless Scripting Language]. \ No newline at end of file diff --git a/docs/reference/ingest/ingest-node.asciidoc b/docs/reference/ingest/ingest-node.asciidoc index 1f8abc5675db9..b1a92222bec59 100644 --- a/docs/reference/ingest/ingest-node.asciidoc +++ b/docs/reference/ingest/ingest-node.asciidoc @@ -563,7 +563,7 @@ template for all indexes that hold data that needs pre-index processing. [[conditionals-with-regex]] === Conditionals with the Regular Expressions The `if` conditional is implemented as a Painless script, which requires -{painless}//painless-examples.html#modules-scripting-painless-regex[explicit support for regular expressions]. +{painless}//painless-regexes.html[explicit support for regular expressions]. `script.painless.regex.enabled: true` must be set in `elasticsearch.yml` to use regular expressions in the `if` condition. diff --git a/docs/reference/modules/scripting/painless.asciidoc b/docs/reference/modules/scripting/painless.asciidoc index ac48aad73d28f..6dd9b50db51ed 100644 --- a/docs/reference/modules/scripting/painless.asciidoc +++ b/docs/reference/modules/scripting/painless.asciidoc @@ -1,7 +1,32 @@ [[modules-scripting-painless]] === Painless Scripting Language -include::../../../painless/painless-description.asciidoc[] +_Painless_ is a simple, secure scripting language designed specifically for use +with Elasticsearch. It is the default scripting language for Elasticsearch and +can safely be used for inline and stored scripts. To get started with +Painless, see the {painless}/painless-guide.html[Painless Guide]. For a +detailed description of the Painless syntax and language features, see the +{painless}/painless-lang-spec.html[Painless Language Specification]. -Ready to start scripting with Painless? See {painless}/painless-getting-started.html[Getting Started with Painless] in the guide to the +[[painless-features]] +You can use Painless anywhere scripts can be used in Elasticsearch. Painless +provides: + +* Fast performance: Painless scripts https://benchmarks.elastic.co/index.html#search_qps_scripts[ +run several times faster] than the alternatives. + +* Safety: Fine-grained whitelist with method call/field granularity. See the +{painless}/painless-api-reference.html[Painless API Reference] for a +complete list of available classes and methods. + +* Optional typing: Variables and parameters can use explicit types or the +dynamic `def` type. + +* Syntax: Extends a subset of Java's syntax to provide additional scripting +language features. + +* Optimizations: Designed specifically for Elasticsearch scripting. + +Ready to start scripting with Painless? See the +{painless}/painless-guide.html[Painless Guide] for the {painless}/index.html[Painless Scripting Language]. \ No newline at end of file From be412ca83f0d4644712a7baa8a6330b1e72b2858 Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Tue, 21 May 2019 19:49:53 +0200 Subject: [PATCH 002/224] Remove Dead Code from Azure Repo Plugin (#42178) * None of this stuff is used --- .../repositories/azure/AzureBlobStore.java | 7 +----- .../repositories/azure/AzureRepository.java | 11 ++------- .../azure/AzureStorageService.java | 24 +++---------------- .../azure/AzureStorageSettings.java | 12 ++-------- .../repositories/azure/SocketAccess.java | 2 +- .../azure/AzureBlobStoreContainerTests.java | 15 ++++-------- .../azure/AzureBlobStoreTests.java | 16 ++++--------- .../azure/AzureStorageServiceMock.java | 14 ++--------- 8 files changed, 19 insertions(+), 82 deletions(-) diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobStore.java b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobStore.java index 697125fbd537d..7eeadc7f6475b 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobStore.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobStore.java @@ -45,8 +45,7 @@ public class AzureBlobStore implements BlobStore { private final String container; private final LocationMode locationMode; - public AzureBlobStore(RepositoryMetaData metadata, AzureStorageService service) - throws URISyntaxException, StorageException { + public AzureBlobStore(RepositoryMetaData metadata, AzureStorageService service) { this.container = Repository.CONTAINER_SETTING.get(metadata.settings()); this.clientName = Repository.CLIENT_NAME.get(metadata.settings()); this.service = service; @@ -69,10 +68,6 @@ public LocationMode getLocationMode() { return locationMode; } - public String getClientName() { - return clientName; - } - @Override public BlobContainer blobContainer(BlobPath path) { return new AzureBlobContainer(path, this); diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java index 33ee9b64c2683..7c3520918fc58 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java @@ -112,20 +112,16 @@ public AzureRepository(RepositoryMetaData metadata, Environment environment, Nam } } - // only use for testing @Override protected BlobStore getBlobStore() { return super.getBlobStore(); } - /** - * {@inheritDoc} - */ @Override - protected AzureBlobStore createBlobStore() throws URISyntaxException, StorageException { + protected AzureBlobStore createBlobStore() { final AzureBlobStore blobStore = new AzureBlobStore(metadata, storageService); - logger.debug((org.apache.logging.log4j.util.Supplier) () -> new ParameterizedMessage( + logger.debug(() -> new ParameterizedMessage( "using container [{}], chunk_size [{}], compress [{}], base_path [{}]", blobStore, chunkSize, isCompress(), basePath)); return blobStore; @@ -136,9 +132,6 @@ protected BlobPath basePath() { return basePath; } - /** - * {@inheritDoc} - */ @Override protected ByteSizeValue chunkSize() { return chunkSize; diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageService.java b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageService.java index 0d18592b8a7bb..89a78fd8045ee 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageService.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageService.java @@ -97,7 +97,7 @@ public Tuple> client(String clientNa } } - protected CloudBlobClient buildClient(AzureStorageSettings azureStorageSettings) throws InvalidKeyException, URISyntaxException { + private static CloudBlobClient buildClient(AzureStorageSettings azureStorageSettings) throws InvalidKeyException, URISyntaxException { final CloudBlobClient client = createClient(azureStorageSettings); // Set timeout option if the user sets cloud.azure.storage.timeout or // cloud.azure.storage.xxx.timeout (it's negative by default) @@ -115,12 +115,12 @@ protected CloudBlobClient buildClient(AzureStorageSettings azureStorageSettings) return client; } - protected CloudBlobClient createClient(AzureStorageSettings azureStorageSettings) throws InvalidKeyException, URISyntaxException { + private static CloudBlobClient createClient(AzureStorageSettings azureStorageSettings) throws InvalidKeyException, URISyntaxException { final String connectionString = azureStorageSettings.buildConnectionString(); return CloudStorageAccount.parse(connectionString).createCloudBlobClient(); } - protected OperationContext buildOperationContext(AzureStorageSettings azureStorageSettings) { + private static OperationContext buildOperationContext(AzureStorageSettings azureStorageSettings) { final OperationContext context = new OperationContext(); context.setProxy(azureStorageSettings.getProxy()); return context; @@ -146,24 +146,6 @@ public boolean doesContainerExist(String account, String container) throws URISy return SocketAccess.doPrivilegedException(() -> blobContainer.exists(null, null, client.v2().get())); } - public void deleteFiles(String account, String container, String path) throws URISyntaxException, StorageException { - final Tuple> client = client(account); - // container name must be lower case. - logger.trace(() -> new ParameterizedMessage("delete files container [{}], path [{}]", container, path)); - SocketAccess.doPrivilegedVoidException(() -> { - // list the blobs using a flat blob listing mode - final CloudBlobContainer blobContainer = client.v1().getContainerReference(container); - for (final ListBlobItem blobItem : blobContainer.listBlobs(path, true, EnumSet.noneOf(BlobListingDetails.class), null, - client.v2().get())) { - final String blobName = blobNameFromUri(blobItem.getUri()); - logger.trace(() -> new ParameterizedMessage("removing blob [{}] full URI was [{}]", blobName, blobItem.getUri())); - // don't call {@code #deleteBlob}, use the same client - final CloudBlockBlob azureBlob = blobContainer.getBlockBlobReference(blobName); - azureBlob.delete(DeleteSnapshotsOption.NONE, null, null, client.v2().get()); - } - }); - } - /** * Extract the blob name from a URI like https://myservice.azure.net/container/path/to/myfile * It should remove the container part (first part of the path) and gives path/to/myfile diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageSettings.java b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageSettings.java index 3f7a5df8f14b2..e57d855cb0ee5 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageSettings.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageSettings.java @@ -129,14 +129,6 @@ private AzureStorageSettings(String account, String key, String endpointSuffix, this.locationMode = LocationMode.PRIMARY_ONLY; } - public String getKey() { - return key; - } - - public String getAccount() { - return account; - } - public String getEndpointSuffix() { return endpointSuffix; } @@ -207,7 +199,7 @@ public static Map load(Settings settings) { // pkg private for tests /** Parse settings for a single client. */ - static AzureStorageSettings getClientSettings(Settings settings, String clientName) { + private static AzureStorageSettings getClientSettings(Settings settings, String clientName) { try (SecureString account = getConfigValue(settings, clientName, ACCOUNT_SETTING); SecureString key = getConfigValue(settings, clientName, KEY_SETTING)) { return new AzureStorageSettings(account.toString(), key.toString(), @@ -226,7 +218,7 @@ private static T getConfigValue(Settings settings, String clientName, return concreteSetting.get(settings); } - public static T getValue(Settings settings, String groupName, Setting setting) { + private static T getValue(Settings settings, String groupName, Setting setting) { final Setting.AffixKey k = (Setting.AffixKey) setting.getRawKey(); final String fullKey = k.toConcreteKey(groupName).toString(); return setting.getConcreteSetting(fullKey).get(settings); diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/SocketAccess.java b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/SocketAccess.java index da8b85430067c..1400cc5b06627 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/SocketAccess.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/SocketAccess.java @@ -48,7 +48,7 @@ public static T doPrivilegedIOException(PrivilegedExceptionAction operati } } - public static T doPrivilegedException(PrivilegedExceptionAction operation) throws StorageException, URISyntaxException { + public static T doPrivilegedException(PrivilegedExceptionAction operation) throws StorageException { SpecialPermission.check(); try { return AccessController.doPrivileged(operation); diff --git a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureBlobStoreContainerTests.java b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureBlobStoreContainerTests.java index a06dd7c3f28b1..13cc487a1c122 100644 --- a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureBlobStoreContainerTests.java +++ b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureBlobStoreContainerTests.java @@ -19,24 +19,17 @@ package org.elasticsearch.repositories.azure; -import com.microsoft.azure.storage.StorageException; import org.elasticsearch.cluster.metadata.RepositoryMetaData; import org.elasticsearch.common.blobstore.BlobStore; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.repositories.ESBlobStoreContainerTestCase; -import java.io.IOException; -import java.net.URISyntaxException; public class AzureBlobStoreContainerTests extends ESBlobStoreContainerTestCase { @Override - protected BlobStore newBlobStore() throws IOException { - try { - RepositoryMetaData repositoryMetaData = new RepositoryMetaData("azure", "ittest", Settings.EMPTY); - AzureStorageServiceMock client = new AzureStorageServiceMock(); - return new AzureBlobStore(repositoryMetaData, client); - } catch (URISyntaxException | StorageException e) { - throw new IOException(e); - } + protected BlobStore newBlobStore() { + RepositoryMetaData repositoryMetaData = new RepositoryMetaData("azure", "ittest", Settings.EMPTY); + AzureStorageServiceMock client = new AzureStorageServiceMock(); + return new AzureBlobStore(repositoryMetaData, client); } } diff --git a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureBlobStoreTests.java b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureBlobStoreTests.java index 9a0c9039d089c..67d30fda05b69 100644 --- a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureBlobStoreTests.java +++ b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureBlobStoreTests.java @@ -18,25 +18,17 @@ */ package org.elasticsearch.repositories.azure; -import com.microsoft.azure.storage.StorageException; import org.elasticsearch.cluster.metadata.RepositoryMetaData; import org.elasticsearch.common.blobstore.BlobStore; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.repositories.ESBlobStoreTestCase; -import java.io.IOException; -import java.net.URISyntaxException; - public class AzureBlobStoreTests extends ESBlobStoreTestCase { @Override - protected BlobStore newBlobStore() throws IOException { - try { - RepositoryMetaData repositoryMetaData = new RepositoryMetaData("azure", "ittest", Settings.EMPTY); - AzureStorageServiceMock client = new AzureStorageServiceMock(); - return new AzureBlobStore(repositoryMetaData, client); - } catch (URISyntaxException | StorageException e) { - throw new IOException(e); - } + protected BlobStore newBlobStore() { + RepositoryMetaData repositoryMetaData = new RepositoryMetaData("azure", "ittest", Settings.EMPTY); + AzureStorageServiceMock client = new AzureStorageServiceMock(); + return new AzureBlobStore(repositoryMetaData, client); } } diff --git a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureStorageServiceMock.java b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureStorageServiceMock.java index 5f3072e1ad9ed..17502a1d1f982 100644 --- a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureStorageServiceMock.java +++ b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureStorageServiceMock.java @@ -33,7 +33,6 @@ import java.io.IOException; import java.io.InputStream; import java.net.SocketPermission; -import java.net.URISyntaxException; import java.nio.file.FileAlreadyExistsException; import java.nio.file.NoSuchFileException; import java.security.AccessController; @@ -61,21 +60,13 @@ public boolean doesContainerExist(String account, String container) { return true; } - @Override - public void deleteFiles(String account, String container, String path) throws URISyntaxException, StorageException { - final Map blobs = listBlobsByPrefix(account, container, path, null); - for (String key : blobs.keySet()) { - deleteBlob(account, container, key); - } - } - @Override public boolean blobExists(String account, String container, String blob) { return blobs.containsKey(blob); } @Override - public void deleteBlob(String account, String container, String blob) throws URISyntaxException, StorageException { + public void deleteBlob(String account, String container, String blob) throws StorageException { if (blobs.remove(blob) == null) { throw new StorageException("BlobNotFound", "[" + blob + "] does not exist.", 404, null, null); } @@ -109,8 +100,7 @@ public Map listBlobsByPrefix(String account, String contai @Override public void writeBlob(String account, String container, String blobName, InputStream inputStream, long blobSize, - boolean failIfAlreadyExists) - throws URISyntaxException, StorageException, FileAlreadyExistsException { + boolean failIfAlreadyExists) throws StorageException, FileAlreadyExistsException { if (failIfAlreadyExists && blobs.containsKey(blobName)) { throw new FileAlreadyExistsException(blobName); } From 4766ffa032ef0036e77c9d57165d36d8e0de9258 Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Tue, 21 May 2019 20:37:45 +0200 Subject: [PATCH 003/224] Make unwrapCorrupt Check Suppressed Ex. (#41889) * Make unwrapCorrupt Check Suppressed Ex. * As discussed in #24800 we want to check for suppressed corruption indicating exceptions here as well to more reliably categorize corruption related exceptions * Closes #24800, 41201 --- .../org/elasticsearch/ExceptionsHelper.java | 36 +++++++++++++++++-- .../elasticsearch/ExceptionsHelperTests.java | 28 +++++++++++++++ .../recovery/RecoverySourceHandlerTests.java | 4 ++- 3 files changed, 64 insertions(+), 4 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/ExceptionsHelper.java b/server/src/main/java/org/elasticsearch/ExceptionsHelper.java index e4269a375dd6c..48461ffe30d4b 100644 --- a/server/src/main/java/org/elasticsearch/ExceptionsHelper.java +++ b/server/src/main/java/org/elasticsearch/ExceptionsHelper.java @@ -175,12 +175,42 @@ public static T useOrSuppress(T first, T second) { return first; } + private static final List> CORRUPTION_EXCEPTIONS = + List.of(CorruptIndexException.class, IndexFormatTooOldException.class, IndexFormatTooNewException.class); + + /** + * Looks at the given Throwable's and its cause(s) as well as any suppressed exceptions on the Throwable as well as its causes + * and returns the first corruption indicating exception (as defined by {@link #CORRUPTION_EXCEPTIONS}) it finds. + * @param t Throwable + * @return Corruption indicating exception if one is found, otherwise {@code null} + */ public static IOException unwrapCorruption(Throwable t) { - return (IOException) unwrap(t, CorruptIndexException.class, - IndexFormatTooOldException.class, - IndexFormatTooNewException.class); + if (t != null) { + do { + for (Class clazz : CORRUPTION_EXCEPTIONS) { + if (clazz.isInstance(t)) { + return (IOException) t; + } + } + for (Throwable suppressed : t.getSuppressed()) { + IOException corruptionException = unwrapCorruption(suppressed); + if (corruptionException != null) { + return corruptionException; + } + } + } while ((t = t.getCause()) != null); + } + return null; } + /** + * Looks at the given Throwable and its cause(s) and returns the first Throwable that is of one of the given classes or {@code null} + * if no matching Throwable is found. Unlike {@link #unwrapCorruption} this method does only check the given Throwable and its causes + * but does not look at any suppressed exceptions. + * @param t Throwable + * @param clazzes Classes to look for + * @return Matching Throwable if one is found, otherwise {@code null} + */ public static Throwable unwrap(Throwable t, Class... clazzes) { if (t != null) { do { diff --git a/server/src/test/java/org/elasticsearch/ExceptionsHelperTests.java b/server/src/test/java/org/elasticsearch/ExceptionsHelperTests.java index 1d2a4ca6d5f75..2de2f259e6ff1 100644 --- a/server/src/test/java/org/elasticsearch/ExceptionsHelperTests.java +++ b/server/src/test/java/org/elasticsearch/ExceptionsHelperTests.java @@ -20,6 +20,7 @@ package org.elasticsearch; import org.apache.commons.codec.DecoderException; +import org.apache.lucene.index.CorruptIndexException; import org.elasticsearch.action.OriginalIndices; import org.elasticsearch.action.ShardOperationFailedException; import org.elasticsearch.action.search.ShardSearchFailure; @@ -183,4 +184,31 @@ public void testGroupByNullIndex() { ShardOperationFailedException[] groupBy = ExceptionsHelper.groupBy(failures); assertThat(groupBy.length, equalTo(2)); } + + public void testUnwrapCorruption() { + final Throwable corruptIndexException = new CorruptIndexException("corrupt", "resource"); + assertThat(ExceptionsHelper.unwrapCorruption(corruptIndexException), equalTo(corruptIndexException)); + + final Throwable corruptionAsCause = new RuntimeException(corruptIndexException); + assertThat(ExceptionsHelper.unwrapCorruption(corruptionAsCause), equalTo(corruptIndexException)); + + final Throwable corruptionSuppressed = new RuntimeException(); + corruptionSuppressed.addSuppressed(corruptIndexException); + assertThat(ExceptionsHelper.unwrapCorruption(corruptionSuppressed), equalTo(corruptIndexException)); + + final Throwable corruptionSuppressedOnCause = new RuntimeException(new RuntimeException()); + corruptionSuppressedOnCause.getCause().addSuppressed(corruptIndexException); + assertThat(ExceptionsHelper.unwrapCorruption(corruptionSuppressedOnCause), equalTo(corruptIndexException)); + + final Throwable corruptionCauseOnSuppressed = new RuntimeException(); + corruptionCauseOnSuppressed.addSuppressed(new RuntimeException(corruptIndexException)); + assertThat(ExceptionsHelper.unwrapCorruption(corruptionCauseOnSuppressed), equalTo(corruptIndexException)); + + assertThat(ExceptionsHelper.unwrapCorruption(new RuntimeException()), nullValue()); + assertThat(ExceptionsHelper.unwrapCorruption(new RuntimeException(new RuntimeException())), nullValue()); + + final Throwable withSuppressedException = new RuntimeException(); + withSuppressedException.addSuppressed(new RuntimeException()); + assertThat(ExceptionsHelper.unwrapCorruption(withSuppressedException), nullValue()); + } } diff --git a/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java b/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java index b63c7a2e0e8f6..b49bef57aceb1 100644 --- a/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java +++ b/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java @@ -438,10 +438,12 @@ protected void failEngine(IOException cause) { handler.sendFiles(store, metas.toArray(new StoreFileMetaData[0]), () -> 0); fail("exception index"); } catch (RuntimeException ex) { - assertNull(ExceptionsHelper.unwrapCorruption(ex)); + final IOException unwrappedCorruption = ExceptionsHelper.unwrapCorruption(ex); if (throwCorruptedIndexException) { + assertNotNull(unwrappedCorruption); assertEquals(ex.getMessage(), "[File corruption occurred on recovery but checksums are ok]"); } else { + assertNull(unwrappedCorruption); assertEquals(ex.getMessage(), "boom"); } } catch (CorruptIndexException ex) { From 07ab45a426912a78d3951e29169a47885c1f3246 Mon Sep 17 00:00:00 2001 From: Zachary Tong Date: Tue, 21 May 2019 15:07:51 -0400 Subject: [PATCH 004/224] Mute transforms_stats yaml test AwaitsFix https://github.com/elastic/elasticsearch/issues/42309 --- .../rest-api-spec/test/data_frame/transforms_stats.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_stats.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_stats.yml index 93c942f0733a8..61117b138bff7 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_stats.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_stats.yml @@ -42,6 +42,9 @@ teardown: --- "Test get transform stats": + - skip: + version: "all" + reason: "AwaitsFix https://github.com/elastic/elasticsearch/issues/42309" - do: data_frame.get_data_frame_transform_stats: transform_id: "airline-transform-stats" From f5e54b495dba85b89b25387a673336ef4a73b653 Mon Sep 17 00:00:00 2001 From: Julie Tibshirani Date: Tue, 21 May 2019 12:48:13 -0700 Subject: [PATCH 005/224] Deprecate support for chained multi-fields. (#41926) We now issue a deprecation warning if a multi-field definition contains a `[fields]` entry. This PR also simplifies the definition of `MultiFieldParserContext`. Addresses #41267. --- .../elasticsearch/index/mapper/Mapper.java | 8 ++-- .../index/mapper/TypeParsers.java | 14 ++++++- .../mapper/ExternalFieldMapperTests.java | 12 ++++++ .../index/mapper/TypeParsersTests.java | 37 +++++++++++++++++++ 4 files changed, 65 insertions(+), 6 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/index/mapper/Mapper.java b/server/src/main/java/org/elasticsearch/index/mapper/Mapper.java index d98630e5f765e..5de5394a94abe 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/Mapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/Mapper.java @@ -136,10 +136,7 @@ public Supplier queryShardContextSupplier() { protected Function similarityLookupService() { return similarityLookupService; } public ParserContext createMultiFieldContext(ParserContext in) { - return new MultiFieldParserContext(in) { - @Override - public boolean isWithinMultiField() { return true; } - }; + return new MultiFieldParserContext(in); } static class MultiFieldParserContext extends ParserContext { @@ -147,6 +144,9 @@ static class MultiFieldParserContext extends ParserContext { super(in.type(), in.similarityLookupService(), in.mapperService(), in.typeParsers(), in.indexVersionCreated(), in.queryShardContextSupplier()); } + + @Override + public boolean isWithinMultiField() { return true; } } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/TypeParsers.java b/server/src/main/java/org/elasticsearch/index/mapper/TypeParsers.java index 77d7be62fc1b9..9848a23cac11b 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/TypeParsers.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/TypeParsers.java @@ -19,8 +19,10 @@ package org.elasticsearch.index.mapper; +import org.apache.logging.log4j.LogManager; import org.apache.lucene.index.IndexOptions; import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.time.DateFormatter; import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.index.analysis.AnalysisMode; @@ -37,6 +39,7 @@ import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeStringValue; public class TypeParsers { + private static final DeprecationLogger deprecationLogger = new DeprecationLogger(LogManager.getLogger(TypeParsers.class)); public static final String DOC_VALUES = "doc_values"; public static final String INDEX_OPTIONS_DOCS = "docs"; @@ -214,11 +217,18 @@ public static void parseField(FieldMapper.Builder builder, String name, Map multiFieldsPropNodes; + parserContext = parserContext.createMultiFieldContext(parserContext); + final Map multiFieldsPropNodes; if (propNode instanceof List && ((List) propNode).isEmpty()) { multiFieldsPropNodes = Collections.emptyMap(); } else if (propNode instanceof Map) { diff --git a/server/src/test/java/org/elasticsearch/index/mapper/ExternalFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/ExternalFieldMapperTests.java index d80776007aba8..e5d3040f7a3bc 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/ExternalFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/ExternalFieldMapperTests.java @@ -169,6 +169,12 @@ public void testExternalValuesWithMultifield() throws Exception { assertThat(raw, notNullValue()); assertThat(raw.binaryValue(), is(new BytesRef("foo"))); + + assertWarnings("At least one multi-field, [field], was " + + "encountered that itself contains a multi-field. Defining multi-fields within a multi-field is deprecated and will " + + "no longer be supported in 8.0. To resolve the issue, all instances of [fields] that occur within a [fields] block " + + "should be removed from the mappings, either by flattening the chained [fields] blocks into a single level, or " + + "switching to [copy_to] if appropriate."); } public void testExternalValuesWithMultifieldTwoLevels() throws Exception { @@ -234,5 +240,11 @@ public void testExternalValuesWithMultifieldTwoLevels() throws Exception { assertThat(doc.rootDoc().getField("field.raw"), notNullValue()); assertThat(doc.rootDoc().getField("field.raw").stringValue(), is("foo")); + + assertWarnings("At least one multi-field, [field], was " + + "encountered that itself contains a multi-field. Defining multi-fields within a multi-field is deprecated and will " + + "no longer be supported in 8.0. To resolve the issue, all instances of [fields] that occur within a [fields] block " + + "should be removed from the mappings, either by flattening the chained [fields] blocks into a single level, or " + + "switching to [copy_to] if appropriate."); } } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/TypeParsersTests.java b/server/src/test/java/org/elasticsearch/index/mapper/TypeParsersTests.java index bc59c59aa54ab..70f469b96370c 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/TypeParsersTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/TypeParsersTests.java @@ -24,7 +24,11 @@ import org.apache.lucene.analysis.standard.StandardAnalyzer; import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.analysis.AbstractTokenFilterFactory; import org.elasticsearch.index.analysis.AnalysisMode; @@ -36,6 +40,7 @@ import org.elasticsearch.index.analysis.TokenFilterFactory; import org.elasticsearch.test.ESTestCase; +import java.io.IOException; import java.util.Collections; import java.util.HashMap; import java.util.Map; @@ -157,6 +162,38 @@ public void testParseTextFieldCheckAnalyzerWithSearchAnalyzerAnalysisMode() { TypeParsers.parseTextField(builder, "name", new HashMap<>(fieldNode), parserContext); } + public void testMultiFieldWithinMultiField() throws IOException { + TextFieldMapper.Builder builder = new TextFieldMapper.Builder("textField"); + + XContentBuilder mapping = XContentFactory.jsonBuilder().startObject() + .field("type", "keyword") + .startObject("fields") + .startObject("sub-field") + .field("type", "keyword") + .startObject("fields") + .startObject("sub-sub-field") + .field("type", "keyword") + .endObject() + .endObject() + .endObject() + .endObject() + .endObject(); + + Map fieldNode = XContentHelper.convertToMap( + BytesReference.bytes(mapping), true, mapping.contentType()).v2(); + + Mapper.TypeParser typeParser = new KeywordFieldMapper.TypeParser(); + Mapper.TypeParser.ParserContext parserContext = new Mapper.TypeParser.ParserContext("type", + null, null, type -> typeParser, Version.CURRENT, null); + + TypeParsers.parseField(builder, "some-field", fieldNode, parserContext); + assertWarnings("At least one multi-field, [sub-field], was " + + "encountered that itself contains a multi-field. Defining multi-fields within a multi-field is deprecated and will " + + "no longer be supported in 8.0. To resolve the issue, all instances of [fields] that occur within a [fields] block " + + "should be removed from the mappings, either by flattening the chained [fields] blocks into a single level, or " + + "switching to [copy_to] if appropriate."); + } + private Analyzer createAnalyzerWithMode(String name, AnalysisMode mode) { TokenFilterFactory tokenFilter = new AbstractTokenFilterFactory(indexSettings, name, Settings.EMPTY) { @Override From 1dcaf4f1f8d98a6cd20a531a56ed9dd3a4588747 Mon Sep 17 00:00:00 2001 From: Zachary Tong Date: Tue, 21 May 2019 15:45:34 -0400 Subject: [PATCH 006/224] Mute another transforms_stats yaml test AwaitsFix https://github.com/elastic/elasticsearch/issues/42309 --- .../rest-api-spec/test/data_frame/transforms_stats.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_stats.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_stats.yml index 61117b138bff7..f552e4710c781 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_stats.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_stats.yml @@ -149,6 +149,9 @@ teardown: --- "Test get multiple transform stats where one does not have a task": + - skip: + version: "all" + reason: "AwaitsFix https://github.com/elastic/elasticsearch/issues/42309" - do: data_frame.put_data_frame_transform: transform_id: "airline-transform-stats-dos" From 4b0f36d361e6dc108cb9a9978f0bd1b31016baf8 Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Tue, 21 May 2019 15:53:28 -0400 Subject: [PATCH 007/224] Execute actions under permit in primary mode only (#42241) Today when executing an action on a primary shard under permit, we do not enforce that the shard is in primary mode before executing the action. This commit addresses this by wrapping actions to be executed under permit in a check that the shard is in primary mode before executing the action. --- .../elasticsearch/ElasticsearchException.java | 7 +- .../TransportReplicationAction.java | 17 ++- .../index/seqno/RetentionLeaseActions.java | 17 +-- .../elasticsearch/index/shard/IndexShard.java | 25 +++- .../shard/ShardNotInPrimaryModeException.java | 36 +++++ .../ExceptionSerializationTests.java | 2 + .../TransportReplicationActionTests.java | 51 ++++++- .../index/shard/IndexShardTests.java | 128 ++++++++++++++---- 8 files changed, 235 insertions(+), 48 deletions(-) create mode 100644 server/src/main/java/org/elasticsearch/index/shard/ShardNotInPrimaryModeException.java diff --git a/server/src/main/java/org/elasticsearch/ElasticsearchException.java b/server/src/main/java/org/elasticsearch/ElasticsearchException.java index 861228d221778..85df20d849afa 100644 --- a/server/src/main/java/org/elasticsearch/ElasticsearchException.java +++ b/server/src/main/java/org/elasticsearch/ElasticsearchException.java @@ -1022,7 +1022,12 @@ private enum ElasticsearchExceptionHandle { org.elasticsearch.index.seqno.RetentionLeaseNotFoundException.class, org.elasticsearch.index.seqno.RetentionLeaseNotFoundException::new, 154, - Version.V_6_7_0); + Version.V_6_7_0), + SHARD_NOT_IN_PRIMARY_MODE_EXCEPTION( + org.elasticsearch.index.shard.ShardNotInPrimaryModeException.class, + org.elasticsearch.index.shard.ShardNotInPrimaryModeException::new, + 155, + Version.V_6_8_1); final Class exceptionClass; final CheckedFunction constructor; diff --git a/server/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java b/server/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java index 6edaa95033997..d19009433deb5 100644 --- a/server/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java +++ b/server/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java @@ -63,6 +63,7 @@ import org.elasticsearch.index.shard.ReplicationGroup; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.shard.ShardNotFoundException; +import org.elasticsearch.index.shard.ShardNotInPrimaryModeException; import org.elasticsearch.indices.IndexClosedException; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.node.NodeClosedException; @@ -307,10 +308,18 @@ protected void doRun() throws Exception { primaryRequest.getTargetAllocationID(), primaryRequest.getPrimaryTerm(), actualTerm); } - acquirePrimaryOperationPermit(indexShard, primaryRequest.getRequest(), ActionListener.wrap( - releasable -> runWithPrimaryShardReference(new PrimaryShardReference(indexShard, releasable)), - this::onFailure - )); + acquirePrimaryOperationPermit( + indexShard, + primaryRequest.getRequest(), + ActionListener.wrap( + releasable -> runWithPrimaryShardReference(new PrimaryShardReference(indexShard, releasable)), + e -> { + if (e instanceof ShardNotInPrimaryModeException) { + onFailure(new ReplicationOperation.RetryOnPrimaryException(shardId, "shard is not in primary mode", e)); + } else { + onFailure(e); + } + })); } void runWithPrimaryShardReference(final PrimaryShardReference primaryShardReference) { diff --git a/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseActions.java b/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseActions.java index c69a4c6fab042..74c98bf3dca19 100644 --- a/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseActions.java +++ b/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseActions.java @@ -28,8 +28,6 @@ import org.elasticsearch.action.support.single.shard.TransportSingleShardAction; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; -import org.elasticsearch.cluster.routing.IndexShardRoutingTable; -import org.elasticsearch.cluster.routing.PlainShardIterator; import org.elasticsearch.cluster.routing.ShardsIterator; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; @@ -45,7 +43,6 @@ import org.elasticsearch.transport.TransportService; import java.io.IOException; -import java.util.Collections; import java.util.Objects; import java.util.function.Supplier; @@ -88,14 +85,10 @@ abstract static class TransportRetentionLeaseAction> extend @Override protected ShardsIterator shards(final ClusterState state, final InternalRequest request) { - final IndexShardRoutingTable shardRoutingTable = state + return state .routingTable() - .shardRoutingTable(request.concreteIndex(), request.request().getShardId().id()); - if (shardRoutingTable.primaryShard().active()) { - return shardRoutingTable.primaryShardIt(); - } else { - return new PlainShardIterator(request.request().getShardId(), Collections.emptyList()); - } + .shardRoutingTable(request.concreteIndex(), request.request().getShardId().id()) + .primaryShardIt(); } @Override @@ -174,6 +167,7 @@ void doRetentionLeaseAction(final IndexShard indexShard, final AddRequest reques protected Writeable.Reader getResponseReader() { return Response::new; } + } @Override @@ -400,9 +394,10 @@ public static class Response extends ActionResponse { public Response() { } - Response(StreamInput in) throws IOException { + Response(final StreamInput in) throws IOException { super(in); } + } } diff --git a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java index 11e4fb81d9fbe..da5ee8f8363ff 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java @@ -20,7 +20,6 @@ package org.elasticsearch.index.shard; import com.carrotsearch.hppc.ObjectLongMap; - import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.lucene.index.CheckIndex; @@ -2496,7 +2495,7 @@ public void acquirePrimaryOperationPermit(ActionListener onPermitAcq verifyNotClosed(); assert shardRouting.primary() : "acquirePrimaryOperationPermit should only be called on primary shard: " + shardRouting; - indexShardOperationPermits.acquire(onPermitAcquired, executorOnDelay, false, debugInfo); + indexShardOperationPermits.acquire(wrapPrimaryOperationPermitListener(onPermitAcquired), executorOnDelay, false, debugInfo); } /** @@ -2507,7 +2506,27 @@ public void acquireAllPrimaryOperationsPermits(final ActionListener verifyNotClosed(); assert shardRouting.primary() : "acquireAllPrimaryOperationsPermits should only be called on primary shard: " + shardRouting; - asyncBlockOperations(onPermitAcquired, timeout.duration(), timeout.timeUnit()); + asyncBlockOperations(wrapPrimaryOperationPermitListener(onPermitAcquired), timeout.duration(), timeout.timeUnit()); + } + + /** + * Wraps the action to run on a primary after acquiring permit. This wrapping is used to check if the shard is in primary mode before + * executing the action. + * + * @param listener the listener to wrap + * @return the wrapped listener + */ + private ActionListener wrapPrimaryOperationPermitListener(final ActionListener listener) { + return ActionListener.delegateFailure( + listener, + (l, r) -> { + if (replicationTracker.isPrimaryMode()) { + l.onResponse(r); + } else { + r.close(); + l.onFailure(new ShardNotInPrimaryModeException(shardId, state)); + } + }); } private void asyncBlockOperations(ActionListener onPermitAcquired, long timeout, TimeUnit timeUnit) { diff --git a/server/src/main/java/org/elasticsearch/index/shard/ShardNotInPrimaryModeException.java b/server/src/main/java/org/elasticsearch/index/shard/ShardNotInPrimaryModeException.java new file mode 100644 index 0000000000000..8bc23dcdd00f7 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/index/shard/ShardNotInPrimaryModeException.java @@ -0,0 +1,36 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.shard; + +import org.elasticsearch.common.io.stream.StreamInput; + +import java.io.IOException; + +public class ShardNotInPrimaryModeException extends IllegalIndexShardStateException { + + public ShardNotInPrimaryModeException(final ShardId shardId, final IndexShardState currentState) { + super(shardId, currentState, "shard is not in primary mode"); + } + + public ShardNotInPrimaryModeException(final StreamInput in) throws IOException { + super(in); + } + +} diff --git a/server/src/test/java/org/elasticsearch/ExceptionSerializationTests.java b/server/src/test/java/org/elasticsearch/ExceptionSerializationTests.java index 1fac56886de45..a0aafbb41d371 100644 --- a/server/src/test/java/org/elasticsearch/ExceptionSerializationTests.java +++ b/server/src/test/java/org/elasticsearch/ExceptionSerializationTests.java @@ -66,6 +66,7 @@ import org.elasticsearch.index.shard.IllegalIndexShardStateException; import org.elasticsearch.index.shard.IndexShardState; import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.index.shard.ShardNotInPrimaryModeException; import org.elasticsearch.indices.IndexTemplateMissingException; import org.elasticsearch.indices.InvalidIndexTemplateException; import org.elasticsearch.indices.recovery.RecoverFilesRecoveryException; @@ -816,6 +817,7 @@ public void testIds() { ids.put(152, NoSuchRemoteClusterException.class); ids.put(153, RetentionLeaseAlreadyExistsException.class); ids.put(154, RetentionLeaseNotFoundException.class); + ids.put(155, ShardNotInPrimaryModeException.class); Map, Integer> reverse = new HashMap<>(); for (Map.Entry> entry : ids.entrySet()) { diff --git a/server/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java b/server/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java index 12cc9097b652c..4459aa5556988 100644 --- a/server/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java @@ -64,9 +64,11 @@ import org.elasticsearch.index.IndexService; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.IndexShardClosedException; +import org.elasticsearch.index.shard.IndexShardState; import org.elasticsearch.index.shard.ReplicationGroup; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.shard.ShardNotFoundException; +import org.elasticsearch.index.shard.ShardNotInPrimaryModeException; import org.elasticsearch.indices.IndexClosedException; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; @@ -390,6 +392,43 @@ public void testNotStartedPrimary() { assertIndexShardCounter(0); } + public void testShardNotInPrimaryMode() { + final String index = "test"; + final ShardId shardId = new ShardId(index, "_na_", 0); + final ClusterState state = state(index, true, ShardRoutingState.RELOCATING); + setState(clusterService, state); + final ReplicationTask task = maybeTask(); + final Request request = new Request(shardId); + PlainActionFuture listener = new PlainActionFuture<>(); + final AtomicBoolean executed = new AtomicBoolean(); + + final ShardRouting primaryShard = state.getRoutingTable().shardRoutingTable(shardId).primaryShard(); + final long primaryTerm = state.metaData().index(index).primaryTerm(shardId.id()); + final TransportReplicationAction.ConcreteShardRequest primaryRequest + = new TransportReplicationAction.ConcreteShardRequest<>(request, primaryShard.allocationId().getId(), primaryTerm); + + isPrimaryMode.set(false); + + new TestAction(Settings.EMPTY, "internal:test-action", transportService, clusterService, shardStateAction, threadPool) { + @Override + protected void shardOperationOnPrimary(Request shardRequest, IndexShard primary, + ActionListener> listener) { + assertPhase(task, "primary"); + assertFalse(executed.getAndSet(true)); + super.shardOperationOnPrimary(shardRequest, primary, listener); + } + }.new AsyncPrimaryAction(primaryRequest, listener, task).run(); + + assertFalse(executed.get()); + assertIndexShardCounter(0); // no permit should be held + + final ExecutionException e = expectThrows(ExecutionException.class, listener::get); + assertThat(e.getCause(), instanceOf(ReplicationOperation.RetryOnPrimaryException.class)); + assertThat(e.getCause(), hasToString(containsString("shard is not in primary mode"))); + assertThat(e.getCause().getCause(), instanceOf(ShardNotInPrimaryModeException.class)); + assertThat(e.getCause().getCause(), hasToString(containsString("shard is not in primary mode"))); + } + /** * When relocating a primary shard, there is a cluster state update at the end of relocation where the active primary is switched from * the relocation source to the relocation target. If relocation source receives and processes this cluster state @@ -1126,6 +1165,8 @@ private void assertIndexShardCounter(int expected) { private final AtomicBoolean isRelocated = new AtomicBoolean(false); + private final AtomicBoolean isPrimaryMode = new AtomicBoolean(true); + /** * Sometimes build a ReplicationTask for tracking the phase of the * TransportReplicationAction. Since TransportReplicationAction has to work @@ -1271,10 +1312,16 @@ private IndexService mockIndexService(final IndexMetaData indexMetaData, Cluster private IndexShard mockIndexShard(ShardId shardId, ClusterService clusterService) { final IndexShard indexShard = mock(IndexShard.class); when(indexShard.shardId()).thenReturn(shardId); + when(indexShard.state()).thenReturn(IndexShardState.STARTED); doAnswer(invocation -> { ActionListener callback = (ActionListener) invocation.getArguments()[0]; - count.incrementAndGet(); - callback.onResponse(count::decrementAndGet); + if (isPrimaryMode.get()) { + count.incrementAndGet(); + callback.onResponse(count::decrementAndGet); + + } else { + callback.onFailure(new ShardNotInPrimaryModeException(shardId, IndexShardState.STARTED)); + } return null; }).when(indexShard).acquirePrimaryOperationPermit(any(ActionListener.class), anyString(), anyObject()); doAnswer(invocation -> { diff --git a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java index 0be7b4433fac3..786d5bc5e8df8 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java @@ -636,11 +636,13 @@ public void testOperationPermitsOnPrimaryShards() throws Exception { final ShardId shardId = new ShardId("test", "_na_", 0); final IndexShard indexShard; + final boolean isPrimaryMode; if (randomBoolean()) { // relocation target indexShard = newShard(newShardRouting(shardId, "local_node", "other node", true, ShardRoutingState.INITIALIZING, AllocationId.newRelocation(AllocationId.newInitializing()))); assertEquals(0, indexShard.getActiveOperationsCount()); + isPrimaryMode = false; } else if (randomBoolean()) { // simulate promotion indexShard = newStartedShard(false); @@ -660,21 +662,60 @@ public void testOperationPermitsOnPrimaryShards() throws Exception { if (randomBoolean()) { assertBusy(() -> assertEquals(0, indexShard.getActiveOperationsCount())); } + isPrimaryMode = true; } else { indexShard = newStartedShard(true); assertEquals(0, indexShard.getActiveOperationsCount()); + isPrimaryMode = true; } - final long primaryTerm = indexShard.getPendingPrimaryTerm(); - Releasable operation1 = acquirePrimaryOperationPermitBlockingly(indexShard); - assertEquals(1, indexShard.getActiveOperationsCount()); - Releasable operation2 = acquirePrimaryOperationPermitBlockingly(indexShard); - assertEquals(2, indexShard.getActiveOperationsCount()); + assert indexShard.getReplicationTracker().isPrimaryMode() == isPrimaryMode; + final long pendingPrimaryTerm = indexShard.getPendingPrimaryTerm(); + if (isPrimaryMode) { + Releasable operation1 = acquirePrimaryOperationPermitBlockingly(indexShard); + assertEquals(1, indexShard.getActiveOperationsCount()); + Releasable operation2 = acquirePrimaryOperationPermitBlockingly(indexShard); + assertEquals(2, indexShard.getActiveOperationsCount()); - Releasables.close(operation1, operation2); - assertEquals(0, indexShard.getActiveOperationsCount()); + Releasables.close(operation1, operation2); + assertEquals(0, indexShard.getActiveOperationsCount()); + } else { + indexShard.acquirePrimaryOperationPermit( + new ActionListener<>() { + @Override + public void onResponse(final Releasable releasable) { + throw new AssertionError(); + } + + @Override + public void onFailure(final Exception e) { + assertThat(e, instanceOf(ShardNotInPrimaryModeException.class)); + assertThat(e, hasToString(containsString("shard is not in primary mode"))); + } + }, + ThreadPool.Names.SAME, + "test"); + + final CountDownLatch latch = new CountDownLatch(1); + indexShard.acquireAllPrimaryOperationsPermits( + new ActionListener<>() { + @Override + public void onResponse(final Releasable releasable) { + throw new AssertionError(); + } + + @Override + public void onFailure(final Exception e) { + assertThat(e, instanceOf(ShardNotInPrimaryModeException.class)); + assertThat(e, hasToString(containsString("shard is not in primary mode"))); + latch.countDown(); + } + }, + TimeValue.timeValueSeconds(30)); + latch.await(); + } if (Assertions.ENABLED && indexShard.routingEntry().isRelocationTarget() == false) { - assertThat(expectThrows(AssertionError.class, () -> indexShard.acquireReplicaOperationPermit(primaryTerm, + assertThat(expectThrows(AssertionError.class, () -> indexShard.acquireReplicaOperationPermit(pendingPrimaryTerm, indexShard.getGlobalCheckpoint(), indexShard.getMaxSeqNoOfUpdatesOrDeletes(), new ActionListener() { @Override public void onResponse(Releasable releasable) { @@ -1688,10 +1729,9 @@ public void testLockingBeforeAndAfterRelocated() throws Exception { // recovery can be now finalized recoveryThread.join(); assertTrue(shard.isRelocatedPrimary()); - try (Releasable ignored = acquirePrimaryOperationPermitBlockingly(shard)) { - // lock can again be acquired - assertTrue(shard.isRelocatedPrimary()); - } + final ExecutionException e = expectThrows(ExecutionException.class, () -> acquirePrimaryOperationPermitBlockingly(shard)); + assertThat(e.getCause(), instanceOf(ShardNotInPrimaryModeException.class)); + assertThat(e.getCause(), hasToString(containsString("shard is not in primary mode"))); closeShards(shard); } @@ -1699,30 +1739,64 @@ public void testLockingBeforeAndAfterRelocated() throws Exception { public void testDelayedOperationsBeforeAndAfterRelocated() throws Exception { final IndexShard shard = newStartedShard(true); IndexShardTestCase.updateRoutingEntry(shard, ShardRoutingHelper.relocate(shard.routingEntry(), "other_node")); + final CountDownLatch startRecovery = new CountDownLatch(1); + final CountDownLatch relocationStarted = new CountDownLatch(1); Thread recoveryThread = new Thread(() -> { try { - shard.relocated(primaryContext -> {}); + startRecovery.await(); + shard.relocated(primaryContext -> relocationStarted.countDown()); } catch (InterruptedException e) { throw new RuntimeException(e); } }); recoveryThread.start(); - List> onLockAcquiredActions = new ArrayList<>(); - for (int i = 0; i < 10; i++) { - PlainActionFuture onLockAcquired = new PlainActionFuture() { - @Override - public void onResponse(Releasable releasable) { - releasable.close(); - super.onResponse(releasable); - } - }; - shard.acquirePrimaryOperationPermit(onLockAcquired, ThreadPool.Names.WRITE, "i_" + i); - onLockAcquiredActions.add(onLockAcquired); - } - for (PlainActionFuture onLockAcquired : onLockAcquiredActions) { - assertNotNull(onLockAcquired.get(30, TimeUnit.SECONDS)); + final int numberOfAcquisitions = randomIntBetween(1, 10); + final int recoveryIndex = randomIntBetween(1, numberOfAcquisitions); + + for (int i = 0; i < numberOfAcquisitions; i++) { + + final PlainActionFuture onLockAcquired; + final Runnable assertion; + if (i < recoveryIndex) { + final AtomicBoolean invoked = new AtomicBoolean(); + onLockAcquired = new PlainActionFuture<>() { + + @Override + public void onResponse(Releasable releasable) { + invoked.set(true); + releasable.close(); + super.onResponse(releasable); + } + + @Override + public void onFailure(Exception e) { + throw new AssertionError(); + } + + }; + assertion = () -> assertTrue(invoked.get()); + } else if (recoveryIndex == i) { + startRecovery.countDown(); + relocationStarted.await(); + onLockAcquired = new PlainActionFuture<>(); + assertion = () -> { + final ExecutionException e = expectThrows(ExecutionException.class, () -> onLockAcquired.get(30, TimeUnit.SECONDS)); + assertThat(e.getCause(), instanceOf(ShardNotInPrimaryModeException.class)); + assertThat(e.getCause(), hasToString(containsString("shard is not in primary mode"))); + }; + } else { + onLockAcquired = new PlainActionFuture<>(); + assertion = () -> { + final ExecutionException e = expectThrows(ExecutionException.class, () -> onLockAcquired.get(30, TimeUnit.SECONDS)); + assertThat(e.getCause(), instanceOf(ShardNotInPrimaryModeException.class)); + assertThat(e.getCause(), hasToString(containsString("shard is not in primary mode"))); + }; + } + + shard.acquirePrimaryOperationPermit(onLockAcquired, ThreadPool.Names.WRITE, "i_" + i); + assertion.run(); } recoveryThread.join(); From e8b85c90e941fe7003f7446c4364523663d681bc Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Tue, 21 May 2019 17:06:51 -0400 Subject: [PATCH 008/224] Mute testDelayedOperationsBeforeAndAfterRelocated Tracked at #42325 --- .../test/java/org/elasticsearch/index/shard/IndexShardTests.java | 1 + 1 file changed, 1 insertion(+) diff --git a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java index 786d5bc5e8df8..64b0c0db1dc8c 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java @@ -1736,6 +1736,7 @@ public void testLockingBeforeAndAfterRelocated() throws Exception { closeShards(shard); } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/42325") public void testDelayedOperationsBeforeAndAfterRelocated() throws Exception { final IndexShard shard = newStartedShard(true); IndexShardTestCase.updateRoutingEntry(shard, ShardRoutingHelper.relocate(shard.routingEntry(), "other_node")); From 2d8869175b102565f73ca14ae4d2d6e3fb66660b Mon Sep 17 00:00:00 2001 From: Tal Levy Date: Tue, 21 May 2019 14:27:57 -0700 Subject: [PATCH 009/224] remove backcompat handling of 6.2.x versions (#42044) relates to refactoring initiative #41164. --- .../elasticsearch/ElasticsearchException.java | 2 +- .../main/java/org/elasticsearch/Version.java | 20 ------- .../org/elasticsearch/index/store/Store.java | 19 +----- .../indices/flush/SyncedFlushService.java | 14 +---- .../search/slice/SliceBuilder.java | 10 +--- .../java/org/elasticsearch/BuildTests.java | 5 -- .../ExceptionSerializationTests.java | 2 +- .../common/lucene/uid/VersionsTests.java | 14 +---- .../index/analysis/PreBuiltAnalyzerTests.java | 4 +- .../search/slice/SliceBuilderTests.java | 26 ++------ .../xpack/core/ml/action/UpdateJobAction.java | 10 +--- .../core/ml/action/UpdateProcessAction.java | 13 ++-- .../core/ml/datafeed/DatafeedConfig.java | 10 +--- .../core/ml/job/config/AnalysisConfig.java | 11 +--- .../xpack/core/ml/job/results/Bucket.java | 12 +--- .../action/token/CreateTokenRequest.java | 60 ++++++------------- .../action/token/CreateTokenResponse.java | 8 --- .../action/TransportOpenJobActionTests.java | 10 ++-- .../monitoring/MonitoringFeatureSetTests.java | 3 +- .../xpack/restart/FullClusterRestartIT.java | 8 +-- 20 files changed, 58 insertions(+), 203 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/ElasticsearchException.java b/server/src/main/java/org/elasticsearch/ElasticsearchException.java index 85df20d849afa..a2e53a1189f1b 100644 --- a/server/src/main/java/org/elasticsearch/ElasticsearchException.java +++ b/server/src/main/java/org/elasticsearch/ElasticsearchException.java @@ -1006,7 +1006,7 @@ private enum ElasticsearchExceptionHandle { UNKNOWN_NAMED_OBJECT_EXCEPTION(org.elasticsearch.common.xcontent.UnknownNamedObjectException.class, org.elasticsearch.common.xcontent.UnknownNamedObjectException::new, 148, UNKNOWN_VERSION_ADDED), TOO_MANY_BUCKETS_EXCEPTION(MultiBucketConsumerService.TooManyBucketsException.class, - MultiBucketConsumerService.TooManyBucketsException::new, 149, Version.V_6_2_0), + MultiBucketConsumerService.TooManyBucketsException::new, 149, UNKNOWN_VERSION_ADDED), COORDINATION_STATE_REJECTED_EXCEPTION(org.elasticsearch.cluster.coordination.CoordinationStateRejectedException.class, org.elasticsearch.cluster.coordination.CoordinationStateRejectedException::new, 150, Version.V_7_0_0), SNAPSHOT_IN_PROGRESS_EXCEPTION(org.elasticsearch.snapshots.SnapshotInProgressException.class, diff --git a/server/src/main/java/org/elasticsearch/Version.java b/server/src/main/java/org/elasticsearch/Version.java index 0a6b19444efa7..90b7ae869e811 100644 --- a/server/src/main/java/org/elasticsearch/Version.java +++ b/server/src/main/java/org/elasticsearch/Version.java @@ -48,16 +48,6 @@ public class Version implements Comparable, ToXContentFragment { public static final Version V_EMPTY = new Version(V_EMPTY_ID, org.apache.lucene.util.Version.LATEST); // The below version is missing from the 7.3 JAR private static final org.apache.lucene.util.Version LUCENE_7_2_1 = org.apache.lucene.util.Version.fromBits(7, 2, 1); - public static final int V_6_2_0_ID = 6020099; - public static final Version V_6_2_0 = new Version(V_6_2_0_ID, LUCENE_7_2_1); - public static final int V_6_2_1_ID = 6020199; - public static final Version V_6_2_1 = new Version(V_6_2_1_ID, LUCENE_7_2_1); - public static final int V_6_2_2_ID = 6020299; - public static final Version V_6_2_2 = new Version(V_6_2_2_ID, LUCENE_7_2_1); - public static final int V_6_2_3_ID = 6020399; - public static final Version V_6_2_3 = new Version(V_6_2_3_ID, LUCENE_7_2_1); - public static final int V_6_2_4_ID = 6020499; - public static final Version V_6_2_4 = new Version(V_6_2_4_ID, LUCENE_7_2_1); public static final int V_6_3_0_ID = 6030099; public static final Version V_6_3_0 = new Version(V_6_3_0_ID, org.apache.lucene.util.Version.LUCENE_7_3_1); public static final int V_6_3_1_ID = 6030199; @@ -176,16 +166,6 @@ public static Version fromId(int id) { return V_6_3_1; case V_6_3_0_ID: return V_6_3_0; - case V_6_2_4_ID: - return V_6_2_4; - case V_6_2_3_ID: - return V_6_2_3; - case V_6_2_2_ID: - return V_6_2_2; - case V_6_2_1_ID: - return V_6_2_1; - case V_6_2_0_ID: - return V_6_2_0; case V_EMPTY_ID: return V_EMPTY; default: diff --git a/server/src/main/java/org/elasticsearch/index/store/Store.java b/server/src/main/java/org/elasticsearch/index/store/Store.java index 65d2f8d7812f8..5f1f7d23a8c6a 100644 --- a/server/src/main/java/org/elasticsearch/index/store/Store.java +++ b/server/src/main/java/org/elasticsearch/index/store/Store.java @@ -1550,23 +1550,8 @@ public void trimUnsafeCommits(final long lastSyncedGlobalCheckpoint, final long final IndexCommit lastIndexCommitCommit = existingCommits.get(existingCommits.size() - 1); final String translogUUID = lastIndexCommitCommit.getUserData().get(Translog.TRANSLOG_UUID_KEY); final IndexCommit startingIndexCommit; - // We may not have a safe commit if an index was create before v6.2; and if there is a snapshotted commit whose translog - // are not retained but max_seqno is at most the global checkpoint, we may mistakenly select it as a starting commit. - // To avoid this issue, we only select index commits whose translog are fully retained. - if (indexVersionCreated.before(org.elasticsearch.Version.V_6_2_0)) { - final List recoverableCommits = new ArrayList<>(); - for (IndexCommit commit : existingCommits) { - if (minRetainedTranslogGen <= Long.parseLong(commit.getUserData().get(Translog.TRANSLOG_GENERATION_KEY))) { - recoverableCommits.add(commit); - } - } - assert recoverableCommits.isEmpty() == false : "No commit point with translog found; " + - "commits [" + existingCommits + "], minRetainedTranslogGen [" + minRetainedTranslogGen + "]"; - startingIndexCommit = CombinedDeletionPolicy.findSafeCommitPoint(recoverableCommits, lastSyncedGlobalCheckpoint); - } else { - // TODO: Asserts the starting commit is a safe commit once peer-recovery sets global checkpoint. - startingIndexCommit = CombinedDeletionPolicy.findSafeCommitPoint(existingCommits, lastSyncedGlobalCheckpoint); - } + // TODO: Asserts the starting commit is a safe commit once peer-recovery sets global checkpoint. + startingIndexCommit = CombinedDeletionPolicy.findSafeCommitPoint(existingCommits, lastSyncedGlobalCheckpoint); if (translogUUID.equals(startingIndexCommit.getUserData().get(Translog.TRANSLOG_UUID_KEY)) == false) { throw new IllegalStateException("starting commit translog uuid [" diff --git a/server/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java b/server/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java index 921a8f9cc7c47..6291531b7f907 100644 --- a/server/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java +++ b/server/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java @@ -594,10 +594,6 @@ static final class PreSyncedFlushResponse extends TransportResponse { this.existingSyncId = existingSyncId; } - boolean includeNumDocs(Version version) { - return version.onOrAfter(Version.V_6_2_2); - } - boolean includeExistingSyncId(Version version) { return version.onOrAfter(Version.V_6_3_0); } @@ -606,11 +602,7 @@ boolean includeExistingSyncId(Version version) { public void readFrom(StreamInput in) throws IOException { super.readFrom(in); commitId = new Engine.CommitId(in); - if (includeNumDocs(in.getVersion())) { - numDocs = in.readInt(); - } else { - numDocs = UNKNOWN_NUM_DOCS; - } + numDocs = in.readInt(); if (includeExistingSyncId(in.getVersion())) { existingSyncId = in.readOptionalString(); } @@ -620,9 +612,7 @@ public void readFrom(StreamInput in) throws IOException { public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); commitId.writeTo(out); - if (includeNumDocs(out.getVersion())) { - out.writeInt(numDocs); - } + out.writeInt(numDocs); if (includeExistingSyncId(out.getVersion())) { out.writeOptionalString(existingSyncId); } diff --git a/server/src/main/java/org/elasticsearch/search/slice/SliceBuilder.java b/server/src/main/java/org/elasticsearch/search/slice/SliceBuilder.java index 40e10eb589006..08f042aa69650 100644 --- a/server/src/main/java/org/elasticsearch/search/slice/SliceBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/slice/SliceBuilder.java @@ -106,10 +106,6 @@ public SliceBuilder(String field, int id, int max) { public SliceBuilder(StreamInput in) throws IOException { String field = in.readString(); - if ("_uid".equals(field) && in.getVersion().before(Version.V_6_3_0)) { - // This is safe because _id and _uid are handled the same way in #toFilter - field = IdFieldMapper.NAME; - } this.field = field; this.id = in.readVInt(); this.max = in.readVInt(); @@ -117,11 +113,7 @@ public SliceBuilder(StreamInput in) throws IOException { @Override public void writeTo(StreamOutput out) throws IOException { - if (IdFieldMapper.NAME.equals(field) && out.getVersion().before(Version.V_6_3_0)) { - out.writeString("_uid"); - } else { - out.writeString(field); - } + out.writeString(field); out.writeVInt(id); out.writeVInt(max); } diff --git a/server/src/test/java/org/elasticsearch/BuildTests.java b/server/src/test/java/org/elasticsearch/BuildTests.java index e0d8140c708d6..59e289b9e98ef 100644 --- a/server/src/test/java/org/elasticsearch/BuildTests.java +++ b/server/src/test/java/org/elasticsearch/BuildTests.java @@ -199,29 +199,24 @@ public void testSerializationBWC() throws IOException { randomAlphaOfLength(6), randomAlphaOfLength(6), randomBoolean(), randomAlphaOfLength(6))); final List versions = Version.getDeclaredVersions(Version.class); - final Version pre63Version = randomFrom(versions.stream().filter(v -> v.before(Version.V_6_3_0)).collect(Collectors.toList())); final Version post63Pre67Version = randomFrom(versions.stream() .filter(v -> v.onOrAfter(Version.V_6_3_0) && v.before(Version.V_6_7_0)).collect(Collectors.toList())); final Version post67Pre70Version = randomFrom(versions.stream() .filter(v -> v.onOrAfter(Version.V_6_7_0) && v.before(Version.V_7_0_0)).collect(Collectors.toList())); final Version post70Version = randomFrom(versions.stream().filter(v -> v.onOrAfter(Version.V_7_0_0)).collect(Collectors.toList())); - final WriteableBuild pre63 = copyWriteable(dockerBuild, writableRegistry(), WriteableBuild::new, pre63Version); final WriteableBuild post63pre67 = copyWriteable(dockerBuild, writableRegistry(), WriteableBuild::new, post63Pre67Version); final WriteableBuild post67pre70 = copyWriteable(dockerBuild, writableRegistry(), WriteableBuild::new, post67Pre70Version); final WriteableBuild post70 = copyWriteable(dockerBuild, writableRegistry(), WriteableBuild::new, post70Version); - assertThat(pre63.build.flavor(), equalTo(Build.Flavor.OSS)); assertThat(post63pre67.build.flavor(), equalTo(dockerBuild.build.flavor())); assertThat(post67pre70.build.flavor(), equalTo(dockerBuild.build.flavor())); assertThat(post70.build.flavor(), equalTo(dockerBuild.build.flavor())); - assertThat(pre63.build.type(), equalTo(Build.Type.UNKNOWN)); assertThat(post63pre67.build.type(), equalTo(Build.Type.TAR)); assertThat(post67pre70.build.type(), equalTo(dockerBuild.build.type())); assertThat(post70.build.type(), equalTo(dockerBuild.build.type())); - assertThat(pre63.build.getQualifiedVersion(), equalTo(pre63Version.toString())); assertThat(post63pre67.build.getQualifiedVersion(), equalTo(post63Pre67Version.toString())); assertThat(post67pre70.build.getQualifiedVersion(), equalTo(post67Pre70Version.toString())); assertThat(post70.build.getQualifiedVersion(), equalTo(dockerBuild.build.getQualifiedVersion())); diff --git a/server/src/test/java/org/elasticsearch/ExceptionSerializationTests.java b/server/src/test/java/org/elasticsearch/ExceptionSerializationTests.java index a0aafbb41d371..5b33068013965 100644 --- a/server/src/test/java/org/elasticsearch/ExceptionSerializationTests.java +++ b/server/src/test/java/org/elasticsearch/ExceptionSerializationTests.java @@ -366,7 +366,7 @@ public void testCircuitBreakingException() throws IOException { } public void testTooManyBucketsException() throws IOException { - Version version = VersionUtils.randomVersionBetween(random(), Version.V_6_2_0, Version.CURRENT); + Version version = VersionUtils.randomCompatibleVersion(random(), Version.CURRENT); MultiBucketConsumerService.TooManyBucketsException ex = serialize(new MultiBucketConsumerService.TooManyBucketsException("Too many buckets", 100), version); assertEquals("Too many buckets", ex.getMessage()); diff --git a/server/src/test/java/org/elasticsearch/common/lucene/uid/VersionsTests.java b/server/src/test/java/org/elasticsearch/common/lucene/uid/VersionsTests.java index febe2b976fb47..94945dc92c952 100644 --- a/server/src/test/java/org/elasticsearch/common/lucene/uid/VersionsTests.java +++ b/server/src/test/java/org/elasticsearch/common/lucene/uid/VersionsTests.java @@ -37,7 +37,6 @@ import java.io.IOException; import java.util.ArrayList; -import java.util.Collections; import java.util.List; import static org.elasticsearch.common.lucene.uid.VersionsAndSeqNoResolver.loadDocIdAndVersion; @@ -189,23 +188,16 @@ public void testCacheFilterReader() throws Exception { } public void testLuceneVersionOnUnknownVersions() { - List allVersions = VersionUtils.allVersions(); - - // should have the same Lucene version as the latest 6.x version - Version version = Version.fromString("6.88.50"); - assertEquals(allVersions.get(Collections.binarySearch(allVersions, Version.V_7_0_0) - 1).luceneVersion, - version.luceneVersion); - // between two known versions, should use the lucene version of the previous version - version = Version.fromString("6.2.50"); - assertEquals(VersionUtils.getPreviousVersion(Version.V_6_2_4).luceneVersion, version.luceneVersion); + Version version = VersionUtils.getPreviousVersion(Version.CURRENT); + assertEquals(Version.fromId(version.id + 100).luceneVersion, version.luceneVersion); // too old version, major should be the oldest supported lucene version minus 1 version = Version.fromString("5.2.1"); assertEquals(VersionUtils.getFirstVersion().luceneVersion.major - 1, version.luceneVersion.major); // future version, should be the same version as today - version = Version.fromString("8.77.1"); + version = Version.fromId(Version.CURRENT.id + 100); assertEquals(Version.CURRENT.luceneVersion, version.luceneVersion); } } diff --git a/server/src/test/java/org/elasticsearch/index/analysis/PreBuiltAnalyzerTests.java b/server/src/test/java/org/elasticsearch/index/analysis/PreBuiltAnalyzerTests.java index 65958ec9319c2..3ca1bec5a4b57 100644 --- a/server/src/test/java/org/elasticsearch/index/analysis/PreBuiltAnalyzerTests.java +++ b/server/src/test/java/org/elasticsearch/index/analysis/PreBuiltAnalyzerTests.java @@ -75,8 +75,8 @@ public void testThatInstancesAreCachedAndReused() { PreBuiltAnalyzers.STANDARD.getAnalyzer(VersionUtils.randomPreviousCompatibleVersion(random(), Version.CURRENT))); // Same Lucene version should be cached: - assertSame(PreBuiltAnalyzers.STOP.getAnalyzer(Version.V_6_2_1), - PreBuiltAnalyzers.STOP.getAnalyzer(Version.V_6_2_2)); + assertSame(PreBuiltAnalyzers.STOP.getAnalyzer(Version.fromString("5.0.0")), + PreBuiltAnalyzers.STOP.getAnalyzer(Version.fromString("5.0.1"))); } public void testThatAnalyzersAreUsedInMapping() throws IOException { diff --git a/server/src/test/java/org/elasticsearch/search/slice/SliceBuilderTests.java b/server/src/test/java/org/elasticsearch/search/slice/SliceBuilderTests.java index bf053d34bff56..fffa501cc4be4 100644 --- a/server/src/test/java/org/elasticsearch/search/slice/SliceBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/search/slice/SliceBuilderTests.java @@ -53,7 +53,6 @@ import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.fielddata.IndexNumericFieldData; -import org.elasticsearch.index.mapper.IdFieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.index.query.Rewriteable; @@ -63,6 +62,7 @@ import org.elasticsearch.search.internal.AliasFilter; import org.elasticsearch.search.internal.ShardSearchRequest; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.VersionUtils; import java.io.IOException; import java.util.ArrayList; @@ -455,21 +455,6 @@ public void testToFilterDeprecationMessage() throws IOException { } } - public void testSerializationBackcompat() throws IOException { - SliceBuilder sliceBuilder = new SliceBuilder(1, 5); - assertEquals(IdFieldMapper.NAME, sliceBuilder.getField()); - - SliceBuilder copy62 = copyWriteable(sliceBuilder, - new NamedWriteableRegistry(Collections.emptyList()), - SliceBuilder::new, Version.V_6_2_0); - assertEquals(sliceBuilder, copy62); - - SliceBuilder copy63 = copyWriteable(copy62, - new NamedWriteableRegistry(Collections.emptyList()), - SliceBuilder::new, Version.V_6_3_0); - assertEquals(sliceBuilder, copy63); - } - public void testToFilterWithRouting() throws IOException { Directory dir = new RAMDirectory(); try (IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())))) { @@ -489,15 +474,14 @@ public void testToFilterWithRouting() throws IOException { when(clusterService.operationRouting()).thenReturn(routing); when(clusterService.getSettings()).thenReturn(Settings.EMPTY); try (IndexReader reader = DirectoryReader.open(dir)) { - QueryShardContext context = createShardContext(Version.CURRENT, reader, "field", DocValuesType.SORTED, 5, 0); + Version version = VersionUtils.randomCompatibleVersion(random(), Version.CURRENT); + QueryShardContext context = createShardContext(version, reader, "field", DocValuesType.SORTED, 5, 0); SliceBuilder builder = new SliceBuilder("field", 6, 10); String[] routings = new String[] { "foo" }; - Query query = builder.toFilter(clusterService, createRequest(1, routings, null), context, Version.CURRENT); + Query query = builder.toFilter(clusterService, createRequest(1, routings, null), context, version); assertEquals(new DocValuesSliceQuery("field", 6, 10), query); - query = builder.toFilter(clusterService, createRequest(1, Strings.EMPTY_ARRAY, "foo"), context, Version.CURRENT); + query = builder.toFilter(clusterService, createRequest(1, Strings.EMPTY_ARRAY, "foo"), context, version); assertEquals(new DocValuesSliceQuery("field", 6, 10), query); - query = builder.toFilter(clusterService, createRequest(1, Strings.EMPTY_ARRAY, "foo"), context, Version.V_6_2_0); - assertEquals(new DocValuesSliceQuery("field", 1, 2), query); } } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateJobAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateJobAction.java index 85e1615c0dfe0..6ecee409c30f1 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateJobAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateJobAction.java @@ -92,11 +92,7 @@ public void readFrom(StreamInput in) throws IOException { super.readFrom(in); jobId = in.readString(); update = new JobUpdate(in); - if (in.getVersion().onOrAfter(Version.V_6_2_2)) { - isInternal = in.readBoolean(); - } else { - isInternal = false; - } + isInternal = in.readBoolean(); if (in.getVersion().onOrAfter(Version.V_6_3_0) && in.getVersion().before(Version.V_7_0_0)) { in.readBoolean(); // was waitForAck } @@ -107,9 +103,7 @@ public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeString(jobId); update.writeTo(out); - if (out.getVersion().onOrAfter(Version.V_6_2_2)) { - out.writeBoolean(isInternal); - } + out.writeBoolean(isInternal); if (out.getVersion().onOrAfter(Version.V_6_3_0) && out.getVersion().before(Version.V_7_0_0)) { out.writeBoolean(false); // was waitForAck } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateProcessAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateProcessAction.java index 5091ff1f968f1..6a8e1703ad1f2 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateProcessAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateProcessAction.java @@ -5,7 +5,6 @@ */ package org.elasticsearch.xpack.core.ml.action; -import org.elasticsearch.Version; import org.elasticsearch.action.Action; import org.elasticsearch.action.ActionRequestBuilder; import org.elasticsearch.action.support.tasks.BaseTasksResponse; @@ -121,10 +120,8 @@ public Request(StreamInput in) throws IOException { if (in.readBoolean()) { detectorUpdates = in.readList(JobUpdate.DetectorUpdate::new); } - if (in.getVersion().onOrAfter(Version.V_6_2_0)) { - filter = in.readOptionalWriteable(MlFilter::new); - updateScheduledEvents = in.readBoolean(); - } + filter = in.readOptionalWriteable(MlFilter::new); + updateScheduledEvents = in.readBoolean(); } @Override @@ -136,10 +133,8 @@ public void writeTo(StreamOutput out) throws IOException { if (hasDetectorUpdates) { out.writeList(detectorUpdates); } - if (out.getVersion().onOrAfter(Version.V_6_2_0)) { - out.writeOptionalWriteable(filter); - out.writeBoolean(updateScheduledEvents); - } + out.writeOptionalWriteable(filter); + out.writeBoolean(updateScheduledEvents); } public Request(String jobId, ModelPlotConfig modelPlotConfig, List detectorUpdates, MlFilter filter, diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedConfig.java index 810d97df34636..f08c4a9d7391d 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedConfig.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedConfig.java @@ -222,11 +222,7 @@ public DatafeedConfig(StreamInput in) throws IOException { } this.scrollSize = in.readOptionalVInt(); this.chunkingConfig = in.readOptionalWriteable(ChunkingConfig::new); - if (in.getVersion().onOrAfter(Version.V_6_2_0)) { - this.headers = Collections.unmodifiableMap(in.readMap(StreamInput::readString, StreamInput::readString)); - } else { - this.headers = Collections.emptyMap(); - } + this.headers = Collections.unmodifiableMap(in.readMap(StreamInput::readString, StreamInput::readString)); if (in.getVersion().onOrAfter(Version.V_6_6_0)) { delayedDataCheckConfig = in.readOptionalWriteable(DelayedDataCheckConfig::new); } else { @@ -432,9 +428,7 @@ public void writeTo(StreamOutput out) throws IOException { } out.writeOptionalVInt(scrollSize); out.writeOptionalWriteable(chunkingConfig); - if (out.getVersion().onOrAfter(Version.V_6_2_0)) { - out.writeMap(headers, StreamOutput::writeString, StreamOutput::writeString); - } + out.writeMap(headers, StreamOutput::writeString, StreamOutput::writeString); if (out.getVersion().onOrAfter(Version.V_6_6_0)) { out.writeOptionalWriteable(delayedDataCheckConfig); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/AnalysisConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/AnalysisConfig.java index 933188c8221eb..9e01cd21e2b90 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/AnalysisConfig.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/AnalysisConfig.java @@ -5,7 +5,6 @@ */ package org.elasticsearch.xpack.core.ml.job.config; -import org.elasticsearch.Version; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; @@ -126,11 +125,7 @@ public AnalysisConfig(StreamInput in) throws IOException { bucketSpan = in.readTimeValue(); categorizationFieldName = in.readOptionalString(); categorizationFilters = in.readBoolean() ? Collections.unmodifiableList(in.readStringList()) : null; - if (in.getVersion().onOrAfter(Version.V_6_2_0)) { - categorizationAnalyzerConfig = in.readOptionalWriteable(CategorizationAnalyzerConfig::new); - } else { - categorizationAnalyzerConfig = null; - } + categorizationAnalyzerConfig = in.readOptionalWriteable(CategorizationAnalyzerConfig::new); latency = in.readOptionalTimeValue(); summaryCountFieldName = in.readOptionalString(); detectors = Collections.unmodifiableList(in.readList(Detector::new)); @@ -149,9 +144,7 @@ public void writeTo(StreamOutput out) throws IOException { } else { out.writeBoolean(false); } - if (out.getVersion().onOrAfter(Version.V_6_2_0)) { - out.writeOptionalWriteable(categorizationAnalyzerConfig); - } + out.writeOptionalWriteable(categorizationAnalyzerConfig); out.writeOptionalTimeValue(latency); out.writeOptionalString(summaryCountFieldName); out.writeList(detectors); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/Bucket.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/Bucket.java index d335ba39e0026..8e04e001ed6cd 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/Bucket.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/Bucket.java @@ -138,12 +138,8 @@ public Bucket(StreamInput in) throws IOException { if (in.getVersion().before(Version.V_6_5_0)) { in.readList(Bucket::readOldPerPartitionNormalization); } - if (in.getVersion().onOrAfter(Version.V_6_2_0)) { - scheduledEvents = in.readStringList(); - if (scheduledEvents.isEmpty()) { - scheduledEvents = Collections.emptyList(); - } - } else { + scheduledEvents = in.readStringList(); + if (scheduledEvents.isEmpty()) { scheduledEvents = Collections.emptyList(); } } @@ -164,9 +160,7 @@ public void writeTo(StreamOutput out) throws IOException { if (out.getVersion().before(Version.V_6_5_0)) { out.writeList(Collections.emptyList()); } - if (out.getVersion().onOrAfter(Version.V_6_2_0)) { - out.writeStringCollection(scheduledEvents); - } + out.writeStringCollection(scheduledEvents); } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/token/CreateTokenRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/token/CreateTokenRequest.java index ed31f0cc020c6..3fdfaab060542 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/token/CreateTokenRequest.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/token/CreateTokenRequest.java @@ -192,32 +192,18 @@ public void writeTo(StreamOutput out) throws IOException { } out.writeString(grantType); - if (out.getVersion().onOrAfter(Version.V_6_2_0)) { - out.writeOptionalString(username); - if (password == null) { - out.writeOptionalBytesReference(null); - } else { - final byte[] passwordBytes = CharArrays.toUtf8Bytes(password.getChars()); - try { - out.writeOptionalBytesReference(new BytesArray(passwordBytes)); - } finally { - Arrays.fill(passwordBytes, (byte) 0); - } - } - out.writeOptionalString(refreshToken); + out.writeOptionalString(username); + if (password == null) { + out.writeOptionalBytesReference(null); } else { - if ("refresh_token".equals(grantType)) { - throw new IllegalArgumentException("a refresh request cannot be sent to an older version"); - } else { - out.writeString(username); - final byte[] passwordBytes = CharArrays.toUtf8Bytes(password.getChars()); - try { - out.writeByteArray(passwordBytes); - } finally { - Arrays.fill(passwordBytes, (byte) 0); - } + final byte[] passwordBytes = CharArrays.toUtf8Bytes(password.getChars()); + try { + out.writeOptionalBytesReference(new BytesArray(passwordBytes)); + } finally { + Arrays.fill(passwordBytes, (byte) 0); } } + out.writeOptionalString(refreshToken); out.writeOptionalString(scope); } @@ -225,29 +211,19 @@ public void writeTo(StreamOutput out) throws IOException { public void readFrom(StreamInput in) throws IOException { super.readFrom(in); grantType = in.readString(); - if (in.getVersion().onOrAfter(Version.V_6_2_0)) { - username = in.readOptionalString(); - BytesReference bytesRef = in.readOptionalBytesReference(); - if (bytesRef != null) { - byte[] bytes = BytesReference.toBytes(bytesRef); - try { - password = new SecureString(CharArrays.utf8BytesToChars(bytes)); - } finally { - Arrays.fill(bytes, (byte) 0); - } - } else { - password = null; - } - refreshToken = in.readOptionalString(); - } else { - username = in.readString(); - final byte[] passwordBytes = in.readByteArray(); + username = in.readOptionalString(); + BytesReference bytesRef = in.readOptionalBytesReference(); + if (bytesRef != null) { + byte[] bytes = BytesReference.toBytes(bytesRef); try { - password = new SecureString(CharArrays.utf8BytesToChars(passwordBytes)); + password = new SecureString(CharArrays.utf8BytesToChars(bytes)); } finally { - Arrays.fill(passwordBytes, (byte) 0); + Arrays.fill(bytes, (byte) 0); } + } else { + password = null; } + refreshToken = in.readOptionalString(); scope = in.readOptionalString(); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/token/CreateTokenResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/token/CreateTokenResponse.java index 30111a92431dc..93ddc56459677 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/token/CreateTokenResponse.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/token/CreateTokenResponse.java @@ -61,12 +61,6 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalString(scope); if (out.getVersion().onOrAfter(Version.V_6_5_0)) { out.writeOptionalString(refreshToken); - } else if (out.getVersion().onOrAfter(Version.V_6_2_0)) { - if (refreshToken == null) { - out.writeString(""); - } else { - out.writeString(refreshToken); - } } } @@ -78,8 +72,6 @@ public void readFrom(StreamInput in) throws IOException { scope = in.readOptionalString(); if (in.getVersion().onOrAfter(Version.V_6_5_0)) { refreshToken = in.readOptionalString(); - } else if (in.getVersion().onOrAfter(Version.V_6_2_0)) { - refreshToken = in.readString(); } } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportOpenJobActionTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportOpenJobActionTests.java index 92d7bbcc49e54..1065503e091d4 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportOpenJobActionTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportOpenJobActionTests.java @@ -406,11 +406,13 @@ public void testSelectLeastLoadedMlNode_jobWithRulesButNoNodeMeetsRequiredVersio Map nodeAttr = new HashMap<>(); nodeAttr.put(MachineLearning.MAX_OPEN_JOBS_NODE_ATTR, "10"); nodeAttr.put(MachineLearning.MACHINE_MEMORY_NODE_ATTR, "1000000000"); + Version version = VersionUtils.randomVersionBetween(random(), VersionUtils.getFirstVersion(), + VersionUtils.getPreviousVersion(Version.V_6_4_0)); DiscoveryNodes nodes = DiscoveryNodes.builder() .add(new DiscoveryNode("_node_name1", "_node_id1", new TransportAddress(InetAddress.getLoopbackAddress(), 9300), - nodeAttr, Collections.emptySet(), Version.V_6_2_0)) + nodeAttr, Collections.emptySet(), version)) .add(new DiscoveryNode("_node_name2", "_node_id2", new TransportAddress(InetAddress.getLoopbackAddress(), 9301), - nodeAttr, Collections.emptySet(), Version.V_6_3_0)) + nodeAttr, Collections.emptySet(), version)) .build(); PersistentTasksCustomMetaData.Builder tasksBuilder = PersistentTasksCustomMetaData.builder(); @@ -437,9 +439,9 @@ public void testSelectLeastLoadedMlNode_jobWithRulesAndNodeMeetsRequiredVersion( nodeAttr.put(MachineLearning.MACHINE_MEMORY_NODE_ATTR, "1000000000"); DiscoveryNodes nodes = DiscoveryNodes.builder() .add(new DiscoveryNode("_node_name1", "_node_id1", new TransportAddress(InetAddress.getLoopbackAddress(), 9300), - nodeAttr, Collections.emptySet(), Version.V_6_2_0)) + nodeAttr, Collections.emptySet(), Version.fromString("6.2.0"))) .add(new DiscoveryNode("_node_name2", "_node_id2", new TransportAddress(InetAddress.getLoopbackAddress(), 9301), - nodeAttr, Collections.emptySet(), Version.V_6_4_0)) + nodeAttr, Collections.emptySet(), Version.fromString("6.4.0"))) .build(); PersistentTasksCustomMetaData.Builder tasksBuilder = PersistentTasksCustomMetaData.builder(); diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/MonitoringFeatureSetTests.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/MonitoringFeatureSetTests.java index 1a06a9a4037f9..d644a63e7bcaa 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/MonitoringFeatureSetTests.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/MonitoringFeatureSetTests.java @@ -15,6 +15,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.VersionUtils; import org.elasticsearch.test.rest.yaml.ObjectPath; import org.elasticsearch.xpack.core.XPackFeatureSet; import org.elasticsearch.xpack.core.XPackFeatureSet.Usage; @@ -63,7 +64,7 @@ public void testEnabledDefault() { public void testUsage() throws Exception { // anything prior to 6.3 does not include collection_enabled (so defaults it to null) - final Version serializedVersion = randomFrom(Version.CURRENT, Version.V_6_3_0, Version.V_6_2_2); + final Version serializedVersion = VersionUtils.randomCompatibleVersion(random(), Version.CURRENT); final boolean collectionEnabled = randomBoolean(); int localCount = randomIntBetween(0, 5); List exporterList = new ArrayList<>(); diff --git a/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java b/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java index f73496db0f875..f17aab309ba72 100644 --- a/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java +++ b/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java @@ -266,9 +266,7 @@ public void testRollupAfterRestart() throws Exception { final Request clusterHealthRequest = new Request("GET", "/_cluster/health"); clusterHealthRequest.addParameter("wait_for_status", "yellow"); clusterHealthRequest.addParameter("wait_for_no_relocating_shards", "true"); - if (getOldClusterVersion().onOrAfter(Version.V_6_2_0)) { - clusterHealthRequest.addParameter("wait_for_no_initializing_shards", "true"); - } + clusterHealthRequest.addParameter("wait_for_no_initializing_shards", "true"); Map clusterHealthResponse = entityAsMap(client().performRequest(clusterHealthRequest)); assertThat(clusterHealthResponse.get("timed_out"), equalTo(Boolean.FALSE)); @@ -384,9 +382,7 @@ private void waitForYellow(String indexName) throws IOException { request.addParameter("wait_for_status", "yellow"); request.addParameter("timeout", "30s"); request.addParameter("wait_for_no_relocating_shards", "true"); - if (getOldClusterVersion().onOrAfter(Version.V_6_2_0)) { - request.addParameter("wait_for_no_initializing_shards", "true"); - } + request.addParameter("wait_for_no_initializing_shards", "true"); Map response = entityAsMap(client().performRequest(request)); assertThat(response.get("timed_out"), equalTo(Boolean.FALSE)); } From a3bd569a0f4b4014ba4992a8dc0d390cc1a17ff9 Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Wed, 22 May 2019 00:16:53 +0200 Subject: [PATCH 010/224] Cleanup Redundant BlobStoreFormat Class (#42195) * No need to have an abstract class here when there's only a single impl. --- .../blobstore/BlobStoreFormat.java | 111 ------------------ .../blobstore/ChecksumBlobStoreFormat.java | 92 +++++++++++---- 2 files changed, 71 insertions(+), 132 deletions(-) delete mode 100644 server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreFormat.java diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreFormat.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreFormat.java deleted file mode 100644 index 044caee41c55d..0000000000000 --- a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreFormat.java +++ /dev/null @@ -1,111 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.elasticsearch.repositories.blobstore; - -import org.elasticsearch.cluster.metadata.MetaData; -import org.elasticsearch.common.CheckedFunction; -import org.elasticsearch.common.blobstore.BlobContainer; -import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; -import org.elasticsearch.common.xcontent.NamedXContentRegistry; -import org.elasticsearch.common.xcontent.ToXContent; -import org.elasticsearch.common.xcontent.XContentHelper; -import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.snapshots.SnapshotInfo; - -import java.io.IOException; -import java.util.HashMap; -import java.util.Locale; -import java.util.Map; - -/** - * Base class that handles serialization of various data structures during snapshot/restore operations. - */ -public abstract class BlobStoreFormat { - - protected final String blobNameFormat; - - protected final CheckedFunction reader; - - protected final NamedXContentRegistry namedXContentRegistry; - - // Serialization parameters to specify correct context for metadata serialization - protected static final ToXContent.Params SNAPSHOT_ONLY_FORMAT_PARAMS; - - static { - Map snapshotOnlyParams = new HashMap<>(); - // when metadata is serialized certain elements of the metadata shouldn't be included into snapshot - // exclusion of these elements is done by setting MetaData.CONTEXT_MODE_PARAM to MetaData.CONTEXT_MODE_SNAPSHOT - snapshotOnlyParams.put(MetaData.CONTEXT_MODE_PARAM, MetaData.CONTEXT_MODE_SNAPSHOT); - // serialize SnapshotInfo using the SNAPSHOT mode - snapshotOnlyParams.put(SnapshotInfo.CONTEXT_MODE_PARAM, SnapshotInfo.CONTEXT_MODE_SNAPSHOT); - SNAPSHOT_ONLY_FORMAT_PARAMS = new ToXContent.MapParams(snapshotOnlyParams); - } - - /** - * @param blobNameFormat format of the blobname in {@link String#format(Locale, String, Object...)} format - * @param reader the prototype object that can deserialize objects with type T - */ - protected BlobStoreFormat(String blobNameFormat, CheckedFunction reader, - NamedXContentRegistry namedXContentRegistry) { - this.reader = reader; - this.blobNameFormat = blobNameFormat; - this.namedXContentRegistry = namedXContentRegistry; - } - - /** - * Reads and parses the blob with given blob name. - * - * @param blobContainer blob container - * @param blobName blob name - * @return parsed blob object - */ - public abstract T readBlob(BlobContainer blobContainer, String blobName) throws IOException; - - /** - * Reads and parses the blob with given name, applying name translation using the {link #blobName} method - * - * @param blobContainer blob container - * @param name name to be translated into - * @return parsed blob object - */ - public T read(BlobContainer blobContainer, String name) throws IOException { - String blobName = blobName(name); - return readBlob(blobContainer, blobName); - } - - /** - * Deletes obj in the blob container - */ - public void delete(BlobContainer blobContainer, String name) throws IOException { - blobContainer.deleteBlob(blobName(name)); - } - - public String blobName(String name) { - return String.format(Locale.ROOT, blobNameFormat, name); - } - - protected T read(BytesReference bytes) throws IOException { - try (XContentParser parser = XContentHelper - .createParser(namedXContentRegistry, LoggingDeprecationHandler.INSTANCE, bytes)) { - return reader.apply(parser); - } - } - -} diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/ChecksumBlobStoreFormat.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/ChecksumBlobStoreFormat.java index d4d009b8ad63e..d216fe3234e83 100644 --- a/server/src/main/java/org/elasticsearch/repositories/blobstore/ChecksumBlobStoreFormat.java +++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/ChecksumBlobStoreFormat.java @@ -23,6 +23,7 @@ import org.apache.lucene.index.IndexFormatTooNewException; import org.apache.lucene.index.IndexFormatTooOldException; import org.apache.lucene.store.OutputStreamIndexOutput; +import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.common.CheckedConsumer; import org.elasticsearch.common.CheckedFunction; import org.elasticsearch.common.blobstore.BlobContainer; @@ -33,24 +34,43 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.lucene.store.ByteArrayIndexInput; import org.elasticsearch.common.lucene.store.IndexOutputOutputStream; +import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.core.internal.io.Streams; import org.elasticsearch.gateway.CorruptStateException; +import org.elasticsearch.snapshots.SnapshotInfo; import java.io.ByteArrayOutputStream; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; +import java.util.HashMap; +import java.util.Locale; +import java.util.Map; /** * Snapshot metadata file format used in v2.0 and above */ -public class ChecksumBlobStoreFormat extends BlobStoreFormat { +public final class ChecksumBlobStoreFormat { + + // Serialization parameters to specify correct context for metadata serialization + private static final ToXContent.Params SNAPSHOT_ONLY_FORMAT_PARAMS; + + static { + Map snapshotOnlyParams = new HashMap<>(); + // when metadata is serialized certain elements of the metadata shouldn't be included into snapshot + // exclusion of these elements is done by setting MetaData.CONTEXT_MODE_PARAM to MetaData.CONTEXT_MODE_SNAPSHOT + snapshotOnlyParams.put(MetaData.CONTEXT_MODE_PARAM, MetaData.CONTEXT_MODE_SNAPSHOT); + // serialize SnapshotInfo using the SNAPSHOT mode + snapshotOnlyParams.put(SnapshotInfo.CONTEXT_MODE_PARAM, SnapshotInfo.CONTEXT_MODE_SNAPSHOT); + SNAPSHOT_ONLY_FORMAT_PARAMS = new ToXContent.MapParams(snapshotOnlyParams); + } private static final XContentType DEFAULT_X_CONTENT_TYPE = XContentType.SMILE; @@ -59,12 +79,18 @@ public class ChecksumBlobStoreFormat extends BlobStoreForm private static final int BUFFER_SIZE = 4096; - protected final XContentType xContentType; + private final XContentType xContentType; - protected final boolean compress; + private final boolean compress; private final String codec; + private final String blobNameFormat; + + private final CheckedFunction reader; + + private final NamedXContentRegistry namedXContentRegistry; + /** * @param codec codec name * @param blobNameFormat format of the blobname in {@link String#format} format @@ -74,7 +100,9 @@ public class ChecksumBlobStoreFormat extends BlobStoreForm */ public ChecksumBlobStoreFormat(String codec, String blobNameFormat, CheckedFunction reader, NamedXContentRegistry namedXContentRegistry, boolean compress, XContentType xContentType) { - super(blobNameFormat, reader, namedXContentRegistry); + this.reader = reader; + this.blobNameFormat = blobNameFormat; + this.namedXContentRegistry = namedXContentRegistry; this.xContentType = xContentType; this.compress = compress; this.codec = codec; @@ -91,6 +119,29 @@ public ChecksumBlobStoreFormat(String codec, String blobNameFormat, CheckedFunct this(codec, blobNameFormat, reader, namedXContentRegistry, compress, DEFAULT_X_CONTENT_TYPE); } + /** + * Reads and parses the blob with given name, applying name translation using the {link #blobName} method + * + * @param blobContainer blob container + * @param name name to be translated into + * @return parsed blob object + */ + public T read(BlobContainer blobContainer, String name) throws IOException { + String blobName = blobName(name); + return readBlob(blobContainer, blobName); + } + + /** + * Deletes obj in the blob container + */ + public void delete(BlobContainer blobContainer, String name) throws IOException { + blobContainer.deleteBlob(blobName(name)); + } + + public String blobName(String name) { + return String.format(Locale.ROOT, blobNameFormat, name); + } + /** * Reads blob with specified name without resolving the blobName using using {@link #blobName} method. * @@ -108,8 +159,10 @@ public T readBlob(BlobContainer blobContainer, String blobName) throws IOExcepti CodecUtil.checkHeader(indexInput, codec, VERSION, VERSION); long filePointer = indexInput.getFilePointer(); long contentSize = indexInput.length() - CodecUtil.footerLength() - filePointer; - BytesReference bytesReference = new BytesArray(bytes, (int) filePointer, (int) contentSize); - return read(bytesReference); + try (XContentParser parser = XContentHelper.createParser(namedXContentRegistry, LoggingDeprecationHandler.INSTANCE, + new BytesArray(bytes, (int) filePointer, (int) contentSize))) { + return reader.apply(parser); + } } catch (CorruptIndexException | IndexFormatTooOldException | IndexFormatTooNewException ex) { // we trick this into a dedicated exception with the original stacktrace throw new CorruptStateException(ex); @@ -156,7 +209,17 @@ public void write(T obj, BlobContainer blobContainer, String name) throws IOExce } private void writeTo(final T obj, final String blobName, final CheckedConsumer consumer) throws IOException { - final BytesReference bytes = write(obj); + final BytesReference bytes; + try (BytesStreamOutput bytesStreamOutput = new BytesStreamOutput()) { + if (compress) { + try (StreamOutput compressedStreamOutput = CompressorFactory.COMPRESSOR.streamOutput(bytesStreamOutput)) { + write(obj, compressedStreamOutput); + } + } else { + write(obj, bytesStreamOutput); + } + bytes = bytesStreamOutput.bytes(); + } try (ByteArrayOutputStream outputStream = new ByteArrayOutputStream()) { final String resourceDesc = "ChecksumBlobStoreFormat.writeBlob(blob=\"" + blobName + "\")"; try (OutputStreamIndexOutput indexOutput = new OutputStreamIndexOutput(resourceDesc, blobName, outputStream, BUFFER_SIZE)) { @@ -176,20 +239,7 @@ public void close() { } } - protected BytesReference write(T obj) throws IOException { - try (BytesStreamOutput bytesStreamOutput = new BytesStreamOutput()) { - if (compress) { - try (StreamOutput compressedStreamOutput = CompressorFactory.COMPRESSOR.streamOutput(bytesStreamOutput)) { - write(obj, compressedStreamOutput); - } - } else { - write(obj, bytesStreamOutput); - } - return bytesStreamOutput.bytes(); - } - } - - protected void write(T obj, StreamOutput streamOutput) throws IOException { + private void write(T obj, StreamOutput streamOutput) throws IOException { try (XContentBuilder builder = XContentFactory.contentBuilder(xContentType, streamOutput)) { builder.startObject(); obj.toXContent(builder, SNAPSHOT_ONLY_FORMAT_PARAMS); From bb2ec18f672d850c043633f154cc196196995b29 Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Tue, 21 May 2019 19:13:39 -0400 Subject: [PATCH 011/224] Fix off-by-one error in an index shard test There is an off-by-one error in this test. It leads to the recovery thread never being started, and that means joining on it will wait indefinitely. This commit addresses that by fixing the off-by-one error. Closes #42325 --- .../index/shard/IndexShardTests.java | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java index 64b0c0db1dc8c..64886af18332a 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java @@ -1736,7 +1736,6 @@ public void testLockingBeforeAndAfterRelocated() throws Exception { closeShards(shard); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/42325") public void testDelayedOperationsBeforeAndAfterRelocated() throws Exception { final IndexShard shard = newStartedShard(true); IndexShardTestCase.updateRoutingEntry(shard, ShardRoutingHelper.relocate(shard.routingEntry(), "other_node")); @@ -1754,12 +1753,11 @@ public void testDelayedOperationsBeforeAndAfterRelocated() throws Exception { recoveryThread.start(); final int numberOfAcquisitions = randomIntBetween(1, 10); - final int recoveryIndex = randomIntBetween(1, numberOfAcquisitions); + final List assertions = new ArrayList<>(numberOfAcquisitions); + final int recoveryIndex = randomIntBetween(0, numberOfAcquisitions - 1); for (int i = 0; i < numberOfAcquisitions; i++) { - final PlainActionFuture onLockAcquired; - final Runnable assertion; if (i < recoveryIndex) { final AtomicBoolean invoked = new AtomicBoolean(); onLockAcquired = new PlainActionFuture<>() { @@ -1777,26 +1775,29 @@ public void onFailure(Exception e) { } }; - assertion = () -> assertTrue(invoked.get()); + assertions.add(() -> assertTrue(invoked.get())); } else if (recoveryIndex == i) { startRecovery.countDown(); relocationStarted.await(); onLockAcquired = new PlainActionFuture<>(); - assertion = () -> { + assertions.add(() -> { final ExecutionException e = expectThrows(ExecutionException.class, () -> onLockAcquired.get(30, TimeUnit.SECONDS)); assertThat(e.getCause(), instanceOf(ShardNotInPrimaryModeException.class)); assertThat(e.getCause(), hasToString(containsString("shard is not in primary mode"))); - }; + }); } else { onLockAcquired = new PlainActionFuture<>(); - assertion = () -> { + assertions.add(() -> { final ExecutionException e = expectThrows(ExecutionException.class, () -> onLockAcquired.get(30, TimeUnit.SECONDS)); assertThat(e.getCause(), instanceOf(ShardNotInPrimaryModeException.class)); assertThat(e.getCause(), hasToString(containsString("shard is not in primary mode"))); - }; + }); } shard.acquirePrimaryOperationPermit(onLockAcquired, ThreadPool.Names.WRITE, "i_" + i); + } + + for (final Runnable assertion : assertions) { assertion.run(); } From c1aef4bd558a84bcab9f7cf485b72d6a5e5aa601 Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Tue, 21 May 2019 19:45:52 -0400 Subject: [PATCH 012/224] Estimate num history ops should always use translog (#42211) Currently, we ignore soft-deletes in peer recovery, thus estimateNumberOfHistoryOperations should always use translog. Relates #38904 --- .../elasticsearch/index/engine/InternalEngine.java | 11 ++--------- 1 file changed, 2 insertions(+), 9 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java index a85c4f981d1b3..24d1078510c0b 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java @@ -513,15 +513,8 @@ public Translog.Snapshot readHistoryOperations(String source, MapperService mapp * Returns the estimated number of history operations whose seq# at least the provided seq# in this engine. */ @Override - public int estimateNumberOfHistoryOperations(String source, MapperService mapperService, long startingSeqNo) throws IOException { - if (engineConfig.getIndexSettings().isSoftDeleteEnabled()) { - try (Translog.Snapshot snapshot = newChangesSnapshot(source, mapperService, Math.max(0, startingSeqNo), - Long.MAX_VALUE, false)) { - return snapshot.totalOperations(); - } - } else { - return getTranslog().estimateTotalOperationsFromMinSeq(startingSeqNo); - } + public int estimateNumberOfHistoryOperations(String source, MapperService mapperService, long startingSeqNo) { + return getTranslog().estimateTotalOperationsFromMinSeq(startingSeqNo); } @Override From 57859413eaf1f59357eb6a9875ca0ae51a76bbb3 Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Tue, 21 May 2019 19:54:46 -0400 Subject: [PATCH 013/224] Skip global checkpoint sync for closed indices (#41874) The verifying-before-close step ensures the global checkpoints on all shard copies are in sync; thus, we don' t need to sync global checkpoints for closed indices. Relate #33888 --- .../elasticsearch/index/shard/IndexShard.java | 4 +-- .../index/shard/IndexShardTests.java | 27 +++++++++++++++++++ .../indices/state/CloseIndexIT.java | 27 +++++++++++++++++++ 3 files changed, 56 insertions(+), 2 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java index da5ee8f8363ff..fdd95614756b7 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java @@ -2136,8 +2136,8 @@ public void maybeSyncGlobalCheckpoint(final String reason) { StreamSupport .stream(globalCheckpoints.values().spliterator(), false) .anyMatch(v -> v.value < globalCheckpoint); - // only sync if there is a shard lagging the primary - if (syncNeeded) { + // only sync if index is not closed and there is a shard lagging the primary + if (syncNeeded && indexSettings.getIndexMetaData().getState() == IndexMetaData.State.OPEN) { logger.trace("syncing global checkpoint for [{}]", reason); globalCheckpointSyncer.run(); } diff --git a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java index 64886af18332a..04ef68852cc3f 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java @@ -1140,6 +1140,33 @@ public void testGlobalCheckpointSync() throws IOException { closeShards(replicaShard, primaryShard); } + public void testClosedIndicesSkipSyncGlobalCheckpoint() throws Exception { + ShardId shardId = new ShardId("index", "_na_", 0); + IndexMetaData.Builder indexMetadata = IndexMetaData.builder("index") + .settings(Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1).put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 2)) + .state(IndexMetaData.State.CLOSE).primaryTerm(0, 1); + ShardRouting shardRouting = TestShardRouting.newShardRouting(shardId, randomAlphaOfLength(8), true, + ShardRoutingState.INITIALIZING, RecoverySource.EmptyStoreRecoverySource.INSTANCE); + AtomicBoolean synced = new AtomicBoolean(); + IndexShard primaryShard = newShard(shardRouting, indexMetadata.build(), null, new InternalEngineFactory(), + () -> synced.set(true), RetentionLeaseSyncer.EMPTY); + recoverShardFromStore(primaryShard); + IndexShard replicaShard = newShard(shardId, false); + recoverReplica(replicaShard, primaryShard, true); + int numDocs = between(1, 10); + for (int i = 0; i < numDocs; i++) { + indexDoc(primaryShard, "_doc", Integer.toString(i)); + } + assertThat(primaryShard.getLocalCheckpoint(), equalTo(numDocs - 1L)); + primaryShard.updateLocalCheckpointForShard(replicaShard.shardRouting.allocationId().getId(), primaryShard.getLocalCheckpoint()); + long globalCheckpointOnReplica = randomLongBetween(SequenceNumbers.NO_OPS_PERFORMED, primaryShard.getLocalCheckpoint()); + primaryShard.updateGlobalCheckpointForShard(replicaShard.shardRouting.allocationId().getId(), globalCheckpointOnReplica); + primaryShard.maybeSyncGlobalCheckpoint("test"); + assertFalse("closed indices should skip global checkpoint sync", synced.get()); + closeShards(primaryShard, replicaShard); + } + public void testRestoreLocalHistoryFromTranslogOnPromotion() throws IOException, InterruptedException { final IndexShard indexShard = newStartedShard(false); final int operations = 1024 - scaledRandomIntBetween(0, 1024); diff --git a/server/src/test/java/org/elasticsearch/indices/state/CloseIndexIT.java b/server/src/test/java/org/elasticsearch/indices/state/CloseIndexIT.java index 740034f12ecc5..6f666483b18d0 100644 --- a/server/src/test/java/org/elasticsearch/indices/state/CloseIndexIT.java +++ b/server/src/test/java/org/elasticsearch/indices/state/CloseIndexIT.java @@ -36,7 +36,9 @@ import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.indices.IndexClosedException; +import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.recovery.RecoveryState; import org.elasticsearch.test.BackgroundIndexer; import org.elasticsearch.test.ESIntegTestCase; @@ -421,6 +423,31 @@ public Settings onNodeStopped(String nodeName) throws Exception { } } + public void testResyncPropagatePrimaryTerm() throws Exception { + internalCluster().ensureAtLeastNumDataNodes(3); + final String indexName = "closed_indices_promotion"; + createIndex(indexName, Settings.builder() + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 2) + .build()); + indexRandom(randomBoolean(), randomBoolean(), randomBoolean(), IntStream.range(0, randomIntBetween(0, 50)) + .mapToObj(n -> client().prepareIndex(indexName, "_doc").setSource("num", n)).collect(toList())); + ensureGreen(indexName); + assertAcked(client().admin().indices().prepareClose(indexName)); + assertIndexIsClosed(indexName); + ensureGreen(indexName); + String nodeWithPrimary = clusterService().state().nodes().get(clusterService().state() + .routingTable().index(indexName).shard(0).primaryShard().currentNodeId()).getName(); + internalCluster().restartNode(nodeWithPrimary, new InternalTestCluster.RestartCallback()); + ensureGreen(indexName); + long primaryTerm = clusterService().state().metaData().index(indexName).primaryTerm(0); + for (String nodeName : internalCluster().nodesInclude(indexName)) { + IndexShard shard = internalCluster().getInstance(IndicesService.class, nodeName) + .indexService(resolveIndex(indexName)).getShard(0); + assertThat(shard.routingEntry().toString(), shard.getOperationPrimaryTerm(), equalTo(primaryTerm)); + } + } + static void assertIndexIsClosed(final String... indices) { final ClusterState clusterState = client().admin().cluster().prepareState().get().getState(); for (String index : indices) { From 75be2a669e1e7e38cfc0e7b55bf99c792fb8925f Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Tue, 21 May 2019 22:35:51 -0400 Subject: [PATCH 014/224] Peer recovery should flush at the end (#41660) Flushing at the end of a peer recovery (if needed) can bring these benefits: 1. Closing an index won't end up with the red state for a recovering replica should always be ready for closing whether it performs the verifying-before-close step or not. 2. Good opportunities to compact store (i.e., flushing and merging Lucene, and trimming translog) Closes #40024 Closes #39588 --- .../indices/recovery/RecoveryTarget.java | 10 +++++ .../indices/recovery/IndexRecoveryIT.java | 42 +++++++++++++++++++ 2 files changed, 52 insertions(+) diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java index 1f2c9a0f578cc..b3c6d12ab96e3 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java @@ -27,6 +27,7 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.admin.indices.flush.FlushRequest; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.bytes.BytesReference; @@ -38,6 +39,7 @@ import org.elasticsearch.index.mapper.MapperException; import org.elasticsearch.index.seqno.ReplicationTracker; import org.elasticsearch.index.seqno.RetentionLeases; +import org.elasticsearch.index.seqno.SequenceNumbers; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.IndexShardNotRecoveringException; import org.elasticsearch.index.shard.IndexShardState; @@ -298,11 +300,19 @@ public void finalizeRecovery(final long globalCheckpoint, ActionListener l // Persist the global checkpoint. indexShard.sync(); indexShard.persistRetentionLeases(); + if (hasUncommittedOperations()) { + indexShard.flush(new FlushRequest().force(true).waitIfOngoing(true)); + } indexShard.finalizeRecovery(); return null; }); } + private boolean hasUncommittedOperations() throws IOException { + long localCheckpointOfCommit = Long.parseLong(indexShard.commitStats().getUserData().get(SequenceNumbers.LOCAL_CHECKPOINT_KEY)); + return indexShard.estimateNumberOfHistoryOperations("peer-recovery", localCheckpointOfCommit + 1) > 0; + } + @Override public void handoffPrimaryContext(final ReplicationTracker.PrimaryContext primaryContext) { indexShard.activateWithPrimaryContext(primaryContext); diff --git a/server/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java b/server/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java index 4196472334ca9..3130cebad7097 100644 --- a/server/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java +++ b/server/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java @@ -31,6 +31,7 @@ import org.elasticsearch.action.admin.indices.recovery.RecoveryResponse; import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags; import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse; +import org.elasticsearch.action.admin.indices.stats.ShardStats; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.support.WriteRequest.RefreshPolicy; @@ -52,9 +53,12 @@ import org.elasticsearch.index.analysis.TokenFilterFactory; import org.elasticsearch.index.mapper.MapperParsingException; import org.elasticsearch.index.recovery.RecoveryStats; +import org.elasticsearch.index.seqno.SequenceNumbers; +import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.store.Store; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.analysis.AnalysisModule; +import org.elasticsearch.indices.flush.SyncedFlushUtil; import org.elasticsearch.indices.recovery.RecoveryState.Stage; import org.elasticsearch.node.RecoverySettingsChunkSizePlugin; import org.elasticsearch.plugins.AnalysisPlugin; @@ -84,14 +88,19 @@ import java.util.Collection; import java.util.List; import java.util.Map; +import java.util.Set; import java.util.concurrent.CountDownLatch; import java.util.concurrent.ExecutionException; import java.util.concurrent.Semaphore; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import java.util.function.Consumer; +import java.util.stream.Collectors; +import java.util.stream.IntStream; +import java.util.stream.Stream; import static java.util.Collections.singletonMap; +import static java.util.stream.Collectors.toList; import static org.elasticsearch.node.RecoverySettingsChunkSizePlugin.CHUNK_SIZE_SETTING; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; @@ -910,6 +919,39 @@ public void testDoNotInfinitelyWaitForMapping() { assertHitCount(client().prepareSearch().get(), numDocs); } + public void testRecoveryFlushReplica() throws Exception { + internalCluster().ensureAtLeastNumDataNodes(3); + String indexName = "test-index"; + createIndex(indexName, Settings.builder().put("index.number_of_replicas", 0).put("index.number_of_shards", 1).build()); + int numDocs = randomIntBetween(0, 10); + indexRandom(randomBoolean(), false, randomBoolean(), IntStream.range(0, numDocs) + .mapToObj(n -> client().prepareIndex(indexName, "_doc").setSource("num", n)).collect(toList())); + assertAcked(client().admin().indices().prepareUpdateSettings(indexName) + .setSettings(Settings.builder().put("index.number_of_replicas", 1))); + ensureGreen(indexName); + ShardId shardId = null; + for (ShardStats shardStats : client().admin().indices().prepareStats(indexName).get().getIndex(indexName).getShards()) { + shardId = shardStats.getShardRouting().shardId(); + if (shardStats.getShardRouting().primary() == false) { + assertThat(shardStats.getCommitStats().getNumDocs(), equalTo(numDocs)); + SequenceNumbers.CommitInfo commitInfo = SequenceNumbers.loadSeqNoInfoFromLuceneCommit( + shardStats.getCommitStats().getUserData().entrySet()); + assertThat(commitInfo.localCheckpoint, equalTo(shardStats.getSeqNoStats().getLocalCheckpoint())); + assertThat(commitInfo.maxSeqNo, equalTo(shardStats.getSeqNoStats().getMaxSeqNo())); + } + } + SyncedFlushUtil.attemptSyncedFlush(logger, internalCluster(), shardId); + assertBusy(() -> assertThat(client().admin().indices().prepareSyncedFlush(indexName).get().failedShards(), equalTo(0))); + assertAcked(client().admin().indices().prepareUpdateSettings(indexName) + .setSettings(Settings.builder().put("index.number_of_replicas", 2))); + ensureGreen(indexName); + // Recovery should keep syncId if no indexing activity on the primary after synced-flush. + Set syncIds = Stream.of(client().admin().indices().prepareStats(indexName).get().getIndex(indexName).getShards()) + .map(shardStats -> shardStats.getCommitStats().syncId()) + .collect(Collectors.toSet()); + assertThat(syncIds, hasSize(1)); + } + public static final class TestAnalysisPlugin extends Plugin implements AnalysisPlugin { final AtomicBoolean throwParsingError = new AtomicBoolean(); @Override From e5722145a629bd2afb8499f76ac5a5b2c9136ac2 Mon Sep 17 00:00:00 2001 From: Julie Tibshirani Date: Tue, 21 May 2019 20:35:39 -0700 Subject: [PATCH 015/224] Remove the 6.7 version constants. (#42039) This PR removes all constants of the form `Version.V_6_7_*`, since master no longer needs to account for them. Relates to #41164. --- .../reindex/TransportUpdateByQueryAction.java | 10 ++---- .../main/java/org/elasticsearch/Build.java | 7 +--- .../elasticsearch/ElasticsearchException.java | 8 ++--- .../main/java/org/elasticsearch/Version.java | 12 ------- .../index/engine/ReadOnlyEngine.java | 2 +- .../recovery/RecoverySourceHandler.java | 4 +-- .../java/org/elasticsearch/BuildTests.java | 31 +---------------- .../ExceptionSerializationTests.java | 2 +- .../java/org/elasticsearch/VersionTests.java | 1 - .../action/shard/ShardStateActionTests.java | 6 +--- .../cluster/block/ClusterBlockTests.java | 2 +- .../xpack/core/ccr/AutoFollowStats.java | 25 +++++--------- .../action/PutAutoFollowPatternAction.java | 33 ++----------------- .../core/ccr/action/PutFollowAction.java | 9 ++--- .../deprecation/DeprecationInfoAction.java | 12 ++----- .../xpack/core/ml/MlMetadata.java | 20 +++-------- .../security/authc/TokenServiceTests.java | 12 +++---- 17 files changed, 37 insertions(+), 159 deletions(-) diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/TransportUpdateByQueryAction.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/TransportUpdateByQueryAction.java index 5ea175d11a7cb..410ae1b51116d 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/TransportUpdateByQueryAction.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/TransportUpdateByQueryAction.java @@ -20,7 +20,6 @@ package org.elasticsearch.index.reindex; import org.apache.logging.log4j.Logger; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.support.ActionFilters; @@ -82,18 +81,13 @@ protected void doExecute(Task task, UpdateByQueryRequest request, ActionListener */ static class AsyncIndexBySearchAction extends AbstractAsyncBulkByScrollAction { - private final boolean useSeqNoForCAS; - AsyncIndexBySearchAction(BulkByScrollTask task, Logger logger, ParentTaskAssigningClient client, ThreadPool threadPool, TransportUpdateByQueryAction action, UpdateByQueryRequest request, ClusterState clusterState, ActionListener listener) { super(task, - // not all nodes support sequence number powered optimistic concurrency control, we fall back to version - clusterState.nodes().getMinNodeVersion().onOrAfter(Version.V_6_7_0) == false, - // all nodes support sequence number powered optimistic concurrency control and we can use it - clusterState.nodes().getMinNodeVersion().onOrAfter(Version.V_6_7_0), + // use sequence number powered optimistic concurrency control + false, true, logger, client, threadPool, action, request, listener); - useSeqNoForCAS = clusterState.nodes().getMinNodeVersion().onOrAfter(Version.V_6_7_0); } @Override diff --git a/server/src/main/java/org/elasticsearch/Build.java b/server/src/main/java/org/elasticsearch/Build.java index 1b1cd8d3e720a..1a1ee2744f77a 100644 --- a/server/src/main/java/org/elasticsearch/Build.java +++ b/server/src/main/java/org/elasticsearch/Build.java @@ -254,12 +254,7 @@ public static void writeBuild(Build build, StreamOutput out) throws IOException out.writeString(build.flavor().displayName()); } if (out.getVersion().onOrAfter(Version.V_6_3_0)) { - final Type buildType; - if (out.getVersion().before(Version.V_6_7_0) && build.type() == Type.DOCKER) { - buildType = Type.TAR; - } else { - buildType = build.type(); - } + final Type buildType = build.type(); out.writeString(buildType.displayName()); } out.writeString(build.shortHash()); diff --git a/server/src/main/java/org/elasticsearch/ElasticsearchException.java b/server/src/main/java/org/elasticsearch/ElasticsearchException.java index a2e53a1189f1b..260b443a6a557 100644 --- a/server/src/main/java/org/elasticsearch/ElasticsearchException.java +++ b/server/src/main/java/org/elasticsearch/ElasticsearchException.java @@ -1010,19 +1010,19 @@ private enum ElasticsearchExceptionHandle { COORDINATION_STATE_REJECTED_EXCEPTION(org.elasticsearch.cluster.coordination.CoordinationStateRejectedException.class, org.elasticsearch.cluster.coordination.CoordinationStateRejectedException::new, 150, Version.V_7_0_0), SNAPSHOT_IN_PROGRESS_EXCEPTION(org.elasticsearch.snapshots.SnapshotInProgressException.class, - org.elasticsearch.snapshots.SnapshotInProgressException::new, 151, Version.V_6_7_0), + org.elasticsearch.snapshots.SnapshotInProgressException::new, 151, UNKNOWN_VERSION_ADDED), NO_SUCH_REMOTE_CLUSTER_EXCEPTION(org.elasticsearch.transport.NoSuchRemoteClusterException.class, - org.elasticsearch.transport.NoSuchRemoteClusterException::new, 152, Version.V_6_7_0), + org.elasticsearch.transport.NoSuchRemoteClusterException::new, 152, UNKNOWN_VERSION_ADDED), RETENTION_LEASE_ALREADY_EXISTS_EXCEPTION( org.elasticsearch.index.seqno.RetentionLeaseAlreadyExistsException.class, org.elasticsearch.index.seqno.RetentionLeaseAlreadyExistsException::new, 153, - Version.V_6_7_0), + UNKNOWN_VERSION_ADDED), RETENTION_LEASE_NOT_FOUND_EXCEPTION( org.elasticsearch.index.seqno.RetentionLeaseNotFoundException.class, org.elasticsearch.index.seqno.RetentionLeaseNotFoundException::new, 154, - Version.V_6_7_0), + UNKNOWN_VERSION_ADDED), SHARD_NOT_IN_PRIMARY_MODE_EXCEPTION( org.elasticsearch.index.shard.ShardNotInPrimaryModeException.class, org.elasticsearch.index.shard.ShardNotInPrimaryModeException::new, diff --git a/server/src/main/java/org/elasticsearch/Version.java b/server/src/main/java/org/elasticsearch/Version.java index 90b7ae869e811..5089a7fe0cec9 100644 --- a/server/src/main/java/org/elasticsearch/Version.java +++ b/server/src/main/java/org/elasticsearch/Version.java @@ -78,12 +78,6 @@ public class Version implements Comparable, ToXContentFragment { public static final Version V_6_6_1 = new Version(V_6_6_1_ID, org.apache.lucene.util.Version.LUCENE_7_6_0); public static final int V_6_6_2_ID = 6060299; public static final Version V_6_6_2 = new Version(V_6_6_2_ID, org.apache.lucene.util.Version.LUCENE_7_6_0); - public static final int V_6_7_0_ID = 6070099; - public static final Version V_6_7_0 = new Version(V_6_7_0_ID, org.apache.lucene.util.Version.LUCENE_7_7_0); - public static final int V_6_7_1_ID = 6070199; - public static final Version V_6_7_1 = new Version(V_6_7_1_ID, org.apache.lucene.util.Version.LUCENE_7_7_0); - public static final int V_6_7_2_ID = 6070299; - public static final Version V_6_7_2 = new Version(V_6_7_2_ID, org.apache.lucene.util.Version.LUCENE_7_7_0); public static final int V_6_8_0_ID = 6080099; public static final Version V_6_8_0 = new Version(V_6_8_0_ID, org.apache.lucene.util.Version.LUCENE_7_7_0); public static final int V_6_8_1_ID = 6080199; @@ -130,12 +124,6 @@ public static Version fromId(int id) { return V_6_8_1; case V_6_8_0_ID: return V_6_8_0; - case V_6_7_1_ID: - return V_6_7_1; - case V_6_7_2_ID: - return V_6_7_2; - case V_6_7_0_ID: - return V_6_7_0; case V_6_6_2_ID: return V_6_6_2; case V_6_6_1_ID: diff --git a/server/src/main/java/org/elasticsearch/index/engine/ReadOnlyEngine.java b/server/src/main/java/org/elasticsearch/index/engine/ReadOnlyEngine.java index b981bdb8a8421..e7e0c4d927851 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/ReadOnlyEngine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/ReadOnlyEngine.java @@ -131,7 +131,7 @@ protected void ensureMaxSeqNoEqualsToGlobalCheckpoint(final SeqNoStats seqNoStat // that guarantee that all operations have been flushed to Lucene. final Version indexVersionCreated = engineConfig.getIndexSettings().getIndexVersionCreated(); if (indexVersionCreated.onOrAfter(Version.V_7_2_0) || - (seqNoStats.getGlobalCheckpoint() != SequenceNumbers.UNASSIGNED_SEQ_NO && indexVersionCreated.onOrAfter(Version.V_6_7_0))) { + (seqNoStats.getGlobalCheckpoint() != SequenceNumbers.UNASSIGNED_SEQ_NO)) { if (seqNoStats.getMaxSeqNo() != seqNoStats.getGlobalCheckpoint()) { throw new IllegalStateException("Maximum sequence number [" + seqNoStats.getMaxSeqNo() + "] from last commit does not match global checkpoint [" + seqNoStats.getGlobalCheckpoint() + "]"); diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java b/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java index aad460b821e62..4e82798e34128 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java @@ -30,7 +30,6 @@ import org.apache.lucene.store.RateLimiter; import org.apache.lucene.util.ArrayUtil; import org.elasticsearch.ExceptionsHelper; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.StepListener; import org.elasticsearch.cluster.routing.IndexShardRoutingTable; @@ -112,8 +111,7 @@ public RecoverySourceHandler(final IndexShard shard, RecoveryTargetHandler recov this.shardId = this.request.shardId().id(); this.logger = Loggers.getLogger(getClass(), request.shardId(), "recover to " + request.targetNode().getName()); this.chunkSizeInBytes = fileChunkSizeInBytes; - // if the target is on an old version, it won't be able to handle out-of-order file chunks. - this.maxConcurrentFileChunks = request.targetNode().getVersion().onOrAfter(Version.V_6_7_0) ? maxConcurrentFileChunks : 1; + this.maxConcurrentFileChunks = maxConcurrentFileChunks; } public StartRecoveryRequest getRequest() { diff --git a/server/src/test/java/org/elasticsearch/BuildTests.java b/server/src/test/java/org/elasticsearch/BuildTests.java index 59e289b9e98ef..1945c51d1514f 100644 --- a/server/src/test/java/org/elasticsearch/BuildTests.java +++ b/server/src/test/java/org/elasticsearch/BuildTests.java @@ -30,7 +30,6 @@ import java.io.InputStream; import java.net.URL; import java.util.Arrays; -import java.util.List; import java.util.Objects; import java.util.Set; import java.util.stream.Collectors; @@ -193,35 +192,7 @@ public void testSerialization() { throw new AssertionError(); }); } - - public void testSerializationBWC() throws IOException { - final WriteableBuild dockerBuild = new WriteableBuild(new Build(randomFrom(Build.Flavor.values()), Build.Type.DOCKER, - randomAlphaOfLength(6), randomAlphaOfLength(6), randomBoolean(), randomAlphaOfLength(6))); - - final List versions = Version.getDeclaredVersions(Version.class); - final Version post63Pre67Version = randomFrom(versions.stream() - .filter(v -> v.onOrAfter(Version.V_6_3_0) && v.before(Version.V_6_7_0)).collect(Collectors.toList())); - final Version post67Pre70Version = randomFrom(versions.stream() - .filter(v -> v.onOrAfter(Version.V_6_7_0) && v.before(Version.V_7_0_0)).collect(Collectors.toList())); - final Version post70Version = randomFrom(versions.stream().filter(v -> v.onOrAfter(Version.V_7_0_0)).collect(Collectors.toList())); - - final WriteableBuild post63pre67 = copyWriteable(dockerBuild, writableRegistry(), WriteableBuild::new, post63Pre67Version); - final WriteableBuild post67pre70 = copyWriteable(dockerBuild, writableRegistry(), WriteableBuild::new, post67Pre70Version); - final WriteableBuild post70 = copyWriteable(dockerBuild, writableRegistry(), WriteableBuild::new, post70Version); - - assertThat(post63pre67.build.flavor(), equalTo(dockerBuild.build.flavor())); - assertThat(post67pre70.build.flavor(), equalTo(dockerBuild.build.flavor())); - assertThat(post70.build.flavor(), equalTo(dockerBuild.build.flavor())); - - assertThat(post63pre67.build.type(), equalTo(Build.Type.TAR)); - assertThat(post67pre70.build.type(), equalTo(dockerBuild.build.type())); - assertThat(post70.build.type(), equalTo(dockerBuild.build.type())); - - assertThat(post63pre67.build.getQualifiedVersion(), equalTo(post63Pre67Version.toString())); - assertThat(post67pre70.build.getQualifiedVersion(), equalTo(post67Pre70Version.toString())); - assertThat(post70.build.getQualifiedVersion(), equalTo(dockerBuild.build.getQualifiedVersion())); - } - + public void testFlavorParsing() { for (final Build.Flavor flavor : Build.Flavor.values()) { // strict or not should not impact parsing at all here diff --git a/server/src/test/java/org/elasticsearch/ExceptionSerializationTests.java b/server/src/test/java/org/elasticsearch/ExceptionSerializationTests.java index 5b33068013965..61d8532b5652a 100644 --- a/server/src/test/java/org/elasticsearch/ExceptionSerializationTests.java +++ b/server/src/test/java/org/elasticsearch/ExceptionSerializationTests.java @@ -888,7 +888,7 @@ public void testShardLockObtainFailedException() throws IOException { public void testSnapshotInProgressException() throws IOException { SnapshotInProgressException orig = new SnapshotInProgressException("boom"); - Version version = VersionUtils.randomVersionBetween(random(), Version.V_6_7_0, Version.CURRENT); + Version version = VersionUtils.randomIndexCompatibleVersion(random()); SnapshotInProgressException ex = serialize(orig, version); assertEquals(orig.getMessage(), ex.getMessage()); } diff --git a/server/src/test/java/org/elasticsearch/VersionTests.java b/server/src/test/java/org/elasticsearch/VersionTests.java index 21a18e4a26ba5..66d7af0a4b20e 100644 --- a/server/src/test/java/org/elasticsearch/VersionTests.java +++ b/server/src/test/java/org/elasticsearch/VersionTests.java @@ -346,7 +346,6 @@ public static void assertUnknownVersion(Version version) { public void testIsCompatible() { assertTrue(isCompatible(Version.CURRENT, Version.CURRENT.minimumCompatibilityVersion())); assertFalse(isCompatible(Version.V_6_6_0, Version.V_7_0_0)); - assertFalse(isCompatible(Version.V_6_7_0, Version.V_7_0_0)); assertTrue(isCompatible(Version.V_6_8_0, Version.V_7_0_0)); assertFalse(isCompatible(Version.fromId(2000099), Version.V_7_0_0)); assertFalse(isCompatible(Version.fromId(2000099), Version.V_6_5_0)); diff --git a/server/src/test/java/org/elasticsearch/cluster/action/shard/ShardStateActionTests.java b/server/src/test/java/org/elasticsearch/cluster/action/shard/ShardStateActionTests.java index bcd080e0b45fd..7eb995e04f3fa 100644 --- a/server/src/test/java/org/elasticsearch/cluster/action/shard/ShardStateActionTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/action/shard/ShardStateActionTests.java @@ -519,11 +519,7 @@ public void testStartedShardEntrySerialization() throws Exception { final StartedShardEntry deserialized = new StartedShardEntry(in); assertThat(deserialized.shardId, equalTo(shardId)); assertThat(deserialized.allocationId, equalTo(allocationId)); - if (version.onOrAfter(Version.V_6_7_0)) { - assertThat(deserialized.primaryTerm, equalTo(primaryTerm)); - } else { - assertThat(deserialized.primaryTerm, equalTo(0L)); - } + assertThat(deserialized.primaryTerm, equalTo(primaryTerm)); assertThat(deserialized.message, equalTo(message)); } } diff --git a/server/src/test/java/org/elasticsearch/cluster/block/ClusterBlockTests.java b/server/src/test/java/org/elasticsearch/cluster/block/ClusterBlockTests.java index 51a34d94b3a05..8d68684c9c368 100644 --- a/server/src/test/java/org/elasticsearch/cluster/block/ClusterBlockTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/block/ClusterBlockTests.java @@ -130,7 +130,7 @@ private ClusterBlock randomClusterBlock() { } private ClusterBlock randomClusterBlock(final Version version) { - final String uuid = (version.onOrAfter(Version.V_6_7_0) && randomBoolean()) ? UUIDs.randomBase64UUID() : null; + final String uuid = randomBoolean() ? UUIDs.randomBase64UUID() : null; final List levels = Arrays.asList(ClusterBlockLevel.values()); return new ClusterBlock(randomInt(), uuid, "cluster block #" + randomInt(), randomBoolean(), randomBoolean(), randomBoolean(), randomFrom(RestStatus.values()), copyOf(randomSubsetOf(randomIntBetween(1, levels.size()), levels))); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/AutoFollowStats.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/AutoFollowStats.java index 600bd5fced3ae..6c605ede85e24 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/AutoFollowStats.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/AutoFollowStats.java @@ -116,15 +116,9 @@ public AutoFollowStats(StreamInput in) throws IOException { numberOfFailedFollowIndices = in.readVLong(); numberOfFailedRemoteClusterStateRequests = in.readVLong(); numberOfSuccessfulFollowIndices = in.readVLong(); - if (in.getVersion().onOrAfter(Version.V_6_7_0)) { - // note: the casts to the following Writeable.Reader instances are needed by some IDEs (e.g. Eclipse 4.8) as a compiler help - recentAutoFollowErrors = new TreeMap<>(in.readMap((Writeable.Reader) StreamInput::readString, - (Writeable.Reader>) in1 -> new Tuple<>(in1.readZLong(), in1.readException()))); - } else { - // note: the casts to the following Writeable.Reader instances are needed by some IDEs (e.g. Eclipse 4.8) as a compiler help - recentAutoFollowErrors = new TreeMap<>(in.readMap((Writeable.Reader) StreamInput::readString, - (Writeable.Reader>) in1 -> new Tuple<>(-1L, in1.readException()))); - } + // note: the casts to the following Writeable.Reader instances are needed by some IDEs (e.g. Eclipse 4.8) as a compiler help + recentAutoFollowErrors = new TreeMap<>(in.readMap((Writeable.Reader) StreamInput::readString, + (Writeable.Reader>) in1 -> new Tuple<>(in1.readZLong(), in1.readException()))); if (in.getVersion().onOrAfter(Version.V_6_6_0)) { autoFollowedClusters = new TreeMap<>(in.readMap(StreamInput::readString, AutoFollowedCluster::new)); } else { @@ -137,14 +131,11 @@ public void writeTo(StreamOutput out) throws IOException { out.writeVLong(numberOfFailedFollowIndices); out.writeVLong(numberOfFailedRemoteClusterStateRequests); out.writeVLong(numberOfSuccessfulFollowIndices); - if (out.getVersion().onOrAfter(Version.V_6_7_0)) { - out.writeMap(recentAutoFollowErrors, StreamOutput::writeString, (out1, value) -> { - out1.writeZLong(value.v1()); - out1.writeException(value.v2()); - }); - } else { - out.writeMap(recentAutoFollowErrors, StreamOutput::writeString, (out1, value) -> out1.writeException(value.v2())); - } + out.writeMap(recentAutoFollowErrors, StreamOutput::writeString, (out1, value) -> { + out1.writeZLong(value.v1()); + out1.writeException(value.v2()); + }); + if (out.getVersion().onOrAfter(Version.V_6_6_0)) { out.writeMap(autoFollowedClusters, StreamOutput::writeString, (out1, value) -> value.writeTo(out1)); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/PutAutoFollowPatternAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/PutAutoFollowPatternAction.java index ec946ce51e821..f26e8d7f82a51 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/PutAutoFollowPatternAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/PutAutoFollowPatternAction.java @@ -5,14 +5,12 @@ */ package org.elasticsearch.xpack.core.ccr.action; -import org.elasticsearch.Version; import org.elasticsearch.action.Action; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.support.master.AcknowledgedRequest; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -153,21 +151,7 @@ public Request(StreamInput in) throws IOException { remoteCluster = in.readString(); leaderIndexPatterns = in.readStringList(); followIndexNamePattern = in.readOptionalString(); - if (in.getVersion().onOrAfter(Version.V_6_7_0)) { - parameters = new FollowParameters(in); - } else { - parameters = new FollowParameters(); - parameters.maxReadRequestOperationCount = in.readOptionalVInt(); - parameters.maxReadRequestSize = in.readOptionalWriteable(ByteSizeValue::new); - parameters.maxOutstandingReadRequests = in.readOptionalVInt(); - parameters.maxWriteRequestOperationCount = in.readOptionalVInt(); - parameters.maxWriteRequestSize = in.readOptionalWriteable(ByteSizeValue::new); - parameters.maxOutstandingWriteRequests = in.readOptionalVInt(); - parameters.maxWriteBufferCount = in.readOptionalVInt(); - parameters.maxWriteBufferSize = in.readOptionalWriteable(ByteSizeValue::new); - parameters.maxRetryDelay = in.readOptionalTimeValue(); - parameters.readPollTimeout = in.readOptionalTimeValue(); - } + parameters = new FollowParameters(in); } @Override @@ -177,20 +161,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeString(remoteCluster); out.writeStringCollection(leaderIndexPatterns); out.writeOptionalString(followIndexNamePattern); - if (out.getVersion().onOrAfter(Version.V_6_7_0)) { - parameters.writeTo(out); - } else { - out.writeOptionalVInt(parameters.maxReadRequestOperationCount); - out.writeOptionalWriteable(parameters.maxReadRequestSize); - out.writeOptionalVInt(parameters.maxOutstandingReadRequests); - out.writeOptionalVInt(parameters.maxWriteRequestOperationCount); - out.writeOptionalWriteable(parameters.maxWriteRequestSize); - out.writeOptionalVInt(parameters.maxOutstandingWriteRequests); - out.writeOptionalVInt(parameters.maxWriteBufferCount); - out.writeOptionalWriteable(parameters.maxWriteBufferSize); - out.writeOptionalTimeValue(parameters.maxRetryDelay); - out.writeOptionalTimeValue(parameters.readPollTimeout); - } + parameters.writeTo(out); } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/PutFollowAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/PutFollowAction.java index 89c18a9824ab4..4d20e6d820de2 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/PutFollowAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/PutFollowAction.java @@ -6,7 +6,6 @@ package org.elasticsearch.xpack.core.ccr.action; -import org.elasticsearch.Version; import org.elasticsearch.action.Action; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionResponse; @@ -169,9 +168,7 @@ public Request(StreamInput in) throws IOException { this.leaderIndex = in.readString(); this.followerIndex = in.readString(); this.parameters = new FollowParameters(in); - if (in.getVersion().onOrAfter(Version.V_6_7_0)) { - waitForActiveShards(ActiveShardCount.readFrom(in)); - } + waitForActiveShards(ActiveShardCount.readFrom(in)); } @Override @@ -181,9 +178,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeString(leaderIndex); out.writeString(followerIndex); parameters.writeTo(out); - if (out.getVersion().onOrAfter(Version.V_6_7_0)) { - waitForActiveShards.writeTo(out); - } + waitForActiveShards.writeTo(out); } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/deprecation/DeprecationInfoAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/deprecation/DeprecationInfoAction.java index 28aa09f6c1efb..54d260e32532f 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/deprecation/DeprecationInfoAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/deprecation/DeprecationInfoAction.java @@ -5,7 +5,6 @@ */ package org.elasticsearch.xpack.core.deprecation; -import org.elasticsearch.Version; import org.elasticsearch.action.Action; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionResponse; @@ -29,7 +28,6 @@ import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; -import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -125,11 +123,7 @@ public void readFrom(StreamInput in) throws IOException { clusterSettingsIssues = in.readList(DeprecationIssue::new); nodeSettingsIssues = in.readList(DeprecationIssue::new); indexSettingsIssues = in.readMapOfLists(StreamInput::readString, DeprecationIssue::new); - if (in.getVersion().onOrAfter(Version.V_6_7_0)) { - mlSettingsIssues = in.readList(DeprecationIssue::new); - } else { - mlSettingsIssues = Collections.emptyList(); - } + mlSettingsIssues = in.readList(DeprecationIssue::new); } @Override @@ -138,9 +132,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeList(clusterSettingsIssues); out.writeList(nodeSettingsIssues); out.writeMapOfLists(indexSettingsIssues, StreamOutput::writeString, (o, v) -> v.writeTo(o)); - if (out.getVersion().onOrAfter(Version.V_6_7_0)) { - out.writeList(mlSettingsIssues); - } + out.writeList(mlSettingsIssues); } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlMetadata.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlMetadata.java index 0f502577195dd..dfe5560da3303 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlMetadata.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlMetadata.java @@ -138,20 +138,14 @@ public MlMetadata(StreamInput in) throws IOException { } this.datafeeds = datafeeds; this.groupOrJobLookup = new GroupOrJobLookup(jobs.values()); - if (in.getVersion().onOrAfter(Version.V_6_7_0)) { - this.upgradeMode = in.readBoolean(); - } else { - this.upgradeMode = false; - } + this.upgradeMode = in.readBoolean(); } @Override public void writeTo(StreamOutput out) throws IOException { writeMap(jobs, out); writeMap(datafeeds, out); - if (out.getVersion().onOrAfter(Version.V_6_7_0)) { - out.writeBoolean(upgradeMode); - } + out.writeBoolean(upgradeMode); } private static void writeMap(Map map, StreamOutput out) throws IOException { @@ -202,11 +196,7 @@ public MlMetadataDiff(StreamInput in) throws IOException { MlMetadataDiff::readJobDiffFrom); this.datafeeds = DiffableUtils.readJdkMapDiff(in, DiffableUtils.getStringKeySerializer(), DatafeedConfig::new, MlMetadataDiff::readDatafeedDiffFrom); - if (in.getVersion().onOrAfter(Version.V_6_7_0)) { - upgradeMode = in.readBoolean(); - } else { - upgradeMode = false; - } + upgradeMode = in.readBoolean(); } /** @@ -225,9 +215,7 @@ public MetaData.Custom apply(MetaData.Custom part) { public void writeTo(StreamOutput out) throws IOException { jobs.writeTo(out); datafeeds.writeTo(out); - if (out.getVersion().onOrAfter(Version.V_6_7_0)) { - out.writeBoolean(upgradeMode); - } + out.writeBoolean(upgradeMode); } @Override diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/TokenServiceTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/TokenServiceTests.java index 42101b1f4ec97..49796333098ff 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/TokenServiceTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/TokenServiceTests.java @@ -137,7 +137,7 @@ public void setupClient() { // tokens docs on a separate index), let's test the TokenService works in a mixed cluster with nodes with versions prior to these // developments if (randomBoolean()) { - oldNode = addAnotherDataNodeWithVersion(this.clusterService, randomFrom(Version.V_6_7_0, Version.V_7_0_0)); + oldNode = addAnotherDataNodeWithVersion(this.clusterService, randomFrom(Version.V_7_0_0, Version.V_7_1_0)); } } @@ -211,7 +211,7 @@ public void testRotateKey() throws Exception { TokenService tokenService = createTokenService(tokenServiceEnabledSettings, systemUTC()); // This test only makes sense in mixed clusters with pre v7.2.0 nodes where the Key is actually used if (null == oldNode) { - oldNode = addAnotherDataNodeWithVersion(this.clusterService, randomFrom(Version.V_6_7_0, Version.V_7_1_0)); + oldNode = addAnotherDataNodeWithVersion(this.clusterService, randomFrom(Version.V_7_0_0, Version.V_7_1_0)); } Authentication authentication = new Authentication(new User("joe", "admin"), new RealmRef("native_realm", "native", "node1"), null); PlainActionFuture> tokenFuture = new PlainActionFuture<>(); @@ -272,7 +272,7 @@ public void testKeyExchange() throws Exception { TokenService tokenService = createTokenService(tokenServiceEnabledSettings, systemUTC()); // This test only makes sense in mixed clusters with pre v7.2.0 nodes where the Key is actually used if (null == oldNode) { - oldNode = addAnotherDataNodeWithVersion(this.clusterService, randomFrom(Version.V_6_7_0, Version.V_7_1_0)); + oldNode = addAnotherDataNodeWithVersion(this.clusterService, randomFrom(Version.V_7_0_0, Version.V_7_1_0)); } int numRotations = randomIntBetween(1, 5); for (int i = 0; i < numRotations; i++) { @@ -314,7 +314,7 @@ public void testPruneKeys() throws Exception { TokenService tokenService = createTokenService(tokenServiceEnabledSettings, systemUTC()); // This test only makes sense in mixed clusters with pre v7.2.0 nodes where the Key is actually used if (null == oldNode) { - oldNode = addAnotherDataNodeWithVersion(this.clusterService, randomFrom(Version.V_6_7_0, Version.V_7_1_0)); + oldNode = addAnotherDataNodeWithVersion(this.clusterService, randomFrom(Version.V_7_0_0, Version.V_7_1_0)); } Authentication authentication = new Authentication(new User("joe", "admin"), new RealmRef("native_realm", "native", "node1"), null); PlainActionFuture> tokenFuture = new PlainActionFuture<>(); @@ -384,7 +384,7 @@ public void testPassphraseWorks() throws Exception { TokenService tokenService = createTokenService(tokenServiceEnabledSettings, systemUTC()); // This test only makes sense in mixed clusters with pre v7.1.0 nodes where the Key is actually used if (null == oldNode) { - oldNode = addAnotherDataNodeWithVersion(this.clusterService, randomFrom(Version.V_6_7_0, Version.V_7_1_0)); + oldNode = addAnotherDataNodeWithVersion(this.clusterService, randomFrom(Version.V_7_0_0, Version.V_7_1_0)); } Authentication authentication = new Authentication(new User("joe", "admin"), new RealmRef("native_realm", "native", "node1"), null); PlainActionFuture> tokenFuture = new PlainActionFuture<>(); @@ -418,7 +418,7 @@ public void testGetTokenWhenKeyCacheHasExpired() throws Exception { TokenService tokenService = createTokenService(tokenServiceEnabledSettings, systemUTC()); // This test only makes sense in mixed clusters with pre v7.1.0 nodes where the Key is actually used if (null == oldNode) { - oldNode = addAnotherDataNodeWithVersion(this.clusterService, randomFrom(Version.V_6_7_0, Version.V_7_1_0)); + oldNode = addAnotherDataNodeWithVersion(this.clusterService, randomFrom(Version.V_7_0_0, Version.V_7_1_0)); } Authentication authentication = new Authentication(new User("joe", "admin"), new RealmRef("native_realm", "native", "node1"), null); From f00716389b974a81317f04fd2d693f05e0965007 Mon Sep 17 00:00:00 2001 From: Ioannis Kakavas Date: Wed, 22 May 2019 08:20:25 +0300 Subject: [PATCH 016/224] Allow Kibana user to use the OpenID Connect APIs (#42305) Add the manage_oidc privilege to the kibana user and to the role privileges list --- .../elasticsearch/client/security/user/privileges/Role.java | 3 ++- .../xpack/core/security/authz/store/ReservedRolesStore.java | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/security/user/privileges/Role.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/user/privileges/Role.java index c6dc6910d97b0..a3263e7f6e920 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/security/user/privileges/Role.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/user/privileges/Role.java @@ -311,6 +311,7 @@ public static class ClusterPrivilegeName { public static final String TRANSPORT_CLIENT = "transport_client"; public static final String MANAGE_SECURITY = "manage_security"; public static final String MANAGE_SAML = "manage_saml"; + public static final String MANAGE_OIDC = "manage_oidc"; public static final String MANAGE_TOKEN = "manage_token"; public static final String MANAGE_PIPELINE = "manage_pipeline"; public static final String MANAGE_CCR = "manage_ccr"; @@ -319,7 +320,7 @@ public static class ClusterPrivilegeName { public static final String READ_ILM = "read_ilm"; public static final String[] ALL_ARRAY = new String[] { NONE, ALL, MONITOR, MONITOR_ML, MONITOR_WATCHER, MONITOR_ROLLUP, MANAGE, MANAGE_ML, MANAGE_WATCHER, MANAGE_ROLLUP, MANAGE_INDEX_TEMPLATES, MANAGE_INGEST_PIPELINES, TRANSPORT_CLIENT, - MANAGE_SECURITY, MANAGE_SAML, MANAGE_TOKEN, MANAGE_PIPELINE, MANAGE_CCR, READ_CCR, MANAGE_ILM, READ_ILM }; + MANAGE_SECURITY, MANAGE_SAML, MANAGE_OIDC, MANAGE_TOKEN, MANAGE_PIPELINE, MANAGE_CCR, READ_CCR, MANAGE_ILM, READ_ILM}; } /** diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStore.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStore.java index b767b56086159..2c86971b529f9 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStore.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStore.java @@ -109,7 +109,7 @@ private static Map initializeReservedRoles() { null)) .put(KibanaUser.ROLE_NAME, new RoleDescriptor(KibanaUser.ROLE_NAME, new String[] { - "monitor", "manage_index_templates", MonitoringBulkAction.NAME, "manage_saml", "manage_token" + "monitor", "manage_index_templates", MonitoringBulkAction.NAME, "manage_saml", "manage_token", "manage_oidc" }, new RoleDescriptor.IndicesPrivileges[] { RoleDescriptor.IndicesPrivileges.builder() From 18bff0c76417643ce8ceb7b77042d301dfda00b8 Mon Sep 17 00:00:00 2001 From: Ioannis Kakavas Date: Wed, 22 May 2019 08:20:51 +0300 Subject: [PATCH 017/224] Ensure SHA256 is not used in tests (#42289) SHA256 was recently added to the Hasher class in order to be used in the TokenService. A few tests were still using values() to get the available algorithms from the Enum and it could happen that SHA256 would be picked up by these. This change adds an extra convenience method (Hasher#getAvailableAlgoCacheHash) and enures that only this and Hasher#getAvailableAlgoStoredHash are used for getting the list of available password hashing algorithms in our tests. --- x-pack/plugin/core/build.gradle | 4 ++++ x-pack/plugin/core/forbidden/hasher-signatures.txt | 2 ++ .../xpack/core/security/authc/support/Hasher.java | 14 ++++++++++++++ .../xpack/security/authc/RealmSettingsTests.java | 4 +--- .../xpack/security/authc/file/FileRealmTests.java | 3 +-- .../support/CachingUsernamePasswordRealmTests.java | 3 +-- 6 files changed, 23 insertions(+), 7 deletions(-) create mode 100644 x-pack/plugin/core/forbidden/hasher-signatures.txt diff --git a/x-pack/plugin/core/build.gradle b/x-pack/plugin/core/build.gradle index c20449724f8e0..d805a491e093a 100644 --- a/x-pack/plugin/core/build.gradle +++ b/x-pack/plugin/core/build.gradle @@ -95,6 +95,10 @@ forbiddenPatterns { exclude '**/*.zip' } +forbiddenApisMain { + signaturesFiles += files('forbidden/hasher-signatures.txt') +} + if (isEclipse) { // in eclipse the project is under a fake root, we need to change around the source sets sourceSets { diff --git a/x-pack/plugin/core/forbidden/hasher-signatures.txt b/x-pack/plugin/core/forbidden/hasher-signatures.txt new file mode 100644 index 0000000000000..98271161096a7 --- /dev/null +++ b/x-pack/plugin/core/forbidden/hasher-signatures.txt @@ -0,0 +1,2 @@ +@defaultMessage values should not be used as it can contain unwanted algorithms. Use Hasher#getAvailableAlgoStoredHash and Hasher#getAvailableAlgoCacheHash instead +org.elasticsearch.xpack.core.security.authc.support.Hasher#values() \ No newline at end of file diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/Hasher.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/Hasher.java index 28f263748135f..5413a38bd6288 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/Hasher.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/Hasher.java @@ -7,6 +7,7 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.CharArrays; +import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.common.hash.MessageDigests; import org.elasticsearch.common.settings.SecureString; @@ -565,12 +566,25 @@ private static boolean verifyBcryptHash(SecureString text, char[] hash) { * combinations that can be used for password hashing. The identifiers can be used to get * an instance of the appropriate {@link Hasher} by using {@link #resolve(String) resolve()} */ + @SuppressForbidden(reason = "This is the only allowed way to get available values") public static List getAvailableAlgoStoredHash() { return Arrays.stream(Hasher.values()).map(Hasher::name).map(name -> name.toLowerCase(Locale.ROOT)) .filter(name -> (name.startsWith("pbkdf2") || name.startsWith("bcrypt"))) .collect(Collectors.toList()); } + /** + * Returns a list of lower case String identifiers for the Hashing algorithm and parameter + * combinations that can be used for password hashing in the cache. The identifiers can be used to get + * an instance of the appropriate {@link Hasher} by using {@link #resolve(String) resolve()} + */ + @SuppressForbidden(reason = "This is the only allowed way to get available values") + public static List getAvailableAlgoCacheHash() { + return Arrays.stream(Hasher.values()).map(Hasher::name).map(name -> name.toLowerCase(Locale.ROOT)) + .filter(name -> (name.equals("sha256") == false)) + .collect(Collectors.toList()); + } + public abstract char[] hash(SecureString data); public abstract boolean verify(SecureString data, char[] hash); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/RealmSettingsTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/RealmSettingsTests.java index 7d7fd135349b1..eb33408f338c6 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/RealmSettingsTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/RealmSettingsTests.java @@ -17,18 +17,16 @@ import org.elasticsearch.xpack.core.security.authc.RealmSettings; import org.elasticsearch.xpack.core.security.authc.support.Hasher; -import java.util.Arrays; import java.util.Collections; import java.util.HashSet; import java.util.List; import java.util.Set; -import java.util.stream.Collectors; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.notNullValue; public class RealmSettingsTests extends ESTestCase { - private static final List CACHE_HASHING_ALGOS = Arrays.stream(Hasher.values()).map(Hasher::name).collect(Collectors.toList()); + private static final List CACHE_HASHING_ALGOS = Hasher.getAvailableAlgoCacheHash(); public void testRealmWithBlankTypeDoesNotValidate() throws Exception { final Settings.Builder builder = baseSettings(false); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/file/FileRealmTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/file/FileRealmTests.java index 168f608951e09..67ab33bac7380 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/file/FileRealmTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/file/FileRealmTests.java @@ -22,7 +22,6 @@ import org.junit.Before; import org.mockito.stubbing.Answer; -import java.util.Locale; import java.util.Map; import java.util.function.Supplier; @@ -94,7 +93,7 @@ private RealmConfig getRealmConfig(Settings settings) { public void testAuthenticateCaching() throws Exception { Settings settings = Settings.builder() .put(RealmSettings.realmSettingPrefix(REALM_IDENTIFIER) + "cache.hash_algo", - Hasher.values()[randomIntBetween(0, Hasher.values().length - 1)].name().toLowerCase(Locale.ROOT)) + randomFrom(Hasher.getAvailableAlgoCacheHash())) .put(globalSettings) .build(); RealmConfig config = getRealmConfig(settings); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/CachingUsernamePasswordRealmTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/CachingUsernamePasswordRealmTests.java index 8b30cb85fed78..49f0d45966639 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/CachingUsernamePasswordRealmTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/CachingUsernamePasswordRealmTests.java @@ -30,7 +30,6 @@ import java.util.ArrayList; import java.util.List; -import java.util.Locale; import java.util.concurrent.CountDownLatch; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; @@ -66,7 +65,7 @@ public void stop() { @AwaitsFix(bugUrl="https://github.com/elastic/elasticsearch/issues/42267") public void testCacheSettings() { - String cachingHashAlgo = Hasher.values()[randomIntBetween(0, Hasher.values().length - 1)].name().toLowerCase(Locale.ROOT); + String cachingHashAlgo = randomFrom(Hasher.getAvailableAlgoCacheHash()); int maxUsers = randomIntBetween(10, 100); TimeValue ttl = TimeValue.timeValueMinutes(randomIntBetween(10, 20)); final RealmConfig.RealmIdentifier identifier = new RealmConfig.RealmIdentifier("caching", "test_realm"); From db0fbf01cb19034060a148583f525d1a3b4ced9f Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Wed, 22 May 2019 07:36:44 +0200 Subject: [PATCH 018/224] Add Package Level Documentation to o.e.r.blobstore (#42101) * Add Package Level Documentation to o.e.r.blobstore * Added verbose documentation for the `o.e.r.blobstore` package similar to that added for the snapshot package in https://github.com/elastic/elasticsearch/pull/38108 * Moved the documentation on the BlobStoreRepository to the package level to have things in a single place for easier readability. --- .../blobstore/BlobStoreRepository.java | 42 +--- .../repositories/blobstore/package-info.java | 208 ++++++++++++++++++ 2 files changed, 211 insertions(+), 39 deletions(-) create mode 100644 server/src/main/java/org/elasticsearch/repositories/blobstore/package-info.java diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java index 5ed73a0058cc5..320b7ff2d5550 100644 --- a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java +++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java @@ -121,45 +121,9 @@ *

* This repository works with any {@link BlobStore} implementation. The blobStore could be (and preferred) lazy initialized in * {@link #createBlobStore()}. - *

- * BlobStoreRepository maintains the following structure in the blob store - *

- * {@code
- *   STORE_ROOT
- *   |- index-N           - JSON serialized {@link RepositoryData} containing a list of all snapshot ids and the indices belonging to
- *   |                      each snapshot, N is the generation of the file
- *   |- index.latest      - contains the numeric value of the latest generation of the index file (i.e. N from above)
- *   |- incompatible-snapshots - list of all snapshot ids that are no longer compatible with the current version of the cluster
- *   |- snap-20131010.dat - SMILE serialized {@link SnapshotInfo} for snapshot "20131010"
- *   |- meta-20131010.dat - SMILE serialized {@link MetaData} for snapshot "20131010" (includes only global metadata)
- *   |- snap-20131011.dat - SMILE serialized {@link SnapshotInfo} for snapshot "20131011"
- *   |- meta-20131011.dat - SMILE serialized {@link MetaData} for snapshot "20131011"
- *   .....
- *   |- indices/ - data for all indices
- *      |- Ac1342-B_x/ - data for index "foo" which was assigned the unique id of Ac1342-B_x in the repository
- *      |  |- meta-20131010.dat - JSON Serialized {@link IndexMetaData} for index "foo"
- *      |  |- 0/ - data for shard "0" of index "foo"
- *      |  |  |- __1                      \  (files with numeric names were created by older ES versions)
- *      |  |  |- __2                      |
- *      |  |  |- __VPO5oDMVT5y4Akv8T_AO_A |- files from different segments see snap-* for their mappings to real segment files
- *      |  |  |- __1gbJy18wS_2kv1qI7FgKuQ |
- *      |  |  |- __R8JvZAHlSMyMXyZc2SS8Zg /
- *      |  |  .....
- *      |  |  |- snap-20131010.dat - SMILE serialized {@link BlobStoreIndexShardSnapshot} for snapshot "20131010"
- *      |  |  |- snap-20131011.dat - SMILE serialized {@link BlobStoreIndexShardSnapshot} for snapshot "20131011"
- *      |  |  |- index-123 - SMILE serialized {@link BlobStoreIndexShardSnapshots} for the shard
- *      |  |
- *      |  |- 1/ - data for shard "1" of index "foo"
- *      |  |  |- __1
- *      |  |  .....
- *      |  |
- *      |  |-2/
- *      |  ......
- *      |
- *      |- 1xB0D8_B3y/ - data for index "bar" which was assigned the unique id of 1xB0D8_B3y in the repository
- *      ......
- * }
- * 
+ *

+ * For in depth documentation on how exactly implementations of this class interact with the snapshot functionality please refer to the + * documentation of the package {@link org.elasticsearch.repositories.blobstore}. */ public abstract class BlobStoreRepository extends AbstractLifecycleComponent implements Repository { private static final Logger logger = LogManager.getLogger(BlobStoreRepository.class); diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/package-info.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/package-info.java new file mode 100644 index 0000000000000..9d6d72f0458c9 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/package-info.java @@ -0,0 +1,208 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/** + *

This package exposes the blobstore repository used by Elasticsearch Snapshots.

+ * + *

Preliminaries

+ * + *

The {@link org.elasticsearch.repositories.blobstore.BlobStoreRepository} forms the basis of implementations of + * {@link org.elasticsearch.repositories.Repository} on top of a blob store. A blobstore can be used as the basis for an implementation + * as long as it provides for GET, PUT, DELETE, and LIST operations. For a read-only repository, it suffices if the blobstore provides only + * GET operations. + * These operations are formally defined as specified by the {@link org.elasticsearch.common.blobstore.BlobContainer} interface that + * any {@code BlobStoreRepository} implementation must provide via its implementation of + * {@link org.elasticsearch.repositories.blobstore.BlobStoreRepository#getBlobContainer()}.

+ * + *

The blob store is written to and read from by master-eligible nodes and data nodes. All metadata related to a snapshot's + * scope and health is written by the master node.

+ *

The data-nodes on the other hand, write the data for each individual shard but do not write any blobs outside of shard directories for + * shards that they hold the primary of. For each shard, the data-node holding the shard's primary writes the actual data in form of + * the shard's segment files to the repository as well as metadata about all the segment files that the repository stores for the shard.

+ * + *

For the specifics on how the operations on the repository documented below are invoked during the snapshot process please refer to + * the documentation of the {@link org.elasticsearch.snapshots} package.

+ * + *

{@code BlobStoreRepository} maintains the following structure of blobs containing data and metadata in the blob store. The exact + * operations executed on these blobs are explained below.

+ *
+ * {@code
+ *   STORE_ROOT
+ *   |- index-N           - JSON serialized {@link org.elasticsearch.repositories.RepositoryData} containing a list of all snapshot ids
+ *   |                      and the indices belonging to each snapshot, N is the generation of the file
+ *   |- index.latest      - contains the numeric value of the latest generation of the index file (i.e. N from above)
+ *   |- incompatible-snapshots - list of all snapshot ids that are no longer compatible with the current version of the cluster
+ *   |- snap-20131010.dat - SMILE serialized {@link org.elasticsearch.snapshots.SnapshotInfo} for snapshot "20131010"
+ *   |- meta-20131010.dat - SMILE serialized {@link org.elasticsearch.cluster.metadata.MetaData} for snapshot "20131010"
+ *   |                      (includes only global metadata)
+ *   |- snap-20131011.dat - SMILE serialized {@link org.elasticsearch.snapshots.SnapshotInfo} for snapshot "20131011"
+ *   |- meta-20131011.dat - SMILE serialized {@link org.elasticsearch.cluster.metadata.MetaData} for snapshot "20131011"
+ *   .....
+ *   |- indices/ - data for all indices
+ *      |- Ac1342-B_x/ - data for index "foo" which was assigned the unique id Ac1342-B_x (not to be confused with the actual index uuid)
+ *      |  |             in the repository
+ *      |  |- meta-20131010.dat - JSON Serialized {@link org.elasticsearch.cluster.metadata.IndexMetaData} for index "foo"
+ *      |  |- 0/ - data for shard "0" of index "foo"
+ *      |  |  |- __1                      \  (files with numeric names were created by older ES versions)
+ *      |  |  |- __2                      |
+ *      |  |  |- __VPO5oDMVT5y4Akv8T_AO_A |- files from different segments see snap-* for their mappings to real segment files
+ *      |  |  |- __1gbJy18wS_2kv1qI7FgKuQ |
+ *      |  |  |- __R8JvZAHlSMyMXyZc2SS8Zg /
+ *      |  |  .....
+ *      |  |  |- snap-20131010.dat - SMILE serialized {@link org.elasticsearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshot} for
+ *      |  |  |                      snapshot "20131010"
+ *      |  |  |- snap-20131011.dat - SMILE serialized {@link org.elasticsearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshot} for
+ *      |  |  |                      snapshot "20131011"
+ *      |  |  |- index-123         - SMILE serialized {@link org.elasticsearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshots} for
+ *      |  |  |                      the shard
+ *      |  |
+ *      |  |- 1/ - data for shard "1" of index "foo"
+ *      |  |  |- __1
+ *      |  |  .....
+ *      |  |
+ *      |  |-2/
+ *      |  ......
+ *      |
+ *      |- 1xB0D8_B3y/ - data for index "bar" which was assigned the unique id of 1xB0D8_B3y in the repository
+ *      ......
+ * }
+ * 
+ * + *

Getting the Repository's RepositoryData

+ * + *

Loading the {@link org.elasticsearch.repositories.RepositoryData} that holds the list of all snapshots as well as the mapping of + * indices' names to their repository {@link org.elasticsearch.repositories.IndexId} is done by invoking + * {@link org.elasticsearch.repositories.blobstore.BlobStoreRepository#getRepositoryData} and implemented as follows:

+ *
    + *
  1. + *
      + *
    1. The blobstore repository stores the {@code RepositoryData} in blobs named with incrementing suffix {@code N} at {@code /index-N} + * directly under the repository's root.
    2. + *
    3. The blobstore also stores the most recent {@code N} as a 64bit long in the blob {@code /index.latest} directly under the + * repository's root.
    4. + *
    + *
  2. + *
  3. + *
      + *
    1. First, find the most recent {@code RepositoryData} by getting a list of all index-N blobs through listing all blobs with prefix + * "index-" under the repository root and then selecting the one with the highest value for N.
    2. + *
    3. If this operation fails because the repository's {@code BlobContainer} does not support list operations (in the case of read-only + * repositories), read the highest value of N from the the index.latest blob.
    4. + *
    + *
  4. + *
  5. + *
      + *
    1. Use the just determined value of {@code N} and get the {@code /index-N} blob and deserialize the {@code RepositoryData} from it.
    2. + *
    3. If no value of {@code N} could be found since neither an {@code index.latest} nor any {@code index-N} blobs exist in the repository, + * it is assumed to be empty and {@link org.elasticsearch.repositories.RepositoryData#EMPTY} is returned.
    4. + *
    + *
  6. + *
+ *

Creating a Snapshot

+ * + *

Creating a snapshot in the repository happens in the three steps described in detail below.

+ * + *

Initializing a Snapshot in the Repository

+ * + *

Creating a snapshot in the repository starts with a call to {@link org.elasticsearch.repositories.Repository#initializeSnapshot} which + * the blob store repository implements via the following actions:

+ *
    + *
  1. Verify that no snapshot by the requested name exists.
  2. + *
  3. Write a blob containing the cluster metadata to the root of the blob store repository at {@code /meta-${snapshot-uuid}.dat}
  4. + *
  5. Write the metadata for each index to a blob in that index's directory at + * {@code /indices/${index-snapshot-uuid}/meta-${snapshot-uuid}.dat}
  6. + *
+ * TODO: This behavior is problematic, adjust these docs once https://github.com/elastic/elasticsearch/issues/41581 is fixed + * + *

Writing Shard Data (Segments)

+ * + *

Once all the metadata has been written by the snapshot initialization, the snapshot process moves on to writing the actual shard data + * to the repository by invoking {@link org.elasticsearch.repositories.Repository#snapshotShard} on the data-nodes that hold the primaries + * for the shards in the current snapshot. It is implemented as follows:

+ * + *

Note:

+ *
    + *
  • For each shard {@code i} in a given index, its path in the blob store is located at {@code /indices/${index-snapshot-uuid}/${i}}
  • + *
  • All the following steps are executed exclusively on the shard's primary's data node.
  • + *
+ * + *
    + *
  1. Create the {@link org.apache.lucene.index.IndexCommit} for the shard to snapshot.
  2. + *
  3. List all blobs in the shard's path. Find the {@link org.elasticsearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshots} blob + * with name {@code index-${N}} for the highest possible value of {@code N} in the list to get the information of what segment files are + * already available in the blobstore.
  4. + *
  5. By comparing the files in the {@code IndexCommit} and the available file list from the previous step, determine the segment files + * that need to be written to the blob store. For each segment that needs to be added to the blob store, generate a unique name by combining + * the segment data blob prefix {@code __} and a UUID and write the segment to the blobstore.
  6. + *
  7. After completing all segment writes, a blob containing a + * {@link org.elasticsearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshot} with name {@code snap-${snapshot-uuid}.dat} is written to + * the shard's path and contains a list of all the files referenced by the snapshot as well as some metadata about the snapshot. See the + * documentation of {@code BlobStoreIndexShardSnapshot} for details on its contents.
  8. + *
  9. Once all the segments and the {@code BlobStoreIndexShardSnapshot} blob have been written, an updated + * {@code BlobStoreIndexShardSnapshots} blob is written to the shard's path with name {@code index-${N+1}}.
  10. + *
+ * + *

Finalizing the Snapshot

+ * + *

After all primaries have finished writing the necessary segment files to the blob store in the previous step, the master node moves on + * to finalizing the snapshot by invoking {@link org.elasticsearch.repositories.Repository#finalizeSnapshot}. This method executes the + * following actions in order:

+ *
    + *
  1. Write the {@link org.elasticsearch.snapshots.SnapshotInfo} blob for the given snapshot to the key {@code /snap-${snapshot-uuid}.dat} + * directly under the repository root.
  2. + *
  3. Write an updated {@code RepositoryData} blob to the key {@code /index-${N+1}} using the {@code N} determined when initializing the + * snapshot in the first step. When doing this, the implementation checks that the blob for generation {@code N + 1} has not yet been + * written to prevent concurrent updates to the repository. If the blob for {@code N + 1} already exists the execution of finalization + * stops under the assumption that a master failover occurred and the snapshot has already been finalized by the new master.
  4. + *
  5. Write the updated {@code /index.latest} blob containing the new repository generation {@code N + 1}.
  6. + *
+ * + *

Deleting a Snapshot

+ * + *

Deleting a snapshot is an operation that is exclusively executed on the master node that runs through the following sequence of + * action when {@link org.elasticsearch.repositories.blobstore.BlobStoreRepository#deleteSnapshot} is invoked:

+ * + *
    + *
  1. Get the current {@code RepositoryData} from the latest {@code index-N} blob at the repository root.
  2. + *
  3. Write an updated {@code RepositoryData} blob with the deleted snapshot removed to key {@code /index-${N+1}} directly under the + * repository root.
  4. + *
  5. Write an updated {@code index.latest} blob containing {@code N + 1}.
  6. + *
  7. Delete the global {@code MetaData} blob {@code meta-${snapshot-uuid}.dat} stored directly under the repository root for the snapshot + * as well as the {@code SnapshotInfo} blob at {@code /snap-${snapshot-uuid}.dat}.
  8. + *
  9. For each index referenced by the snapshot: + *
      + *
    1. Delete the snapshot's {@code IndexMetaData} at {@code /indices/${index-snapshot-uuid}/meta-${snapshot-uuid}}.
    2. + *
    3. Go through all shard directories {@code /indices/${index-snapshot-uuid}/${i}} and: + *
        + *
      1. Remove the {@code BlobStoreIndexShardSnapshot} blob at {@code /indices/${index-snapshot-uuid}/${i}/snap-${snapshot-uuid}.dat}.
      2. + *
      3. List all blobs in the shard path {@code /indices/${index-snapshot-uuid}} and build a new {@code BlobStoreIndexShardSnapshots} from + * the remaining {@code BlobStoreIndexShardSnapshot} blobs in the shard. Afterwards, write it to the next shard generation blob at + * {@code /indices/${index-snapshot-uuid}/${i}/index-${N+1}} (The shard's generation is determined from the list of {@code index-N} blobs + * in the shard directory).
      4. + *
      5. Delete all segment blobs (identified by having the data blob prefix {@code __}) in the shard directory which are not referenced by + * the new {@code BlobStoreIndexShardSnapshots} that has been written in the previous step.
      6. + *
      + *
    4. + *
    + *
  10. + *
+ * TODO: The above sequence of actions can lead to leaking files when an index completely goes out of scope. Adjust this documentation once + * https://github.com/elastic/elasticsearch/issues/13159 is fixed. + */ +package org.elasticsearch.repositories.blobstore; From 9a152ee5fa058ed72952513ab78064dc71e6d32e Mon Sep 17 00:00:00 2001 From: Hendrik Muhs Date: Wed, 22 May 2019 08:12:58 +0200 Subject: [PATCH 019/224] move latch await to doNextSearch (#42275) move latch await to doNextSearch, fixes a race condition when the executor thread is faster than the coordinator thread fixes #42084 --- .../xpack/core/indexing/AsyncTwoPhaseIndexerTests.java | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexerTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexerTests.java index 27fba82338a1c..4249d7c61d0ad 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexerTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexerTests.java @@ -64,7 +64,7 @@ protected String getJobId() { @Override protected IterationResult doProcess(SearchResponse searchResponse) { - awaitForLatch(); + assertFalse("should not be called as stoppedBeforeFinished is false", stoppedBeforeFinished); assertThat(step, equalTo(3)); ++step; return new IterationResult<>(Collections.emptyList(), 3, true); @@ -99,6 +99,9 @@ protected void doNextSearch(SearchRequest request, ActionListener state = new AtomicReference<>(IndexerState.STOPPED); final ExecutorService executor = Executors.newFixedThreadPool(1); @@ -265,7 +267,6 @@ public void testStateMachineBrokenSearch() throws InterruptedException { } } - @AwaitsFix( bugUrl = "https://github.com/elastic/elasticsearch/issues/42084") public void testStop_AfterIndexerIsFinished() throws InterruptedException { AtomicReference state = new AtomicReference<>(IndexerState.STOPPED); final ExecutorService executor = Executors.newFixedThreadPool(1); @@ -285,7 +286,6 @@ public void testStop_AfterIndexerIsFinished() throws InterruptedException { } } - @AwaitsFix( bugUrl = "https://github.com/elastic/elasticsearch/issues/42084") public void testStop_WhileIndexing() throws InterruptedException { AtomicReference state = new AtomicReference<>(IndexerState.STOPPED); final ExecutorService executor = Executors.newFixedThreadPool(1); From 610230f8ca34e4aeef95dcc92a0f4de5b9c33ba6 Mon Sep 17 00:00:00 2001 From: Hendrik Muhs Date: Wed, 22 May 2019 08:13:32 +0200 Subject: [PATCH 020/224] [ML-DataFrame] validate group name to not contain invalid characters (#42292) disallows of creating groupBy field with '[', ']', '>' in the name to be consistent with aggregations --- .../transforms/pivot/GroupConfig.java | 8 +++++ .../transforms/pivot/GroupConfigTests.java | 32 +++++++++++++++++++ 2 files changed, 40 insertions(+) diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/pivot/GroupConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/pivot/GroupConfig.java index 807c2e8d339dd..532477c44bdf4 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/pivot/GroupConfig.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/pivot/GroupConfig.java @@ -20,6 +20,7 @@ import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.xpack.core.dataframe.DataFrameField; import org.elasticsearch.xpack.core.dataframe.DataFrameMessages; import org.elasticsearch.xpack.core.dataframe.utils.ExceptionsHelper; @@ -29,6 +30,7 @@ import java.util.Locale; import java.util.Map; import java.util.Objects; +import java.util.regex.Matcher; import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken; @@ -135,6 +137,7 @@ public static GroupConfig fromXContent(final XContentParser parser, boolean leni private static Map parseGroupConfig(final XContentParser parser, boolean lenient) throws IOException { + Matcher validAggMatcher = AggregatorFactories.VALID_AGG_NAME.matcher(""); LinkedHashMap groups = new LinkedHashMap<>(); // be parsing friendly, whether the token needs to be advanced or not (similar to what ObjectParser does) @@ -150,6 +153,11 @@ private static Map parseGroupConfig(final XContentPar ensureExpectedToken(XContentParser.Token.FIELD_NAME, token, parser::getTokenLocation); String destinationFieldName = parser.currentName(); + if (validAggMatcher.reset(destinationFieldName).matches() == false) { + throw new ParsingException(parser.getTokenLocation(), "Invalid group name [" + destinationFieldName + + "]. Group names can contain any character except '[', ']', and '>'"); + } + token = parser.nextToken(); ensureExpectedToken(XContentParser.Token.START_OBJECT, token, parser::getTokenLocation); token = parser.nextToken(); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/pivot/GroupConfigTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/pivot/GroupConfigTests.java index f7b9552584221..11dfc55264a21 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/pivot/GroupConfigTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/pivot/GroupConfigTests.java @@ -6,6 +6,7 @@ package org.elasticsearch.xpack.core.dataframe.transforms.pivot; +import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.Writeable.Reader; import org.elasticsearch.common.xcontent.ToXContent; @@ -27,6 +28,9 @@ public class GroupConfigTests extends AbstractSerializingTestCase { + // array of illegal characters, see {@link AggregatorFactories#VALID_AGG_NAME} + private static final char[] ILLEGAL_FIELD_NAME_CHARACTERS = {'[', ']', '>'}; + public static GroupConfig randomGroupConfig() { Map source = new LinkedHashMap<>(); Map groups = new LinkedHashMap<>(); @@ -88,6 +92,34 @@ public void testEmptyGroupBy() throws IOException { } } + public void testInvalidGroupByNames() throws IOException { + + String invalidName = randomAlphaOfLengthBetween(0, 5) + + ILLEGAL_FIELD_NAME_CHARACTERS[randomIntBetween(0, ILLEGAL_FIELD_NAME_CHARACTERS.length - 1)] + + randomAlphaOfLengthBetween(0, 5); + + XContentBuilder source = JsonXContent.contentBuilder() + .startObject() + .startObject(invalidName) + .startObject("terms") + .field("field", "user") + .endObject() + .endObject() + .endObject(); + + // lenient, passes but reports invalid + try (XContentParser parser = createParser(source)) { + GroupConfig groupConfig = GroupConfig.fromXContent(parser, true); + assertFalse(groupConfig.isValid()); + } + + // strict throws + try (XContentParser parser = createParser(source)) { + Exception e = expectThrows(ParsingException.class, () -> GroupConfig.fromXContent(parser, false)); + assertTrue(e.getMessage().startsWith("Invalid group name")); + } + } + private static Map getSource(SingleGroupSource groupSource) { try (XContentBuilder xContentBuilder = XContentFactory.jsonBuilder()) { XContentBuilder content = groupSource.toXContent(xContentBuilder, ToXContent.EMPTY_PARAMS); From fccb7a2820147a76fdb69b9fc6817c6bd6aa5833 Mon Sep 17 00:00:00 2001 From: Dimitris Athanasiou Date: Wed, 22 May 2019 10:32:30 +0300 Subject: [PATCH 021/224] [ML] Include node name when native controller cannot start process (#42225) This adds the node name where we fail to start a process via the native controller to facilitate debugging as otherwise it might not be known to which node the job was allocated. --- .../elasticsearch/xpack/ml/MachineLearning.java | 2 +- .../xpack/ml/MachineLearningFeatureSet.java | 3 ++- .../elasticsearch/xpack/ml/MlLifeCycleService.java | 4 +++- .../xpack/ml/process/NativeController.java | 14 +++++++++----- .../xpack/ml/process/NativeControllerHolder.java | 4 ++-- .../xpack/ml/process/NativeControllerTests.java | 11 +++++++---- 6 files changed, 24 insertions(+), 14 deletions(-) diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java index de945b9bc6c3d..f679170bc673d 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java @@ -437,7 +437,7 @@ public Collection createComponents(Client client, ClusterService cluster NormalizerProcessFactory normalizerProcessFactory; if (MachineLearningField.AUTODETECT_PROCESS.get(settings) && MachineLearningFeatureSet.isRunningOnMlPlatform(true)) { try { - NativeController nativeController = NativeControllerHolder.getNativeController(environment); + NativeController nativeController = NativeControllerHolder.getNativeController(clusterService.getNodeName(), environment); if (nativeController == null) { // This will only only happen when path.home is not set, which is disallowed in production throw new ElasticsearchException("Failed to create native process controller for Machine Learning"); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearningFeatureSet.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearningFeatureSet.java index c913babbaa405..bcfab50c21e00 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearningFeatureSet.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearningFeatureSet.java @@ -79,7 +79,8 @@ public MachineLearningFeatureSet(Environment environment, ClusterService cluster if (enabled && XPackPlugin.transportClientMode(environment.settings()) == false) { try { if (isRunningOnMlPlatform(true)) { - NativeController nativeController = NativeControllerHolder.getNativeController(environment); + NativeController nativeController = NativeControllerHolder.getNativeController(clusterService.getNodeName(), + environment); if (nativeController != null) { nativeCodeInfo = nativeController.getNativeCodeInfo(); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlLifeCycleService.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlLifeCycleService.java index 06d9b749e1a89..7309afa6b3ab4 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlLifeCycleService.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlLifeCycleService.java @@ -19,6 +19,7 @@ public class MlLifeCycleService { private final Environment environment; + private final ClusterService clusterService; private final DatafeedManager datafeedManager; private final AutodetectProcessManager autodetectProcessManager; private final MlMemoryTracker memoryTracker; @@ -26,6 +27,7 @@ public class MlLifeCycleService { public MlLifeCycleService(Environment environment, ClusterService clusterService, DatafeedManager datafeedManager, AutodetectProcessManager autodetectProcessManager, MlMemoryTracker memoryTracker) { this.environment = environment; + this.clusterService = clusterService; this.datafeedManager = datafeedManager; this.autodetectProcessManager = autodetectProcessManager; this.memoryTracker = memoryTracker; @@ -46,7 +48,7 @@ public synchronized void stop() { if (datafeedManager != null) { datafeedManager.isolateAllDatafeedsOnThisNodeBeforeShutdown(); } - NativeController nativeController = NativeControllerHolder.getNativeController(environment); + NativeController nativeController = NativeControllerHolder.getNativeController(clusterService.getNodeName(), environment); if (nativeController != null) { // This kills autodetect processes WITHOUT closing the jobs, so they get reallocated. if (autodetectProcessManager != null) { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/NativeController.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/NativeController.java index 2dc86825a1209..5dfa86ad22583 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/NativeController.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/NativeController.java @@ -43,15 +43,17 @@ public class NativeController { public static final Map UNKNOWN_NATIVE_CODE_INFO = Map.of("version", "N/A", "build_hash", "N/A"); + private final String localNodeName; private final CppLogMessageHandler cppLogHandler; private final OutputStream commandStream; - NativeController(Environment env, NamedPipeHelper namedPipeHelper) throws IOException { + NativeController(String localNodeName, Environment env, NamedPipeHelper namedPipeHelper) throws IOException { ProcessPipes processPipes = new ProcessPipes(env, namedPipeHelper, CONTROLLER, null, true, true, false, false, false, false); processPipes.connectStreams(CONTROLLER_CONNECT_TIMEOUT); - cppLogHandler = new CppLogMessageHandler(null, processPipes.getLogStream().get()); - commandStream = new BufferedOutputStream(processPipes.getCommandStream().get()); + this.localNodeName = localNodeName; + this.cppLogHandler = new CppLogMessageHandler(null, processPipes.getLogStream().get()); + this.commandStream = new BufferedOutputStream(processPipes.getCommandStream().get()); } void tailLogsInThread() { @@ -98,7 +100,8 @@ public void startProcess(List command) throws IOException { } if (cppLogHandler.hasLogStreamEnded()) { - String msg = "Cannot start process [" + command.get(0) + "]: native controller process has stopped"; + String msg = "Cannot start process [" + command.get(0) + "]: native controller process has stopped on node [" + + localNodeName + "]"; LOGGER.error(msg); throw new ElasticsearchException(msg); } @@ -124,7 +127,8 @@ public void killProcess(long pid) throws TimeoutException, IOException { } if (cppLogHandler.hasLogStreamEnded()) { - String msg = "Cannot kill process with PID [" + pid + "]: native controller process has stopped"; + String msg = "Cannot kill process with PID [" + pid + "]: native controller process has stopped on node [" + + localNodeName + "]"; LOGGER.error(msg); throw new ElasticsearchException(msg); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/NativeControllerHolder.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/NativeControllerHolder.java index 67e24b44a8494..5365a11f6b560 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/NativeControllerHolder.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/NativeControllerHolder.java @@ -32,12 +32,12 @@ private NativeControllerHolder() { * * Calls may throw an exception if initial connection to the C++ process fails. */ - public static NativeController getNativeController(Environment environment) throws IOException { + public static NativeController getNativeController(String localNodeName, Environment environment) throws IOException { if (MachineLearningField.AUTODETECT_PROCESS.get(environment.settings())) { synchronized (lock) { if (nativeController == null) { - nativeController = new NativeController(environment, new NamedPipeHelper()); + nativeController = new NativeController(localNodeName, environment, new NamedPipeHelper()); nativeController.tailLogsInThread(); } } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/process/NativeControllerTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/process/NativeControllerTests.java index ac00e8a24e1cf..c799f14235920 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/process/NativeControllerTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/process/NativeControllerTests.java @@ -30,6 +30,8 @@ public class NativeControllerTests extends ESTestCase { + private static final String NODE_NAME = "native-controller-tests-node"; + private static final String TEST_MESSAGE = "{\"logger\":\"controller\",\"timestamp\":1478261151445,\"level\":\"INFO\",\"pid\":10211," + "\"thread\":\"0x7fff7d2a8000\",\"message\":\"controller (64 bit): Version 6.0.0-alpha1-SNAPSHOT (Build a0d6ef8819418c) " + "Copyright (c) 2017 Elasticsearch BV\",\"method\":\"main\",\"file\":\"Main.cc\",\"line\":123}\n"; @@ -50,7 +52,7 @@ public void testStartProcessCommand() throws IOException { command.add("--arg2=42"); command.add("--arg3=something with spaces"); - NativeController nativeController = new NativeController(TestEnvironment.newEnvironment(settings), namedPipeHelper); + NativeController nativeController = new NativeController(NODE_NAME, TestEnvironment.newEnvironment(settings), namedPipeHelper); nativeController.startProcess(command); assertEquals("start\tmy_process\t--arg1\t--arg2=42\t--arg3=something with spaces\n", @@ -65,7 +67,7 @@ public void testGetNativeCodeInfo() throws IOException, TimeoutException { ByteArrayOutputStream commandStream = new ByteArrayOutputStream(); when(namedPipeHelper.openNamedPipeOutputStream(contains("command"), any(Duration.class))).thenReturn(commandStream); - NativeController nativeController = new NativeController(TestEnvironment.newEnvironment(settings), namedPipeHelper); + NativeController nativeController = new NativeController(NODE_NAME, TestEnvironment.newEnvironment(settings), namedPipeHelper); nativeController.tailLogsInThread(); Map nativeCodeInfo = nativeController.getNativeCodeInfo(); @@ -83,7 +85,7 @@ public void testControllerDeath() throws Exception { ByteArrayOutputStream commandStream = new ByteArrayOutputStream(); when(namedPipeHelper.openNamedPipeOutputStream(contains("command"), any(Duration.class))).thenReturn(commandStream); - NativeController nativeController = new NativeController(TestEnvironment.newEnvironment(settings), namedPipeHelper); + NativeController nativeController = new NativeController(NODE_NAME, TestEnvironment.newEnvironment(settings), namedPipeHelper); nativeController.tailLogsInThread(); // As soon as the log stream ends startProcess should think the native controller has died @@ -91,7 +93,8 @@ public void testControllerDeath() throws Exception { ElasticsearchException e = expectThrows(ElasticsearchException.class, () -> nativeController.startProcess(Collections.singletonList("my process"))); - assertEquals("Cannot start process [my process]: native controller process has stopped", e.getMessage()); + assertEquals("Cannot start process [my process]: native controller process has stopped on node " + + "[native-controller-tests-node]", e.getMessage()); }); } } From 464f7699c516047021021061c748856ae7666535 Mon Sep 17 00:00:00 2001 From: Yannick Welsch Date: Wed, 22 May 2019 09:35:07 +0200 Subject: [PATCH 022/224] Use comparator for Reconfigurator (#42283) Simplifies the voting configuration reconfiguration logic by switching to an explicit Comparator for the priorities. Does not make changes to the behavior of the component. --- .../cluster/coordination/Reconfigurator.java | 132 +++++++++--------- 1 file changed, 66 insertions(+), 66 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/Reconfigurator.java b/server/src/main/java/org/elasticsearch/cluster/coordination/Reconfigurator.java index b189b7ec2cc2d..7a3a54d73b2fe 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/Reconfigurator.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/Reconfigurator.java @@ -27,15 +27,10 @@ import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.util.set.Sets; -import java.util.Collection; -import java.util.Collections; import java.util.Set; -import java.util.SortedSet; import java.util.TreeSet; import java.util.stream.Collectors; -import java.util.stream.Stream; /** * Computes the optimal configuration of voting nodes in the cluster. @@ -102,76 +97,36 @@ public VotingConfiguration reconfigure(Set liveNodes, Set logger.trace("{} reconfiguring {} based on liveNodes={}, retiredNodeIds={}, currentMaster={}", this, currentConfig, liveNodes, retiredNodeIds, currentMaster); - /* - * There are three true/false properties of each node in play: live/non-live, retired/non-retired and in-config/not-in-config. - * Firstly we divide the nodes into disjoint sets based on these properties: - * - * - nonRetiredMaster - * - nonRetiredNotMasterInConfigNotLiveIds - * - nonRetiredInConfigLiveIds - * - nonRetiredLiveNotInConfigIds - * - * The other 5 possibilities are not relevant: - * - retired, in-config, live -- retired nodes should be removed from the config - * - retired, in-config, non-live -- retired nodes should be removed from the config - * - retired, not-in-config, live -- cannot add a retired node back to the config - * - retired, not-in-config, non-live -- cannot add a retired node back to the config - * - non-retired, non-live, not-in-config -- no evidence this node exists at all - */ - final Set liveNodeIds = liveNodes.stream() .filter(DiscoveryNode::isMasterNode).map(DiscoveryNode::getId).collect(Collectors.toSet()); - final Set liveInConfigIds = new TreeSet<>(currentConfig.getNodeIds()); - liveInConfigIds.retainAll(liveNodeIds); - - final SortedSet inConfigNotLiveIds = Sets.unmodifiableSortedDifference(currentConfig.getNodeIds(), liveInConfigIds); - final SortedSet nonRetiredInConfigNotLiveIds = new TreeSet<>(inConfigNotLiveIds); - nonRetiredInConfigNotLiveIds.removeAll(retiredNodeIds); - - final Set nonRetiredInConfigLiveIds = new TreeSet<>(liveInConfigIds); - nonRetiredInConfigLiveIds.removeAll(retiredNodeIds); - - final Set nonRetiredInConfigLiveMasterIds; - final Set nonRetiredInConfigLiveNotMasterIds; - if (nonRetiredInConfigLiveIds.contains(currentMaster.getId())) { - nonRetiredInConfigLiveNotMasterIds = new TreeSet<>(nonRetiredInConfigLiveIds); - nonRetiredInConfigLiveNotMasterIds.remove(currentMaster.getId()); - nonRetiredInConfigLiveMasterIds = Collections.singleton(currentMaster.getId()); - } else { - nonRetiredInConfigLiveNotMasterIds = nonRetiredInConfigLiveIds; - nonRetiredInConfigLiveMasterIds = Collections.emptySet(); - } - - final SortedSet nonRetiredLiveNotInConfigIds = Sets.sortedDifference(liveNodeIds, currentConfig.getNodeIds()); - nonRetiredLiveNotInConfigIds.removeAll(retiredNodeIds); + final Set currentConfigNodeIds = currentConfig.getNodeIds(); + + final Set orderedCandidateNodes = new TreeSet<>(); + liveNodes.stream() + .filter(DiscoveryNode::isMasterNode) + .filter(n -> retiredNodeIds.contains(n.getId()) == false) + .forEach(n -> orderedCandidateNodes.add(new VotingConfigNode(n.getId(), true, + n.getId().equals(currentMaster.getId()), currentConfigNodeIds.contains(n.getId())))); + currentConfigNodeIds.stream() + .filter(nid -> liveNodeIds.contains(nid) == false) + .filter(nid -> retiredNodeIds.contains(nid) == false) + .forEach(nid -> orderedCandidateNodes.add(new VotingConfigNode(nid, false, false, true))); /* * Now we work out how many nodes should be in the configuration: */ - final int targetSize; - - final int nonRetiredLiveNodeCount = nonRetiredInConfigLiveIds.size() + nonRetiredLiveNotInConfigIds.size(); - final int nonRetiredConfigSize = nonRetiredInConfigLiveIds.size() + nonRetiredInConfigNotLiveIds.size(); - if (autoShrinkVotingConfiguration) { - if (nonRetiredLiveNodeCount >= 3) { - targetSize = roundDownToOdd(nonRetiredLiveNodeCount); - } else { - // only have one or two available nodes; may not shrink below 3 nodes automatically, but if - // the config (excluding retired nodes) is already smaller than 3 then it's ok. - targetSize = nonRetiredConfigSize < 3 ? 1 : 3; - } - } else { - targetSize = Math.max(roundDownToOdd(nonRetiredLiveNodeCount), nonRetiredConfigSize); - } + final int nonRetiredConfigSize = Math.toIntExact(orderedCandidateNodes.stream().filter(n -> n.inCurrentConfig).count()); + final int minimumConfigEnforcedSize = autoShrinkVotingConfiguration ? (nonRetiredConfigSize < 3 ? 1 : 3) : nonRetiredConfigSize; + final int nonRetiredLiveNodeCount = Math.toIntExact(orderedCandidateNodes.stream().filter(n -> n.live).count()); + final int targetSize = Math.max(roundDownToOdd(nonRetiredLiveNodeCount), minimumConfigEnforcedSize); - /* - * The new configuration is formed by taking this many nodes in the following preference order: - */ final VotingConfiguration newConfig = new VotingConfiguration( - // live master first, then other live nodes, preferring the current config, and if we need more then use non-live nodes - Stream.of(nonRetiredInConfigLiveMasterIds, nonRetiredInConfigLiveNotMasterIds, nonRetiredLiveNotInConfigIds, - nonRetiredInConfigNotLiveIds).flatMap(Collection::stream).limit(targetSize).collect(Collectors.toSet())); + orderedCandidateNodes.stream() + .limit(targetSize) + .map(n -> n.id) + .collect(Collectors.toSet())); + // new configuration should have a quorum if (newConfig.hasQuorum(liveNodeIds)) { return newConfig; } else { @@ -179,4 +134,49 @@ public VotingConfiguration reconfigure(Set liveNodes, Set return currentConfig; } } + + static class VotingConfigNode implements Comparable { + final String id; + final boolean live; + final boolean currentMaster; + final boolean inCurrentConfig; + + VotingConfigNode(String id, boolean live, boolean currentMaster, boolean inCurrentConfig) { + this.id = id; + this.live = live; + this.currentMaster = currentMaster; + this.inCurrentConfig = inCurrentConfig; + } + + @Override + public int compareTo(VotingConfigNode other) { + // prefer nodes that are live + final int liveComp = Boolean.compare(other.live, live); + if (liveComp != 0) { + return liveComp; + } + // prefer nodes that are in current config for stability + final int inCurrentConfigComp = Boolean.compare(other.inCurrentConfig, inCurrentConfig); + if (inCurrentConfigComp != 0) { + return inCurrentConfigComp; + } + // prefer current master + final int currentMasterComp = Boolean.compare(other.currentMaster, currentMaster); + if (currentMasterComp != 0) { + return currentMasterComp; + } + // tiebreak by node id to have stable ordering + return id.compareTo(other.id); + } + + @Override + public String toString() { + return "VotingConfigNode{" + + "id='" + id + '\'' + + ", live=" + live + + ", currentMaster=" + currentMaster + + ", inCurrentConfig=" + inCurrentConfig + + '}'; + } + } } From 2ddd39aaa9d4613a92334fb5a21fedd7a64fefb7 Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Wed, 22 May 2019 09:47:23 +0200 Subject: [PATCH 023/224] Introduce ShardState Enum + Slight Cleanup SnapshotsInProgress (#41940) * Added separate enum for the state of each shard, it was really confusing that we used the same enum for the state of the snapshot overall and the state of each individual shard * relates https://github.com/elastic/elasticsearch/pull/40943#issuecomment-488664150 * Shortened some obvious spots in equals method and saved a few lines via `computeIfAbsent` to make up for adding 50 new lines to this class --- .../TransportSnapshotsStatusAction.java | 1 - .../cluster/SnapshotsInProgress.java | 98 ++++++++++++------- .../snapshots/SnapshotShardsService.java | 12 ++- .../snapshots/SnapshotsService.java | 21 ++-- .../cluster/SnapshotsInProgressTests.java | 11 ++- .../SharedClusterSnapshotRestoreIT.java | 7 +- ...SnapshotsInProgressSerializationTests.java | 3 +- 7 files changed, 95 insertions(+), 58 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java index c2f0d3dd0c074..8430d1868c88d 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java @@ -174,7 +174,6 @@ private SnapshotsStatusResponse buildResponse(SnapshotsStatusRequest request, Li break; case INIT: case WAITING: - case STARTED: stage = SnapshotIndexShardStage.STARTED; break; case SUCCESS: diff --git a/server/src/main/java/org/elasticsearch/cluster/SnapshotsInProgress.java b/server/src/main/java/org/elasticsearch/cluster/SnapshotsInProgress.java index 5190adf7ba2d9..ae9506706e36a 100644 --- a/server/src/main/java/org/elasticsearch/cluster/SnapshotsInProgress.java +++ b/server/src/main/java/org/elasticsearch/cluster/SnapshotsInProgress.java @@ -42,6 +42,7 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Objects; /** * Meta data about snapshots that are currently executing @@ -53,12 +54,7 @@ public class SnapshotsInProgress extends AbstractNamedDiffable implement public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; - - SnapshotsInProgress that = (SnapshotsInProgress) o; - - if (!entries.equals(that.entries)) return false; - - return true; + return entries.equals(((SnapshotsInProgress) o).entries); } @Override @@ -208,18 +204,11 @@ public String toString() { return snapshot.toString(); } - // package private for testing - ImmutableOpenMap> findWaitingIndices(ImmutableOpenMap shards) { + private ImmutableOpenMap> findWaitingIndices(ImmutableOpenMap shards) { Map> waitingIndicesMap = new HashMap<>(); for (ObjectObjectCursor entry : shards) { - if (entry.value.state() == State.WAITING) { - final String indexName = entry.key.getIndexName(); - List waitingShards = waitingIndicesMap.get(indexName); - if (waitingShards == null) { - waitingShards = new ArrayList<>(); - waitingIndicesMap.put(indexName, waitingShards); - } - waitingShards.add(entry.key); + if (entry.value.state() == ShardState.WAITING) { + waitingIndicesMap.computeIfAbsent(entry.key.getIndexName(), k -> new ArrayList<>()).add(entry.key); } } if (waitingIndicesMap.isEmpty()) { @@ -241,28 +230,27 @@ ImmutableOpenMap> findWaitingIndices(ImmutableOpenMap shards) { for (ObjectCursor status : shards) { - if (status.value.state().completed() == false) { + if (status.value.state().completed == false) { return false; } } return true; } - public static class ShardSnapshotStatus { - private final State state; + private final ShardState state; private final String nodeId; private final String reason; public ShardSnapshotStatus(String nodeId) { - this(nodeId, State.INIT); + this(nodeId, ShardState.INIT); } - public ShardSnapshotStatus(String nodeId, State state) { + public ShardSnapshotStatus(String nodeId, ShardState state) { this(nodeId, state, null); } - public ShardSnapshotStatus(String nodeId, State state, String reason) { + public ShardSnapshotStatus(String nodeId, ShardState state, String reason) { this.nodeId = nodeId; this.state = state; this.reason = reason; @@ -272,11 +260,11 @@ public ShardSnapshotStatus(String nodeId, State state, String reason) { public ShardSnapshotStatus(StreamInput in) throws IOException { nodeId = in.readOptionalString(); - state = State.fromValue(in.readByte()); + state = ShardState.fromValue(in.readByte()); reason = in.readOptionalString(); } - public State state() { + public ShardState state() { return state; } @@ -298,14 +286,9 @@ public void writeTo(StreamOutput out) throws IOException { public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; - ShardSnapshotStatus status = (ShardSnapshotStatus) o; + return Objects.equals(nodeId, status.nodeId) && Objects.equals(reason, status.reason) && state == status.state; - if (nodeId != null ? !nodeId.equals(status.nodeId) : status.nodeId != null) return false; - if (reason != null ? !reason.equals(status.reason) : status.reason != null) return false; - if (state != status.state) return false; - - return true; } @Override @@ -331,11 +314,11 @@ public enum State { MISSING((byte) 5, true, true), WAITING((byte) 6, false, false); - private byte value; + private final byte value; - private boolean completed; + private final boolean completed; - private boolean failed; + private final boolean failed; State(byte value, boolean completed, boolean failed) { this.value = value; @@ -379,7 +362,6 @@ public static State fromValue(byte value) { private final List entries; - public SnapshotsInProgress(List entries) { this.entries = entries; } @@ -534,4 +516,52 @@ public void toXContent(Entry entry, XContentBuilder builder, ToXContent.Params p builder.endArray(); builder.endObject(); } + + public enum ShardState { + INIT((byte) 0, false, false), + SUCCESS((byte) 2, true, false), + FAILED((byte) 3, true, true), + ABORTED((byte) 4, false, true), + MISSING((byte) 5, true, true), + WAITING((byte) 6, false, false); + + private final byte value; + + private final boolean completed; + + private final boolean failed; + + ShardState(byte value, boolean completed, boolean failed) { + this.value = value; + this.completed = completed; + this.failed = failed; + } + + public boolean completed() { + return completed; + } + + public boolean failed() { + return failed; + } + + public static ShardState fromValue(byte value) { + switch (value) { + case 0: + return INIT; + case 2: + return SUCCESS; + case 3: + return FAILED; + case 4: + return ABORTED; + case 5: + return MISSING; + case 6: + return WAITING; + default: + throw new IllegalArgumentException("No shard snapshot state for value [" + value + "]"); + } + } + } } diff --git a/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java b/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java index f052a1c7abeb8..a0c5ea9392c67 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java +++ b/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java @@ -39,6 +39,7 @@ import org.elasticsearch.cluster.ClusterStateTaskListener; import org.elasticsearch.cluster.SnapshotsInProgress; import org.elasticsearch.cluster.SnapshotsInProgress.ShardSnapshotStatus; +import org.elasticsearch.cluster.SnapshotsInProgress.ShardState; import org.elasticsearch.cluster.SnapshotsInProgress.State; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; @@ -248,7 +249,8 @@ private void startNewSnapshots(SnapshotsInProgress snapshotsInProgress) { // Add all new shards to start processing on final ShardId shardId = shard.key; final ShardSnapshotStatus shardSnapshotStatus = shard.value; - if (localNodeId.equals(shardSnapshotStatus.nodeId()) && shardSnapshotStatus.state() == State.INIT + if (localNodeId.equals(shardSnapshotStatus.nodeId()) + && shardSnapshotStatus.state() == ShardState.INIT && snapshotShards.containsKey(shardId) == false) { logger.trace("[{}] - Adding shard to the queue", shardId); if (startedShards == null) { @@ -286,7 +288,7 @@ private void startNewSnapshots(SnapshotsInProgress snapshotsInProgress) { } else { // due to CS batching we might have missed the INIT state and straight went into ABORTED // notify master that abort has completed by moving to FAILED - if (shard.value.state() == State.ABORTED) { + if (shard.value.state() == ShardState.ABORTED) { notifyFailedSnapshotShard(snapshot, shard.key, shard.value.reason()); } } @@ -480,12 +482,14 @@ public String toString() { /** Notify the master node that the given shard has been successfully snapshotted **/ private void notifySuccessfulSnapshotShard(final Snapshot snapshot, final ShardId shardId) { - sendSnapshotShardUpdate(snapshot, shardId, new ShardSnapshotStatus(clusterService.localNode().getId(), State.SUCCESS)); + sendSnapshotShardUpdate(snapshot, shardId, + new ShardSnapshotStatus(clusterService.localNode().getId(), ShardState.SUCCESS)); } /** Notify the master node that the given shard failed to be snapshotted **/ private void notifyFailedSnapshotShard(final Snapshot snapshot, final ShardId shardId, final String failure) { - sendSnapshotShardUpdate(snapshot, shardId, new ShardSnapshotStatus(clusterService.localNode().getId(), State.FAILED, failure)); + sendSnapshotShardUpdate(snapshot, shardId, + new ShardSnapshotStatus(clusterService.localNode().getId(), ShardState.FAILED, failure)); } /** Updates the shard snapshot status by sending a {@link UpdateIndexShardSnapshotStatusRequest} to the master node */ diff --git a/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java b/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java index 1559bae8259b0..e606bff0cb9e4 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java +++ b/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java @@ -37,6 +37,7 @@ import org.elasticsearch.cluster.SnapshotDeletionsInProgress; import org.elasticsearch.cluster.SnapshotsInProgress; import org.elasticsearch.cluster.SnapshotsInProgress.ShardSnapshotStatus; +import org.elasticsearch.cluster.SnapshotsInProgress.ShardState; import org.elasticsearch.cluster.SnapshotsInProgress.State; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; @@ -776,7 +777,7 @@ public ClusterState execute(ClusterState currentState) { logger.warn("failing snapshot of shard [{}] on closed node [{}]", shardEntry.key, shardStatus.nodeId()); shards.put(shardEntry.key, - new ShardSnapshotStatus(shardStatus.nodeId(), State.FAILED, "node shutdown")); + new ShardSnapshotStatus(shardStatus.nodeId(), ShardState.FAILED, "node shutdown")); } } } @@ -872,7 +873,7 @@ private static ImmutableOpenMap processWaitingShar for (ObjectObjectCursor shardEntry : snapshotShards) { ShardSnapshotStatus shardStatus = shardEntry.value; ShardId shardId = shardEntry.key; - if (shardStatus.state() == State.WAITING) { + if (shardStatus.state() == ShardState.WAITING) { IndexRoutingTable indexShardRoutingTable = routingTable.index(shardId.getIndex()); if (indexShardRoutingTable != null) { IndexShardRoutingTable shardRouting = indexShardRoutingTable.shard(shardId.id()); @@ -893,7 +894,7 @@ private static ImmutableOpenMap processWaitingShar // Shard that we were waiting for went into unassigned state or disappeared - giving up snapshotChanged = true; logger.warn("failing snapshot of shard [{}] on unassigned shard [{}]", shardId, shardStatus.nodeId()); - shards.put(shardId, new ShardSnapshotStatus(shardStatus.nodeId(), State.FAILED, "shard is unassigned")); + shards.put(shardId, new ShardSnapshotStatus(shardStatus.nodeId(), ShardState.FAILED, "shard is unassigned")); } else { shards.put(shardId, shardStatus); } @@ -943,7 +944,7 @@ private static Tuple, Set> indicesWithMissingShards( Set missing = new HashSet<>(); Set closed = new HashSet<>(); for (ObjectObjectCursor entry : shards) { - if (entry.value.state() == State.MISSING) { + if (entry.value.state() == ShardState.MISSING) { if (metaData.hasIndex(entry.key.getIndex().getName()) && metaData.getIndexSafe(entry.key.getIndex()).getState() == IndexMetaData.State.CLOSE) { closed.add(entry.key.getIndex().getName()); @@ -1195,7 +1196,7 @@ public ClusterState execute(ClusterState currentState) { for (ObjectObjectCursor shardEntry : snapshotEntry.shards()) { ShardSnapshotStatus status = shardEntry.value; if (status.state().completed() == false) { - status = new ShardSnapshotStatus(status.nodeId(), State.ABORTED, "aborted by snapshot deletion"); + status = new ShardSnapshotStatus(status.nodeId(), ShardState.ABORTED, "aborted by snapshot deletion"); } shardsBuilder.put(shardEntry.key, status); } @@ -1385,7 +1386,7 @@ private static ImmutableOpenMap shards = ImmutableOpenMap.builder(); // test more than one waiting shard in an index - shards.put(new ShardId(idx1Name, idx1UUID, 0), new ShardSnapshotStatus(randomAlphaOfLength(2), State.WAITING)); - shards.put(new ShardId(idx1Name, idx1UUID, 1), new ShardSnapshotStatus(randomAlphaOfLength(2), State.WAITING)); + shards.put(new ShardId(idx1Name, idx1UUID, 0), new ShardSnapshotStatus(randomAlphaOfLength(2), ShardState.WAITING)); + shards.put(new ShardId(idx1Name, idx1UUID, 1), new ShardSnapshotStatus(randomAlphaOfLength(2), ShardState.WAITING)); shards.put(new ShardId(idx1Name, idx1UUID, 2), new ShardSnapshotStatus(randomAlphaOfLength(2), randomNonWaitingState(), "")); // test exactly one waiting shard in an index - shards.put(new ShardId(idx2Name, idx2UUID, 0), new ShardSnapshotStatus(randomAlphaOfLength(2), State.WAITING)); + shards.put(new ShardId(idx2Name, idx2UUID, 0), new ShardSnapshotStatus(randomAlphaOfLength(2), ShardState.WAITING)); shards.put(new ShardId(idx2Name, idx2UUID, 1), new ShardSnapshotStatus(randomAlphaOfLength(2), randomNonWaitingState(), "")); // test no waiting shards in an index shards.put(new ShardId(idx3Name, idx3UUID, 0), new ShardSnapshotStatus(randomAlphaOfLength(2), randomNonWaitingState(), "")); @@ -72,7 +73,7 @@ public void testWaitingIndices() { assertFalse(waitingIndices.containsKey(idx3Name)); } - private State randomNonWaitingState() { - return randomFrom(Arrays.stream(State.values()).filter(s -> s != State.WAITING).collect(Collectors.toSet())); + private ShardState randomNonWaitingState() { + return randomFrom(Arrays.stream(ShardState.values()).filter(s -> s != ShardState.WAITING).collect(Collectors.toSet())); } } diff --git a/server/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java b/server/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java index 3a78b4786fc5c..0aa9fe1a9e2a6 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java +++ b/server/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java @@ -53,6 +53,7 @@ import org.elasticsearch.cluster.SnapshotsInProgress; import org.elasticsearch.cluster.SnapshotsInProgress.Entry; import org.elasticsearch.cluster.SnapshotsInProgress.ShardSnapshotStatus; +import org.elasticsearch.cluster.SnapshotsInProgress.ShardState; import org.elasticsearch.cluster.SnapshotsInProgress.State; import org.elasticsearch.cluster.block.ClusterBlocks; import org.elasticsearch.cluster.metadata.IndexMetaData; @@ -2701,9 +2702,9 @@ public void testDeleteOrphanSnapshot() throws Exception { public ClusterState execute(ClusterState currentState) { // Simulate orphan snapshot ImmutableOpenMap.Builder shards = ImmutableOpenMap.builder(); - shards.put(new ShardId(idxName, "_na_", 0), new ShardSnapshotStatus("unknown-node", State.ABORTED, "aborted")); - shards.put(new ShardId(idxName, "_na_", 1), new ShardSnapshotStatus("unknown-node", State.ABORTED, "aborted")); - shards.put(new ShardId(idxName, "_na_", 2), new ShardSnapshotStatus("unknown-node", State.ABORTED, "aborted")); + shards.put(new ShardId(idxName, "_na_", 0), new ShardSnapshotStatus("unknown-node", ShardState.ABORTED, "aborted")); + shards.put(new ShardId(idxName, "_na_", 1), new ShardSnapshotStatus("unknown-node", ShardState.ABORTED, "aborted")); + shards.put(new ShardId(idxName, "_na_", 2), new ShardSnapshotStatus("unknown-node", ShardState.ABORTED, "aborted")); return ClusterState.builder(currentState) .putCustom(SnapshotsInProgress.TYPE, new SnapshotsInProgress(List.of(new Entry( new Snapshot(repositoryName, createSnapshotResponse.getSnapshotInfo().snapshotId()), diff --git a/server/src/test/java/org/elasticsearch/snapshots/SnapshotsInProgressSerializationTests.java b/server/src/test/java/org/elasticsearch/snapshots/SnapshotsInProgressSerializationTests.java index 3f23c8f0a2ded..6c8ddfb56c1cf 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/SnapshotsInProgressSerializationTests.java +++ b/server/src/test/java/org/elasticsearch/snapshots/SnapshotsInProgressSerializationTests.java @@ -24,6 +24,7 @@ import org.elasticsearch.cluster.Diff; import org.elasticsearch.cluster.SnapshotsInProgress; import org.elasticsearch.cluster.SnapshotsInProgress.Entry; +import org.elasticsearch.cluster.SnapshotsInProgress.ShardState; import org.elasticsearch.cluster.SnapshotsInProgress.State; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; @@ -65,7 +66,7 @@ private Entry randomSnapshot() { for (int j = 0; j < shardsCount; j++) { ShardId shardId = new ShardId(new Index(randomAlphaOfLength(10), randomAlphaOfLength(10)), randomIntBetween(0, 10)); String nodeId = randomAlphaOfLength(10); - State shardState = randomFrom(State.values()); + ShardState shardState = randomFrom(ShardState.values()); builder.put(shardId, new SnapshotsInProgress.ShardSnapshotStatus(nodeId, shardState, shardState.failed() ? randomAlphaOfLength(10) : null)); } From 4a9788eeb31c01c8f6039884eae9bec40707cc5b Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Wed, 22 May 2019 10:02:14 +0200 Subject: [PATCH 024/224] Fix testTracerLog Network Tests (#42286) * Fix testTracerLog Network Tests * Start appender before using it like we do for e.g. the Netty leak detection appender to avoid interference from actions on the network threads that might still be dangling from previous tests in the same suite * Closes #41890 --- .../transport/AbstractSimpleTransportTestCase.java | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java b/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java index 783fe77b9bf9c..441044328be96 100644 --- a/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java @@ -1047,10 +1047,9 @@ public String executor() { .build()); MockLogAppender appender = new MockLogAppender(); - Loggers.addAppender(LogManager.getLogger("org.elasticsearch.transport.TransportService.tracer"), appender); try { appender.start(); - + Loggers.addAppender(LogManager.getLogger("org.elasticsearch.transport.TransportService.tracer"), appender); final String requestSent = ".*\\[internal:test].*sent to.*\\{TS_B}.*"; final MockLogAppender.LoggingExpectation requestSentExpectation = new MockLogAppender.PatternSeenEventExpectation( From a28d405a30ce92cf8fd7abc86915a0ff4cef2166 Mon Sep 17 00:00:00 2001 From: Ioannis Kakavas Date: Wed, 22 May 2019 11:08:11 +0300 Subject: [PATCH 025/224] Revert "mute failing filerealm hash caching tests (#42304)" This reverts commit 8907dc9598667a1fa29be0ba22c7030ebee1101b. --- .../elasticsearch/xpack/security/authc/file/FileRealmTests.java | 1 - .../authc/support/CachingUsernamePasswordRealmTests.java | 1 - 2 files changed, 2 deletions(-) diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/file/FileRealmTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/file/FileRealmTests.java index 67ab33bac7380..d2ab879d4d4ff 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/file/FileRealmTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/file/FileRealmTests.java @@ -89,7 +89,6 @@ private RealmConfig getRealmConfig(Settings settings) { return new RealmConfig(REALM_IDENTIFIER, settings, TestEnvironment.newEnvironment(settings), threadContext); } - @AwaitsFix(bugUrl="https://github.com/elastic/elasticsearch/issues/42267") public void testAuthenticateCaching() throws Exception { Settings settings = Settings.builder() .put(RealmSettings.realmSettingPrefix(REALM_IDENTIFIER) + "cache.hash_algo", diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/CachingUsernamePasswordRealmTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/CachingUsernamePasswordRealmTests.java index 49f0d45966639..91a0fc9d94e2e 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/CachingUsernamePasswordRealmTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/CachingUsernamePasswordRealmTests.java @@ -63,7 +63,6 @@ public void stop() { } } - @AwaitsFix(bugUrl="https://github.com/elastic/elasticsearch/issues/42267") public void testCacheSettings() { String cachingHashAlgo = randomFrom(Hasher.getAvailableAlgoCacheHash()); int maxUsers = randomIntBetween(10, 100); From b31482e65992f90d76b13286043f6986899c4403 Mon Sep 17 00:00:00 2001 From: Yannick Welsch Date: Wed, 22 May 2019 10:25:11 +0200 Subject: [PATCH 026/224] Remove usage of max_local_storage_nodes in test infrastructure (#41652) Moves the test infrastructure away from using node.max_local_storage_nodes, allowing us in a follow-up PR to deprecate this setting in 7.x and to remove it in 8.0. This also changes the behavior of InternalTestCluster so that starting up nodes will not automatically reuse data folders of previously stopped nodes. If this behavior is desired, it needs to be explicitly done by passing the data path from the stopped node to the new node that is started. --- .../gradle/test/ClusterFormationTasks.groovy | 1 - .../ingest/common/IngestRestartIT.java | 10 +- .../ClusterAllocationExplainIT.java | 9 +- .../cluster/ClusterInfoServiceIT.java | 10 -- .../cluster/MinimumMasterNodesIT.java | 15 ++- .../cluster/SpecificMasterNodesIT.java | 6 +- .../UnsafeBootstrapAndDetachCommandIT.java | 126 ++++++++++++------ .../cluster/routing/AllocationIdIT.java | 6 +- .../cluster/routing/DelayedAllocationIT.java | 6 +- .../cluster/routing/PrimaryAllocationIT.java | 15 ++- .../elasticsearch/env/NodeEnvironmentIT.java | 10 +- .../env/NodeRepurposeCommandIT.java | 36 +++-- .../env/NodeRepurposeCommandTests.java | 8 +- .../gateway/RecoveryFromGatewayIT.java | 9 +- .../RemoveCorruptedShardDataCommandIT.java | 75 +++++------ .../memory/breaker/CircuitBreakerNoopIT.java | 2 - .../indices/recovery/IndexRecoveryIT.java | 10 +- .../store/IndicesStoreIntegrationIT.java | 11 +- .../elasticsearch/test/ESIntegTestCase.java | 2 - .../test/InternalTestCluster.java | 48 ++++--- .../test/test/InternalTestClusterTests.java | 51 +++---- .../elasticsearch/xpack/CcrIntegTestCase.java | 2 - .../integration/BasicDistributedJobsIT.java | 7 +- .../integration/MlDistributedFailureIT.java | 4 +- 24 files changed, 270 insertions(+), 209 deletions(-) diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy index e7ffb88f13702..c0bf2a5dccee5 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy @@ -382,7 +382,6 @@ class ClusterFormationTasks { // Don't wait for state, just start up quickly. This will also allow new and old nodes in the BWC case to become the master 'discovery.initial_state_timeout' : '0s' ] - esConfig['node.max_local_storage_nodes'] = node.config.numNodes esConfig['http.port'] = node.config.httpPort if (node.nodeVersion.onOrAfter('6.7.0')) { esConfig['transport.port'] = node.config.transportPort diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/IngestRestartIT.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/IngestRestartIT.java index 8c3976d2b175c..6c79c68df1df1 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/IngestRestartIT.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/IngestRestartIT.java @@ -91,9 +91,15 @@ public void testScriptDisabled() throws Exception { checkPipelineExists.accept(pipelineIdWithoutScript); - internalCluster().stopCurrentMasterNode(); - internalCluster().startNode(Settings.builder().put("script.allowed_types", "none")); + internalCluster().restartNode(internalCluster().getMasterName(), new InternalTestCluster.RestartCallback() { + @Override + public Settings onNodeStopped(String nodeName) { + return Settings.builder().put("script.allowed_types", "none").build(); + } + + }); + checkPipelineExists.accept(pipelineIdWithoutScript); checkPipelineExists.accept(pipelineIdWithScript); diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainIT.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainIT.java index 941ad3c658aba..25765ab1ee667 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainIT.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainIT.java @@ -277,6 +277,8 @@ public void testUnassignedReplicaWithPriorCopy() throws Exception { nodes.remove(primaryNodeName); logger.info("--> shutting down all nodes except the one that holds the primary"); + Settings node0DataPathSettings = internalCluster().dataPathSettings(nodes.get(0)); + Settings node1DataPathSettings = internalCluster().dataPathSettings(nodes.get(1)); internalCluster().stopRandomNode(InternalTestCluster.nameFilter(nodes.get(0))); internalCluster().stopRandomNode(InternalTestCluster.nameFilter(nodes.get(1))); ensureStableCluster(1); @@ -286,8 +288,8 @@ public void testUnassignedReplicaWithPriorCopy() throws Exception { Settings.builder().put("index.routing.allocation.include._name", primaryNodeName)).get(); logger.info("--> restarting the stopped nodes"); - internalCluster().startNode(Settings.builder().put("node.name", nodes.get(0)).build()); - internalCluster().startNode(Settings.builder().put("node.name", nodes.get(1)).build()); + internalCluster().startNode(Settings.builder().put("node.name", nodes.get(0)).put(node0DataPathSettings).build()); + internalCluster().startNode(Settings.builder().put("node.name", nodes.get(1)).put(node1DataPathSettings).build()); ensureStableCluster(3); boolean includeYesDecisions = randomBoolean(); @@ -1017,6 +1019,7 @@ public void testCannotAllocateStaleReplicaExplanation() throws Exception { // start replica node first, so it's path will be used first when we start a node after // stopping all of them at end of test. final String replicaNode = internalCluster().startNode(); + Settings replicaDataPathSettings = internalCluster().dataPathSettings(replicaNode); final String primaryNode = internalCluster().startNode(); prepareIndex(IndexMetaData.State.OPEN, 1, 1, @@ -1057,7 +1060,7 @@ public void testCannotAllocateStaleReplicaExplanation() throws Exception { internalCluster().stopRandomNode(InternalTestCluster.nameFilter(primaryNode)); logger.info("--> restart the node with the stale replica"); - String restartedNode = internalCluster().startDataOnlyNode(); + String restartedNode = internalCluster().startDataOnlyNode(replicaDataPathSettings); ensureClusterSizeConsistency(); // wait for the master to finish processing join. // wait until the system has fetched shard data and we know there is no valid shard copy diff --git a/server/src/test/java/org/elasticsearch/cluster/ClusterInfoServiceIT.java b/server/src/test/java/org/elasticsearch/cluster/ClusterInfoServiceIT.java index 99f89548524f5..aa897f10bb895 100644 --- a/server/src/test/java/org/elasticsearch/cluster/ClusterInfoServiceIT.java +++ b/server/src/test/java/org/elasticsearch/cluster/ClusterInfoServiceIT.java @@ -36,7 +36,6 @@ import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.store.Store; @@ -105,15 +104,6 @@ public void blockActions(String... actions) { } } - @Override - protected Settings nodeSettings(int nodeOrdinal) { - return Settings.builder() - .put(super.nodeSettings(nodeOrdinal)) - // manual collection or upon cluster forming. - .put(NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING.getKey(), 2) - .build(); - } - @Override protected Collection> nodePlugins() { return Arrays.asList(TestPlugin.class, MockTransportService.TestPlugin.class); diff --git a/server/src/test/java/org/elasticsearch/cluster/MinimumMasterNodesIT.java b/server/src/test/java/org/elasticsearch/cluster/MinimumMasterNodesIT.java index 4bd5d2e675b75..cb1443bdf3765 100644 --- a/server/src/test/java/org/elasticsearch/cluster/MinimumMasterNodesIT.java +++ b/server/src/test/java/org/elasticsearch/cluster/MinimumMasterNodesIT.java @@ -32,6 +32,7 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Priority; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; @@ -43,10 +44,12 @@ import org.elasticsearch.test.junit.annotations.TestLogging; import org.elasticsearch.test.transport.MockTransportService; +import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.HashSet; +import java.util.List; import java.util.Set; import java.util.concurrent.CountDownLatch; import java.util.concurrent.atomic.AtomicReference; @@ -124,6 +127,7 @@ public void testTwoNodesNoMasterBlock() throws Exception { logger.info("--> add voting config exclusion for non-master node, to be sure it's not elected"); client().execute(AddVotingConfigExclusionsAction.INSTANCE, new AddVotingConfigExclusionsRequest(new String[]{otherNode})).get(); logger.info("--> stop master node, no master block should appear"); + Settings masterDataPathSettings = internalCluster().dataPathSettings(masterNode); internalCluster().stopRandomNode(InternalTestCluster.nameFilter(masterNode)); awaitBusy(() -> { @@ -137,7 +141,7 @@ public void testTwoNodesNoMasterBlock() throws Exception { assertThat(state.nodes().getMasterNode(), equalTo(null)); logger.info("--> starting the previous master node again..."); - node2Name = internalCluster().startNode(settings); + node2Name = internalCluster().startNode(Settings.builder().put(settings).put(masterDataPathSettings).build()); clusterHealthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID) .setWaitForYellowStatus().setWaitForNodes("2").execute().actionGet(); @@ -169,6 +173,7 @@ public void testTwoNodesNoMasterBlock() throws Exception { logger.info("--> add voting config exclusion for master node, to be sure it's not elected"); client().execute(AddVotingConfigExclusionsAction.INSTANCE, new AddVotingConfigExclusionsRequest(new String[]{masterNode})).get(); logger.info("--> stop non-master node, no master block should appear"); + Settings otherNodeDataPathSettings = internalCluster().dataPathSettings(otherNode); internalCluster().stopRandomNode(InternalTestCluster.nameFilter(otherNode)); assertBusy(() -> { @@ -177,7 +182,7 @@ public void testTwoNodesNoMasterBlock() throws Exception { }); logger.info("--> starting the previous master node again..."); - internalCluster().startNode(settings); + internalCluster().startNode(Settings.builder().put(settings).put(otherNodeDataPathSettings).build()); ensureGreen(); clusterHealthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID) @@ -251,6 +256,10 @@ public void testThreeNodesNoMasterBlock() throws Exception { assertHitCount(client().prepareSearch().setSize(0).setQuery(QueryBuilders.matchAllQuery()).execute().actionGet(), 100); } + List nonMasterNodes = new ArrayList<>(Sets.difference(Sets.newHashSet(internalCluster().getNodeNames()), + Collections.singleton(internalCluster().getMasterName()))); + Settings nonMasterDataPathSettings1 = internalCluster().dataPathSettings(nonMasterNodes.get(0)); + Settings nonMasterDataPathSettings2 = internalCluster().dataPathSettings(nonMasterNodes.get(1)); internalCluster().stopRandomNonMasterNode(); internalCluster().stopRandomNonMasterNode(); @@ -262,7 +271,7 @@ public void testThreeNodesNoMasterBlock() throws Exception { }); logger.info("--> start back the 2 nodes "); - internalCluster().startNodes(2, settings); + internalCluster().startNodes(nonMasterDataPathSettings1, nonMasterDataPathSettings2); internalCluster().validateClusterFormed(); ensureGreen(); diff --git a/server/src/test/java/org/elasticsearch/cluster/SpecificMasterNodesIT.java b/server/src/test/java/org/elasticsearch/cluster/SpecificMasterNodesIT.java index 5ce996a2e77fd..f80a5befa83d9 100644 --- a/server/src/test/java/org/elasticsearch/cluster/SpecificMasterNodesIT.java +++ b/server/src/test/java/org/elasticsearch/cluster/SpecificMasterNodesIT.java @@ -65,6 +65,7 @@ public void testSimpleOnlyMasterNodeElection() throws IOException { .execute().actionGet().getState().nodes().getMasterNode().getName(), equalTo(masterNodeName)); logger.info("--> stop master node"); + Settings masterDataPathSettings = internalCluster().dataPathSettings(internalCluster().getMasterName()); internalCluster().stopCurrentMasterNode(); try { @@ -75,9 +76,10 @@ public void testSimpleOnlyMasterNodeElection() throws IOException { // all is well, no master elected } - logger.info("--> start master node"); + logger.info("--> start previous master node again"); final String nextMasterEligibleNodeName = internalCluster() - .startNode(Settings.builder().put(Node.NODE_DATA_SETTING.getKey(), false).put(Node.NODE_MASTER_SETTING.getKey(), true)); + .startNode(Settings.builder().put(Node.NODE_DATA_SETTING.getKey(), false).put(Node.NODE_MASTER_SETTING.getKey(), true) + .put(masterDataPathSettings)); assertThat(internalCluster().nonMasterClient().admin().cluster().prepareState() .execute().actionGet().getState().nodes().getMasterNode().getName(), equalTo(nextMasterEligibleNodeName)); assertThat(internalCluster().masterClient().admin().cluster().prepareState() diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/UnsafeBootstrapAndDetachCommandIT.java b/server/src/test/java/org/elasticsearch/cluster/coordination/UnsafeBootstrapAndDetachCommandIT.java index 88cd5bdee1278..3bbf8378483dd 100644 --- a/server/src/test/java/org/elasticsearch/cluster/coordination/UnsafeBootstrapAndDetachCommandIT.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/UnsafeBootstrapAndDetachCommandIT.java @@ -79,26 +79,26 @@ private MockTerminal executeCommand(ElasticsearchNodeCommand command, Environmen return terminal; } - private MockTerminal unsafeBootstrap(Environment environment, int nodeOrdinal, boolean abort) throws Exception { - final MockTerminal terminal = executeCommand(new UnsafeBootstrapMasterCommand(), environment, nodeOrdinal, abort); + private MockTerminal unsafeBootstrap(Environment environment, boolean abort) throws Exception { + final MockTerminal terminal = executeCommand(new UnsafeBootstrapMasterCommand(), environment, 0, abort); assertThat(terminal.getOutput(), containsString(UnsafeBootstrapMasterCommand.CONFIRMATION_MSG)); assertThat(terminal.getOutput(), containsString(UnsafeBootstrapMasterCommand.MASTER_NODE_BOOTSTRAPPED_MSG)); return terminal; } - private MockTerminal detachCluster(Environment environment, int nodeOrdinal, boolean abort) throws Exception { - final MockTerminal terminal = executeCommand(new DetachClusterCommand(), environment, nodeOrdinal, abort); + private MockTerminal detachCluster(Environment environment, boolean abort) throws Exception { + final MockTerminal terminal = executeCommand(new DetachClusterCommand(), environment, 0, abort); assertThat(terminal.getOutput(), containsString(DetachClusterCommand.CONFIRMATION_MSG)); assertThat(terminal.getOutput(), containsString(DetachClusterCommand.NODE_DETACHED_MSG)); return terminal; } private MockTerminal unsafeBootstrap(Environment environment) throws Exception { - return unsafeBootstrap(environment, 0, false); + return unsafeBootstrap(environment, false); } private MockTerminal detachCluster(Environment environment) throws Exception { - return detachCluster(environment, 0, false); + return detachCluster(environment, false); } private void expectThrows(ThrowingRunnable runnable, String message) { @@ -151,7 +151,7 @@ public void testBootstrapNoNodeMetaData() throws IOException { } public void testBootstrapNotBootstrappedCluster() throws Exception { - internalCluster().startNode( + String node = internalCluster().startNode( Settings.builder() .put(Node.INITIAL_STATE_TIMEOUT_SETTING.getKey(), "0s") // to ensure quick node startup .build()); @@ -161,14 +161,17 @@ public void testBootstrapNotBootstrappedCluster() throws Exception { assertTrue(state.blocks().hasGlobalBlockWithId(NoMasterBlockService.NO_MASTER_BLOCK_ID)); }); + Settings dataPathSettings = internalCluster().dataPathSettings(node); + internalCluster().stopRandomDataNode(); - Environment environment = TestEnvironment.newEnvironment(internalCluster().getDefaultSettings()); + Environment environment = TestEnvironment.newEnvironment( + Settings.builder().put(internalCluster().getDefaultSettings()).put(dataPathSettings).build()); expectThrows(() -> unsafeBootstrap(environment), ElasticsearchNodeCommand.GLOBAL_GENERATION_MISSING_MSG); } public void testDetachNotBootstrappedCluster() throws Exception { - internalCluster().startNode( + String node = internalCluster().startNode( Settings.builder() .put(Node.INITIAL_STATE_TIMEOUT_SETTING.getKey(), "0s") // to ensure quick node startup .build()); @@ -178,19 +181,24 @@ public void testDetachNotBootstrappedCluster() throws Exception { assertTrue(state.blocks().hasGlobalBlockWithId(NoMasterBlockService.NO_MASTER_BLOCK_ID)); }); + Settings dataPathSettings = internalCluster().dataPathSettings(node); + internalCluster().stopRandomDataNode(); - Environment environment = TestEnvironment.newEnvironment(internalCluster().getDefaultSettings()); + Environment environment = TestEnvironment.newEnvironment( + Settings.builder().put(internalCluster().getDefaultSettings()).put(dataPathSettings).build()); expectThrows(() -> detachCluster(environment), ElasticsearchNodeCommand.GLOBAL_GENERATION_MISSING_MSG); } public void testBootstrapNoManifestFile() throws IOException { internalCluster().setBootstrapMasterNodeIndex(0); - internalCluster().startNode(); + String node = internalCluster().startNode(); + Settings dataPathSettings = internalCluster().dataPathSettings(node); ensureStableCluster(1); NodeEnvironment nodeEnvironment = internalCluster().getMasterNodeInstance(NodeEnvironment.class); internalCluster().stopRandomDataNode(); - Environment environment = TestEnvironment.newEnvironment(internalCluster().getDefaultSettings()); + Environment environment = TestEnvironment.newEnvironment( + Settings.builder().put(internalCluster().getDefaultSettings()).put(dataPathSettings).build()); Manifest.FORMAT.cleanupOldFiles(-1, nodeEnvironment.nodeDataPaths()); expectThrows(() -> unsafeBootstrap(environment), ElasticsearchNodeCommand.NO_MANIFEST_FILE_FOUND_MSG); @@ -198,11 +206,13 @@ public void testBootstrapNoManifestFile() throws IOException { public void testDetachNoManifestFile() throws IOException { internalCluster().setBootstrapMasterNodeIndex(0); - internalCluster().startNode(); + String node = internalCluster().startNode(); + Settings dataPathSettings = internalCluster().dataPathSettings(node); ensureStableCluster(1); NodeEnvironment nodeEnvironment = internalCluster().getMasterNodeInstance(NodeEnvironment.class); internalCluster().stopRandomDataNode(); - Environment environment = TestEnvironment.newEnvironment(internalCluster().getDefaultSettings()); + Environment environment = TestEnvironment.newEnvironment( + Settings.builder().put(internalCluster().getDefaultSettings()).put(dataPathSettings).build()); Manifest.FORMAT.cleanupOldFiles(-1, nodeEnvironment.nodeDataPaths()); expectThrows(() -> detachCluster(environment), ElasticsearchNodeCommand.NO_MANIFEST_FILE_FOUND_MSG); @@ -210,12 +220,14 @@ public void testDetachNoManifestFile() throws IOException { public void testBootstrapNoMetaData() throws IOException { internalCluster().setBootstrapMasterNodeIndex(0); - internalCluster().startNode(); + String node = internalCluster().startNode(); + Settings dataPathSettings = internalCluster().dataPathSettings(node); ensureStableCluster(1); NodeEnvironment nodeEnvironment = internalCluster().getMasterNodeInstance(NodeEnvironment.class); internalCluster().stopRandomDataNode(); - Environment environment = TestEnvironment.newEnvironment(internalCluster().getDefaultSettings()); + Environment environment = TestEnvironment.newEnvironment( + Settings.builder().put(internalCluster().getDefaultSettings()).put(dataPathSettings).build()); MetaData.FORMAT.cleanupOldFiles(-1, nodeEnvironment.nodeDataPaths()); expectThrows(() -> unsafeBootstrap(environment), ElasticsearchNodeCommand.NO_GLOBAL_METADATA_MSG); @@ -223,12 +235,14 @@ public void testBootstrapNoMetaData() throws IOException { public void testDetachNoMetaData() throws IOException { internalCluster().setBootstrapMasterNodeIndex(0); - internalCluster().startNode(); + String node = internalCluster().startNode(); + Settings dataPathSettings = internalCluster().dataPathSettings(node); ensureStableCluster(1); NodeEnvironment nodeEnvironment = internalCluster().getMasterNodeInstance(NodeEnvironment.class); internalCluster().stopRandomDataNode(); - Environment environment = TestEnvironment.newEnvironment(internalCluster().getDefaultSettings()); + Environment environment = TestEnvironment.newEnvironment( + Settings.builder().put(internalCluster().getDefaultSettings()).put(dataPathSettings).build()); MetaData.FORMAT.cleanupOldFiles(-1, nodeEnvironment.nodeDataPaths()); expectThrows(() -> detachCluster(environment), ElasticsearchNodeCommand.NO_GLOBAL_METADATA_MSG); @@ -236,22 +250,26 @@ public void testDetachNoMetaData() throws IOException { public void testBootstrapAbortedByUser() throws IOException { internalCluster().setBootstrapMasterNodeIndex(0); - internalCluster().startNode(); + String node = internalCluster().startNode(); + Settings dataPathSettings = internalCluster().dataPathSettings(node); ensureStableCluster(1); internalCluster().stopRandomDataNode(); - Environment environment = TestEnvironment.newEnvironment(internalCluster().getDefaultSettings()); - expectThrows(() -> unsafeBootstrap(environment, 0, true), ElasticsearchNodeCommand.ABORTED_BY_USER_MSG); + Environment environment = TestEnvironment.newEnvironment( + Settings.builder().put(internalCluster().getDefaultSettings()).put(dataPathSettings).build()); + expectThrows(() -> unsafeBootstrap(environment, true), ElasticsearchNodeCommand.ABORTED_BY_USER_MSG); } public void testDetachAbortedByUser() throws IOException { internalCluster().setBootstrapMasterNodeIndex(0); - internalCluster().startNode(); + String node = internalCluster().startNode(); + Settings dataPathSettings = internalCluster().dataPathSettings(node); ensureStableCluster(1); internalCluster().stopRandomDataNode(); - Environment environment = TestEnvironment.newEnvironment(internalCluster().getDefaultSettings()); - expectThrows(() -> detachCluster(environment, 0, true), ElasticsearchNodeCommand.ABORTED_BY_USER_MSG); + Environment environment = TestEnvironment.newEnvironment( + Settings.builder().put(internalCluster().getDefaultSettings()).put(dataPathSettings).build()); + expectThrows(() -> detachCluster(environment, true), ElasticsearchNodeCommand.ABORTED_BY_USER_MSG); } public void test3MasterNodes2Failed() throws Exception { @@ -278,6 +296,11 @@ public void test3MasterNodes2Failed() throws Exception { createIndex("test"); ensureGreen("test"); + Settings master1DataPathSettings = internalCluster().dataPathSettings(masterNodes.get(0)); + Settings master2DataPathSettings = internalCluster().dataPathSettings(masterNodes.get(1)); + Settings master3DataPathSettings = internalCluster().dataPathSettings(masterNodes.get(2)); + Settings dataNodeDataPathSettings = internalCluster().dataPathSettings(dataNode); + logger.info("--> stop 2nd and 3d master eligible node"); internalCluster().stopRandomNode(InternalTestCluster.nameFilter(masterNodes.get(1))); internalCluster().stopRandomNode(InternalTestCluster.nameFilter(masterNodes.get(2))); @@ -290,8 +313,9 @@ public void test3MasterNodes2Failed() throws Exception { }); logger.info("--> try to unsafely bootstrap 1st master-eligible node, while node lock is held"); - final Environment environment = TestEnvironment.newEnvironment(internalCluster().getDefaultSettings()); - expectThrows(() -> unsafeBootstrap(environment), UnsafeBootstrapMasterCommand.FAILED_TO_OBTAIN_NODE_LOCK_MSG); + Environment environmentMaster1 = TestEnvironment.newEnvironment( + Settings.builder().put(internalCluster().getDefaultSettings()).put(master1DataPathSettings).build()); + expectThrows(() -> unsafeBootstrap(environmentMaster1), UnsafeBootstrapMasterCommand.FAILED_TO_OBTAIN_NODE_LOCK_MSG); logger.info("--> stop 1st master-eligible node and data-only node"); NodeEnvironment nodeEnvironment = internalCluster().getMasterNodeInstance(NodeEnvironment.class); @@ -299,20 +323,22 @@ public void test3MasterNodes2Failed() throws Exception { internalCluster().stopRandomDataNode(); logger.info("--> unsafely-bootstrap 1st master-eligible node"); - MockTerminal terminal = unsafeBootstrap(environment); + MockTerminal terminal = unsafeBootstrap(environmentMaster1); MetaData metaData = MetaData.FORMAT.loadLatestState(logger, xContentRegistry(), nodeEnvironment.nodeDataPaths()); assertThat(terminal.getOutput(), containsString( String.format(Locale.ROOT, UnsafeBootstrapMasterCommand.CLUSTER_STATE_TERM_VERSION_MSG_FORMAT, metaData.coordinationMetaData().term(), metaData.version()))); logger.info("--> start 1st master-eligible node"); - internalCluster().startMasterOnlyNode(); + internalCluster().startMasterOnlyNode(master1DataPathSettings); logger.info("--> detach-cluster on data-only node"); - detachCluster(environment, 1, false); + Environment environmentData = TestEnvironment.newEnvironment( + Settings.builder().put(internalCluster().getDefaultSettings()).put(dataNodeDataPathSettings).build()); + detachCluster(environmentData, false); logger.info("--> start data-only node"); - String dataNode2 = internalCluster().startDataOnlyNode(); + String dataNode2 = internalCluster().startDataOnlyNode(dataNodeDataPathSettings); logger.info("--> ensure there is no NO_MASTER_BLOCK and unsafe-bootstrap is reflected in cluster state"); assertBusy(() -> { @@ -326,11 +352,16 @@ public void test3MasterNodes2Failed() throws Exception { ensureGreen("test"); logger.info("--> detach-cluster on 2nd and 3rd master-eligible nodes"); - detachCluster(environment, 2, false); - detachCluster(environment, 3, false); + Environment environmentMaster2 = TestEnvironment.newEnvironment( + Settings.builder().put(internalCluster().getDefaultSettings()).put(master2DataPathSettings).build()); + detachCluster(environmentMaster2, false); + Environment environmentMaster3 = TestEnvironment.newEnvironment( + Settings.builder().put(internalCluster().getDefaultSettings()).put(master3DataPathSettings).build()); + detachCluster(environmentMaster3, false); logger.info("--> start 2nd and 3rd master-eligible nodes and ensure 4 nodes stable cluster"); - internalCluster().startMasterOnlyNodes(2); + internalCluster().startMasterOnlyNode(master2DataPathSettings); + internalCluster().startMasterOnlyNode(master3DataPathSettings); ensureStableCluster(4); } @@ -353,9 +384,11 @@ public void testAllMasterEligibleNodesFailedDanglingIndexImport() throws Excepti assertThat(client().prepareGet("test", "type1", "1").execute().actionGet().isExists(), equalTo(true)); logger.info("--> stop data-only node and detach it from the old cluster"); + Settings dataNodeDataPathSettings = internalCluster().dataPathSettings(dataNode); internalCluster().stopRandomNode(InternalTestCluster.nameFilter(dataNode)); - final Environment environment = TestEnvironment.newEnvironment(internalCluster().getDefaultSettings()); - detachCluster(environment, 1, false); + final Environment environment = TestEnvironment.newEnvironment( + Settings.builder().put(internalCluster().getDefaultSettings()).put(dataNodeDataPathSettings).build()); + detachCluster(environment, false); logger.info("--> stop master-eligible node, clear its data and start it again - new cluster should form"); internalCluster().restartNode(masterNode, new InternalTestCluster.RestartCallback(){ @@ -366,7 +399,7 @@ public boolean clearData(String nodeName) { }); logger.info("--> start data-only only node and ensure 2 nodes stable cluster"); - internalCluster().startDataOnlyNode(); + internalCluster().startDataOnlyNode(dataNodeDataPathSettings); ensureStableCluster(2); logger.info("--> verify that the dangling index exists and has green status"); @@ -381,15 +414,18 @@ public boolean clearData(String nodeName) { public void testNoInitialBootstrapAfterDetach() throws Exception { internalCluster().setBootstrapMasterNodeIndex(0); - internalCluster().startMasterOnlyNode(); + String masterNode = internalCluster().startMasterOnlyNode(); + Settings masterNodeDataPathSettings = internalCluster().dataPathSettings(masterNode); internalCluster().stopCurrentMasterNode(); - final Environment environment = TestEnvironment.newEnvironment(internalCluster().getDefaultSettings()); + final Environment environment = TestEnvironment.newEnvironment( + Settings.builder().put(internalCluster().getDefaultSettings()).put(masterNodeDataPathSettings).build()); detachCluster(environment); String node = internalCluster().startMasterOnlyNode(Settings.builder() // give the cluster 2 seconds to elect the master (it should not) .put(Node.INITIAL_STATE_TIMEOUT_SETTING.getKey(), "2s") + .put(masterNodeDataPathSettings) .build()); ClusterState state = internalCluster().client().admin().cluster().prepareState().setLocal(true) @@ -401,7 +437,8 @@ public void testNoInitialBootstrapAfterDetach() throws Exception { public void testCanRunUnsafeBootstrapAfterErroneousDetachWithoutLoosingMetaData() throws Exception { internalCluster().setBootstrapMasterNodeIndex(0); - internalCluster().startMasterOnlyNode(); + String masterNode = internalCluster().startMasterOnlyNode(); + Settings masterNodeDataPathSettings = internalCluster().dataPathSettings(masterNode); ClusterUpdateSettingsRequest req = new ClusterUpdateSettingsRequest().persistentSettings( Settings.builder().put(INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey(), "1234kb")); internalCluster().client().admin().cluster().updateSettings(req).get(); @@ -412,11 +449,12 @@ public void testCanRunUnsafeBootstrapAfterErroneousDetachWithoutLoosingMetaData( internalCluster().stopCurrentMasterNode(); - final Environment environment = TestEnvironment.newEnvironment(internalCluster().getDefaultSettings()); + final Environment environment = TestEnvironment.newEnvironment( + Settings.builder().put(internalCluster().getDefaultSettings()).put(masterNodeDataPathSettings).build()); detachCluster(environment); unsafeBootstrap(environment); - internalCluster().startMasterOnlyNode(); + internalCluster().startMasterOnlyNode(masterNodeDataPathSettings); ensureGreen(); state = internalCluster().client().admin().cluster().prepareState().execute().actionGet().getState(); @@ -430,8 +468,10 @@ private static class SimulatedDeleteFailureException extends RuntimeException { public void testCleanupOldMetaDataFails() throws Exception { // establish some metadata. internalCluster().setBootstrapMasterNodeIndex(0); - internalCluster().startNode(); - Environment environment = TestEnvironment.newEnvironment(internalCluster().getDefaultSettings()); + String node = internalCluster().startNode(); + Settings dataPathSettings = internalCluster().dataPathSettings(node); + final Environment environment = TestEnvironment.newEnvironment( + Settings.builder().put(internalCluster().getDefaultSettings()).put(dataPathSettings).build()); internalCluster().stopRandomDataNode(); // find data paths diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/AllocationIdIT.java b/server/src/test/java/org/elasticsearch/cluster/routing/AllocationIdIT.java index c6d7f925a2c85..d92624f539c9e 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/AllocationIdIT.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/AllocationIdIT.java @@ -105,6 +105,8 @@ public void testFailedRecoveryOnAllocateStalePrimaryRequiresAnotherAllocateStale internalCluster().assertSameDocIdsOnShards(); // initial set up is done + Settings node1DataPathSettings = internalCluster().dataPathSettings(node1); + Settings node2DataPathSettings = internalCluster().dataPathSettings(node2); internalCluster().stopRandomNode(InternalTestCluster.nameFilter(node1)); // index more docs to node2 that marks node1 as stale @@ -117,7 +119,7 @@ public void testFailedRecoveryOnAllocateStalePrimaryRequiresAnotherAllocateStale putFakeCorruptionMarker(indexSettings, shardId, indexPath); // thanks to master node1 is out of sync - node1 = internalCluster().startNode(); + node1 = internalCluster().startNode(node1DataPathSettings); // there is only _stale_ primary checkNoValidShardCopy(indexName, shardId); @@ -157,7 +159,7 @@ public void testFailedRecoveryOnAllocateStalePrimaryRequiresAnotherAllocateStale ensureYellow(indexName); // bring node2 back - node2 = internalCluster().startNode(); + node2 = internalCluster().startNode(node2DataPathSettings); ensureGreen(indexName); assertThat(historyUUID(node1, indexName), not(equalTo(historyUUID))); diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/DelayedAllocationIT.java b/server/src/test/java/org/elasticsearch/cluster/routing/DelayedAllocationIT.java index c175624125e50..e11c2ddaa79da 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/DelayedAllocationIT.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/DelayedAllocationIT.java @@ -67,11 +67,13 @@ public void testDelayedAllocationNodeLeavesAndComesBack() throws Exception { .put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), TimeValue.timeValueHours(1))).get(); ensureGreen("test"); indexRandomData(); - internalCluster().stopRandomNode(InternalTestCluster.nameFilter(findNodeWithShard())); + String nodeWithShard = findNodeWithShard(); + Settings nodeWithShardDataPathSettings = internalCluster().dataPathSettings(nodeWithShard); + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(nodeWithShard)); assertBusy(() -> assertThat(client().admin().cluster().prepareState().all().get().getState() .getRoutingNodes().unassigned().size() > 0, equalTo(true))); assertThat(client().admin().cluster().prepareHealth().get().getDelayedUnassignedShards(), equalTo(1)); - internalCluster().startNode(); // this will use the same data location as the stopped node + internalCluster().startNode(nodeWithShardDataPathSettings); // this will use the same data location as the stopped node ensureGreen("test"); } diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/PrimaryAllocationIT.java b/server/src/test/java/org/elasticsearch/cluster/routing/PrimaryAllocationIT.java index f4b834e4d29a6..00a2f5e34a791 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/PrimaryAllocationIT.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/PrimaryAllocationIT.java @@ -118,7 +118,8 @@ public void testBulkWeirdScenario() throws Exception { assertThat(bulkResponse.getItems()[1].getResponse().getResult(), equalTo(DocWriteResponse.Result.UPDATED)); } - private void createStaleReplicaScenario(String master) throws Exception { + // returns data paths settings of in-sync shard copy + private Settings createStaleReplicaScenario(String master) throws Exception { client().prepareIndex("test", "type1").setSource(jsonBuilder() .startObject().field("field", "value1").endObject()).get(); refresh(); @@ -150,6 +151,7 @@ private void createStaleReplicaScenario(String master) throws Exception { .startObject().field("field", "value1").endObject()).get(); logger.info("--> shut down node that has new acknowledged document"); + final Settings inSyncDataPathSettings = internalCluster().dataPathSettings(replicaNode); internalCluster().stopRandomNode(InternalTestCluster.nameFilter(replicaNode)); ensureStableCluster(1, master); @@ -167,6 +169,7 @@ private void createStaleReplicaScenario(String master) throws Exception { // kick reroute a second time and check that all shards are unassigned assertThat(client(master).admin().cluster().prepareReroute().get().getState().getRoutingNodes().unassigned().size(), equalTo(2)); + return inSyncDataPathSettings; } public void testDoNotAllowStaleReplicasToBePromotedToPrimary() throws Exception { @@ -177,10 +180,10 @@ public void testDoNotAllowStaleReplicasToBePromotedToPrimary() throws Exception .setSettings(Settings.builder().put("index.number_of_shards", 1) .put("index.number_of_replicas", 1)).get()); ensureGreen(); - createStaleReplicaScenario(master); + final Settings inSyncDataPathSettings = createStaleReplicaScenario(master); logger.info("--> starting node that reuses data folder with the up-to-date primary shard"); - internalCluster().startDataOnlyNode(Settings.EMPTY); + internalCluster().startDataOnlyNode(inSyncDataPathSettings); logger.info("--> check that the up-to-date primary shard gets promoted and that documents are available"); ensureYellow("test"); @@ -373,6 +376,7 @@ public void testDoNotRemoveAllocationIdOnNodeLeave() throws Exception { .put("index.unassigned.node_left.delayed_timeout", "0ms")).get()); String replicaNode = internalCluster().startDataOnlyNode(Settings.EMPTY); ensureGreen("test"); + final Settings inSyncDataPathSettings = internalCluster().dataPathSettings(replicaNode); internalCluster().stopRandomNode(InternalTestCluster.nameFilter(replicaNode)); ensureYellow("test"); assertEquals(2, client().admin().cluster().prepareState().get().getState().metaData().index("test") @@ -390,7 +394,7 @@ public boolean clearData(String nodeName) { .metaData().index("test").inSyncAllocationIds(0).size()); logger.info("--> starting node that reuses data folder with the up-to-date shard"); - internalCluster().startDataOnlyNode(Settings.EMPTY); + internalCluster().startDataOnlyNode(inSyncDataPathSettings); ensureGreen("test"); } @@ -402,6 +406,7 @@ public void testRemoveAllocationIdOnWriteAfterNodeLeave() throws Exception { 1).put("index.unassigned.node_left.delayed_timeout", "0ms")).get()); String replicaNode = internalCluster().startDataOnlyNode(Settings.EMPTY); ensureGreen("test"); + final Settings inSyncDataPathSettings = internalCluster().dataPathSettings(replicaNode); internalCluster().stopRandomNode(InternalTestCluster.nameFilter(replicaNode)); ensureYellow("test"); assertEquals(2, client().admin().cluster().prepareState().get().getState() @@ -424,7 +429,7 @@ public boolean clearData(String nodeName) { .metaData().index("test").inSyncAllocationIds(0).size()); logger.info("--> starting node that reuses data folder with the up-to-date shard"); - internalCluster().startDataOnlyNode(Settings.EMPTY); + internalCluster().startDataOnlyNode(inSyncDataPathSettings); assertBusy(() -> assertTrue(client().admin().cluster().prepareState().get().getState() .getRoutingTable().index("test").allPrimaryShardsUnassigned())); } diff --git a/server/src/test/java/org/elasticsearch/env/NodeEnvironmentIT.java b/server/src/test/java/org/elasticsearch/env/NodeEnvironmentIT.java index 37e260a01d069..74de578426f2c 100644 --- a/server/src/test/java/org/elasticsearch/env/NodeEnvironmentIT.java +++ b/server/src/test/java/org/elasticsearch/env/NodeEnvironmentIT.java @@ -39,7 +39,8 @@ public void testStartFailureOnDataForNonDataNode() throws Exception { final String indexName = "test-fail-on-data"; logger.info("--> starting one node"); - internalCluster().startNode(); + String node = internalCluster().startNode(); + Settings dataPathSettings = internalCluster().dataPathSettings(node); logger.info("--> creating index"); prepareCreate(indexName, Settings.builder() @@ -69,13 +70,12 @@ public Settings onNodeStopped(String nodeName) { + Node.NODE_MASTER_SETTING.getKey() + "=false, but has index metadata")); - // client() also starts the node + logger.info("--> start the node again with node.data=true and node.master=true"); + internalCluster().startNode(dataPathSettings); + logger.info("--> indexing a simple document"); client().prepareIndex(indexName, "type1", "1").setSource("field1", "value1").get(); - logger.info("--> restarting the node with node.data=true and node.master=true"); - internalCluster().restartRandomDataNode(); - logger.info("--> restarting the node with node.data=false"); ex = expectThrows(IllegalStateException.class, "Node started with node.data=false while having existing shard data must fail", diff --git a/server/src/test/java/org/elasticsearch/env/NodeRepurposeCommandIT.java b/server/src/test/java/org/elasticsearch/env/NodeRepurposeCommandIT.java index c07d710f60508..a6229b16c3055 100644 --- a/server/src/test/java/org/elasticsearch/env/NodeRepurposeCommandIT.java +++ b/server/src/test/java/org/elasticsearch/env/NodeRepurposeCommandIT.java @@ -37,8 +37,8 @@ public void testRepurpose() throws Exception { final String indexName = "test-repurpose"; logger.info("--> starting two nodes"); - internalCluster().startMasterOnlyNode(); - internalCluster().startDataOnlyNode(); + final String masterNode = internalCluster().startMasterOnlyNode(); + final String dataNode = internalCluster().startDataOnlyNode(); logger.info("--> creating index"); prepareCreate(indexName, Settings.builder() @@ -54,31 +54,44 @@ public void testRepurpose() throws Exception { assertTrue(client().prepareGet(indexName, "type1", "1").get().isExists()); + final Settings masterNodeDataPathSettings = internalCluster().dataPathSettings(masterNode); + final Settings dataNodeDataPathSettings = internalCluster().dataPathSettings(dataNode); + final Settings noMasterNoDataSettings = Settings.builder() .put(Node.NODE_DATA_SETTING.getKey(), false) .put(Node.NODE_MASTER_SETTING.getKey(), false) .build(); + final Settings noMasterNoDataSettingsForMasterNode = Settings.builder() + .put(noMasterNoDataSettings) + .put(masterNodeDataPathSettings) + .build(); + + final Settings noMasterNoDataSettingsForDataNode = Settings.builder() + .put(noMasterNoDataSettings) + .put(dataNodeDataPathSettings) + .build(); + internalCluster().stopRandomDataNode(); // verify test setup logger.info("--> restarting node with node.data=false and node.master=false"); IllegalStateException ex = expectThrows(IllegalStateException.class, "Node started with node.data=false and node.master=false while having existing index metadata must fail", - () -> internalCluster().startCoordinatingOnlyNode(Settings.EMPTY) + () -> internalCluster().startCoordinatingOnlyNode(dataNodeDataPathSettings) ); logger.info("--> Repurposing node 1"); - executeRepurposeCommandForOrdinal(noMasterNoDataSettings, indexUUID, 1, 1); + executeRepurposeCommand(noMasterNoDataSettingsForDataNode, indexUUID, 1); ElasticsearchException lockedException = expectThrows(ElasticsearchException.class, - () -> executeRepurposeCommandForOrdinal(noMasterNoDataSettings, indexUUID, 0, 1) + () -> executeRepurposeCommand(noMasterNoDataSettingsForMasterNode, indexUUID, 1) ); assertThat(lockedException.getMessage(), containsString(NodeRepurposeCommand.FAILED_TO_OBTAIN_NODE_LOCK_MSG)); logger.info("--> Starting node after repurpose"); - internalCluster().startCoordinatingOnlyNode(Settings.EMPTY); + internalCluster().startCoordinatingOnlyNode(dataNodeDataPathSettings); assertTrue(indexExists(indexName)); expectThrows(NoShardAvailableActionException.class, () -> client().prepareGet(indexName, "type1", "1").get()); @@ -88,12 +101,12 @@ public void testRepurpose() throws Exception { internalCluster().stopRandomNode(s -> true); internalCluster().stopRandomNode(s -> true); - executeRepurposeCommandForOrdinal(noMasterNoDataSettings, indexUUID, 0, 0); + executeRepurposeCommand(noMasterNoDataSettingsForMasterNode, indexUUID, 0); // by restarting as master and data node, we can check that the index definition was really deleted and also that the tool // does not mess things up so much that the nodes cannot boot as master or data node any longer. - internalCluster().startMasterOnlyNode(); - internalCluster().startDataOnlyNode(); + internalCluster().startMasterOnlyNode(masterNodeDataPathSettings); + internalCluster().startDataOnlyNode(dataNodeDataPathSettings); ensureGreen(); @@ -101,8 +114,7 @@ public void testRepurpose() throws Exception { assertFalse(indexExists(indexName)); } - private void executeRepurposeCommandForOrdinal(Settings settings, String indexUUID, int ordinal, - int expectedShardCount) throws Exception { + private void executeRepurposeCommand(Settings settings, String indexUUID, int expectedShardCount) throws Exception { boolean verbose = randomBoolean(); Settings settingsWithPath = Settings.builder().put(internalCluster().getDefaultSettings()).put(settings).build(); int expectedIndexCount = TestEnvironment.newEnvironment(settingsWithPath).dataFiles().length; @@ -111,6 +123,6 @@ private void executeRepurposeCommandForOrdinal(Settings settings, String indexUU not(contains(NodeRepurposeCommand.PRE_V7_MESSAGE)), NodeRepurposeCommandTests.conditionalNot(containsString(indexUUID), verbose == false)); NodeRepurposeCommandTests.verifySuccess(settingsWithPath, matcher, - verbose, ordinal); + verbose); } } diff --git a/server/src/test/java/org/elasticsearch/env/NodeRepurposeCommandTests.java b/server/src/test/java/org/elasticsearch/env/NodeRepurposeCommandTests.java index 436439d64db1f..8f713e57bf4da 100644 --- a/server/src/test/java/org/elasticsearch/env/NodeRepurposeCommandTests.java +++ b/server/src/test/java/org/elasticsearch/env/NodeRepurposeCommandTests.java @@ -190,14 +190,10 @@ public void testCleanupShardData() throws Exception { } } - private static void verifySuccess(Settings settings, Matcher outputMatcher, boolean verbose) throws Exception { - verifySuccess(settings, outputMatcher, verbose, 0); - } - - static void verifySuccess(Settings settings, Matcher outputMatcher, boolean verbose, int ordinal) throws Exception { + static void verifySuccess(Settings settings, Matcher outputMatcher, boolean verbose) throws Exception { withTerminal(verbose, outputMatcher, terminal -> { terminal.addTextInput(randomFrom("y", "Y")); - executeRepurposeCommand(terminal, settings, ordinal); + executeRepurposeCommand(terminal, settings, 0); assertThat(terminal.getOutput(), containsString("Node successfully repurposed")); }); } diff --git a/server/src/test/java/org/elasticsearch/gateway/RecoveryFromGatewayIT.java b/server/src/test/java/org/elasticsearch/gateway/RecoveryFromGatewayIT.java index 3ea0663d7d4c0..962788f09d23b 100644 --- a/server/src/test/java/org/elasticsearch/gateway/RecoveryFromGatewayIT.java +++ b/server/src/test/java/org/elasticsearch/gateway/RecoveryFromGatewayIT.java @@ -64,6 +64,7 @@ import java.util.Collections; import java.util.HashMap; import java.util.HashSet; +import java.util.List; import java.util.Map; import java.util.Set; import java.util.stream.IntStream; @@ -339,7 +340,9 @@ public boolean clearData(String nodeName) { public void testLatestVersionLoaded() throws Exception { // clean two nodes - internalCluster().startNodes(2, Settings.builder().put("gateway.recover_after_nodes", 2).build()); + List nodes = internalCluster().startNodes(2, Settings.builder().put("gateway.recover_after_nodes", 2).build()); + Settings node1DataPathSettings = internalCluster().dataPathSettings(nodes.get(0)); + Settings node2DataPathSettings = internalCluster().dataPathSettings(nodes.get(1)); assertAcked(client().admin().indices().prepareCreate("test")); client().prepareIndex("test", "type1", "1").setSource(jsonBuilder().startObject().field("field", "value1").endObject()).execute() @@ -393,7 +396,9 @@ public void testLatestVersionLoaded() throws Exception { logger.info("--> starting the two nodes back"); - internalCluster().startNodes(2, Settings.builder().put("gateway.recover_after_nodes", 2).build()); + internalCluster().startNodes( + Settings.builder().put(node1DataPathSettings).put("gateway.recover_after_nodes", 2).build(), + Settings.builder().put(node2DataPathSettings).put("gateway.recover_after_nodes", 2).build()); logger.info("--> running cluster_health (wait for the shards to startup)"); ensureGreen(); diff --git a/server/src/test/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommandIT.java b/server/src/test/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommandIT.java index 7ca2e0a64070b..b7881adf76285 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommandIT.java +++ b/server/src/test/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommandIT.java @@ -140,7 +140,10 @@ public void testCorruptIndex() throws Exception { final MockTerminal terminal = new MockTerminal(); final OptionParser parser = command.getParser(); - final Environment environment = TestEnvironment.newEnvironment(internalCluster().getDefaultSettings()); + final Settings nodePathSettings = internalCluster().dataPathSettings(node); + + final Environment environment = TestEnvironment.newEnvironment( + Settings.builder().put(internalCluster().getDefaultSettings()).put(nodePathSettings).build()); final OptionSet options = parser.parse("-index", indexName, "-shard-id", "0"); // Try running it before the node is stopped (and shard is closed) @@ -305,6 +308,9 @@ public void testCorruptTranslogTruncation() throws Exception { logger.info("--> performed extra flushing on replica"); } + final Settings node1PathSettings = internalCluster().dataPathSettings(node1); + final Settings node2PathSettings = internalCluster().dataPathSettings(node2); + // shut down the replica node to be tested later internalCluster().stopRandomNode(InternalTestCluster.nameFilter(node2)); @@ -348,8 +354,8 @@ public Settings onNodeStopped(String nodeName) throws Exception { } }); - final Settings defaultSettings = internalCluster().getDefaultSettings(); - final Environment environment = TestEnvironment.newEnvironment(defaultSettings); + final Environment environment = TestEnvironment.newEnvironment( + Settings.builder().put(internalCluster().getDefaultSettings()).put(node1PathSettings).build()); terminal.addTextInput("y"); OptionSet options = parser.parse("-d", translogDir.toAbsolutePath().toString()); @@ -411,7 +417,7 @@ public Settings onNodeStopped(String nodeName) throws Exception { assertHitCount(client().prepareSearch(indexName).setQuery(matchAllQuery()).get(), numDocsToKeep); logger.info("--> starting the replica node to test recovery"); - internalCluster().startNode(); + internalCluster().startNode(node2PathSettings); ensureGreen(indexName); for (String node : internalCluster().nodesInclude(indexName)) { SearchRequestBuilder q = client().prepareSearch(indexName).setPreference("_only_nodes:" + node).setQuery(matchAllQuery()); @@ -473,7 +479,10 @@ public void testCorruptTranslogTruncationOfReplica() throws Exception { final ShardId shardId = new ShardId(resolveIndex(indexName), 0); final Set translogDirs = getDirs(node2, shardId, ShardPath.TRANSLOG_FOLDER_NAME); - // stop data nodes. After the restart the 1st node will be primary and the 2nd node will be replica + final Settings node1PathSettings = internalCluster().dataPathSettings(node1); + final Settings node2PathSettings = internalCluster().dataPathSettings(node2); + + // stop data nodes internalCluster().stopRandomDataNode(); internalCluster().stopRandomDataNode(); @@ -481,53 +490,32 @@ public void testCorruptTranslogTruncationOfReplica() throws Exception { logger.info("--> corrupting translog"); TestTranslog.corruptRandomTranslogFile(logger, random(), translogDirs); - // Restart the single node + // Start the node with the non-corrupted data path logger.info("--> starting node"); - internalCluster().startNode(); + internalCluster().startNode(node1PathSettings); ensureYellow(); // Run a search and make sure it succeeds assertHitCount(client().prepareSearch(indexName).setQuery(matchAllQuery()).get(), totalDocs); + // check replica corruption final RemoveCorruptedShardDataCommand command = new RemoveCorruptedShardDataCommand(); final MockTerminal terminal = new MockTerminal(); final OptionParser parser = command.getParser(); - final Environment environment = TestEnvironment.newEnvironment(internalCluster().getDefaultSettings()); - - internalCluster().restartRandomDataNode(new InternalTestCluster.RestartCallback() { - @Override - public Settings onNodeStopped(String nodeName) throws Exception { - logger.info("--> node {} stopped", nodeName); - for (Path translogDir : translogDirs) { - final Path idxLocation = translogDir.getParent().resolve(ShardPath.INDEX_FOLDER_NAME); - assertBusy(() -> { - logger.info("--> checking that lock has been released for {}", idxLocation); - try (Directory dir = FSDirectory.open(idxLocation, NativeFSLockFactory.INSTANCE); - Lock writeLock = dir.obtainLock(IndexWriter.WRITE_LOCK_NAME)) { - // Great, do nothing, we just wanted to obtain the lock - } catch (LockObtainFailedException lofe) { - logger.info("--> failed acquiring lock for {}", idxLocation); - fail("still waiting for lock release at [" + idxLocation + "]"); - } catch (IOException ioe) { - fail("Got an IOException: " + ioe); - } - }); - - terminal.addTextInput("y"); - OptionSet options = parser.parse("-d", translogDir.toAbsolutePath().toString()); - logger.info("--> running command for [{}]", translogDir.toAbsolutePath()); - command.execute(terminal, options, environment); - logger.info("--> output:\n{}", terminal.getOutput()); - } - - return super.onNodeStopped(nodeName); - } - }); + for (Path translogDir : translogDirs) { + final Environment environment = TestEnvironment.newEnvironment( + Settings.builder().put(internalCluster().getDefaultSettings()).put(node2PathSettings).build()); + terminal.addTextInput("y"); + OptionSet options = parser.parse("-d", translogDir.toAbsolutePath().toString()); + logger.info("--> running command for [{}]", translogDir.toAbsolutePath()); + command.execute(terminal, options, environment); + logger.info("--> output:\n{}", terminal.getOutput()); + } logger.info("--> starting the replica node to test recovery"); - internalCluster().startNode(); + internalCluster().startNode(node2PathSettings); ensureGreen(indexName); for (String node : internalCluster().nodesInclude(indexName)) { assertHitCount(client().prepareSearch(indexName) @@ -574,15 +562,18 @@ public void testResolvePath() throws Exception { final RemoveCorruptedShardDataCommand command = new RemoveCorruptedShardDataCommand(); final OptionParser parser = command.getParser(); - final Environment environment = TestEnvironment.newEnvironment(internalCluster().getDefaultSettings()); - final Map indexPathByNodeName = new HashMap<>(); + final Map environmentByNodeName = new HashMap<>(); for (String nodeName : nodeNames) { final String nodeId = nodeNameToNodeId.get(nodeName); final Set indexDirs = getDirs(nodeId, shardId, ShardPath.INDEX_FOLDER_NAME); assertThat(indexDirs, hasSize(1)); indexPathByNodeName.put(nodeName, indexDirs.iterator().next()); + final Environment environment = TestEnvironment.newEnvironment( + Settings.builder().put(internalCluster().getDefaultSettings()).put(internalCluster().dataPathSettings(nodeName)).build()); + environmentByNodeName.put(nodeName, environment); + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(nodeName)); logger.info(" -- stopped {}", nodeName); } @@ -590,7 +581,7 @@ public void testResolvePath() throws Exception { for (String nodeName : nodeNames) { final Path indexPath = indexPathByNodeName.get(nodeName); final OptionSet options = parser.parse("--dir", indexPath.toAbsolutePath().toString()); - command.findAndProcessShardPath(options, environment, + command.findAndProcessShardPath(options, environmentByNodeName.get(nodeName), shardPath -> assertThat(shardPath.resolveIndex(), equalTo(indexPath))); } } diff --git a/server/src/test/java/org/elasticsearch/indices/memory/breaker/CircuitBreakerNoopIT.java b/server/src/test/java/org/elasticsearch/indices/memory/breaker/CircuitBreakerNoopIT.java index 627eb74007bae..02eab6dc0aad2 100644 --- a/server/src/test/java/org/elasticsearch/indices/memory/breaker/CircuitBreakerNoopIT.java +++ b/server/src/test/java/org/elasticsearch/indices/memory/breaker/CircuitBreakerNoopIT.java @@ -22,7 +22,6 @@ import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.client.Client; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService; import org.elasticsearch.search.sort.SortOrder; import org.elasticsearch.test.ESIntegTestCase; @@ -41,7 +40,6 @@ public class CircuitBreakerNoopIT extends ESIntegTestCase { @Override protected Settings nodeSettings(int nodeOrdinal) { return Settings.builder() - .put(NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING.getKey(), 2) .put(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_TYPE_SETTING.getKey(), "noop") // This is set low, because if the "noop" is not a noop, it will break .put(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING.getKey(), "10b") diff --git a/server/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java b/server/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java index 3130cebad7097..0ea8eb8e9b447 100644 --- a/server/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java +++ b/server/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java @@ -853,8 +853,12 @@ public void testHistoryRetention() throws Exception { flush(indexName); } - internalCluster().stopRandomNode(s -> true); - internalCluster().stopRandomNode(s -> true); + String firstNodeToStop = randomFrom(internalCluster().getNodeNames()); + Settings firstNodeToStopDataPathSettings = internalCluster().dataPathSettings(firstNodeToStop); + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(firstNodeToStop)); + String secondNodeToStop = randomFrom(internalCluster().getNodeNames()); + Settings secondNodeToStopDataPathSettings = internalCluster().dataPathSettings(secondNodeToStop); + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(secondNodeToStop)); final long desyncNanoTime = System.nanoTime(); while (System.nanoTime() <= desyncNanoTime) { @@ -871,7 +875,7 @@ public void testHistoryRetention() throws Exception { assertAcked(client().admin().indices().prepareUpdateSettings(indexName) .setSettings(Settings.builder().put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1))); - internalCluster().startNode(); + internalCluster().startNode(randomFrom(firstNodeToStopDataPathSettings, secondNodeToStopDataPathSettings)); ensureGreen(indexName); final RecoveryResponse recoveryResponse = client().admin().indices().recoveries(new RecoveryRequest(indexName)).get(); diff --git a/server/src/test/java/org/elasticsearch/indices/store/IndicesStoreIntegrationIT.java b/server/src/test/java/org/elasticsearch/indices/store/IndicesStoreIntegrationIT.java index 23c1a837d4346..f388e49b31fb2 100644 --- a/server/src/test/java/org/elasticsearch/indices/store/IndicesStoreIntegrationIT.java +++ b/server/src/test/java/org/elasticsearch/indices/store/IndicesStoreIntegrationIT.java @@ -74,7 +74,7 @@ public class IndicesStoreIntegrationIT extends ESIntegTestCase { @Override protected Settings nodeSettings(int nodeOrdinal) { // simplify this and only use a single data path - return Settings.builder().put(super.nodeSettings(nodeOrdinal)).put(Environment.PATH_DATA_SETTING.getKey(), "") + return Settings.builder().put(super.nodeSettings(nodeOrdinal)).put(Environment.PATH_DATA_SETTING.getKey(), createTempDir()) // by default this value is 1 sec in tests (30 sec in practice) but we adding disruption here // which is between 1 and 2 sec can cause each of the shard deletion requests to timeout. // to prevent this we are setting the timeout here to something highish ie. the default in practice @@ -335,8 +335,11 @@ public void testShardActiveElsewhereDoesNotDeleteAnother() throws Exception { .put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), "none"))); logger.debug("--> shutting down two random nodes"); - internalCluster().stopRandomNode(InternalTestCluster.nameFilter(node1, node2, node3)); - internalCluster().stopRandomNode(InternalTestCluster.nameFilter(node1, node2, node3)); + List nodesToShutDown = randomSubsetOf(2, node1, node2, node3); + Settings node1DataPathSettings = internalCluster().dataPathSettings(nodesToShutDown.get(0)); + Settings node2DataPathSettings = internalCluster().dataPathSettings(nodesToShutDown.get(1)); + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(nodesToShutDown.get(0))); + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(nodesToShutDown.get(1))); logger.debug("--> verifying index is red"); ClusterHealthResponse health = client().admin().cluster().prepareHealth().setWaitForNodes("3").get(); @@ -369,7 +372,7 @@ public void testShardActiveElsewhereDoesNotDeleteAnother() throws Exception { logger.debug("--> starting the two old nodes back"); - internalCluster().startDataOnlyNodes(2); + internalCluster().startNodes(node1DataPathSettings, node2DataPathSettings); assertFalse(client().admin().cluster().prepareHealth().setWaitForNodes("5").get().isTimedOut()); diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java index 134a91ac296c1..d45c83444b2fc 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java @@ -107,7 +107,6 @@ import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.env.Environment; -import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.env.TestEnvironment; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexModule; @@ -1777,7 +1776,6 @@ private int getNumClientNodes() { */ protected Settings nodeSettings(int nodeOrdinal) { Settings.Builder builder = Settings.builder() - .put(NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING.getKey(), Integer.MAX_VALUE) // Default the watermarks to absurdly low to prevent the tests // from failing on nodes without enough disk space .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey(), "1b") diff --git a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java index 7ff928c4413d2..2e88a018e5a0d 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java +++ b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java @@ -138,6 +138,7 @@ import java.util.function.Function; import java.util.function.Predicate; import java.util.stream.Collectors; +import java.util.stream.IntStream; import java.util.stream.Stream; import static java.util.Collections.emptyList; @@ -238,6 +239,8 @@ public final class InternalTestCluster extends TestCluster { private final boolean forbidPrivateIndexSettings; + private final int numDataPaths; + /** * All nodes started by the cluster will have their name set to nodePrefix followed by a positive number */ @@ -353,20 +356,8 @@ public InternalTestCluster( numSharedDedicatedMasterNodes, numSharedDataNodes, numSharedCoordOnlyNodes, autoManageMinMasterNodes ? "auto-managed" : "manual"); this.nodeConfigurationSource = nodeConfigurationSource; + numDataPaths = random.nextInt(5) == 0 ? 2 + random.nextInt(3) : 1; Builder builder = Settings.builder(); - if (random.nextInt(5) == 0) { // sometimes set this - // randomize (multi/single) data path, special case for 0, don't set it at all... - final int numOfDataPaths = random.nextInt(5); - if (numOfDataPaths > 0) { - StringBuilder dataPath = new StringBuilder(); - for (int i = 0; i < numOfDataPaths; i++) { - dataPath.append(baseDir.resolve("d" + i).toAbsolutePath()).append(','); - } - builder.put(Environment.PATH_DATA_SETTING.getKey(), dataPath.toString()); - } - } - builder.put(NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING.getKey(), Integer.MAX_VALUE); - builder.put(Environment.PATH_SHARED_DATA_SETTING.getKey(), baseDir.resolve("custom")); builder.put(Environment.PATH_HOME_SETTING.getKey(), baseDir); builder.put(Environment.PATH_REPO_SETTING.getKey(), baseDir.resolve("repos")); builder.put(TransportSettings.PORT.getKey(), 0); @@ -625,11 +616,24 @@ private Settings getNodeSettings(final int nodeId, final long seed, final Settin final String name = buildNodeName(nodeId, settings); - final Settings.Builder updatedSettings = Settings.builder() - .put(Environment.PATH_HOME_SETTING.getKey(), baseDir) // allow overriding path.home - .put(settings) - .put("node.name", name) - .put(NodeEnvironment.NODE_ID_SEED_SETTING.getKey(), seed); + final Settings.Builder updatedSettings = Settings.builder(); + + updatedSettings.put(Environment.PATH_HOME_SETTING.getKey(), baseDir); + + if (numDataPaths > 1) { + updatedSettings.putList(Environment.PATH_DATA_SETTING.getKey(), IntStream.range(0, numDataPaths).mapToObj(i -> + baseDir.resolve(name).resolve("d" + i).toString()).collect(Collectors.toList())); + } else { + updatedSettings.put(Environment.PATH_DATA_SETTING.getKey(), baseDir.resolve(name)); + } + + updatedSettings.put(Environment.PATH_SHARED_DATA_SETTING.getKey(), baseDir.resolve(name + "-shared")); + + // allow overriding the above + updatedSettings.put(settings); + // force certain settings + updatedSettings.put("node.name", name); + updatedSettings.put(NodeEnvironment.NODE_ID_SEED_SETTING.getKey(), seed); if (autoManageMinMasterNodes) { assertThat("automatically managing min master nodes require nodes to complete a join cycle when starting", @@ -965,7 +969,7 @@ private void recreateNode(final Settings newSettings, final Runnable onTransport if (closed.get() == false) { throw new IllegalStateException("node " + name + " should be closed before recreating it"); } - // use a new seed to make sure we have new node id + // use a new seed to make sure we generate a fresh new node id if the data folder has been wiped final long newIdSeed = NodeEnvironment.NODE_ID_SEED_SETTING.get(node.settings()) + 1; Settings finalSettings = Settings.builder() .put(originalNodeSettings) @@ -1531,6 +1535,12 @@ private static T getInstanceFromNode(Class clazz, Node node) { return node.injector().getInstance(clazz); } + public Settings dataPathSettings(String node) { + return nodes.values().stream().filter(nc -> nc.name.equals(node)).findFirst().get().node().settings() + .filter(key -> + key.equals(Environment.PATH_DATA_SETTING.getKey()) || key.equals(Environment.PATH_SHARED_DATA_SETTING.getKey())); + } + @Override public int size() { return nodes.size(); diff --git a/test/framework/src/test/java/org/elasticsearch/test/test/InternalTestClusterTests.java b/test/framework/src/test/java/org/elasticsearch/test/test/InternalTestClusterTests.java index 8461e6ade09ac..a690e4bbbdd21 100644 --- a/test/framework/src/test/java/org/elasticsearch/test/test/InternalTestClusterTests.java +++ b/test/framework/src/test/java/org/elasticsearch/test/test/InternalTestClusterTests.java @@ -20,7 +20,6 @@ import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.client.Client; -import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.network.NetworkModule; @@ -28,6 +27,7 @@ import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.discovery.DiscoveryModule; import org.elasticsearch.discovery.SettingsBasedSeedHostsProvider; +import org.elasticsearch.env.Environment; import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.node.Node; import org.elasticsearch.plugins.Plugin; @@ -35,7 +35,6 @@ import org.elasticsearch.test.InternalTestCluster; import org.elasticsearch.test.MockHttpTransport; import org.elasticsearch.test.NodeConfigurationSource; -import org.elasticsearch.transport.TransportSettings; import java.io.IOException; import java.nio.file.Files; @@ -91,20 +90,21 @@ public void testInitializiationIsConsistent() { InternalTestCluster cluster1 = new InternalTestCluster(clusterSeed, baseDir, masterNodes, randomBoolean(), minNumDataNodes, maxNumDataNodes, clusterName, nodeConfigurationSource, numClientNodes, nodePrefix, Collections.emptyList(), Function.identity()); - // TODO: this is not ideal - we should have a way to make sure ports are initialized in the same way - assertClusters(cluster0, cluster1, false); + assertClusters(cluster0, cluster1, true); } /** - * a set of settings that are expected to have different values betweem clusters, even they have been initialized with the same + * a set of settings that are expected to have different values between clusters, even they have been initialized with the same * base settings. */ static final Set clusterUniqueSettings = new HashSet<>(); static { - clusterUniqueSettings.add(ClusterName.CLUSTER_NAME_SETTING.getKey()); - clusterUniqueSettings.add(TransportSettings.PORT.getKey()); - clusterUniqueSettings.add("http.port"); + clusterUniqueSettings.add(Environment.PATH_HOME_SETTING.getKey()); + clusterUniqueSettings.add(Environment.PATH_DATA_SETTING.getKey()); + clusterUniqueSettings.add(Environment.PATH_REPO_SETTING.getKey()); + clusterUniqueSettings.add(Environment.PATH_SHARED_DATA_SETTING.getKey()); + clusterUniqueSettings.add(Environment.PATH_LOGS_SETTING.getKey()); } public static void assertClusters(InternalTestCluster cluster0, InternalTestCluster cluster1, boolean checkClusterUniqueSettings) { @@ -112,9 +112,6 @@ public static void assertClusters(InternalTestCluster cluster0, InternalTestClus Settings defaultSettings1 = cluster1.getDefaultSettings(); assertSettings(defaultSettings0, defaultSettings1, checkClusterUniqueSettings); assertThat(cluster0.numDataNodes(), equalTo(cluster1.numDataNodes())); - if (checkClusterUniqueSettings) { - assertThat(cluster0.getClusterName(), equalTo(cluster1.getClusterName())); - } } public static void assertSettings(Settings left, Settings right, boolean checkClusterUniqueSettings) { @@ -127,7 +124,7 @@ public static void assertSettings(Settings left, Settings right, boolean checkCl continue; } assertTrue("key [" + key + "] is missing in " + keys1, keys1.contains(key)); - assertEquals(right.get(key), left.get(key)); + assertEquals(key, right.get(key), left.get(key)); } } @@ -151,16 +148,11 @@ public void testBeforeTest() throws Exception { bootstrapMasterNodeIndex = maxNumDataNodes == 0 ? -1 : randomIntBetween(0, maxNumDataNodes - 1); } final int numClientNodes = randomIntBetween(0, 2); - final String clusterName1 = "shared1"; - final String clusterName2 = "shared2"; String transportClient = getTestTransportType(); NodeConfigurationSource nodeConfigurationSource = new NodeConfigurationSource() { @Override public Settings nodeSettings(int nodeOrdinal) { final Settings.Builder settings = Settings.builder() - .put( - NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING.getKey(), - 2 * ((masterNodes ? InternalTestCluster.DEFAULT_HIGH_NUM_MASTER_NODES : 0) + maxNumDataNodes + numClientNodes)) .put(DiscoveryModule.DISCOVERY_SEED_PROVIDERS_SETTING.getKey(), "file") .putList(SettingsBasedSeedHostsProvider.DISCOVERY_SEED_HOSTS_SETTING.getKey()) .put(NetworkModule.TRANSPORT_TYPE_KEY, getTestTransportType()); @@ -185,14 +177,13 @@ public Settings transportClientSettings() { String nodePrefix = "foobar"; - Path baseDir = createTempDir(); - InternalTestCluster cluster0 = new InternalTestCluster(clusterSeed, baseDir, masterNodes, - autoManageMinMasterNodes, minNumDataNodes, maxNumDataNodes, clusterName1, nodeConfigurationSource, numClientNodes, + InternalTestCluster cluster0 = new InternalTestCluster(clusterSeed, createTempDir(), masterNodes, + autoManageMinMasterNodes, minNumDataNodes, maxNumDataNodes, "clustername", nodeConfigurationSource, numClientNodes, nodePrefix, mockPlugins(), Function.identity()); cluster0.setBootstrapMasterNodeIndex(bootstrapMasterNodeIndex); - InternalTestCluster cluster1 = new InternalTestCluster(clusterSeed, baseDir, masterNodes, - autoManageMinMasterNodes, minNumDataNodes, maxNumDataNodes, clusterName2, nodeConfigurationSource, numClientNodes, + InternalTestCluster cluster1 = new InternalTestCluster(clusterSeed, createTempDir(), masterNodes, + autoManageMinMasterNodes, minNumDataNodes, maxNumDataNodes, "clustername", nodeConfigurationSource, numClientNodes, nodePrefix, mockPlugins(), Function.identity()); cluster1.setBootstrapMasterNodeIndex(bootstrapMasterNodeIndex); @@ -234,9 +225,6 @@ public void testDataFolderAssignmentAndCleaning() throws IOException, Interrupte @Override public Settings nodeSettings(int nodeOrdinal) { return Settings.builder() - .put( - NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING.getKey(), - 2 + (masterNodes ? InternalTestCluster.DEFAULT_HIGH_NUM_MASTER_NODES : 0) + maxNumDataNodes + numClientNodes) .put(NetworkModule.TRANSPORT_TYPE_KEY, getTestTransportType()) .putList(DISCOVERY_SEED_PROVIDERS_SETTING.getKey(), "file") .putList(SettingsBasedSeedHostsProvider.DISCOVERY_SEED_HOSTS_SETTING.getKey()) @@ -269,12 +257,9 @@ public Settings transportClientSettings() { String poorNode = randomValueOtherThanMany(n -> originalMasterCount == 1 && n.equals(cluster.getMasterName()), () -> randomFrom(cluster.getNodeNames())); Path dataPath = getNodePaths(cluster, poorNode)[0]; + final Settings poorNodeDataPathSettings = cluster.dataPathSettings(poorNode); final Path testMarker = dataPath.resolve("testMarker"); Files.createDirectories(testMarker); - int expectedMasterCount = originalMasterCount; - if (cluster.getInstance(ClusterService.class, poorNode).localNode().isMasterNode()) { - expectedMasterCount--; - } cluster.stopRandomNode(InternalTestCluster.nameFilter(poorNode)); assertFileExists(testMarker); // stopping a node half way shouldn't clean data @@ -285,15 +270,15 @@ public Settings transportClientSettings() { Files.createDirectories(stableTestMarker); final String newNode1 = cluster.startNode(); - expectedMasterCount++; - assertThat(getNodePaths(cluster, newNode1)[0], equalTo(dataPath)); + assertThat(getNodePaths(cluster, newNode1)[0], not(dataPath)); assertFileExists(testMarker); // starting a node should re-use data folders and not clean it final String newNode2 = cluster.startNode(); - expectedMasterCount++; final Path newDataPath = getNodePaths(cluster, newNode2)[0]; final Path newTestMarker = newDataPath.resolve("newTestMarker"); assertThat(newDataPath, not(dataPath)); Files.createDirectories(newTestMarker); + final String newNode3 = cluster.startNode(poorNodeDataPathSettings); + assertThat(getNodePaths(cluster, newNode3)[0], equalTo(dataPath)); cluster.beforeTest(random(), 0.0); assertFileNotExists(newTestMarker); // the cluster should be reset for a new test, cleaning up the extra path we made assertFileNotExists(testMarker); // a new unknown node used this path, it should be cleaned @@ -333,7 +318,6 @@ public void testDifferentRolesMaintainPathOnRestart() throws Exception { @Override public Settings nodeSettings(int nodeOrdinal) { return Settings.builder() - .put(NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING.getKey(), numNodes) .put(NetworkModule.TRANSPORT_TYPE_KEY, getTestTransportType()) .put(Node.INITIAL_STATE_TIMEOUT_SETTING.getKey(), 0) .putList(DISCOVERY_SEED_PROVIDERS_SETTING.getKey(), "file") @@ -417,7 +401,6 @@ public void testTwoNodeCluster() throws Exception { @Override public Settings nodeSettings(int nodeOrdinal) { return Settings.builder() - .put(NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING.getKey(), 2) .put(NetworkModule.TRANSPORT_TYPE_KEY, getTestTransportType()) .putList(DISCOVERY_SEED_PROVIDERS_SETTING.getKey(), "file") .putList(SettingsBasedSeedHostsProvider.DISCOVERY_SEED_HOSTS_SETTING.getKey()) diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/CcrIntegTestCase.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/CcrIntegTestCase.java index dea3da2a3ba1b..eba3532f063bf 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/CcrIntegTestCase.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/CcrIntegTestCase.java @@ -46,7 +46,6 @@ import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.core.internal.io.IOUtils; -import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.engine.DocIdSeqNoAndSource; @@ -213,7 +212,6 @@ public void afterTest() throws Exception { private NodeConfigurationSource createNodeConfigurationSource(final String leaderSeedAddress, final boolean leaderCluster) { Settings.Builder builder = Settings.builder(); - builder.put(NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING.getKey(), Integer.MAX_VALUE); // Default the watermarks to absurdly low to prevent the tests // from failing on nodes without enough disk space builder.put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey(), "1b"); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/BasicDistributedJobsIT.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/BasicDistributedJobsIT.java index 7779f4e13d0ea..97d0824d2ac0f 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/BasicDistributedJobsIT.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/BasicDistributedJobsIT.java @@ -22,6 +22,7 @@ import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.bucket.histogram.HistogramAggregationBuilder; import org.elasticsearch.search.aggregations.metrics.MaxAggregationBuilder; +import org.elasticsearch.test.InternalTestCluster; import org.elasticsearch.test.junit.annotations.TestLogging; import org.elasticsearch.xpack.core.ml.MlTasks; import org.elasticsearch.xpack.core.ml.action.CloseJobAction; @@ -331,7 +332,7 @@ public void testMlStateAndResultsIndicesNotAvailable() throws Exception { internalCluster().ensureAtMostNumDataNodes(0); // start non ml node that will hold the state and results indices logger.info("Start non ml node:"); - internalCluster().startNode(Settings.builder() + String nonMLNode = internalCluster().startNode(Settings.builder() .put("node.data", true) .put("node.attr.ml-indices", "state-and-results") .put(MachineLearning.ML_ENABLED.getKey(), false)); @@ -389,7 +390,8 @@ public void testMlStateAndResultsIndicesNotAvailable() throws Exception { assertEquals(0, tasks.taskMap().size()); }); logger.info("Stop non ml node"); - internalCluster().stopRandomNode(settings -> settings.getAsBoolean(MachineLearning.ML_ENABLED.getKey(), false) == false); + Settings nonMLNodeDataPathSettings = internalCluster().dataPathSettings(nonMLNode); + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(nonMLNode)); ensureStableCluster(1); Exception e = expectThrows(ElasticsearchStatusException.class, @@ -406,6 +408,7 @@ public void testMlStateAndResultsIndicesNotAvailable() throws Exception { logger.info("Start data node"); String nonMlNode = internalCluster().startNode(Settings.builder() + .put(nonMLNodeDataPathSettings) .put("node.data", true) .put(MachineLearning.ML_ENABLED.getKey(), false)); ensureStableCluster(2, mlNode); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/MlDistributedFailureIT.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/MlDistributedFailureIT.java index b2f6a662c1167..480f85798800b 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/MlDistributedFailureIT.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/MlDistributedFailureIT.java @@ -90,7 +90,8 @@ public void testLoseDedicatedMasterNode() throws Exception { ensureStableClusterOnAllNodes(2); run("lose-dedicated-master-node-job", () -> { logger.info("Stopping dedicated master node"); - internalCluster().stopRandomNode(settings -> settings.getAsBoolean("node.master", false)); + Settings masterDataPathSettings = internalCluster().dataPathSettings(internalCluster().getMasterName()); + internalCluster().stopCurrentMasterNode(); assertBusy(() -> { ClusterState state = client(mlAndDataNode).admin().cluster().prepareState() .setLocal(true).get().getState(); @@ -98,6 +99,7 @@ public void testLoseDedicatedMasterNode() throws Exception { }); logger.info("Restarting dedicated master node"); internalCluster().startNode(Settings.builder() + .put(masterDataPathSettings) .put("node.master", true) .put("node.data", false) .put("node.ml", false) From d589cad84251e1f7bc7dbd77e407887dbb967682 Mon Sep 17 00:00:00 2001 From: David Turner Date: Wed, 22 May 2019 09:26:41 +0100 Subject: [PATCH 027/224] Rework discovery-ec2 docs (#41630) This commit reworks and clarifies the docs for the `discovery-ec2` plugin: - folds the tiny "Getting started with AWS" into the page on configuration - spells out the name of each setting in full instead of noting the `discovery.ec2` prefix at the top of the page. - replaces each `(Secure)` marker with a sentence describing what that means in situ - notes some missing defaults - clarifies the behaviour of `discovery.ec2.groups` (dependent on `.any_group`) - clarifies what `discovery.ec2.host_type` is for - adds `discovery.ec2.tag.TAGNAME` as a (meta-)setting rather than describing it in a separate section - notes that the tags mentioned in `discovery.ec2.tag.TAGNAME` cannot contain colons (see #38406) - clarifies the EC2-specific interface names and what they're for - reorders and rewords the recommendations for storage - expands on why you should not span a cluster across regions - adds a suggestion on protecting instances against termination during scale-in - reformat to 80 columns where possible Fixes #38406 --- docs/plugins/discovery-ec2.asciidoc | 391 ++++++++++++++++++---------- 1 file changed, 259 insertions(+), 132 deletions(-) diff --git a/docs/plugins/discovery-ec2.asciidoc b/docs/plugins/discovery-ec2.asciidoc index 9ec6b7bab054f..fd51bd881daf0 100644 --- a/docs/plugins/discovery-ec2.asciidoc +++ b/docs/plugins/discovery-ec2.asciidoc @@ -1,34 +1,52 @@ [[discovery-ec2]] === EC2 Discovery Plugin -The EC2 discovery plugin uses the https://github.com/aws/aws-sdk-java[AWS API] -to identify the addresses of seed hosts. +The EC2 discovery plugin provides a list of seed addresses to the +{ref}/modules-discovery-hosts-providers.html[discovery process] by querying the +https://github.com/aws/aws-sdk-java[AWS API] for a list of EC2 instances +matching certain criteria determined by the <>. -*If you are looking for a hosted solution of Elasticsearch on AWS, please visit http://www.elastic.co/cloud.* +*If you are looking for a hosted solution of {es} on AWS, please visit +http://www.elastic.co/cloud.* :plugin_name: discovery-ec2 include::install_remove.asciidoc[] [[discovery-ec2-usage]] -==== Getting started with AWS +==== Using the EC2 discovery plugin -The plugin adds a seed hosts provider named `ec2`. This seed hosts provider -finds other Elasticsearch instances in EC2 by querying the AWS metadata -service. Authentication is done using -http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html[IAM -Role] credentials by default. To enable the plugin, configure {es} to use the -`ec2` seed hosts provider: +The `discovery-ec2` plugin allows {es} to find the master-eligible nodes in a +cluster running on AWS EC2 by querying the +https://github.com/aws/aws-sdk-java[AWS API] for the addresses of the EC2 +instances running these nodes. + +It is normally a good idea to restrict the discovery process just to the +master-eligible nodes in the cluster. This plugin allows you to identify these +nodes by certain criteria including their tags, their membership of security +groups, and their placement within availability zones. The discovery process +will work correctly even if it finds master-ineligible nodes, but master +elections will be more efficient if this can be avoided. + +The interaction with the AWS API can be authenticated using the +http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html[instance +role], or else custom credentials can be supplied. + +===== Enabling EC2 discovery + +To enable EC2 discovery, configure {es} to use the `ec2` seed hosts provider: [source,yaml] ---- discovery.seed_providers: ec2 ---- -==== Settings +===== Configuring EC2 discovery -EC2 discovery supports a number of settings. Some settings are sensitive and -must be stored in the {ref}/secure-settings.html[elasticsearch keystore]. For -example, to use explicit AWS access keys: +EC2 discovery supports a number of settings. Some settings are sensitive and +must be stored in the {ref}/secure-settings.html[{es} keystore]. For example, +to authenticate using a particular access key and secret key, add these keys to +the keystore by running the following commands: [source,sh] ---- @@ -36,132 +54,163 @@ bin/elasticsearch-keystore add discovery.ec2.access_key bin/elasticsearch-keystore add discovery.ec2.secret_key ---- -The following are the available discovery settings. All should be prefixed with `discovery.ec2.`. -Those that must be stored in the keystore are marked as `Secure`. +The available settings for the EC2 discovery plugin are as follows. + +`discovery.ec2.access_key`:: -`access_key`:: + An EC2 access key. If set, you must also set `discovery.ec2.secret_key`. + If unset, `discovery-ec2` will instead use the instance role. This setting + is sensitive and must be stored in the {ref}/secure-settings.html[{es} + keystore]. - An ec2 access key. The `secret_key` setting must also be specified. (Secure) +`discovery.ec2.secret_key`:: -`secret_key`:: + An EC2 secret key. If set, you must also set `discovery.ec2.access_key`. + This setting is sensitive and must be stored in the + {ref}/secure-settings.html[{es} keystore]. - An ec2 secret key. The `access_key` setting must also be specified. (Secure) +`discovery.ec2.session_token`:: -`session_token`:: - An ec2 session token. The `access_key` and `secret_key` settings must also - be specified. (Secure) + An EC2 session token. If set, you must also set `discovery.ec2.access_key` + and `discovery.ec2.secret_key`. This setting is sensitive and must be + stored in the {ref}/secure-settings.html[{es} keystore]. -`endpoint`:: +`discovery.ec2.endpoint`:: - The ec2 service endpoint to connect to. See - http://docs.aws.amazon.com/general/latest/gr/rande.html#ec2_region. This - defaults to `ec2.us-east-1.amazonaws.com`. + The EC2 service endpoint to which to connect. See + http://docs.aws.amazon.com/general/latest/gr/rande.html#ec2_region to find + the appropriate endpoint for the region. This setting defaults to + `ec2.us-east-1.amazonaws.com` which is appropriate for clusters running in + the `us-east-1` region. -`protocol`:: +`discovery.ec2.protocol`:: - The protocol to use to connect to ec2. Valid values are either `http` - or `https`. Defaults to `https`. + The protocol to use to connect to the EC2 service endpoint, which may be + either `http` or `https`. Defaults to `https`. -`proxy.host`:: +`discovery.ec2.proxy.host`:: - The host name of a proxy to connect to ec2 through. + The address or host name of an HTTP proxy through which to connect to EC2. + If not set, no proxy is used. -`proxy.port`:: +`discovery.ec2.proxy.port`:: - The port of a proxy to connect to ec2 through. + When the address of an HTTP proxy is given in `discovery.ec2.proxy.host`, + this setting determines the port to use to connect to the proxy. Defaults to + `80`. -`proxy.username`:: +`discovery.ec2.proxy.username`:: - The username to connect to the `proxy.host` with. (Secure) + When the address of an HTTP proxy is given in `discovery.ec2.proxy.host`, + this setting determines the username to use to connect to the proxy. When + not set, no username is used. This setting is sensitive and must be stored + in the {ref}/secure-settings.html[{es} keystore]. -`proxy.password`:: +`discovery.ec2.proxy.password`:: - The password to connect to the `proxy.host` with. (Secure) + When the address of an HTTP proxy is given in `discovery.ec2.proxy.host`, + this setting determines the password to use to connect to the proxy. When + not set, no password is used. This setting is sensitive and must be stored + in the {ref}/secure-settings.html[{es} keystore]. -`read_timeout`:: +`discovery.ec2.read_timeout`:: - The socket timeout for connecting to ec2. The value should specify the unit. For example, - a value of `5s` specifies a 5 second timeout. The default value is 50 seconds. + The socket timeout for connections to EC2, + {ref}/common-options.html#time-units[including the units]. For example, a + value of `60s` specifies a 60-second timeout. Defaults to 50 seconds. -`groups`:: +`discovery.ec2.groups`:: - Either a comma separated list or array based list of (security) groups. - Only instances with the provided security groups will be used in the - cluster discovery. (NOTE: You could provide either group NAME or group - ID.) + A list of the names or IDs of the security groups to use for discovery. The + `discovery.ec2.any_group` setting determines the behaviour of this setting. + Defaults to an empty list, meaning that security group membership is + ignored by EC2 discovery. -`host_type`:: +`discovery.ec2.any_group`:: + + Defaults to `true`, meaning that instances belonging to _any_ of the + security groups specified in `discovery.ec2.groups` will be used for + discovery. If set to `false`, only instances that belong to _all_ of the + security groups specified in `discovery.ec2.groups` will be used for + discovery. + +`discovery.ec2.host_type`:: + -- -The type of host type to use to communicate with other instances. Can be -one of `private_ip`, `public_ip`, `private_dns`, `public_dns` or `tag:TAGNAME` where -`TAGNAME` refers to a name of a tag configured for all EC2 instances. Instances which don't -have this tag set will be ignored by the discovery process. -For example if you defined a tag `my-elasticsearch-host` in ec2 and set it to `myhostname1.mydomain.com`, then -setting `host_type: tag:my-elasticsearch-host` will tell Discovery Ec2 plugin to read the host name from the -`my-elasticsearch-host` tag. In this case, it will be resolved to `myhostname1.mydomain.com`. -http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html[Read more about EC2 Tags]. +Each EC2 instance has a number of different addresses that might be suitable +for discovery. This setting allows you to select which of these addresses is +used by the discovery process. It can be set to one of `private_ip`, +`public_ip`, `private_dns`, `public_dns` or `tag:TAGNAME` where `TAGNAME` +refers to a name of a tag. This setting defaults to `private_ip`. -Defaults to `private_ip`. --- +If you set `discovery.ec2.host_type` to a value of the form `tag:TAGNAME` then +the value of the tag `TAGNAME` attached to each instance will be used as that +instance's address for discovery. Instances which do not have this tag set will +be ignored by the discovery process. -`availability_zones`:: +For example if you tag some EC2 instances with a tag named +`elasticsearch-host-name` and set `host_type: tag:elasticsearch-host-name` then +the `discovery-ec2` plugin will read each instance's host name from the value +of the `elasticsearch-host-name` tag. +http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html[Read more +about EC2 Tags]. - Either a comma separated list or array based list of availability zones. - Only instances within the provided availability zones will be used in the - cluster discovery. +-- -`any_group`:: +`discovery.ec2.availability_zones`:: - If set to `false`, will require all security groups to be present for the - instance to be used for the discovery. Defaults to `true`. + A list of the names of the availability zones to use for discovery. The + name of an availability zone is the + https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html[region + code followed by a letter], such as `us-east-1a`. Only instances placed in + one of the given availability zones will be used for discovery. -`node_cache_time`:: +[[discovery-ec2-filtering]] +`discovery.ec2.tag.TAGNAME`:: - How long the list of hosts is cached to prevent further requests to the AWS API. - Defaults to `10s`. ++ +-- -*All* secure settings of this plugin are {ref}/secure-settings.html#reloadable-secure-settings[reloadable]. -After you reload the settings, an aws sdk client with the latest settings -from the keystore will be used. +A list of the values of a tag called `TAGNAME` to use for discovery. If set, +only instances that are tagged with one of the given values will be used for +discovery. For instance, the following settings will only use nodes with a +`role` tag set to `master` and an `environment` tag set to either `dev` or +`staging`. -[IMPORTANT] -.Binding the network host -============================================== +[source,yaml] +---- +discovery.ec2.tags.role: master +discovery.ec2.tags.environment: dev,staging +---- -It's important to define `network.host` as by default it's bound to `localhost`. +NOTE: The names of tags used for discovery may only contain ASCII letters, +numbers, hyphens and underscores. In particular you cannot use tags whose name +includes a colon. -You can use {ref}/modules-network.html[core network host settings] or -<>: +-- -============================================== +`discovery.ec2.node_cache_time`:: -[[discovery-ec2-network-host]] -===== EC2 Network Host + Sets the length of time for which the collection of discovered instances is + cached. {es} waits at least this long between requests for discovery + information from the EC2 API. AWS may reject discovery requests if they are + made too often, and this would cause discovery to fail. Defaults to `10s`. -When the `discovery-ec2` plugin is installed, the following are also allowed -as valid network host settings: +All **secure** settings of this plugin are +{ref}/secure-settings.html#reloadable-secure-settings[reloadable], allowing you +to update the secure settings for this plugin without needing to restart each +node. -[cols="<,<",options="header",] -|================================================================== -|EC2 Host Value |Description -|`_ec2:privateIpv4_` |The private IP address (ipv4) of the machine. -|`_ec2:privateDns_` |The private host of the machine. -|`_ec2:publicIpv4_` |The public IP address (ipv4) of the machine. -|`_ec2:publicDns_` |The public host of the machine. -|`_ec2:privateIp_` |equivalent to `_ec2:privateIpv4_`. -|`_ec2:publicIp_` |equivalent to `_ec2:publicIpv4_`. -|`_ec2_` |equivalent to `_ec2:privateIpv4_`. -|================================================================== [[discovery-ec2-permissions]] -===== Recommended EC2 Permissions +===== Recommended EC2 permissions -EC2 discovery requires making a call to the EC2 service. You'll want to setup -an IAM policy to allow this. You can create a custom policy via the IAM -Management Console. It should look similar to this. +The `discovery-ec2` plugin works by making a `DescribeInstances` call to the AWS +EC2 API. You must configure your AWS account to allow this, which is normally +done using an IAM policy. You can create a custom policy via the IAM Management +Console. It should look similar to this. [source,js] ---- @@ -182,60 +231,138 @@ Management Console. It should look similar to this. ---- // NOTCONSOLE -[[discovery-ec2-filtering]] -===== Filtering by Tags - -The ec2 discovery plugin can also filter machines to include in the cluster -based on tags (and not just groups). The settings to use include the -`discovery.ec2.tag.` prefix. For example, if you defined a tag `stage` in EC2 -and set it to `dev`, setting `discovery.ec2.tag.stage` to `dev` will only -filter instances with a tag key set to `stage`, and a value of `dev`. Adding -multiple `discovery.ec2.tag` settings will require all of those tags to be set -for the instance to be included. - -One practical use for tag filtering is when an ec2 cluster contains many nodes -that are not master-eligible {es} nodes. In this case, tagging the ec2 -instances that _are_ running the master-eligible {es} nodes, and then filtering -by that tag, will help discovery to run more efficiently. - [[discovery-ec2-attributes]] -===== Automatic Node Attributes +===== Automatic node attributes -Though not dependent on actually using `ec2` as discovery (but still requires the `discovery-ec2` plugin installed), the -plugin can automatically add node attributes relating to ec2. In the future this may support other attributes, but this will -currently only add an `aws_availability_zone` node attribute, which is the availability zone of the current node. Attributes -can be used to isolate primary and replica shards across availability zones by using the +The `discovery-ec2` plugin can automatically set the `aws_availability_zone` +node attribute to the availability zone of each node. This node attribute +allows you to ensure that each shard has copies allocated redundantly across +multiple availability zones by using the {ref}/allocation-awareness.html[Allocation Awareness] feature. -In order to enable it, set `cloud.node.auto_attributes` to `true` in the settings. For example: +In order to enable the automatic definition of the `aws_availability_zone` +attribute, set `cloud.node.auto_attributes` to `true`. For example: [source,yaml] ---- cloud.node.auto_attributes: true - cluster.routing.allocation.awareness.attributes: aws_availability_zone ---- +The `aws_availability_zone` attribute can be automatically set like this when +using any discovery type. It is not necessary to set `discovery.seed_providers: +ec2`. However this feature does require that the `discovery-ec2` plugin is +installed. + +[[discovery-ec2-network-host]] +===== Binding to the correct address + +It is important to define `network.host` correctly when deploying a cluster on +EC2. By default each {es} node only binds to `localhost`, which will prevent it +from being discovered by nodes running on any other instances. + +You can use the {ref}/modules-network.html[core network host settings] to bind +each node to the desired address, or you can set `network.host` to one of the +following EC2-specific settings provided by the `discovery-ec2` plugin: + +[cols="<,<",options="header",] +|================================================================== +|EC2 Host Value |Description +|`_ec2:privateIpv4_` |The private IP address (ipv4) of the machine. +|`_ec2:privateDns_` |The private host of the machine. +|`_ec2:publicIpv4_` |The public IP address (ipv4) of the machine. +|`_ec2:publicDns_` |The public host of the machine. +|`_ec2:privateIp_` |Equivalent to `_ec2:privateIpv4_`. +|`_ec2:publicIp_` |Equivalent to `_ec2:publicIpv4_`. +|`_ec2_` |Equivalent to `_ec2:privateIpv4_`. +|================================================================== + +These values are acceptable when using any discovery type. They do not require +you to set `discovery.seed_providers: ec2`. However they do require that the +`discovery-ec2` plugin is installed. + [[cloud-aws-best-practices]] ==== Best Practices in AWS -Collection of best practices and other information around running Elasticsearch on AWS. +This section contains some other information about designing and managing an +{es} cluster on your own AWS infrastructure. If you would prefer to avoid these +operational details then you may be interested in a hosted {es} installation +available on AWS-based infrastructure from http://www.elastic.co/cloud. + +===== Storage + +EC2 instances offer a number of different kinds of storage. Please be aware of +the folowing when selecting the storage for your cluster: + +* http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/InstanceStorage.html[Instance +Store] is recommended for {es} clusters as it offers excellent performance and +is cheaper than EBS-based storage. {es} is designed to work well with this kind +of ephemeral storage because it replicates each shard across multiple nodes. If +a node fails and its Instance Store is lost then {es} will rebuild any lost +shards from other copies. -===== Instance/Disk -When selecting disk please be aware of the following order of preference: +* https://aws.amazon.com/ebs/[EBS-based storage] may be acceptable +for smaller clusters (1-2 nodes). Be sure to use provisioned IOPS to ensure +your cluster has satisfactory performance. -* https://aws.amazon.com/efs/[EFS] - Avoid as the sacrifices made to offer durability, shared storage, and grow/shrink come at performance cost, such file systems have been known to cause corruption of indices, and due to Elasticsearch being distributed and having built-in replication, the benefits that EFS offers are not needed. -* https://aws.amazon.com/ebs/[EBS] - Works well if running a small cluster (1-2 nodes) and cannot tolerate the loss all storage backing a node easily or if running indices with no replicas. If EBS is used, then leverage provisioned IOPS to ensure performance. -* http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/InstanceStorage.html[Instance Store] - When running clusters of larger size and with replicas the ephemeral nature of Instance Store is ideal since Elasticsearch can tolerate the loss of shards. With Instance Store one gets the performance benefit of having disk physically attached to the host running the instance and also the cost benefit of avoiding paying extra for EBS. +* https://aws.amazon.com/efs/[EFS-based storage] is not +recommended or supported as it does not offer satisfactory performance. +Historically, shared network filesystems such as EFS have not always offered +precisely the behaviour that {es} requires of its filesystem, and this has been +known to lead to index corruption. Although EFS offers durability, shared +storage, and the ability to grow and shrink filesystems dynamically, you can +achieve the same benefits using {es} directly. +===== Choice of AMI -Prefer https://aws.amazon.com/amazon-linux-ami/[Amazon Linux AMIs] as since Elasticsearch runs on the JVM, OS dependencies are very minimal and one can benefit from the lightweight nature, support, and performance tweaks specific to EC2 that the Amazon Linux AMIs offer. +Prefer the https://aws.amazon.com/amazon-linux-ami/[Amazon Linux AMIs] as these +allow you to benefit from the lightweight nature, support, and EC2-specific +performance enhancements that these images offer. ===== Networking -* Networking throttling takes place on smaller instance types in both the form of https://lab.getbase.com/how-we-discovered-limitations-on-the-aws-tcp-stack/[bandwidth and number of connections]. Therefore if large number of connections are needed and networking is becoming a bottleneck, avoid https://aws.amazon.com/ec2/instance-types/[instance types] with networking labeled as `Moderate` or `Low`. -* When running in multiple http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html[availability zones] be sure to leverage {ref}/allocation-awareness.html[shard allocation awareness] so that not all copies of shard data reside in the same availability zone. -* Do not span a cluster across regions. If necessary, use a cross cluster search. -===== Misc -* If you have split your nodes into roles, consider https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html[tagging the EC2 instances] by role to make it easier to filter and view your EC2 instances in the AWS console. -* Consider https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/terminating-instances.html#Using_ChangingDisableAPITermination[enabling termination protection] for all of your instances to avoid accidentally terminating a node in the cluster and causing a potentially disruptive reallocation. +* Smaller instance types have limited network performance, in terms of both +https://lab.getbase.com/how-we-discovered-limitations-on-the-aws-tcp-stack/[bandwidth +and number of connections]. If networking is a bottleneck, avoid +https://aws.amazon.com/ec2/instance-types/[instance types] with networking +labelled as `Moderate` or `Low`. + +* It is a good idea to distribute your nodes across multiple +http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html[availability +zones] and use {ref}/allocation-awareness.html[shard allocation awareness] to +ensure that each shard has copies in more than one availability zone. + +* Do not span a cluster across regions. {es} expects that node-to-node +connections within a cluster are reasonably reliable and offer high bandwidth +and low latency, and these properties do not hold for connections between +regions. Although an {es} cluster will behave correctly when node-to-node +connections are unreliable or slow, it is not optimised for this case and its +performance may suffer. If you wish to geographically distribute your data, you +should provision multiple clusters and use features such as +{ref}/modules-cross-cluster-search.html[cross-cluster search] and +{stack-ov}/xpack-ccr.html[cross-cluster replication]. + +===== Other recommendations + +* If you have split your nodes into roles, consider +https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html[tagging the +EC2 instances] by role to make it easier to filter and view your EC2 instances +in the AWS console. + +* Consider +https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/terminating-instances.html#Using_ChangingDisableAPITermination[enabling +termination protection] for all of your data and master-eligible nodes. This +will help to prevent accidental termination of these nodes which could +temporarily reduce the resilience of the cluster and which could cause a +potentially disruptive reallocation of shards. + +* If running your cluster using one or more +https://docs.aws.amazon.com/autoscaling/ec2/userguide/AutoScalingGroup.html[auto-scaling +groups], consider protecting your data and master-eligible nodes +https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-instance-termination.html#instance-protection-instance[against +termination during scale-in]. This will help to prevent automatic termination +of these nodes which could temporarily reduce the resilience of the cluster and +which could cause a potentially disruptive reallocation of shards. If these +instances are protected against termination during scale-in then you can use +{ref}/shard-allocation-filtering.html[shard allocation filtering] to gracefully +migrate any data off these nodes before terminating them manually. From 23e4d4606b06a0dd082fbf1c6bfe12282466f187 Mon Sep 17 00:00:00 2001 From: Ioannis Kakavas Date: Wed, 22 May 2019 12:00:21 +0300 Subject: [PATCH 028/224] Merge claims from userinfo and ID Token correctly (#42277) Enhance the handling of merging the claims sets of the ID Token and the UserInfo response. JsonObject#merge would throw a runtime exception when attempting to merge two objects with the same key and different values. This could happen for an OP that returns different vales for the same claim in the ID Token and the UserInfo response ( Google does that for profile claim ). If a claim is contained in both sets, we attempt to merge the values if they are objects or arrays, otherwise the ID Token claim value takes presedence and overwrites the userinfo response. --- .../oidc/OpenIdConnectAuthenticator.java | 88 ++++++++++- .../oidc/OpenIdConnectAuthenticatorTests.java | 141 +++++++++++++++++- 2 files changed, 226 insertions(+), 3 deletions(-) diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectAuthenticator.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectAuthenticator.java index 32cffc80071c3..c652a39b90912 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectAuthenticator.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectAuthenticator.java @@ -37,6 +37,7 @@ import com.nimbusds.openid.connect.sdk.token.OIDCTokens; import com.nimbusds.openid.connect.sdk.validators.AccessTokenValidator; import com.nimbusds.openid.connect.sdk.validators.IDTokenValidator; +import net.minidev.json.JSONArray; import net.minidev.json.JSONObject; import org.apache.commons.codec.Charsets; import org.apache.http.Header; @@ -401,15 +402,16 @@ private void handleUserinfoResponse(HttpResponse httpResponse, JWTClaimsSet veri if (httpResponse.getStatusLine().getStatusCode() == 200) { if (ContentType.parse(contentHeader.getValue()).getMimeType().equals("application/json")) { final JWTClaimsSet userInfoClaims = JWTClaimsSet.parse(contentAsString); + validateUserInfoResponse(userInfoClaims, verifiedIdTokenClaims.getSubject(), claimsListener); if (LOGGER.isTraceEnabled()) { LOGGER.trace("Successfully retrieved user information: [{}]", userInfoClaims.toJSONObject().toJSONString()); } final JSONObject combinedClaims = verifiedIdTokenClaims.toJSONObject(); - combinedClaims.merge(userInfoClaims.toJSONObject()); + mergeObjects(combinedClaims, userInfoClaims.toJSONObject()); claimsListener.onResponse(JWTClaimsSet.parse(combinedClaims)); } else if (ContentType.parse(contentHeader.getValue()).getMimeType().equals("application/jwt")) { //TODO Handle validating possibly signed responses - claimsListener.onFailure(new IllegalStateException("Unable to parse Userinfo Response. Signed/encryopted JWTs are" + + claimsListener.onFailure(new IllegalStateException("Unable to parse Userinfo Response. Signed/encrypted JWTs are" + "not currently supported")); } else { claimsListener.onFailure(new IllegalStateException("Unable to parse Userinfo Response. Content type was expected to " + @@ -435,6 +437,19 @@ private void handleUserinfoResponse(HttpResponse httpResponse, JWTClaimsSet veri } } + /** + * Validates that the userinfo response contains a sub Claim and that this claim value is the same as the one returned in the ID Token + */ + private void validateUserInfoResponse(JWTClaimsSet userInfoClaims, String expectedSub, ActionListener claimsListener) { + if (userInfoClaims.getSubject().isEmpty()) { + claimsListener.onFailure(new ElasticsearchSecurityException("Userinfo Response did not contain a sub Claim")); + } else if (userInfoClaims.getSubject().equals(expectedSub) == false) { + claimsListener.onFailure(new ElasticsearchSecurityException("Userinfo Response is not valid as it is for " + + "subject [{}] while the ID Token was for subject [{}]", userInfoClaims.getSubject(), + expectedSub)); + } + } + /** * Attempts to make a request to the Token Endpoint of the OpenID Connect provider in order to exchange an * authorization code for an Id Token (and potentially an Access Token) @@ -606,6 +621,75 @@ private void setMetadataFileWatcher(String jwkSetPath) throws IOException { watcherService.add(watcher, ResourceWatcherService.Frequency.MEDIUM); } + /** + * Merges the JsonObject with the claims of the ID Token with the JsonObject with the claims of the UserInfo response. This is + * necessary as some OPs return slightly different values for some claims (i.e. Google for the profile picture) and + * {@link JSONObject#merge(Object)} would throw a runtime exception. The merging is performed based on the following rules: + *
    + *
  • If the values for a given claim are primitives (of the the same type), the value from the ID Token is retained
  • + *
  • If the values for a given claim are Objects, the values are merged
  • + *
  • If the values for a given claim are Arrays, the values are merged without removing duplicates
  • + *
  • If the values for a given claim are of different types, an exception is thrown
  • + *
+ * + * @param userInfo The JsonObject with the ID Token claims + * @param idToken The JsonObject with the UserInfo Response claims + * @return the merged JsonObject + */ + // pkg protected for testing + static JSONObject mergeObjects(JSONObject idToken, JSONObject userInfo) { + for (Map.Entry entry : idToken.entrySet()) { + Object value1 = entry.getValue(); + Object value2 = userInfo.get(entry.getKey()); + if (value2 == null) { + continue; + } + if (value1 instanceof JSONArray) { + idToken.put(entry.getKey(), mergeArrays((JSONArray) value1, value2)); + } else if (value1 instanceof JSONObject) { + idToken.put(entry.getKey(), mergeObjects((JSONObject) value1, value2)); + } else if (value1.getClass().equals(value2.getClass()) == false) { + throw new IllegalStateException("Error merging ID token and userinfo claim value for claim [" + entry.getKey() + "]. " + + "Cannot merge [" + value1.getClass().getName() + "] with [" + value2.getClass().getName() + "]"); + } + } + for (Map.Entry entry : userInfo.entrySet()) { + if (idToken.containsKey(entry.getKey()) == false) { + idToken.put(entry.getKey(), entry.getValue()); + } + } + return idToken; + } + + private static JSONObject mergeObjects(JSONObject jsonObject1, Object jsonObject2) { + if (jsonObject2 == null) { + return jsonObject1; + } + if (jsonObject2 instanceof JSONObject) { + return mergeObjects(jsonObject1, (JSONObject) jsonObject2); + } + throw new IllegalStateException("Error while merging ID token and userinfo claims. " + + "Cannot merge JSONObject with [" + jsonObject2.getClass().getName() + "]"); + } + + private static JSONArray mergeArrays(JSONArray jsonArray1, Object jsonArray2) { + if (jsonArray2 == null) { + return jsonArray1; + } + if (jsonArray2 instanceof JSONArray) { + return mergeArrays(jsonArray1, (JSONArray) jsonArray2); + } + if (jsonArray2 instanceof String) { + jsonArray1.add(jsonArray2); + } + return jsonArray1; + } + + private static JSONArray mergeArrays(JSONArray jsonArray1, JSONArray jsonArray2) { + jsonArray1.addAll(jsonArray2); + return jsonArray1; + } + protected void close() { try { this.httpClient.close(); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectAuthenticatorTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectAuthenticatorTests.java index 64e976d90d1e3..43b58b8d4b521 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectAuthenticatorTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectAuthenticatorTests.java @@ -37,6 +37,8 @@ import com.nimbusds.openid.connect.sdk.claims.AccessTokenHash; import com.nimbusds.openid.connect.sdk.validators.IDTokenValidator; import com.nimbusds.openid.connect.sdk.validators.InvalidHashException; +import net.minidev.json.JSONArray; +import net.minidev.json.JSONObject; import org.elasticsearch.ElasticsearchSecurityException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.PlainActionFuture; @@ -72,6 +74,7 @@ import java.util.UUID; import static java.time.Instant.now; +import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; @@ -96,7 +99,9 @@ public void setup() { @After public void cleanup() { - authenticator.close(); + if (authenticator != null) { + authenticator.close(); + } } private OpenIdConnectAuthenticator buildAuthenticator() throws URISyntaxException { @@ -632,6 +637,140 @@ public void testImplicitFlowFailsWithUnsignedJwt() throws Exception { assertThat(e.getCause().getMessage(), containsString("Signed ID token expected")); } + public void testJsonObjectMerging() throws Exception { + final Nonce nonce = new Nonce(); + final String subject = "janedoe"; + final Tuple keyMaterial = getRandomJwkForType(randomFrom("ES", "RS")); + final JWK jwk = keyMaterial.v2().getKeys().get(0); + RelyingPartyConfiguration rpConfig = getRpConfig(jwk.getAlgorithm().getName()); + OpenIdConnectProviderConfiguration opConfig = getOpConfig(); + JSONObject address = new JWTClaimsSet.Builder() + .claim("street_name", "12, Test St.") + .claim("locality", "New York") + .claim("region", "NY") + .claim("country", "USA") + .build() + .toJSONObject(); + JSONObject idTokenObject = new JWTClaimsSet.Builder() + .jwtID(randomAlphaOfLength(8)) + .audience(rpConfig.getClientId().getValue()) + .expirationTime(Date.from(now().plusSeconds(3600))) + .issuer(opConfig.getIssuer().getValue()) + .issueTime(Date.from(now().minusSeconds(200))) + .notBeforeTime(Date.from(now().minusSeconds(200))) + .claim("nonce", nonce) + .claim("given_name", "Jane Doe") + .claim("family_name", "Doe") + .claim("profile", "https://test-profiles.com/jane.doe") + .claim("name", "Jane") + .claim("email", "jane.doe@example.com") + .claim("roles", new JSONArray().appendElement("role1").appendElement("role2").appendElement("role3")) + .claim("address", address) + .subject(subject) + .build() + .toJSONObject(); + + JSONObject userinfoObject = new JWTClaimsSet.Builder() + .claim("given_name", "Jane Doe") + .claim("family_name", "Doe") + .claim("profile", "https://test-profiles.com/jane.doe") + .claim("name", "Jane") + .claim("email", "jane.doe@example.com") + .subject(subject) + .build() + .toJSONObject(); + + OpenIdConnectAuthenticator.mergeObjects(idTokenObject, userinfoObject); + assertTrue(idTokenObject.containsKey("given_name")); + assertTrue(idTokenObject.containsKey("family_name")); + assertTrue(idTokenObject.containsKey("profile")); + assertTrue(idTokenObject.containsKey("name")); + assertTrue(idTokenObject.containsKey("email")); + assertTrue(idTokenObject.containsKey("address")); + assertTrue(idTokenObject.containsKey("roles")); + assertTrue(idTokenObject.containsKey("nonce")); + assertTrue(idTokenObject.containsKey("sub")); + assertTrue(idTokenObject.containsKey("jti")); + assertTrue(idTokenObject.containsKey("aud")); + assertTrue(idTokenObject.containsKey("exp")); + assertTrue(idTokenObject.containsKey("iss")); + assertTrue(idTokenObject.containsKey("iat")); + assertTrue(idTokenObject.containsKey("email")); + + // Claims with different types throw an error + JSONObject wrongTypeInfo = new JWTClaimsSet.Builder() + .claim("given_name", "Jane Doe") + .claim("family_name", 123334434) + .claim("profile", "https://test-profiles.com/jane.doe") + .claim("name", "Jane") + .claim("email", "jane.doe@example.com") + .subject(subject) + .build() + .toJSONObject(); + + final IllegalStateException e = expectThrows(IllegalStateException.class, () -> { + OpenIdConnectAuthenticator.mergeObjects(idTokenObject, wrongTypeInfo); + }); + + // Userinfo Claims overwrite ID Token claims + JSONObject overwriteUserInfo = new JWTClaimsSet.Builder() + .claim("given_name", "Jane Doe") + .claim("family_name", "Doe") + .claim("profile", "https://test-profiles.com/jane.doe2") + .claim("name", "Jane") + .claim("email", "jane.doe@mail.com") + .subject(subject) + .build() + .toJSONObject(); + + OpenIdConnectAuthenticator.mergeObjects(idTokenObject, overwriteUserInfo); + assertThat(idTokenObject.getAsString("email"), equalTo("jane.doe@example.com")); + assertThat(idTokenObject.getAsString("profile"), equalTo("https://test-profiles.com/jane.doe")); + + // Merging Arrays + JSONObject userInfoWithRoles = new JWTClaimsSet.Builder() + .claim("given_name", "Jane Doe") + .claim("family_name", "Doe") + .claim("profile", "https://test-profiles.com/jane.doe") + .claim("name", "Jane") + .claim("email", "jane.doe@example.com") + .claim("roles", new JSONArray().appendElement("role4").appendElement("role5")) + .subject(subject) + .build() + .toJSONObject(); + + OpenIdConnectAuthenticator.mergeObjects(idTokenObject, userInfoWithRoles); + assertThat((JSONArray) idTokenObject.get("roles"), containsInAnyOrder("role1", "role2", "role3", "role4", "role5")); + + // Merging nested objects + JSONObject addressUserInfo = new JWTClaimsSet.Builder() + .claim("street_name", "12, Test St.") + .claim("locality", "New York") + .claim("postal_code", "10024") + .build() + .toJSONObject(); + JSONObject userInfoWithAddress = new JWTClaimsSet.Builder() + .claim("given_name", "Jane Doe") + .claim("family_name", "Doe") + .claim("profile", "https://test-profiles.com/jane.doe") + .claim("name", "Jane") + .claim("email", "jane.doe@example.com") + .claim("roles", new JSONArray().appendElement("role4").appendElement("role5")) + .claim("address", addressUserInfo) + .subject(subject) + .build() + .toJSONObject(); + OpenIdConnectAuthenticator.mergeObjects(idTokenObject, userInfoWithAddress); + assertTrue(idTokenObject.containsKey("address")); + JSONObject combinedAddress = (JSONObject) idTokenObject.get("address"); + assertTrue(combinedAddress.containsKey("street_name")); + assertTrue(combinedAddress.containsKey("locality")); + assertTrue(combinedAddress.containsKey("street_name")); + assertTrue(combinedAddress.containsKey("postal_code")); + assertTrue(combinedAddress.containsKey("region")); + assertTrue(combinedAddress.containsKey("country")); + } + private OpenIdConnectProviderConfiguration getOpConfig() throws URISyntaxException { return new OpenIdConnectProviderConfiguration( new Issuer("https://op.example.com"), From dcf2929e46dfd4d593f7473f3f764554ed63dca0 Mon Sep 17 00:00:00 2001 From: Tim Vernum Date: Wed, 22 May 2019 19:10:40 +1000 Subject: [PATCH 029/224] Fix settings prefix for realm truststore password (#42336) As part of #30241 realm settings were changed to be true affix settings. In the process of this change, the "ssl." prefix was lost from the realm truststore password. It should be: xpack.security.authc.realms...ssl.truststore.password Due to a mismatch between the way we define SSL settings and load SSL contexts, there was no way to define this legacy password setting in a realm config. The settings validation would reject "ssl.truststore.password" but the SSL service would ignore "truststore.password" Resolves: #41663 --- .../core/ssl/SSLConfigurationSettings.java | 2 +- .../ssl/SSLConfigurationSettingsTests.java | 19 ++++++++++++++++++- 2 files changed, 19 insertions(+), 2 deletions(-) diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/SSLConfigurationSettings.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/SSLConfigurationSettings.java index c16035f1cabe3..ae31966a34712 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/SSLConfigurationSettings.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/SSLConfigurationSettings.java @@ -117,7 +117,7 @@ public class SSLConfigurationSettings { public static final Setting LEGACY_TRUSTSTORE_PASSWORD_PROFILES = Setting.affixKeySetting("transport.profiles.", "xpack.security.ssl.truststore.password", LEGACY_TRUSTSTORE_PASSWORD_TEMPLATE); public static final Function> LEGACY_TRUST_STORE_PASSWORD_REALM = realmType -> - Setting.affixKeySetting("xpack.security.authc.realms." + realmType + ".", "truststore.password", + Setting.affixKeySetting("xpack.security.authc.realms." + realmType + ".", "ssl.truststore.password", LEGACY_TRUSTSTORE_PASSWORD_TEMPLATE); public static final Function> TRUSTSTORE_PASSWORD_TEMPLATE = key -> diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ssl/SSLConfigurationSettingsTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ssl/SSLConfigurationSettingsTests.java index 072f7d0d57da7..2d98dbb6aadee 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ssl/SSLConfigurationSettingsTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ssl/SSLConfigurationSettingsTests.java @@ -5,15 +5,17 @@ */ package org.elasticsearch.xpack.core.ssl; +import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.test.ESTestCase; import javax.net.ssl.KeyManagerFactory; import javax.net.ssl.TrustManagerFactory; - import java.util.Arrays; +import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.startsWith; public class SSLConfigurationSettingsTests extends ESTestCase { @@ -91,4 +93,19 @@ public void testEmptySettingsParsesToDefaults() { assertThat(SSLConfigurationSettings.getKeyStoreType(ssl.truststoreType, settings, null), is("jks")); } + public void testRealmSettingPrefixes() { + SSLConfigurationSettings.getRealmSettings("_type").forEach(affix -> { + final String key = affix.getConcreteSettingForNamespace("_name").getKey(); + assertThat(key, startsWith("xpack.security.authc.realms._type._name.ssl.")); + }); + } + + public void testProfileSettingPrefixes() { + SSLConfigurationSettings.getProfileSettings().forEach(affix -> { + assertThat(affix, instanceOf(Setting.AffixSetting.class)); + final String key = ((Setting.AffixSetting) affix).getConcreteSettingForNamespace("_name").getKey(); + assertThat(key, startsWith("transport.profiles._name.xpack.security.ssl.")); + }); + } + } From 5fb55f62be68add6530cc883e317cb764bf4ad1e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christoph=20B=C3=BCscher?= Date: Wed, 22 May 2019 05:15:54 -0400 Subject: [PATCH 030/224] Remove type-related methods from QueryBuilders (#42284) Removes all deprecated type-related methods from the QueryBuilders helper class and from tests using them. Also removing related docs tests and doc pages refering to the `type` query. All removed methods have been deprecated since version 7.0. --- .../QueryDSLDocumentationTests.java | 7 --- .../query-dsl/term-level-queries.asciidoc | 6 -- docs/java-api/query-dsl/type-query.asciidoc | 15 ----- .../high-level/query-builders.asciidoc | 1 - .../query-dsl/term-level-queries.asciidoc | 8 +-- docs/reference/query-dsl/type-query.asciidoc | 19 ------ docs/reference/redirects.asciidoc | 9 +-- .../join/query/ChildQuerySearchIT.java | 2 +- .../http/ContextAndHeaderTransportIT.java | 2 +- .../index/query/QueryBuilders.java | 63 +------------------ .../highlight/HighlighterSearchIT.java | 7 ++- .../search/geo/GeoShapeIntegrationIT.java | 2 +- .../search/geo/GeoShapeQueryTests.java | 5 +- .../geo/LegacyGeoShapeIntegrationIT.java | 2 +- .../search/query/SearchQueryIT.java | 17 ++--- 15 files changed, 21 insertions(+), 144 deletions(-) delete mode 100644 docs/java-api/query-dsl/type-query.asciidoc delete mode 100644 docs/reference/query-dsl/type-query.asciidoc diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/QueryDSLDocumentationTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/QueryDSLDocumentationTests.java index cfe9e98f643e6..51670b29de1b6 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/QueryDSLDocumentationTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/QueryDSLDocumentationTests.java @@ -74,7 +74,6 @@ import static org.elasticsearch.index.query.QueryBuilders.spanWithinQuery; import static org.elasticsearch.index.query.QueryBuilders.termQuery; import static org.elasticsearch.index.query.QueryBuilders.termsQuery; -import static org.elasticsearch.index.query.QueryBuilders.typeQuery; import static org.elasticsearch.index.query.QueryBuilders.wildcardQuery; import static org.elasticsearch.index.query.QueryBuilders.wrapperQuery; import static org.elasticsearch.index.query.functionscore.ScoreFunctionBuilders.exponentialDecayFunction; @@ -433,12 +432,6 @@ public void testTerms() { // end::terms } - public void testType() { - // tag::type - typeQuery("my_type"); // <1> - // end::type - } - public void testWildcard() { // tag::wildcard wildcardQuery( diff --git a/docs/java-api/query-dsl/term-level-queries.asciidoc b/docs/java-api/query-dsl/term-level-queries.asciidoc index e7d5ad4e52b74..7d3649e372bbd 100644 --- a/docs/java-api/query-dsl/term-level-queries.asciidoc +++ b/docs/java-api/query-dsl/term-level-queries.asciidoc @@ -53,10 +53,6 @@ The queries in this group are: http://en.wikipedia.org/wiki/Damerau%E2%80%93Levenshtein_distance[Levenshtein edit distance] of 1 or 2. -<>:: - - Find documents of the specified type. - <>:: Find documents with the specified type and IDs. @@ -78,6 +74,4 @@ include::regexp-query.asciidoc[] include::fuzzy-query.asciidoc[] -include::type-query.asciidoc[] - include::ids-query.asciidoc[] diff --git a/docs/java-api/query-dsl/type-query.asciidoc b/docs/java-api/query-dsl/type-query.asciidoc deleted file mode 100644 index 160deedb9eaca..0000000000000 --- a/docs/java-api/query-dsl/type-query.asciidoc +++ /dev/null @@ -1,15 +0,0 @@ -[[java-query-dsl-type-query]] -==== Type Query - -deprecated[7.0.0] - -Types are being removed, prefer filtering on a field instead. For -more information, see {ref}/removal-of-types.html[Removal of mapping types]. - -See {ref}/query-dsl-type-query.html[Type Query] - -["source","java",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{query-dsl-test}[type] --------------------------------------------------- -<1> type diff --git a/docs/java-rest/high-level/query-builders.asciidoc b/docs/java-rest/high-level/query-builders.asciidoc index 32a3b06505b1d..53d9b9af97d12 100644 --- a/docs/java-rest/high-level/query-builders.asciidoc +++ b/docs/java-rest/high-level/query-builders.asciidoc @@ -40,7 +40,6 @@ This page lists all the available search queries with their corresponding `Query | {ref}/query-dsl-wildcard-query.html[Wildcard] | {query-ref}/WildcardQueryBuilder.html[WildcardQueryBuilder] | {query-ref}/QueryBuilders.html#wildcardQuery-java.lang.String-java.lang.String-[QueryBuilders.wildcardQuery()] | {ref}/query-dsl-regexp-query.html[Regexp] | {query-ref}/RegexpQueryBuilder.html[RegexpQueryBuilder] | {query-ref}/QueryBuilders.html#regexpQuery-java.lang.String-java.lang.String-[QueryBuilders.regexpQuery()] | {ref}/query-dsl-fuzzy-query.html[Fuzzy] | {query-ref}/FuzzyQueryBuilder.html[FuzzyQueryBuilder] | {query-ref}/QueryBuilders.html#fuzzyQuery-java.lang.String-java.lang.String-[QueryBuilders.fuzzyQuery()] -| {ref}/query-dsl-type-query.html[Type] | {query-ref}/TypeQueryBuilder.html[TypeQueryBuilder] | {query-ref}/QueryBuilders.html#typeQuery-java.lang.String-[QueryBuilders.typeQuery()] | {ref}/query-dsl-ids-query.html[Ids] | {query-ref}/IdsQueryBuilder.html[IdsQueryBuilder] | {query-ref}/QueryBuilders.html#idsQuery--[QueryBuilders.idsQuery()] |====== diff --git a/docs/reference/query-dsl/term-level-queries.asciidoc b/docs/reference/query-dsl/term-level-queries.asciidoc index f4e185ba9597a..dd7ea38819f01 100644 --- a/docs/reference/query-dsl/term-level-queries.asciidoc +++ b/docs/reference/query-dsl/term-level-queries.asciidoc @@ -60,13 +60,9 @@ The queries in this group are: http://en.wikipedia.org/wiki/Damerau%E2%80%93Levenshtein_distance[Levenshtein edit distance] of 1 or 2. -<>:: - - Find documents of the specified type. - <>:: - Find documents with the specified type and IDs. + Find documents with the specified IDs. include::term-query.asciidoc[] @@ -87,6 +83,4 @@ include::regexp-query.asciidoc[] include::fuzzy-query.asciidoc[] -include::type-query.asciidoc[] - include::ids-query.asciidoc[] diff --git a/docs/reference/query-dsl/type-query.asciidoc b/docs/reference/query-dsl/type-query.asciidoc deleted file mode 100644 index 4364d1e14e90d..0000000000000 --- a/docs/reference/query-dsl/type-query.asciidoc +++ /dev/null @@ -1,19 +0,0 @@ -[[query-dsl-type-query]] -=== Type Query - -deprecated[7.0.0,Types and the `type` query are deprecated and in the process of being removed. See <>.] - -Filters documents matching the provided document / mapping type. - -[source,js] --------------------------------------------------- -GET /_search -{ - "query": { - "type" : { - "value" : "_doc" - } - } -} --------------------------------------------------- -// CONSOLE diff --git a/docs/reference/redirects.asciidoc b/docs/reference/redirects.asciidoc index 1c6850542a971..b5f0e08a45232 100644 --- a/docs/reference/redirects.asciidoc +++ b/docs/reference/redirects.asciidoc @@ -248,13 +248,6 @@ The `terms` filter has been replaced by the <>. It behave as a query in ``query context'' and as a filter in ``filter context'' (see <>). -[role="exclude",id="query-dsl-type-filter"] -=== Type Filter - -The `type` filter has been replaced by the <>. It behaves -as a query in ``query context'' and as a filter in ``filter context'' (see -<>). - [role="exclude",id="query-dsl-flt-query"] === Fuzzy Like This Query @@ -601,4 +594,4 @@ See <>. [role="exclude",id="_faster_prefix_queries_with_literal_index_prefixes_literal.html"] -See <>. \ No newline at end of file +See <>. diff --git a/modules/parent-join/src/test/java/org/elasticsearch/join/query/ChildQuerySearchIT.java b/modules/parent-join/src/test/java/org/elasticsearch/join/query/ChildQuerySearchIT.java index bc825cfb381ba..f3ef60ea215fe 100644 --- a/modules/parent-join/src/test/java/org/elasticsearch/join/query/ChildQuerySearchIT.java +++ b/modules/parent-join/src/test/java/org/elasticsearch/join/query/ChildQuerySearchIT.java @@ -179,7 +179,7 @@ public void testSimpleChildQuery() throws Exception { // TEST FETCHING _parent from child SearchResponse searchResponse; searchResponse = client().prepareSearch("test") - .setQuery(idsQuery("doc").addIds("c1")).get(); + .setQuery(idsQuery().addIds("c1")).get(); assertNoFailures(searchResponse); assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L)); assertThat(searchResponse.getHits().getAt(0).getId(), equalTo("c1")); diff --git a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/ContextAndHeaderTransportIT.java b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/ContextAndHeaderTransportIT.java index 04f01cf0f0e4c..47cce87c4b959 100644 --- a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/ContextAndHeaderTransportIT.java +++ b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/ContextAndHeaderTransportIT.java @@ -177,7 +177,7 @@ public void testThatGeoShapeQueryGetRequestContainsContextAndHeaders() throws Ex .get(); transportClient().admin().indices().prepareRefresh(lookupIndex, queryIndex).get(); - GeoShapeQueryBuilder queryBuilder = QueryBuilders.geoShapeQuery("location", "1", "type") + GeoShapeQueryBuilder queryBuilder = QueryBuilders.geoShapeQuery("location", "1") .indexedShapeIndex(lookupIndex) .indexedShapePath("location"); diff --git a/server/src/main/java/org/elasticsearch/index/query/QueryBuilders.java b/server/src/main/java/org/elasticsearch/index/query/QueryBuilders.java index 5ac70781286a4..30284703e8d6b 100644 --- a/server/src/main/java/org/elasticsearch/index/query/QueryBuilders.java +++ b/server/src/main/java/org/elasticsearch/index/query/QueryBuilders.java @@ -117,18 +117,6 @@ public static IdsQueryBuilder idsQuery() { return new IdsQueryBuilder(); } - /** - * Constructs a query that will match only specific ids within types. - * - * @param types The mapping/doc type - * - * @deprecated Types are in the process of being removed, use {@link #idsQuery()} instead. - */ - @Deprecated - public static IdsQueryBuilder idsQuery(String... types) { - return new IdsQueryBuilder().types(types); - } - /** * A Query that matches documents containing a term. * @@ -426,7 +414,7 @@ public static FunctionScoreQueryBuilder functionScoreQuery(FunctionScoreQueryBui * * @param function The function builder used to custom score */ - public static FunctionScoreQueryBuilder functionScoreQuery(ScoreFunctionBuilder function) { + public static FunctionScoreQueryBuilder functionScoreQuery(ScoreFunctionBuilder function) { return new FunctionScoreQueryBuilder(function); } @@ -436,7 +424,7 @@ public static FunctionScoreQueryBuilder functionScoreQuery(ScoreFunctionBuilder * @param queryBuilder The query to custom score * @param function The function builder used to custom score */ - public static FunctionScoreQueryBuilder functionScoreQuery(QueryBuilder queryBuilder, ScoreFunctionBuilder function) { + public static FunctionScoreQueryBuilder functionScoreQuery(QueryBuilder queryBuilder, ScoreFunctionBuilder function) { return (new FunctionScoreQueryBuilder(queryBuilder, function)); } @@ -586,15 +574,6 @@ public static WrapperQueryBuilder wrapperQuery(byte[] source) { return new WrapperQueryBuilder(source); } - /** - * A filter based on doc/mapping type. - * @deprecated Types are going away, prefer filtering on a field. - */ - @Deprecated - public static TypeQueryBuilder typeQuery(String type) { - return new TypeQueryBuilder(type); - } - /** * A terms query that can extract the terms from another doc in an index. */ @@ -653,14 +632,6 @@ public static GeoShapeQueryBuilder geoShapeQuery(String name, String indexedShap return new GeoShapeQueryBuilder(name, indexedShapeId); } - /** - * @deprecated Types are in the process of being removed, use {@link #geoShapeQuery(String, String)} instead. - */ - @Deprecated - public static GeoShapeQueryBuilder geoShapeQuery(String name, String indexedShapeId, String indexedShapeType) { - return new GeoShapeQueryBuilder(name, indexedShapeId, indexedShapeType); - } - /** * A filter to filter indexed shapes intersecting with shapes * @@ -679,16 +650,6 @@ public static GeoShapeQueryBuilder geoIntersectionQuery(String name, String inde return builder; } - /** - * @deprecated Types are in the process of being removed, use {@link #geoIntersectionQuery(String, String)} instead. - */ - @Deprecated - public static GeoShapeQueryBuilder geoIntersectionQuery(String name, String indexedShapeId, String indexedShapeType) { - GeoShapeQueryBuilder builder = geoShapeQuery(name, indexedShapeId, indexedShapeType); - builder.relation(ShapeRelation.INTERSECTS); - return builder; - } - /** * A filter to filter indexed shapes that are contained by a shape * @@ -707,16 +668,6 @@ public static GeoShapeQueryBuilder geoWithinQuery(String name, String indexedSha return builder; } - /** - * @deprecated Types are in the process of being removed, use {@link #geoWithinQuery(String, String)} instead. - */ - @Deprecated - public static GeoShapeQueryBuilder geoWithinQuery(String name, String indexedShapeId, String indexedShapeType) { - GeoShapeQueryBuilder builder = geoShapeQuery(name, indexedShapeId, indexedShapeType); - builder.relation(ShapeRelation.WITHIN); - return builder; - } - /** * A filter to filter indexed shapes that are not intersection with the query shape * @@ -735,16 +686,6 @@ public static GeoShapeQueryBuilder geoDisjointQuery(String name, String indexedS return builder; } - /** - * @deprecated Types are in the process of being removed, use {@link #geoDisjointQuery(String, String)} instead. - */ - @Deprecated - public static GeoShapeQueryBuilder geoDisjointQuery(String name, String indexedShapeId, String indexedShapeType) { - GeoShapeQueryBuilder builder = geoShapeQuery(name, indexedShapeId, indexedShapeType); - builder.relation(ShapeRelation.DISJOINT); - return builder; - } - /** * A filter to filter only documents where a field exists in them. * diff --git a/server/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlighterSearchIT.java b/server/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlighterSearchIT.java index 1467fd1f0971e..3c21085fc905d 100644 --- a/server/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlighterSearchIT.java +++ b/server/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlighterSearchIT.java @@ -19,6 +19,7 @@ package org.elasticsearch.search.fetch.subphase.highlight; import com.carrotsearch.randomizedtesting.generators.RandomPicks; + import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.MockAnalyzer; import org.apache.lucene.analysis.MockTokenizer; @@ -1801,7 +1802,7 @@ public void testHighlightNoMatchSizeWithMultivaluedFields() throws IOException { index("test", "type1", "2", "text", new String[] {"", text2}); refresh(); - IdsQueryBuilder idsQueryBuilder = QueryBuilders.idsQuery("type1").addIds("2"); + IdsQueryBuilder idsQueryBuilder = QueryBuilders.idsQuery().addIds("2"); field.highlighterType("plain"); response = client().prepareSearch("test") .setQuery(idsQueryBuilder) @@ -1824,7 +1825,7 @@ public void testHighlightNoMatchSizeWithMultivaluedFields() throws IOException { // But if the field was actually empty then you should get no highlighting field index("test", "type1", "3", "text", new String[] {}); refresh(); - idsQueryBuilder = QueryBuilders.idsQuery("type1").addIds("3"); + idsQueryBuilder = QueryBuilders.idsQuery().addIds("3"); field.highlighterType("plain"); response = client().prepareSearch("test") .setQuery(idsQueryBuilder) @@ -1847,7 +1848,7 @@ public void testHighlightNoMatchSizeWithMultivaluedFields() throws IOException { index("test", "type1", "4"); refresh(); - idsQueryBuilder = QueryBuilders.idsQuery("type1").addIds("4"); + idsQueryBuilder = QueryBuilders.idsQuery().addIds("4"); field.highlighterType("plain"); response = client().prepareSearch("test") .setQuery(idsQueryBuilder) diff --git a/server/src/test/java/org/elasticsearch/search/geo/GeoShapeIntegrationIT.java b/server/src/test/java/org/elasticsearch/search/geo/GeoShapeIntegrationIT.java index a7faa04017258..e3054cb1f6b0c 100644 --- a/server/src/test/java/org/elasticsearch/search/geo/GeoShapeIntegrationIT.java +++ b/server/src/test/java/org/elasticsearch/search/geo/GeoShapeIntegrationIT.java @@ -153,7 +153,7 @@ public void testIndexShapeRouting() throws Exception { indexRandom(true, client().prepareIndex("test", "doc", "0").setSource(source, XContentType.JSON).setRouting("ABC")); SearchResponse searchResponse = client().prepareSearch("test").setQuery( - geoShapeQuery("shape", "0", "doc").indexedShapeIndex("test").indexedShapeRouting("ABC") + geoShapeQuery("shape", "0").indexedShapeIndex("test").indexedShapeRouting("ABC") ).get(); assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L)); diff --git a/server/src/test/java/org/elasticsearch/search/geo/GeoShapeQueryTests.java b/server/src/test/java/org/elasticsearch/search/geo/GeoShapeQueryTests.java index 3d1d5b6876a65..ef6bea10d749d 100644 --- a/server/src/test/java/org/elasticsearch/search/geo/GeoShapeQueryTests.java +++ b/server/src/test/java/org/elasticsearch/search/geo/GeoShapeQueryTests.java @@ -20,6 +20,7 @@ package org.elasticsearch.search.geo; import com.carrotsearch.randomizedtesting.generators.RandomNumbers; + import org.apache.lucene.geo.GeoTestUtil; import org.elasticsearch.action.get.GetResponse; import org.elasticsearch.action.index.IndexRequest; @@ -233,7 +234,7 @@ public void testIndexedShapeReferenceWithTypes() throws Exception { .endObject()).setRefreshPolicy(IMMEDIATE).get(); SearchResponse searchResponse = client().prepareSearch("test") - .setQuery(geoIntersectionQuery("location", "Big_Rectangle", "shape_type")) + .setQuery(geoIntersectionQuery("location", "Big_Rectangle")) .get(); assertSearchResponse(searchResponse); @@ -242,7 +243,7 @@ public void testIndexedShapeReferenceWithTypes() throws Exception { assertThat(searchResponse.getHits().getAt(0).getId(), equalTo("1")); searchResponse = client().prepareSearch("test") - .setQuery(geoShapeQuery("location", "Big_Rectangle", "shape_type")) + .setQuery(geoShapeQuery("location", "Big_Rectangle")) .get(); assertSearchResponse(searchResponse); diff --git a/server/src/test/java/org/elasticsearch/search/geo/LegacyGeoShapeIntegrationIT.java b/server/src/test/java/org/elasticsearch/search/geo/LegacyGeoShapeIntegrationIT.java index 574bdd46bba5b..8ddfbb2793024 100644 --- a/server/src/test/java/org/elasticsearch/search/geo/LegacyGeoShapeIntegrationIT.java +++ b/server/src/test/java/org/elasticsearch/search/geo/LegacyGeoShapeIntegrationIT.java @@ -155,7 +155,7 @@ public void testIndexShapeRouting() throws Exception { indexRandom(true, client().prepareIndex("test", "doc", "0").setSource(source, XContentType.JSON).setRouting("ABC")); SearchResponse searchResponse = client().prepareSearch("test").setQuery( - geoShapeQuery("shape", "0", "doc").indexedShapeIndex("test").indexedShapeRouting("ABC") + geoShapeQuery("shape", "0").indexedShapeIndex("test").indexedShapeRouting("ABC") ).get(); assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L)); diff --git a/server/src/test/java/org/elasticsearch/search/query/SearchQueryIT.java b/server/src/test/java/org/elasticsearch/search/query/SearchQueryIT.java index 5dc3874bcfa6d..7e233b863076a 100644 --- a/server/src/test/java/org/elasticsearch/search/query/SearchQueryIT.java +++ b/server/src/test/java/org/elasticsearch/search/query/SearchQueryIT.java @@ -26,7 +26,6 @@ import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.SearchType; import org.elasticsearch.bootstrap.JavaVersion; -import org.elasticsearch.common.Strings; import org.elasticsearch.common.document.DocumentField; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.time.DateFormatter; @@ -92,7 +91,6 @@ import static org.elasticsearch.index.query.QueryBuilders.termQuery; import static org.elasticsearch.index.query.QueryBuilders.termsLookupQuery; import static org.elasticsearch.index.query.QueryBuilders.termsQuery; -import static org.elasticsearch.index.query.QueryBuilders.typeQuery; import static org.elasticsearch.index.query.QueryBuilders.wildcardQuery; import static org.elasticsearch.index.query.QueryBuilders.wrapperQuery; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; @@ -486,9 +484,6 @@ public void testTypeFilter() throws Exception { indexRandom(true, client().prepareIndex("test", "type1", "1").setSource("field1", "value1"), client().prepareIndex("test", "type1", "2").setSource("field1", "value1")); - assertHitCount(client().prepareSearch().setQuery(typeQuery("type1")).get(), 2L); - assertHitCount(client().prepareSearch().setQuery(typeQuery("type2")).get(), 0L); - assertHitCount(client().prepareSearch().setTypes("type1").setQuery(matchAllQuery()).get(), 2L); assertHitCount(client().prepareSearch().setTypes("type2").setQuery(matchAllQuery()).get(), 0L); @@ -502,7 +497,7 @@ public void testIdsQueryTestsIdIndexed() throws Exception { client().prepareIndex("test", "type1", "2").setSource("field1", "value2"), client().prepareIndex("test", "type1", "3").setSource("field1", "value3")); - SearchResponse searchResponse = client().prepareSearch().setQuery(constantScoreQuery(idsQuery("type1").addIds("1", "3"))).get(); + SearchResponse searchResponse = client().prepareSearch().setQuery(constantScoreQuery(idsQuery().addIds("1", "3"))).get(); assertHitCount(searchResponse, 2L); assertSearchHits(searchResponse, "1", "3"); @@ -511,7 +506,7 @@ public void testIdsQueryTestsIdIndexed() throws Exception { assertHitCount(searchResponse, 2L); assertSearchHits(searchResponse, "1", "3"); - searchResponse = client().prepareSearch().setQuery(idsQuery("type1").addIds("1", "3")).get(); + searchResponse = client().prepareSearch().setQuery(idsQuery().addIds("1", "3")).get(); assertHitCount(searchResponse, 2L); assertSearchHits(searchResponse, "1", "3"); @@ -520,7 +515,7 @@ public void testIdsQueryTestsIdIndexed() throws Exception { assertHitCount(searchResponse, 2L); assertSearchHits(searchResponse, "1", "3"); - searchResponse = client().prepareSearch().setQuery(idsQuery("type1").addIds("7", "10")).get(); + searchResponse = client().prepareSearch().setQuery(idsQuery().addIds("7", "10")).get(); assertHitCount(searchResponse, 0L); // repeat..., with terms @@ -1156,7 +1151,7 @@ public void testBasicQueryById() throws Exception { client().prepareIndex("test", "_doc", "3").setSource("field1", "value3").get(); refresh(); - SearchResponse searchResponse = client().prepareSearch().setQuery(idsQuery("_doc").addIds("1", "2")).get(); + SearchResponse searchResponse = client().prepareSearch().setQuery(idsQuery().addIds("1", "2")).get(); assertHitCount(searchResponse, 2L); assertThat(searchResponse.getHits().getHits().length, equalTo(2)); @@ -1168,11 +1163,11 @@ public void testBasicQueryById() throws Exception { assertHitCount(searchResponse, 2L); assertThat(searchResponse.getHits().getHits().length, equalTo(2)); - searchResponse = client().prepareSearch().setQuery(idsQuery(Strings.EMPTY_ARRAY).addIds("1")).get(); + searchResponse = client().prepareSearch().setQuery(idsQuery().addIds("1")).get(); assertHitCount(searchResponse, 1L); assertThat(searchResponse.getHits().getHits().length, equalTo(1)); - searchResponse = client().prepareSearch().setQuery(idsQuery("type1", "type2", "_doc").addIds("1", "2", "3", "4")).get(); + searchResponse = client().prepareSearch().setQuery(idsQuery().addIds("1", "2", "3", "4")).get(); assertHitCount(searchResponse, 3L); assertThat(searchResponse.getHits().getHits().length, equalTo(3)); } From 96094974e28da1c480ca4b7fb4ad40ef8ba1654e Mon Sep 17 00:00:00 2001 From: Nikita Glashenko Date: Wed, 22 May 2019 13:49:27 +0400 Subject: [PATCH 031/224] Fix TopHitsAggregationBuilder adding duplicate _score sort clauses (#42179) When using High Level Rest Client Java API to produce search query, using AggregationBuilders.topHits("th").sort("_score", SortOrder.DESC) caused query to contain duplicate sort clauses. --- .../aggregations/metrics/TopHitsAggregationBuilder.java | 6 ++++-- .../aggregations/metrics/TopHitsAggregatorTests.java | 7 ++++++- 2 files changed, 10 insertions(+), 3 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/TopHitsAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/TopHitsAggregationBuilder.java index 43bde648657ee..019fec82d0df4 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/TopHitsAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/TopHitsAggregationBuilder.java @@ -232,8 +232,9 @@ public TopHitsAggregationBuilder sort(String name, SortOrder order) { } if (name.equals(ScoreSortBuilder.NAME)) { sort(SortBuilders.scoreSort().order(order)); + } else { + sort(SortBuilders.fieldSort(name).order(order)); } - sort(SortBuilders.fieldSort(name).order(order)); return this; } @@ -249,8 +250,9 @@ public TopHitsAggregationBuilder sort(String name) { } if (name.equals(ScoreSortBuilder.NAME)) { sort(SortBuilders.scoreSort()); + } else { + sort(SortBuilders.fieldSort(name)); } - sort(SortBuilders.fieldSort(name)); return this; } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsAggregatorTests.java index 585cd7f9ff434..b087909757335 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsAggregatorTests.java @@ -49,7 +49,6 @@ import org.elasticsearch.search.aggregations.AggregationBuilders; import org.elasticsearch.search.aggregations.AggregatorTestCase; import org.elasticsearch.search.aggregations.bucket.terms.Terms; -import org.elasticsearch.search.aggregations.metrics.TopHits; import org.elasticsearch.search.aggregations.support.AggregationInspectionHelper; import org.elasticsearch.search.sort.SortOrder; @@ -207,4 +206,10 @@ public void testSetScorer() throws Exception { reader.close(); directory.close(); } + + public void testSortByScore() throws Exception { + // just check that it does not fail with exceptions + testCase(new MatchAllDocsQuery(), topHits("_name").sort("_score", SortOrder.DESC)); + testCase(new MatchAllDocsQuery(), topHits("_name").sort("_score")); + } } From d22844208b228f7f3240d7dfcd44d726c2366624 Mon Sep 17 00:00:00 2001 From: Simon Willnauer Date: Wed, 22 May 2019 11:54:28 +0200 Subject: [PATCH 032/224] Remove IndexShard dependency from Repository (#42213) * Remove IndexShard dependency from Repository In order to simplify repository testing especially for BlobStoreRepository it's important to remove the dependency on IndexShard and reduce it to Store and MapperService (in the snapshot case). This significantly reduces the dependcy footprint for Repository and allows unittesting without starting nodes or instantiate entire shard instances. This change deprecates the old method signatures and adds a unittest for FileRepository to show the advantage of this change. In addition, the unittesting surfaced a bug where the internal file names that are private to the repository were used in the recovery stats instead of the target file names which makes it impossible to relate to the actual lucene files in the recovery stats. * don't delegate deprecated methods * apply comments * test --- .../index/shard/StoreRecovery.java | 3 +- .../repositories/FilterRepository.java | 15 +- .../repositories/Repository.java | 52 ++++- .../blobstore/BlobStoreRepository.java | 25 +-- .../blobstore/FileRestoreContext.java | 23 +- .../snapshots/SnapshotShardsService.java | 3 +- .../index/shard/IndexShardTests.java | 4 +- .../RepositoriesServiceTests.java | 10 +- .../repositories/fs/FsRepositoryTests.java | 201 ++++++++++++++++++ .../index/shard/IndexShardTestCase.java | 13 +- .../index/shard/RestoreOnlyRepository.java | 5 +- .../xpack/ccr/repository/CcrRepository.java | 57 +++-- .../ShardFollowTaskReplicationTests.java | 4 +- .../engine/FollowEngineIndexShardTests.java | 4 +- .../SourceOnlySnapshotRepository.java | 24 ++- .../SourceOnlySnapshotShardTests.java | 17 +- 16 files changed, 353 insertions(+), 107 deletions(-) create mode 100644 server/src/test/java/org/elasticsearch/repositories/fs/FsRepositoryTests.java diff --git a/server/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java b/server/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java index 0e87b9e2357e5..aa49f7ecb60ce 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java +++ b/server/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java @@ -468,7 +468,8 @@ private void restore(final IndexShard indexShard, final Repository repository, f snapshotShardId = new ShardId(indexName, IndexMetaData.INDEX_UUID_NA_VALUE, shardId.id()); } final IndexId indexId = repository.getRepositoryData().resolveIndexId(indexName); - repository.restoreShard(indexShard, restoreSource.snapshot().getSnapshotId(), + assert indexShard.getEngineOrNull() == null; + repository.restoreShard(indexShard, indexShard.store(), restoreSource.snapshot().getSnapshotId(), restoreSource.version(), indexId, snapshotShardId, indexShard.recoveryState()); final Store store = indexShard.store(); store.bootstrapNewHistory(); diff --git a/server/src/main/java/org/elasticsearch/repositories/FilterRepository.java b/server/src/main/java/org/elasticsearch/repositories/FilterRepository.java index afc38bda86c5b..1fa42579617e1 100644 --- a/server/src/main/java/org/elasticsearch/repositories/FilterRepository.java +++ b/server/src/main/java/org/elasticsearch/repositories/FilterRepository.java @@ -27,7 +27,7 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.component.Lifecycle; import org.elasticsearch.common.component.LifecycleListener; -import org.elasticsearch.index.shard.IndexShard; +import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.snapshots.IndexShardSnapshotStatus; import org.elasticsearch.index.store.Store; @@ -119,16 +119,17 @@ public boolean isReadOnly() { return in.isReadOnly(); } + @Override - public void snapshotShard(IndexShard shard, Store store, SnapshotId snapshotId, IndexId indexId, IndexCommit snapshotIndexCommit, - IndexShardSnapshotStatus snapshotStatus) { - in.snapshotShard(shard, store, snapshotId, indexId, snapshotIndexCommit, snapshotStatus); + public void snapshotShard(Store store, MapperService mapperService, SnapshotId snapshotId, IndexId indexId, + IndexCommit snapshotIndexCommit, IndexShardSnapshotStatus snapshotStatus) { + in.snapshotShard(store, mapperService, snapshotId, indexId, snapshotIndexCommit, snapshotStatus); } @Override - public void restoreShard(IndexShard shard, SnapshotId snapshotId, Version version, IndexId indexId, ShardId snapshotShardId, - RecoveryState recoveryState) { - in.restoreShard(shard, snapshotId, version, indexId, snapshotShardId, recoveryState); + public void restoreShard(Store store, SnapshotId snapshotId, + Version version, IndexId indexId, ShardId snapshotShardId, RecoveryState recoveryState) { + in.restoreShard(store, snapshotId, version, indexId, snapshotShardId, recoveryState); } @Override diff --git a/server/src/main/java/org/elasticsearch/repositories/Repository.java b/server/src/main/java/org/elasticsearch/repositories/Repository.java index 20f7c42cb21dd..3aa19cb130cae 100644 --- a/server/src/main/java/org/elasticsearch/repositories/Repository.java +++ b/server/src/main/java/org/elasticsearch/repositories/Repository.java @@ -26,6 +26,7 @@ import org.elasticsearch.cluster.metadata.RepositoryMetaData; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.component.LifecycleComponent; +import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.snapshots.IndexShardSnapshotStatus; @@ -49,7 +50,7 @@ *
    *
  • Master calls {@link #initializeSnapshot(SnapshotId, List, org.elasticsearch.cluster.metadata.MetaData)} * with list of indices that will be included into the snapshot
  • - *
  • Data nodes call {@link Repository#snapshotShard(IndexShard, Store, SnapshotId, IndexId, IndexCommit, IndexShardSnapshotStatus)} + *
  • Data nodes call {@link Repository#snapshotShard(Store, MapperService, SnapshotId, IndexId, IndexCommit, IndexShardSnapshotStatus)} * for each shard
  • *
  • When all shard calls return master calls {@link #finalizeSnapshot} with possible list of failures
  • *
@@ -196,30 +197,69 @@ SnapshotInfo finalizeSnapshot(SnapshotId snapshotId, List indices, long *

* As snapshot process progresses, implementation of this method should update {@link IndexShardSnapshotStatus} object and check * {@link IndexShardSnapshotStatus#isAborted()} to see if the snapshot process should be aborted. - * @param shard shard to be snapshotted + * @param indexShard the shard to be snapshotted + * @param snapshotId snapshot id + * @param indexId id for the index being snapshotted + * @param snapshotIndexCommit commit point + * @param snapshotStatus snapshot status + * @deprecated use {@link #snapshotShard(Store, MapperService, SnapshotId, IndexId, IndexCommit, IndexShardSnapshotStatus)} instead + */ + @Deprecated + default void snapshotShard(IndexShard indexShard, SnapshotId snapshotId, IndexId indexId, IndexCommit snapshotIndexCommit, + IndexShardSnapshotStatus snapshotStatus) { + snapshotShard(indexShard.store(), indexShard.mapperService(), snapshotId, indexId, snapshotIndexCommit, snapshotStatus); + } + + /** + * Creates a snapshot of the shard based on the index commit point. + *

+ * The index commit point can be obtained by using {@link org.elasticsearch.index.engine.Engine#acquireLastIndexCommit} method. + * Repository implementations shouldn't release the snapshot index commit point. It is done by the method caller. + *

+ * As snapshot process progresses, implementation of this method should update {@link IndexShardSnapshotStatus} object and check + * {@link IndexShardSnapshotStatus#isAborted()} to see if the snapshot process should be aborted. * @param store store to be snapshotted + * @param mapperService the shards mapper service * @param snapshotId snapshot id * @param indexId id for the index being snapshotted * @param snapshotIndexCommit commit point * @param snapshotStatus snapshot status */ - void snapshotShard(IndexShard shard, Store store, SnapshotId snapshotId, IndexId indexId, IndexCommit snapshotIndexCommit, + void snapshotShard(Store store, MapperService mapperService, SnapshotId snapshotId, IndexId indexId, IndexCommit snapshotIndexCommit, IndexShardSnapshotStatus snapshotStatus); /** * Restores snapshot of the shard. *

* The index can be renamed on restore, hence different {@code shardId} and {@code snapshotShardId} are supplied. - * * @param shard the shard to restore the index into + * @param store the store to restore the index into + * @param snapshotId snapshot id + * @param version version of elasticsearch that created this snapshot + * @param indexId id of the index in the repository from which the restore is occurring + * @param snapshotShardId shard id (in the snapshot) + * @param recoveryState recovery state + * @deprecated use {@link #restoreShard(Store, SnapshotId, Version, IndexId, ShardId, RecoveryState)} instead + */ + @Deprecated + default void restoreShard(IndexShard shard, Store store, SnapshotId snapshotId, Version version, IndexId indexId, + ShardId snapshotShardId, RecoveryState recoveryState) { + restoreShard(store, snapshotId, version, indexId, snapshotShardId, recoveryState); + } + + /** + * Restores snapshot of the shard. + *

+ * The index can be renamed on restore, hence different {@code shardId} and {@code snapshotShardId} are supplied. + * @param store the store to restore the index into * @param snapshotId snapshot id * @param version version of elasticsearch that created this snapshot * @param indexId id of the index in the repository from which the restore is occurring * @param snapshotShardId shard id (in the snapshot) * @param recoveryState recovery state */ - void restoreShard(IndexShard shard, SnapshotId snapshotId, Version version, IndexId indexId, - ShardId snapshotShardId, RecoveryState recoveryState); + void restoreShard(Store store, SnapshotId snapshotId, Version version, IndexId indexId, ShardId snapshotShardId, + RecoveryState recoveryState); /** * Retrieve shard snapshot status for the stored snapshot diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java index 320b7ff2d5550..86409ebac7d31 100644 --- a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java +++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java @@ -71,7 +71,7 @@ import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.core.internal.io.Streams; -import org.elasticsearch.index.shard.IndexShard; +import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.snapshots.IndexShardRestoreFailedException; import org.elasticsearch.index.snapshots.IndexShardSnapshotException; @@ -793,8 +793,8 @@ private void writeAtomic(final String blobName, final BytesReference bytesRef, b } @Override - public void snapshotShard(IndexShard shard, Store store, SnapshotId snapshotId, IndexId indexId, IndexCommit snapshotIndexCommit, - IndexShardSnapshotStatus snapshotStatus) { + public void snapshotShard(Store store, MapperService mapperService, SnapshotId snapshotId, IndexId indexId, + IndexCommit snapshotIndexCommit, IndexShardSnapshotStatus snapshotStatus) { SnapshotContext snapshotContext = new SnapshotContext(store, snapshotId, indexId, snapshotStatus, System.currentTimeMillis()); try { snapshotContext.snapshot(snapshotIndexCommit); @@ -809,18 +809,19 @@ public void snapshotShard(IndexShard shard, Store store, SnapshotId snapshotId, } @Override - public void restoreShard(IndexShard shard, SnapshotId snapshotId, Version version, IndexId indexId, ShardId snapshotShardId, - RecoveryState recoveryState) { - final Context context = new Context(snapshotId, indexId, shard.shardId(), snapshotShardId); + public void restoreShard(Store store, SnapshotId snapshotId, + Version version, IndexId indexId, ShardId snapshotShardId, RecoveryState recoveryState) { + ShardId shardId = store.shardId(); + final Context context = new Context(snapshotId, indexId, shardId, snapshotShardId); BlobPath path = basePath().add("indices").add(indexId.getId()).add(Integer.toString(snapshotShardId.getId())); BlobContainer blobContainer = blobStore().blobContainer(path); - final RestoreContext snapshotContext = new RestoreContext(shard, snapshotId, recoveryState, blobContainer); + final RestoreContext snapshotContext = new RestoreContext(shardId, snapshotId, recoveryState, blobContainer); try { BlobStoreIndexShardSnapshot snapshot = context.loadSnapshot(); SnapshotFiles snapshotFiles = new SnapshotFiles(snapshot.snapshot(), snapshot.indexFiles()); - snapshotContext.restore(snapshotFiles); + snapshotContext.restore(snapshotFiles, store); } catch (Exception e) { - throw new IndexShardRestoreFailedException(shard.shardId(), "failed to restore snapshot [" + snapshotId + "]", e); + throw new IndexShardRestoreFailedException(shardId, "failed to restore snapshot [" + snapshotId + "]", e); } } @@ -1366,13 +1367,13 @@ private class RestoreContext extends FileRestoreContext { /** * Constructs new restore context - * @param indexShard shard to restore into + * @param shardId shard id to restore into * @param snapshotId snapshot id * @param recoveryState recovery state to report progress * @param blobContainer the blob container to read the files from */ - RestoreContext(IndexShard indexShard, SnapshotId snapshotId, RecoveryState recoveryState, BlobContainer blobContainer) { - super(metadata.name(), indexShard, snapshotId, recoveryState, BUFFER_SIZE); + RestoreContext(ShardId shardId, SnapshotId snapshotId, RecoveryState recoveryState, BlobContainer blobContainer) { + super(metadata.name(), shardId, snapshotId, recoveryState, BUFFER_SIZE); this.blobContainer = blobContainer; } diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/FileRestoreContext.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/FileRestoreContext.java index 1e0ab2dd8beee..f78ddab9ee44c 100644 --- a/server/src/main/java/org/elasticsearch/repositories/blobstore/FileRestoreContext.java +++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/FileRestoreContext.java @@ -31,7 +31,6 @@ import org.apache.lucene.util.BytesRefBuilder; import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.util.iterable.Iterables; -import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.snapshots.IndexShardRestoreFailedException; import org.elasticsearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshot; @@ -64,7 +63,6 @@ public abstract class FileRestoreContext { protected static final Logger logger = LogManager.getLogger(FileRestoreContext.class); protected final String repositoryName; - protected final IndexShard indexShard; protected final RecoveryState recoveryState; protected final SnapshotId snapshotId; protected final ShardId shardId; @@ -73,26 +71,24 @@ public abstract class FileRestoreContext { /** * Constructs new restore context * - * @param indexShard shard to restore into + * @param shardId shard id to restore into * @param snapshotId snapshot id * @param recoveryState recovery state to report progress * @param bufferSize buffer size for restore */ - protected FileRestoreContext(String repositoryName, IndexShard indexShard, SnapshotId snapshotId, RecoveryState recoveryState, + protected FileRestoreContext(String repositoryName, ShardId shardId, SnapshotId snapshotId, RecoveryState recoveryState, int bufferSize) { this.repositoryName = repositoryName; this.recoveryState = recoveryState; - this.indexShard = indexShard; this.snapshotId = snapshotId; - this.shardId = indexShard.shardId(); + this.shardId = shardId; this.bufferSize = bufferSize; } /** * Performs restore operation */ - public void restore(SnapshotFiles snapshotFiles) throws IOException { - final Store store = indexShard.store(); + public void restore(SnapshotFiles snapshotFiles, Store store) throws IOException { store.incRef(); try { logger.debug("[{}] [{}] restoring to [{}] ...", snapshotId, repositoryName, shardId); @@ -108,7 +104,7 @@ public void restore(SnapshotFiles snapshotFiles) throws IOException { // version number and no checksum, even though the index itself is perfectly fine to restore, this // empty shard would cause exceptions to be thrown. Since there is no data to restore from an empty // shard anyway, we just create the empty shard here and then exit. - store.createEmpty(indexShard.indexSettings().getIndexMetaData().getCreationVersion().luceneVersion); + store.createEmpty(store.indexSettings().getIndexVersionCreated().luceneVersion); return; } @@ -117,7 +113,7 @@ public void restore(SnapshotFiles snapshotFiles) throws IOException { // this will throw an IOException if the store has no segments infos file. The // store can still have existing files but they will be deleted just before being // restored. - recoveryTargetMetadata = indexShard.snapshotStoreMetadata(); + recoveryTargetMetadata = store.getMetadata(null, true); } catch (org.apache.lucene.index.IndexNotFoundException e) { // happens when restore to an empty shard, not a big deal logger.trace("[{}] [{}] restoring from to an empty shard", shardId, snapshotId); @@ -127,7 +123,6 @@ public void restore(SnapshotFiles snapshotFiles) throws IOException { shardId, snapshotId), e); recoveryTargetMetadata = Store.MetadataSnapshot.EMPTY; } - final List filesToRecover = new ArrayList<>(); final Map snapshotMetaData = new HashMap<>(); final Map fileInfos = new HashMap<>(); @@ -157,7 +152,7 @@ public void restore(SnapshotFiles snapshotFiles) throws IOException { final Store.RecoveryDiff diff = sourceMetaData.recoveryDiff(recoveryTargetMetadata); for (StoreFileMetaData md : diff.identical) { BlobStoreIndexShardSnapshot.FileInfo fileInfo = fileInfos.get(md.name()); - recoveryState.getIndex().addFileDetail(fileInfo.name(), fileInfo.length(), true); + recoveryState.getIndex().addFileDetail(fileInfo.physicalName(), fileInfo.length(), true); if (logger.isTraceEnabled()) { logger.trace("[{}] [{}] not_recovering file [{}] from [{}], exists in local store and is same", shardId, snapshotId, fileInfo.physicalName(), fileInfo.name()); @@ -167,7 +162,7 @@ public void restore(SnapshotFiles snapshotFiles) throws IOException { for (StoreFileMetaData md : concat(diff)) { BlobStoreIndexShardSnapshot.FileInfo fileInfo = fileInfos.get(md.name()); filesToRecover.add(fileInfo); - recoveryState.getIndex().addFileDetail(fileInfo.name(), fileInfo.length(), false); + recoveryState.getIndex().addFileDetail(fileInfo.physicalName(), fileInfo.length(), false); if (logger.isTraceEnabled()) { logger.trace("[{}] [{}] recovering [{}] from [{}]", shardId, snapshotId, fileInfo.physicalName(), fileInfo.name()); @@ -260,7 +255,7 @@ private void restoreFile(final BlobStoreIndexShardSnapshot.FileInfo fileInfo, fi int length; while ((length = stream.read(buffer)) > 0) { indexOutput.writeBytes(buffer, 0, length); - recoveryState.getIndex().addRecoveredBytesToFile(fileInfo.name(), length); + recoveryState.getIndex().addRecoveredBytesToFile(fileInfo.physicalName(), length); } Store.verify(indexOutput); indexOutput.close(); diff --git a/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java b/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java index a0c5ea9392c67..f79b6da6ef626 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java +++ b/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java @@ -367,8 +367,7 @@ private void snapshot(final IndexShard indexShard, final Snapshot snapshot, fina try { // we flush first to make sure we get the latest writes snapshotted try (Engine.IndexCommitRef snapshotRef = indexShard.acquireLastIndexCommit(true)) { - repository.snapshotShard(indexShard, indexShard.store(), snapshot.getSnapshotId(), indexId, snapshotRef.getIndexCommit(), - snapshotStatus); + repository.snapshotShard(indexShard, snapshot.getSnapshotId(), indexId, snapshotRef.getIndexCommit(), snapshotStatus); if (logger.isDebugEnabled()) { final IndexShardSnapshotStatus.Copy lastSnapshotStatus = snapshotStatus.asCopy(); logger.debug("snapshot ({}) completed to {} with {}", snapshot, repository, lastSnapshotStatus); diff --git a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java index 04ef68852cc3f..1710154f72f94 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java @@ -2300,8 +2300,8 @@ public void testRestoreShard() throws IOException { target.markAsRecovering("store", new RecoveryState(routing, localNode, null)); assertTrue(target.restoreFromRepository(new RestoreOnlyRepository("test") { @Override - public void restoreShard(IndexShard shard, SnapshotId snapshotId, Version version, IndexId indexId, ShardId snapshotShardId, - RecoveryState recoveryState) { + public void restoreShard(Store store, SnapshotId snapshotId, + Version version, IndexId indexId, ShardId snapshotShardId, RecoveryState recoveryState) { try { cleanLuceneIndex(targetStore.directory()); for (String file : sourceStore.directory().listAll()) { diff --git a/server/src/test/java/org/elasticsearch/repositories/RepositoriesServiceTests.java b/server/src/test/java/org/elasticsearch/repositories/RepositoriesServiceTests.java index 505c0628d6aba..ae703795ec622 100644 --- a/server/src/test/java/org/elasticsearch/repositories/RepositoriesServiceTests.java +++ b/server/src/test/java/org/elasticsearch/repositories/RepositoriesServiceTests.java @@ -33,7 +33,7 @@ import org.elasticsearch.common.component.Lifecycle; import org.elasticsearch.common.component.LifecycleListener; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.index.shard.IndexShard; +import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.snapshots.IndexShardSnapshotStatus; import org.elasticsearch.index.store.Store; @@ -200,14 +200,14 @@ public boolean isReadOnly() { } @Override - public void snapshotShard(IndexShard shard, Store store, SnapshotId snapshotId, IndexId indexId, IndexCommit snapshotIndexCommit, - IndexShardSnapshotStatus snapshotStatus) { + public void snapshotShard(Store store, MapperService mapperService, SnapshotId snapshotId, IndexId indexId, IndexCommit + snapshotIndexCommit, IndexShardSnapshotStatus snapshotStatus) { } @Override - public void restoreShard(IndexShard shard, SnapshotId snapshotId, Version version, IndexId indexId, ShardId snapshotShardId, - RecoveryState recoveryState) { + public void restoreShard(Store store, SnapshotId snapshotId, + Version version, IndexId indexId, ShardId snapshotShardId, RecoveryState recoveryState) { } diff --git a/server/src/test/java/org/elasticsearch/repositories/fs/FsRepositoryTests.java b/server/src/test/java/org/elasticsearch/repositories/fs/FsRepositoryTests.java new file mode 100644 index 0000000000000..ec8a444d84fae --- /dev/null +++ b/server/src/test/java/org/elasticsearch/repositories/fs/FsRepositoryTests.java @@ -0,0 +1,201 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.repositories.fs; + +import org.apache.lucene.analysis.MockAnalyzer; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.SortedDocValuesField; +import org.apache.lucene.document.StringField; +import org.apache.lucene.document.TextField; +import org.apache.lucene.index.CodecReader; +import org.apache.lucene.index.FilterMergePolicy; +import org.apache.lucene.index.IndexCommit; +import org.apache.lucene.index.IndexWriter; +import org.apache.lucene.index.NoMergePolicy; +import org.apache.lucene.index.Term; +import org.apache.lucene.store.Directory; +import org.apache.lucene.util.BytesRef; +import org.apache.lucene.util.IOSupplier; +import org.apache.lucene.util.TestUtil; +import org.elasticsearch.Version; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.RepositoryMetaData; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.routing.RecoverySource; +import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.routing.ShardRoutingHelper; +import org.elasticsearch.cluster.routing.UnassignedInfo; +import org.elasticsearch.common.lucene.Lucene; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.ByteSizeUnit; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.env.Environment; +import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.index.snapshots.IndexShardSnapshotStatus; +import org.elasticsearch.index.store.Store; +import org.elasticsearch.indices.recovery.RecoveryState; +import org.elasticsearch.repositories.IndexId; +import org.elasticsearch.snapshots.Snapshot; +import org.elasticsearch.snapshots.SnapshotId; +import org.elasticsearch.test.DummyShardLock; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.IndexSettingsModule; +import org.elasticsearch.threadpool.TestThreadPool; +import org.elasticsearch.threadpool.ThreadPool; + +import java.io.IOException; +import java.nio.file.Path; +import java.util.Collection; +import java.util.Collections; +import java.util.Comparator; +import java.util.List; +import java.util.concurrent.CountDownLatch; +import java.util.stream.Collectors; + +import static java.util.Collections.emptyMap; +import static java.util.Collections.emptySet; + +public class FsRepositoryTests extends ESTestCase { + + public void testSnapshotAndRestore() throws IOException, InterruptedException { + ThreadPool threadPool = new TestThreadPool(getClass().getSimpleName()); + try (Directory directory = newDirectory()) { + Path repo = createTempDir(); + Settings settings = Settings.builder() + .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toAbsolutePath()) + .put(Environment.PATH_REPO_SETTING.getKey(), repo.toAbsolutePath()) + .putList(Environment.PATH_DATA_SETTING.getKey(), tmpPaths()) + .put("location", repo) + .put("compress", randomBoolean()) + .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES).build(); + + int numDocs = indexDocs(directory); + RepositoryMetaData metaData = new RepositoryMetaData("test", "fs", settings); + FsRepository repository = new FsRepository(metaData, new Environment(settings, null), NamedXContentRegistry.EMPTY, threadPool); + repository.start(); + final Settings indexSettings = Settings.builder().put(IndexMetaData.SETTING_INDEX_UUID, "myindexUUID").build(); + IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("myindex", indexSettings); + ShardId shardId = new ShardId(idxSettings.getIndex(), 1); + Store store = new Store(shardId, idxSettings, directory, new DummyShardLock(shardId)); + SnapshotId snapshotId = new SnapshotId("test", "test"); + IndexId indexId = new IndexId(idxSettings.getIndex().getName(), idxSettings.getUUID()); + + IndexCommit indexCommit = Lucene.getIndexCommit(Lucene.readSegmentInfos(store.directory()), store.directory()); + runGeneric(threadPool, () -> { + IndexShardSnapshotStatus snapshotStatus = IndexShardSnapshotStatus.newInitializing(); + repository.snapshotShard(store, null, snapshotId, indexId, indexCommit, + snapshotStatus); + IndexShardSnapshotStatus.Copy copy = snapshotStatus.asCopy(); + assertEquals(copy.getTotalFileCount(), copy.getIncrementalFileCount()); + }); + Lucene.cleanLuceneIndex(directory); + expectThrows(org.apache.lucene.index.IndexNotFoundException.class, () -> Lucene.readSegmentInfos(directory)); + DiscoveryNode localNode = new DiscoveryNode("foo", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT); + ShardRouting routing = ShardRouting.newUnassigned(shardId, true, new RecoverySource.SnapshotRecoverySource("test", + new Snapshot("foo", snapshotId), Version.CURRENT, "myindex"), + new UnassignedInfo(UnassignedInfo.Reason.EXISTING_INDEX_RESTORED, "")); + routing = ShardRoutingHelper.initialize(routing, localNode.getId(), 0); + RecoveryState state = new RecoveryState(routing, localNode, null); + runGeneric(threadPool, () -> + repository.restoreShard(store, snapshotId, Version.CURRENT, indexId, shardId, state)); + assertTrue(state.getIndex().recoveredBytes() > 0); + assertEquals(0, state.getIndex().reusedFileCount()); + assertEquals(indexCommit.getFileNames().size(), state.getIndex().recoveredFileCount()); + assertEquals(numDocs, Lucene.readSegmentInfos(directory).totalMaxDoc()); + deleteRandomDoc(store.directory()); + SnapshotId incSnapshotId = new SnapshotId("test1", "test1"); + IndexCommit incIndexCommit = Lucene.getIndexCommit(Lucene.readSegmentInfos(store.directory()), store.directory()); + Collection commitFileNames = incIndexCommit.getFileNames(); + runGeneric(threadPool, () -> { + IndexShardSnapshotStatus snapshotStatus = IndexShardSnapshotStatus.newInitializing(); + repository.snapshotShard(store, null, incSnapshotId, indexId, incIndexCommit, snapshotStatus); + IndexShardSnapshotStatus.Copy copy = snapshotStatus.asCopy(); + assertEquals(2, copy.getIncrementalFileCount()); + assertEquals(commitFileNames.size(), copy.getTotalFileCount()); + }); + + // roll back to the first snap and then incrementally restore + RecoveryState firstState = new RecoveryState(routing, localNode, null); + runGeneric(threadPool, () -> + repository.restoreShard(store, snapshotId, Version.CURRENT, indexId, shardId, firstState)); + assertEquals("should reuse everything except of .liv and .si", + commitFileNames.size()-2, firstState.getIndex().reusedFileCount()); + + RecoveryState secondState = new RecoveryState(routing, localNode, null); + runGeneric(threadPool, () -> + repository.restoreShard(store, incSnapshotId, Version.CURRENT, indexId, shardId, secondState)); + assertEquals(secondState.getIndex().reusedFileCount(), commitFileNames.size()-2); + assertEquals(secondState.getIndex().recoveredFileCount(), 2); + List recoveredFiles = + secondState.getIndex().fileDetails().stream().filter(f -> f.reused() == false).collect(Collectors.toList()); + Collections.sort(recoveredFiles, Comparator.comparing(RecoveryState.File::name)); + assertTrue(recoveredFiles.get(0).name(), recoveredFiles.get(0).name().endsWith(".liv")); + assertTrue(recoveredFiles.get(1).name(), recoveredFiles.get(1).name().endsWith("segments_2")); + } finally { + terminate(threadPool); + } + } + + private void runGeneric(ThreadPool threadPool, Runnable runnable) throws InterruptedException { + CountDownLatch latch = new CountDownLatch(1); + threadPool.generic().submit(() -> { + try { + runnable.run(); + } finally { + latch.countDown(); + } + }); + latch.await(); + } + + private void deleteRandomDoc(Directory directory) throws IOException { + try(IndexWriter writer = new IndexWriter(directory, newIndexWriterConfig(random(), + new MockAnalyzer(random())).setCodec(TestUtil.getDefaultCodec()).setMergePolicy(new FilterMergePolicy(NoMergePolicy.INSTANCE) { + @Override + public boolean keepFullyDeletedSegment(IOSupplier readerIOSupplier) { + return true; + } + + }))) { + final int numDocs = writer.getDocStats().numDocs; + writer.deleteDocuments(new Term("id", "" + randomIntBetween(0, writer.getDocStats().numDocs-1))); + writer.commit(); + assertEquals(writer.getDocStats().numDocs, numDocs-1); + } + } + + private int indexDocs(Directory directory) throws IOException { + try(IndexWriter writer = new IndexWriter(directory, newIndexWriterConfig(random(), + new MockAnalyzer(random())).setCodec(TestUtil.getDefaultCodec()))) { + int docs = 1 + random().nextInt(100); + for (int i = 0; i < docs; i++) { + Document doc = new Document(); + doc.add(new StringField("id", "" + i, random().nextBoolean() ? Field.Store.YES : Field.Store.NO)); + doc.add(new TextField("body", + TestUtil.randomRealisticUnicodeString(random()), random().nextBoolean() ? Field.Store.YES : Field.Store.NO)); + doc.add(new SortedDocValuesField("dv", new BytesRef(TestUtil.randomRealisticUnicodeString(random())))); + writer.addDocument(doc); + } + writer.commit(); + return docs; + } + } +} diff --git a/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java index 6175a22760029..2a2176f1c100d 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java @@ -797,7 +797,7 @@ protected void flushShard(IndexShard shard, boolean force) { /** Recover a shard from a snapshot using a given repository **/ protected void recoverShardFromSnapshot(final IndexShard shard, final Snapshot snapshot, - final Repository repository) throws IOException { + final Repository repository) { final Version version = Version.CURRENT; final ShardId shardId = shard.shardId(); final String index = shardId.getIndexName(); @@ -806,9 +806,12 @@ protected void recoverShardFromSnapshot(final IndexShard shard, final RecoverySource.SnapshotRecoverySource recoverySource = new RecoverySource.SnapshotRecoverySource(UUIDs.randomBase64UUID(), snapshot, version, index); final ShardRouting shardRouting = newShardRouting(shardId, node.getId(), true, ShardRoutingState.INITIALIZING, recoverySource); - shard.markAsRecovering("from snapshot", new RecoveryState(shardRouting, node, null)); - repository.restoreShard(shard, snapshot.getSnapshotId(), version, indexId, shard.shardId(), shard.recoveryState()); + repository.restoreShard(shard.store(), + snapshot.getSnapshotId(), version, + indexId, + shard.shardId(), + shard.recoveryState()); } /** Snapshot a shard using a given repository **/ @@ -820,8 +823,8 @@ protected void snapshotShard(final IndexShard shard, Index index = shard.shardId().getIndex(); IndexId indexId = new IndexId(index.getName(), index.getUUID()); - repository.snapshotShard(shard, shard.store(), snapshot.getSnapshotId(), indexId, indexCommitRef.getIndexCommit(), - snapshotStatus); + repository.snapshotShard(shard.store(), shard.mapperService(), snapshot.getSnapshotId(), indexId, + indexCommitRef.getIndexCommit(), snapshotStatus); } final IndexShardSnapshotStatus.Copy lastSnapshotStatus = snapshotStatus.asCopy(); diff --git a/test/framework/src/main/java/org/elasticsearch/index/shard/RestoreOnlyRepository.java b/test/framework/src/main/java/org/elasticsearch/index/shard/RestoreOnlyRepository.java index bc60b4c194622..2279b48c3c023 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/shard/RestoreOnlyRepository.java +++ b/test/framework/src/main/java/org/elasticsearch/index/shard/RestoreOnlyRepository.java @@ -26,6 +26,7 @@ import org.elasticsearch.cluster.metadata.RepositoryMetaData; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.component.AbstractLifecycleComponent; +import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.snapshots.IndexShardSnapshotStatus; import org.elasticsearch.index.store.Store; import org.elasticsearch.repositories.IndexId; @@ -133,8 +134,8 @@ public boolean isReadOnly() { } @Override - public void snapshotShard(IndexShard shard, Store store, SnapshotId snapshotId, IndexId indexId, IndexCommit snapshotIndexCommit, - IndexShardSnapshotStatus snapshotStatus) { + public void snapshotShard(Store store, MapperService mapperService, SnapshotId snapshotId, IndexId indexId, + IndexCommit snapshotIndexCommit, IndexShardSnapshotStatus snapshotStatus) { } @Override diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/repository/CcrRepository.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/repository/CcrRepository.java index 5a0472339c192..3010f90b803e9 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/repository/CcrRepository.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/repository/CcrRepository.java @@ -42,10 +42,10 @@ import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.index.Index; import org.elasticsearch.index.engine.EngineException; +import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.seqno.LocalCheckpointTracker; import org.elasticsearch.index.seqno.RetentionLeaseAlreadyExistsException; import org.elasticsearch.index.seqno.RetentionLeaseNotFoundException; -import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.IndexShardRecoveryException; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.snapshots.IndexShardRestoreFailedException; @@ -294,18 +294,19 @@ public boolean isReadOnly() { } @Override - public void snapshotShard(IndexShard shard, Store store, SnapshotId snapshotId, IndexId indexId, IndexCommit snapshotIndexCommit, - IndexShardSnapshotStatus snapshotStatus) { + public void snapshotShard(Store store, MapperService mapperService, SnapshotId snapshotId, IndexId indexId, + IndexCommit snapshotIndexCommit, IndexShardSnapshotStatus snapshotStatus) { throw new UnsupportedOperationException("Unsupported for repository of type: " + TYPE); } @Override - public void restoreShard(IndexShard indexShard, SnapshotId snapshotId, Version version, IndexId indexId, ShardId shardId, - RecoveryState recoveryState) { + public void restoreShard(Store store, SnapshotId snapshotId, + Version version, IndexId indexId, ShardId snapshotShardId, RecoveryState recoveryState) { // TODO: Add timeouts to network calls / the restore process. - createEmptyStore(indexShard, shardId); + createEmptyStore(store); + ShardId shardId = store.shardId(); - final Map ccrMetaData = indexShard.indexSettings().getIndexMetaData().getCustomData(Ccr.CCR_CUSTOM_METADATA_KEY); + final Map ccrMetaData = store.indexSettings().getIndexMetaData().getCustomData(Ccr.CCR_CUSTOM_METADATA_KEY); final String leaderIndexName = ccrMetaData.get(Ccr.CCR_CUSTOM_METADATA_LEADER_INDEX_NAME_KEY); final String leaderUUID = ccrMetaData.get(Ccr.CCR_CUSTOM_METADATA_LEADER_INDEX_UUID_KEY); final Index leaderIndex = new Index(leaderIndexName, leaderUUID); @@ -314,14 +315,14 @@ public void restoreShard(IndexShard indexShard, SnapshotId snapshotId, Version v final Client remoteClient = getRemoteClusterClient(); final String retentionLeaseId = - retentionLeaseId(localClusterName, indexShard.shardId().getIndex(), remoteClusterAlias, leaderIndex); + retentionLeaseId(localClusterName, shardId.getIndex(), remoteClusterAlias, leaderIndex); acquireRetentionLeaseOnLeader(shardId, retentionLeaseId, leaderShardId, remoteClient); // schedule renewals to run during the restore final Scheduler.Cancellable renewable = threadPool.scheduleWithFixedDelay( () -> { - logger.trace("{} background renewal of retention lease [{}] during restore", indexShard.shardId(), retentionLeaseId); + logger.trace("{} background renewal of retention lease [{}] during restore", shardId, retentionLeaseId); final ThreadContext threadContext = threadPool.getThreadContext(); try (ThreadContext.StoredContext ignore = threadContext.stashContext()) { // we have to execute under the system context so that if security is enabled the renewal is authorized @@ -336,36 +337,34 @@ public void restoreShard(IndexShard indexShard, SnapshotId snapshotId, Version v e -> { assert e instanceof ElasticsearchSecurityException == false : e; logger.warn(new ParameterizedMessage( - "{} background renewal of retention lease [{}] failed during restore", - indexShard.shardId(), - retentionLeaseId), - e); + "{} background renewal of retention lease [{}] failed during restore", shardId, + retentionLeaseId), e); })); } }, - CcrRetentionLeases.RETENTION_LEASE_RENEW_INTERVAL_SETTING.get(indexShard.indexSettings().getNodeSettings()), + CcrRetentionLeases.RETENTION_LEASE_RENEW_INTERVAL_SETTING.get(store.indexSettings().getNodeSettings()), Ccr.CCR_THREAD_POOL_NAME); // TODO: There should be some local timeout. And if the remote cluster returns an unknown session // response, we should be able to retry by creating a new session. - try (RestoreSession restoreSession = openSession(metadata.name(), remoteClient, leaderShardId, indexShard, recoveryState)) { - restoreSession.restoreFiles(); - updateMappings(remoteClient, leaderIndex, restoreSession.mappingVersion, client, indexShard.routingEntry().index()); + try (RestoreSession restoreSession = openSession(metadata.name(), remoteClient, leaderShardId, shardId, recoveryState)) { + restoreSession.restoreFiles(store); + updateMappings(remoteClient, leaderIndex, restoreSession.mappingVersion, client, shardId.getIndex()); } catch (Exception e) { - throw new IndexShardRestoreFailedException(indexShard.shardId(), "failed to restore snapshot [" + snapshotId + "]", e); + throw new IndexShardRestoreFailedException(shardId, "failed to restore snapshot [" + snapshotId + "]", e); } finally { - logger.trace("{} canceling background renewal of retention lease [{}] at the end of restore", shardId, retentionLeaseId); + logger.trace("{} canceling background renewal of retention lease [{}] at the end of restore", shardId, + retentionLeaseId); renewable.cancel(); } } - private void createEmptyStore(final IndexShard indexShard, final ShardId shardId) { - final Store store = indexShard.store(); + private void createEmptyStore(Store store) { store.incRef(); try { - store.createEmpty(indexShard.indexSettings().getIndexMetaData().getCreationVersion().luceneVersion); + store.createEmpty(store.indexSettings().getIndexVersionCreated().luceneVersion); } catch (final EngineException | IOException e) { - throw new IndexShardRecoveryException(shardId, "failed to create empty store", e); + throw new IndexShardRecoveryException(store.shardId(), "failed to create empty store", e); } finally { store.decRef(); } @@ -432,12 +431,12 @@ private void updateMappings(Client leaderClient, Index leaderIndex, long leaderM } } - RestoreSession openSession(String repositoryName, Client remoteClient, ShardId leaderShardId, IndexShard indexShard, + RestoreSession openSession(String repositoryName, Client remoteClient, ShardId leaderShardId, ShardId indexShardId, RecoveryState recoveryState) { String sessionUUID = UUIDs.randomBase64UUID(); PutCcrRestoreSessionAction.PutCcrRestoreSessionResponse response = remoteClient.execute(PutCcrRestoreSessionAction.INSTANCE, new PutCcrRestoreSessionRequest(sessionUUID, leaderShardId)).actionGet(ccrSettings.getRecoveryActionTimeout()); - return new RestoreSession(repositoryName, remoteClient, sessionUUID, response.getNode(), indexShard, recoveryState, + return new RestoreSession(repositoryName, remoteClient, sessionUUID, response.getNode(), indexShardId, recoveryState, response.getStoreFileMetaData(), response.getMappingVersion(), threadPool, ccrSettings, throttledTime::inc); } @@ -452,10 +451,10 @@ private static class RestoreSession extends FileRestoreContext implements Closea private final LongConsumer throttleListener; private final ThreadPool threadPool; - RestoreSession(String repositoryName, Client remoteClient, String sessionUUID, DiscoveryNode node, IndexShard indexShard, + RestoreSession(String repositoryName, Client remoteClient, String sessionUUID, DiscoveryNode node, ShardId shardId, RecoveryState recoveryState, Store.MetadataSnapshot sourceMetaData, long mappingVersion, ThreadPool threadPool, CcrSettings ccrSettings, LongConsumer throttleListener) { - super(repositoryName, indexShard, SNAPSHOT_ID, recoveryState, Math.toIntExact(ccrSettings.getChunkSize().getBytes())); + super(repositoryName, shardId, SNAPSHOT_ID, recoveryState, Math.toIntExact(ccrSettings.getChunkSize().getBytes())); this.remoteClient = remoteClient; this.sessionUUID = sessionUUID; this.node = node; @@ -466,14 +465,14 @@ private static class RestoreSession extends FileRestoreContext implements Closea this.throttleListener = throttleListener; } - void restoreFiles() throws IOException { + void restoreFiles(Store store) throws IOException { ArrayList fileInfos = new ArrayList<>(); for (StoreFileMetaData fileMetaData : sourceMetaData) { ByteSizeValue fileSize = new ByteSizeValue(fileMetaData.length()); fileInfos.add(new FileInfo(fileMetaData.name(), fileMetaData, fileSize)); } SnapshotFiles snapshotFiles = new SnapshotFiles(LATEST, fileInfos); - restore(snapshotFiles); + restore(snapshotFiles, store); } @Override diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowTaskReplicationTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowTaskReplicationTests.java index c5a357c7df817..abef313d0b017 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowTaskReplicationTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowTaskReplicationTests.java @@ -447,8 +447,8 @@ protected synchronized void recoverPrimary(IndexShard primary) { primary.markAsRecovering("remote recovery from leader", new RecoveryState(routing, localNode, null)); primary.restoreFromRepository(new RestoreOnlyRepository(index.getName()) { @Override - public void restoreShard(IndexShard shard, SnapshotId snapshotId, Version version, - IndexId indexId, ShardId snapshotShardId, RecoveryState recoveryState) { + public void restoreShard(Store store, SnapshotId snapshotId, + Version version, IndexId indexId, ShardId snapshotShardId, RecoveryState recoveryState) { try { IndexShard leader = leaderGroup.getPrimary(); Lucene.cleanLuceneIndex(primary.store().directory()); diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/index/engine/FollowEngineIndexShardTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/index/engine/FollowEngineIndexShardTests.java index 947ce78da2ca3..f8260f2fce57c 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/index/engine/FollowEngineIndexShardTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/index/engine/FollowEngineIndexShardTests.java @@ -127,8 +127,8 @@ public void testRestoreShard() throws IOException { target.markAsRecovering("store", new RecoveryState(routing, localNode, null)); assertTrue(target.restoreFromRepository(new RestoreOnlyRepository("test") { @Override - public void restoreShard(IndexShard shard, SnapshotId snapshotId, Version version, IndexId indexId, ShardId snapshotShardId, - RecoveryState recoveryState) { + public void restoreShard(Store store, SnapshotId snapshotId, + Version version, IndexId indexId, ShardId snapshotShardId, RecoveryState recoveryState) { try { cleanLuceneIndex(targetStore.directory()); for (String file : sourceStore.directory().listAll()) { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/snapshots/SourceOnlySnapshotRepository.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/snapshots/SourceOnlySnapshotRepository.java index d7f70cf8ef2e1..bb5819e1bda43 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/snapshots/SourceOnlySnapshotRepository.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/snapshots/SourceOnlySnapshotRepository.java @@ -10,7 +10,9 @@ import org.apache.lucene.index.IndexCommit; import org.apache.lucene.index.SegmentInfos; import org.apache.lucene.search.Query; +import org.apache.lucene.store.Directory; import org.apache.lucene.store.FSDirectory; +import org.apache.lucene.store.FilterDirectory; import org.apache.lucene.store.SimpleFSDirectory; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MappingMetaData; @@ -24,8 +26,7 @@ import org.elasticsearch.env.ShardLock; import org.elasticsearch.index.engine.EngineFactory; import org.elasticsearch.index.engine.ReadOnlyEngine; -import org.elasticsearch.index.shard.IndexShard; -import org.elasticsearch.index.shard.ShardPath; +import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.snapshots.IndexShardSnapshotStatus; import org.elasticsearch.index.store.Store; import org.elasticsearch.index.translog.TranslogStats; @@ -104,15 +105,18 @@ public void initializeSnapshot(SnapshotId snapshotId, List indices, Met } @Override - public void snapshotShard(IndexShard shard, Store store, SnapshotId snapshotId, IndexId indexId, IndexCommit snapshotIndexCommit, - IndexShardSnapshotStatus snapshotStatus) { - if (shard.mapperService().documentMapper() != null // if there is no mapping this is null - && shard.mapperService().documentMapper().sourceMapper().isComplete() == false) { + public void snapshotShard(Store store, MapperService mapperService, SnapshotId snapshotId, IndexId indexId, + IndexCommit snapshotIndexCommit, IndexShardSnapshotStatus snapshotStatus) { + if (mapperService.documentMapper() != null // if there is no mapping this is null + && mapperService.documentMapper().sourceMapper().isComplete() == false) { throw new IllegalStateException("Can't snapshot _source only on an index that has incomplete source ie. has _source disabled " + "or filters the source"); } - ShardPath shardPath = shard.shardPath(); - Path dataPath = shardPath.getDataPath(); + Directory unwrap = FilterDirectory.unwrap(store.directory()); + if (unwrap instanceof FSDirectory == false) { + throw new AssertionError("expected FSDirectory but got " + unwrap.toString()); + } + Path dataPath = ((FSDirectory) unwrap).getDirectory().getParent(); // TODO should we have a snapshot tmp directory per shard that is maintained by the system? Path snapPath = dataPath.resolve(SNAPSHOT_DIR_NAME); try (FSDirectory directory = new SimpleFSDirectory(snapPath)) { @@ -122,7 +126,7 @@ protected void closeInternal() { // do nothing; } }, Store.OnClose.EMPTY); - Supplier querySupplier = shard.mapperService().hasNested() ? Queries::newNestedFilter : null; + Supplier querySupplier = mapperService.hasNested() ? Queries::newNestedFilter : null; // SourceOnlySnapshot will take care of soft- and hard-deletes no special casing needed here SourceOnlySnapshot snapshot = new SourceOnlySnapshot(tempStore.directory(), querySupplier); snapshot.syncSnapshot(snapshotIndexCommit); @@ -133,7 +137,7 @@ protected void closeInternal() { store.incRef(); try (DirectoryReader reader = DirectoryReader.open(tempStore.directory())) { IndexCommit indexCommit = reader.getIndexCommit(); - super.snapshotShard(shard, tempStore, snapshotId, indexId, indexCommit, snapshotStatus); + super.snapshotShard(tempStore, mapperService, snapshotId, indexId, indexCommit, snapshotStatus); } finally { store.decRef(); } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/snapshots/SourceOnlySnapshotShardTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/snapshots/SourceOnlySnapshotShardTests.java index 6a37e8265c096..948503b33478c 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/snapshots/SourceOnlySnapshotShardTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/snapshots/SourceOnlySnapshotShardTests.java @@ -98,7 +98,7 @@ public void testSourceIncomplete() throws IOException { IndexShardSnapshotStatus indexShardSnapshotStatus = IndexShardSnapshotStatus.newInitializing(); IllegalStateException illegalStateException = expectThrows(IllegalStateException.class, () -> runAsSnapshot(shard.getThreadPool(), - () -> repository.snapshotShard(shard, shard.store(), snapshotId, indexId, + () -> repository.snapshotShard(shard.store(), shard.mapperService(), snapshotId, indexId, snapshotRef.getIndexCommit(), indexShardSnapshotStatus))); assertEquals("Can't snapshot _source only on an index that has incomplete source ie. has _source disabled or filters the source" , illegalStateException.getMessage()); @@ -120,8 +120,8 @@ public void testIncrementalSnapshot() throws IOException { try (Engine.IndexCommitRef snapshotRef = shard.acquireLastIndexCommit(true)) { IndexShardSnapshotStatus indexShardSnapshotStatus = IndexShardSnapshotStatus.newInitializing(); SnapshotId snapshotId = new SnapshotId("test", "test"); - runAsSnapshot(shard.getThreadPool(), () -> repository.snapshotShard(shard, shard.store(), snapshotId, indexId, snapshotRef - .getIndexCommit(), indexShardSnapshotStatus)); + runAsSnapshot(shard.getThreadPool(), () -> repository.snapshotShard(shard.store(), shard.mapperService(), snapshotId, indexId, + snapshotRef.getIndexCommit(), indexShardSnapshotStatus)); IndexShardSnapshotStatus.Copy copy = indexShardSnapshotStatus.asCopy(); assertEquals(copy.getTotalFileCount(), copy.getIncrementalFileCount()); totalFileCount = copy.getTotalFileCount(); @@ -134,8 +134,8 @@ public void testIncrementalSnapshot() throws IOException { SnapshotId snapshotId = new SnapshotId("test_1", "test_1"); IndexShardSnapshotStatus indexShardSnapshotStatus = IndexShardSnapshotStatus.newInitializing(); - runAsSnapshot(shard.getThreadPool(), () -> repository.snapshotShard(shard, shard.store(), snapshotId, indexId, snapshotRef - .getIndexCommit(), indexShardSnapshotStatus)); + runAsSnapshot(shard.getThreadPool(), () -> repository.snapshotShard(shard.store(), shard.mapperService(), snapshotId, indexId, + snapshotRef.getIndexCommit(), indexShardSnapshotStatus)); IndexShardSnapshotStatus.Copy copy = indexShardSnapshotStatus.asCopy(); // we processed the segments_N file plus _1.si, _1.fdx, _1.fnm, _1.fdt assertEquals(5, copy.getIncrementalFileCount()); @@ -148,8 +148,8 @@ public void testIncrementalSnapshot() throws IOException { SnapshotId snapshotId = new SnapshotId("test_2", "test_2"); IndexShardSnapshotStatus indexShardSnapshotStatus = IndexShardSnapshotStatus.newInitializing(); - runAsSnapshot(shard.getThreadPool(), () -> repository.snapshotShard(shard, shard.store(), snapshotId, indexId, snapshotRef - .getIndexCommit(), indexShardSnapshotStatus)); + runAsSnapshot(shard.getThreadPool(), () -> repository.snapshotShard(shard.store(), shard.mapperService(), snapshotId, indexId, + snapshotRef.getIndexCommit(), indexShardSnapshotStatus)); IndexShardSnapshotStatus.Copy copy = indexShardSnapshotStatus.asCopy(); // we processed the segments_N file plus _1_1.liv assertEquals(2, copy.getIncrementalFileCount()); @@ -197,7 +197,8 @@ public void testRestoreMinmal() throws IOException { repository.initializeSnapshot(snapshotId, Arrays.asList(indexId), MetaData.builder().put(shard.indexSettings() .getIndexMetaData(), false).build()); - repository.snapshotShard(shard, shard.store(), snapshotId, indexId, snapshotRef.getIndexCommit(), indexShardSnapshotStatus); + repository.snapshotShard(shard.store(), shard.mapperService(), snapshotId, indexId, snapshotRef.getIndexCommit(), + indexShardSnapshotStatus); }); IndexShardSnapshotStatus.Copy copy = indexShardSnapshotStatus.asCopy(); assertEquals(copy.getTotalFileCount(), copy.getIncrementalFileCount()); From 8918dd1f8641f04b16433b7e1fa035bf713b2a26 Mon Sep 17 00:00:00 2001 From: Ioannis Kakavas Date: Wed, 22 May 2019 13:20:18 +0300 Subject: [PATCH 033/224] Fail early when rp.client_secret is missing in OIDC realm (#42256) rp.client_secret is a required secure setting. Make sure we fail with a SettingsException and a clear, actionable message when building the realm, if the setting is missing. --- .../authc/oidc/OpenIdConnectRealm.java | 4 +++ .../authc/SecurityRealmSettingsTests.java | 8 ++++- .../oidc/OpenIdConnectRealmSettingsTests.java | 36 +++++++++++++++++++ .../authc/oidc/OpenIdConnectRealmTests.java | 18 +++++++--- .../authc/oidc/OpenIdConnectTestCase.java | 11 +++++- 5 files changed, 70 insertions(+), 7 deletions(-) diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectRealm.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectRealm.java index 5f876a677d689..ac933dcfef878 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectRealm.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectRealm.java @@ -247,6 +247,10 @@ private RelyingPartyConfiguration buildRelyingPartyConfiguration(RealmConfig con } final ClientID clientId = new ClientID(require(config, RP_CLIENT_ID)); final SecureString clientSecret = config.getSetting(RP_CLIENT_SECRET); + if (clientSecret.length() == 0) { + throw new SettingsException("The configuration setting [" + RealmSettings.getFullSettingKey(config, RP_CLIENT_SECRET) + + "] is required"); + } final ResponseType responseType; try { // This should never happen as it's already validated in the settings diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/SecurityRealmSettingsTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/SecurityRealmSettingsTests.java index bccee36631e3d..b9a557320e3e1 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/SecurityRealmSettingsTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/SecurityRealmSettingsTests.java @@ -8,6 +8,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.MockSecureSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.test.SecurityIntegTestCase; import org.elasticsearch.xpack.core.security.action.user.AuthenticateAction; @@ -52,8 +53,12 @@ protected Settings nodeSettings(int nodeOrdinal) { final Path jwkSet = createTempFile("jwkset", "json"); OpenIdConnectTestCase.writeJwkSetToFile(jwkSet); + final Settings existingSettings = super.nodeSettings(nodeOrdinal); + MockSecureSettings mockSecureSettings = + (MockSecureSettings) Settings.builder().put(existingSettings).getSecureSettings(); + mockSecureSettings.setString("xpack.security.authc.realms.oidc.oidc1.rp.client_secret", randomAlphaOfLength(12)); settings = Settings.builder() - .put(super.nodeSettings(nodeOrdinal).filter(s -> s.startsWith("xpack.security.authc.realms.") == false)) + .put(existingSettings.filter(s -> s.startsWith("xpack.security.authc.realms.") == false), false) .put("xpack.security.authc.token.enabled", true) .put("xpack.security.authc.realms.file.file1.order", 1) .put("xpack.security.authc.realms.native.native1.order", 2) @@ -80,6 +85,7 @@ protected Settings nodeSettings(int nodeOrdinal) { .put("xpack.security.authc.realms.oidc.oidc1.rp.client_id", "my_client") .put("xpack.security.authc.realms.oidc.oidc1.rp.response_type", "code") .put("xpack.security.authc.realms.oidc.oidc1.claims.principal", "sub") + .setSecureSettings(mockSecureSettings) .build(); } catch (IOException e) { throw new RuntimeException(e); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectRealmSettingsTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectRealmSettingsTests.java index 8dbf27070c492..341cf07b0dd7b 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectRealmSettingsTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectRealmSettingsTests.java @@ -6,6 +6,7 @@ package org.elasticsearch.xpack.security.authc.oidc; +import org.elasticsearch.common.settings.MockSecureSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.SettingsException; import org.elasticsearch.common.util.concurrent.ThreadContext; @@ -42,6 +43,7 @@ public void testIncorrectResponseTypeThrowsError() { .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_REDIRECT_URI), "https://rp.my.com") .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_CLIENT_ID), "rp-my") .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_RESPONSE_TYPE), "hybrid"); + settingsBuilder.setSecureSettings(getSecureSettings()); IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> { new OpenIdConnectRealm(buildConfig(settingsBuilder.build()), null, null); }); @@ -58,6 +60,7 @@ public void testMissingAuthorizationEndpointThrowsError() { .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_REDIRECT_URI), "https://rp.my.com") .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_CLIENT_ID), "rp-my") .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_RESPONSE_TYPE), "code"); + settingsBuilder.setSecureSettings(getSecureSettings()); SettingsException exception = expectThrows(SettingsException.class, () -> { new OpenIdConnectRealm(buildConfig(settingsBuilder.build()), null, null); }); @@ -75,6 +78,7 @@ public void testInvalidAuthorizationEndpointThrowsError() { .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_REDIRECT_URI), "https://rp.my.com") .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_CLIENT_ID), "rp-my") .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_RESPONSE_TYPE), "code"); + settingsBuilder.setSecureSettings(getSecureSettings()); IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> { new OpenIdConnectRealm(buildConfig(settingsBuilder.build()), null, null); }); @@ -91,6 +95,7 @@ public void testMissingTokenEndpointThrowsError() { .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_REDIRECT_URI), "https://rp.my.com") .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_CLIENT_ID), "rp-my") .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_RESPONSE_TYPE), "code"); + settingsBuilder.setSecureSettings(getSecureSettings()); SettingsException exception = expectThrows(SettingsException.class, () -> { new OpenIdConnectRealm(buildConfig(settingsBuilder.build()), null, null); }); @@ -108,6 +113,7 @@ public void testInvalidTokenEndpointThrowsError() { .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_REDIRECT_URI), "https://rp.my.com") .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_CLIENT_ID), "rp-my") .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_RESPONSE_TYPE), "code"); + settingsBuilder.setSecureSettings(getSecureSettings()); IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> { new OpenIdConnectRealm(buildConfig(settingsBuilder.build()), null, null); }); @@ -123,6 +129,7 @@ public void testMissingJwksUrlThrowsError() { .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_REDIRECT_URI), "https://rp.my.com") .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_CLIENT_ID), "rp-my") .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_RESPONSE_TYPE), "code"); + settingsBuilder.setSecureSettings(getSecureSettings()); SettingsException exception = expectThrows(SettingsException.class, () -> { new OpenIdConnectRealm(buildConfig(settingsBuilder.build()), null, null); }); @@ -139,6 +146,7 @@ public void testMissingIssuerThrowsError() { .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_REDIRECT_URI), "https://rp.my.com") .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_CLIENT_ID), "rp-my") .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_RESPONSE_TYPE), "code"); + settingsBuilder.setSecureSettings(getSecureSettings()); SettingsException exception = expectThrows(SettingsException.class, () -> { new OpenIdConnectRealm(buildConfig(settingsBuilder.build()), null, null); }); @@ -155,6 +163,7 @@ public void testMissingRedirectUriThrowsError() { .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.PRINCIPAL_CLAIM.getClaim()), "sub") .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_CLIENT_ID), "rp-my") .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_RESPONSE_TYPE), "code"); + settingsBuilder.setSecureSettings(getSecureSettings()); SettingsException exception = expectThrows(SettingsException.class, () -> { new OpenIdConnectRealm(buildConfig(settingsBuilder.build()), null, null); }); @@ -171,6 +180,7 @@ public void testMissingClientIdThrowsError() { .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.PRINCIPAL_CLAIM.getClaim()), "sub") .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_REDIRECT_URI), "https://rp.my.com") .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_RESPONSE_TYPE), "code"); + settingsBuilder.setSecureSettings(getSecureSettings()); SettingsException exception = expectThrows(SettingsException.class, () -> { new OpenIdConnectRealm(buildConfig(settingsBuilder.build()), null, null); }); @@ -189,6 +199,7 @@ public void testMissingPrincipalClaimThrowsError() { .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_RESPONSE_TYPE), "code") .putList(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_REQUESTED_SCOPES), Arrays.asList("openid", "scope1", "scope2")); + settingsBuilder.setSecureSettings(getSecureSettings()); SettingsException exception = expectThrows(SettingsException.class, () -> { new OpenIdConnectRealm(buildConfig(settingsBuilder.build()), null, null); }); @@ -209,6 +220,7 @@ public void testPatternWithoutSettingThrowsError() { .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_RESPONSE_TYPE), "code") .putList(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_REQUESTED_SCOPES), Arrays.asList("openid", "scope1", "scope2")); + settingsBuilder.setSecureSettings(getSecureSettings()); SettingsException exception = expectThrows(SettingsException.class, () -> { new OpenIdConnectRealm(buildConfig(settingsBuilder.build()), null, null); }); @@ -218,6 +230,30 @@ public void testPatternWithoutSettingThrowsError() { Matchers.containsString(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.NAME_CLAIM.getPattern()))); } + public void testMissingClientSecretThrowsError() { + final Settings.Builder settingsBuilder = Settings.builder() + .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_ISSUER), "https://op.example.com") + .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_JWKSET_PATH), "https://op.example.com/jwks.json") + .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_TOKEN_ENDPOINT), "https://op.example.com/token") + .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_AUTHORIZATION_ENDPOINT), "https://op.example.com/login") + .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.PRINCIPAL_CLAIM.getClaim()), "sub") + .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_REDIRECT_URI), "https://rp.my.com") + .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_CLIENT_ID), "rp-my") + .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_RESPONSE_TYPE), "code"); + SettingsException exception = expectThrows(SettingsException.class, () -> { + new OpenIdConnectRealm(buildConfig(settingsBuilder.build()), null, null); + }); + assertThat(exception.getMessage(), + Matchers.containsString(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_CLIENT_SECRET))); + } + + private MockSecureSettings getSecureSettings() { + MockSecureSettings secureSettings = new MockSecureSettings(); + secureSettings.setString(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_CLIENT_SECRET), + randomAlphaOfLengthBetween(12, 18)); + return secureSettings; + } + private RealmConfig buildConfig(Settings realmSettings) { final Settings settings = Settings.builder() .put("path.home", createTempDir()) diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectRealmTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectRealmTests.java index 151a7e1caea19..162b88224414e 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectRealmTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectRealmTests.java @@ -165,7 +165,8 @@ public void testBuildRelyingPartyConfigWithoutOpenIdScope() { .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_CLIENT_ID), "rp-my") .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_RESPONSE_TYPE), "code") .putList(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_REQUESTED_SCOPES), - Arrays.asList("scope1", "scope2")); + Arrays.asList("scope1", "scope2")) + .setSecureSettings(getSecureSettings()); final OpenIdConnectRealm realm = new OpenIdConnectRealm(buildConfig(settingsBuilder.build(), threadContext), null, null); final OpenIdConnectPrepareAuthenticationResponse response = realm.buildAuthenticationRequestUri(null, null, null); @@ -187,7 +188,8 @@ public void testBuildingAuthenticationRequest() { .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_CLIENT_ID), "rp-my") .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_RESPONSE_TYPE), "code") .putList(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_REQUESTED_SCOPES), - Arrays.asList("openid", "scope1", "scope2")); + Arrays.asList("openid", "scope1", "scope2")) + .setSecureSettings(getSecureSettings()); final OpenIdConnectRealm realm = new OpenIdConnectRealm(buildConfig(settingsBuilder.build(), threadContext), null, null); final OpenIdConnectPrepareAuthenticationResponse response = realm.buildAuthenticationRequestUri(null, null, null); @@ -207,7 +209,9 @@ public void testBuilidingAuthenticationRequestWithDefaultScope() { .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.PRINCIPAL_CLAIM.getClaim()), "sub") .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_REDIRECT_URI), "https://rp.my.com/cb") .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_CLIENT_ID), "rp-my") - .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_RESPONSE_TYPE), "code"); + .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_RESPONSE_TYPE), "code") + .setSecureSettings(getSecureSettings()); + ; final OpenIdConnectRealm realm = new OpenIdConnectRealm(buildConfig(settingsBuilder.build(), threadContext), null, null); final OpenIdConnectPrepareAuthenticationResponse response = realm.buildAuthenticationRequestUri(null, null, null); @@ -237,7 +241,9 @@ public void testBuildingAuthenticationRequestWithExistingStateAndNonce() { .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.PRINCIPAL_CLAIM.getClaim()), "sub") .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_REDIRECT_URI), "https://rp.my.com/cb") .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_CLIENT_ID), "rp-my") - .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_RESPONSE_TYPE), "code"); + .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_RESPONSE_TYPE), "code") + .setSecureSettings(getSecureSettings()); + ; final OpenIdConnectRealm realm = new OpenIdConnectRealm(buildConfig(settingsBuilder.build(), threadContext), null, null); final String state = new State().getValue(); @@ -257,7 +263,9 @@ public void testBuildingAuthenticationRequestWithLoginHint() { .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.PRINCIPAL_CLAIM.getClaim()), "sub") .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_REDIRECT_URI), "https://rp.my.com/cb") .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_CLIENT_ID), "rp-my") - .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_RESPONSE_TYPE), "code"); + .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_RESPONSE_TYPE), "code") + .setSecureSettings(getSecureSettings()); + ; final OpenIdConnectRealm realm = new OpenIdConnectRealm(buildConfig(settingsBuilder.build(), threadContext), null, null); final String state = new State().getValue(); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectTestCase.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectTestCase.java index 9c1c4e981109a..63071a3d1cb40 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectTestCase.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectTestCase.java @@ -12,6 +12,7 @@ import com.nimbusds.jwt.JWTClaimsSet; import com.nimbusds.jwt.SignedJWT; import com.nimbusds.openid.connect.sdk.Nonce; +import org.elasticsearch.common.settings.MockSecureSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.env.Environment; @@ -50,7 +51,15 @@ protected static Settings.Builder getBasicRealmSettings() { .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.PRINCIPAL_CLAIM.getClaim()), "sub") .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.GROUPS_CLAIM.getClaim()), "groups") .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.MAIL_CLAIM.getClaim()), "mail") - .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.NAME_CLAIM.getClaim()), "name"); + .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.NAME_CLAIM.getClaim()), "name") + .setSecureSettings(getSecureSettings()); + } + + protected static MockSecureSettings getSecureSettings() { + MockSecureSettings secureSettings = new MockSecureSettings(); + secureSettings.setString(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_CLIENT_SECRET), + randomAlphaOfLengthBetween(12, 18)); + return secureSettings; } protected JWT generateIdToken(String subject, String audience, String issuer) throws Exception { From 3b67d87bf6d6b23694fadbcb8ab8b0d83ac3905d Mon Sep 17 00:00:00 2001 From: Yannick Welsch Date: Wed, 22 May 2019 12:25:48 +0200 Subject: [PATCH 034/224] Avoid bubbling up failures from a shard that is recovering (#42287) A shard that is undergoing peer recovery is subject to logging warnings of the form org.elasticsearch.action.FailedNodeException: Failed node [XYZ] ... Caused by: org.apache.lucene.index.IndexNotFoundException: no segments* file found in ... These failures are actually harmless, and expected to happen while a peer recovery is ongoing (i.e. there is an IndexShard instance, but no proper IndexCommit just yet). As these failures are currently bubbled up to the master, they cause unnecessary reroutes and confusion amongst users due to being logged as warnings. Closes #40107 --- .../TransportNodesListShardStoreMetaData.java | 14 +++++- .../indices/recovery/IndexRecoveryIT.java | 44 +++++++++++++++++++ 2 files changed, 56 insertions(+), 2 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/indices/store/TransportNodesListShardStoreMetaData.java b/server/src/main/java/org/elasticsearch/indices/store/TransportNodesListShardStoreMetaData.java index bc041b4b322ae..20307af32f4ed 100644 --- a/server/src/main/java/org/elasticsearch/indices/store/TransportNodesListShardStoreMetaData.java +++ b/server/src/main/java/org/elasticsearch/indices/store/TransportNodesListShardStoreMetaData.java @@ -19,6 +19,7 @@ package org.elasticsearch.indices.store; +import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.FailedNodeException; @@ -123,8 +124,17 @@ private StoreFilesMetaData listStoreMetaData(ShardId shardId) throws IOException if (indexService != null) { IndexShard indexShard = indexService.getShardOrNull(shardId.id()); if (indexShard != null) { - exists = true; - return new StoreFilesMetaData(shardId, indexShard.snapshotStoreMetadata()); + try { + final StoreFilesMetaData storeFilesMetaData = new StoreFilesMetaData(shardId, indexShard.snapshotStoreMetadata()); + exists = true; + return storeFilesMetaData; + } catch (org.apache.lucene.index.IndexNotFoundException e) { + logger.trace(new ParameterizedMessage("[{}] node is missing index, responding with empty", shardId), e); + return new StoreFilesMetaData(shardId, Store.MetadataSnapshot.EMPTY); + } catch (IOException e) { + logger.warn(new ParameterizedMessage("[{}] can't read metadata from store, responding with empty", shardId), e); + return new StoreFilesMetaData(shardId, Store.MetadataSnapshot.EMPTY); + } } } // try and see if we an list unallocated diff --git a/server/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java b/server/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java index 0ea8eb8e9b447..4710c59647c25 100644 --- a/server/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java +++ b/server/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java @@ -923,6 +923,50 @@ public void testDoNotInfinitelyWaitForMapping() { assertHitCount(client().prepareSearch().get(), numDocs); } + /** Makes sure the new master does not repeatedly fetch index metadata from recovering replicas */ + public void testOngoingRecoveryAndMasterFailOver() throws Exception { + String indexName = "test"; + internalCluster().startNodes(2); + String nodeWithPrimary = internalCluster().startDataOnlyNode(); + assertAcked(client().admin().indices().prepareCreate(indexName) + .setSettings(Settings.builder() + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) + .put("index.routing.allocation.include._name", nodeWithPrimary))); + MockTransportService transport = (MockTransportService) internalCluster().getInstance(TransportService.class, nodeWithPrimary); + CountDownLatch phase1ReadyBlocked = new CountDownLatch(1); + CountDownLatch allowToCompletePhase1Latch = new CountDownLatch(1); + Semaphore blockRecovery = new Semaphore(1); + transport.addSendBehavior((connection, requestId, action, request, options) -> { + if (PeerRecoveryTargetService.Actions.CLEAN_FILES.equals(action) && blockRecovery.tryAcquire()) { + phase1ReadyBlocked.countDown(); + try { + allowToCompletePhase1Latch.await(); + } catch (InterruptedException e) { + throw new AssertionError(e); + } + } + connection.sendRequest(requestId, action, request, options); + }); + try { + String nodeWithReplica = internalCluster().startDataOnlyNode(); + assertAcked(client().admin().indices().prepareUpdateSettings(indexName).setSettings(Settings.builder() + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1) + .put("index.routing.allocation.include._name", nodeWithPrimary + "," + nodeWithReplica))); + phase1ReadyBlocked.await(); + internalCluster().restartNode(clusterService().state().nodes().getMasterNode().getName(), + new InternalTestCluster.RestartCallback()); + internalCluster().ensureAtLeastNumDataNodes(3); + assertAcked(client().admin().indices().prepareUpdateSettings(indexName).setSettings(Settings.builder() + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 2) + .putNull("index.routing.allocation.include._name"))); + assertFalse(client().admin().cluster().prepareHealth(indexName).setWaitForActiveShards(2).get().isTimedOut()); + } finally { + allowToCompletePhase1Latch.countDown(); + } + ensureGreen(indexName); + } + public void testRecoveryFlushReplica() throws Exception { internalCluster().ensureAtLeastNumDataNodes(3); String indexName = "test-index"; From 7ab59eef11f5f966ae3cad385237a4f8b7ad115f Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Wed, 22 May 2019 12:55:47 +0200 Subject: [PATCH 035/224] Some Cleanup in o.e.i.engine (#42278) * Some Cleanup in o.e.i.engine * Remove dead code and parameters * Reduce visibility in some obvious spots * Add missing `assert`s (not that important here since the methods themselves will probably be dead-code eliminated) but still --- .../elasticsearch/index/engine/Engine.java | 10 +-- .../index/engine/InternalEngine.java | 35 +++++----- .../index/engine/LiveVersionMap.java | 2 +- .../index/engine/LuceneChangesSnapshot.java | 3 +- .../index/engine/ReadOnlyEngine.java | 4 +- .../index/engine/RecoveryCounter.java | 65 ------------------- .../RecoverySourcePruneMergePolicy.java | 3 +- .../elasticsearch/index/engine/Segment.java | 18 ++--- .../index/engine/SegmentsStats.java | 25 ++++--- .../engine/SnapshotFailedEngineException.java | 7 +- .../index/engine/TranslogLeafReader.java | 5 +- .../VersionConflictEngineException.java | 6 +- .../index/engine/FrozenEngine.java | 2 +- 13 files changed, 47 insertions(+), 138 deletions(-) delete mode 100644 server/src/main/java/org/elasticsearch/index/engine/RecoveryCounter.java diff --git a/server/src/main/java/org/elasticsearch/index/engine/Engine.java b/server/src/main/java/org/elasticsearch/index/engine/Engine.java index 63659126f8438..2d210b716d4b7 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/Engine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/Engine.java @@ -911,7 +911,7 @@ private ImmutableOpenMap getSegmentFileSizes(SegmentReader segment map.put(extension, length); } - if (useCompoundFile && directory != null) { + if (useCompoundFile) { try { directory.close(); } catch (IOException e) { @@ -954,8 +954,7 @@ protected Segment[] getSegmentInfo(SegmentInfos lastCommittedSegmentInfos, boole // now, correlate or add the committed ones... if (lastCommittedSegmentInfos != null) { - SegmentInfos infos = lastCommittedSegmentInfos; - for (SegmentCommitInfo info : infos) { + for (SegmentCommitInfo info : lastCommittedSegmentInfos) { Segment segment = segments.get(info.info.name); if (segment == null) { segment = new Segment(info.info.name); @@ -1783,11 +1782,8 @@ public boolean equals(Object o) { CommitId commitId = (CommitId) o; - if (!Arrays.equals(id, commitId.id)) { - return false; - } + return Arrays.equals(id, commitId.id); - return true; } @Override diff --git a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java index 24d1078510c0b..9fb63d0de019d 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java @@ -560,7 +560,7 @@ private String loadTranslogUUIDFromLastCommit() throws IOException { /** * Reads the current stored history ID from the IW commit data. */ - private String loadHistoryUUID(final IndexWriter writer) throws IOException { + private String loadHistoryUUID(final IndexWriter writer) { final String uuid = commitDataAsMap(writer).get(HISTORY_UUID_KEY); if (uuid == null) { throw new IllegalStateException("commit doesn't contain history uuid"); @@ -632,9 +632,8 @@ public GetResult get(Get get, BiFunction search if (operation != null) { // in the case of a already pruned translog generation we might get null here - yet very unlikely final Translog.Index index = (Translog.Index) operation; - TranslogLeafReader reader = new TranslogLeafReader(index, engineConfig - .getIndexSettings().getIndexVersionCreated()); - return new GetResult(new Searcher("realtime_get", new IndexSearcher(reader), reader::close), + TranslogLeafReader reader = new TranslogLeafReader(index); + return new GetResult(new Searcher("realtime_get", new IndexSearcher(reader), reader), new VersionsAndSeqNoResolver.DocIdAndVersion(0, index.version(), index.seqNo(), index.primaryTerm(), reader, 0)); } @@ -753,7 +752,7 @@ private boolean canOptimizeAddDocument(Index index) { + index.getAutoGeneratedIdTimestamp(); switch (index.origin()) { case PRIMARY: - assertPrimaryCanOptimizeAddDocument(index); + assert assertPrimaryCanOptimizeAddDocument(index); return true; case PEER_RECOVERY: case REPLICA: @@ -779,7 +778,7 @@ protected boolean assertPrimaryCanOptimizeAddDocument(final Index index) { private boolean assertIncomingSequenceNumber(final Engine.Operation.Origin origin, final long seqNo) { if (origin == Operation.Origin.PRIMARY) { - assertPrimaryIncomingSequenceNumber(origin, seqNo); + assert assertPrimaryIncomingSequenceNumber(origin, seqNo); } else { // sequence number should be set when operation origin is not primary assert seqNo >= 0 : "recovery or replica ops should have an assigned seq no.; origin: " + origin; @@ -920,7 +919,7 @@ public IndexResult index(Index index) throws IOException { } protected final IndexingStrategy planIndexingAsNonPrimary(Index index) throws IOException { - assertNonPrimaryOrigin(index); + assert assertNonPrimaryOrigin(index); final IndexingStrategy plan; final boolean appendOnlyRequest = canOptimizeAddDocument(index); if (appendOnlyRequest && mayHaveBeenIndexedBefore(index) == false && index.seqNo() > maxSeqNoOfNonAppendOnlyOperations.get()) { @@ -975,13 +974,13 @@ protected IndexingStrategy indexingStrategyForOperation(final Index index) throw } } - protected final IndexingStrategy planIndexingAsPrimary(Index index) throws IOException { + private IndexingStrategy planIndexingAsPrimary(Index index) throws IOException { assert index.origin() == Operation.Origin.PRIMARY : "planing as primary but origin isn't. got " + index.origin(); final IndexingStrategy plan; // resolve an external operation into an internal one which is safe to replay if (canOptimizeAddDocument(index)) { if (mayHaveBeenIndexedBefore(index)) { - plan = IndexingStrategy.overrideExistingAsIfNotThere(1L); + plan = IndexingStrategy.overrideExistingAsIfNotThere(); versionMap.enforceSafeAccess(); } else { plan = IndexingStrategy.optimizedAppendOnly(1L); @@ -1003,7 +1002,7 @@ protected final IndexingStrategy planIndexingAsPrimary(Index index) throws IOExc if (index.getIfSeqNo() != SequenceNumbers.UNASSIGNED_SEQ_NO && versionValue == null) { final VersionConflictEngineException e = new VersionConflictEngineException(shardId, index.id(), index.getIfSeqNo(), index.getIfPrimaryTerm(), SequenceNumbers.UNASSIGNED_SEQ_NO, 0); - plan = IndexingStrategy.skipDueToVersionConflict(e, currentNotFoundOrDeleted, currentVersion, getPrimaryTerm()); + plan = IndexingStrategy.skipDueToVersionConflict(e, true, currentVersion, getPrimaryTerm()); } else if (index.getIfSeqNo() != SequenceNumbers.UNASSIGNED_SEQ_NO && ( versionValue.seqNo != index.getIfSeqNo() || versionValue.term != index.getIfPrimaryTerm() )) { @@ -1161,9 +1160,9 @@ static IndexingStrategy processNormally(boolean currentNotFoundOrDeleted, true, false, versionForIndexing, null); } - static IndexingStrategy overrideExistingAsIfNotThere(long versionForIndexing) { + static IndexingStrategy overrideExistingAsIfNotThere() { return new IndexingStrategy(true, true, true, - false, versionForIndexing, null); + false, 1L, null); } public static IndexingStrategy processButSkipLucene(boolean currentNotFoundOrDeleted, long versionForIndexing) { @@ -1282,7 +1281,7 @@ protected DeletionStrategy deletionStrategyForOperation(final Delete delete) thr } protected final DeletionStrategy planDeletionAsNonPrimary(Delete delete) throws IOException { - assertNonPrimaryOrigin(delete); + assert assertNonPrimaryOrigin(delete); maxSeqNoOfNonAppendOnlyOperations.updateAndGet(curr -> Math.max(delete.seqNo(), curr)); assert maxSeqNoOfNonAppendOnlyOperations.get() >= delete.seqNo() : "max_seqno of non-append-only was not updated;" + "max_seqno non-append-only [" + maxSeqNoOfNonAppendOnlyOperations.get() + "], seqno of delete [" + delete.seqNo() + "]"; @@ -1302,7 +1301,7 @@ protected final DeletionStrategy planDeletionAsNonPrimary(Delete delete) throws } else { final OpVsLuceneDocStatus opVsLucene = compareOpToLuceneDocBasedOnSeqNo(delete); if (opVsLucene == OpVsLuceneDocStatus.OP_STALE_OR_EQUAL) { - plan = DeletionStrategy.processAsStaleOp(softDeleteEnabled, false, delete.version()); + plan = DeletionStrategy.processAsStaleOp(softDeleteEnabled, delete.version()); } else { plan = DeletionStrategy.processNormally(opVsLucene == OpVsLuceneDocStatus.LUCENE_DOC_NOT_FOUND, delete.version()); } @@ -1315,7 +1314,7 @@ protected boolean assertNonPrimaryOrigin(final Operation operation) { return true; } - protected final DeletionStrategy planDeletionAsPrimary(Delete delete) throws IOException { + private DeletionStrategy planDeletionAsPrimary(Delete delete) throws IOException { assert delete.origin() == Operation.Origin.PRIMARY : "planing as primary but got " + delete.origin(); // resolve operation from external to internal final VersionValue versionValue = resolveDocVersion(delete, delete.getIfSeqNo() != SequenceNumbers.UNASSIGNED_SEQ_NO); @@ -1333,7 +1332,7 @@ protected final DeletionStrategy planDeletionAsPrimary(Delete delete) throws IOE if (delete.getIfSeqNo() != SequenceNumbers.UNASSIGNED_SEQ_NO && versionValue == null) { final VersionConflictEngineException e = new VersionConflictEngineException(shardId, delete.id(), delete.getIfSeqNo(), delete.getIfPrimaryTerm(), SequenceNumbers.UNASSIGNED_SEQ_NO, 0); - plan = DeletionStrategy.skipDueToVersionConflict(e, currentVersion, getPrimaryTerm(), currentlyDeleted); + plan = DeletionStrategy.skipDueToVersionConflict(e, currentVersion, getPrimaryTerm(), true); } else if (delete.getIfSeqNo() != SequenceNumbers.UNASSIGNED_SEQ_NO && ( versionValue.seqNo != delete.getIfSeqNo() || versionValue.term != delete.getIfPrimaryTerm() )) { @@ -1425,8 +1424,8 @@ public static DeletionStrategy processButSkipLucene(boolean currentlyDeleted, lo return new DeletionStrategy(false, false, currentlyDeleted, versionOfDeletion, null); } - static DeletionStrategy processAsStaleOp(boolean addStaleOpToLucene, boolean currentlyDeleted, long versionOfDeletion) { - return new DeletionStrategy(false, addStaleOpToLucene, currentlyDeleted, versionOfDeletion, null); + static DeletionStrategy processAsStaleOp(boolean addStaleOpToLucene, long versionOfDeletion) { + return new DeletionStrategy(false, addStaleOpToLucene, false, versionOfDeletion, null); } } diff --git a/server/src/main/java/org/elasticsearch/index/engine/LiveVersionMap.java b/server/src/main/java/org/elasticsearch/index/engine/LiveVersionMap.java index e4dce8919cf1e..ce955903af494 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/LiveVersionMap.java +++ b/server/src/main/java/org/elasticsearch/index/engine/LiveVersionMap.java @@ -234,7 +234,7 @@ long getMinDeleteTimestamp() { /** * Tracks bytes used by tombstones (deletes) */ - final AtomicLong ramBytesUsedTombstones = new AtomicLong(); + private final AtomicLong ramBytesUsedTombstones = new AtomicLong(); @Override public void beforeRefresh() throws IOException { diff --git a/server/src/main/java/org/elasticsearch/index/engine/LuceneChangesSnapshot.java b/server/src/main/java/org/elasticsearch/index/engine/LuceneChangesSnapshot.java index c9550a61f9e58..a3e86ab1606df 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/LuceneChangesSnapshot.java +++ b/server/src/main/java/org/elasticsearch/index/engine/LuceneChangesSnapshot.java @@ -188,8 +188,7 @@ private void fillParallelArray(ScoreDoc[] scoreDocs, ParallelArray parallelArray int readerIndex = 0; CombinedDocValues combinedDocValues = null; LeafReaderContext leaf = null; - for (int i = 0; i < scoreDocs.length; i++) { - ScoreDoc scoreDoc = scoreDocs[i]; + for (ScoreDoc scoreDoc : scoreDocs) { if (scoreDoc.doc >= docBase + maxDoc) { do { leaf = leaves.get(readerIndex++); diff --git a/server/src/main/java/org/elasticsearch/index/engine/ReadOnlyEngine.java b/server/src/main/java/org/elasticsearch/index/engine/ReadOnlyEngine.java index e7e0c4d927851..9d5f6054243e4 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/ReadOnlyEngine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/ReadOnlyEngine.java @@ -457,8 +457,8 @@ public void updateMaxUnsafeAutoIdTimestamp(long newTimestamp) { } - protected void processReaders(IndexReader reader, IndexReader previousReader) { - searcherFactory.processReaders(reader, previousReader); + protected void processReader(IndexReader reader) { + searcherFactory.processReaders(reader, null); } @Override diff --git a/server/src/main/java/org/elasticsearch/index/engine/RecoveryCounter.java b/server/src/main/java/org/elasticsearch/index/engine/RecoveryCounter.java deleted file mode 100644 index 31fddbedfb715..0000000000000 --- a/server/src/main/java/org/elasticsearch/index/engine/RecoveryCounter.java +++ /dev/null @@ -1,65 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.index.engine; - -import org.elasticsearch.common.lease.Releasable; -import org.elasticsearch.index.store.Store; - -import java.util.concurrent.atomic.AtomicInteger; - -/** - * RecoveryCounter keeps tracks of the number of ongoing recoveries for a - * particular {@link Store} - */ -public class RecoveryCounter implements Releasable { - - private final Store store; - - RecoveryCounter(Store store) { - this.store = store; - } - - private final AtomicInteger onGoingRecoveries = new AtomicInteger(); - - void startRecovery() { - store.incRef(); - onGoingRecoveries.incrementAndGet(); - } - - public int get() { - return onGoingRecoveries.get(); - } - - /** - * End the recovery counter by decrementing the store's ref and the ongoing recovery counter - * @return number of ongoing recoveries remaining - */ - int endRecovery() { - store.decRef(); - int left = onGoingRecoveries.decrementAndGet(); - assert onGoingRecoveries.get() >= 0 : "ongoingRecoveries must be >= 0 but was: " + onGoingRecoveries.get(); - return left; - } - - @Override - public void close() { - endRecovery(); - } -} diff --git a/server/src/main/java/org/elasticsearch/index/engine/RecoverySourcePruneMergePolicy.java b/server/src/main/java/org/elasticsearch/index/engine/RecoverySourcePruneMergePolicy.java index 42276f4ca2108..a4221bf01f210 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/RecoverySourcePruneMergePolicy.java +++ b/server/src/main/java/org/elasticsearch/index/engine/RecoverySourcePruneMergePolicy.java @@ -58,8 +58,7 @@ public CodecReader wrapForMerge(CodecReader reader) throws IOException { }); } - // pkg private for testing - static CodecReader wrapReader(String recoverySourceField, CodecReader reader, Supplier retainSourceQuerySupplier) + private static CodecReader wrapReader(String recoverySourceField, CodecReader reader, Supplier retainSourceQuerySupplier) throws IOException { NumericDocValues recoverySource = reader.getNumericDocValues(recoverySourceField); if (recoverySource == null || recoverySource.nextDoc() == DocIdSetIterator.NO_MORE_DOCS) { diff --git a/server/src/main/java/org/elasticsearch/index/engine/Segment.java b/server/src/main/java/org/elasticsearch/index/engine/Segment.java index 945359eda1b17..b1e6d09d897f2 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/Segment.java +++ b/server/src/main/java/org/elasticsearch/index/engine/Segment.java @@ -39,6 +39,7 @@ import java.util.Collection; import java.util.List; import java.util.Map; +import java.util.Objects; public class Segment implements Streamable { @@ -93,10 +94,6 @@ public ByteSizeValue getSize() { return new ByteSizeValue(sizeInBytes); } - public long getSizeInBytes() { - return this.sizeInBytes; - } - public org.apache.lucene.util.Version getVersion() { return version; } @@ -144,9 +141,8 @@ public boolean equals(Object o) { Segment segment = (Segment) o; - if (name != null ? !name.equals(segment.name) : segment.name != null) return false; + return Objects.equals(name, segment.name); - return true; } @Override @@ -211,7 +207,7 @@ public void writeTo(StreamOutput out) throws IOException { } } - Sort readSegmentSort(StreamInput in) throws IOException { + private Sort readSegmentSort(StreamInput in) throws IOException { int size = in.readVInt(); if (size == 0) { return null; @@ -262,7 +258,7 @@ Sort readSegmentSort(StreamInput in) throws IOException { return new Sort(fields); } - void writeSegmentSort(StreamOutput out, Sort sort) throws IOException { + private void writeSegmentSort(StreamOutput out, Sort sort) throws IOException { if (sort == null) { out.writeVInt(0); return; @@ -302,14 +298,14 @@ void writeSegmentSort(StreamOutput out, Sort sort) throws IOException { } } - Accountable readRamTree(StreamInput in) throws IOException { + private Accountable readRamTree(StreamInput in) throws IOException { final String name = in.readString(); final long bytes = in.readVLong(); int numChildren = in.readVInt(); if (numChildren == 0) { return Accountables.namedAccountable(name, bytes); } - List children = new ArrayList(numChildren); + List children = new ArrayList<>(numChildren); while (numChildren-- > 0) { children.add(readRamTree(in)); } @@ -317,7 +313,7 @@ Accountable readRamTree(StreamInput in) throws IOException { } // the ram tree is written recursively since the depth is fairly low (5 or 6) - void writeRamTree(StreamOutput out, Accountable tree) throws IOException { + private void writeRamTree(StreamOutput out, Accountable tree) throws IOException { out.writeString(tree.toString()); out.writeVLong(tree.ramBytesUsed()); Collection children = tree.getChildResources(); diff --git a/server/src/main/java/org/elasticsearch/index/engine/SegmentsStats.java b/server/src/main/java/org/elasticsearch/index/engine/SegmentsStats.java index 2d22a6f3caf20..ae78de574531f 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/SegmentsStats.java +++ b/server/src/main/java/org/elasticsearch/index/engine/SegmentsStats.java @@ -30,7 +30,6 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import java.io.IOException; -import java.util.Iterator; public class SegmentsStats implements Streamable, Writeable, ToXContentFragment { @@ -54,7 +53,7 @@ public class SegmentsStats implements Streamable, Writeable, ToXContentFragment * Ideally this should be in sync to what the current version of Lucene is using, but it's harmless to leave extensions out, * they'll just miss a proper description in the stats */ - private static ImmutableOpenMap fileDescriptions = ImmutableOpenMap.builder() + private static final ImmutableOpenMap FILE_DESCRIPTIONS = ImmutableOpenMap.builder() .fPut("si", "Segment Info") .fPut("fnm", "Fields") .fPut("fdx", "Field Index") @@ -150,8 +149,7 @@ public void addBitsetMemoryInBytes(long bitsetMemoryInBytes) { public void addFileSizes(ImmutableOpenMap fileSizes) { ImmutableOpenMap.Builder map = ImmutableOpenMap.builder(this.fileSizes); - for (Iterator> it = fileSizes.iterator(); it.hasNext();) { - ObjectObjectCursor entry = it.next(); + for (ObjectObjectCursor entry : fileSizes) { if (map.containsKey(entry.key)) { Long oldValue = map.get(entry.key); map.put(entry.key, oldValue + entry.value); @@ -206,7 +204,7 @@ public long getTermsMemoryInBytes() { return this.termsMemoryInBytes; } - public ByteSizeValue getTermsMemory() { + private ByteSizeValue getTermsMemory() { return new ByteSizeValue(termsMemoryInBytes); } @@ -217,7 +215,7 @@ public long getStoredFieldsMemoryInBytes() { return this.storedFieldsMemoryInBytes; } - public ByteSizeValue getStoredFieldsMemory() { + private ByteSizeValue getStoredFieldsMemory() { return new ByteSizeValue(storedFieldsMemoryInBytes); } @@ -228,7 +226,7 @@ public long getTermVectorsMemoryInBytes() { return this.termVectorsMemoryInBytes; } - public ByteSizeValue getTermVectorsMemory() { + private ByteSizeValue getTermVectorsMemory() { return new ByteSizeValue(termVectorsMemoryInBytes); } @@ -239,7 +237,7 @@ public long getNormsMemoryInBytes() { return this.normsMemoryInBytes; } - public ByteSizeValue getNormsMemory() { + private ByteSizeValue getNormsMemory() { return new ByteSizeValue(normsMemoryInBytes); } @@ -250,7 +248,7 @@ public long getPointsMemoryInBytes() { return this.pointsMemoryInBytes; } - public ByteSizeValue getPointsMemory() { + private ByteSizeValue getPointsMemory() { return new ByteSizeValue(pointsMemoryInBytes); } @@ -261,7 +259,7 @@ public long getDocValuesMemoryInBytes() { return this.docValuesMemoryInBytes; } - public ByteSizeValue getDocValuesMemory() { + private ByteSizeValue getDocValuesMemory() { return new ByteSizeValue(docValuesMemoryInBytes); } @@ -326,11 +324,10 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.humanReadableField(Fields.FIXED_BIT_SET_MEMORY_IN_BYTES, Fields.FIXED_BIT_SET, getBitsetMemory()); builder.field(Fields.MAX_UNSAFE_AUTO_ID_TIMESTAMP, maxUnsafeAutoIdTimestamp); builder.startObject(Fields.FILE_SIZES); - for (Iterator> it = fileSizes.iterator(); it.hasNext();) { - ObjectObjectCursor entry = it.next(); + for (ObjectObjectCursor entry : fileSizes) { builder.startObject(entry.key); builder.humanReadableField(Fields.SIZE_IN_BYTES, Fields.SIZE, new ByteSizeValue(entry.value)); - builder.field(Fields.DESCRIPTION, fileDescriptions.getOrDefault(entry.key, "Others")); + builder.field(Fields.DESCRIPTION, FILE_DESCRIPTIONS.getOrDefault(entry.key, "Others")); builder.endObject(); } builder.endObject(); @@ -391,7 +388,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeVInt(fileSizes.size()); for (ObjectObjectCursor entry : fileSizes) { out.writeString(entry.key); - out.writeLong(entry.value.longValue()); + out.writeLong(entry.value); } } diff --git a/server/src/main/java/org/elasticsearch/index/engine/SnapshotFailedEngineException.java b/server/src/main/java/org/elasticsearch/index/engine/SnapshotFailedEngineException.java index f669139c07e2a..d858ccb0ab667 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/SnapshotFailedEngineException.java +++ b/server/src/main/java/org/elasticsearch/index/engine/SnapshotFailedEngineException.java @@ -20,17 +20,12 @@ package org.elasticsearch.index.engine; import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.index.shard.ShardId; import java.io.IOException; public class SnapshotFailedEngineException extends EngineException { - public SnapshotFailedEngineException(ShardId shardId, Throwable cause) { - super(shardId, "Snapshot failed", cause); - } - public SnapshotFailedEngineException(StreamInput in) throws IOException{ super(in); } -} \ No newline at end of file +} diff --git a/server/src/main/java/org/elasticsearch/index/engine/TranslogLeafReader.java b/server/src/main/java/org/elasticsearch/index/engine/TranslogLeafReader.java index c1f92966196a3..d40e7d04e3ef3 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/TranslogLeafReader.java +++ b/server/src/main/java/org/elasticsearch/index/engine/TranslogLeafReader.java @@ -35,7 +35,6 @@ import org.apache.lucene.index.Terms; import org.apache.lucene.util.Bits; import org.apache.lucene.util.BytesRef; -import org.elasticsearch.Version; import org.elasticsearch.index.mapper.IdFieldMapper; import org.elasticsearch.index.mapper.RoutingFieldMapper; import org.elasticsearch.index.mapper.SourceFieldMapper; @@ -61,11 +60,9 @@ final class TranslogLeafReader extends LeafReader { private static final FieldInfo FAKE_ID_FIELD = new FieldInfo(IdFieldMapper.NAME, 3, false, false, false, IndexOptions.NONE, DocValuesType.NONE, -1, Collections.emptyMap(), 0, 0, 0, false); - private final Version indexVersionCreated; - TranslogLeafReader(Translog.Index operation, Version indexVersionCreated) { + TranslogLeafReader(Translog.Index operation) { this.operation = operation; - this.indexVersionCreated = indexVersionCreated; } @Override public CacheHelper getCoreCacheHelper() { diff --git a/server/src/main/java/org/elasticsearch/index/engine/VersionConflictEngineException.java b/server/src/main/java/org/elasticsearch/index/engine/VersionConflictEngineException.java index 0f6c217409c30..c869e2bc386aa 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/VersionConflictEngineException.java +++ b/server/src/main/java/org/elasticsearch/index/engine/VersionConflictEngineException.java @@ -42,11 +42,7 @@ public VersionConflictEngineException(ShardId shardId, String id, } public VersionConflictEngineException(ShardId shardId, String id, String explanation) { - this(shardId, null, id, explanation); - } - - public VersionConflictEngineException(ShardId shardId, Throwable cause, String id, String explanation) { - this(shardId, "[{}]: version conflict, {}", cause, id, explanation); + this(shardId, "[{}]: version conflict, {}", null, id, explanation); } public VersionConflictEngineException(ShardId shardId, String msg, Throwable cause, Object... params) { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/index/engine/FrozenEngine.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/index/engine/FrozenEngine.java index e9b57e316cccc..50f1125b275f1 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/index/engine/FrozenEngine.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/index/engine/FrozenEngine.java @@ -169,7 +169,7 @@ private synchronized DirectoryReader getOrOpenReader() throws IOException { listeners.beforeRefresh(); } reader = DirectoryReader.open(engineConfig.getStore().directory()); - processReaders(reader, null); + processReader(reader); reader = lastOpenedReader = wrapReader(reader, Function.identity()); reader.getReaderCacheHelper().addClosedListener(this::onReaderClosed); for (ReferenceManager.RefreshListener listeners : config ().getInternalRefreshListener()) { From 28aae648feb921727a6690c193f1162ed87c8e38 Mon Sep 17 00:00:00 2001 From: Alpar Torok Date: Wed, 22 May 2019 14:25:54 +0300 Subject: [PATCH 036/224] TestClusters: Convert docs (#42100) * TestClusters: Convert docs --- .../gradle/doc/DocsTestPlugin.groovy | 10 ++-- .../testclusters/ElasticsearchCluster.java | 12 +++-- .../testclusters/ElasticsearchNode.java | 11 +++- .../TestClusterConfiguration.java | 3 ++ .../testclusters/TestClustersPluginIT.java | 28 +++++----- docs/build.gradle | 18 ++++--- docs/reference/cluster/health.asciidoc | 2 +- docs/reference/getting-started.asciidoc | 2 +- x-pack/docs/build.gradle | 54 ++----------------- 9 files changed, 60 insertions(+), 80 deletions(-) diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/DocsTestPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/DocsTestPlugin.groovy index a0ce24e45c729..805a1b213e859 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/DocsTestPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/DocsTestPlugin.groovy @@ -18,6 +18,7 @@ */ package org.elasticsearch.gradle.doc +import org.elasticsearch.gradle.OS import org.elasticsearch.gradle.Version import org.elasticsearch.gradle.VersionProperties import org.elasticsearch.gradle.test.ClusterFormationTasks @@ -32,10 +33,13 @@ public class DocsTestPlugin extends RestTestPlugin { @Override public void apply(Project project) { + project.pluginManager.apply('elasticsearch.testclusters') project.pluginManager.apply('elasticsearch.standalone-rest-test') super.apply(project) + String distribution = System.getProperty('tests.distribution', 'default') // The distribution can be configured with -Dtests.distribution on the command line - project.integTestCluster.distribution = System.getProperty('tests.distribution', 'default') + project.testClusters.integTest.distribution = distribution.toUpperCase() + project.testClusters.integTest.nameCustomization = { it.replace("integTest", "node") } // Docs are published separately so no need to assemble project.tasks.assemble.enabled = false Map defaultSubstitutions = [ @@ -46,8 +50,8 @@ public class DocsTestPlugin extends RestTestPlugin { '\\{version\\}': Version.fromString(VersionProperties.elasticsearch).toString(), '\\{version_qualified\\}': VersionProperties.elasticsearch, '\\{lucene_version\\}' : VersionProperties.lucene.replaceAll('-snapshot-\\w+$', ''), - '\\{build_flavor\\}' : project.integTestCluster.distribution, - '\\{build_type\\}' : ClusterFormationTasks.getOs().equals("windows") ? "zip" : "tar", + '\\{build_flavor\\}' : distribution, + '\\{build_type\\}' : OS.conditionalString().onWindows({"zip"}).onUnix({"tar"}).supply(), ] Task listSnippets = project.tasks.create('listSnippets', SnippetsTask) listSnippets.group 'Docs' diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchCluster.java b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchCluster.java index 0cb7ee0c10fc7..e245fb0ead95a 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchCluster.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchCluster.java @@ -42,6 +42,7 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.BiConsumer; +import java.util.function.Function; import java.util.function.Predicate; import java.util.function.Supplier; import java.util.stream.Collectors; @@ -70,7 +71,7 @@ public ElasticsearchCluster(String path, String clusterName, Project project, Fi this.nodes = project.container(ElasticsearchNode.class); this.nodes.add( new ElasticsearchNode( - path, clusterName + "-1", + path, clusterName + "-0", services, artifactsExtractDir, workingDirBase ) ); @@ -91,7 +92,7 @@ public void setNumberOfNodes(int numberOfNodes) { ); } - for (int i = nodes.size() + 1 ; i <= numberOfNodes; i++) { + for (int i = nodes.size() ; i < numberOfNodes; i++) { this.nodes.add(new ElasticsearchNode( path, clusterName + "-" + i, services, artifactsExtractDir, workingDirBase )); @@ -99,7 +100,7 @@ public void setNumberOfNodes(int numberOfNodes) { } private ElasticsearchNode getFirstNode() { - return nodes.getAt(clusterName + "-1"); + return nodes.getAt(clusterName + "-0"); } public int getNumberOfNodes() { @@ -276,6 +277,11 @@ public void stop(boolean tailLogs) { nodes.forEach(each -> each.stop(tailLogs)); } + @Override + public void setNameCustomization(Function nameCustomization) { + nodes.all(each -> each.setNameCustomization(nameCustomization)); + } + @Override public boolean isProcessAlive() { return nodes.stream().noneMatch(node -> node.isProcessAlive() == false); diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java index 3bb1fb2ddb6e3..bba94f6c7d173 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java @@ -50,6 +50,7 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; import java.util.concurrent.atomic.AtomicBoolean; +import java.util.function.Function; import java.util.function.Predicate; import java.util.function.Supplier; import java.util.stream.Collectors; @@ -103,6 +104,7 @@ public class ElasticsearchNode implements TestClusterConfiguration { private String version; private File javaHome; private volatile Process esProcess; + private Function nameCustomization = Function.identity(); ElasticsearchNode(String path, String name, GradleServicesAdapter services, File artifactsExtractDir, File workingDirBase) { this.path = path; @@ -123,7 +125,7 @@ public class ElasticsearchNode implements TestClusterConfiguration { } public String getName() { - return name; + return nameCustomization.apply(name); } public String getVersion() { @@ -536,6 +538,11 @@ public synchronized void stop(boolean tailLogs) { esProcess = null; } + @Override + public void setNameCustomization(Function nameCustomizer) { + this.nameCustomization = nameCustomizer; + } + private void stopHandle(ProcessHandle processHandle, boolean forcibly) { // Stop all children first, ES could actually be a child when there's some wrapper process like on Windows. if (processHandle.isAlive() == false) { @@ -656,7 +663,7 @@ private void syncWithLinks(Path sourceRoot, Path destinationRoot) { } private void createConfiguration() { - defaultConfig.put("node.name", safeName(name)); + defaultConfig.put("node.name", nameCustomization.apply(safeName(name))); defaultConfig.put("path.repo", confPathRepo.toAbsolutePath().toString()); defaultConfig.put("path.data", confPathData.toAbsolutePath().toString()); defaultConfig.put("path.logs", confPathLogs.toAbsolutePath().toString()); diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClusterConfiguration.java b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClusterConfiguration.java index 628dadcbb9d37..1ccbeabd4b88a 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClusterConfiguration.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClusterConfiguration.java @@ -29,6 +29,7 @@ import java.util.List; import java.util.Map; import java.util.concurrent.TimeUnit; +import java.util.function.Function; import java.util.function.Predicate; import java.util.function.Supplier; @@ -85,6 +86,8 @@ public interface TestClusterConfiguration { void stop(boolean tailLogs); + void setNameCustomization(Function nameSupplier); + default void waitForConditions( LinkedHashMap> waitConditions, long startedAtMillis, diff --git a/buildSrc/src/test/java/org/elasticsearch/gradle/testclusters/TestClustersPluginIT.java b/buildSrc/src/test/java/org/elasticsearch/gradle/testclusters/TestClustersPluginIT.java index c9086d1459afd..39651ff896057 100644 --- a/buildSrc/src/test/java/org/elasticsearch/gradle/testclusters/TestClustersPluginIT.java +++ b/buildSrc/src/test/java/org/elasticsearch/gradle/testclusters/TestClustersPluginIT.java @@ -86,8 +86,8 @@ public void testUseClusterBySkippedAndWorkingTask() { assertOutputContains( result.getOutput(), "> Task :user1", - "Starting `node{::myTestCluster-1}`", - "Stopping `node{::myTestCluster-1}`" + "Starting `node{::myTestCluster-0}`", + "Stopping `node{::myTestCluster-0}`" ); } @@ -104,22 +104,22 @@ public void testMultiProject() { assertStartedAndStoppedOnce(result); assertOutputOnlyOnce( result.getOutput(), - "Starting `node{:alpha:myTestCluster-1}`", - "Stopping `node{::myTestCluster-1}`" + "Starting `node{:alpha:myTestCluster-0}`", + "Stopping `node{::myTestCluster-0}`" ); assertOutputOnlyOnce( result.getOutput(), - "Starting `node{::myTestCluster-1}`", - "Stopping `node{:bravo:myTestCluster-1}`" + "Starting `node{::myTestCluster-0}`", + "Stopping `node{:bravo:myTestCluster-0}`" ); } public void testReleased() { BuildResult result = getTestClustersRunner("testReleased").build(); assertTaskSuccessful(result, ":testReleased"); - assertStartedAndStoppedOnce(result, "releasedVersionDefault-1"); - assertStartedAndStoppedOnce(result, "releasedVersionOSS-1"); - assertStartedAndStoppedOnce(result, "releasedVersionIntegTest-1"); + assertStartedAndStoppedOnce(result, "releasedVersionDefault-0"); + assertStartedAndStoppedOnce(result, "releasedVersionOSS-0"); + assertStartedAndStoppedOnce(result, "releasedVersionIntegTest-0"); } public void testIncremental() { @@ -143,7 +143,7 @@ public void testUseClusterByFailingOne() { assertStartedAndStoppedOnce(result); assertOutputContains( result.getOutput(), - "Stopping `node{::myTestCluster-1}`, tailLogs: true", + "Stopping `node{::myTestCluster-0}`, tailLogs: true", "Execution failed for task ':itAlwaysFails'." ); } @@ -155,7 +155,7 @@ public void testUseClusterByFailingDependency() { assertStartedAndStoppedOnce(result); assertOutputContains( result.getOutput(), - "Stopping `node{::myTestCluster-1}`, tailLogs: true", + "Stopping `node{::myTestCluster-0}`, tailLogs: true", "Execution failed for task ':itAlwaysFails'." ); } @@ -165,7 +165,7 @@ public void testConfigurationLocked() { assertTaskFailed(result, ":illegalConfigAlter"); assertOutputContains( result.getOutput(), - "Configuration for node{::myTestCluster-1} can not be altered, already locked" + "Configuration for node{::myTestCluster-0} can not be altered, already locked" ); } @@ -173,9 +173,9 @@ public void testConfigurationLocked() { public void testMultiNode() { BuildResult result = getTestClustersRunner(":multiNode").build(); assertTaskSuccessful(result, ":multiNode"); + assertStartedAndStoppedOnce(result, "multiNode-0"); assertStartedAndStoppedOnce(result, "multiNode-1"); assertStartedAndStoppedOnce(result, "multiNode-2"); - assertStartedAndStoppedOnce(result, "multiNode-3"); } public void testPluginInstalled() { @@ -211,7 +211,7 @@ private void assertStartedAndStoppedOnce(BuildResult result, String nodeName) { } private void assertStartedAndStoppedOnce(BuildResult result) { - assertStartedAndStoppedOnce(result, "myTestCluster-1"); + assertStartedAndStoppedOnce(result, "myTestCluster-0"); } diff --git a/docs/build.gradle b/docs/build.gradle index 8156d1d54b57a..feda444301ec7 100644 --- a/docs/build.gradle +++ b/docs/build.gradle @@ -1,3 +1,5 @@ +import static org.elasticsearch.gradle.Distribution.DEFAULT + /* * Licensed to Elasticsearch under one or more contributor * license agreements. See the NOTICE file distributed with @@ -35,15 +37,15 @@ buildRestTests.expectedUnconvertedCandidates = [ 'reference/ml/apis/update-snapshot.asciidoc', ] -integTestCluster { - if ("default".equals(integTestCluster.distribution)) { +testClusters.integTest { + if (singleNode().distribution == DEFAULT) { setting 'xpack.license.self_generated.type', 'trial' } // enable regexes in painless so our tests don't complain about example snippets that use them setting 'script.painless.regex.enabled', 'true' Closure configFile = { - extraConfigFile it, "src/test/cluster/config/$it" + extraConfigFile it, file("src/test/cluster/config/$it") } configFile 'analysis/example_word_list.txt' configFile 'analysis/hyphenation_patterns.xml' @@ -52,8 +54,8 @@ integTestCluster { configFile 'userdict_ja.txt' configFile 'userdict_ko.txt' configFile 'KeywordTokenizer.rbbi' - extraConfigFile 'hunspell/en_US/en_US.aff', '../server/src/test/resources/indices/analyze/conf_dir/hunspell/en_US/en_US.aff' - extraConfigFile 'hunspell/en_US/en_US.dic', '../server/src/test/resources/indices/analyze/conf_dir/hunspell/en_US/en_US.dic' + extraConfigFile 'hunspell/en_US/en_US.aff', project(":server").file('src/test/resources/indices/analyze/conf_dir/hunspell/en_US/en_US.aff') + extraConfigFile 'hunspell/en_US/en_US.dic', project(":server").file('src/test/resources/indices/analyze/conf_dir/hunspell/en_US/en_US.dic') // Whitelist reindexing from the local node so we can test it. setting 'reindex.remote.whitelist', '127.0.0.1:*' } @@ -65,10 +67,12 @@ project.rootProject.subprojects.findAll { it.parent.path == ':plugins' }.each { if (subproj.path.startsWith(':plugins:repository-')) { return } + // FIXME subproj.afterEvaluate { // need to wait until the project has been configured - integTestCluster { - plugin subproj.path + testClusters.integTest { + plugin file(subproj.bundlePlugin.archiveFile) } + tasks.integTest.dependsOn subproj.bundlePlugin } } diff --git a/docs/reference/cluster/health.asciidoc b/docs/reference/cluster/health.asciidoc index 1e33455d02613..d75ce77d1af80 100644 --- a/docs/reference/cluster/health.asciidoc +++ b/docs/reference/cluster/health.asciidoc @@ -34,7 +34,7 @@ Returns this: "active_shards_percent_as_number": 50.0 } -------------------------------------------------- -// TESTRESPONSE[s/testcluster/docs_integTestCluster/] +// TESTRESPONSE[s/testcluster/integTest/] // TESTRESPONSE[s/"number_of_pending_tasks" : 0,/"number_of_pending_tasks" : $body.number_of_pending_tasks,/] // TESTRESPONSE[s/"task_max_waiting_in_queue_millis": 0/"task_max_waiting_in_queue_millis": $body.task_max_waiting_in_queue_millis/] diff --git a/docs/reference/getting-started.asciidoc b/docs/reference/getting-started.asciidoc index b81d2b284371d..7df9bdfe7aa6c 100755 --- a/docs/reference/getting-started.asciidoc +++ b/docs/reference/getting-started.asciidoc @@ -301,7 +301,7 @@ And the response: epoch timestamp cluster status node.total node.data shards pri relo init unassign pending_tasks max_task_wait_time active_shards_percent 1475247709 17:01:49 elasticsearch green 1 1 0 0 0 0 0 0 - 100.0% -------------------------------------------------- -// TESTRESPONSE[s/1475247709 17:01:49 elasticsearch/\\d+ \\d+:\\d+:\\d+ docs_integTestCluster/] +// TESTRESPONSE[s/1475247709 17:01:49 elasticsearch/\\d+ \\d+:\\d+:\\d+ integTest/] // TESTRESPONSE[s/0 0 -/0 \\d+ -/] // TESTRESPONSE[_cat] diff --git a/x-pack/docs/build.gradle b/x-pack/docs/build.gradle index 0075b4989e69f..0a23bb9c9cf62 100644 --- a/x-pack/docs/build.gradle +++ b/x-pack/docs/build.gradle @@ -27,54 +27,14 @@ dependencies { testCompile project(path: xpackProject('plugin').path, configuration: 'testArtifacts') } -Closure waitWithAuth = { NodeInfo node, AntBuilder ant -> - File tmpFile = new File(node.cwd, 'wait.success') - // wait up to twenty seconds - final long stopTime = System.currentTimeMillis() + 20000L; - Exception lastException = null; - while (System.currentTimeMillis() < stopTime) { - lastException = null; - // we use custom wait logic here as the elastic user is not available immediately and ant.get will fail when a 401 is returned - HttpURLConnection httpURLConnection = null; - try { - httpURLConnection = (HttpURLConnection) new URL("http://${node.httpUri()}/_cluster/health").openConnection(); - httpURLConnection.setRequestProperty("Authorization", "Basic " + - Base64.getEncoder().encodeToString("test_admin:x-pack-test-password".getBytes(StandardCharsets.UTF_8))); - httpURLConnection.setRequestMethod("GET"); - httpURLConnection.setConnectTimeout(1000); - httpURLConnection.setReadTimeout(30000); - httpURLConnection.connect(); - if (httpURLConnection.getResponseCode() == 200) { - tmpFile.withWriter StandardCharsets.UTF_8.name(), { - it.write(httpURLConnection.getInputStream().getText(StandardCharsets.UTF_8.name())) - } - break; - } - } catch (Exception e) { - logger.debug("failed to call cluster health", e) - lastException = e - } finally { - if (httpURLConnection != null) { - httpURLConnection.disconnect(); - } - } - - // did not start, so wait a bit before trying again - Thread.sleep(500L); - } - if (tmpFile.exists() == false && lastException != null) { - logger.error("final attempt of calling cluster health failed", lastException) - } - return tmpFile.exists() -} - // copy xpack rest api File xpackResources = new File(xpackProject('plugin').projectDir, 'src/test/resources') project.copyRestSpec.from(xpackResources) { include 'rest-api-spec/api/**' } -File jwks = new File(xpackProject('test:idp-fixture').projectDir, 'oidc/op-jwks.json') -integTestCluster { + +testClusters.integTest { + extraConfigFile 'op-jwks.json', xpackProject('test:idp-fixture').file("oidc/op-jwks.json") setting 'xpack.security.enabled', 'true' setting 'xpack.security.authc.api_key.enabled', 'true' setting 'xpack.security.authc.token.enabled', 'true' @@ -91,17 +51,13 @@ integTestCluster { setting 'xpack.security.authc.realms.oidc.oidc1.op.jwkset_path', 'op-jwks.json' setting 'xpack.security.authc.realms.oidc.oidc1.rp.redirect_uri', 'https://my.fantastic.rp/cb' setting 'xpack.security.authc.realms.oidc.oidc1.rp.client_id', 'elasticsearch-rp' - keystoreSetting 'xpack.security.authc.realms.oidc.oidc1.rp.client_secret', 'b07efb7a1cf6ec9462afe7b6d3ab55c6c7880262aa61ac28dded292aca47c9a2' + keystore 'xpack.security.authc.realms.oidc.oidc1.rp.client_secret', 'b07efb7a1cf6ec9462afe7b6d3ab55c6c7880262aa61ac28dded292aca47c9a2' setting 'xpack.security.authc.realms.oidc.oidc1.rp.response_type', 'id_token' setting 'xpack.security.authc.realms.oidc.oidc1.claims.principal', 'sub' - setupCommand 'setupTestAdmin', - 'bin/elasticsearch-users', 'useradd', 'test_admin', '-p', 'x-pack-test-password', '-r', 'superuser' - waitCondition = waitWithAuth - extraConfigFile 'op-jwks.json', jwks + user username: 'test_admin' } - buildRestTests.docs = fileTree(projectDir) { // No snippets in here! exclude 'build.gradle' From 385dfd95d6d149b9d9ca117768fe48a1dec1f7b6 Mon Sep 17 00:00:00 2001 From: Zachary Tong Date: Wed, 22 May 2019 08:10:10 -0400 Subject: [PATCH 037/224] Update version skips and constants after backport (#42290) After https://github.com/elastic/elasticsearch/pull/41906 was backported, we need to update the various test skips and version constants --- .../test/search.aggregation/230_composite.yml | 4 ++-- .../test/search.aggregation/250_moving_fn.yml | 4 ++-- .../test/search.aggregation/80_typed_keys.yml | 2 +- .../rest-api-spec/test/search/240_date_nanos.yml | 2 +- .../bucket/histogram/DateIntervalWrapper.java | 4 ++-- .../xpack/restart/FullClusterRestartIT.java | 2 +- .../upgrades/RollupDateHistoUpgradeIT.java | 2 +- .../test/mixed_cluster/40_ml_datafeed_crud.yml | 6 +----- .../test/old_cluster/40_ml_datafeed_crud.yml | 13 ++++--------- .../test/upgraded_cluster/40_ml_datafeed_crud.yml | 4 ---- 10 files changed, 15 insertions(+), 28 deletions(-) diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/230_composite.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/230_composite.yml index 4003d29abb5bf..fc0710fdb5375 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/230_composite.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/230_composite.yml @@ -241,7 +241,7 @@ setup: --- "Composite aggregation with format": - skip: - version: " - 7.99.99" #TODO change this after backport + version: " - 7.1.99" reason: calendar_interval introduced in 7.2.0 features: warnings @@ -307,7 +307,7 @@ setup: --- "Composite aggregation with format and calendar_interval": - skip: - version: " - 7.99.99" #TODO change this after backport + version: " - 7.1.99" reason: calendar_interval introduced in 7.2.0 - do: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/250_moving_fn.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/250_moving_fn.yml index a4517d46d2c62..cd24da7bd616b 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/250_moving_fn.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/250_moving_fn.yml @@ -2,7 +2,7 @@ "Bad window": - skip: - version: " - 7.99.0" #TODO change this after backport + version: " - 7.1.99" reason: "calendar_interval added in 7.2" - do: @@ -30,7 +30,7 @@ "Bad window deprecated interval": - skip: - version: " - 7.99.0" #TODO change this after backport + version: " - 7.1.99" reason: "interval deprecation added in 7.2" features: "warnings" diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/80_typed_keys.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/80_typed_keys.yml index 023c08f3b2d50..d041432556430 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/80_typed_keys.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/80_typed_keys.yml @@ -206,7 +206,7 @@ setup: --- "Test typed keys parameter for date_histogram aggregation and max_bucket pipeline aggregation": - skip: - version: " - 7.99.0" #TODO change this after backport + version: " - 7.1.99" reason: "calendar_interval added in 7.2" - do: search: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/240_date_nanos.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/240_date_nanos.yml index 352d5edf6b374..2caf9c7084792 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search/240_date_nanos.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/240_date_nanos.yml @@ -123,7 +123,7 @@ setup: --- "date histogram aggregation with date and date_nanos mapping": - skip: - version: " - 7.99.99" #TODO change this after backport + version: " - 7.1.99" reason: calendar_interval introduced in 7.2.0 - do: diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateIntervalWrapper.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateIntervalWrapper.java index b08782f1fd37a..229fa0d15bb30 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateIntervalWrapper.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateIntervalWrapper.java @@ -113,7 +113,7 @@ public static void declareIntervalFields(Object public DateIntervalWrapper() {} public DateIntervalWrapper(StreamInput in) throws IOException { - if (in.getVersion().before(Version.V_8_0_0)) { // TODO change this after backport + if (in.getVersion().before(Version.V_7_2_0)) { long interval = in.readLong(); DateHistogramInterval histoInterval = in.readOptionalWriteable(DateHistogramInterval::new); @@ -374,7 +374,7 @@ public boolean isEmpty() { @Override public void writeTo(StreamOutput out) throws IOException { - if (out.getVersion().before(Version.V_8_0_0)) { // TODO change this after backport + if (out.getVersion().before(Version.V_7_2_0)) { if (intervalType.equals(IntervalTypeEnum.LEGACY_INTERVAL)) { out.writeLong(TimeValue.parseTimeValue(dateHistogramInterval.toString(), DateHistogramAggregationBuilder.NAME + ".innerWriteTo").getMillis()); diff --git a/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java b/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java index f17aab309ba72..a62a23dac70b8 100644 --- a/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java +++ b/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java @@ -229,7 +229,7 @@ public void testRollupAfterRestart() throws Exception { final Request createRollupJobRequest = new Request("PUT", getRollupEndpoint() + "/job/rollup-job-test"); String intervalType; - if (getOldClusterVersion().onOrAfter(Version.V_8_0_0)) { // TODO change this after backport + if (getOldClusterVersion().onOrAfter(Version.V_7_2_0)) { intervalType = "fixed_interval"; } else { intervalType = "interval"; diff --git a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RollupDateHistoUpgradeIT.java b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RollupDateHistoUpgradeIT.java index 035e29ccf771c..08ad9f09d599c 100644 --- a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RollupDateHistoUpgradeIT.java +++ b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RollupDateHistoUpgradeIT.java @@ -34,7 +34,7 @@ public class RollupDateHistoUpgradeIT extends AbstractUpgradeTestCase { Version.fromString(System.getProperty("tests.upgrade_from_version")); public void testDateHistoIntervalUpgrade() throws Exception { - assumeTrue("DateHisto interval changed in 7.1", UPGRADE_FROM_VERSION.before(Version.V_7_2_0)); + assumeTrue("DateHisto interval changed in 7.2", UPGRADE_FROM_VERSION.before(Version.V_7_2_0)); switch (CLUSTER_TYPE) { case OLD: break; diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/40_ml_datafeed_crud.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/40_ml_datafeed_crud.yml index 4d732015d47f4..2ff9b08e9b13f 100644 --- a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/40_ml_datafeed_crud.yml +++ b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/40_ml_datafeed_crud.yml @@ -1,8 +1,3 @@ -setup: - - skip: - version: "all" - reason: "AwaitsFix https://github.com/elastic/elasticsearch/issues/42258" - --- "Test old cluster datafeed without aggs": - do: @@ -114,6 +109,7 @@ setup: - do: warnings: - '[interval] on [date_histogram] is deprecated, use [fixed_interval] or [calendar_interval] in the future.' + - '[interval] on [date_histogram] is deprecated, use [fixed_interval] or [calendar_interval] in the future.' ml.put_datafeed: datafeed_id: mixed-cluster-datafeed-with-aggs body: > diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/40_ml_datafeed_crud.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/40_ml_datafeed_crud.yml index 2a7b56adb9a16..4918dde9ba899 100644 --- a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/40_ml_datafeed_crud.yml +++ b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/40_ml_datafeed_crud.yml @@ -1,8 +1,3 @@ -setup: - - skip: - version: "all" - reason: "AwaitsFix https://github.com/elastic/elasticsearch/issues/42258" - --- "Put job and datafeed without aggs in old cluster": @@ -53,8 +48,8 @@ setup: --- "Put job and datafeed with aggs in old cluster - pre-deprecated interval": - skip: - version: "all" #TODO change this after backport - reason: "AwaitsFix https://github.com/elastic/elasticsearch/issues/42258; calendar_interval introduced in 7.2.0" + version: "7.1.99 - " + reason: "calendar_interval introduced in 7.2.0" - do: ml.put_job: @@ -123,8 +118,8 @@ setup: --- "Put job and datafeed with aggs in old cluster - deprecated interval with warning": - skip: - version: " - 7.99.99" #TODO change this after backport - reason: calendar_interval introduced in 7.1.0 + version: " - 7.1.99" + reason: calendar_interval introduced in 7.2.0 features: warnings - do: diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/40_ml_datafeed_crud.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/40_ml_datafeed_crud.yml index 4b742e10de61f..5dc71ecb0679e 100644 --- a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/40_ml_datafeed_crud.yml +++ b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/40_ml_datafeed_crud.yml @@ -1,8 +1,4 @@ setup: - - skip: - version: "all" - reason: "AwaitsFix https://github.com/elastic/elasticsearch/issues/42258" - - do: cluster.health: wait_for_status: green From 1e9221da84ad0da66de87eb82c95c1255a81a530 Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Wed, 22 May 2019 14:12:25 +0200 Subject: [PATCH 038/224] Remove Obsolete BwC Logic from BlobStoreRepository (#42193) * Remove Obsolete BwC Logic from BlobStoreRepository * We can't restore 1.3.3 files anyway -> no point in doing the dance of computing a hash here * Some other minor+obvious cleanups --- .../blobstore/BlobStoreRepository.java | 43 +------------------ .../blobstore/FileRestoreContext.java | 40 +---------------- 2 files changed, 3 insertions(+), 80 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java index 86409ebac7d31..49b551b26b796 100644 --- a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java +++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java @@ -26,8 +26,6 @@ import org.apache.lucene.store.IOContext; import org.apache.lucene.store.IndexInput; import org.apache.lucene.store.RateLimiter; -import org.apache.lucene.util.BytesRef; -import org.apache.lucene.util.BytesRefBuilder; import org.apache.lucene.util.SetOnce; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.ExceptionsHelper; @@ -954,8 +952,6 @@ protected void finalize(final List snapshots, final Map blobs, final String reason) { final String indexGeneration = Integer.toString(fileListGeneration); - final String currentIndexGen = indexShardSnapshotsFormat.blobName(indexGeneration); - final BlobStoreIndexShardSnapshots updatedSnapshots = new BlobStoreIndexShardSnapshots(snapshots); try { // Delete temporary index files first, as we might otherwise fail in the next step creating the new index file if an earlier @@ -998,7 +994,8 @@ protected void finalize(final List snapshots, snapshotId, shardId), e); } } catch (IOException e) { - String message = "Failed to finalize " + reason + " with shard index [" + currentIndexGen + "]"; + String message = + "Failed to finalize " + reason + " with shard index [" + indexShardSnapshotsFormat.blobName(indexGeneration) + "]"; throw new IndexShardSnapshotFailedException(shardId, message, e); } } @@ -1135,16 +1132,6 @@ public void snapshot(final IndexCommit snapshotIndexCommit) { List filesInfo = snapshots.findPhysicalIndexFiles(fileName); if (filesInfo != null) { for (BlobStoreIndexShardSnapshot.FileInfo fileInfo : filesInfo) { - try { - // in 1.3.3 we added additional hashes for .si / segments_N files - // to ensure we don't double the space in the repo since old snapshots - // don't have this hash we try to read that hash from the blob store - // in a bwc compatible way. - maybeRecalculateMetadataHash(blobContainer, fileInfo, metadata); - } catch (Exception e) { - logger.warn(() -> new ParameterizedMessage("{} Can't calculate hash from blob for file [{}] [{}]", - shardId, fileInfo.physicalName(), fileInfo.metadata()), e); - } if (fileInfo.isSame(md) && snapshotFileExistsInBlobs(fileInfo, blobs)) { // a commit point file with the same name, size and checksum was already copied to repository // we will reuse it for this snapshot @@ -1315,32 +1302,6 @@ private void checkAborted() { } } - /** - * This is a BWC layer to ensure we update the snapshots metadata with the corresponding hashes before we compare them. - * The new logic for StoreFileMetaData reads the entire {@code .si} and {@code segments.n} files to strengthen the - * comparison of the files on a per-segment / per-commit level. - */ - private static void maybeRecalculateMetadataHash(final BlobContainer blobContainer, final BlobStoreIndexShardSnapshot.FileInfo fileInfo, - Store.MetadataSnapshot snapshot) throws Exception { - final StoreFileMetaData metadata; - if (fileInfo != null && (metadata = snapshot.get(fileInfo.physicalName())) != null) { - if (metadata.hash().length > 0 && fileInfo.metadata().hash().length == 0) { - // we have a hash - check if our repo has a hash too otherwise we have - // to calculate it. - // we might have multiple parts even though the file is small... make sure we read all of it. - try (InputStream stream = new PartSliceStream(blobContainer, fileInfo)) { - BytesRefBuilder builder = new BytesRefBuilder(); - Store.MetadataSnapshot.hashFile(builder, stream, fileInfo.length()); - BytesRef hash = fileInfo.metadata().hash(); // reset the file infos metadata hash - assert hash.length == 0; - hash.bytes = builder.bytes(); - hash.offset = 0; - hash.length = builder.length(); - } - } - } - } - private static final class PartSliceStream extends SlicedInputStream { private final BlobContainer container; diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/FileRestoreContext.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/FileRestoreContext.java index f78ddab9ee44c..3abe4d7b50722 100644 --- a/server/src/main/java/org/elasticsearch/repositories/blobstore/FileRestoreContext.java +++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/FileRestoreContext.java @@ -27,8 +27,6 @@ import org.apache.lucene.index.SegmentInfos; import org.apache.lucene.store.IOContext; import org.apache.lucene.store.IndexOutput; -import org.apache.lucene.util.BytesRef; -import org.apache.lucene.util.BytesRefBuilder; import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.util.iterable.Iterables; import org.elasticsearch.index.shard.ShardId; @@ -127,17 +125,6 @@ public void restore(SnapshotFiles snapshotFiles, Store store) throws IOException final Map snapshotMetaData = new HashMap<>(); final Map fileInfos = new HashMap<>(); for (final BlobStoreIndexShardSnapshot.FileInfo fileInfo : snapshotFiles.indexFiles()) { - try { - // in 1.3.3 we added additional hashes for .si / segments_N files - // to ensure we don't double the space in the repo since old snapshots - // don't have this hash we try to read that hash from the blob store - // in a bwc compatible way. - maybeRecalculateMetadataHash(fileInfo, recoveryTargetMetadata); - } catch (Exception e) { - // if the index is broken we might not be able to read it - logger.warn(new ParameterizedMessage("[{}] Can't calculate hash from blog for file [{}] [{}]", shardId, - fileInfo.physicalName(), fileInfo.metadata()), e); - } snapshotMetaData.put(fileInfo.metadata().name(), fileInfo.metadata()); fileInfos.put(fileInfo.metadata().name(), fileInfo); } @@ -237,7 +224,7 @@ protected void restoreFiles(List filesToRe protected abstract InputStream fileInputStream(BlobStoreIndexShardSnapshot.FileInfo fileInfo); @SuppressWarnings("unchecked") - private Iterable concat(Store.RecoveryDiff diff) { + private static Iterable concat(Store.RecoveryDiff diff) { return Iterables.concat(diff.different, diff.missing); } @@ -276,29 +263,4 @@ private void restoreFile(final BlobStoreIndexShardSnapshot.FileInfo fileInfo, fi } } - /** - * This is a BWC layer to ensure we update the snapshots metadata with the corresponding hashes before we compare them. - * The new logic for StoreFileMetaData reads the entire {@code .si} and {@code segments.n} files to strengthen the - * comparison of the files on a per-segment / per-commit level. - */ - private void maybeRecalculateMetadataHash(final BlobStoreIndexShardSnapshot.FileInfo fileInfo, Store.MetadataSnapshot snapshot) - throws IOException { - final StoreFileMetaData metadata; - if (fileInfo != null && (metadata = snapshot.get(fileInfo.physicalName())) != null) { - if (metadata.hash().length > 0 && fileInfo.metadata().hash().length == 0) { - // we have a hash - check if our repo has a hash too otherwise we have - // to calculate it. - // we might have multiple parts even though the file is small... make sure we read all of it. - try (InputStream stream = fileInputStream(fileInfo)) { - BytesRefBuilder builder = new BytesRefBuilder(); - Store.MetadataSnapshot.hashFile(builder, stream, fileInfo.length()); - BytesRef hash = fileInfo.metadata().hash(); // reset the file infos metadata hash - assert hash.length == 0; - hash.bytes = builder.bytes(); - hash.offset = 0; - hash.length = builder.length(); - } - } - } - } } From 05809deb490f71a90a4164c302955c5c2ab6d8ac Mon Sep 17 00:00:00 2001 From: Yannick Welsch Date: Wed, 22 May 2019 14:21:48 +0200 Subject: [PATCH 039/224] Revert "Mute SpecificMasterNodesIT.testElectOnlyBetweenMasterNodes()" This reverts commit 2964ceaa0371d8bd1665e599c6395a7e7026d094. --- .../java/org/elasticsearch/cluster/SpecificMasterNodesIT.java | 1 - 1 file changed, 1 deletion(-) diff --git a/server/src/test/java/org/elasticsearch/cluster/SpecificMasterNodesIT.java b/server/src/test/java/org/elasticsearch/cluster/SpecificMasterNodesIT.java index f80a5befa83d9..38b9579eff046 100644 --- a/server/src/test/java/org/elasticsearch/cluster/SpecificMasterNodesIT.java +++ b/server/src/test/java/org/elasticsearch/cluster/SpecificMasterNodesIT.java @@ -86,7 +86,6 @@ public void testSimpleOnlyMasterNodeElection() throws IOException { .execute().actionGet().getState().nodes().getMasterNode().getName(), equalTo(nextMasterEligibleNodeName)); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/38331") public void testElectOnlyBetweenMasterNodes() throws Exception { internalCluster().setBootstrapMasterNodeIndex(0); logger.info("--> start data node / non master node"); From d292d95eaada378c216ce00d11e68db79954d359 Mon Sep 17 00:00:00 2001 From: Yannick Welsch Date: Wed, 22 May 2019 14:36:17 +0200 Subject: [PATCH 040/224] Fix testCannotJoinIfMasterLostDataFolder Relates to #41047 --- .../discovery/ClusterDisruptionIT.java | 23 +++++++++++++++---- 1 file changed, 18 insertions(+), 5 deletions(-) diff --git a/server/src/test/java/org/elasticsearch/discovery/ClusterDisruptionIT.java b/server/src/test/java/org/elasticsearch/discovery/ClusterDisruptionIT.java index 3a257ec5973f8..ad3b8006ed0c3 100644 --- a/server/src/test/java/org/elasticsearch/discovery/ClusterDisruptionIT.java +++ b/server/src/test/java/org/elasticsearch/discovery/ClusterDisruptionIT.java @@ -31,6 +31,7 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.action.shard.ShardStateAction; import org.elasticsearch.cluster.coordination.ClusterBootstrapService; +import org.elasticsearch.cluster.coordination.LagDetector; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.routing.Murmur3HashFunction; import org.elasticsearch.cluster.routing.ShardRouting; @@ -389,7 +390,6 @@ public void onFailure(Exception e) { } } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/41047") public void testCannotJoinIfMasterLostDataFolder() throws Exception { String masterNode = internalCluster().startMasterOnlyNode(); String dataNode = internalCluster().startDataOnlyNode(); @@ -402,7 +402,18 @@ public boolean clearData(String nodeName) { @Override public Settings onNodeStopped(String nodeName) { - return Settings.builder().put(ClusterBootstrapService.INITIAL_MASTER_NODES_SETTING.getKey(), nodeName).build(); + return Settings.builder() + .put(ClusterBootstrapService.INITIAL_MASTER_NODES_SETTING.getKey(), nodeName) + /* + * the data node might join while the master is still not fully established as master just yet and bypasses the join + * validation that is done before adding the node to the cluster. Only the join validation when handling the publish + * request takes place, but at this point the cluster state has been successfully committed, and will subsequently be + * exposed to the applier. The health check below therefore sees the cluster state with the 2 nodes and thinks all is + * good, even though the data node never accepted this state. What's worse is that it takes 90 seconds for the data + * node to be kicked out of the cluster (lag detection). We speed this up here. + */ + .put(LagDetector.CLUSTER_FOLLOWER_LAG_TIMEOUT_SETTING.getKey(), "10s") + .build(); } @Override @@ -411,9 +422,11 @@ public boolean validateClusterForming() { } }); - assertFalse(internalCluster().client(masterNode).admin().cluster().prepareHealth().get().isTimedOut()); - assertTrue(internalCluster().client(masterNode).admin().cluster().prepareHealth().setWaitForNodes("2").setTimeout("2s").get() - .isTimedOut()); + assertBusy(() -> { + assertFalse(internalCluster().client(masterNode).admin().cluster().prepareHealth().get().isTimedOut()); + assertTrue(internalCluster().client(masterNode).admin().cluster().prepareHealth().setWaitForNodes("2").setTimeout("2s").get() + .isTimedOut()); + }, 30, TimeUnit.SECONDS); internalCluster().stopRandomNode(InternalTestCluster.nameFilter(dataNode)); // otherwise we will fail during clean-up } From 40beecd1e04b81dfbc398e22132bf9411f54c6d5 Mon Sep 17 00:00:00 2001 From: markharwood Date: Wed, 22 May 2019 13:37:47 +0100 Subject: [PATCH 041/224] Search - enable low_level_cancellation by default. (#42291) Benchmarking on worst-case queries (max agg on match_all or popular-term query with large index) was not noticeably slower. Closes #26258 --- .../main/java/org/elasticsearch/search/SearchService.java | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/search/SearchService.java b/server/src/main/java/org/elasticsearch/search/SearchService.java index b703493b4d505..daf8e1faf7bb8 100644 --- a/server/src/main/java/org/elasticsearch/search/SearchService.java +++ b/server/src/main/java/org/elasticsearch/search/SearchService.java @@ -134,11 +134,11 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv /** * Enables low-level, frequent search cancellation checks. Enabling low-level checks will make long running searches to react - * to the cancellation request faster. However, since it will produce more cancellation checks it might slow the search performance - * down. + * to the cancellation request faster. It will produce more cancellation checks but benchmarking has shown these did not + * noticeably slow down searches. */ public static final Setting LOW_LEVEL_CANCELLATION_SETTING = - Setting.boolSetting("search.low_level_cancellation", false, Property.Dynamic, Property.NodeScope); + Setting.boolSetting("search.low_level_cancellation", true, Property.Dynamic, Property.NodeScope); public static final TimeValue NO_TIMEOUT = timeValueMillis(-1); public static final Setting DEFAULT_SEARCH_TIMEOUT_SETTING = From b03d7b20928b481ee09418bcc39f1536b40493b0 Mon Sep 17 00:00:00 2001 From: Yannick Welsch Date: Wed, 22 May 2019 14:45:26 +0200 Subject: [PATCH 042/224] Remove testNodeFailuresAreProcessedOnce This test was not checking the thing it was supposed to anyway. --- .../cluster/coordination/ZenDiscoveryIT.java | 36 ------------------- 1 file changed, 36 deletions(-) diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/ZenDiscoveryIT.java b/server/src/test/java/org/elasticsearch/cluster/coordination/ZenDiscoveryIT.java index 9a17c25f44cce..feffbfc792656 100644 --- a/server/src/test/java/org/elasticsearch/cluster/coordination/ZenDiscoveryIT.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/ZenDiscoveryIT.java @@ -41,18 +41,14 @@ import org.elasticsearch.test.junit.annotations.TestLogging; import org.elasticsearch.transport.RemoteTransportException; -import java.io.IOException; import java.util.EnumSet; import java.util.Optional; import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CountDownLatch; import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; -import java.util.concurrent.atomic.AtomicInteger; import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.either; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.not; @@ -97,38 +93,6 @@ public void testNoShardRelocationsOccurWhenElectedMasterNodeFails() throws Excep assertThat(numRecoveriesAfterNewMaster, equalTo(numRecoveriesBeforeNewMaster)); } - public void testNodeFailuresAreProcessedOnce() throws IOException { - Settings masterNodeSettings = Settings.builder() - .put(Node.NODE_DATA_SETTING.getKey(), false) - .build(); - String master = internalCluster().startNode(masterNodeSettings); - Settings dateNodeSettings = Settings.builder() - .put(Node.NODE_MASTER_SETTING.getKey(), false) - .build(); - internalCluster().startNodes(2, dateNodeSettings); - client().admin().cluster().prepareHealth().setWaitForNodes("3").get(); - - ClusterService clusterService = internalCluster().getInstance(ClusterService.class, master); - final AtomicInteger numUpdates = new AtomicInteger(); - final CountDownLatch nodesStopped = new CountDownLatch(1); - clusterService.addStateApplier(event -> { - numUpdates.incrementAndGet(); - try { - // block until both nodes have stopped to accumulate node failures - nodesStopped.await(); - } catch (InterruptedException e) { - //meh - } - }); - - internalCluster().stopRandomNonMasterNode(); - internalCluster().stopRandomNonMasterNode(); - nodesStopped.countDown(); - - client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).get(); // wait for all to be processed - assertThat(numUpdates.get(), either(equalTo(1)).or(equalTo(2))); // due to batching, both nodes can be handled in same CS update - } - public void testHandleNodeJoin_incompatibleClusterState() throws InterruptedException, ExecutionException, TimeoutException { String masterNode = internalCluster().startMasterOnlyNode(); From 94848d8a8c27d2f0af4e3da7aa155fce1896d562 Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Wed, 22 May 2019 15:31:29 +0200 Subject: [PATCH 043/224] Dump Stacktrace on Slow IO-Thread Operations (#42000) * Dump Stacktrace on Slow IO-Thread Operations * Follow up to #39729 extending the functionality to actually dump the stack when the thread is blocked not afterwards * Logging the stacktrace after the thread became unblocked is only of limited use because we don't know what happened in the slow callback from that (only whether we were blocked on a read,write,connect etc.) * Relates #41745 --- .../transport/nio/MockNioTransport.java | 71 ++++++++++- .../transport/nio/TestEventHandler.java | 114 ++++++++++-------- .../transport/nio/TestEventHandlerTests.java | 13 +- 3 files changed, 143 insertions(+), 55 deletions(-) diff --git a/test/framework/src/main/java/org/elasticsearch/transport/nio/MockNioTransport.java b/test/framework/src/main/java/org/elasticsearch/transport/nio/MockNioTransport.java index dc0e14a4d2984..42dae39146605 100644 --- a/test/framework/src/main/java/org/elasticsearch/transport/nio/MockNioTransport.java +++ b/test/framework/src/main/java/org/elasticsearch/transport/nio/MockNioTransport.java @@ -31,6 +31,7 @@ import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.recycler.Recycler; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.PageCacheRecycler; import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.nio.BytesChannelContext; @@ -57,11 +58,16 @@ import java.nio.ByteBuffer; import java.nio.channels.ServerSocketChannel; import java.nio.channels.SocketChannel; +import java.util.Arrays; import java.util.HashSet; +import java.util.Map; import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.TimeUnit; import java.util.function.Consumer; import java.util.function.IntFunction; +import java.util.stream.Collectors; import static org.elasticsearch.common.util.concurrent.ConcurrentCollections.newConcurrentMap; import static org.elasticsearch.common.util.concurrent.EsExecutors.daemonThreadFactory; @@ -70,6 +76,7 @@ public class MockNioTransport extends TcpTransport { private static final Logger logger = LogManager.getLogger(MockNioTransport.class); private final ConcurrentMap profileToChannelFactory = newConcurrentMap(); + private final TransportThreadWatchdog transportThreadWatchdog; private volatile NioSelectorGroup nioGroup; private volatile MockTcpChannelFactory clientChannelFactory; @@ -77,6 +84,7 @@ public MockNioTransport(Settings settings, Version version, ThreadPool threadPoo PageCacheRecycler pageCacheRecycler, NamedWriteableRegistry namedWriteableRegistry, CircuitBreakerService circuitBreakerService) { super(settings, version, threadPool, pageCacheRecycler, circuitBreakerService, namedWriteableRegistry, networkService); + this.transportThreadWatchdog = new TransportThreadWatchdog(threadPool); } @Override @@ -96,7 +104,7 @@ protected void doStart() { boolean success = false; try { nioGroup = new NioSelectorGroup(daemonThreadFactory(this.settings, TcpTransport.TRANSPORT_WORKER_THREAD_NAME_PREFIX), 2, - (s) -> new TestEventHandler(this::onNonChannelException, s, System::nanoTime)); + (s) -> new TestEventHandler(this::onNonChannelException, s, transportThreadWatchdog)); ProfileSettings clientProfileSettings = new ProfileSettings(settings, "default"); clientChannelFactory = new MockTcpChannelFactory(true, clientProfileSettings, "client"); @@ -125,6 +133,7 @@ protected void doStart() { @Override protected void stopInternal() { try { + transportThreadWatchdog.stop(); nioGroup.close(); } catch (Exception e) { logger.warn("unexpected exception while stopping nio group", e); @@ -311,4 +320,64 @@ public void sendMessage(BytesReference reference, ActionListener listener) getContext().sendMessage(BytesReference.toByteBuffers(reference), ActionListener.toBiConsumer(listener)); } } + + static final class TransportThreadWatchdog { + + private static final long WARN_THRESHOLD = TimeUnit.MILLISECONDS.toNanos(150); + + // Only check every 2s to not flood the logs on a blocked thread. + // We mostly care about long blocks and not random slowness anyway and in tests would randomly catch slow operations that block for + // less than 2s eventually. + private static final TimeValue CHECK_INTERVAL = TimeValue.timeValueSeconds(2); + + private final ThreadPool threadPool; + private final ConcurrentHashMap registry = new ConcurrentHashMap<>(); + + private volatile boolean stopped; + + TransportThreadWatchdog(ThreadPool threadPool) { + this.threadPool = threadPool; + threadPool.schedule(this::logLongRunningExecutions, CHECK_INTERVAL, ThreadPool.Names.GENERIC); + } + + public boolean register() { + Long previousValue = registry.put(Thread.currentThread(), threadPool.relativeTimeInNanos()); + return previousValue == null; + } + + public void unregister() { + Long previousValue = registry.remove(Thread.currentThread()); + assert previousValue != null; + maybeLogElapsedTime(previousValue); + } + + private void maybeLogElapsedTime(long startTime) { + long elapsedTime = threadPool.relativeTimeInNanos() - startTime; + if (elapsedTime > WARN_THRESHOLD) { + logger.warn( + new ParameterizedMessage("Slow execution on network thread [{} milliseconds]", + TimeUnit.NANOSECONDS.toMillis(elapsedTime)), + new RuntimeException("Slow exception on network thread")); + } + } + + private void logLongRunningExecutions() { + for (Map.Entry entry : registry.entrySet()) { + final long elapsedTime = threadPool.relativeTimeInMillis() - entry.getValue(); + if (elapsedTime > WARN_THRESHOLD) { + final Thread thread = entry.getKey(); + logger.warn("Slow execution on network thread [{}] [{} milliseconds]: \n{}", thread.getName(), + TimeUnit.NANOSECONDS.toMillis(elapsedTime), + Arrays.stream(thread.getStackTrace()).map(Object::toString).collect(Collectors.joining("\n"))); + } + } + if (stopped == false) { + threadPool.schedule(this::logLongRunningExecutions, CHECK_INTERVAL, ThreadPool.Names.GENERIC); + } + } + + public void stop() { + stopped = true; + } + } } diff --git a/test/framework/src/main/java/org/elasticsearch/transport/nio/TestEventHandler.java b/test/framework/src/main/java/org/elasticsearch/transport/nio/TestEventHandler.java index a70ecb0c59efa..069e19c34558c 100644 --- a/test/framework/src/main/java/org/elasticsearch/transport/nio/TestEventHandler.java +++ b/test/framework/src/main/java/org/elasticsearch/transport/nio/TestEventHandler.java @@ -19,9 +19,6 @@ package org.elasticsearch.transport.nio; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.nio.ChannelContext; import org.elasticsearch.nio.EventHandler; import org.elasticsearch.nio.NioSelector; @@ -32,185 +29,202 @@ import java.util.Collections; import java.util.Set; import java.util.WeakHashMap; -import java.util.concurrent.TimeUnit; import java.util.function.Consumer; -import java.util.function.LongSupplier; import java.util.function.Supplier; public class TestEventHandler extends EventHandler { - private static final Logger logger = LogManager.getLogger(TestEventHandler.class); - private final Set hasConnectedMap = Collections.newSetFromMap(new WeakHashMap<>()); private final Set hasConnectExceptionMap = Collections.newSetFromMap(new WeakHashMap<>()); - private final LongSupplier relativeNanosSupplier; + private final MockNioTransport.TransportThreadWatchdog transportThreadWatchdog; - TestEventHandler(Consumer exceptionHandler, Supplier selectorSupplier, LongSupplier relativeNanosSupplier) { + TestEventHandler(Consumer exceptionHandler, Supplier selectorSupplier, + MockNioTransport.TransportThreadWatchdog transportThreadWatchdog) { super(exceptionHandler, selectorSupplier); - this.relativeNanosSupplier = relativeNanosSupplier; + this.transportThreadWatchdog = transportThreadWatchdog; } @Override protected void acceptChannel(ServerChannelContext context) throws IOException { - long startTime = relativeNanosSupplier.getAsLong(); + final boolean registered = transportThreadWatchdog.register(); try { super.acceptChannel(context); } finally { - maybeLogElapsedTime(startTime); + if (registered) { + transportThreadWatchdog.unregister(); + } } } @Override protected void acceptException(ServerChannelContext context, Exception exception) { - long startTime = relativeNanosSupplier.getAsLong(); + final boolean registered = transportThreadWatchdog.register(); try { super.acceptException(context, exception); } finally { - maybeLogElapsedTime(startTime); + if (registered) { + transportThreadWatchdog.unregister(); + } } } @Override protected void handleRegistration(ChannelContext context) throws IOException { - long startTime = relativeNanosSupplier.getAsLong(); + final boolean registered = transportThreadWatchdog.register(); try { super.handleRegistration(context); } finally { - maybeLogElapsedTime(startTime); + if (registered) { + transportThreadWatchdog.unregister(); + } } } @Override protected void registrationException(ChannelContext context, Exception exception) { - long startTime = relativeNanosSupplier.getAsLong(); + final boolean registered = transportThreadWatchdog.register(); try { super.registrationException(context, exception); } finally { - maybeLogElapsedTime(startTime); + if (registered) { + transportThreadWatchdog.unregister(); + } } } public void handleConnect(SocketChannelContext context) throws IOException { assert hasConnectedMap.contains(context) == false : "handleConnect should only be called is a channel is not yet connected"; - long startTime = relativeNanosSupplier.getAsLong(); + final boolean registered = transportThreadWatchdog.register(); try { super.handleConnect(context); if (context.isConnectComplete()) { hasConnectedMap.add(context); } } finally { - maybeLogElapsedTime(startTime); + if (registered) { + transportThreadWatchdog.unregister(); + } } } public void connectException(SocketChannelContext context, Exception e) { assert hasConnectExceptionMap.contains(context) == false : "connectException should only called at maximum once per channel"; + final boolean registered = transportThreadWatchdog.register(); hasConnectExceptionMap.add(context); - long startTime = relativeNanosSupplier.getAsLong(); try { super.connectException(context, e); } finally { - maybeLogElapsedTime(startTime); + if (registered) { + transportThreadWatchdog.unregister(); + } } } @Override protected void handleRead(SocketChannelContext context) throws IOException { - long startTime = relativeNanosSupplier.getAsLong(); + final boolean registered = transportThreadWatchdog.register(); try { super.handleRead(context); } finally { - maybeLogElapsedTime(startTime); + if (registered) { + transportThreadWatchdog.unregister(); + } } } @Override protected void readException(SocketChannelContext context, Exception exception) { - long startTime = relativeNanosSupplier.getAsLong(); + final boolean registered = transportThreadWatchdog.register(); try { super.readException(context, exception); } finally { - maybeLogElapsedTime(startTime); + if (registered) { + transportThreadWatchdog.unregister(); + } } } @Override protected void handleWrite(SocketChannelContext context) throws IOException { - long startTime = relativeNanosSupplier.getAsLong(); + final boolean registered = transportThreadWatchdog.register(); try { super.handleWrite(context); } finally { - maybeLogElapsedTime(startTime); + if (registered) { + transportThreadWatchdog.unregister(); + } } } @Override protected void writeException(SocketChannelContext context, Exception exception) { - long startTime = relativeNanosSupplier.getAsLong(); + final boolean registered = transportThreadWatchdog.register(); try { super.writeException(context, exception); } finally { - maybeLogElapsedTime(startTime); + if (registered) { + transportThreadWatchdog.unregister(); + } } } @Override protected void handleTask(Runnable task) { - long startTime = relativeNanosSupplier.getAsLong(); + final boolean registered = transportThreadWatchdog.register(); try { super.handleTask(task); } finally { - maybeLogElapsedTime(startTime); + if (registered) { + transportThreadWatchdog.unregister(); + } } } @Override protected void taskException(Exception exception) { - long startTime = relativeNanosSupplier.getAsLong(); + final boolean registered = transportThreadWatchdog.register(); try { super.taskException(exception); } finally { - maybeLogElapsedTime(startTime); + if (registered) { + transportThreadWatchdog.unregister(); + } } } @Override protected void handleClose(ChannelContext context) throws IOException { - long startTime = relativeNanosSupplier.getAsLong(); + final boolean registered = transportThreadWatchdog.register(); try { super.handleClose(context); } finally { - maybeLogElapsedTime(startTime); + if (registered) { + transportThreadWatchdog.unregister(); + } } } @Override protected void closeException(ChannelContext context, Exception exception) { - long startTime = relativeNanosSupplier.getAsLong(); + final boolean registered = transportThreadWatchdog.register(); try { super.closeException(context, exception); } finally { - maybeLogElapsedTime(startTime); + if (registered) { + transportThreadWatchdog.unregister(); + } } } @Override protected void genericChannelException(ChannelContext context, Exception exception) { - long startTime = relativeNanosSupplier.getAsLong(); + final boolean registered = transportThreadWatchdog.register(); try { super.genericChannelException(context, exception); } finally { - maybeLogElapsedTime(startTime); - } - } - - private static final long WARN_THRESHOLD = 150; - - private void maybeLogElapsedTime(long startTime) { - long elapsedTime = TimeUnit.NANOSECONDS.toMillis(relativeNanosSupplier.getAsLong() - startTime); - if (elapsedTime > WARN_THRESHOLD) { - logger.warn(new ParameterizedMessage("Slow execution on network thread [{} milliseconds]", elapsedTime), - new RuntimeException("Slow exception on network thread")); + if (registered) { + transportThreadWatchdog.unregister(); + } } } } diff --git a/test/framework/src/test/java/org/elasticsearch/transport/nio/TestEventHandlerTests.java b/test/framework/src/test/java/org/elasticsearch/transport/nio/TestEventHandlerTests.java index 2a570eb59b6f6..424d4922f024e 100644 --- a/test/framework/src/test/java/org/elasticsearch/transport/nio/TestEventHandlerTests.java +++ b/test/framework/src/test/java/org/elasticsearch/transport/nio/TestEventHandlerTests.java @@ -27,6 +27,7 @@ import org.elasticsearch.nio.SocketChannelContext; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.MockLogAppender; +import org.elasticsearch.threadpool.ThreadPool; import java.util.HashMap; import java.util.Map; @@ -34,6 +35,7 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.LongSupplier; +import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.mock; public class TestEventHandlerTests extends ESTestCase { @@ -43,12 +45,12 @@ public class TestEventHandlerTests extends ESTestCase { public void setUp() throws Exception { super.setUp(); appender = new MockLogAppender(); - Loggers.addAppender(LogManager.getLogger(TestEventHandler.class), appender); + Loggers.addAppender(LogManager.getLogger(MockNioTransport.class), appender); appender.start(); } public void tearDown() throws Exception { - Loggers.removeAppender(LogManager.getLogger(TestEventHandler.class), appender); + Loggers.removeAppender(LogManager.getLogger(MockNioTransport.class), appender); appender.stop(); super.tearDown(); } @@ -65,7 +67,10 @@ public void testLogOnElapsedTime() throws Exception { } throw new IllegalStateException("Cannot update isStart"); }; - TestEventHandler eventHandler = new TestEventHandler((e) -> {}, () -> null, timeSupplier); + final ThreadPool threadPool = mock(ThreadPool.class); + doAnswer(i -> timeSupplier.getAsLong()).when(threadPool).relativeTimeInNanos(); + TestEventHandler eventHandler = + new TestEventHandler((e) -> {}, () -> null, new MockNioTransport.TransportThreadWatchdog(threadPool)); ServerChannelContext serverChannelContext = mock(ServerChannelContext.class); SocketChannelContext socketChannelContext = mock(SocketChannelContext.class); @@ -91,7 +96,7 @@ public void testLogOnElapsedTime() throws Exception { for (Map.Entry> entry : tests.entrySet()) { String message = "*Slow execution on network thread*"; MockLogAppender.LoggingExpectation slowExpectation = - new MockLogAppender.SeenEventExpectation(entry.getKey(), TestEventHandler.class.getCanonicalName(), Level.WARN, message); + new MockLogAppender.SeenEventExpectation(entry.getKey(), MockNioTransport.class.getCanonicalName(), Level.WARN, message); appender.addExpectation(slowExpectation); entry.getValue().run(); appender.assertAllExpectationsMatched(); From 4a9438762a562d20e938d2ea82538805f33e85b1 Mon Sep 17 00:00:00 2001 From: David Kyle Date: Wed, 22 May 2019 14:56:14 +0100 Subject: [PATCH 044/224] Mute Data Frame integration tests Relates to https://github.com/elastic/elasticsearch/issues/42344 --- .../xpack/dataframe/integration/DataFrameTransformIT.java | 1 + .../xpack/dataframe/integration/DataFrameAuditorIT.java | 2 ++ .../dataframe/integration/DataFrameConfigurationIndexIT.java | 2 ++ .../xpack/dataframe/integration/DataFrameGetAndGetStatsIT.java | 2 ++ .../xpack/dataframe/integration/DataFrameMetaDataIT.java | 2 ++ .../xpack/dataframe/integration/DataFramePivotRestIT.java | 2 ++ .../xpack/dataframe/integration/DataFrameTaskFailedStateIT.java | 2 ++ .../dataframe/integration/DataFrameTransformProgressIT.java | 2 ++ .../xpack/dataframe/integration/DataFrameUsageIT.java | 2 ++ 9 files changed, 17 insertions(+) diff --git a/x-pack/plugin/data-frame/qa/multi-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTransformIT.java b/x-pack/plugin/data-frame/qa/multi-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTransformIT.java index ecb2025c6a9c5..cc2e8c4436e06 100644 --- a/x-pack/plugin/data-frame/qa/multi-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTransformIT.java +++ b/x-pack/plugin/data-frame/qa/multi-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTransformIT.java @@ -28,6 +28,7 @@ public void cleanTransforms() { cleanUp(); } + @AwaitsFix( bugUrl = "https://github.com/elastic/elasticsearch/issues/42344") public void testDataFrameTransformCrud() throws Exception { createReviewsIndex(); diff --git a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameAuditorIT.java b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameAuditorIT.java index 9884c9bb6793b..7dc79c1ae8fbe 100644 --- a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameAuditorIT.java +++ b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameAuditorIT.java @@ -6,6 +6,7 @@ package org.elasticsearch.xpack.dataframe.integration; +import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.client.Request; import org.elasticsearch.xpack.dataframe.persistence.DataFrameInternalIndex; import org.junit.Before; @@ -22,6 +23,7 @@ import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.is; +@LuceneTestCase.AwaitsFix( bugUrl = "https://github.com/elastic/elasticsearch/issues/42344") public class DataFrameAuditorIT extends DataFrameRestTestCase { private static final String TEST_USER_NAME = "df_admin_plus_data"; diff --git a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameConfigurationIndexIT.java b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameConfigurationIndexIT.java index 681599331c8af..d7e12cf2bee4d 100644 --- a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameConfigurationIndexIT.java +++ b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameConfigurationIndexIT.java @@ -8,6 +8,7 @@ import org.apache.http.entity.ContentType; import org.apache.http.entity.StringEntity; +import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseException; @@ -22,6 +23,7 @@ import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +@LuceneTestCase.AwaitsFix( bugUrl = "https://github.com/elastic/elasticsearch/issues/42344") public class DataFrameConfigurationIndexIT extends DataFrameRestTestCase { /** diff --git a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameGetAndGetStatsIT.java b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameGetAndGetStatsIT.java index d9927cd09ed8f..9bac6ca0b4049 100644 --- a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameGetAndGetStatsIT.java +++ b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameGetAndGetStatsIT.java @@ -6,6 +6,7 @@ package org.elasticsearch.xpack.dataframe.integration; +import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.client.Request; import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.xpack.core.dataframe.DataFrameField; @@ -21,6 +22,7 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; +@LuceneTestCase.AwaitsFix( bugUrl = "https://github.com/elastic/elasticsearch/issues/42344") public class DataFrameGetAndGetStatsIT extends DataFrameRestTestCase { private static final String TEST_USER_NAME = "df_user"; diff --git a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameMetaDataIT.java b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameMetaDataIT.java index 26a957ea055c2..5b95d1daead53 100644 --- a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameMetaDataIT.java +++ b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameMetaDataIT.java @@ -6,6 +6,7 @@ package org.elasticsearch.xpack.dataframe.integration; +import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.Version; import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; @@ -15,6 +16,7 @@ import java.io.IOException; import java.util.Map; +@LuceneTestCase.AwaitsFix( bugUrl = "https://github.com/elastic/elasticsearch/issues/42344") public class DataFrameMetaDataIT extends DataFrameRestTestCase { private boolean indicesCreated = false; diff --git a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFramePivotRestIT.java b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFramePivotRestIT.java index 770eaec7bd141..dab7e819881d2 100644 --- a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFramePivotRestIT.java +++ b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFramePivotRestIT.java @@ -6,6 +6,7 @@ package org.elasticsearch.xpack.dataframe.integration; +import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.client.Request; import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.junit.Before; @@ -21,6 +22,7 @@ import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; +@LuceneTestCase.AwaitsFix( bugUrl = "https://github.com/elastic/elasticsearch/issues/42344") public class DataFramePivotRestIT extends DataFrameRestTestCase { private static final String TEST_USER_NAME = "df_admin_plus_data"; diff --git a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTaskFailedStateIT.java b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTaskFailedStateIT.java index 96aeeda8755f4..7b63644dd34ad 100644 --- a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTaskFailedStateIT.java +++ b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTaskFailedStateIT.java @@ -6,6 +6,7 @@ package org.elasticsearch.xpack.dataframe.integration; +import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.client.ResponseException; import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.rest.RestStatus; @@ -19,6 +20,7 @@ import static org.hamcrest.CoreMatchers.nullValue; import static org.hamcrest.Matchers.equalTo; +@LuceneTestCase.AwaitsFix( bugUrl = "https://github.com/elastic/elasticsearch/issues/42344") public class DataFrameTaskFailedStateIT extends DataFrameRestTestCase { public void testDummy() { diff --git a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTransformProgressIT.java b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTransformProgressIT.java index 194d35e8ba636..7d0fb179a2228 100644 --- a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTransformProgressIT.java +++ b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTransformProgressIT.java @@ -6,6 +6,7 @@ package org.elasticsearch.xpack.dataframe.integration; +import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; import org.elasticsearch.action.bulk.BulkRequestBuilder; import org.elasticsearch.action.bulk.BulkResponse; @@ -45,6 +46,7 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; +@LuceneTestCase.AwaitsFix( bugUrl = "https://github.com/elastic/elasticsearch/issues/42344") public class DataFrameTransformProgressIT extends ESIntegTestCase { protected void createReviewsIndex() throws Exception { diff --git a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameUsageIT.java b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameUsageIT.java index 4f209c5a9f3f4..f98fa6a271365 100644 --- a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameUsageIT.java +++ b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameUsageIT.java @@ -6,6 +6,7 @@ package org.elasticsearch.xpack.dataframe.integration; +import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; import org.elasticsearch.common.xcontent.support.XContentMapValues; @@ -22,6 +23,7 @@ import static org.elasticsearch.xpack.core.dataframe.DataFrameField.INDEX_DOC_TYPE; import static org.elasticsearch.xpack.dataframe.DataFrameFeatureSet.PROVIDED_STATS; +@LuceneTestCase.AwaitsFix( bugUrl = "https://github.com/elastic/elasticsearch/issues/42344") public class DataFrameUsageIT extends DataFrameRestTestCase { private boolean indicesCreated = false; From a568c3c5dac681fc93cff6c64204c3d00b3c1bb1 Mon Sep 17 00:00:00 2001 From: David Kyle Date: Wed, 22 May 2019 15:35:08 +0100 Subject: [PATCH 045/224] [ML Data Frame] Persist data frame after state changes (#42347) --- .../transforms/DataFrameTransformTask.java | 28 +++++++------------ 1 file changed, 10 insertions(+), 18 deletions(-) diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformTask.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformTask.java index 9df6b5e3ab337..926f233c454d1 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformTask.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformTask.java @@ -444,7 +444,6 @@ static class ClientDataFrameIndexer extends DataFrameIndexer { private final DataFrameTransformsCheckpointService transformsCheckpointService; private final String transformId; private final DataFrameTransformTask transformTask; - private volatile DataFrameIndexerTransformStats previouslyPersistedStats = null; private final AtomicInteger failureCount; // Keeps track of the last exception that was written to our audit, keeps us from spamming the audit index private volatile String lastAuditedExceptionMessage = null; @@ -552,25 +551,18 @@ protected void doSaveState(IndexerState indexerState, Map positi // only every-so-often when doing the bulk indexing calls. See AsyncTwoPhaseIndexer#onBulkResponse for current periodicity ActionListener> updateClusterStateListener = ActionListener.wrap( task -> { - // Only persist the stats if something has actually changed - if (previouslyPersistedStats == null || previouslyPersistedStats.equals(getStats()) == false) { - transformsConfigManager.putOrUpdateTransformStats( - new DataFrameTransformStateAndStats(transformId, state, getStats(), - DataFrameTransformCheckpointingInfo.EMPTY), // TODO should this be null + transformsConfigManager.putOrUpdateTransformStats( + new DataFrameTransformStateAndStats(transformId, state, getStats(), + DataFrameTransformCheckpointingInfo.EMPTY), // TODO should this be null ActionListener.wrap( - r -> { - previouslyPersistedStats = getStats(); - next.run(); - }, - statsExc -> { - logger.error("Updating stats of transform [" + transformConfig.getId() + "] failed", statsExc); - next.run(); - } + r -> { + next.run(); + }, + statsExc -> { + logger.error("Updating stats of transform [" + transformConfig.getId() + "] failed", statsExc); + next.run(); + } )); - // The stats that we have previously written to the doc is the same as as it is now, no need to update it - } else { - next.run(); - } }, exc -> { logger.error("Updating persistent state of transform [" + transformConfig.getId() + "] failed", exc); From c1d980cf3a37ae803d2e2ef6d87450039bf0ff7c Mon Sep 17 00:00:00 2001 From: Yannick Welsch Date: Wed, 22 May 2019 17:31:59 +0200 Subject: [PATCH 046/224] Fix testAutoFollowManyIndices On a slow CI worker, the test was failing an assertion. Closes #41234 --- .../java/org/elasticsearch/xpack/ccr/AutoFollowIT.java | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/AutoFollowIT.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/AutoFollowIT.java index 4fdb1fa00ab9a..0bcb3daac6284 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/AutoFollowIT.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/AutoFollowIT.java @@ -31,6 +31,7 @@ import java.util.Arrays; import java.util.Collections; import java.util.List; +import java.util.concurrent.TimeUnit; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.hamcrest.Matchers.equalTo; @@ -149,7 +150,7 @@ public void testAutoFollowManyIndices() throws Exception { AutoFollowMetadata autoFollowMetadata = metaData[0].custom(AutoFollowMetadata.TYPE); assertThat(autoFollowMetadata.getFollowedLeaderIndexUUIDs().get("my-pattern"), hasSize((int) expectedVal1)); assertThat(autoFollowStats[0].getNumberOfSuccessfulFollowIndices(), equalTo(expectedVal1)); - }); + }, 30, TimeUnit.SECONDS); } catch (AssertionError ae) { logger.warn("indices={}", Arrays.toString(metaData[0].indices().keys().toArray(String.class))); logger.warn("auto follow stats={}", Strings.toString(autoFollowStats[0])); @@ -168,7 +169,7 @@ public void testAutoFollowManyIndices() throws Exception { AutoFollowMetadata autoFollowMetadata = metaData[0].custom(AutoFollowMetadata.TYPE); assertThat(autoFollowMetadata.getFollowedLeaderIndexUUIDs().get("my-pattern"), nullValue()); assertThat(autoFollowStats[0].getAutoFollowedClusters().size(), equalTo(0)); - }); + }, 30, TimeUnit.SECONDS); } catch (AssertionError ae) { logger.warn("indices={}", Arrays.toString(metaData[0].indices().keys().toArray(String.class))); logger.warn("auto follow stats={}", Strings.toString(autoFollowStats[0])); @@ -199,7 +200,7 @@ public void testAutoFollowManyIndices() throws Exception { // Ensure that there are no auto follow errors: // (added specifically to see that there are no leader indices auto followed multiple times) assertThat(autoFollowStats[0].getRecentAutoFollowErrors().size(), equalTo(0)); - }); + }, 30, TimeUnit.SECONDS); } catch (AssertionError ae) { logger.warn("indices={}", Arrays.toString(metaData[0].indices().keys().toArray(String.class))); logger.warn("auto follow stats={}", Strings.toString(autoFollowStats[0])); From 145c3bec7898f9a4e9bb43ade48d9103d8e30d88 Mon Sep 17 00:00:00 2001 From: Mengwei Ding Date: Wed, 22 May 2019 08:53:25 -0700 Subject: [PATCH 047/224] Add .code_internal-* index pattern to kibana user (#42247) --- .../xpack/core/security/authz/store/ReservedRolesStore.java | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStore.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStore.java index 2c86971b529f9..49d4159f13968 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStore.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStore.java @@ -118,8 +118,9 @@ private static Map initializeReservedRoles() { .indices(".monitoring-*").privileges("read", "read_cross_cluster").build(), RoleDescriptor.IndicesPrivileges.builder() .indices(".management-beats").privileges("create_index", "read", "write").build(), + // .code_internal-* is for Code's internal worker queue index creation. RoleDescriptor.IndicesPrivileges.builder() - .indices(".code-*").privileges("all").build(), + .indices(".code-*", ".code_internal-*").privileges("all").build(), }, null, new ConditionalClusterPrivilege[] { new ManageApplicationPrivileges(Collections.singleton("kibana-*")) }, From d5888b23d73a245f40fa124a39d474b34c042156 Mon Sep 17 00:00:00 2001 From: mushao999 Date: Thu, 23 May 2019 00:05:48 +0800 Subject: [PATCH 048/224] Fix alpha version error message (#40406) --- server/src/main/java/org/elasticsearch/Version.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/server/src/main/java/org/elasticsearch/Version.java b/server/src/main/java/org/elasticsearch/Version.java index 5089a7fe0cec9..ce0fc1559c18b 100644 --- a/server/src/main/java/org/elasticsearch/Version.java +++ b/server/src/main/java/org/elasticsearch/Version.java @@ -257,7 +257,7 @@ public static Version fromString(String version) { if (buildStr.startsWith("alpha")) { assert rawMajor >= 5 : "major must be >= 5 but was " + major; build = Integer.parseInt(buildStr.substring(5)); - assert build < 25 : "expected a beta build but " + build + " >= 25"; + assert build < 25 : "expected a alpha build but " + build + " >= 25"; } else if (buildStr.startsWith("Beta") || buildStr.startsWith("beta")) { build = betaOffset + Integer.parseInt(buildStr.substring(4)); assert build < 50 : "expected a beta build but " + build + " >= 50"; From 148df31639a983058b758f5eef2c9df2f9346e94 Mon Sep 17 00:00:00 2001 From: Julie Tibshirani Date: Wed, 22 May 2019 09:19:14 -0700 Subject: [PATCH 049/224] Fix a rendering issue in the geo envelope docs. (#42332) Previously the formatting information didn't display in the docs, and the sentence just rendered as "bounding rectangle in the format :". --- docs/reference/mapping/types/geo-shape.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/reference/mapping/types/geo-shape.asciidoc b/docs/reference/mapping/types/geo-shape.asciidoc index 1cf85e305a95d..26f59e1058c09 100644 --- a/docs/reference/mapping/types/geo-shape.asciidoc +++ b/docs/reference/mapping/types/geo-shape.asciidoc @@ -615,7 +615,7 @@ POST /example/_doc Elasticsearch supports an `envelope` type, which consists of coordinates for upper left and lower right points of the shape to represent a -bounding rectangle in the format [[minLon, maxLat],[maxLon, minLat]]: +bounding rectangle in the format `[[minLon, maxLat], [maxLon, minLat]]`: [source,js] -------------------------------------------------- From 943344fa48d8d1f83776863250af8b8fb52417fd Mon Sep 17 00:00:00 2001 From: swstepp <49322243+swstepp@users.noreply.github.com> Date: Wed, 22 May 2019 10:44:41 -0600 Subject: [PATCH 050/224] Fix grammar problem in stemming reference. (#42148) --- docs/reference/how-to/recipes/stemming.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/reference/how-to/recipes/stemming.asciidoc b/docs/reference/how-to/recipes/stemming.asciidoc index e8c213646578c..d7ddda116327e 100644 --- a/docs/reference/how-to/recipes/stemming.asciidoc +++ b/docs/reference/how-to/recipes/stemming.asciidoc @@ -171,7 +171,7 @@ the query need to be matched exactly while other parts should still take stemming into account? Fortunately, the `query_string` and `simple_query_string` queries have a feature -that solve this exact problem: `quote_field_suffix`. This tell Elasticsearch +that solves this exact problem: `quote_field_suffix`. This tells Elasticsearch that the words that appear in between quotes are to be redirected to a different field, see below: From 458aa6409f37ea636c1b099c99ff4369599cb17c Mon Sep 17 00:00:00 2001 From: Jake Landis Date: Wed, 22 May 2019 11:58:50 -0500 Subject: [PATCH 051/224] add 7_3 as version (#42368) --- server/src/main/java/org/elasticsearch/Version.java | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/server/src/main/java/org/elasticsearch/Version.java b/server/src/main/java/org/elasticsearch/Version.java index ce0fc1559c18b..e3381a3384c0e 100644 --- a/server/src/main/java/org/elasticsearch/Version.java +++ b/server/src/main/java/org/elasticsearch/Version.java @@ -92,6 +92,8 @@ public class Version implements Comparable, ToXContentFragment { public static final Version V_7_1_1 = new Version(V_7_1_1_ID, org.apache.lucene.util.Version.LUCENE_8_0_0); public static final int V_7_2_0_ID = 7020099; public static final Version V_7_2_0 = new Version(V_7_2_0_ID, org.apache.lucene.util.Version.LUCENE_8_0_0); + public static final int V_7_3_0_ID = 7030099; + public static final Version V_7_3_0 = new Version(V_7_3_0_ID, org.apache.lucene.util.Version.LUCENE_8_0_0); public static final int V_8_0_0_ID = 8000099; public static final Version V_8_0_0 = new Version(V_8_0_0_ID, org.apache.lucene.util.Version.LUCENE_8_1_0); public static final Version CURRENT = V_8_0_0; @@ -110,6 +112,8 @@ public static Version fromId(int id) { switch (id) { case V_8_0_0_ID: return V_8_0_0; + case V_7_3_0_ID: + return V_7_3_0; case V_7_2_0_ID: return V_7_2_0; case V_7_1_1_ID: From d49d9b53d6e0ac8acda61913489fa55e5118f0c5 Mon Sep 17 00:00:00 2001 From: Yannick Welsch Date: Wed, 22 May 2019 19:07:56 +0200 Subject: [PATCH 052/224] Ensure testAckedIndexing uses disruption index settings AbstractDisruptionTestCase set a lower global checkpoint sync interval setting, but this was ignored by testAckedIndexing, which has led to spurious test failures Relates #41068, #38931 --- .../indices/recovery/PeerRecoveryTargetService.java | 2 +- .../java/org/elasticsearch/discovery/ClusterDisruptionIT.java | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java b/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java index 1ba854fdb2b13..6b1a893667f2c 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java @@ -356,7 +356,7 @@ private StartRecoveryRequest getStartRecoveryRequest(final RecoveryTarget recove logger.trace("{} preparing for file-based recovery from [{}]", recoveryTarget.shardId(), recoveryTarget.sourceNode()); } else { logger.trace( - "{} preparing for sequence-number-based recovery starting at local checkpoint [{}] from [{}]", + "{} preparing for sequence-number-based recovery starting at sequence number [{}] from [{}]", recoveryTarget.shardId(), startingSeqNo, recoveryTarget.sourceNode()); diff --git a/server/src/test/java/org/elasticsearch/discovery/ClusterDisruptionIT.java b/server/src/test/java/org/elasticsearch/discovery/ClusterDisruptionIT.java index ad3b8006ed0c3..5bc5efc96c661 100644 --- a/server/src/test/java/org/elasticsearch/discovery/ClusterDisruptionIT.java +++ b/server/src/test/java/org/elasticsearch/discovery/ClusterDisruptionIT.java @@ -119,6 +119,7 @@ public void testAckedIndexing() throws Exception { assertAcked(prepareCreate("test") .setSettings(Settings.builder() + .put(indexSettings()) .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1 + randomInt(2)) .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, randomInt(2)) )); From 35c4c9efb0705e1e6b080d16d78ed0c5967b80e6 Mon Sep 17 00:00:00 2001 From: Zachary Tong Date: Tue, 21 May 2019 10:25:23 -0400 Subject: [PATCH 053/224] Re-mute all ml_datafeed_crud rolling upgrade tests AwaitsFix https://github.com/elastic/elasticsearch/issues/42258 Thought this was fixed, but throwing deprecation warnings at an unexpected time so putting this back on mute until we figure it out. --- .../rest-api-spec/test/mixed_cluster/40_ml_datafeed_crud.yml | 5 +++++ .../rest-api-spec/test/old_cluster/40_ml_datafeed_crud.yml | 5 +++++ .../test/upgraded_cluster/40_ml_datafeed_crud.yml | 4 ++++ 3 files changed, 14 insertions(+) diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/40_ml_datafeed_crud.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/40_ml_datafeed_crud.yml index 2ff9b08e9b13f..4d2254a1ba8c3 100644 --- a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/40_ml_datafeed_crud.yml +++ b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/40_ml_datafeed_crud.yml @@ -1,3 +1,8 @@ +setup: + - skip: + version: "all" + reason: "AwaitsFix https://github.com/elastic/elasticsearch/issues/42258" + --- "Test old cluster datafeed without aggs": - do: diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/40_ml_datafeed_crud.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/40_ml_datafeed_crud.yml index 4918dde9ba899..62a9d33a511e6 100644 --- a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/40_ml_datafeed_crud.yml +++ b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/40_ml_datafeed_crud.yml @@ -1,3 +1,8 @@ +setup: + - skip: + version: "all" + reason: "AwaitsFix https://github.com/elastic/elasticsearch/issues/42258" + --- "Put job and datafeed without aggs in old cluster": diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/40_ml_datafeed_crud.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/40_ml_datafeed_crud.yml index 5dc71ecb0679e..4b742e10de61f 100644 --- a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/40_ml_datafeed_crud.yml +++ b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/40_ml_datafeed_crud.yml @@ -1,4 +1,8 @@ setup: + - skip: + version: "all" + reason: "AwaitsFix https://github.com/elastic/elasticsearch/issues/42258" + - do: cluster.health: wait_for_status: green From c9d04ccb3a13eeaccdf056df0243477d06da013d Mon Sep 17 00:00:00 2001 From: Alpar Torok Date: Wed, 22 May 2019 22:00:51 +0300 Subject: [PATCH 054/224] Make packer cache branches explicit (#41990) Before this change we would recurse to cache bwc versions. This proved to be problematic due to the number of steps it was generating taking too long. Also this required tricky maintenance to break the recursion for old branches we don't really care about. With this change we now cache specific branches only. --- .ci/packer_cache.sh | 1 + distribution/bwc/build.gradle | 7 +++++-- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/.ci/packer_cache.sh b/.ci/packer_cache.sh index 4533213920c3a..adc4f80d4960d 100755 --- a/.ci/packer_cache.sh +++ b/.ci/packer_cache.sh @@ -22,3 +22,4 @@ export JAVA8_HOME="${HOME}"/.java/java8 export JAVA11_HOME="${HOME}"/.java/java11 export JAVA12_HOME="${HOME}"/.java/openjdk12 ./gradlew --parallel clean --scan -Porg.elasticsearch.acceptScanTOS=true -s resolveAllDependencies + diff --git a/distribution/bwc/build.gradle b/distribution/bwc/build.gradle index 87644fb7f6785..1a4e4161418ab 100644 --- a/distribution/bwc/build.gradle +++ b/distribution/bwc/build.gradle @@ -239,12 +239,15 @@ bwcVersions.forPreviousUnreleased { BwcVersions.UnreleasedVersionInfo unreleased createBuildBwcTask(projectName, "${baseDir}/${projectName}", projectArtifact) } - createRunBwcGradleTask("resolveAllBwcDependencies") { args 'resolveAllDependencies' } - resolveAllDependencies.dependsOn resolveAllBwcDependencies + Version currentVersion = Version.fromString(version) + if (currentVersion.getMinor() == 0 && currentVersion.getRevision() == 0) { + // We only want to resolve dependencies for live versions of master, without cascading this to older versions + resolveAllDependencies.dependsOn resolveAllBwcDependencies + } for (e in artifactFiles) { String projectName = e.key From da77b97c56c948fea5909e60170a1680c791ce1b Mon Sep 17 00:00:00 2001 From: David Kyle Date: Thu, 23 May 2019 08:42:06 +0100 Subject: [PATCH 055/224] [ML Data Frame] Account for completed data frames in test (#42351) When asserting on the checkpoint value if the DF has completed the checkpoint will be 1 else 0. Similarly state may be started or indexing. Closes #42309 --- .../rest-api-spec/test/data_frame/transforms_stats.yml | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_stats.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_stats.yml index f552e4710c781..79aa14cb6f628 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_stats.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_stats.yml @@ -42,9 +42,6 @@ teardown: --- "Test get transform stats": - - skip: - version: "all" - reason: "AwaitsFix https://github.com/elastic/elasticsearch/issues/42309" - do: data_frame.get_data_frame_transform_stats: transform_id: "airline-transform-stats" @@ -52,7 +49,7 @@ teardown: - match: { transforms.0.id: "airline-transform-stats" } - match: { transforms.0.state.indexer_state: "/started|indexing/" } - match: { transforms.0.state.task_state: "started" } - - match: { transforms.0.state.checkpoint: 0 } + - lte: { transforms.0.state.checkpoint: 1 } - lte: { transforms.0.stats.pages_processed: 1 } - match: { transforms.0.stats.documents_processed: 0 } - match: { transforms.0.stats.documents_indexed: 0 } @@ -149,9 +146,6 @@ teardown: --- "Test get multiple transform stats where one does not have a task": - - skip: - version: "all" - reason: "AwaitsFix https://github.com/elastic/elasticsearch/issues/42309" - do: data_frame.put_data_frame_transform: transform_id: "airline-transform-stats-dos" @@ -169,7 +163,7 @@ teardown: transform_id: "*" - match: { count: 2 } - match: { transforms.0.id: "airline-transform-stats" } - - match: { transforms.0.state.indexer_state: "started" } + - match: { transforms.0.state.indexer_state: "/started|indexing/" } - match: { transforms.1.id: "airline-transform-stats-dos" } - match: { transforms.1.state.indexer_state: "stopped" } From e75ff0c748e6b68232c2b08e19ac4a4934918264 Mon Sep 17 00:00:00 2001 From: Marios Trivyzas Date: Thu, 23 May 2019 10:10:07 +0200 Subject: [PATCH 056/224] Allow `fields` to be set to `*` (#42301) Allow for SimpleQueryString, QueryString and MultiMatchQuery to set the `fields` parameter to the wildcard `*`. If so, set the leniency to `true`, to achieve the same behaviour as from the `"default_field" : "*" setting. Furthermore, check if `*` is in the list of the `default_field` but not necessarily as the 1st element. Closes: #39577 --- .../index/query/MultiMatchQueryBuilder.java | 15 +- .../index/query/QueryStringQueryBuilder.java | 9 +- .../index/query/SimpleQueryStringBuilder.java | 12 +- .../index/search/QueryParserHelper.java | 8 ++ .../query/MultiMatchQueryBuilderTests.java | 128 ++++++++++++------ .../query/QueryStringQueryBuilderTests.java | 55 +++++++- .../query/SimpleQueryStringBuilderTests.java | 79 ++++++++--- 7 files changed, 227 insertions(+), 79 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/index/query/MultiMatchQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/MultiMatchQueryBuilder.java index 5537df2fdf874..7827c032ea0d7 100644 --- a/server/src/main/java/org/elasticsearch/index/query/MultiMatchQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/MultiMatchQueryBuilder.java @@ -28,7 +28,6 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.unit.Fuzziness; import org.elasticsearch.common.xcontent.DeprecationHandler; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; @@ -783,18 +782,20 @@ protected Query doToQuery(QueryShardContext context) throws IOException { multiMatchQuery.setTranspositions(fuzzyTranspositions); Map newFieldsBoosts; + boolean isAllField; if (fieldsBoosts.isEmpty()) { // no fields provided, defaults to index.query.default_field List defaultFields = context.defaultFields(); - boolean isAllField = defaultFields.size() == 1 && Regex.isMatchAllPattern(defaultFields.get(0)); - if (isAllField && lenient == null) { - // Sets leniency to true if not explicitly - // set in the request - multiMatchQuery.setLenient(true); - } newFieldsBoosts = QueryParserHelper.resolveMappingFields(context, QueryParserHelper.parseFieldsAndWeights(defaultFields)); + isAllField = QueryParserHelper.hasAllFieldsWildcard(defaultFields); } else { newFieldsBoosts = QueryParserHelper.resolveMappingFields(context, fieldsBoosts); + isAllField = QueryParserHelper.hasAllFieldsWildcard(fieldsBoosts.keySet()); + } + if (isAllField && lenient == null) { + // Sets leniency to true if not explicitly + // set in the request + multiMatchQuery.setLenient(true); } return multiMatchQuery.parse(type, newFieldsBoosts, value, minimumShouldMatch); } diff --git a/server/src/main/java/org/elasticsearch/index/query/QueryStringQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/QueryStringQueryBuilder.java index f129ccbec7254..1d1d139ceef1c 100644 --- a/server/src/main/java/org/elasticsearch/index/query/QueryStringQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/QueryStringQueryBuilder.java @@ -847,11 +847,14 @@ protected Query doToQuery(QueryShardContext context) throws IOException { } } else if (fieldsAndWeights.size() > 0) { final Map resolvedFields = QueryParserHelper.resolveMappingFields(context, fieldsAndWeights); - queryParser = new QueryStringQueryParser(context, resolvedFields, isLenient); + if (QueryParserHelper.hasAllFieldsWildcard(fieldsAndWeights.keySet())) { + queryParser = new QueryStringQueryParser(context, resolvedFields, lenient == null ? true : lenient); + } else { + queryParser = new QueryStringQueryParser(context, resolvedFields, isLenient); + } } else { List defaultFields = context.defaultFields(); - boolean isAllField = defaultFields.size() == 1 && Regex.isMatchAllPattern(defaultFields.get(0)); - if (isAllField) { + if (QueryParserHelper.hasAllFieldsWildcard(defaultFields)) { queryParser = new QueryStringQueryParser(context, lenient == null ? true : lenient); } else { final Map resolvedFields = QueryParserHelper.resolveMappingFields(context, diff --git a/server/src/main/java/org/elasticsearch/index/query/SimpleQueryStringBuilder.java b/server/src/main/java/org/elasticsearch/index/query/SimpleQueryStringBuilder.java index bd74d34196345..beae19a4403ac 100644 --- a/server/src/main/java/org/elasticsearch/index/query/SimpleQueryStringBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/SimpleQueryStringBuilder.java @@ -28,7 +28,6 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.lucene.search.Queries; -import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.search.QueryParserHelper; @@ -399,16 +398,19 @@ public SimpleQueryStringBuilder fuzzyTranspositions(boolean fuzzyTranspositions) protected Query doToQuery(QueryShardContext context) throws IOException { Settings newSettings = new Settings(settings); final Map resolvedFieldsAndWeights; + boolean isAllField; if (fieldsAndWeights.isEmpty() == false) { resolvedFieldsAndWeights = QueryParserHelper.resolveMappingFields(context, fieldsAndWeights); + isAllField = QueryParserHelper.hasAllFieldsWildcard(fieldsAndWeights.keySet()); } else { List defaultFields = context.defaultFields(); - boolean isAllField = defaultFields.size() == 1 && Regex.isMatchAllPattern(defaultFields.get(0)); - if (isAllField) { - newSettings.lenient(lenientSet ? settings.lenient() : true); - } resolvedFieldsAndWeights = QueryParserHelper.resolveMappingFields(context, QueryParserHelper.parseFieldsAndWeights(defaultFields)); + isAllField = QueryParserHelper.hasAllFieldsWildcard(defaultFields); + } + + if (isAllField) { + newSettings.lenient(lenientSet ? settings.lenient() : true); } final SimpleQueryStringQueryParser sqp; diff --git a/server/src/main/java/org/elasticsearch/index/search/QueryParserHelper.java b/server/src/main/java/org/elasticsearch/index/search/QueryParserHelper.java index adc1691608b23..3acf2929687c5 100644 --- a/server/src/main/java/org/elasticsearch/index/search/QueryParserHelper.java +++ b/server/src/main/java/org/elasticsearch/index/search/QueryParserHelper.java @@ -161,4 +161,12 @@ private static void checkForTooManyFields(Map fields, QueryShardC throw new IllegalArgumentException("field expansion matches too many fields, limit: " + limit + ", got: " + fields.size()); } } + + /** + * Returns true if any of the fields is the wildcard {@code *}, false otherwise. + * @param fields A collection of field names + */ + public static boolean hasAllFieldsWildcard(Collection fields) { + return fields.stream().anyMatch(Regex::isMatchAllPattern); + } } diff --git a/server/src/test/java/org/elasticsearch/index/query/MultiMatchQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/MultiMatchQueryBuilderTests.java index ab9b3c732135d..6590a5609353a 100644 --- a/server/src/test/java/org/elasticsearch/index/query/MultiMatchQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/MultiMatchQueryBuilderTests.java @@ -55,6 +55,7 @@ import static org.hamcrest.CoreMatchers.anyOf; import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.CoreMatchers.hasItems; import static org.hamcrest.CoreMatchers.instanceOf; import static org.hamcrest.collection.IsCollectionWithSize.hasSize; @@ -409,52 +410,79 @@ public void testToFuzzyQuery() throws Exception { public void testDefaultField() throws Exception { QueryShardContext context = createShardContext(); MultiMatchQueryBuilder builder = new MultiMatchQueryBuilder("hello"); - // should pass because we set lenient to true when default field is `*` + // default value `*` sets leniency to true Query query = builder.toQuery(context); - assertThat(query, instanceOf(DisjunctionMaxQuery.class)); - - context.getIndexSettings().updateIndexMetaData( - newIndexMeta("index", context.getIndexSettings().getSettings(), - Settings.builder().putList("index.query.default_field", STRING_FIELD_NAME, STRING_FIELD_NAME_2 + "^5") - .build()) - ); - - MultiMatchQueryBuilder qb = new MultiMatchQueryBuilder("hello"); - query = qb.toQuery(context); - DisjunctionMaxQuery expected = new DisjunctionMaxQuery( - Arrays.asList( - new TermQuery(new Term(STRING_FIELD_NAME, "hello")), - new BoostQuery(new TermQuery(new Term(STRING_FIELD_NAME_2, "hello")), 5.0f) - ), 0.0f - ); - assertEquals(expected, query); + assertQueryWithAllFieldsWildcard(query); + + try { + // `*` is in the list of the default_field => leniency set to true + context.getIndexSettings().updateIndexMetaData( + newIndexMeta("index", context.getIndexSettings().getSettings(), Settings.builder().putList("index.query.default_field", + STRING_FIELD_NAME, "*", STRING_FIELD_NAME_2).build()) + ); + query = new MultiMatchQueryBuilder("hello") + .toQuery(context); + assertQueryWithAllFieldsWildcard(query); + + context.getIndexSettings().updateIndexMetaData( + newIndexMeta("index", context.getIndexSettings().getSettings(), + Settings.builder().putList("index.query.default_field", STRING_FIELD_NAME, STRING_FIELD_NAME_2 + "^5") + .build()) + ); + MultiMatchQueryBuilder qb = new MultiMatchQueryBuilder("hello"); + query = qb.toQuery(context); + DisjunctionMaxQuery expected = new DisjunctionMaxQuery( + Arrays.asList( + new TermQuery(new Term(STRING_FIELD_NAME, "hello")), + new BoostQuery(new TermQuery(new Term(STRING_FIELD_NAME_2, "hello")), 5.0f) + ), 0.0f + ); + assertEquals(expected, query); + + context.getIndexSettings().updateIndexMetaData( + newIndexMeta("index", context.getIndexSettings().getSettings(), + Settings.builder().putList("index.query.default_field", + STRING_FIELD_NAME, STRING_FIELD_NAME_2 + "^5", INT_FIELD_NAME).build()) + ); + // should fail because lenient defaults to false + IllegalArgumentException exc = expectThrows(IllegalArgumentException.class, () -> qb.toQuery(context)); + assertThat(exc, instanceOf(NumberFormatException.class)); + assertThat(exc.getMessage(), equalTo("For input string: \"hello\"")); + + // explicitly sets lenient + qb.lenient(true); + query = qb.toQuery(context); + expected = new DisjunctionMaxQuery( + Arrays.asList( + new TermQuery(new Term(STRING_FIELD_NAME, "hello")), + new BoostQuery(new TermQuery(new Term(STRING_FIELD_NAME_2, "hello")), 5.0f), + new MatchNoDocsQuery("failed [mapped_int] query, caused by number_format_exception:[For input string: \"hello\"]") + ), 0.0f + ); + assertEquals(expected, query); + + } finally { + // Reset to the default value + context.getIndexSettings().updateIndexMetaData( + newIndexMeta("index", context.getIndexSettings().getSettings(), + Settings.builder().putNull("index.query.default_field").build()) + ); + } + } - context.getIndexSettings().updateIndexMetaData( - newIndexMeta("index", context.getIndexSettings().getSettings(), - Settings.builder().putList("index.query.default_field", - STRING_FIELD_NAME, STRING_FIELD_NAME_2 + "^5", INT_FIELD_NAME).build()) - ); - // should fail because lenient defaults to false - IllegalArgumentException exc = expectThrows(IllegalArgumentException.class, () -> qb.toQuery(context)); - assertThat(exc, instanceOf(NumberFormatException.class)); - assertThat(exc.getMessage(), equalTo("For input string: \"hello\"")); - - // explicitly sets lenient - qb.lenient(true); - query = qb.toQuery(context); - expected = new DisjunctionMaxQuery( - Arrays.asList( - new TermQuery(new Term(STRING_FIELD_NAME, "hello")), - new BoostQuery(new TermQuery(new Term(STRING_FIELD_NAME_2, "hello")), 5.0f), - new MatchNoDocsQuery("failed [mapped_int] query, caused by number_format_exception:[For input string: \"hello\"]") - ), 0.0f - ); - assertEquals(expected, query); + public void testAllFieldsWildcard() throws Exception { + QueryShardContext context = createShardContext(); + Query query = new MultiMatchQueryBuilder("hello") + .field("*") + .toQuery(context); + assertQueryWithAllFieldsWildcard(query); - context.getIndexSettings().updateIndexMetaData( - newIndexMeta("index", context.getIndexSettings().getSettings(), - Settings.builder().putNull("index.query.default_field").build()) - ); + query = new MultiMatchQueryBuilder("hello") + .field(STRING_FIELD_NAME) + .field("*") + .field(STRING_FIELD_NAME_2) + .toQuery(context); + assertQueryWithAllFieldsWildcard(query); } public void testWithStopWords() throws Exception { @@ -536,4 +564,18 @@ private static IndexMetaData newIndexMeta(String name, Settings oldIndexSettings .build(); return IndexMetaData.builder(name).settings(build).build(); } + + private void assertQueryWithAllFieldsWildcard(Query query) { + assertEquals(DisjunctionMaxQuery.class, query.getClass()); + DisjunctionMaxQuery disjunctionMaxQuery = (DisjunctionMaxQuery) query; + int noMatchNoDocsQueries = 0; + for (Query q : disjunctionMaxQuery.getDisjuncts()) { + if (q.getClass() == MatchNoDocsQuery.class) { + noMatchNoDocsQueries++; + } + } + assertEquals(11, noMatchNoDocsQueries); + assertThat(disjunctionMaxQuery.getDisjuncts(), hasItems(new TermQuery(new Term(STRING_FIELD_NAME, "hello")), + new TermQuery(new Term(STRING_FIELD_NAME_2, "hello")))); + } } diff --git a/server/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java index 001df6deb5647..ee4e0f9540451 100644 --- a/server/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java @@ -79,6 +79,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertBooleanSubQuery; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertDisjunctionSubQuery; import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.CoreMatchers.hasItems; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.instanceOf; @@ -1255,12 +1256,27 @@ public void testUnmappedFieldRewriteToMatchNoDocs() throws IOException { public void testDefaultField() throws Exception { QueryShardContext context = createShardContext(); - context.getIndexSettings().updateIndexMetaData( - newIndexMeta("index", context.getIndexSettings().getSettings(), Settings.builder().putList("index.query.default_field", - STRING_FIELD_NAME, STRING_FIELD_NAME_2 + "^5").build()) - ); + // default value `*` sets leniency to true + Query query = new QueryStringQueryBuilder("hello") + .toQuery(context); + assertQueryWithAllFieldsWildcard(query); + try { - Query query = new QueryStringQueryBuilder("hello") + // `*` is in the list of the default_field => leniency set to true + context.getIndexSettings().updateIndexMetaData( + newIndexMeta("index", context.getIndexSettings().getSettings(), Settings.builder().putList("index.query.default_field", + STRING_FIELD_NAME, "*", STRING_FIELD_NAME_2).build()) + ); + query = new QueryStringQueryBuilder("hello") + .toQuery(context); + assertQueryWithAllFieldsWildcard(query); + + + context.getIndexSettings().updateIndexMetaData( + newIndexMeta("index", context.getIndexSettings().getSettings(), Settings.builder().putList("index.query.default_field", + STRING_FIELD_NAME, STRING_FIELD_NAME_2 + "^5").build()) + ); + query = new QueryStringQueryBuilder("hello") .toQuery(context); Query expected = new DisjunctionMaxQuery( Arrays.asList( @@ -1278,6 +1294,21 @@ public void testDefaultField() throws Exception { } } + public void testAllFieldsWildcard() throws Exception { + QueryShardContext context = createShardContext(); + Query query = new QueryStringQueryBuilder("hello") + .field("*") + .toQuery(context); + assertQueryWithAllFieldsWildcard(query); + + query = new QueryStringQueryBuilder("hello") + .field(STRING_FIELD_NAME) + .field("*") + .field(STRING_FIELD_NAME_2) + .toQuery(context); + assertQueryWithAllFieldsWildcard(query); + } + /** * the quote analyzer should overwrite any other forced analyzer in quoted parts of the query */ @@ -1513,4 +1544,18 @@ private static IndexMetaData newIndexMeta(String name, Settings oldIndexSettings .build(); return IndexMetaData.builder(name).settings(build).build(); } + + private void assertQueryWithAllFieldsWildcard(Query query) { + assertEquals(DisjunctionMaxQuery.class, query.getClass()); + DisjunctionMaxQuery disjunctionMaxQuery = (DisjunctionMaxQuery) query; + int noMatchNoDocsQueries = 0; + for (Query q : disjunctionMaxQuery.getDisjuncts()) { + if (q.getClass() == MatchNoDocsQuery.class) { + noMatchNoDocsQueries++; + } + } + assertEquals(11, noMatchNoDocsQueries); + assertThat(disjunctionMaxQuery.getDisjuncts(), hasItems(new TermQuery(new Term(STRING_FIELD_NAME, "hello")), + new TermQuery(new Term(STRING_FIELD_NAME_2, "hello")))); + } } diff --git a/server/src/test/java/org/elasticsearch/index/query/SimpleQueryStringBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/SimpleQueryStringBuilderTests.java index 0adac9db8287e..ab479d89fe9d6 100644 --- a/server/src/test/java/org/elasticsearch/index/query/SimpleQueryStringBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/SimpleQueryStringBuilderTests.java @@ -56,6 +56,7 @@ import java.util.Set; import static org.hamcrest.CoreMatchers.containsString; +import static org.hamcrest.CoreMatchers.hasItems; import static org.hamcrest.Matchers.anyOf; import static org.hamcrest.Matchers.either; import static org.hamcrest.Matchers.equalTo; @@ -576,24 +577,56 @@ public void testQuoteFieldSuffix() { public void testDefaultField() throws Exception { QueryShardContext context = createShardContext(); - context.getIndexSettings().updateIndexMetaData( - newIndexMeta("index", context.getIndexSettings().getSettings(), Settings.builder().putList("index.query.default_field", - STRING_FIELD_NAME, STRING_FIELD_NAME_2 + "^5").build()) - ); + // default value `*` sets leniency to true Query query = new SimpleQueryStringBuilder("hello") .toQuery(context); - Query expected = new DisjunctionMaxQuery( - Arrays.asList( - new TermQuery(new Term(STRING_FIELD_NAME, "hello")), - new BoostQuery(new TermQuery(new Term(STRING_FIELD_NAME_2, "hello")), 5.0f) - ), 1.0f - ); - assertEquals(expected, query); - // Reset the default value - context.getIndexSettings().updateIndexMetaData( - newIndexMeta("index", - context.getIndexSettings().getSettings(), Settings.builder().putList("index.query.default_field", "*").build()) - ); + assertQueryWithAllFieldsWildcard(query); + + try { + // `*` is in the list of the default_field => leniency set to true + context.getIndexSettings().updateIndexMetaData( + newIndexMeta("index", context.getIndexSettings().getSettings(), Settings.builder().putList("index.query.default_field", + STRING_FIELD_NAME, "*", STRING_FIELD_NAME_2).build()) + ); + query = new SimpleQueryStringBuilder("hello") + .toQuery(context); + assertQueryWithAllFieldsWildcard(query); + + context.getIndexSettings().updateIndexMetaData( + newIndexMeta("index", context.getIndexSettings().getSettings(), Settings.builder().putList("index.query.default_field", + STRING_FIELD_NAME, STRING_FIELD_NAME_2 + "^5").build()) + ); + query = new SimpleQueryStringBuilder("hello") + .toQuery(context); + Query expected = new DisjunctionMaxQuery( + Arrays.asList( + new TermQuery(new Term(STRING_FIELD_NAME, "hello")), + new BoostQuery(new TermQuery(new Term(STRING_FIELD_NAME_2, "hello")), 5.0f) + ), 1.0f + ); + assertEquals(expected, query); + } finally { + // Reset to the default value + context.getIndexSettings().updateIndexMetaData( + newIndexMeta("index", + context.getIndexSettings().getSettings(), Settings.builder().putList("index.query.default_field", "*").build()) + ); + } + } + + public void testAllFieldsWildcard() throws Exception { + QueryShardContext context = createShardContext(); + Query query = new SimpleQueryStringBuilder("hello") + .field("*") + .toQuery(context); + assertQueryWithAllFieldsWildcard(query); + + query = new SimpleQueryStringBuilder("hello") + .field(STRING_FIELD_NAME) + .field("*") + .field(STRING_FIELD_NAME_2) + .toQuery(context); + assertQueryWithAllFieldsWildcard(query); } public void testToFuzzyQuery() throws Exception { @@ -739,4 +772,18 @@ private static IndexMetaData newIndexMeta(String name, Settings oldIndexSettings .build(); return IndexMetaData.builder(name).settings(build).build(); } + + private void assertQueryWithAllFieldsWildcard(Query query) { + assertEquals(DisjunctionMaxQuery.class, query.getClass()); + DisjunctionMaxQuery disjunctionMaxQuery = (DisjunctionMaxQuery) query; + int noMatchNoDocsQueries = 0; + for (Query q : disjunctionMaxQuery.getDisjuncts()) { + if (q.getClass() == MatchNoDocsQuery.class) { + noMatchNoDocsQueries++; + } + } + assertEquals(11, noMatchNoDocsQueries); + assertThat(disjunctionMaxQuery.getDisjuncts(), hasItems(new TermQuery(new Term(STRING_FIELD_NAME, "hello")), + new TermQuery(new Term(STRING_FIELD_NAME_2, "hello")))); + } } From f5a6aa7ad7bba178b7ac1d5aa34fac84ab097b43 Mon Sep 17 00:00:00 2001 From: markharwood Date: Thu, 23 May 2019 10:34:13 +0100 Subject: [PATCH 057/224] Test fix - results equality failed because of subtle scoring differences between replicas. (#42366) Diverging merge policies means the segments and therefore scores are not the same. Fixed the test by ensuring there are zero replicas. Closes #32492 --- .../elasticsearch/search/profile/query/QueryProfilerIT.java | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/server/src/test/java/org/elasticsearch/search/profile/query/QueryProfilerIT.java b/server/src/test/java/org/elasticsearch/search/profile/query/QueryProfilerIT.java index 664f5a09fa947..040e16b6e957f 100644 --- a/server/src/test/java/org/elasticsearch/search/profile/query/QueryProfilerIT.java +++ b/server/src/test/java/org/elasticsearch/search/profile/query/QueryProfilerIT.java @@ -26,6 +26,7 @@ import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.SearchType; import org.elasticsearch.action.search.ShardSearchFailure; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.search.SearchHit; @@ -110,7 +111,9 @@ public void testProfileQuery() throws Exception { * to make sure the profiling doesn't interfere with the hits being returned */ public void testProfileMatchesRegular() throws Exception { - createIndex("test"); + createIndex("test", Settings.builder() + .put("index.number_of_shards", 1) + .put("index.number_of_replicas", 0).build()); ensureGreen(); int numDocs = randomIntBetween(100, 150); From 4e999d7514e701c7cf5790e9b484d9f8d7b83297 Mon Sep 17 00:00:00 2001 From: Jim Ferenczi Date: Thu, 23 May 2019 11:41:05 +0200 Subject: [PATCH 058/224] Upgrade to Lucene 8.1.0 (#42214) This commit upgrades to the GA release of Lucene 8.1.0 --- buildSrc/version.properties | 2 +- ...ene-expressions-8.1.0-snapshot-e460356abe.jar.sha1 | 1 - .../licenses/lucene-expressions-8.1.0.jar.sha1 | 1 + ...e-analyzers-icu-8.1.0-snapshot-e460356abe.jar.sha1 | 1 - .../licenses/lucene-analyzers-icu-8.1.0.jar.sha1 | 1 + ...lyzers-kuromoji-8.1.0-snapshot-e460356abe.jar.sha1 | 1 - .../licenses/lucene-analyzers-kuromoji-8.1.0.jar.sha1 | 1 + ...-analyzers-nori-8.1.0-snapshot-e460356abe.jar.sha1 | 1 - .../licenses/lucene-analyzers-nori-8.1.0.jar.sha1 | 1 + ...lyzers-phonetic-8.1.0-snapshot-e460356abe.jar.sha1 | 1 - .../licenses/lucene-analyzers-phonetic-8.1.0.jar.sha1 | 1 + ...alyzers-smartcn-8.1.0-snapshot-e460356abe.jar.sha1 | 1 - .../licenses/lucene-analyzers-smartcn-8.1.0.jar.sha1 | 1 + ...alyzers-stempel-8.1.0-snapshot-e460356abe.jar.sha1 | 1 - .../licenses/lucene-analyzers-stempel-8.1.0.jar.sha1 | 1 + ...zers-morfologik-8.1.0-snapshot-e460356abe.jar.sha1 | 1 - .../lucene-analyzers-morfologik-8.1.0.jar.sha1 | 1 + ...nalyzers-common-8.1.0-snapshot-e460356abe.jar.sha1 | 1 - .../licenses/lucene-analyzers-common-8.1.0.jar.sha1 | 1 + ...backward-codecs-8.1.0-snapshot-e460356abe.jar.sha1 | 1 - server/licenses/lucene-backward-codecs-8.1.0.jar.sha1 | 1 + .../lucene-core-8.1.0-snapshot-e460356abe.jar.sha1 | 1 - server/licenses/lucene-core-8.1.0.jar.sha1 | 1 + ...lucene-grouping-8.1.0-snapshot-e460356abe.jar.sha1 | 1 - server/licenses/lucene-grouping-8.1.0.jar.sha1 | 1 + ...ene-highlighter-8.1.0-snapshot-e460356abe.jar.sha1 | 1 - server/licenses/lucene-highlighter-8.1.0.jar.sha1 | 1 + .../lucene-join-8.1.0-snapshot-e460356abe.jar.sha1 | 1 - server/licenses/lucene-join-8.1.0.jar.sha1 | 1 + .../lucene-memory-8.1.0-snapshot-e460356abe.jar.sha1 | 1 - server/licenses/lucene-memory-8.1.0.jar.sha1 | 1 + .../lucene-misc-8.1.0-snapshot-e460356abe.jar.sha1 | 1 - server/licenses/lucene-misc-8.1.0.jar.sha1 | 1 + .../lucene-queries-8.1.0-snapshot-e460356abe.jar.sha1 | 1 - server/licenses/lucene-queries-8.1.0.jar.sha1 | 1 + ...ene-queryparser-8.1.0-snapshot-e460356abe.jar.sha1 | 1 - server/licenses/lucene-queryparser-8.1.0.jar.sha1 | 1 + .../lucene-sandbox-8.1.0-snapshot-e460356abe.jar.sha1 | 1 - server/licenses/lucene-sandbox-8.1.0.jar.sha1 | 1 + .../lucene-spatial-8.1.0-snapshot-e460356abe.jar.sha1 | 1 - server/licenses/lucene-spatial-8.1.0.jar.sha1 | 1 + ...-spatial-extras-8.1.0-snapshot-e460356abe.jar.sha1 | 1 - server/licenses/lucene-spatial-extras-8.1.0.jar.sha1 | 1 + ...ucene-spatial3d-8.1.0-snapshot-e460356abe.jar.sha1 | 1 - server/licenses/lucene-spatial3d-8.1.0.jar.sha1 | 1 + .../lucene-suggest-8.1.0-snapshot-e460356abe.jar.sha1 | 1 - server/licenses/lucene-suggest-8.1.0.jar.sha1 | 1 + .../org/elasticsearch/index/codec/CodecService.java | 3 +-- .../codec/PerFieldMappingPostingFormatCodec.java | 3 +-- .../index/engine/InternalEngineTests.java | 11 ----------- .../elasticsearch/validate/SimpleValidateQueryIT.java | 4 ++-- .../lucene-core-8.1.0-snapshot-e460356abe.jar.sha1 | 1 - .../sql-action/licenses/lucene-core-8.1.0.jar.sha1 | 1 + 53 files changed, 29 insertions(+), 42 deletions(-) delete mode 100644 modules/lang-expression/licenses/lucene-expressions-8.1.0-snapshot-e460356abe.jar.sha1 create mode 100644 modules/lang-expression/licenses/lucene-expressions-8.1.0.jar.sha1 delete mode 100644 plugins/analysis-icu/licenses/lucene-analyzers-icu-8.1.0-snapshot-e460356abe.jar.sha1 create mode 100644 plugins/analysis-icu/licenses/lucene-analyzers-icu-8.1.0.jar.sha1 delete mode 100644 plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-8.1.0-snapshot-e460356abe.jar.sha1 create mode 100644 plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-8.1.0.jar.sha1 delete mode 100644 plugins/analysis-nori/licenses/lucene-analyzers-nori-8.1.0-snapshot-e460356abe.jar.sha1 create mode 100644 plugins/analysis-nori/licenses/lucene-analyzers-nori-8.1.0.jar.sha1 delete mode 100644 plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-8.1.0-snapshot-e460356abe.jar.sha1 create mode 100644 plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-8.1.0.jar.sha1 delete mode 100644 plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-8.1.0-snapshot-e460356abe.jar.sha1 create mode 100644 plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-8.1.0.jar.sha1 delete mode 100644 plugins/analysis-stempel/licenses/lucene-analyzers-stempel-8.1.0-snapshot-e460356abe.jar.sha1 create mode 100644 plugins/analysis-stempel/licenses/lucene-analyzers-stempel-8.1.0.jar.sha1 delete mode 100644 plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-8.1.0-snapshot-e460356abe.jar.sha1 create mode 100644 plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-8.1.0.jar.sha1 delete mode 100644 server/licenses/lucene-analyzers-common-8.1.0-snapshot-e460356abe.jar.sha1 create mode 100644 server/licenses/lucene-analyzers-common-8.1.0.jar.sha1 delete mode 100644 server/licenses/lucene-backward-codecs-8.1.0-snapshot-e460356abe.jar.sha1 create mode 100644 server/licenses/lucene-backward-codecs-8.1.0.jar.sha1 delete mode 100644 server/licenses/lucene-core-8.1.0-snapshot-e460356abe.jar.sha1 create mode 100644 server/licenses/lucene-core-8.1.0.jar.sha1 delete mode 100644 server/licenses/lucene-grouping-8.1.0-snapshot-e460356abe.jar.sha1 create mode 100644 server/licenses/lucene-grouping-8.1.0.jar.sha1 delete mode 100644 server/licenses/lucene-highlighter-8.1.0-snapshot-e460356abe.jar.sha1 create mode 100644 server/licenses/lucene-highlighter-8.1.0.jar.sha1 delete mode 100644 server/licenses/lucene-join-8.1.0-snapshot-e460356abe.jar.sha1 create mode 100644 server/licenses/lucene-join-8.1.0.jar.sha1 delete mode 100644 server/licenses/lucene-memory-8.1.0-snapshot-e460356abe.jar.sha1 create mode 100644 server/licenses/lucene-memory-8.1.0.jar.sha1 delete mode 100644 server/licenses/lucene-misc-8.1.0-snapshot-e460356abe.jar.sha1 create mode 100644 server/licenses/lucene-misc-8.1.0.jar.sha1 delete mode 100644 server/licenses/lucene-queries-8.1.0-snapshot-e460356abe.jar.sha1 create mode 100644 server/licenses/lucene-queries-8.1.0.jar.sha1 delete mode 100644 server/licenses/lucene-queryparser-8.1.0-snapshot-e460356abe.jar.sha1 create mode 100644 server/licenses/lucene-queryparser-8.1.0.jar.sha1 delete mode 100644 server/licenses/lucene-sandbox-8.1.0-snapshot-e460356abe.jar.sha1 create mode 100644 server/licenses/lucene-sandbox-8.1.0.jar.sha1 delete mode 100644 server/licenses/lucene-spatial-8.1.0-snapshot-e460356abe.jar.sha1 create mode 100644 server/licenses/lucene-spatial-8.1.0.jar.sha1 delete mode 100644 server/licenses/lucene-spatial-extras-8.1.0-snapshot-e460356abe.jar.sha1 create mode 100644 server/licenses/lucene-spatial-extras-8.1.0.jar.sha1 delete mode 100644 server/licenses/lucene-spatial3d-8.1.0-snapshot-e460356abe.jar.sha1 create mode 100644 server/licenses/lucene-spatial3d-8.1.0.jar.sha1 delete mode 100644 server/licenses/lucene-suggest-8.1.0-snapshot-e460356abe.jar.sha1 create mode 100644 server/licenses/lucene-suggest-8.1.0.jar.sha1 delete mode 100644 x-pack/plugin/sql/sql-action/licenses/lucene-core-8.1.0-snapshot-e460356abe.jar.sha1 create mode 100644 x-pack/plugin/sql/sql-action/licenses/lucene-core-8.1.0.jar.sha1 diff --git a/buildSrc/version.properties b/buildSrc/version.properties index 471cb3a705cf5..a3214c789a47d 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -1,5 +1,5 @@ elasticsearch = 8.0.0 -lucene = 8.1.0-snapshot-e460356abe +lucene = 8.1.0 bundled_jdk = 12.0.1+12@69cfe15208a647278a19ef0990eea691 diff --git a/modules/lang-expression/licenses/lucene-expressions-8.1.0-snapshot-e460356abe.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-8.1.0-snapshot-e460356abe.jar.sha1 deleted file mode 100644 index 48446e877e309..0000000000000 --- a/modules/lang-expression/licenses/lucene-expressions-8.1.0-snapshot-e460356abe.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0a1addebde14147501b7d24a581a7a7288bc585d \ No newline at end of file diff --git a/modules/lang-expression/licenses/lucene-expressions-8.1.0.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-8.1.0.jar.sha1 new file mode 100644 index 0000000000000..2554e8ce52652 --- /dev/null +++ b/modules/lang-expression/licenses/lucene-expressions-8.1.0.jar.sha1 @@ -0,0 +1 @@ +0c98e3b9d25f27ab05ac643cfb28756daa516bc7 \ No newline at end of file diff --git a/plugins/analysis-icu/licenses/lucene-analyzers-icu-8.1.0-snapshot-e460356abe.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analyzers-icu-8.1.0-snapshot-e460356abe.jar.sha1 deleted file mode 100644 index c03380c6cf36c..0000000000000 --- a/plugins/analysis-icu/licenses/lucene-analyzers-icu-8.1.0-snapshot-e460356abe.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b8b7d744e6294706b379ec7fdd2d6f1b6badc95b \ No newline at end of file diff --git a/plugins/analysis-icu/licenses/lucene-analyzers-icu-8.1.0.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analyzers-icu-8.1.0.jar.sha1 new file mode 100644 index 0000000000000..e4657681667f1 --- /dev/null +++ b/plugins/analysis-icu/licenses/lucene-analyzers-icu-8.1.0.jar.sha1 @@ -0,0 +1 @@ +d61364290eb1c28696e62b7df3a7d041d3be2fa5 \ No newline at end of file diff --git a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-8.1.0-snapshot-e460356abe.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-8.1.0-snapshot-e460356abe.jar.sha1 deleted file mode 100644 index e3195509e493f..0000000000000 --- a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-8.1.0-snapshot-e460356abe.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c9dcc5568ccd4589f4a6871d2019661546106c83 \ No newline at end of file diff --git a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-8.1.0.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-8.1.0.jar.sha1 new file mode 100644 index 0000000000000..fff37598a0861 --- /dev/null +++ b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-8.1.0.jar.sha1 @@ -0,0 +1 @@ +7f78b18890a0a0e74a8249806a6cfcabd2fae304 \ No newline at end of file diff --git a/plugins/analysis-nori/licenses/lucene-analyzers-nori-8.1.0-snapshot-e460356abe.jar.sha1 b/plugins/analysis-nori/licenses/lucene-analyzers-nori-8.1.0-snapshot-e460356abe.jar.sha1 deleted file mode 100644 index 77cd0b32ed9ea..0000000000000 --- a/plugins/analysis-nori/licenses/lucene-analyzers-nori-8.1.0-snapshot-e460356abe.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -bef6d901a9c8b4c6139de7419b3024e0c9fd4ad3 \ No newline at end of file diff --git a/plugins/analysis-nori/licenses/lucene-analyzers-nori-8.1.0.jar.sha1 b/plugins/analysis-nori/licenses/lucene-analyzers-nori-8.1.0.jar.sha1 new file mode 100644 index 0000000000000..47b0c633fdc79 --- /dev/null +++ b/plugins/analysis-nori/licenses/lucene-analyzers-nori-8.1.0.jar.sha1 @@ -0,0 +1 @@ +bfc6b5d67a792aa23ee134fe93307696aad94223 \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-8.1.0-snapshot-e460356abe.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-8.1.0-snapshot-e460356abe.jar.sha1 deleted file mode 100644 index 1f090e9ca523f..0000000000000 --- a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-8.1.0-snapshot-e460356abe.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -074c06d6f2254edae04acdd53bf6989a4343acc8 \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-8.1.0.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-8.1.0.jar.sha1 new file mode 100644 index 0000000000000..d24096b883fc9 --- /dev/null +++ b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-8.1.0.jar.sha1 @@ -0,0 +1 @@ +6fac1ff799b86f872b67e7fad55120d338daa86f \ No newline at end of file diff --git a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-8.1.0-snapshot-e460356abe.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-8.1.0-snapshot-e460356abe.jar.sha1 deleted file mode 100644 index 42a1e22cdfbc0..0000000000000 --- a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-8.1.0-snapshot-e460356abe.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5cd2a341ab4524ec7ff40ba29faa4ead5e805413 \ No newline at end of file diff --git a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-8.1.0.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-8.1.0.jar.sha1 new file mode 100644 index 0000000000000..9ed51a53f6226 --- /dev/null +++ b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-8.1.0.jar.sha1 @@ -0,0 +1 @@ +72941af5e1bfb012aec04dd518a2deb43402702c \ No newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-8.1.0-snapshot-e460356abe.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-8.1.0-snapshot-e460356abe.jar.sha1 deleted file mode 100644 index c2468bbdd7cac..0000000000000 --- a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-8.1.0-snapshot-e460356abe.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ba55aba7d278f6201b4ebd6dafbc7edb6fe94f8c \ No newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-8.1.0.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-8.1.0.jar.sha1 new file mode 100644 index 0000000000000..190a7031928b8 --- /dev/null +++ b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-8.1.0.jar.sha1 @@ -0,0 +1 @@ +0ac885595cfdc0267d7d9cb843c22dabf7215ff0 \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-8.1.0-snapshot-e460356abe.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-8.1.0-snapshot-e460356abe.jar.sha1 deleted file mode 100644 index 176e9533edde9..0000000000000 --- a/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-8.1.0-snapshot-e460356abe.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -543d99fd2ba4302f3555792236350b201514d821 \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-8.1.0.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-8.1.0.jar.sha1 new file mode 100644 index 0000000000000..7f2d4c5e8647e --- /dev/null +++ b/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-8.1.0.jar.sha1 @@ -0,0 +1 @@ +e260cff7f48e350e1ec037dec1c260ce05ddb53e \ No newline at end of file diff --git a/server/licenses/lucene-analyzers-common-8.1.0-snapshot-e460356abe.jar.sha1 b/server/licenses/lucene-analyzers-common-8.1.0-snapshot-e460356abe.jar.sha1 deleted file mode 100644 index 08507536ac134..0000000000000 --- a/server/licenses/lucene-analyzers-common-8.1.0-snapshot-e460356abe.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c20a8ae0c3bd769aa6c415ebea94ba466d9a631d \ No newline at end of file diff --git a/server/licenses/lucene-analyzers-common-8.1.0.jar.sha1 b/server/licenses/lucene-analyzers-common-8.1.0.jar.sha1 new file mode 100644 index 0000000000000..6eb7722fec744 --- /dev/null +++ b/server/licenses/lucene-analyzers-common-8.1.0.jar.sha1 @@ -0,0 +1 @@ +262f20cb2786cdf7015a4ba1a64ce90ff2d746f5 \ No newline at end of file diff --git a/server/licenses/lucene-backward-codecs-8.1.0-snapshot-e460356abe.jar.sha1 b/server/licenses/lucene-backward-codecs-8.1.0-snapshot-e460356abe.jar.sha1 deleted file mode 100644 index 6b0a3854c6f38..0000000000000 --- a/server/licenses/lucene-backward-codecs-8.1.0-snapshot-e460356abe.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -6e8921ab37facdcc5c4b71f2612d72300d6de217 \ No newline at end of file diff --git a/server/licenses/lucene-backward-codecs-8.1.0.jar.sha1 b/server/licenses/lucene-backward-codecs-8.1.0.jar.sha1 new file mode 100644 index 0000000000000..c232e0fbdfdb9 --- /dev/null +++ b/server/licenses/lucene-backward-codecs-8.1.0.jar.sha1 @@ -0,0 +1 @@ +c5610306f8eff182b399b9aed7a60b82668a8395 \ No newline at end of file diff --git a/server/licenses/lucene-core-8.1.0-snapshot-e460356abe.jar.sha1 b/server/licenses/lucene-core-8.1.0-snapshot-e460356abe.jar.sha1 deleted file mode 100644 index fea3658cf61bd..0000000000000 --- a/server/licenses/lucene-core-8.1.0-snapshot-e460356abe.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3e85f77d8f8ed1db53dba387fbdec55a9f912639 \ No newline at end of file diff --git a/server/licenses/lucene-core-8.1.0.jar.sha1 b/server/licenses/lucene-core-8.1.0.jar.sha1 new file mode 100644 index 0000000000000..4a6aa7b098686 --- /dev/null +++ b/server/licenses/lucene-core-8.1.0.jar.sha1 @@ -0,0 +1 @@ +46d614acdeb42f4661e91347100217bc72aae11e \ No newline at end of file diff --git a/server/licenses/lucene-grouping-8.1.0-snapshot-e460356abe.jar.sha1 b/server/licenses/lucene-grouping-8.1.0-snapshot-e460356abe.jar.sha1 deleted file mode 100644 index 0bcc1ebab16de..0000000000000 --- a/server/licenses/lucene-grouping-8.1.0-snapshot-e460356abe.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -426a1822d888a6341f6bafccaad19e4a2ad88e25 \ No newline at end of file diff --git a/server/licenses/lucene-grouping-8.1.0.jar.sha1 b/server/licenses/lucene-grouping-8.1.0.jar.sha1 new file mode 100644 index 0000000000000..f3c49cb193aba --- /dev/null +++ b/server/licenses/lucene-grouping-8.1.0.jar.sha1 @@ -0,0 +1 @@ +443f63d9038eea0601b493fa37fc599d74b035eb \ No newline at end of file diff --git a/server/licenses/lucene-highlighter-8.1.0-snapshot-e460356abe.jar.sha1 b/server/licenses/lucene-highlighter-8.1.0-snapshot-e460356abe.jar.sha1 deleted file mode 100644 index b2478a52c7a85..0000000000000 --- a/server/licenses/lucene-highlighter-8.1.0-snapshot-e460356abe.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f83fa4b264198dfb12436a803309a60f5588481d \ No newline at end of file diff --git a/server/licenses/lucene-highlighter-8.1.0.jar.sha1 b/server/licenses/lucene-highlighter-8.1.0.jar.sha1 new file mode 100644 index 0000000000000..6b174859e1834 --- /dev/null +++ b/server/licenses/lucene-highlighter-8.1.0.jar.sha1 @@ -0,0 +1 @@ +e3e52591f8d44a4e1006ced4dd4a67f7a572990a \ No newline at end of file diff --git a/server/licenses/lucene-join-8.1.0-snapshot-e460356abe.jar.sha1 b/server/licenses/lucene-join-8.1.0-snapshot-e460356abe.jar.sha1 deleted file mode 100644 index ea3f6353ce09e..0000000000000 --- a/server/licenses/lucene-join-8.1.0-snapshot-e460356abe.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f381131abef51f77d26bccbb213d1c8563c19ec4 \ No newline at end of file diff --git a/server/licenses/lucene-join-8.1.0.jar.sha1 b/server/licenses/lucene-join-8.1.0.jar.sha1 new file mode 100644 index 0000000000000..75232f1fc0a72 --- /dev/null +++ b/server/licenses/lucene-join-8.1.0.jar.sha1 @@ -0,0 +1 @@ +2e885b1e3e55f94ccc2744f85738563a577a4e21 \ No newline at end of file diff --git a/server/licenses/lucene-memory-8.1.0-snapshot-e460356abe.jar.sha1 b/server/licenses/lucene-memory-8.1.0-snapshot-e460356abe.jar.sha1 deleted file mode 100644 index 0bc96c932c18b..0000000000000 --- a/server/licenses/lucene-memory-8.1.0-snapshot-e460356abe.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8d8733551b9eb71e1f59688b8e78e0b481974d7a \ No newline at end of file diff --git a/server/licenses/lucene-memory-8.1.0.jar.sha1 b/server/licenses/lucene-memory-8.1.0.jar.sha1 new file mode 100644 index 0000000000000..4b2c65af32da5 --- /dev/null +++ b/server/licenses/lucene-memory-8.1.0.jar.sha1 @@ -0,0 +1 @@ +e58d0092da1c4744627d57d022f4e07d8b80d11b \ No newline at end of file diff --git a/server/licenses/lucene-misc-8.1.0-snapshot-e460356abe.jar.sha1 b/server/licenses/lucene-misc-8.1.0-snapshot-e460356abe.jar.sha1 deleted file mode 100644 index fdde3da39a264..0000000000000 --- a/server/licenses/lucene-misc-8.1.0-snapshot-e460356abe.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -13da0b22f01dff4a01c9907425464a440695104b \ No newline at end of file diff --git a/server/licenses/lucene-misc-8.1.0.jar.sha1 b/server/licenses/lucene-misc-8.1.0.jar.sha1 new file mode 100644 index 0000000000000..37afcfadb7e12 --- /dev/null +++ b/server/licenses/lucene-misc-8.1.0.jar.sha1 @@ -0,0 +1 @@ +07833aee2c5feb6fa1a16a21d27c8f15c01d0b4c \ No newline at end of file diff --git a/server/licenses/lucene-queries-8.1.0-snapshot-e460356abe.jar.sha1 b/server/licenses/lucene-queries-8.1.0-snapshot-e460356abe.jar.sha1 deleted file mode 100644 index c50232482b5c1..0000000000000 --- a/server/licenses/lucene-queries-8.1.0-snapshot-e460356abe.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -6c3de4dbb98b5cc00875d76e817929374bb9e710 \ No newline at end of file diff --git a/server/licenses/lucene-queries-8.1.0.jar.sha1 b/server/licenses/lucene-queries-8.1.0.jar.sha1 new file mode 100644 index 0000000000000..7f09849b67693 --- /dev/null +++ b/server/licenses/lucene-queries-8.1.0.jar.sha1 @@ -0,0 +1 @@ +63096d40298b8b8245a602d344b57bfa14b929fd \ No newline at end of file diff --git a/server/licenses/lucene-queryparser-8.1.0-snapshot-e460356abe.jar.sha1 b/server/licenses/lucene-queryparser-8.1.0-snapshot-e460356abe.jar.sha1 deleted file mode 100644 index 4a6c53845fc24..0000000000000 --- a/server/licenses/lucene-queryparser-8.1.0-snapshot-e460356abe.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -539ef199c74ae6891ac93f55632fe140b9d4c291 \ No newline at end of file diff --git a/server/licenses/lucene-queryparser-8.1.0.jar.sha1 b/server/licenses/lucene-queryparser-8.1.0.jar.sha1 new file mode 100644 index 0000000000000..ada3ec974e031 --- /dev/null +++ b/server/licenses/lucene-queryparser-8.1.0.jar.sha1 @@ -0,0 +1 @@ +9bb4fb3c7035a877e4a87ed86870894509d26d65 \ No newline at end of file diff --git a/server/licenses/lucene-sandbox-8.1.0-snapshot-e460356abe.jar.sha1 b/server/licenses/lucene-sandbox-8.1.0-snapshot-e460356abe.jar.sha1 deleted file mode 100644 index 198b93230fb7c..0000000000000 --- a/server/licenses/lucene-sandbox-8.1.0-snapshot-e460356abe.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0371141f658e2157babd490f0a8ddbcd5114b371 \ No newline at end of file diff --git a/server/licenses/lucene-sandbox-8.1.0.jar.sha1 b/server/licenses/lucene-sandbox-8.1.0.jar.sha1 new file mode 100644 index 0000000000000..422195c73c69d --- /dev/null +++ b/server/licenses/lucene-sandbox-8.1.0.jar.sha1 @@ -0,0 +1 @@ +1033737c97703516134ba4c99d41724729854df4 \ No newline at end of file diff --git a/server/licenses/lucene-spatial-8.1.0-snapshot-e460356abe.jar.sha1 b/server/licenses/lucene-spatial-8.1.0-snapshot-e460356abe.jar.sha1 deleted file mode 100644 index ad6558f167d1c..0000000000000 --- a/server/licenses/lucene-spatial-8.1.0-snapshot-e460356abe.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1bae56fbce29d6c597c00889dab1909f51f4aaac \ No newline at end of file diff --git a/server/licenses/lucene-spatial-8.1.0.jar.sha1 b/server/licenses/lucene-spatial-8.1.0.jar.sha1 new file mode 100644 index 0000000000000..e0d8f362a1ecf --- /dev/null +++ b/server/licenses/lucene-spatial-8.1.0.jar.sha1 @@ -0,0 +1 @@ +968d2fb35b0c2e68ac07c1ec187ab38a74b6602a \ No newline at end of file diff --git a/server/licenses/lucene-spatial-extras-8.1.0-snapshot-e460356abe.jar.sha1 b/server/licenses/lucene-spatial-extras-8.1.0-snapshot-e460356abe.jar.sha1 deleted file mode 100644 index 66d5cc808a1ac..0000000000000 --- a/server/licenses/lucene-spatial-extras-8.1.0-snapshot-e460356abe.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -6eaed1dea9a18502ab9dffe55f081da6060373f7 \ No newline at end of file diff --git a/server/licenses/lucene-spatial-extras-8.1.0.jar.sha1 b/server/licenses/lucene-spatial-extras-8.1.0.jar.sha1 new file mode 100644 index 0000000000000..0a45cfe117a3a --- /dev/null +++ b/server/licenses/lucene-spatial-extras-8.1.0.jar.sha1 @@ -0,0 +1 @@ +551b7fa327645d3fd59ae1321320153b2f858766 \ No newline at end of file diff --git a/server/licenses/lucene-spatial3d-8.1.0-snapshot-e460356abe.jar.sha1 b/server/licenses/lucene-spatial3d-8.1.0-snapshot-e460356abe.jar.sha1 deleted file mode 100644 index 0e1c69171e07e..0000000000000 --- a/server/licenses/lucene-spatial3d-8.1.0-snapshot-e460356abe.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e54c6be78275637544a3080874dd04b0d92755e5 \ No newline at end of file diff --git a/server/licenses/lucene-spatial3d-8.1.0.jar.sha1 b/server/licenses/lucene-spatial3d-8.1.0.jar.sha1 new file mode 100644 index 0000000000000..9cdde5a308e22 --- /dev/null +++ b/server/licenses/lucene-spatial3d-8.1.0.jar.sha1 @@ -0,0 +1 @@ +45e63df708be458e95d9da3e6054189c50c30dff \ No newline at end of file diff --git a/server/licenses/lucene-suggest-8.1.0-snapshot-e460356abe.jar.sha1 b/server/licenses/lucene-suggest-8.1.0-snapshot-e460356abe.jar.sha1 deleted file mode 100644 index 2d1491c40dd0d..0000000000000 --- a/server/licenses/lucene-suggest-8.1.0-snapshot-e460356abe.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e4c95d0bb740f18af520faebcebb968da3e0a687 \ No newline at end of file diff --git a/server/licenses/lucene-suggest-8.1.0.jar.sha1 b/server/licenses/lucene-suggest-8.1.0.jar.sha1 new file mode 100644 index 0000000000000..c4ac6e68080ab --- /dev/null +++ b/server/licenses/lucene-suggest-8.1.0.jar.sha1 @@ -0,0 +1 @@ +d5cd0e619b473e132f03e3577d1b422f050f99c0 \ No newline at end of file diff --git a/server/src/main/java/org/elasticsearch/index/codec/CodecService.java b/server/src/main/java/org/elasticsearch/index/codec/CodecService.java index c43f733f916cb..485c40d5d9bbd 100644 --- a/server/src/main/java/org/elasticsearch/index/codec/CodecService.java +++ b/server/src/main/java/org/elasticsearch/index/codec/CodecService.java @@ -21,7 +21,6 @@ import org.apache.logging.log4j.Logger; import org.apache.lucene.codecs.Codec; -import org.apache.lucene.codecs.lucene50.Lucene50PostingsFormat.FSTLoadMode; import org.apache.lucene.codecs.lucene50.Lucene50StoredFieldsFormat.Mode; import org.apache.lucene.codecs.lucene80.Lucene80Codec; import org.elasticsearch.common.Nullable; @@ -49,7 +48,7 @@ public CodecService(@Nullable MapperService mapperService, Logger logger) { final var codecs = new HashMap(); if (mapperService == null) { codecs.put(DEFAULT_CODEC, new Lucene80Codec()); - codecs.put(BEST_COMPRESSION_CODEC, new Lucene80Codec(Mode.BEST_COMPRESSION, FSTLoadMode.AUTO)); + codecs.put(BEST_COMPRESSION_CODEC, new Lucene80Codec(Mode.BEST_COMPRESSION)); } else { codecs.put(DEFAULT_CODEC, new PerFieldMappingPostingFormatCodec(Mode.BEST_SPEED, mapperService, logger)); diff --git a/server/src/main/java/org/elasticsearch/index/codec/PerFieldMappingPostingFormatCodec.java b/server/src/main/java/org/elasticsearch/index/codec/PerFieldMappingPostingFormatCodec.java index 705141f1fb925..4a154abd8eadd 100644 --- a/server/src/main/java/org/elasticsearch/index/codec/PerFieldMappingPostingFormatCodec.java +++ b/server/src/main/java/org/elasticsearch/index/codec/PerFieldMappingPostingFormatCodec.java @@ -22,7 +22,6 @@ import org.apache.logging.log4j.Logger; import org.apache.lucene.codecs.Codec; import org.apache.lucene.codecs.PostingsFormat; -import org.apache.lucene.codecs.lucene50.Lucene50PostingsFormat.FSTLoadMode; import org.apache.lucene.codecs.lucene50.Lucene50StoredFieldsFormat; import org.apache.lucene.codecs.lucene80.Lucene80Codec; import org.elasticsearch.common.lucene.Lucene; @@ -48,7 +47,7 @@ public class PerFieldMappingPostingFormatCodec extends Lucene80Codec { } public PerFieldMappingPostingFormatCodec(Lucene50StoredFieldsFormat.Mode compressionMode, MapperService mapperService, Logger logger) { - super(compressionMode, FSTLoadMode.AUTO); + super(compressionMode); this.mapperService = mapperService; this.logger = logger; } diff --git a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java index 82c4035cfa7db..db9de3765b1e7 100644 --- a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java +++ b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java @@ -28,8 +28,6 @@ import org.apache.logging.log4j.core.LogEvent; import org.apache.logging.log4j.core.appender.AbstractAppender; import org.apache.logging.log4j.core.filter.RegexFilter; -import org.apache.lucene.codecs.lucene50.Lucene50PostingsFormat; -import org.apache.lucene.codecs.lucene50.Lucene50StoredFieldsFormat; import org.apache.lucene.document.Field; import org.apache.lucene.document.LongPoint; import org.apache.lucene.document.NumericDocValuesField; @@ -133,7 +131,6 @@ import org.elasticsearch.test.IndexSettingsModule; import org.elasticsearch.test.VersionUtils; import org.hamcrest.MatcherAssert; -import org.hamcrest.Matchers; import java.io.Closeable; import java.io.IOException; @@ -302,14 +299,6 @@ public void testSegments() throws Exception { assertThat(segments.get(0).getDeletedDocs(), equalTo(0)); assertThat(segments.get(0).isCompound(), equalTo(true)); assertThat(segments.get(0).ramTree, nullValue()); - assertThat(segments.get(0).getAttributes().keySet(), - Matchers.contains( - // TODO: Lucene50PostingsFormat#MODE_KEY should be public ? - Lucene50PostingsFormat.class.getSimpleName() + ".fstMode", - Lucene50StoredFieldsFormat.MODE_KEY - ) - ); - engine.flush(); segments = engine.segments(false); diff --git a/server/src/test/java/org/elasticsearch/validate/SimpleValidateQueryIT.java b/server/src/test/java/org/elasticsearch/validate/SimpleValidateQueryIT.java index 54d9a015b4e4a..5f730ad138f96 100644 --- a/server/src/test/java/org/elasticsearch/validate/SimpleValidateQueryIT.java +++ b/server/src/test/java/org/elasticsearch/validate/SimpleValidateQueryIT.java @@ -207,13 +207,13 @@ public void testExplainWithRewriteValidateQuery() throws Exception { // common terms queries assertExplanation(QueryBuilders.commonTermsQuery("field", "huge brown pidgin").cutoffFrequency(1), - containsString("+field:pidgin (field:huge field:brown)"), true); + containsString("+field:pidgin field:huge field:brown"), true); assertExplanation(QueryBuilders.commonTermsQuery("field", "the brown").analyzer("stop"), containsString("field:brown"), true); // match queries with cutoff frequency assertExplanation(QueryBuilders.matchQuery("field", "huge brown pidgin").cutoffFrequency(1), - containsString("+field:pidgin (field:huge field:brown)"), true); + containsString("+field:pidgin field:huge field:brown"), true); assertExplanation(QueryBuilders.matchQuery("field", "the brown").analyzer("stop"), containsString("field:brown"), true); diff --git a/x-pack/plugin/sql/sql-action/licenses/lucene-core-8.1.0-snapshot-e460356abe.jar.sha1 b/x-pack/plugin/sql/sql-action/licenses/lucene-core-8.1.0-snapshot-e460356abe.jar.sha1 deleted file mode 100644 index fea3658cf61bd..0000000000000 --- a/x-pack/plugin/sql/sql-action/licenses/lucene-core-8.1.0-snapshot-e460356abe.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3e85f77d8f8ed1db53dba387fbdec55a9f912639 \ No newline at end of file diff --git a/x-pack/plugin/sql/sql-action/licenses/lucene-core-8.1.0.jar.sha1 b/x-pack/plugin/sql/sql-action/licenses/lucene-core-8.1.0.jar.sha1 new file mode 100644 index 0000000000000..4a6aa7b098686 --- /dev/null +++ b/x-pack/plugin/sql/sql-action/licenses/lucene-core-8.1.0.jar.sha1 @@ -0,0 +1 @@ +46d614acdeb42f4661e91347100217bc72aae11e \ No newline at end of file From 72ab7b6f26b8f76f9534db3ca8ecd6633b271eb9 Mon Sep 17 00:00:00 2001 From: Luca Cavanna Date: Thu, 23 May 2019 11:52:39 +0200 Subject: [PATCH 059/224] Rename SearchRequest#crossClusterSearch (#42363) The SearchRequest#crossClusterSearch method is currently used only as part of cross cluster search request, when minimizing roundtrips. It will soon be used also when splitting a search into two: one for throttled and one for non throttled indices. It will probably be used for other usecases as well in the future, hence it makes sense to generalize its name to subSearchRequest. --- .../action/search/SearchRequest.java | 15 ++++++++------- .../action/search/TransportSearchAction.java | 6 +++--- .../action/search/SearchPhaseControllerTests.java | 2 +- .../action/search/SearchRequestTests.java | 14 +++++++------- .../TransportSearchActionSingleNodeTests.java | 14 +++++++------- 5 files changed, 26 insertions(+), 25 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java b/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java index 6b641906d2e32..53dafc153fc4b 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java @@ -134,9 +134,10 @@ public SearchRequest(String[] indices, SearchSourceBuilder source) { } /** - * Creates a new search request by providing the search request to copy all fields from, the indices to search against, the alias of - * the cluster where it will be executed, as well as the start time in milliseconds from the epoch time and whether the reduction - * should be final or not. Used when a {@link SearchRequest} is created and executed as part of a cross-cluster search request + * Creates a new sub-search request starting from the original search request that is provided. + * For internal use only, allows to fork a search request into multiple search requests that will be executed independently. + * Such requests will not be finally reduced, so that their results can be merged together in one response at completion. + * Used when a {@link SearchRequest} is created and executed as part of a cross-cluster search request * performing reduction on each cluster in order to minimize network round-trips between the coordinating node and the remote clusters. * * @param originalSearchRequest the original search request @@ -145,8 +146,8 @@ public SearchRequest(String[] indices, SearchSourceBuilder source) { * @param absoluteStartMillis the absolute start time to be used on the remote clusters to ensure that the same value is used * @param finalReduce whether the reduction should be final or not */ - static SearchRequest crossClusterSearch(SearchRequest originalSearchRequest, String[] indices, - String clusterAlias, long absoluteStartMillis, boolean finalReduce) { + static SearchRequest subSearchRequest(SearchRequest originalSearchRequest, String[] indices, + String clusterAlias, long absoluteStartMillis, boolean finalReduce) { Objects.requireNonNull(originalSearchRequest, "search request must not be null"); validateIndices(indices); Objects.requireNonNull(clusterAlias, "cluster alias must not be null"); @@ -284,7 +285,7 @@ boolean isFinalReduce() { /** * Returns the current time in milliseconds from the time epoch, to be used for the execution of this search request. Used to * ensure that the same value, determined by the coordinating node, is used on all nodes involved in the execution of the search - * request. When created through {@link #crossClusterSearch(SearchRequest, String[], String, long, boolean)}, this method returns + * request. When created through {@link #subSearchRequest(SearchRequest, String[], String, long, boolean)}, this method returns * the provided current time, otherwise it will return {@link System#currentTimeMillis()}. */ long getOrCreateAbsoluteStartMillis() { @@ -292,7 +293,7 @@ long getOrCreateAbsoluteStartMillis() { } /** - * Returns the provided absoluteStartMillis when created through {@link #crossClusterSearch} and + * Returns the provided absoluteStartMillis when created through {@link #subSearchRequest} and * -1 otherwise. */ long getAbsoluteStartMillis() { diff --git a/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java b/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java index d37e10a71f3b9..a7c0a785c7fce 100644 --- a/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java @@ -270,7 +270,7 @@ static void ccsRemoteReduce(SearchRequest searchRequest, OriginalIndices localIn String clusterAlias = entry.getKey(); boolean skipUnavailable = remoteClusterService.isSkipUnavailable(clusterAlias); OriginalIndices indices = entry.getValue(); - SearchRequest ccsSearchRequest = SearchRequest.crossClusterSearch(searchRequest, indices.indices(), + SearchRequest ccsSearchRequest = SearchRequest.subSearchRequest(searchRequest, indices.indices(), clusterAlias, timeProvider.getAbsoluteStartMillis(), true); Client remoteClusterClient = remoteClusterService.getRemoteClusterClient(threadPool, clusterAlias); remoteClusterClient.search(ccsSearchRequest, new ActionListener() { @@ -306,7 +306,7 @@ public void onFailure(Exception e) { String clusterAlias = entry.getKey(); boolean skipUnavailable = remoteClusterService.isSkipUnavailable(clusterAlias); OriginalIndices indices = entry.getValue(); - SearchRequest ccsSearchRequest = SearchRequest.crossClusterSearch(searchRequest, indices.indices(), + SearchRequest ccsSearchRequest = SearchRequest.subSearchRequest(searchRequest, indices.indices(), clusterAlias, timeProvider.getAbsoluteStartMillis(), false); ActionListener ccsListener = createCCSListener(clusterAlias, skipUnavailable, countDown, skippedClusters, exceptions, searchResponseMerger, totalClusters, listener); @@ -316,7 +316,7 @@ public void onFailure(Exception e) { if (localIndices != null) { ActionListener ccsListener = createCCSListener(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY, false, countDown, skippedClusters, exceptions, searchResponseMerger, totalClusters, listener); - SearchRequest ccsLocalSearchRequest = SearchRequest.crossClusterSearch(searchRequest, localIndices.indices(), + SearchRequest ccsLocalSearchRequest = SearchRequest.subSearchRequest(searchRequest, localIndices.indices(), RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY, timeProvider.getAbsoluteStartMillis(), false); localSearchConsumer.accept(ccsLocalSearchRequest, ccsListener); } diff --git a/server/src/test/java/org/elasticsearch/action/search/SearchPhaseControllerTests.java b/server/src/test/java/org/elasticsearch/action/search/SearchPhaseControllerTests.java index 084a45267b5c5..3a1adf9748a06 100644 --- a/server/src/test/java/org/elasticsearch/action/search/SearchPhaseControllerTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/SearchPhaseControllerTests.java @@ -330,7 +330,7 @@ private static AtomicArray generateFetchResults(int nShards, } private static SearchRequest randomSearchRequest() { - return randomBoolean() ? new SearchRequest() : SearchRequest.crossClusterSearch(new SearchRequest(), + return randomBoolean() ? new SearchRequest() : SearchRequest.subSearchRequest(new SearchRequest(), Strings.EMPTY_ARRAY, "remote", 0, randomBoolean()); } diff --git a/server/src/test/java/org/elasticsearch/action/search/SearchRequestTests.java b/server/src/test/java/org/elasticsearch/action/search/SearchRequestTests.java index 8f1d89a37daaa..06231db26d67e 100644 --- a/server/src/test/java/org/elasticsearch/action/search/SearchRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/SearchRequestTests.java @@ -48,21 +48,21 @@ protected SearchRequest createSearchRequest() throws IOException { return request; } //clusterAlias and absoluteStartMillis do not have public getters/setters hence we randomize them only in this test specifically. - return SearchRequest.crossClusterSearch(request, request.indices(), + return SearchRequest.subSearchRequest(request, request.indices(), randomAlphaOfLengthBetween(5, 10), randomNonNegativeLong(), randomBoolean()); } public void testWithLocalReduction() { - expectThrows(NullPointerException.class, () -> SearchRequest.crossClusterSearch(null, Strings.EMPTY_ARRAY, "", 0, randomBoolean())); + expectThrows(NullPointerException.class, () -> SearchRequest.subSearchRequest(null, Strings.EMPTY_ARRAY, "", 0, randomBoolean())); SearchRequest request = new SearchRequest(); - expectThrows(NullPointerException.class, () -> SearchRequest.crossClusterSearch(request, null, "", 0, randomBoolean())); - expectThrows(NullPointerException.class, () -> SearchRequest.crossClusterSearch(request, + expectThrows(NullPointerException.class, () -> SearchRequest.subSearchRequest(request, null, "", 0, randomBoolean())); + expectThrows(NullPointerException.class, () -> SearchRequest.subSearchRequest(request, new String[]{null}, "", 0, randomBoolean())); - expectThrows(NullPointerException.class, () -> SearchRequest.crossClusterSearch(request, + expectThrows(NullPointerException.class, () -> SearchRequest.subSearchRequest(request, Strings.EMPTY_ARRAY, null, 0, randomBoolean())); - expectThrows(IllegalArgumentException.class, () -> SearchRequest.crossClusterSearch(request, + expectThrows(IllegalArgumentException.class, () -> SearchRequest.subSearchRequest(request, Strings.EMPTY_ARRAY, "", -1, randomBoolean())); - SearchRequest searchRequest = SearchRequest.crossClusterSearch(request, Strings.EMPTY_ARRAY, "", 0, randomBoolean()); + SearchRequest searchRequest = SearchRequest.subSearchRequest(request, Strings.EMPTY_ARRAY, "", 0, randomBoolean()); assertNull(searchRequest.validate()); } diff --git a/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionSingleNodeTests.java b/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionSingleNodeTests.java index 82f7c513bf0ce..10f252c30dc3b 100644 --- a/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionSingleNodeTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionSingleNodeTests.java @@ -46,7 +46,7 @@ public void testLocalClusterAlias() { assertEquals(RestStatus.CREATED, indexResponse.status()); { - SearchRequest searchRequest = SearchRequest.crossClusterSearch(new SearchRequest(), Strings.EMPTY_ARRAY, + SearchRequest searchRequest = SearchRequest.subSearchRequest(new SearchRequest(), Strings.EMPTY_ARRAY, "local", nowInMillis, randomBoolean()); SearchResponse searchResponse = client().search(searchRequest).actionGet(); assertEquals(1, searchResponse.getHits().getTotalHits().value); @@ -58,7 +58,7 @@ public void testLocalClusterAlias() { assertEquals("1", hit.getId()); } { - SearchRequest searchRequest = SearchRequest.crossClusterSearch(new SearchRequest(), Strings.EMPTY_ARRAY, + SearchRequest searchRequest = SearchRequest.subSearchRequest(new SearchRequest(), Strings.EMPTY_ARRAY, "", nowInMillis, randomBoolean()); SearchResponse searchResponse = client().search(searchRequest).actionGet(); assertEquals(1, searchResponse.getHits().getTotalHits().value); @@ -100,13 +100,13 @@ public void testAbsoluteStartMillis() { assertEquals(0, searchResponse.getTotalShards()); } { - SearchRequest searchRequest = SearchRequest.crossClusterSearch(new SearchRequest(), + SearchRequest searchRequest = SearchRequest.subSearchRequest(new SearchRequest(), Strings.EMPTY_ARRAY, "", 0, randomBoolean()); SearchResponse searchResponse = client().search(searchRequest).actionGet(); assertEquals(2, searchResponse.getHits().getTotalHits().value); } { - SearchRequest searchRequest = SearchRequest.crossClusterSearch(new SearchRequest(), + SearchRequest searchRequest = SearchRequest.subSearchRequest(new SearchRequest(), Strings.EMPTY_ARRAY, "", 0, randomBoolean()); searchRequest.indices(""); SearchResponse searchResponse = client().search(searchRequest).actionGet(); @@ -114,7 +114,7 @@ public void testAbsoluteStartMillis() { assertEquals("test-1970.01.01", searchResponse.getHits().getHits()[0].getIndex()); } { - SearchRequest searchRequest = SearchRequest.crossClusterSearch(new SearchRequest(), + SearchRequest searchRequest = SearchRequest.subSearchRequest(new SearchRequest(), Strings.EMPTY_ARRAY, "", 0, randomBoolean()); SearchSourceBuilder sourceBuilder = new SearchSourceBuilder(); RangeQueryBuilder rangeQuery = new RangeQueryBuilder("date"); @@ -156,7 +156,7 @@ public void testFinalReduce() { source.aggregation(terms); { - SearchRequest searchRequest = randomBoolean() ? originalRequest : SearchRequest.crossClusterSearch(originalRequest, + SearchRequest searchRequest = randomBoolean() ? originalRequest : SearchRequest.subSearchRequest(originalRequest, Strings.EMPTY_ARRAY, "remote", nowInMillis, true); SearchResponse searchResponse = client().search(searchRequest).actionGet(); assertEquals(2, searchResponse.getHits().getTotalHits().value); @@ -165,7 +165,7 @@ public void testFinalReduce() { assertEquals(1, longTerms.getBuckets().size()); } { - SearchRequest searchRequest = SearchRequest.crossClusterSearch(originalRequest, + SearchRequest searchRequest = SearchRequest.subSearchRequest(originalRequest, Strings.EMPTY_ARRAY, "remote", nowInMillis, false); SearchResponse searchResponse = client().search(searchRequest).actionGet(); assertEquals(2, searchResponse.getHits().getTotalHits().value); From 5da6f5dfbfcce23ff5ea0b4131887792e24d37bd Mon Sep 17 00:00:00 2001 From: jimczi Date: Thu, 23 May 2019 12:18:11 +0200 Subject: [PATCH 060/224] upgrade Lucene Version for ES 7.3.0 after backport of #42214 --- server/src/main/java/org/elasticsearch/Version.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/server/src/main/java/org/elasticsearch/Version.java b/server/src/main/java/org/elasticsearch/Version.java index e3381a3384c0e..7f939ca627a95 100644 --- a/server/src/main/java/org/elasticsearch/Version.java +++ b/server/src/main/java/org/elasticsearch/Version.java @@ -93,7 +93,7 @@ public class Version implements Comparable, ToXContentFragment { public static final int V_7_2_0_ID = 7020099; public static final Version V_7_2_0 = new Version(V_7_2_0_ID, org.apache.lucene.util.Version.LUCENE_8_0_0); public static final int V_7_3_0_ID = 7030099; - public static final Version V_7_3_0 = new Version(V_7_3_0_ID, org.apache.lucene.util.Version.LUCENE_8_0_0); + public static final Version V_7_3_0 = new Version(V_7_3_0_ID, org.apache.lucene.util.Version.LUCENE_8_1_0); public static final int V_8_0_0_ID = 8000099; public static final Version V_8_0_0 = new Version(V_8_0_0_ID, org.apache.lucene.util.Version.LUCENE_8_1_0); public static final Version CURRENT = V_8_0_0; From cb402220d88127b35152f4567beec41b219b96d3 Mon Sep 17 00:00:00 2001 From: Simon Willnauer Date: Thu, 23 May 2019 12:29:39 +0200 Subject: [PATCH 061/224] Remove deprecated Repository methods (#42359) We deprecated `restoreShard` and `snapshotShard` in #42213 This change removes the deprecated methods and their usage and adds a note in the migration docs. --- .../migration/migrate_8_0/java.asciidoc | 8 ++++ .../index/shard/StoreRecovery.java | 2 +- .../repositories/Repository.java | 41 ------------------- .../snapshots/SnapshotShardsService.java | 3 +- 4 files changed, 11 insertions(+), 43 deletions(-) diff --git a/docs/reference/migration/migrate_8_0/java.asciidoc b/docs/reference/migration/migrate_8_0/java.asciidoc index 523e5b463d8bc..21d281acff97f 100644 --- a/docs/reference/migration/migrate_8_0/java.asciidoc +++ b/docs/reference/migration/migrate_8_0/java.asciidoc @@ -25,3 +25,11 @@ while silently truncating them to one of the three allowed edit distances 0, 1 or 2. This leniency is now removed and the class will throw errors when trying to construct an instance with another value (e.g. floats like 1.3 used to get accepted but truncated to 1). You should use one of the allowed values. + + +[float] +==== Changes to Repository + +Repository has no dependency on IndexShard anymore. The contract of restoreShard +and snapshotShard has been reduced to Store and MappingService in order to improve +testability. diff --git a/server/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java b/server/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java index aa49f7ecb60ce..fae3703027f9e 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java +++ b/server/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java @@ -469,7 +469,7 @@ private void restore(final IndexShard indexShard, final Repository repository, f } final IndexId indexId = repository.getRepositoryData().resolveIndexId(indexName); assert indexShard.getEngineOrNull() == null; - repository.restoreShard(indexShard, indexShard.store(), restoreSource.snapshot().getSnapshotId(), + repository.restoreShard(indexShard.store(), restoreSource.snapshot().getSnapshotId(), restoreSource.version(), indexId, snapshotShardId, indexShard.recoveryState()); final Store store = indexShard.store(); store.bootstrapNewHistory(); diff --git a/server/src/main/java/org/elasticsearch/repositories/Repository.java b/server/src/main/java/org/elasticsearch/repositories/Repository.java index 3aa19cb130cae..0eca92039fbf8 100644 --- a/server/src/main/java/org/elasticsearch/repositories/Repository.java +++ b/server/src/main/java/org/elasticsearch/repositories/Repository.java @@ -27,7 +27,6 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.component.LifecycleComponent; import org.elasticsearch.index.mapper.MapperService; -import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.snapshots.IndexShardSnapshotStatus; import org.elasticsearch.index.store.Store; @@ -189,27 +188,6 @@ SnapshotInfo finalizeSnapshot(SnapshotId snapshotId, List indices, long */ boolean isReadOnly(); - /** - * Creates a snapshot of the shard based on the index commit point. - *

- * The index commit point can be obtained by using {@link org.elasticsearch.index.engine.Engine#acquireLastIndexCommit} method. - * Repository implementations shouldn't release the snapshot index commit point. It is done by the method caller. - *

- * As snapshot process progresses, implementation of this method should update {@link IndexShardSnapshotStatus} object and check - * {@link IndexShardSnapshotStatus#isAborted()} to see if the snapshot process should be aborted. - * @param indexShard the shard to be snapshotted - * @param snapshotId snapshot id - * @param indexId id for the index being snapshotted - * @param snapshotIndexCommit commit point - * @param snapshotStatus snapshot status - * @deprecated use {@link #snapshotShard(Store, MapperService, SnapshotId, IndexId, IndexCommit, IndexShardSnapshotStatus)} instead - */ - @Deprecated - default void snapshotShard(IndexShard indexShard, SnapshotId snapshotId, IndexId indexId, IndexCommit snapshotIndexCommit, - IndexShardSnapshotStatus snapshotStatus) { - snapshotShard(indexShard.store(), indexShard.mapperService(), snapshotId, indexId, snapshotIndexCommit, snapshotStatus); - } - /** * Creates a snapshot of the shard based on the index commit point. *

@@ -228,25 +206,6 @@ default void snapshotShard(IndexShard indexShard, SnapshotId snapshotId, IndexId void snapshotShard(Store store, MapperService mapperService, SnapshotId snapshotId, IndexId indexId, IndexCommit snapshotIndexCommit, IndexShardSnapshotStatus snapshotStatus); - /** - * Restores snapshot of the shard. - *

- * The index can be renamed on restore, hence different {@code shardId} and {@code snapshotShardId} are supplied. - * @param shard the shard to restore the index into - * @param store the store to restore the index into - * @param snapshotId snapshot id - * @param version version of elasticsearch that created this snapshot - * @param indexId id of the index in the repository from which the restore is occurring - * @param snapshotShardId shard id (in the snapshot) - * @param recoveryState recovery state - * @deprecated use {@link #restoreShard(Store, SnapshotId, Version, IndexId, ShardId, RecoveryState)} instead - */ - @Deprecated - default void restoreShard(IndexShard shard, Store store, SnapshotId snapshotId, Version version, IndexId indexId, - ShardId snapshotShardId, RecoveryState recoveryState) { - restoreShard(store, snapshotId, version, indexId, snapshotShardId, recoveryState); - } - /** * Restores snapshot of the shard. *

diff --git a/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java b/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java index f79b6da6ef626..b21df093fadd2 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java +++ b/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java @@ -367,7 +367,8 @@ private void snapshot(final IndexShard indexShard, final Snapshot snapshot, fina try { // we flush first to make sure we get the latest writes snapshotted try (Engine.IndexCommitRef snapshotRef = indexShard.acquireLastIndexCommit(true)) { - repository.snapshotShard(indexShard, snapshot.getSnapshotId(), indexId, snapshotRef.getIndexCommit(), snapshotStatus); + repository.snapshotShard(indexShard.store(), indexShard.mapperService(), snapshot.getSnapshotId(), indexId, + snapshotRef.getIndexCommit(), snapshotStatus); if (logger.isDebugEnabled()) { final IndexShardSnapshotStatus.Copy lastSnapshotStatus = snapshotStatus.asCopy(); logger.debug("snapshot ({}) completed to {} with {}", snapshot, repository, lastSnapshotStatus); From 2721326d576ea8e2feaf278480580d2d83f29628 Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Thu, 23 May 2019 05:15:40 -0700 Subject: [PATCH 062/224] Remove old assertion in resync replication request (#42390) This assertion was left behind from a previous cleanup. The assertion was there to remove some stale logic not needed when master would not talk to 6.x anymore. When that logic was removed, this assertion was left behind. This commit removes that stale assertion. --- .../elasticsearch/action/resync/ResyncReplicationRequest.java | 2 -- 1 file changed, 2 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/action/resync/ResyncReplicationRequest.java b/server/src/main/java/org/elasticsearch/action/resync/ResyncReplicationRequest.java index f19bfe3ac6952..78b87435a4f34 100644 --- a/server/src/main/java/org/elasticsearch/action/resync/ResyncReplicationRequest.java +++ b/server/src/main/java/org/elasticsearch/action/resync/ResyncReplicationRequest.java @@ -38,8 +38,6 @@ public final class ResyncReplicationRequest extends ReplicatedWriteRequest Date: Thu, 23 May 2019 07:54:00 -0500 Subject: [PATCH 063/224] Bulk processor concurrent requests (#41451) `org.elasticsearch.action.bulk.BulkProcessor` is a threadsafe class that allows for simple semantics to deal with sending bulk requests. Once a bulk reaches it's pre-defined size, documents, or flush interval it will execute sending the bulk. One configurable option is the number of concurrent outstanding bulk requests. That concurrency is implemented in `org.elasticsearch.action.bulk.BulkRequestHandler` via a semaphore. However, the only code that currently calls into this code is blocked by `synchronized` methods. This results in the in-ability for the BulkProcessor to behave concurrently despite supporting configurable amounts of concurrent requests. This change removes the `synchronized` method in favor an explicit lock around the non-thread safe parts of the method. The call into `org.elasticsearch.action.bulk.BulkRequestHandler` is no longer blocking, which allows `org.elasticsearch.action.bulk.BulkRequestHandler` to handle it's own concurrency. --- .../action/bulk/BulkProcessor.java | 108 +++++--- .../action/bulk/BulkProcessorTests.java | 251 +++++++++++++++++- 2 files changed, 328 insertions(+), 31 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkProcessor.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkProcessor.java index b0ad87a8b744a..08c42c5ea40de 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/BulkProcessor.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkProcessor.java @@ -26,6 +26,7 @@ import org.elasticsearch.client.Client; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; @@ -39,6 +40,7 @@ import java.util.concurrent.ScheduledThreadPoolExecutor; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicLong; +import java.util.concurrent.locks.ReentrantLock; import java.util.function.BiConsumer; import java.util.function.Supplier; @@ -225,6 +227,7 @@ private static Scheduler buildScheduler(ScheduledThreadPoolExecutor scheduledThr private final Runnable onClose; private volatile boolean closed = false; + private final ReentrantLock lock = new ReentrantLock(); BulkProcessor(BiConsumer> consumer, BackoffPolicy backoffPolicy, Listener listener, int concurrentRequests, int bulkActions, ByteSizeValue bulkSize, @Nullable TimeValue flushInterval, @@ -264,21 +267,26 @@ public void close() { * completed * @throws InterruptedException If the current thread is interrupted */ - public synchronized boolean awaitClose(long timeout, TimeUnit unit) throws InterruptedException { - if (closed) { - return true; - } - closed = true; + public boolean awaitClose(long timeout, TimeUnit unit) throws InterruptedException { + lock.lock(); + try { + if (closed) { + return true; + } + closed = true; - this.cancellableFlushTask.cancel(); + this.cancellableFlushTask.cancel(); - if (bulkRequest.numberOfActions() > 0) { - execute(); - } - try { - return this.bulkRequestHandler.awaitClose(timeout, unit); + if (bulkRequest.numberOfActions() > 0) { + execute(); + } + try { + return this.bulkRequestHandler.awaitClose(timeout, unit); + } finally { + onClose.run(); + } } finally { - onClose.run(); + lock.unlock(); } } @@ -315,10 +323,22 @@ protected void ensureOpen() { } } - private synchronized void internalAdd(DocWriteRequest request) { - ensureOpen(); - bulkRequest.add(request); - executeIfNeeded(); + private void internalAdd(DocWriteRequest request) { + //bulkRequest and instance swapping is not threadsafe, so execute the mutations under a lock. + //once the bulk request is ready to be shipped swap the instance reference unlock and send the local reference to the handler. + Tuple bulkRequestToExecute = null; + lock.lock(); + try { + ensureOpen(); + bulkRequest.add(request); + bulkRequestToExecute = newBulkRequestIfNeeded(); + } finally { + lock.unlock(); + } + //execute sending the local reference outside the lock to allow handler to control the concurrency via it's configuration. + if (bulkRequestToExecute != null) { + execute(bulkRequestToExecute.v1(), bulkRequestToExecute.v2()); + } } /** @@ -332,11 +352,23 @@ public BulkProcessor add(BytesReference data, @Nullable String defaultIndex, @Nu /** * Adds the data from the bytes to be processed by the bulk processor */ - public synchronized BulkProcessor add(BytesReference data, @Nullable String defaultIndex, @Nullable String defaultType, + public BulkProcessor add(BytesReference data, @Nullable String defaultIndex, @Nullable String defaultType, @Nullable String defaultPipeline, XContentType xContentType) throws Exception { - bulkRequest.add(data, defaultIndex, defaultType, null, null, defaultPipeline, true, xContentType); - executeIfNeeded(); + Tuple bulkRequestToExecute = null; + lock.lock(); + try { + ensureOpen(); + bulkRequest.add(data, defaultIndex, defaultType, null, null, defaultPipeline, + true, xContentType); + bulkRequestToExecute = newBulkRequestIfNeeded(); + } finally { + lock.unlock(); + } + + if (bulkRequestToExecute != null) { + execute(bulkRequestToExecute.v1(), bulkRequestToExecute.v2()); + } return this; } @@ -358,23 +390,32 @@ public boolean isCancelled() { return scheduler.scheduleWithFixedDelay(flushRunnable, flushInterval, ThreadPool.Names.GENERIC); } - private void executeIfNeeded() { + // needs to be executed under a lock + private Tuple newBulkRequestIfNeeded(){ ensureOpen(); if (!isOverTheLimit()) { - return; + return null; } - execute(); + final BulkRequest bulkRequest = this.bulkRequest; + this.bulkRequest = bulkRequestSupplier.get(); + return new Tuple<>(bulkRequest,executionIdGen.incrementAndGet()) ; + } + + // may be executed without a lock + private void execute(BulkRequest bulkRequest, long executionId ){ + this.bulkRequestHandler.execute(bulkRequest, executionId); } - // (currently) needs to be executed under a lock + // needs to be executed under a lock private void execute() { final BulkRequest bulkRequest = this.bulkRequest; final long executionId = executionIdGen.incrementAndGet(); this.bulkRequest = bulkRequestSupplier.get(); - this.bulkRequestHandler.execute(bulkRequest, executionId); + execute(bulkRequest, executionId); } + // needs to be executed under a lock private boolean isOverTheLimit() { if (bulkActions != -1 && bulkRequest.numberOfActions() >= bulkActions) { return true; @@ -388,18 +429,23 @@ private boolean isOverTheLimit() { /** * Flush pending delete or index requests. */ - public synchronized void flush() { - ensureOpen(); - if (bulkRequest.numberOfActions() > 0) { - execute(); + public void flush() { + lock.lock(); + try { + ensureOpen(); + if (bulkRequest.numberOfActions() > 0) { + execute(); + } + } finally { + lock.unlock(); } } class Flush implements Runnable { - @Override public void run() { - synchronized (BulkProcessor.this) { + lock.lock(); + try { if (closed) { return; } @@ -407,6 +453,8 @@ public void run() { return; } execute(); + } finally { + lock.unlock(); } } } diff --git a/server/src/test/java/org/elasticsearch/action/bulk/BulkProcessorTests.java b/server/src/test/java/org/elasticsearch/action/bulk/BulkProcessorTests.java index e2527397a780a..6a58696534ed4 100644 --- a/server/src/test/java/org/elasticsearch/action/bulk/BulkProcessorTests.java +++ b/server/src/test/java/org/elasticsearch/action/bulk/BulkProcessorTests.java @@ -19,26 +19,43 @@ package org.elasticsearch.action.bulk; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.Scheduler; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; import org.junit.After; import org.junit.Before; +import java.nio.ByteBuffer; +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.List; import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; +import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicReference; import java.util.function.BiConsumer; public class BulkProcessorTests extends ESTestCase { private ThreadPool threadPool; + private final Logger logger = LogManager.getLogger(BulkProcessorTests.class); @Before public void startThreadPool() { @@ -90,10 +107,216 @@ public void testBulkProcessorFlushPreservesContext() throws InterruptedException bulkProcessor.close(); } + public void testConcurrentExecutions() throws Exception { + final AtomicBoolean called = new AtomicBoolean(false); + final AtomicReference exceptionRef = new AtomicReference<>(); + int estimatedTimeForTest = Integer.MAX_VALUE; + final int simulateWorkTimeInMillis = 5; + int concurrentClients = 0; + int concurrentBulkRequests = 0; + int expectedExecutions = 0; + int maxBatchSize = 0; + int maxDocuments = 0; + int iterations = 0; + boolean runTest = true; + //find some randoms that allow this test to take under ~ 10 seconds + while (estimatedTimeForTest > 10_000) { + if (iterations++ > 1_000) { //extremely unlikely + runTest = false; + break; + } + maxBatchSize = randomIntBetween(1, 100); + maxDocuments = randomIntBetween(maxBatchSize, 1_000_000); + concurrentClients = randomIntBetween(1, 20); + concurrentBulkRequests = randomIntBetween(0, 20); + expectedExecutions = maxDocuments / maxBatchSize; + estimatedTimeForTest = (expectedExecutions * simulateWorkTimeInMillis) / + Math.min(concurrentBulkRequests + 1, concurrentClients); + } + assumeTrue("failed to find random values that allows test to run quickly", runTest); + BulkResponse bulkResponse = new BulkResponse(new BulkItemResponse[]{ new BulkItemResponse() }, 0); + AtomicInteger failureCount = new AtomicInteger(0); + AtomicInteger successCount = new AtomicInteger(0); + AtomicInteger requestCount = new AtomicInteger(0); + AtomicInteger docCount = new AtomicInteger(0); + BiConsumer> consumer = (request, listener) -> + { + try { + Thread.sleep(simulateWorkTimeInMillis); //simulate work + listener.onResponse(bulkResponse); + } catch (InterruptedException e) { + //should never happen + Thread.currentThread().interrupt(); + failureCount.getAndIncrement(); + exceptionRef.set(ExceptionsHelper.useOrSuppress(exceptionRef.get(), e)); + } + }; + try (BulkProcessor bulkProcessor = new BulkProcessor(consumer, BackoffPolicy.noBackoff(), + countingListener(requestCount, successCount, failureCount, docCount, exceptionRef), + concurrentBulkRequests, maxBatchSize, new ByteSizeValue(Integer.MAX_VALUE), null, + (command, delay, executor) -> null, () -> called.set(true), BulkRequest::new)) { + + ExecutorService executorService = Executors.newFixedThreadPool(concurrentClients); + CountDownLatch startGate = new CountDownLatch(1 + concurrentClients); + + IndexRequest indexRequest = new IndexRequest(); + String bulkRequest = "{ \"index\" : { \"_index\" : \"test\", \"_id\" : \"1\" } }\n" + "{ \"field1\" : \"value1\" }\n"; + BytesReference bytesReference = + BytesReference.fromByteBuffers(new ByteBuffer[]{ ByteBuffer.wrap(bulkRequest.getBytes(StandardCharsets.UTF_8)) }); + List futures = new ArrayList<>(); + for (final AtomicInteger i = new AtomicInteger(0); i.getAndIncrement() < maxDocuments; ) { + futures.add(executorService.submit(() -> { + try { + //don't start any work until all tasks are submitted + startGate.countDown(); + startGate.await(); + //alternate between ways to add to the bulk processor + if (randomBoolean()) { + bulkProcessor.add(indexRequest); + } else { + bulkProcessor.add(bytesReference, null, null, XContentType.JSON); + } + } catch (Exception e) { + throw ExceptionsHelper.convertToRuntime(e); + } + })); + } + startGate.countDown(); + startGate.await(); + + for (Future f : futures) { + try { + f.get(); + } catch (Exception e) { + failureCount.incrementAndGet(); + exceptionRef.set(ExceptionsHelper.useOrSuppress(exceptionRef.get(), e)); + } + } + executorService.shutdown(); + executorService.awaitTermination(10, TimeUnit.SECONDS); + + if (failureCount.get() > 0 || successCount.get() != expectedExecutions || requestCount.get() != successCount.get()) { + if (exceptionRef.get() != null) { + logger.error("exception(s) caught during test", exceptionRef.get()); + } + fail("\nExpected Bulks: " + expectedExecutions + "\n" + + "Requested Bulks: " + requestCount.get() + "\n" + + "Successful Bulks: " + successCount.get() + "\n" + + "Failed Bulks: " + failureCount.get() + "\n" + + "Max Documents: " + maxDocuments + "\n" + + "Max Batch Size: " + maxBatchSize + "\n" + + "Concurrent Clients: " + concurrentClients + "\n" + + "Concurrent Bulk Requests: " + concurrentBulkRequests + "\n" + ); + } + } + //count total docs after processor is closed since there may have been partial batches that are flushed on close. + assertEquals(docCount.get(), maxDocuments); + } + + public void testConcurrentExecutionsWithFlush() throws Exception { + final AtomicReference exceptionRef = new AtomicReference<>(); + final int maxDocuments = 100_000; + final int concurrentClients = 2; + final int maxBatchSize = Integer.MAX_VALUE; //don't flush based on size + final int concurrentBulkRequests = randomIntBetween(0, 20); + final int simulateWorkTimeInMillis = 5; + BulkResponse bulkResponse = new BulkResponse(new BulkItemResponse[]{ new BulkItemResponse() }, 0); + AtomicInteger failureCount = new AtomicInteger(0); + AtomicInteger successCount = new AtomicInteger(0); + AtomicInteger requestCount = new AtomicInteger(0); + AtomicInteger docCount = new AtomicInteger(0); + BiConsumer> consumer = (request, listener) -> + { + try { + Thread.sleep(simulateWorkTimeInMillis); //simulate work + listener.onResponse(bulkResponse); + } catch (InterruptedException e) { + //should never happen + Thread.currentThread().interrupt(); + failureCount.getAndIncrement(); + exceptionRef.set(ExceptionsHelper.useOrSuppress(exceptionRef.get(), e)); + } + }; + ScheduledExecutorService flushExecutor = Executors.newScheduledThreadPool(1); + try (BulkProcessor bulkProcessor = new BulkProcessor(consumer, BackoffPolicy.noBackoff(), + countingListener(requestCount, successCount, failureCount, docCount, exceptionRef), + concurrentBulkRequests, maxBatchSize, new ByteSizeValue(Integer.MAX_VALUE), + TimeValue.timeValueMillis(simulateWorkTimeInMillis * 2), + (command, delay, executor) -> + Scheduler.wrapAsScheduledCancellable(flushExecutor.schedule(command, delay.millis(), TimeUnit.MILLISECONDS)), + () -> + { + flushExecutor.shutdown(); + try { + flushExecutor.awaitTermination(10L, TimeUnit.SECONDS); + if (flushExecutor.isTerminated() == false) { + flushExecutor.shutdownNow(); + } + } catch (InterruptedException ie) { + Thread.currentThread().interrupt(); + } + }, + BulkRequest::new)) { + + ExecutorService executorService = Executors.newFixedThreadPool(concurrentClients); + IndexRequest indexRequest = new IndexRequest(); + String bulkRequest = "{ \"index\" : { \"_index\" : \"test\", \"_id\" : \"1\" } }\n" + "{ \"field1\" : \"value1\" }\n"; + BytesReference bytesReference = + BytesReference.fromByteBuffers(new ByteBuffer[]{ ByteBuffer.wrap(bulkRequest.getBytes(StandardCharsets.UTF_8)) }); + List futures = new ArrayList<>(); + CountDownLatch startGate = new CountDownLatch(1 + concurrentClients); + for (final AtomicInteger i = new AtomicInteger(0); i.getAndIncrement() < maxDocuments; ) { + futures.add(executorService.submit(() -> { + try { + //don't start any work until all tasks are submitted + startGate.countDown(); + startGate.await(); + //alternate between ways to add to the bulk processor + if (randomBoolean()) { + bulkProcessor.add(indexRequest); + } else { + bulkProcessor.add(bytesReference, null, null, XContentType.JSON); + } + } catch (Exception e) { + throw ExceptionsHelper.convertToRuntime(e); + } + })); + } + startGate.countDown(); + startGate.await(); + + for (Future f : futures) { + try { + f.get(); + } catch (Exception e) { + failureCount.incrementAndGet(); + exceptionRef.set(ExceptionsHelper.useOrSuppress(exceptionRef.get(), e)); + } + } + executorService.shutdown(); + executorService.awaitTermination(10, TimeUnit.SECONDS); + } + + if (failureCount.get() > 0 || requestCount.get() != successCount.get() || maxDocuments != docCount.get()) { + if (exceptionRef.get() != null) { + logger.error("exception(s) caught during test", exceptionRef.get()); + } + fail("\nRequested Bulks: " + requestCount.get() + "\n" + + "Successful Bulks: " + successCount.get() + "\n" + + "Failed Bulks: " + failureCount.get() + "\n" + + "Total Documents: " + docCount.get() + "\n" + + "Max Documents: " + maxDocuments + "\n" + + "Max Batch Size: " + maxBatchSize + "\n" + + "Concurrent Clients: " + concurrentClients + "\n" + + "Concurrent Bulk Requests: " + concurrentBulkRequests + "\n" + ); + } + } public void testAwaitOnCloseCallsOnClose() throws Exception { final AtomicBoolean called = new AtomicBoolean(false); - BiConsumer> consumer = (request, listener) -> {}; + BiConsumer> consumer = (request, listener) -> { }; BulkProcessor bulkProcessor = new BulkProcessor(consumer, BackoffPolicy.noBackoff(), emptyListener(), 0, 10, new ByteSizeValue(1000), null, (command, delay, executor) -> null, () -> called.set(true), BulkRequest::new); @@ -118,4 +341,30 @@ public void afterBulk(long executionId, BulkRequest request, Throwable failure) } }; } + + private BulkProcessor.Listener countingListener(AtomicInteger requestCount, AtomicInteger successCount, AtomicInteger failureCount, + AtomicInteger docCount, AtomicReference exceptionRef) { + + return new BulkProcessor.Listener() { + @Override + public void beforeBulk(long executionId, BulkRequest request) { + requestCount.incrementAndGet(); + } + + @Override + public void afterBulk(long executionId, BulkRequest request, BulkResponse response) { + successCount.incrementAndGet(); + docCount.addAndGet(request.requests().size()); + } + + @Override + public void afterBulk(long executionId, BulkRequest request, Throwable failure) { + if (failure != null) { + failureCount.incrementAndGet(); + exceptionRef.set(ExceptionsHelper.useOrSuppress(exceptionRef.get(), failure)); + + } + } + }; + } } From cbb3bbdd78002a827011003c7ed14446eb3f4148 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christoph=20B=C3=BCscher?= Date: Thu, 23 May 2019 09:53:16 -0400 Subject: [PATCH 064/224] Prevent normalizer from not being closed on exception (#42375) Currently AnalysisRegistry#processNormalizerFactory creates a normalizer and only later checks whether it should be added to the normalizer map passed in. In case we throw an exception it isn't closed. This can be prevented by moving the check that throws the exception earlier. --- .../org/elasticsearch/index/analysis/AnalysisRegistry.java | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/index/analysis/AnalysisRegistry.java b/server/src/main/java/org/elasticsearch/index/analysis/AnalysisRegistry.java index c4be6edd49069..d9c4b2c510bc9 100644 --- a/server/src/main/java/org/elasticsearch/index/analysis/AnalysisRegistry.java +++ b/server/src/main/java/org/elasticsearch/index/analysis/AnalysisRegistry.java @@ -523,14 +523,14 @@ private void processNormalizerFactory( if (normalizerFactory instanceof CustomNormalizerProvider) { ((CustomNormalizerProvider) normalizerFactory).build(tokenizerName, tokenizerFactory, charFilters, tokenFilters); } + if (normalizers.containsKey(name)) { + throw new IllegalStateException("already registered analyzer with name: " + name); + } Analyzer normalizerF = normalizerFactory.get(); if (normalizerF == null) { throw new IllegalArgumentException("normalizer [" + normalizerFactory.name() + "] created null normalizer"); } NamedAnalyzer normalizer = new NamedAnalyzer(name, normalizerFactory.scope(), normalizerF); - if (normalizers.containsKey(name)) { - throw new IllegalStateException("already registered analyzer with name: " + name); - } normalizers.put(name, normalizer); } } From c459ea828f6419fba0469cc1569c5ead741e7dee Mon Sep 17 00:00:00 2001 From: Yannick Welsch Date: Thu, 23 May 2019 16:02:12 +0200 Subject: [PATCH 065/224] Remove node.max_local_storage_nodes (#42428) This setting, which prior to Elasticsearch 5 was enabled by default and caused all kinds of confusion, has since been disabled by default and is not recommended for production use. The preferred way going forward is for users to explicitly specify separate data folders for each started node to ensure that each node is consistently assigned to the same data path. Relates to #42426 --- docs/reference/commands/node-tool.asciidoc | 6 +- docs/reference/migration/migrate_8_0.asciidoc | 2 + .../migration/migrate_8_0/node.asciidoc | 16 +++ docs/reference/modules/node.asciidoc | 15 --- .../env/NodeEnvironmentEvilTests.java | 4 +- .../ElasticsearchNodeCommand.java | 14 +-- .../common/settings/ClusterSettings.java | 1 - .../elasticsearch/env/NodeEnvironment.java | 97 +++++-------------- .../RemoveCorruptedShardDataCommand.java | 86 +++++++--------- .../elasticsearch/index/shard/ShardPath.java | 7 +- .../UnsafeBootstrapAndDetachCommandIT.java | 12 +-- .../env/NodeEnvironmentTests.java | 37 +------ .../RemoveCorruptedShardDataCommandTests.java | 2 +- 13 files changed, 99 insertions(+), 200 deletions(-) create mode 100644 docs/reference/migration/migrate_8_0/node.asciidoc diff --git a/docs/reference/commands/node-tool.asciidoc b/docs/reference/commands/node-tool.asciidoc index ed810a4dac014..4dd2b0dfe0b6a 100644 --- a/docs/reference/commands/node-tool.asciidoc +++ b/docs/reference/commands/node-tool.asciidoc @@ -13,7 +13,7 @@ with the data on disk. [source,shell] -------------------------------------------------- bin/elasticsearch-node repurpose|unsafe-bootstrap|detach-cluster|override-version - [--ordinal ] [-E ] + [-E ] [-h, --help] ([-s, --silent] | [-v, --verbose]) -------------------------------------------------- @@ -290,10 +290,6 @@ it can join a different cluster. `override-version`:: Overwrites the version number stored in the data path so that a node can start despite being incompatible with the on-disk data. -`--ordinal `:: If there is <> then this specifies which node to target. Defaults -to `0`, meaning to use the first node in the data path. - `-E `:: Configures a setting. `-h, --help`:: Returns all of the command parameters. diff --git a/docs/reference/migration/migrate_8_0.asciidoc b/docs/reference/migration/migrate_8_0.asciidoc index ed40dddaae28e..84672da61635c 100644 --- a/docs/reference/migration/migrate_8_0.asciidoc +++ b/docs/reference/migration/migrate_8_0.asciidoc @@ -20,6 +20,7 @@ coming[8.0.0] * <> * <> * <> +* <> * <> * <> * <> @@ -54,6 +55,7 @@ include::migrate_8_0/security.asciidoc[] include::migrate_8_0/ilm.asciidoc[] include::migrate_8_0/java.asciidoc[] include::migrate_8_0/network.asciidoc[] +include::migrate_8_0/node.asciidoc[] include::migrate_8_0/transport.asciidoc[] include::migrate_8_0/http.asciidoc[] include::migrate_8_0/reindex.asciidoc[] diff --git a/docs/reference/migration/migrate_8_0/node.asciidoc b/docs/reference/migration/migrate_8_0/node.asciidoc new file mode 100644 index 0000000000000..a1dcd654807e1 --- /dev/null +++ b/docs/reference/migration/migrate_8_0/node.asciidoc @@ -0,0 +1,16 @@ +[float] +[[breaking_80_node_changes]] +=== Node changes + +//NOTE: The notable-breaking-changes tagged regions are re-used in the +//Installation and Upgrade Guide +//tag::notable-breaking-changes[] + +// end::notable-breaking-changes[] + +[float] +==== Removal of `node.max_local_storage_nodes` setting + +The `node.max_local_storage_nodes` setting was deprecated in 7.x and +has been removed in 8.0. Nodes should be run on separate data paths +to ensure that each node is consistently assigned to the same data path. diff --git a/docs/reference/modules/node.asciidoc b/docs/reference/modules/node.asciidoc index f988e97ef553c..031138dada3f1 100644 --- a/docs/reference/modules/node.asciidoc +++ b/docs/reference/modules/node.asciidoc @@ -277,21 +277,6 @@ home directory, so that the home directory can be deleted without deleting your data! The RPM and Debian distributions do this for you already. -[float] -[[max-local-storage-nodes]] -=== `node.max_local_storage_nodes` - -The <> can be shared by multiple nodes, even by nodes from different -clusters. This is very useful for testing failover and different configurations on your development -machine. In production, however, it is recommended to run only one node of Elasticsearch per server. - -By default, Elasticsearch is configured to prevent more than one node from sharing the same data -path. To allow for more than one node (e.g., on your development machine), use the setting -`node.max_local_storage_nodes` and set this to a positive integer larger than one. - -WARNING: Never run different node types (i.e. master, data) from the same data directory. This can -lead to unexpected data loss. - [float] == Other node settings diff --git a/qa/evil-tests/src/test/java/org/elasticsearch/env/NodeEnvironmentEvilTests.java b/qa/evil-tests/src/test/java/org/elasticsearch/env/NodeEnvironmentEvilTests.java index 57d4a363cc8c7..44d3c2a88a55b 100644 --- a/qa/evil-tests/src/test/java/org/elasticsearch/env/NodeEnvironmentEvilTests.java +++ b/qa/evil-tests/src/test/java/org/elasticsearch/env/NodeEnvironmentEvilTests.java @@ -51,10 +51,10 @@ public void testMissingWritePermission() throws IOException { Settings build = Settings.builder() .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toAbsolutePath().toString()) .putList(Environment.PATH_DATA_SETTING.getKey(), tempPaths).build(); - IOException ioException = expectThrows(IOException.class, () -> { + IOException exception = expectThrows(IOException.class, () -> { new NodeEnvironment(build, TestEnvironment.newEnvironment(build)); }); - assertTrue(ioException.getMessage(), ioException.getMessage().startsWith(path.toString())); + assertTrue(exception.getMessage(), exception.getMessage().startsWith(path.toString())); } } diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/ElasticsearchNodeCommand.java b/server/src/main/java/org/elasticsearch/cluster/coordination/ElasticsearchNodeCommand.java index ec664c97067d1..a65934c767769 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/ElasticsearchNodeCommand.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/ElasticsearchNodeCommand.java @@ -20,7 +20,6 @@ import joptsimple.OptionParser; import joptsimple.OptionSet; -import joptsimple.OptionSpec; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.lucene.store.LockObtainFailedException; @@ -59,22 +58,15 @@ public abstract class ElasticsearchNodeCommand extends EnvironmentAwareCommand { static final String NO_GLOBAL_METADATA_MSG = "failed to find global metadata, metadata corrupted?"; static final String WRITE_METADATA_EXCEPTION_MSG = "exception occurred when writing new metadata to disk"; protected static final String ABORTED_BY_USER_MSG = "aborted by user"; - final OptionSpec nodeOrdinalOption; public ElasticsearchNodeCommand(String description) { super(description); - nodeOrdinalOption = parser.accepts("ordinal", "Optional node ordinal, 0 if not specified") - .withRequiredArg().ofType(Integer.class); namedXContentRegistry = new NamedXContentRegistry(ClusterModule.getNamedXWriteables()); } - protected void processNodePathsWithLock(Terminal terminal, OptionSet options, Environment env) throws IOException { + protected void processNodePaths(Terminal terminal, OptionSet options, Environment env) throws IOException { terminal.println(Terminal.Verbosity.VERBOSE, "Obtaining lock for node"); - Integer nodeOrdinal = nodeOrdinalOption.value(options); - if (nodeOrdinal == null) { - nodeOrdinal = 0; - } - try (NodeEnvironment.NodeLock lock = new NodeEnvironment.NodeLock(nodeOrdinal, logger, env, Files::exists)) { + try (NodeEnvironment.NodeLock lock = new NodeEnvironment.NodeLock(logger, env, Files::exists)) { final Path[] dataPaths = Arrays.stream(lock.getNodePaths()).filter(Objects::nonNull).map(p -> p.path).toArray(Path[]::new); if (dataPaths.length == 0) { @@ -118,7 +110,7 @@ protected void confirm(Terminal terminal, String msg) { protected final void execute(Terminal terminal, OptionSet options, Environment env) throws Exception { terminal.println(STOP_WARNING_MSG); if (validateBeforeLock(terminal, env)) { - processNodePathsWithLock(terminal, options, env); + processNodePaths(terminal, options, env); } } diff --git a/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java b/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java index 6b50c0f1c112c..e29ceb7372bcf 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java @@ -390,7 +390,6 @@ public void apply(Settings value, Settings current, Settings previous) { ThreadContext.DEFAULT_HEADERS_SETTING, Loggers.LOG_DEFAULT_LEVEL_SETTING, Loggers.LOG_LEVEL_SETTING, - NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING, NodeEnvironment.ENABLE_LUCENE_SEGMENT_INFOS_TRACE_SETTING, OsService.REFRESH_INTERVAL_SETTING, ProcessService.REFRESH_INTERVAL_SETTING, diff --git a/server/src/main/java/org/elasticsearch/env/NodeEnvironment.java b/server/src/main/java/org/elasticsearch/env/NodeEnvironment.java index 4cfd22ecb1a65..497c6a9e06459 100644 --- a/server/src/main/java/org/elasticsearch/env/NodeEnvironment.java +++ b/server/src/main/java/org/elasticsearch/env/NodeEnvironment.java @@ -81,7 +81,6 @@ import java.util.concurrent.Semaphore; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.atomic.AtomicReference; import java.util.function.Predicate; import java.util.stream.Collectors; import java.util.stream.Stream; @@ -91,9 +90,9 @@ */ public final class NodeEnvironment implements Closeable { public static class NodePath { - /* ${data.paths}/nodes/{node.id} */ + /* ${data.paths}/nodes/0 */ public final Path path; - /* ${data.paths}/nodes/{node.id}/indices */ + /* ${data.paths}/nodes/0/indices */ public final Path indicesPath; /** Cached FileStore from path */ public final FileStore fileStore; @@ -152,18 +151,11 @@ public String toString() { private final Path sharedDataPath; private final Lock[] locks; - private final int nodeLockId; private final AtomicBoolean closed = new AtomicBoolean(false); private final Map shardLocks = new HashMap<>(); private final NodeMetaData nodeMetaData; - /** - * Maximum number of data nodes that should run in an environment. - */ - public static final Setting MAX_LOCAL_STORAGE_NODES_SETTING = Setting.intSetting("node.max_local_storage_nodes", 1, 1, - Property.NodeScope); - /** * Seed for determining a persisted unique uuid of this node. If the node has already a persisted uuid on disk, * this seed will be ignored and the uuid from disk will be reused. @@ -184,7 +176,6 @@ public String toString() { public static class NodeLock implements Releasable { - private final int nodeId; private final Lock[] locks; private final NodePath[] nodePaths; @@ -192,17 +183,16 @@ public static class NodeLock implements Releasable { * Tries to acquire a node lock for a node id, throws {@code IOException} if it is unable to acquire it * @param pathFunction function to check node path before attempt of acquiring a node lock */ - public NodeLock(final int nodeId, final Logger logger, + public NodeLock(final Logger logger, final Environment environment, final CheckedFunction pathFunction) throws IOException { - this.nodeId = nodeId; nodePaths = new NodePath[environment.dataFiles().length]; locks = new Lock[nodePaths.length]; try { final Path[] dataPaths = environment.dataFiles(); for (int dirIndex = 0; dirIndex < dataPaths.length; dirIndex++) { Path dataDir = dataPaths[dirIndex]; - Path dir = resolveNodePath(dataDir, nodeId); + Path dir = resolveNodePath(dataDir); if (pathFunction.apply(dir) == false) { continue; } @@ -248,61 +238,35 @@ public NodeEnvironment(Settings settings, Environment environment) throws IOExce nodePaths = null; sharedDataPath = null; locks = null; - nodeLockId = -1; nodeMetaData = new NodeMetaData(generateNodeId(settings), Version.CURRENT); return; } boolean success = false; - NodeLock nodeLock = null; try { sharedDataPath = environment.sharedDataFile(); - IOException lastException = null; - int maxLocalStorageNodes = MAX_LOCAL_STORAGE_NODES_SETTING.get(settings); - final AtomicReference onCreateDirectoriesException = new AtomicReference<>(); - for (int possibleLockId = 0; possibleLockId < maxLocalStorageNodes; possibleLockId++) { - try { - nodeLock = new NodeLock(possibleLockId, logger, environment, - dir -> { - try { - Files.createDirectories(dir); - } catch (IOException e) { - onCreateDirectoriesException.set(e); - throw e; - } - return true; - }); - break; - } catch (LockObtainFailedException e) { - // ignore any LockObtainFailedException - } catch (IOException e) { - if (onCreateDirectoriesException.get() != null) { - throw onCreateDirectoriesException.get(); - } - lastException = e; - } + for (Path path : environment.dataFiles()) { + Files.createDirectories(resolveNodePath(path)); } - if (nodeLock == null) { + final NodeLock nodeLock; + try { + nodeLock = new NodeLock(logger, environment, dir -> true); + } catch (IOException e) { final String message = String.format( Locale.ROOT, - "failed to obtain node locks, tried [%s] with lock id%s;" + - " maybe these locations are not writable or multiple nodes were started without increasing [%s] (was [%d])?", - Arrays.toString(environment.dataFiles()), - maxLocalStorageNodes == 1 ? " [0]" : "s [0--" + (maxLocalStorageNodes - 1) + "]", - MAX_LOCAL_STORAGE_NODES_SETTING.getKey(), - maxLocalStorageNodes); - throw new IllegalStateException(message, lastException); + "failed to obtain node locks, tried %s;" + + " maybe these locations are not writable or multiple nodes were started on the same data path?", + Arrays.toString(environment.dataFiles())); + throw new IllegalStateException(message, e); } + this.locks = nodeLock.locks; this.nodePaths = nodeLock.nodePaths; - this.nodeLockId = nodeLock.nodeId; this.nodeMetaData = loadOrCreateNodeMetaData(settings, logger, nodePaths); - if (logger.isDebugEnabled()) { - logger.debug("using node location [{}], local_lock_id [{}]", nodePaths, nodeLockId); - } + logger.debug("using node location {}", Arrays.toString(nodePaths)); maybeLogPathDetails(); maybeLogHeapDetails(); @@ -334,11 +298,10 @@ public NodeEnvironment(Settings settings, Environment environment) throws IOExce * Resolve a specific nodes/{node.id} path for the specified path and node lock id. * * @param path the path - * @param nodeLockId the node lock id * @return the resolved path */ - public static Path resolveNodePath(final Path path, final int nodeLockId) { - return path.resolve(NODES_FOLDER).resolve(Integer.toString(nodeLockId)); + public static Path resolveNodePath(final Path path) { + return path.resolve(NODES_FOLDER).resolve("0"); } private void maybeLogPathDetails() throws IOException { @@ -805,14 +768,6 @@ public NodePath[] nodePaths() { return nodePaths; } - public int getNodeLockId() { - assertEnvIsLocked(); - if (nodePaths == null || locks == null) { - throw new IllegalStateException("node is not configured to store local location"); - } - return nodeLockId; - } - /** * Returns all index paths. */ @@ -1137,12 +1092,12 @@ private static boolean isIndexMetaDataPath(Path path) { * * @param indexSettings settings for the index */ - public static Path resolveBaseCustomLocation(IndexSettings indexSettings, Path sharedDataPath, int nodeLockId) { + public static Path resolveBaseCustomLocation(IndexSettings indexSettings, Path sharedDataPath) { String customDataDir = indexSettings.customDataPath(); if (customDataDir != null) { // This assert is because this should be caught by MetaDataCreateIndexService assert sharedDataPath != null; - return sharedDataPath.resolve(customDataDir).resolve(Integer.toString(nodeLockId)); + return sharedDataPath.resolve(customDataDir).resolve("0"); } else { throw new IllegalArgumentException("no custom " + IndexMetaData.SETTING_DATA_PATH + " setting available"); } @@ -1156,11 +1111,11 @@ public static Path resolveBaseCustomLocation(IndexSettings indexSettings, Path s * @param indexSettings settings for the index */ private Path resolveIndexCustomLocation(IndexSettings indexSettings) { - return resolveIndexCustomLocation(indexSettings, sharedDataPath, nodeLockId); + return resolveIndexCustomLocation(indexSettings, sharedDataPath); } - private static Path resolveIndexCustomLocation(IndexSettings indexSettings, Path sharedDataPath, int nodeLockId) { - return resolveBaseCustomLocation(indexSettings, sharedDataPath, nodeLockId).resolve(indexSettings.getUUID()); + private static Path resolveIndexCustomLocation(IndexSettings indexSettings, Path sharedDataPath) { + return resolveBaseCustomLocation(indexSettings, sharedDataPath).resolve(indexSettings.getUUID()); } /** @@ -1172,11 +1127,11 @@ private static Path resolveIndexCustomLocation(IndexSettings indexSettings, Path * @param shardId shard to resolve the path to */ public Path resolveCustomLocation(IndexSettings indexSettings, final ShardId shardId) { - return resolveCustomLocation(indexSettings, shardId, sharedDataPath, nodeLockId); + return resolveCustomLocation(indexSettings, shardId, sharedDataPath); } - public static Path resolveCustomLocation(IndexSettings indexSettings, final ShardId shardId, Path sharedDataPath, int nodeLockId) { - return resolveIndexCustomLocation(indexSettings, sharedDataPath, nodeLockId).resolve(Integer.toString(shardId.id())); + public static Path resolveCustomLocation(IndexSettings indexSettings, final ShardId shardId, Path sharedDataPath) { + return resolveIndexCustomLocation(indexSettings, sharedDataPath).resolve(Integer.toString(shardId.id())); } /** diff --git a/server/src/main/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommand.java b/server/src/main/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommand.java index 7242198633be2..16db596515b4c 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommand.java +++ b/server/src/main/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommand.java @@ -126,8 +126,6 @@ protected void findAndProcessShardPath(OptionSet options, Environment environmen final String indexName; final int shardId; - final int fromNodeId; - final int toNodeId; if (options.has(folderOption)) { final Path path = getPath(folderOption.value(options)).getParent(); @@ -150,8 +148,6 @@ protected void findAndProcessShardPath(OptionSet options, Environment environmen ) { shardId = Integer.parseInt(shardIdFileName); indexName = indexMetaData.getIndex().getName(); - fromNodeId = Integer.parseInt(nodeIdFileName); - toNodeId = fromNodeId + 1; } else { throw new ElasticsearchException("Unable to resolve shard id. Wrong folder structure at [ " + path.toString() + " ], expected .../nodes/[NODE-ID]/indices/[INDEX-UUID]/[SHARD-ID]"); @@ -160,59 +156,49 @@ protected void findAndProcessShardPath(OptionSet options, Environment environmen // otherwise resolve shardPath based on the index name and shard id indexName = Objects.requireNonNull(indexNameOption.value(options), "Index name is required"); shardId = Objects.requireNonNull(shardIdOption.value(options), "Shard ID is required"); - - // resolve shard path in case of multi-node layout per environment - fromNodeId = 0; - toNodeId = NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING.get(settings); } - // have to iterate over possibleLockId as NodeEnvironment; on a contrast to it - we have to fail if node is busy - for (int possibleLockId = fromNodeId; possibleLockId < toNodeId; possibleLockId++) { - try { - try (NodeEnvironment.NodeLock nodeLock = new NodeEnvironment.NodeLock(possibleLockId, logger, environment, Files::exists)) { - final NodeEnvironment.NodePath[] nodePaths = nodeLock.getNodePaths(); - for (NodeEnvironment.NodePath nodePath : nodePaths) { - if (Files.exists(nodePath.indicesPath)) { - // have to scan all index uuid folders to resolve from index name - try (DirectoryStream stream = Files.newDirectoryStream(nodePath.indicesPath)) { - for (Path file : stream) { - if (Files.exists(file.resolve(MetaDataStateFormat.STATE_DIR_NAME)) == false) { - continue; - } - - final IndexMetaData indexMetaData = - IndexMetaData.FORMAT.loadLatestState(logger, namedXContentRegistry, file); - if (indexMetaData == null) { - continue; - } - final IndexSettings indexSettings = new IndexSettings(indexMetaData, settings); - final Index index = indexMetaData.getIndex(); - if (indexName.equals(index.getName()) == false) { - continue; - } - final ShardId shId = new ShardId(index, shardId); - - final Path shardPathLocation = nodePath.resolve(shId); - if (Files.exists(shardPathLocation) == false) { - continue; - } - final ShardPath shardPath = ShardPath.loadShardPath(logger, shId, indexSettings, - new Path[]{shardPathLocation}, possibleLockId, nodePath.path); - if (shardPath != null) { - consumer.accept(shardPath); - return; - } - } + try (NodeEnvironment.NodeLock nodeLock = new NodeEnvironment.NodeLock(logger, environment, Files::exists)) { + final NodeEnvironment.NodePath[] nodePaths = nodeLock.getNodePaths(); + for (NodeEnvironment.NodePath nodePath : nodePaths) { + if (Files.exists(nodePath.indicesPath)) { + // have to scan all index uuid folders to resolve from index name + try (DirectoryStream stream = Files.newDirectoryStream(nodePath.indicesPath)) { + for (Path file : stream) { + if (Files.exists(file.resolve(MetaDataStateFormat.STATE_DIR_NAME)) == false) { + continue; + } + + final IndexMetaData indexMetaData = + IndexMetaData.FORMAT.loadLatestState(logger, namedXContentRegistry, file); + if (indexMetaData == null) { + continue; + } + final IndexSettings indexSettings = new IndexSettings(indexMetaData, settings); + final Index index = indexMetaData.getIndex(); + if (indexName.equals(index.getName()) == false) { + continue; + } + final ShardId shId = new ShardId(index, shardId); + + final Path shardPathLocation = nodePath.resolve(shId); + if (Files.exists(shardPathLocation) == false) { + continue; + } + final ShardPath shardPath = ShardPath.loadShardPath(logger, shId, indexSettings, + new Path[]{shardPathLocation}, nodePath.path); + if (shardPath != null) { + consumer.accept(shardPath); + return; } } } } - } catch (LockObtainFailedException lofe) { - throw new ElasticsearchException("Failed to lock node's directory [" + lofe.getMessage() - + "], is Elasticsearch still running ?"); } + } catch (LockObtainFailedException lofe) { + throw new ElasticsearchException("Failed to lock node's directory [" + lofe.getMessage() + + "], is Elasticsearch still running ?"); } - throw new ElasticsearchException("Unable to resolve shard path for index [" + indexName + "] and shard id [" + shardId + "]"); } public static boolean isCorruptMarkerFileIsPresent(final Directory directory) throws IOException { @@ -238,7 +224,6 @@ protected void dropCorruptMarkerFiles(Terminal terminal, Path path, Directory di terminal); } String[] files = directory.listAll(); - boolean found = false; for (String file : files) { if (file.startsWith(Store.CORRUPTED)) { directory.deleteFile(file); @@ -282,7 +267,6 @@ public void execute(Terminal terminal, OptionSet options, Environment environmen findAndProcessShardPath(options, environment, shardPath -> { final Path indexPath = shardPath.resolveIndex(); final Path translogPath = shardPath.resolveTranslog(); - final Path nodePath = getNodePath(shardPath); if (Files.exists(translogPath) == false || Files.isDirectory(translogPath) == false) { throw new ElasticsearchException("translog directory [" + translogPath + "], must exist and be a directory"); } diff --git a/server/src/main/java/org/elasticsearch/index/shard/ShardPath.java b/server/src/main/java/org/elasticsearch/index/shard/ShardPath.java index d93cd988c62a7..32d38d9803414 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/ShardPath.java +++ b/server/src/main/java/org/elasticsearch/index/shard/ShardPath.java @@ -118,9 +118,8 @@ public boolean isCustomDataPath() { public static ShardPath loadShardPath(Logger logger, NodeEnvironment env, ShardId shardId, IndexSettings indexSettings) throws IOException { final Path[] paths = env.availableShardPaths(shardId); - final int nodeLockId = env.getNodeLockId(); final Path sharedDataPath = env.sharedDataPath(); - return loadShardPath(logger, shardId, indexSettings, paths, nodeLockId, sharedDataPath); + return loadShardPath(logger, shardId, indexSettings, paths, sharedDataPath); } /** @@ -129,7 +128,7 @@ public static ShardPath loadShardPath(Logger logger, NodeEnvironment env, * Note: this method resolves custom data locations for the shard. */ public static ShardPath loadShardPath(Logger logger, ShardId shardId, IndexSettings indexSettings, Path[] availableShardPaths, - int nodeLockId, Path sharedDataPath) throws IOException { + Path sharedDataPath) throws IOException { final String indexUUID = indexSettings.getUUID(); Path loadedPath = null; for (Path path : availableShardPaths) { @@ -157,7 +156,7 @@ public static ShardPath loadShardPath(Logger logger, ShardId shardId, IndexSetti final Path dataPath; final Path statePath = loadedPath; if (indexSettings.hasCustomDataPath()) { - dataPath = NodeEnvironment.resolveCustomLocation(indexSettings, shardId, sharedDataPath, nodeLockId); + dataPath = NodeEnvironment.resolveCustomLocation(indexSettings, shardId, sharedDataPath); } else { dataPath = statePath; } diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/UnsafeBootstrapAndDetachCommandIT.java b/server/src/test/java/org/elasticsearch/cluster/coordination/UnsafeBootstrapAndDetachCommandIT.java index 3bbf8378483dd..44f4d7bf4aa53 100644 --- a/server/src/test/java/org/elasticsearch/cluster/coordination/UnsafeBootstrapAndDetachCommandIT.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/UnsafeBootstrapAndDetachCommandIT.java @@ -56,10 +56,10 @@ @TestLogging("_root:DEBUG,org.elasticsearch.cluster.service:TRACE,org.elasticsearch.cluster.coordination:TRACE") public class UnsafeBootstrapAndDetachCommandIT extends ESIntegTestCase { - private MockTerminal executeCommand(ElasticsearchNodeCommand command, Environment environment, int nodeOrdinal, boolean abort) + private MockTerminal executeCommand(ElasticsearchNodeCommand command, Environment environment, boolean abort) throws Exception { final MockTerminal terminal = new MockTerminal(); - final OptionSet options = command.getParser().parse("-ordinal", Integer.toString(nodeOrdinal)); + final OptionSet options = command.getParser().parse(); final String input; if (abort) { @@ -80,14 +80,14 @@ private MockTerminal executeCommand(ElasticsearchNodeCommand command, Environmen } private MockTerminal unsafeBootstrap(Environment environment, boolean abort) throws Exception { - final MockTerminal terminal = executeCommand(new UnsafeBootstrapMasterCommand(), environment, 0, abort); + final MockTerminal terminal = executeCommand(new UnsafeBootstrapMasterCommand(), environment, abort); assertThat(terminal.getOutput(), containsString(UnsafeBootstrapMasterCommand.CONFIRMATION_MSG)); assertThat(terminal.getOutput(), containsString(UnsafeBootstrapMasterCommand.MASTER_NODE_BOOTSTRAPPED_MSG)); return terminal; } private MockTerminal detachCluster(Environment environment, boolean abort) throws Exception { - final MockTerminal terminal = executeCommand(new DetachClusterCommand(), environment, 0, abort); + final MockTerminal terminal = executeCommand(new DetachClusterCommand(), environment, abort); assertThat(terminal.getOutput(), containsString(DetachClusterCommand.CONFIRMATION_MSG)); assertThat(terminal.getOutput(), containsString(DetachClusterCommand.NODE_DETACHED_MSG)); return terminal; @@ -490,7 +490,7 @@ public void testCleanupOldMetaDataFails() throws Exception { protected void cleanUpOldMetaData(Terminal terminal, Path[] dataPaths, long newGeneration) { throw new SimulatedDeleteFailureException(); } - }, environment, 0, false); + }, environment, false); // check original meta-data left untouched. @@ -503,7 +503,7 @@ protected void cleanUpOldMetaData(Terminal terminal, Path[] dataPaths, long newG assertNotEquals(originalMetaData.clusterUUID(), secondMetaData.clusterUUID()); // check that a new run will cleanup. - executeCommand(new UnsafeBootstrapMasterCommand(), environment, 0, false); + executeCommand(new UnsafeBootstrapMasterCommand(), environment, false); assertNull(loadMetaData(dataPaths, namedXContentRegistry, originalManifest)); assertNull(loadMetaData(dataPaths, namedXContentRegistry, secondManifest)); diff --git a/server/src/test/java/org/elasticsearch/env/NodeEnvironmentTests.java b/server/src/test/java/org/elasticsearch/env/NodeEnvironmentTests.java index 89a10af1a6fc2..f21b55b9aee8f 100644 --- a/server/src/test/java/org/elasticsearch/env/NodeEnvironmentTests.java +++ b/server/src/test/java/org/elasticsearch/env/NodeEnvironmentTests.java @@ -59,24 +59,8 @@ public class NodeEnvironmentTests extends ESTestCase { private final IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("foo", Settings.EMPTY); - public void testNodeLockSillySettings() { - try { - NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING.get(Settings.builder() - .put(NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING.getKey(), between(Integer.MIN_VALUE, 0)).build()); - fail("expected failure"); - } catch (IllegalArgumentException e) { - assertThat(e.getMessage(), containsString("must be >= 1")); - } - - // Even though its silly MAXINT nodes is a-ok! - int value = between(1, Integer.MAX_VALUE); - int max = NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING.get( - Settings.builder().put(NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING.getKey(), value).build()); - assertEquals(value, max); - } - - public void testNodeLockSingleEnvironment() throws IOException { - final Settings settings = buildEnvSettings(Settings.builder().put("node.max_local_storage_nodes", 1).build()); + public void testNodeLock() throws IOException { + final Settings settings = buildEnvSettings(Settings.EMPTY); NodeEnvironment env = newNodeEnvironment(settings); List dataPaths = Environment.PATH_DATA_SETTING.get(settings); @@ -118,19 +102,6 @@ public void testSegmentInfosTracing() { } } - public void testNodeLockMultipleEnvironment() throws IOException { - final Settings settings = buildEnvSettings(Settings.builder().put("node.max_local_storage_nodes", 2).build()); - final NodeEnvironment first = newNodeEnvironment(settings); - List dataPaths = Environment.PATH_DATA_SETTING.get(settings); - NodeEnvironment second = new NodeEnvironment(settings, TestEnvironment.newEnvironment(settings)); - assertEquals(first.nodeDataPaths().length, dataPaths.size()); - assertEquals(second.nodeDataPaths().length, dataPaths.size()); - for (int i = 0; i < dataPaths.size(); i++) { - assertEquals(first.nodeDataPaths()[i].getParent(), second.nodeDataPaths()[i].getParent()); - } - IOUtils.close(first, second); - } - public void testShardLock() throws Exception { final NodeEnvironment env = newNodeEnvironment(); @@ -447,7 +418,7 @@ public void testExistingTempFiles() throws IOException { String[] paths = tmpPaths(); // simulate some previous left over temp files for (String path : randomSubsetOf(randomIntBetween(1, paths.length), paths)) { - final Path nodePath = NodeEnvironment.resolveNodePath(PathUtils.get(path), 0); + final Path nodePath = NodeEnvironment.resolveNodePath(PathUtils.get(path)); Files.createDirectories(nodePath); Files.createFile(nodePath.resolve(NodeEnvironment.TEMP_FILE_NAME)); if (randomBoolean()) { @@ -462,7 +433,7 @@ public void testExistingTempFiles() throws IOException { // check we clean up for (String path: paths) { - final Path nodePath = NodeEnvironment.resolveNodePath(PathUtils.get(path), 0); + final Path nodePath = NodeEnvironment.resolveNodePath(PathUtils.get(path)); final Path tempFile = nodePath.resolve(NodeEnvironment.TEMP_FILE_NAME); assertFalse(tempFile + " should have been cleaned", Files.exists(tempFile)); final Path srcTempFile = nodePath.resolve(NodeEnvironment.TEMP_FILE_NAME + ".src"); diff --git a/server/src/test/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommandTests.java b/server/src/test/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommandTests.java index 2079b80cd386c..c9a7b236d9c8f 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommandTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommandTests.java @@ -91,7 +91,7 @@ public void setup() throws IOException { .putList(Environment.PATH_DATA_SETTING.getKey(), dataDir.toAbsolutePath().toString()).build()); // create same directory structure as prod does - final Path path = NodeEnvironment.resolveNodePath(dataDir, 0); + final Path path = NodeEnvironment.resolveNodePath(dataDir); Files.createDirectories(path); settings = Settings.builder() .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) From 13dc1cf6b1983299da34cc22e3fe0e62168a5580 Mon Sep 17 00:00:00 2001 From: Luca Cavanna Date: Thu, 23 May 2019 16:02:46 +0200 Subject: [PATCH 066/224] Update max_concurrent_shard_request parameter docs (#42227) Some of the docs were outdated as they did not mention that the limit is not per node. Also, The default value changed. Relates to #31206 --- docs/reference/search.asciidoc | 7 +++---- docs/reference/search/multi-search.asciidoc | 19 ++++++++++--------- .../resources/rest-api-spec/api/msearch.json | 4 ++-- .../resources/rest-api-spec/api/search.json | 2 +- 4 files changed, 16 insertions(+), 16 deletions(-) diff --git a/docs/reference/search.asciidoc b/docs/reference/search.asciidoc index dd7faca60aa92..e99fb6f388d02 100644 --- a/docs/reference/search.asciidoc +++ b/docs/reference/search.asciidoc @@ -154,12 +154,11 @@ configure a soft limit, you can update the `action.search.shard_count.limit` cluster setting in order to reject search requests that hit too many shards. The request parameter `max_concurrent_shard_requests` can be used to control the -maximum number of concurrent shard requests the search API will execute for the -request. This parameter should be used to protect a single request from +maximum number of concurrent shard requests the search API will execute per node +for the request. This parameter should be used to protect a single request from overloading a cluster (e.g., a default request will hit all indices in a cluster which could cause shard request rejections if the number of shards per node is -high). This default is based on the number of data nodes in the cluster but at -most `256`. +high). This default value is `5`. -- diff --git a/docs/reference/search/multi-search.asciidoc b/docs/reference/search/multi-search.asciidoc index 34dc37d794cad..87a87c922b37c 100644 --- a/docs/reference/search/multi-search.asciidoc +++ b/docs/reference/search/multi-search.asciidoc @@ -85,15 +85,16 @@ The msearch's `max_concurrent_searches` request parameter can be used to control the maximum number of concurrent searches the multi search api will execute. This default is based on the number of data nodes and the default search thread pool size. -The request parameter `max_concurrent_shard_requests` can be used to control the -maximum number of concurrent shard requests the each sub search request will execute. -This parameter should be used to protect a single request from overloading a cluster -(e.g., a default request will hit all indices in a cluster which could cause shard request rejections -if the number of shards per node is high). This default is based on the number of -data nodes in the cluster but at most `256`.In certain scenarios parallelism isn't achieved through -concurrent request such that this protection will result in poor performance. For -instance in an environment where only a very low number of concurrent search requests are expected -it might help to increase this value to a higher number. +The request parameter `max_concurrent_shard_requests` can be used to control +the maximum number of concurrent shard requests that each sub search request +will execute per node. This parameter should be used to protect a single +request from overloading a cluster (e.g., a default request will hit all +indices in a cluster which could cause shard request rejections if the number +of shards per node is high). This default value is `5`.In certain scenarios +parallelism isn't achieved through concurrent request such that this protection +will result in poor performance. For instance in an environment where only a +very low number of concurrent search requests are expected it might help to +increase this value to a higher number. [float] [[msearch-security]] diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/msearch.json b/rest-api-spec/src/main/resources/rest-api-spec/api/msearch.json index 647ed9ed3ac77..73780a1835893 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/msearch.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/msearch.json @@ -32,8 +32,8 @@ }, "max_concurrent_shard_requests" : { "type" : "number", - "description" : "The number of concurrent shard requests each sub search executes concurrently. This value should be used to limit the impact of the search on the cluster in order to limit the number of concurrent shard requests", - "default" : "The default grows with the number of nodes in the cluster but is at most 256." + "description" : "The number of concurrent shard requests each sub search executes concurrently per node. This value should be used to limit the impact of the search on the cluster in order to limit the number of concurrent shard requests", + "default" : 5 }, "rest_total_hits_as_int" : { "type" : "boolean", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/search.json b/rest-api-spec/src/main/resources/rest-api-spec/api/search.json index 08d753ee9d558..75444eb66767b 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/search.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/search.json @@ -181,7 +181,7 @@ "max_concurrent_shard_requests" : { "type" : "number", "description" : "The number of concurrent shard requests per node this search executes concurrently. This value should be used to limit the impact of the search on the cluster in order to limit the number of concurrent shard requests", - "default" : "The default is 5." + "default" : 5 }, "pre_filter_shard_size" : { "type" : "number", From 733e589bc91355192e867b475e64b073c213fb0c Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Thu, 23 May 2019 08:05:40 -0700 Subject: [PATCH 067/224] Remove leftover code from one shard by default (#42374) We had some logic to determine the number of shards, it was based on the index version created. Now that master would only ever see index versions created >= 7.0.0, this logic is no longer needed. This commit removes this dead code. --- .../metadata/MetaDataCreateIndexService.java | 19 +------------------ .../MetaDataCreateIndexServiceTests.java | 8 -------- 2 files changed, 1 insertion(+), 26 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java index 1c9794191bf6e..79af84748ad2d 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java @@ -20,7 +20,6 @@ package org.elasticsearch.cluster.metadata; import com.carrotsearch.hppc.cursors.ObjectObjectCursor; - import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; @@ -386,8 +385,7 @@ public ClusterState execute(ClusterState currentState) throws Exception { indexSettingsBuilder.put(IndexMetaData.SETTING_INDEX_VERSION_CREATED.getKey(), createdVersion); } if (indexSettingsBuilder.get(SETTING_NUMBER_OF_SHARDS) == null) { - final int numberOfShards = getNumberOfShards(indexSettingsBuilder); - indexSettingsBuilder.put(SETTING_NUMBER_OF_SHARDS, settings.getAsInt(SETTING_NUMBER_OF_SHARDS, numberOfShards)); + indexSettingsBuilder.put(SETTING_NUMBER_OF_SHARDS, settings.getAsInt(SETTING_NUMBER_OF_SHARDS, 1)); } if (indexSettingsBuilder.get(SETTING_NUMBER_OF_REPLICAS) == null) { indexSettingsBuilder.put(SETTING_NUMBER_OF_REPLICAS, settings.getAsInt(SETTING_NUMBER_OF_REPLICAS, 1)); @@ -589,21 +587,6 @@ public ClusterState execute(ClusterState currentState) throws Exception { } } - static int getNumberOfShards(final Settings.Builder indexSettingsBuilder) { - // TODO: this logic can be removed when the current major version is 8 - // TODO: https://github.com/elastic/elasticsearch/issues/38556 - // assert Version.CURRENT.major == 7; - final int numberOfShards; - final Version indexVersionCreated = - Version.fromId(Integer.parseInt(indexSettingsBuilder.get(IndexMetaData.SETTING_INDEX_VERSION_CREATED.getKey()))); - if (indexVersionCreated.before(Version.V_7_0_0)) { - numberOfShards = 5; - } else { - numberOfShards = 1; - } - return numberOfShards; - } - @Override public void onFailure(String source, Exception e) { if (e instanceof ResourceAlreadyExistsException) { diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexServiceTests.java index f83d0aa783c24..3de716acfee44 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexServiceTests.java @@ -99,14 +99,6 @@ public static boolean isSplitable(int source, int target) { return source * x == target; } - public void testNumberOfShards() { - { - final Version versionCreated = VersionUtils.randomVersionBetween(random(), Version.V_7_0_0, Version.CURRENT); - final Settings.Builder indexSettingsBuilder = Settings.builder().put(SETTING_VERSION_CREATED, versionCreated); - assertThat(MetaDataCreateIndexService.IndexCreationTask.getNumberOfShards(indexSettingsBuilder), equalTo(1)); - } - } - public void testValidateShrinkIndex() { int numShards = randomIntBetween(2, 42); ClusterState state = createClusterState("source", numShards, randomIntBetween(0, 10), From 1b0c728cfa2b6fe536a39430c4c4292f30ec1f3f Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Thu, 23 May 2019 08:06:07 -0700 Subject: [PATCH 068/224] Remove deprecated search.remote settings (#42381) We deprecated these settings awhile ago, in favor of cluster.remote. In 7.x we were gentle and provided automatic upgrade of these settings to the new settings. Now it is time for them to go. This commit removes the deprecated search.remote settings. --- docs/reference/migration/migrate_8_0.asciidoc | 2 + .../migration/migrate_8_0/settings.asciidoc | 13 +++ .../FullClusterRestartSettingsUpgradeIT.java | 24 ----- .../common/settings/ClusterSettings.java | 12 +-- .../transport/RemoteClusterAware.java | 101 +----------------- .../transport/RemoteClusterService.java | 62 +---------- .../common/settings/UpgradeSettingsIT.java | 34 ------ .../transport/RemoteClusterServiceTests.java | 34 +----- .../transport/RemoteClusterSettingsTests.java | 75 ------------- .../FullClusterRestartSettingsUpgradeIT.java | 24 ----- 10 files changed, 26 insertions(+), 355 deletions(-) create mode 100644 docs/reference/migration/migrate_8_0/settings.asciidoc delete mode 100644 qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartSettingsUpgradeIT.java delete mode 100644 x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartSettingsUpgradeIT.java diff --git a/docs/reference/migration/migrate_8_0.asciidoc b/docs/reference/migration/migrate_8_0.asciidoc index 84672da61635c..b697abf9a9f25 100644 --- a/docs/reference/migration/migrate_8_0.asciidoc +++ b/docs/reference/migration/migrate_8_0.asciidoc @@ -24,6 +24,7 @@ coming[8.0.0] * <> * <> * <> +* <> //NOTE: The notable-breaking-changes tagged regions are re-used in the //Installation and Upgrade Guide @@ -59,3 +60,4 @@ include::migrate_8_0/node.asciidoc[] include::migrate_8_0/transport.asciidoc[] include::migrate_8_0/http.asciidoc[] include::migrate_8_0/reindex.asciidoc[] +include::migrate_8_0/settings.asciidoc[] diff --git a/docs/reference/migration/migrate_8_0/settings.asciidoc b/docs/reference/migration/migrate_8_0/settings.asciidoc new file mode 100644 index 0000000000000..0c21ae4021aa7 --- /dev/null +++ b/docs/reference/migration/migrate_8_0/settings.asciidoc @@ -0,0 +1,13 @@ +[float] +[[breaking_80_settings_changes]] +=== Settings changes + +[float] +[[search-remote-settings-removed]] +==== The `search.remote` settings have been removed + +In 6.5 these settings were deprecated in favor of `cluster.remote`. In 7.x we +provided automatic upgrading of these settings to their `cluster.remote` +counterparts. In 8.0.0, these settings have been removed. Elasticsearch will +refuse to start if you have these settings in your configuration or cluster +state. diff --git a/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartSettingsUpgradeIT.java b/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartSettingsUpgradeIT.java deleted file mode 100644 index 9e1e5f93fcd92..0000000000000 --- a/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartSettingsUpgradeIT.java +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.upgrades; - -public class FullClusterRestartSettingsUpgradeIT extends AbstractFullClusterRestartTestCase { - -} diff --git a/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java b/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java index e29ceb7372bcf..026dfa4633991 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java @@ -280,19 +280,12 @@ public void apply(Settings value, Settings current, Settings previous) { SearchService.DEFAULT_ALLOW_PARTIAL_SEARCH_RESULTS, TransportSearchAction.SHARD_COUNT_LIMIT_SETTING, RemoteClusterAware.REMOTE_CLUSTERS_SEEDS, - RemoteClusterAware.SEARCH_REMOTE_CLUSTERS_SEEDS, RemoteClusterAware.REMOTE_CLUSTERS_PROXY, - RemoteClusterAware.SEARCH_REMOTE_CLUSTERS_PROXY, RemoteClusterService.REMOTE_CLUSTER_SKIP_UNAVAILABLE, - RemoteClusterService.SEARCH_REMOTE_CLUSTER_SKIP_UNAVAILABLE, RemoteClusterService.REMOTE_CONNECTIONS_PER_CLUSTER, - RemoteClusterService.SEARCH_REMOTE_CONNECTIONS_PER_CLUSTER, RemoteClusterService.REMOTE_INITIAL_CONNECTION_TIMEOUT_SETTING, - RemoteClusterService.SEARCH_REMOTE_INITIAL_CONNECTION_TIMEOUT_SETTING, RemoteClusterService.REMOTE_NODE_ATTRIBUTE, - RemoteClusterService.SEARCH_REMOTE_NODE_ATTRIBUTE, RemoteClusterService.ENABLE_REMOTE_CLUSTERS, - RemoteClusterService.SEARCH_ENABLE_REMOTE_CLUSTERS, RemoteClusterService.REMOTE_CLUSTER_PING_SCHEDULE, RemoteClusterService.REMOTE_CLUSTER_COMPRESS, TransportCloseIndexAction.CLUSTER_INDICES_CLOSE_ENABLE_SETTING, @@ -451,9 +444,6 @@ public void apply(Settings value, Settings current, Settings previous) { ClusterBootstrapService.UNCONFIGURED_BOOTSTRAP_TIMEOUT_SETTING, LagDetector.CLUSTER_FOLLOWER_LAG_TIMEOUT_SETTING); - static List> BUILT_IN_SETTING_UPGRADERS = List.of( - RemoteClusterAware.SEARCH_REMOTE_CLUSTER_SEEDS_UPGRADER, - RemoteClusterAware.SEARCH_REMOTE_CLUSTERS_PROXY_UPGRADER, - RemoteClusterService.SEARCH_REMOTE_CLUSTER_SKIP_UNAVAILABLE_UPGRADER); + static List> BUILT_IN_SETTING_UPGRADERS = Collections.emptyList(); } diff --git a/server/src/main/java/org/elasticsearch/transport/RemoteClusterAware.java b/server/src/main/java/org/elasticsearch/transport/RemoteClusterAware.java index 0c3874f0a0f35..316fcd275a5a0 100644 --- a/server/src/main/java/org/elasticsearch/transport/RemoteClusterAware.java +++ b/server/src/main/java/org/elasticsearch/transport/RemoteClusterAware.java @@ -27,7 +27,6 @@ import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; -import org.elasticsearch.common.settings.SettingUpgrader; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.unit.TimeValue; @@ -41,11 +40,8 @@ import java.util.EnumSet; import java.util.HashMap; import java.util.List; -import java.util.Locale; import java.util.Map; -import java.util.NavigableSet; import java.util.Set; -import java.util.TreeSet; import java.util.function.Supplier; import java.util.stream.Collectors; import java.util.stream.Stream; @@ -55,41 +51,6 @@ */ public abstract class RemoteClusterAware { - static { - // remove search.remote.* settings in 8.0.0 - // TODO https://github.com/elastic/elasticsearch/issues/38556 - // assert Version.CURRENT.major < 8; - } - - public static final Setting.AffixSetting> SEARCH_REMOTE_CLUSTERS_SEEDS = - Setting.affixKeySetting( - "search.remote.", - "seeds", - key -> Setting.listSetting( - key, - Collections.emptyList(), - s -> { - parsePort(s); - return s; - }, - Setting.Property.Deprecated, - Setting.Property.Dynamic, - Setting.Property.NodeScope)); - - public static final SettingUpgrader> SEARCH_REMOTE_CLUSTER_SEEDS_UPGRADER = new SettingUpgrader>() { - - @Override - public Setting> getSetting() { - return SEARCH_REMOTE_CLUSTERS_SEEDS; - } - - @Override - public String getKey(final String key) { - return key.replaceFirst("^search", "cluster"); - } - - }; - /** * A list of initial seed nodes to discover eligible nodes from the remote cluster */ @@ -98,10 +59,7 @@ public String getKey(final String key) { "seeds", key -> Setting.listSetting( key, - // the default needs to be emptyList() when fallback is removed - "_na_".equals(key) - ? SEARCH_REMOTE_CLUSTERS_SEEDS.getConcreteSettingForNamespace(key) - : SEARCH_REMOTE_CLUSTERS_SEEDS.getConcreteSetting(key.replaceAll("^cluster", "search")), + Collections.emptyList(), s -> { // validate seed address parsePort(s); @@ -113,35 +71,6 @@ public String getKey(final String key) { public static final char REMOTE_CLUSTER_INDEX_SEPARATOR = ':'; public static final String LOCAL_CLUSTER_GROUP_KEY = ""; - public static final Setting.AffixSetting SEARCH_REMOTE_CLUSTERS_PROXY = Setting.affixKeySetting( - "search.remote.", - "proxy", - key -> Setting.simpleString( - key, - s -> { - if (Strings.hasLength(s)) { - parsePort(s); - } - }, - Setting.Property.Deprecated, - Setting.Property.Dynamic, - Setting.Property.NodeScope), - REMOTE_CLUSTERS_SEEDS); - - public static final SettingUpgrader SEARCH_REMOTE_CLUSTERS_PROXY_UPGRADER = new SettingUpgrader() { - - @Override - public Setting getSetting() { - return SEARCH_REMOTE_CLUSTERS_PROXY; - } - - @Override - public String getKey(final String key) { - return key.replaceFirst("^search", "cluster"); - } - - }; - /** * A proxy address for the remote cluster. */ @@ -150,15 +79,10 @@ public String getKey(final String key) { "proxy", key -> Setting.simpleString( key, - // no default is needed when fallback is removed, use simple string which gives empty - "_na_".equals(key) - ? SEARCH_REMOTE_CLUSTERS_PROXY.getConcreteSettingForNamespace(key) - : SEARCH_REMOTE_CLUSTERS_PROXY.getConcreteSetting(key.replaceAll("^cluster", "search")), s -> { if (Strings.hasLength(s)) { parsePort(s); } - return s; }, Setting.Property.Dynamic, Setting.Property.NodeScope), @@ -185,22 +109,8 @@ protected static Map>>>> remoteSeeds = buildRemoteClustersDynamicConfig(settings, REMOTE_CLUSTERS_SEEDS); - final Map>>>> searchRemoteSeeds = - buildRemoteClustersDynamicConfig(settings, SEARCH_REMOTE_CLUSTERS_SEEDS); - // sort the intersection for predictable output order - final NavigableSet intersection = - new TreeSet<>(Arrays.asList( - searchRemoteSeeds.keySet().stream().filter(s -> remoteSeeds.keySet().contains(s)).sorted().toArray(String[]::new))); - if (intersection.isEmpty() == false) { - final String message = String.format( - Locale.ROOT, - "found duplicate remote cluster configurations for cluster alias%s [%s]", - intersection.size() == 1 ? "" : "es", - String.join(",", intersection)); - throw new IllegalArgumentException(message); - } - return Stream - .concat(remoteSeeds.entrySet().stream(), searchRemoteSeeds.entrySet().stream()) + return remoteSeeds.entrySet() + .stream() .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); } @@ -296,11 +206,6 @@ public void listenForUpdates(ClusterSettings clusterSettings) { RemoteClusterAware.REMOTE_CLUSTERS_SEEDS, RemoteClusterService.REMOTE_CLUSTER_COMPRESS, RemoteClusterService.REMOTE_CLUSTER_PING_SCHEDULE); clusterSettings.addAffixGroupUpdateConsumer(remoteClusterSettings, this::updateRemoteCluster); - clusterSettings.addAffixUpdateConsumer( - RemoteClusterAware.SEARCH_REMOTE_CLUSTERS_PROXY, - RemoteClusterAware.SEARCH_REMOTE_CLUSTERS_SEEDS, - (key, value) -> updateRemoteCluster(key, value.v2(), value.v1()), - (namespace, value) -> {}); } static InetSocketAddress parseSeedAddress(String remoteHost) { diff --git a/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java b/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java index 6ab73e8a947fc..4f690d12acf1e 100644 --- a/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java +++ b/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java @@ -33,7 +33,6 @@ import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; -import org.elasticsearch.common.settings.SettingUpgrader; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.CountDown; @@ -70,15 +69,6 @@ public final class RemoteClusterService extends RemoteClusterAware implements Cl private static final ActionListener noopListener = ActionListener.wrap((x) -> {}, (x) -> {}); - static { - // remove search.remote.* settings in 8.0.0 - // TODO - // assert Version.CURRENT.major < 8; - } - - public static final Setting SEARCH_REMOTE_CONNECTIONS_PER_CLUSTER = - Setting.intSetting("search.remote.connections_per_cluster", 3, 1, Setting.Property.NodeScope, Setting.Property.Deprecated); - /** * The maximum number of connections that will be established to a remote cluster. For instance if there is only a single * seed node, other nodes will be discovered up to the given number of nodes in this setting. The default is 3. @@ -86,44 +76,27 @@ public final class RemoteClusterService extends RemoteClusterAware implements Cl public static final Setting REMOTE_CONNECTIONS_PER_CLUSTER = Setting.intSetting( "cluster.remote.connections_per_cluster", - SEARCH_REMOTE_CONNECTIONS_PER_CLUSTER, // the default needs to three when fallback is removed + 3, 1, Setting.Property.NodeScope); - public static final Setting SEARCH_REMOTE_INITIAL_CONNECTION_TIMEOUT_SETTING = - Setting.positiveTimeSetting( - "search.remote.initial_connect_timeout", - TimeValue.timeValueSeconds(30), - Setting.Property.NodeScope, - Setting.Property.Deprecated); - /** * The initial connect timeout for remote cluster connections */ public static final Setting REMOTE_INITIAL_CONNECTION_TIMEOUT_SETTING = Setting.positiveTimeSetting( "cluster.remote.initial_connect_timeout", - SEARCH_REMOTE_INITIAL_CONNECTION_TIMEOUT_SETTING, // the default needs to be thirty seconds when fallback is removed TimeValue.timeValueSeconds(30), Setting.Property.NodeScope); - public static final Setting SEARCH_REMOTE_NODE_ATTRIBUTE = - Setting.simpleString("search.remote.node.attr", Setting.Property.NodeScope, Setting.Property.Deprecated); - /** * The name of a node attribute to select nodes that should be connected to in the remote cluster. * For instance a node can be configured with {@code node.attr.gateway: true} in order to be eligible as a gateway node between - * clusters. In that case {@code search.remote.node.attr: gateway} can be used to filter out other nodes in the remote cluster. + * clusters. In that case {@code cluster.remote.node.attr: gateway} can be used to filter out other nodes in the remote cluster. * The value of the setting is expected to be a boolean, {@code true} for nodes that can become gateways, {@code false} otherwise. */ public static final Setting REMOTE_NODE_ATTRIBUTE = - Setting.simpleString( - "cluster.remote.node.attr", - SEARCH_REMOTE_NODE_ATTRIBUTE, // no default is needed when fallback is removed, use simple string which gives empty - Setting.Property.NodeScope); - - public static final Setting SEARCH_ENABLE_REMOTE_CLUSTERS = - Setting.boolSetting("search.remote.connect", true, Setting.Property.NodeScope, Setting.Property.Deprecated); + Setting.simpleString("cluster.remote.node.attr", Setting.Property.NodeScope); /** * If true connecting to remote clusters is supported on this node. If false this node will not establish @@ -133,40 +106,16 @@ public final class RemoteClusterService extends RemoteClusterAware implements Cl public static final Setting ENABLE_REMOTE_CLUSTERS = Setting.boolSetting( "cluster.remote.connect", - SEARCH_ENABLE_REMOTE_CLUSTERS, // the default needs to be true when fallback is removed + true, Setting.Property.NodeScope); - public static final Setting.AffixSetting SEARCH_REMOTE_CLUSTER_SKIP_UNAVAILABLE = - Setting.affixKeySetting( - "search.remote.", - "skip_unavailable", - key -> boolSetting(key, false, Setting.Property.Deprecated, Setting.Property.Dynamic, Setting.Property.NodeScope), - REMOTE_CLUSTERS_SEEDS); - - public static final SettingUpgrader SEARCH_REMOTE_CLUSTER_SKIP_UNAVAILABLE_UPGRADER = new SettingUpgrader() { - - @Override - public Setting getSetting() { - return SEARCH_REMOTE_CLUSTER_SKIP_UNAVAILABLE; - } - - @Override - public String getKey(final String key) { - return key.replaceFirst("^search", "cluster"); - } - - }; - public static final Setting.AffixSetting REMOTE_CLUSTER_SKIP_UNAVAILABLE = Setting.affixKeySetting( "cluster.remote.", "skip_unavailable", key -> boolSetting( key, - // the default needs to be false when fallback is removed - "_na_".equals(key) - ? SEARCH_REMOTE_CLUSTER_SKIP_UNAVAILABLE.getConcreteSettingForNamespace(key) - : SEARCH_REMOTE_CLUSTER_SKIP_UNAVAILABLE.getConcreteSetting(key.replaceAll("^cluster", "search")), + false, Setting.Property.Dynamic, Setting.Property.NodeScope), REMOTE_CLUSTERS_SEEDS); @@ -367,7 +316,6 @@ Set getRemoteClusterNames() { public void listenForUpdates(ClusterSettings clusterSettings) { super.listenForUpdates(clusterSettings); clusterSettings.addAffixUpdateConsumer(REMOTE_CLUSTER_SKIP_UNAVAILABLE, this::updateSkipUnavailable, (alias, value) -> {}); - clusterSettings.addAffixUpdateConsumer(SEARCH_REMOTE_CLUSTER_SKIP_UNAVAILABLE, this::updateSkipUnavailable, (alias, value) -> {}); } private synchronized void updateSkipUnavailable(String clusterAlias, Boolean skipUnavailable) { diff --git a/server/src/test/java/org/elasticsearch/common/settings/UpgradeSettingsIT.java b/server/src/test/java/org/elasticsearch/common/settings/UpgradeSettingsIT.java index 99161f842b7c2..839b96e641870 100644 --- a/server/src/test/java/org/elasticsearch/common/settings/UpgradeSettingsIT.java +++ b/server/src/test/java/org/elasticsearch/common/settings/UpgradeSettingsIT.java @@ -24,7 +24,6 @@ import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESSingleNodeTestCase; -import org.elasticsearch.transport.RemoteClusterService; import org.junit.After; import java.util.Arrays; @@ -123,37 +122,4 @@ private void runUpgradeSettingsOnUpdateTest( assertThat(UpgradeSettingsPlugin.newSetting.get(settingsFunction.apply(response.getState().metaData())), equalTo("new." + value)); } - public void testUpgradeRemoteClusterSettings() { - final boolean skipUnavailable = randomBoolean(); - client() - .admin() - .cluster() - .prepareUpdateSettings() - .setPersistentSettings( - Settings.builder() - .put("search.remote.foo.skip_unavailable", skipUnavailable) - .putList("search.remote.foo.seeds", Collections.singletonList("localhost:9200")) - .put("search.remote.foo.proxy", "localhost:9200") - .build()) - .get(); - - final ClusterStateResponse response = client().admin().cluster().prepareState().clear().setMetaData(true).get(); - - final Settings settings = response.getState().metaData().persistentSettings(); - assertFalse(RemoteClusterService.SEARCH_REMOTE_CLUSTER_SKIP_UNAVAILABLE.getConcreteSettingForNamespace("foo").exists(settings)); - assertTrue(RemoteClusterService.REMOTE_CLUSTER_SKIP_UNAVAILABLE.getConcreteSettingForNamespace("foo").exists(settings)); - assertThat( - RemoteClusterService.REMOTE_CLUSTER_SKIP_UNAVAILABLE.getConcreteSettingForNamespace("foo").get(settings), - equalTo(skipUnavailable)); - assertFalse(RemoteClusterService.SEARCH_REMOTE_CLUSTERS_SEEDS.getConcreteSettingForNamespace("foo").exists(settings)); - assertTrue(RemoteClusterService.REMOTE_CLUSTERS_SEEDS.getConcreteSettingForNamespace("foo").exists(settings)); - assertThat( - RemoteClusterService.REMOTE_CLUSTERS_SEEDS.getConcreteSettingForNamespace("foo").get(settings), - equalTo(Collections.singletonList("localhost:9200"))); - assertFalse(RemoteClusterService.SEARCH_REMOTE_CLUSTERS_PROXY.getConcreteSettingForNamespace("foo").exists(settings)); - assertTrue(RemoteClusterService.REMOTE_CLUSTERS_PROXY.getConcreteSettingForNamespace("foo").exists(settings)); - assertThat( - RemoteClusterService.REMOTE_CLUSTERS_PROXY.getConcreteSettingForNamespace("foo").get(settings), equalTo("localhost:9200")); - } - } diff --git a/server/src/test/java/org/elasticsearch/transport/RemoteClusterServiceTests.java b/server/src/test/java/org/elasticsearch/transport/RemoteClusterServiceTests.java index d2c476571c927..1105fe137e322 100644 --- a/server/src/test/java/org/elasticsearch/transport/RemoteClusterServiceTests.java +++ b/server/src/test/java/org/elasticsearch/transport/RemoteClusterServiceTests.java @@ -57,12 +57,10 @@ import java.util.function.Supplier; import java.util.stream.Collectors; -import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.either; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasSize; -import static org.hamcrest.Matchers.hasToString; import static org.hamcrest.Matchers.instanceOf; public class RemoteClusterServiceTests extends ESTestCase { @@ -128,8 +126,8 @@ public void testBuildRemoteClustersDynamicConfig() throws Exception { .put("cluster.remote.bar.seeds", "[::1]:9090") .put("cluster.remote.boom.seeds", "boom-node1.internal:1000") .put("cluster.remote.boom.proxy", "foo.bar.com:1234") - .put("search.remote.quux.seeds", "quux:9300") - .put("search.remote.quux.proxy", "quux-proxy:19300") + .put("cluster.remote.quux.seeds", "quux:9300") + .put("cluster.remote.quux.proxy", "quux-proxy:19300") .build()); assertThat(map.keySet(), containsInAnyOrder(equalTo("foo"), equalTo("bar"), equalTo("boom"), equalTo("quux"))); assertThat(map.get("foo").v2(), hasSize(1)); @@ -162,34 +160,6 @@ public void testBuildRemoteClustersDynamicConfig() throws Exception { assertEquals(quux.getId(), "quux#quux:9300"); assertEquals("quux-proxy:19300", map.get("quux").v1()); assertEquals(quux.getVersion(), Version.CURRENT.minimumCompatibilityVersion()); - - assertSettingDeprecationsAndWarnings(new String[]{"search.remote.quux.seeds", "search.remote.quux.proxy"}); - } - - public void testBuildRemoteClustersDynamicConfigWithDuplicate() { - final IllegalArgumentException e = expectThrows( - IllegalArgumentException.class, - () -> RemoteClusterService.buildRemoteClustersDynamicConfig( - Settings.builder() - .put("cluster.remote.foo.seeds", "192.168.0.1:8080") - .put("search.remote.foo.seeds", "192.168.0.1:8080") - .build())); - assertThat(e, hasToString(containsString("found duplicate remote cluster configurations for cluster alias [foo]"))); - assertSettingDeprecationsAndWarnings(new String[]{"search.remote.foo.seeds"}); - } - - public void testBuildRemoteClustersDynamicConfigWithDuplicates() { - final IllegalArgumentException e = expectThrows( - IllegalArgumentException.class, - () -> RemoteClusterService.buildRemoteClustersDynamicConfig( - Settings.builder() - .put("cluster.remote.foo.seeds", "192.168.0.1:8080") - .put("search.remote.foo.seeds", "192.168.0.1:8080") - .put("cluster.remote.bar.seeds", "192.168.0.1:8080") - .put("search.remote.bar.seeds", "192.168.0.1:8080") - .build())); - assertThat(e, hasToString(containsString("found duplicate remote cluster configurations for cluster aliases [bar,foo]"))); - assertSettingDeprecationsAndWarnings(new String[]{"search.remote.bar.seeds", "search.remote.foo.seeds"}); } public void testGroupClusterIndices() throws IOException { diff --git a/server/src/test/java/org/elasticsearch/transport/RemoteClusterSettingsTests.java b/server/src/test/java/org/elasticsearch/transport/RemoteClusterSettingsTests.java index cfffc3839461e..41df47363b0b6 100644 --- a/server/src/test/java/org/elasticsearch/transport/RemoteClusterSettingsTests.java +++ b/server/src/test/java/org/elasticsearch/transport/RemoteClusterSettingsTests.java @@ -19,125 +19,50 @@ package org.elasticsearch.transport; -import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.test.ESTestCase; -import java.util.ArrayList; -import java.util.List; import java.util.concurrent.TimeUnit; import static org.elasticsearch.transport.RemoteClusterAware.REMOTE_CLUSTERS_PROXY; import static org.elasticsearch.transport.RemoteClusterAware.REMOTE_CLUSTERS_SEEDS; -import static org.elasticsearch.transport.RemoteClusterAware.SEARCH_REMOTE_CLUSTERS_PROXY; -import static org.elasticsearch.transport.RemoteClusterAware.SEARCH_REMOTE_CLUSTERS_SEEDS; import static org.elasticsearch.transport.RemoteClusterService.ENABLE_REMOTE_CLUSTERS; import static org.elasticsearch.transport.RemoteClusterService.REMOTE_CLUSTER_SKIP_UNAVAILABLE; import static org.elasticsearch.transport.RemoteClusterService.REMOTE_CONNECTIONS_PER_CLUSTER; import static org.elasticsearch.transport.RemoteClusterService.REMOTE_INITIAL_CONNECTION_TIMEOUT_SETTING; import static org.elasticsearch.transport.RemoteClusterService.REMOTE_NODE_ATTRIBUTE; -import static org.elasticsearch.transport.RemoteClusterService.SEARCH_ENABLE_REMOTE_CLUSTERS; -import static org.elasticsearch.transport.RemoteClusterService.SEARCH_REMOTE_CLUSTER_SKIP_UNAVAILABLE; -import static org.elasticsearch.transport.RemoteClusterService.SEARCH_REMOTE_CONNECTIONS_PER_CLUSTER; -import static org.elasticsearch.transport.RemoteClusterService.SEARCH_REMOTE_INITIAL_CONNECTION_TIMEOUT_SETTING; -import static org.elasticsearch.transport.RemoteClusterService.SEARCH_REMOTE_NODE_ATTRIBUTE; import static org.hamcrest.Matchers.emptyCollectionOf; import static org.hamcrest.Matchers.equalTo; public class RemoteClusterSettingsTests extends ESTestCase { - public void testConnectionsPerClusterFallback() { - final int value = randomIntBetween(1, 8); - final Settings settings = Settings.builder().put(SEARCH_REMOTE_CONNECTIONS_PER_CLUSTER.getKey(), value).build(); - assertThat(REMOTE_CONNECTIONS_PER_CLUSTER.get(settings), equalTo(value)); - assertSettingDeprecationsAndWarnings(new Setting[]{SEARCH_REMOTE_CONNECTIONS_PER_CLUSTER}); - } - public void testConnectionsPerClusterDefault() { assertThat(REMOTE_CONNECTIONS_PER_CLUSTER.get(Settings.EMPTY), equalTo(3)); } - public void testInitialConnectTimeoutFallback() { - final String value = randomTimeValue(30, 300, "s"); - final Settings settings = Settings.builder().put(SEARCH_REMOTE_INITIAL_CONNECTION_TIMEOUT_SETTING.getKey(), value).build(); - assertThat( - REMOTE_INITIAL_CONNECTION_TIMEOUT_SETTING.get(settings), - equalTo(TimeValue.parseTimeValue(value, SEARCH_REMOTE_INITIAL_CONNECTION_TIMEOUT_SETTING.getKey()))); - assertSettingDeprecationsAndWarnings(new Setting[]{SEARCH_REMOTE_INITIAL_CONNECTION_TIMEOUT_SETTING}); - } - public void testInitialConnectTimeoutDefault() { assertThat(REMOTE_INITIAL_CONNECTION_TIMEOUT_SETTING.get(Settings.EMPTY), equalTo(new TimeValue(30, TimeUnit.SECONDS))); } - public void testRemoteNodeAttributeFallback() { - final String attribute = randomAlphaOfLength(8); - final Settings settings = Settings.builder().put(SEARCH_REMOTE_NODE_ATTRIBUTE.getKey(), attribute).build(); - assertThat(REMOTE_NODE_ATTRIBUTE.get(settings), equalTo(attribute)); - assertSettingDeprecationsAndWarnings(new Setting[]{SEARCH_REMOTE_NODE_ATTRIBUTE}); - } - public void testRemoteNodeAttributeDefault() { assertThat(REMOTE_NODE_ATTRIBUTE.get(Settings.EMPTY), equalTo("")); } - public void testEnableRemoteClustersFallback() { - final boolean enable = randomBoolean(); - final Settings settings = Settings.builder().put(SEARCH_ENABLE_REMOTE_CLUSTERS.getKey(), enable).build(); - assertThat(ENABLE_REMOTE_CLUSTERS.get(settings), equalTo(enable)); - assertSettingDeprecationsAndWarnings(new Setting[]{SEARCH_ENABLE_REMOTE_CLUSTERS}); - } - public void testEnableRemoteClustersDefault() { assertTrue(ENABLE_REMOTE_CLUSTERS.get(Settings.EMPTY)); } - public void testSkipUnavailableFallback() { - final String alias = randomAlphaOfLength(8); - final boolean skip = randomBoolean(); - final Settings settings = - Settings.builder().put(SEARCH_REMOTE_CLUSTER_SKIP_UNAVAILABLE.getConcreteSettingForNamespace(alias).getKey(), skip).build(); - assertThat(REMOTE_CLUSTER_SKIP_UNAVAILABLE.getConcreteSettingForNamespace(alias).get(settings), equalTo(skip)); - assertSettingDeprecationsAndWarnings(new Setting[]{SEARCH_REMOTE_CLUSTER_SKIP_UNAVAILABLE.getConcreteSettingForNamespace(alias)}); - } - public void testSkipUnavailableDefault() { final String alias = randomAlphaOfLength(8); assertFalse(REMOTE_CLUSTER_SKIP_UNAVAILABLE.getConcreteSettingForNamespace(alias).get(Settings.EMPTY)); } - public void testSeedsFallback() { - final String alias = randomAlphaOfLength(8); - final int numberOfSeeds = randomIntBetween(1, 8); - final List seeds = new ArrayList<>(numberOfSeeds); - for (int i = 0; i < numberOfSeeds; i++) { - seeds.add("localhost:" + Integer.toString(9200 + i)); - } - final Settings settings = - Settings.builder() - .put(SEARCH_REMOTE_CLUSTERS_SEEDS.getConcreteSettingForNamespace(alias).getKey(), String.join(",", seeds)).build(); - assertThat(REMOTE_CLUSTERS_SEEDS.getConcreteSettingForNamespace(alias).get(settings), equalTo(seeds)); - assertSettingDeprecationsAndWarnings(new Setting[]{SEARCH_REMOTE_CLUSTERS_SEEDS.getConcreteSettingForNamespace(alias)}); - } - public void testSeedsDefault() { final String alias = randomAlphaOfLength(8); assertThat(REMOTE_CLUSTERS_SEEDS.getConcreteSettingForNamespace(alias).get(Settings.EMPTY), emptyCollectionOf(String.class)); } - public void testProxyFallback() { - final String alias = randomAlphaOfLength(8); - final String proxy = randomAlphaOfLength(8); - final int port = randomIntBetween(9200, 9300); - final String value = proxy + ":" + port; - final Settings settings = - Settings.builder() - .put(SEARCH_REMOTE_CLUSTERS_PROXY.getConcreteSettingForNamespace(alias).getKey(), value).build(); - assertThat(REMOTE_CLUSTERS_PROXY.getConcreteSettingForNamespace(alias).get(settings), equalTo(value)); - assertSettingDeprecationsAndWarnings(new Setting[]{SEARCH_REMOTE_CLUSTERS_PROXY.getConcreteSettingForNamespace(alias)}); - } - public void testProxyDefault() { final String alias = randomAlphaOfLength(8); assertThat(REMOTE_CLUSTERS_PROXY.getConcreteSettingForNamespace(alias).get(Settings.EMPTY), equalTo("")); diff --git a/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartSettingsUpgradeIT.java b/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartSettingsUpgradeIT.java deleted file mode 100644 index a679604a546fc..0000000000000 --- a/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartSettingsUpgradeIT.java +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ - -package org.elasticsearch.xpack.restart; - -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.util.concurrent.ThreadContext; - -import java.nio.charset.StandardCharsets; -import java.util.Base64; - -public class FullClusterRestartSettingsUpgradeIT extends org.elasticsearch.upgrades.FullClusterRestartSettingsUpgradeIT { - - @Override - protected Settings restClientSettings() { - final String token = - "Basic " + Base64.getEncoder().encodeToString("test_user:x-pack-test-password".getBytes(StandardCharsets.UTF_8)); - return Settings.builder().put(ThreadContext.PREFIX + ".Authorization", token).build(); - } - -} From 39a3d637340ab8de0220547671e08cc0b37fa326 Mon Sep 17 00:00:00 2001 From: Simon Willnauer Date: Thu, 23 May 2019 17:08:29 +0200 Subject: [PATCH 069/224] Unguice Snapshot / Restore services (#42357) This removes the @Inject annotations from the Snapshot/Restore infrastructure classes and registers them manually in Node.java --- .../status/TransportNodesSnapshotsStatus.java | 2 -- .../java/org/elasticsearch/node/Node.java | 21 +++++++++++++++++-- .../repositories/RepositoriesModule.java | 20 ++++++------------ .../snapshots/RestoreService.java | 2 -- .../snapshots/SnapshotShardsService.java | 2 -- .../snapshots/SnapshotsService.java | 2 -- 6 files changed, 25 insertions(+), 24 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportNodesSnapshotsStatus.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportNodesSnapshotsStatus.java index 8f71090cc469f..1f55c1e00cef6 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportNodesSnapshotsStatus.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportNodesSnapshotsStatus.java @@ -30,7 +30,6 @@ import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.index.shard.ShardId; @@ -60,7 +59,6 @@ public class TransportNodesSnapshotsStatus extends TransportNodesAction repoPlugins, T } } + Settings settings = env.settings(); Map repositoryTypes = Collections.unmodifiableMap(factories); Map internalRepositoryTypes = Collections.unmodifiableMap(internalFactories); - repositoriesService = new RepositoriesService(env.settings(), clusterService, transportService, repositoryTypes, + repositoriesService = new RepositoriesService(settings, clusterService, transportService, repositoryTypes, internalRepositoryTypes, threadPool); } - @Override - protected void configure() { - bind(RepositoriesService.class).toInstance(repositoriesService); - bind(SnapshotsService.class).asEagerSingleton(); - bind(SnapshotShardsService.class).asEagerSingleton(); - bind(TransportNodesSnapshotsStatus.class).asEagerSingleton(); - bind(RestoreService.class).asEagerSingleton(); + public RepositoriesService getRepositoryService() { + return repositoriesService; } } diff --git a/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java b/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java index cb9e7fee04249..f48ea7e41d555 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java +++ b/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java @@ -58,7 +58,6 @@ import org.elasticsearch.common.Priority; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.collect.ImmutableOpenMap; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.settings.ClusterSettings; @@ -152,7 +151,6 @@ public class RestoreService implements ClusterStateApplier { private final CleanRestoreStateTaskExecutor cleanRestoreStateTaskExecutor; - @Inject public RestoreService(ClusterService clusterService, RepositoriesService repositoriesService, AllocationService allocationService, MetaDataCreateIndexService createIndexService, MetaDataIndexUpgradeService metaDataIndexUpgradeService, ClusterSettings clusterSettings) { diff --git a/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java b/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java index b21df093fadd2..65e1191211ec2 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java +++ b/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java @@ -49,7 +49,6 @@ import org.elasticsearch.common.Priority; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.component.AbstractLifecycleComponent; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.Settings; @@ -115,7 +114,6 @@ public class SnapshotShardsService extends AbstractLifecycleComponent implements private final SnapshotStateExecutor snapshotStateExecutor = new SnapshotStateExecutor(); private final UpdateSnapshotStatusAction updateSnapshotStatusHandler; - @Inject public SnapshotShardsService(Settings settings, ClusterService clusterService, SnapshotsService snapshotsService, ThreadPool threadPool, TransportService transportService, IndicesService indicesService, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) { diff --git a/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java b/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java index e606bff0cb9e4..a6138b8f6052b 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java +++ b/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java @@ -57,7 +57,6 @@ import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.component.AbstractLifecycleComponent; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.AbstractRunnable; @@ -131,7 +130,6 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus // Set of snapshots that are currently being ended by this node private final Set endingSnapshots = Collections.synchronizedSet(new HashSet<>()); - @Inject public SnapshotsService(Settings settings, ClusterService clusterService, IndexNameExpressionResolver indexNameExpressionResolver, RepositoriesService repositoriesService, ThreadPool threadPool) { this.clusterService = clusterService; From 9cadfd2b218c79eef20a643385988780b63c10e8 Mon Sep 17 00:00:00 2001 From: Mark Vieira Date: Thu, 23 May 2019 09:16:34 -0700 Subject: [PATCH 070/224] Mute slow and flaky build-tools integration tests --- .../java/org/elasticsearch/gradle/BuildExamplePluginsIT.java | 2 ++ .../elasticsearch/gradle/testclusters/TestClustersPluginIT.java | 1 + 2 files changed, 3 insertions(+) diff --git a/buildSrc/src/test/java/org/elasticsearch/gradle/BuildExamplePluginsIT.java b/buildSrc/src/test/java/org/elasticsearch/gradle/BuildExamplePluginsIT.java index bf982fa3aa2d2..7b4b315fd0028 100644 --- a/buildSrc/src/test/java/org/elasticsearch/gradle/BuildExamplePluginsIT.java +++ b/buildSrc/src/test/java/org/elasticsearch/gradle/BuildExamplePluginsIT.java @@ -23,6 +23,7 @@ import org.elasticsearch.gradle.test.GradleIntegrationTestCase; import org.gradle.testkit.runner.GradleRunner; import org.junit.BeforeClass; +import org.junit.Ignore; import org.junit.Rule; import org.junit.rules.TemporaryFolder; @@ -38,6 +39,7 @@ import java.util.Objects; import java.util.stream.Collectors; +@Ignore("https://github.com/elastic/elasticsearch/issues/42453") public class BuildExamplePluginsIT extends GradleIntegrationTestCase { private static final List EXAMPLE_PLUGINS = Collections.unmodifiableList( diff --git a/buildSrc/src/test/java/org/elasticsearch/gradle/testclusters/TestClustersPluginIT.java b/buildSrc/src/test/java/org/elasticsearch/gradle/testclusters/TestClustersPluginIT.java index 39651ff896057..a59f54e132073 100644 --- a/buildSrc/src/test/java/org/elasticsearch/gradle/testclusters/TestClustersPluginIT.java +++ b/buildSrc/src/test/java/org/elasticsearch/gradle/testclusters/TestClustersPluginIT.java @@ -27,6 +27,7 @@ import java.util.Arrays; +@Ignore("https://github.com/elastic/elasticsearch/issues/42453") public class TestClustersPluginIT extends GradleIntegrationTestCase { private GradleRunner runner; From 274b634936d1763daf3e6d1bfe1f0dacb5c4cb34 Mon Sep 17 00:00:00 2001 From: emasab Date: Thu, 23 May 2019 18:35:39 +0200 Subject: [PATCH 071/224] Build local year inside DateFormat lambda bugfix for https://github.com/elastic/elasticsearch/issues/41797 (#42120) This makes sure that the year can change between when the lambda is generated and when it is executed without causing the incorrect year to be used. Resolves #41797 --- .../main/java/org/elasticsearch/ingest/common/DateFormat.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/DateFormat.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/DateFormat.java index 65efdb40a5cc1..be5d7e47f1c02 100644 --- a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/DateFormat.java +++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/DateFormat.java @@ -89,7 +89,6 @@ Function getFunction(String format, ZoneId zoneId, Locale boolean isUtc = ZoneOffset.UTC.equals(zoneId); - int year = LocalDate.now(ZoneOffset.UTC).getYear(); DateFormatter dateFormatter = DateFormatter.forPattern(format) .withLocale(locale); // if UTC zone is set here, the time zone specified in the format will be ignored, leading to wrong dates @@ -102,6 +101,7 @@ Function getFunction(String format, ZoneId zoneId, Locale // if there is no year, we fall back to the current one and // fill the rest of the date up with the parsed date if (accessor.isSupported(ChronoField.YEAR) == false) { + int year = LocalDate.now(ZoneOffset.UTC).getYear(); ZonedDateTime newTime = Instant.EPOCH.atZone(ZoneOffset.UTC).withYear(year); for (ChronoField field : FIELDS) { if (accessor.isSupported(field)) { From 4520e88b2257ec7fc8b8cdeca29cb68d28df0b4b Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Thu, 23 May 2019 10:16:48 -0700 Subject: [PATCH 072/224] Remove transport client from xpack (#42202) This commit removes support for the transport client from xpack. --- build.gradle | 3 +- .../gradle/test/RestIntegTestTask.groovy | 11 +- .../build.gradle | 2 +- .../example/CustomAuthorizationEngineIT.java | 70 +++--- .../java/org/elasticsearch/node/Node.java | 2 + .../java/org/elasticsearch/node/MockNode.java | 5 + .../elasticsearch/test/ESIntegTestCase.java | 16 +- .../test/ExternalTestCluster.java | 57 +++-- .../ccs-clients-integrations/java.asciidoc | 203 ----------------- x-pack/docs/en/watcher/java.asciidoc | 130 ----------- .../java/org/elasticsearch/xpack/ccr/Ccr.java | 6 - .../xpack/core/XPackClientPlugin.java | 53 +---- .../elasticsearch/xpack/core/XPackPlugin.java | 16 +- .../AbstractLicensesIntegrationTestCase.java | 1 + .../license/StartBasicLicenseTests.java | 2 +- .../license/StartTrialLicenseTests.java | 2 +- .../snapshots/SourceOnlySnapshotIT.java | 2 +- .../integration/DataFrameIntegTestCase.java | 210 +++++++++--------- .../integration/DataFrameTransformIT.java | 24 +- .../DataFrameTransformProgressIT.java | 83 +++---- .../xpack/dataframe/DataFrame.java | 14 +- .../transforms/TransformProgressGatherer.java | 27 ++- .../xpack/indexlifecycle/IndexLifecycle.java | 10 +- .../IndexLifecycleInitialisationTests.java | 20 +- .../xpack/logstash/Logstash.java | 6 - .../ml/integration/MlNativeIntegTestCase.java | 46 +++- .../xpack/ml/MachineLearning.java | 18 +- .../xpack/ml/MachineLearningFeatureSet.java | 3 +- .../xpack/monitoring/Monitoring.java | 8 +- .../MonitoringPluginClientTests.java | 13 -- .../elasticsearch/xpack/rollup/Rollup.java | 10 +- .../xpack/security/Security.java | 52 ++--- .../support/AbstractSecurityModule.java | 47 ---- .../integration/BulkUpdateTests.java | 10 +- .../integration/ClearRolesCacheTests.java | 2 +- .../integration/FieldLevelSecurityTests.java | 2 +- .../MultipleIndicesPermissionsTests.java | 4 +- .../PermissionPrecedenceTests.java | 16 +- .../integration/SecurityClearScrollTests.java | 4 +- .../ShrinkIndexWithSecurityTests.java | 2 +- .../test/NativeRealmIntegTestCase.java | 2 +- .../test/SecurityIntegTestCase.java | 53 +---- .../xpack/security/SecurityTests.java | 12 +- .../xpack/security/TemplateUpgraderTests.java | 2 +- .../AuditTrailSettingsUpdateTests.java | 2 +- .../security/authz/SecurityScrollTests.java | 11 - .../filter/IpFilteringIntegrationTests.java | 6 - .../filter/IpFilteringUpdateTests.java | 2 +- .../netty4/IPHostnameVerificationTests.java | 88 -------- .../xpack/ssl/SSLClientAuthTests.java | 2 +- .../xpack/ssl/SSLTrustRestrictionsTests.java | 11 +- .../elasticsearch/xpack/watcher/Watcher.java | 10 +- .../xpack/security/ReindexWithSecurityIT.java | 188 +++++++++++----- x-pack/qa/security-client-tests/build.gradle | 40 ---- .../qa/SecurityTransportClientIT.java | 125 ----------- .../build.gradle | 2 +- .../example/realm/CustomRealmIT.java | 108 ++------- .../example/role/CustomRolesProviderIT.java | 69 +++--- .../smoketest/PreventFailingBuildIT.java | 4 +- x-pack/qa/smoke-test-plugins-ssl/build.gradle | 1 + .../SmokeTestMonitoringWithSecurityIT.java | 171 +++++++++----- x-pack/qa/transport-client-tests/build.gradle | 22 -- .../ml/client/ESXPackSmokeClientTestCase.java | 153 ------------- .../xpack/ml/client/MLTransportClientIT.java | 179 --------------- x-pack/transport-client/build.gradle | 41 ---- .../client/PreBuiltXPackTransportClient.java | 66 ------ .../PreBuiltXPackTransportClientTests.java | 30 --- 67 files changed, 699 insertions(+), 1913 deletions(-) delete mode 100644 x-pack/docs/en/security/ccs-clients-integrations/java.asciidoc delete mode 100644 x-pack/docs/en/watcher/java.asciidoc delete mode 100644 x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/AbstractSecurityModule.java delete mode 100644 x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/netty4/IPHostnameVerificationTests.java delete mode 100644 x-pack/qa/security-client-tests/build.gradle delete mode 100644 x-pack/qa/security-client-tests/src/test/java/org/elasticsearch/xpack/security/qa/SecurityTransportClientIT.java delete mode 100644 x-pack/qa/transport-client-tests/build.gradle delete mode 100644 x-pack/qa/transport-client-tests/src/test/java/org/elasticsearch/xpack/ml/client/ESXPackSmokeClientTestCase.java delete mode 100644 x-pack/qa/transport-client-tests/src/test/java/org/elasticsearch/xpack/ml/client/MLTransportClientIT.java delete mode 100644 x-pack/transport-client/build.gradle delete mode 100644 x-pack/transport-client/src/main/java/org/elasticsearch/xpack/client/PreBuiltXPackTransportClient.java delete mode 100644 x-pack/transport-client/src/test/java/org/elasticsearch/xpack/client/PreBuiltXPackTransportClientTests.java diff --git a/build.gradle b/build.gradle index 8794a1f930523..7de02b814da86 100644 --- a/build.gradle +++ b/build.gradle @@ -241,8 +241,7 @@ allprojects { "org.elasticsearch.plugin:percolator-client:${version}": ':modules:percolator', "org.elasticsearch.plugin:rank-eval-client:${version}": ':modules:rank-eval', // for security example plugins - "org.elasticsearch.plugin:x-pack-core:${version}": ':x-pack:plugin:core', - "org.elasticsearch.client:x-pack-transport:${version}": ':x-pack:transport-client' + "org.elasticsearch.plugin:x-pack-core:${version}": ':x-pack:plugin:core' ] /* diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestIntegTestTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestIntegTestTask.groovy index ef784b6f901d1..52c498aa98d79 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestIntegTestTask.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestIntegTestTask.groovy @@ -114,13 +114,14 @@ class RestIntegTestTask extends DefaultTask { runner.ext.nonInputProperties = nonInputProperties if (System.getProperty("tests.rest.cluster") == null) { - if (System.getProperty("tests.cluster") != null) { - throw new IllegalArgumentException("tests.rest.cluster and tests.cluster must both be null or non-null") + if (System.getProperty("tests.cluster") != null || System.getProperty("tests.clustername") != null) { + throw new IllegalArgumentException("tests.rest.cluster, tests.cluster, and tests.clustername must all be null or non-null") } if (usesTestclusters == true) { ElasticsearchCluster cluster = project.testClusters."${name}" nonInputProperties.systemProperty('tests.rest.cluster', "${-> cluster.allHttpSocketURI.join(",") }") nonInputProperties.systemProperty('tests.cluster', "${-> cluster.transportPortURI }") + nonInputProperties.systemProperty('tests.clustername', "${-> cluster.getName() }") } else { // we pass all nodes to the rest cluster to allow the clients to round-robin between them // this is more realistic than just talking to a single node @@ -130,6 +131,7 @@ class RestIntegTestTask extends DefaultTask { // that sets up the test cluster and passes this transport uri instead of http uri. Until then, we pass // both as separate sysprops nonInputProperties.systemProperty('tests.cluster', "${-> nodes[0].transportUri()}") + nonInputProperties.systemProperty('tests.clustername', "${-> nodes[0].clusterName}") // dump errors and warnings from cluster log on failure TaskExecutionAdapter logDumpListener = new TaskExecutionAdapter() { @@ -150,12 +152,13 @@ class RestIntegTestTask extends DefaultTask { } } } else { - if (System.getProperty("tests.cluster") == null) { - throw new IllegalArgumentException("tests.rest.cluster and tests.cluster must both be null or non-null") + if (System.getProperty("tests.cluster") == null || System.getProperty("tests.clustername") == null) { + throw new IllegalArgumentException("tests.rest.cluster, tests.cluster, and tests.clustername must all be null or non-null") } // an external cluster was specified and all responsibility for cluster configuration is taken by the user runner.systemProperty('tests.rest.cluster', System.getProperty("tests.rest.cluster")) runner.systemProperty('test.cluster', System.getProperty("tests.cluster")) + runner.systemProperty('test.clustername', System.getProperty("tests.clustername")) } // copy the rest spec/tests into the test resources diff --git a/plugins/examples/security-authorization-engine/build.gradle b/plugins/examples/security-authorization-engine/build.gradle index fba9580525bcc..787cc230eeb18 100644 --- a/plugins/examples/security-authorization-engine/build.gradle +++ b/plugins/examples/security-authorization-engine/build.gradle @@ -12,7 +12,7 @@ esplugin { dependencies { compileOnly "org.elasticsearch.plugin:x-pack-core:${versions.elasticsearch}" - testCompile "org.elasticsearch.client:x-pack-transport:${versions.elasticsearch}" + testCompile "org.elasticsearch.client:elasticsearch-rest-high-level-client:${versions.elasticsearch}" } integTest { diff --git a/plugins/examples/security-authorization-engine/src/test/java/org/elasticsearch/example/CustomAuthorizationEngineIT.java b/plugins/examples/security-authorization-engine/src/test/java/org/elasticsearch/example/CustomAuthorizationEngineIT.java index 9daf9bd01a8bc..4342b2a4b88f0 100644 --- a/plugins/examples/security-authorization-engine/src/test/java/org/elasticsearch/example/CustomAuthorizationEngineIT.java +++ b/plugins/examples/security-authorization-engine/src/test/java/org/elasticsearch/example/CustomAuthorizationEngineIT.java @@ -24,22 +24,21 @@ import org.elasticsearch.client.RequestOptions; import org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseException; -import org.elasticsearch.common.network.NetworkModule; +import org.elasticsearch.client.RestHighLevelClient; +import org.elasticsearch.client.security.PutUserRequest; +import org.elasticsearch.client.security.RefreshPolicy; +import org.elasticsearch.client.security.user.User; import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; -import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.test.ESIntegTestCase; -import org.elasticsearch.xpack.core.XPackClientPlugin; -import org.elasticsearch.xpack.core.security.authc.support.Hasher; +import org.elasticsearch.test.rest.ESRestTestCase; import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken; -import org.elasticsearch.xpack.core.security.client.SecurityClient; import java.io.IOException; import java.nio.charset.StandardCharsets; import java.util.Base64; -import java.util.Collection; import java.util.Collections; +import java.util.List; import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue; import static org.hamcrest.Matchers.containsString; @@ -50,26 +49,21 @@ * an external cluster with the custom authorization plugin installed to validate the functionality * when running as a plugin */ -public class CustomAuthorizationEngineIT extends ESIntegTestCase { +public class CustomAuthorizationEngineIT extends ESRestTestCase { @Override - protected Settings externalClusterClientSettings() { + protected Settings restClientSettings() { final String token = "Basic " + Base64.getEncoder().encodeToString(("test_user:x-pack-test-password").getBytes(StandardCharsets.UTF_8)); return Settings.builder() .put(ThreadContext.PREFIX + ".Authorization", token) - .put(NetworkModule.TRANSPORT_TYPE_KEY, "security4") .build(); } - @Override - protected Collection> transportClientPlugins() { - return Collections.singleton(XPackClientPlugin.class); - } - public void testClusterAction() throws IOException { - SecurityClient securityClient = new SecurityClient(client()); - securityClient.preparePutUser("custom_user", "x-pack-test-password".toCharArray(), Hasher.BCRYPT, "custom_superuser").get(); + RestHighLevelClient restClient = new TestRestHighLevelClient(); + restClient.security().putUser(PutUserRequest.withPassword(new User("custom_user", List.of("custom_superuser")), + "x-pack-test-password".toCharArray(), true, RefreshPolicy.IMMEDIATE), RequestOptions.DEFAULT); { RequestOptions.Builder options = RequestOptions.DEFAULT.toBuilder(); @@ -77,25 +71,27 @@ public void testClusterAction() throws IOException { basicAuthHeaderValue("custom_user", new SecureString("x-pack-test-password".toCharArray()))); Request request = new Request("GET", "_cluster/health"); request.setOptions(options); - Response response = getRestClient().performRequest(request); + Response response = client().performRequest(request); assertThat(response.getStatusLine().getStatusCode(), is(200)); } { - securityClient.preparePutUser("custom_user2", "x-pack-test-password".toCharArray(), Hasher.BCRYPT, "not_superuser").get(); + restClient.security().putUser(PutUserRequest.withPassword(new User("custom_user2", List.of("not_superuser")), + "x-pack-test-password".toCharArray(), true, RefreshPolicy.IMMEDIATE), RequestOptions.DEFAULT); RequestOptions.Builder options = RequestOptions.DEFAULT.toBuilder(); options.addHeader(UsernamePasswordToken.BASIC_AUTH_HEADER, basicAuthHeaderValue("custom_user2", new SecureString("x-pack-test-password".toCharArray()))); Request request = new Request("GET", "_cluster/health"); request.setOptions(options); - ResponseException e = expectThrows(ResponseException.class, () -> getRestClient().performRequest(request)); + ResponseException e = expectThrows(ResponseException.class, () -> client().performRequest(request)); assertThat(e.getResponse().getStatusLine().getStatusCode(), is(403)); } } public void testIndexAction() throws IOException { - SecurityClient securityClient = new SecurityClient(client()); - securityClient.preparePutUser("custom_user", "x-pack-test-password".toCharArray(), Hasher.BCRYPT, "custom_superuser").get(); + RestHighLevelClient restClient = new TestRestHighLevelClient(); + restClient.security().putUser(PutUserRequest.withPassword(new User("custom_user", List.of("custom_superuser")), + "x-pack-test-password".toCharArray(), true, RefreshPolicy.IMMEDIATE), RequestOptions.DEFAULT); { RequestOptions.Builder options = RequestOptions.DEFAULT.toBuilder(); @@ -103,27 +99,31 @@ public void testIndexAction() throws IOException { basicAuthHeaderValue("custom_user", new SecureString("x-pack-test-password".toCharArray()))); Request request = new Request("PUT", "/index"); request.setOptions(options); - Response response = getRestClient().performRequest(request); + Response response = client().performRequest(request); assertThat(response.getStatusLine().getStatusCode(), is(200)); } { - securityClient.preparePutUser("custom_user2", "x-pack-test-password".toCharArray(), Hasher.BCRYPT, "not_superuser").get(); + restClient.security().putUser(PutUserRequest.withPassword(new User("custom_user2", List.of("not_superuser")), + "x-pack-test-password".toCharArray(), true, RefreshPolicy.IMMEDIATE), RequestOptions.DEFAULT); RequestOptions.Builder options = RequestOptions.DEFAULT.toBuilder(); options.addHeader(UsernamePasswordToken.BASIC_AUTH_HEADER, basicAuthHeaderValue("custom_user2", new SecureString("x-pack-test-password".toCharArray()))); Request request = new Request("PUT", "/index"); request.setOptions(options); - ResponseException e = expectThrows(ResponseException.class, () -> getRestClient().performRequest(request)); + ResponseException e = expectThrows(ResponseException.class, () -> client().performRequest(request)); assertThat(e.getResponse().getStatusLine().getStatusCode(), is(403)); } } public void testRunAs() throws IOException { - SecurityClient securityClient = new SecurityClient(client()); - securityClient.preparePutUser("custom_user", "x-pack-test-password".toCharArray(), Hasher.BCRYPT, "custom_superuser").get(); - securityClient.preparePutUser("custom_user2", "x-pack-test-password".toCharArray(), Hasher.BCRYPT, "custom_superuser").get(); - securityClient.preparePutUser("custom_user3", "x-pack-test-password".toCharArray(), Hasher.BCRYPT, "not_superuser").get(); + RestHighLevelClient restClient = new TestRestHighLevelClient(); + restClient.security().putUser(PutUserRequest.withPassword(new User("custom_user", List.of("custom_superuser")), + "x-pack-test-password".toCharArray(), true, RefreshPolicy.IMMEDIATE), RequestOptions.DEFAULT); + restClient.security().putUser(PutUserRequest.withPassword(new User("custom_user2", List.of("custom_superuser")), + "x-pack-test-password".toCharArray(), true, RefreshPolicy.IMMEDIATE), RequestOptions.DEFAULT); + restClient.security().putUser(PutUserRequest.withPassword(new User("custom_user3", List.of("not_superuser")), + "x-pack-test-password".toCharArray(), true, RefreshPolicy.IMMEDIATE), RequestOptions.DEFAULT); { RequestOptions.Builder options = RequestOptions.DEFAULT.toBuilder(); @@ -132,7 +132,7 @@ public void testRunAs() throws IOException { options.addHeader("es-security-runas-user", "custom_user2"); Request request = new Request("GET", "/_security/_authenticate"); request.setOptions(options); - Response response = getRestClient().performRequest(request); + Response response = client().performRequest(request); assertThat(response.getStatusLine().getStatusCode(), is(200)); String responseStr = EntityUtils.toString(response.getEntity()); assertThat(responseStr, containsString("custom_user2")); @@ -145,7 +145,7 @@ public void testRunAs() throws IOException { options.addHeader("es-security-runas-user", "custom_user3"); Request request = new Request("PUT", "/index"); request.setOptions(options); - ResponseException e = expectThrows(ResponseException.class, () -> getRestClient().performRequest(request)); + ResponseException e = expectThrows(ResponseException.class, () -> client().performRequest(request)); assertThat(e.getResponse().getStatusLine().getStatusCode(), is(403)); } @@ -156,8 +156,14 @@ public void testRunAs() throws IOException { options.addHeader("es-security-runas-user", "custom_user2"); Request request = new Request("PUT", "/index"); request.setOptions(options); - ResponseException e = expectThrows(ResponseException.class, () -> getRestClient().performRequest(request)); + ResponseException e = expectThrows(ResponseException.class, () -> client().performRequest(request)); assertThat(e.getResponse().getStatusLine().getStatusCode(), is(403)); } } + + private class TestRestHighLevelClient extends RestHighLevelClient { + TestRestHighLevelClient() { + super(client(), restClient -> {}, Collections.emptyList()); + } + } } diff --git a/server/src/main/java/org/elasticsearch/node/Node.java b/server/src/main/java/org/elasticsearch/node/Node.java index d338a4663a32a..fab08ab1c03f7 100644 --- a/server/src/main/java/org/elasticsearch/node/Node.java +++ b/server/src/main/java/org/elasticsearch/node/Node.java @@ -251,6 +251,7 @@ public class Node implements Closeable { private final Collection pluginLifecycleComponents; private final LocalNodeFactory localNodeFactory; private final NodeService nodeService; + final NamedWriteableRegistry namedWriteableRegistry; public Node(Environment environment) { this(environment, Collections.emptyList(), true); @@ -589,6 +590,7 @@ protected Node( this.pluginLifecycleComponents = Collections.unmodifiableList(pluginLifecycleComponents); client.initialize(injector.getInstance(new Key>() {}), () -> clusterService.localNode().getId(), transportService.getRemoteClusterService()); + this.namedWriteableRegistry = namedWriteableRegistry; logger.debug("initializing HTTP handlers ..."); actionModule.initRestHandlers(() -> clusterService.state().nodes()); diff --git a/test/framework/src/main/java/org/elasticsearch/node/MockNode.java b/test/framework/src/main/java/org/elasticsearch/node/MockNode.java index 31b8ba01dc4a8..b43e438bc3210 100644 --- a/test/framework/src/main/java/org/elasticsearch/node/MockNode.java +++ b/test/framework/src/main/java/org/elasticsearch/node/MockNode.java @@ -25,6 +25,7 @@ import org.elasticsearch.cluster.MockInternalClusterInfoService; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; @@ -179,4 +180,8 @@ protected HttpServerTransport newHttpTransport(NetworkModule networkModule) { protected void configureNodeAndClusterIdStateListener(ClusterService clusterService) { //do not configure this in tests as this is causing SetOnce to throw exceptions when jvm is used for multiple tests } + + public NamedWriteableRegistry getNamedWriteableRegistry() { + return namedWriteableRegistry; + } } diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java index d45c83444b2fc..94a8e9b7728ce 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java @@ -289,6 +289,11 @@ public abstract class ESIntegTestCase extends ESTestCase { */ public static final String TESTS_CLUSTER = "tests.cluster"; + /** + * Key used to eventually switch to using an external cluster and provide the cluster name + */ + public static final String TESTS_CLUSTER_NAME = "tests.clustername"; + /** * Key used to retrieve the index random seed from the index settings on a running node. * The value of this seed can be used to initialize a random context for a specific index. @@ -1829,7 +1834,7 @@ protected Settings transportClientSettings() { return Settings.EMPTY; } - private ExternalTestCluster buildExternalCluster(String clusterAddresses) throws IOException { + private ExternalTestCluster buildExternalCluster(String clusterAddresses, String clusterName) throws IOException { String[] stringAddresses = clusterAddresses.split(","); TransportAddress[] transportAddresses = new TransportAddress[stringAddresses.length]; int i = 0; @@ -1838,7 +1843,8 @@ private ExternalTestCluster buildExternalCluster(String clusterAddresses) throws InetAddress inetAddress = InetAddress.getByName(url.getHost()); transportAddresses[i++] = new TransportAddress(new InetSocketAddress(inetAddress, url.getPort())); } - return new ExternalTestCluster(createTempDir(), externalClusterClientSettings(), transportClientPlugins(), transportAddresses); + return new ExternalTestCluster(createTempDir(), externalClusterClientSettings(), nodePlugins(), getClientWrapper(), clusterName, + transportAddresses); } protected Settings externalClusterClientSettings() { @@ -1855,7 +1861,11 @@ protected TestCluster buildTestCluster(Scope scope, long seed) throws IOExceptio if (scope == Scope.TEST) { throw new IllegalArgumentException("Cannot run TEST scope test with " + TESTS_CLUSTER); } - return buildExternalCluster(clusterAddresses); + String clusterName = System.getProperty(TESTS_CLUSTER_NAME); + if (Strings.isNullOrEmpty(clusterName)) { + throw new IllegalArgumentException("External test cluster name must be provided"); + } + return buildExternalCluster(clusterAddresses, clusterName); } final String nodePrefix; diff --git a/test/framework/src/main/java/org/elasticsearch/test/ExternalTestCluster.java b/test/framework/src/main/java/org/elasticsearch/test/ExternalTestCluster.java index 74edfd3a46514..e77d143e50d99 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ExternalTestCluster.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ExternalTestCluster.java @@ -21,6 +21,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.admin.cluster.node.info.NodeInfo; import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse; import org.elasticsearch.action.admin.cluster.node.stats.NodeStats; @@ -32,18 +33,23 @@ import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.env.Environment; +import org.elasticsearch.node.MockNode; +import org.elasticsearch.node.NodeValidationException; import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.transport.MockTransportClient; import org.elasticsearch.transport.nio.MockNioTransportPlugin; import java.io.IOException; import java.net.InetSocketAddress; import java.nio.file.Path; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collection; -import java.util.Collections; +import java.util.List; import java.util.concurrent.atomic.AtomicInteger; +import java.util.function.Function; +import java.util.stream.Collectors; import static org.elasticsearch.test.ESTestCase.getTestTransportType; import static org.hamcrest.Matchers.equalTo; @@ -61,7 +67,8 @@ public final class ExternalTestCluster extends TestCluster { private static final AtomicInteger counter = new AtomicInteger(); public static final String EXTERNAL_CLUSTER_PREFIX = "external_"; - private final MockTransportClient client; + private final MockNode node; + private final Client client; private final InetSocketAddress[] httpAddresses; @@ -71,13 +78,21 @@ public final class ExternalTestCluster extends TestCluster { private final int numMasterAndDataNodes; public ExternalTestCluster(Path tempDir, Settings additionalSettings, Collection> pluginClasses, - TransportAddress... transportAddresses) { + Function clientWrapper, String clusterName, TransportAddress... transportAddresses) { super(0); + this.clusterName = clusterName; Settings.Builder clientSettingsBuilder = Settings.builder() .put(additionalSettings) - .put("node.name", InternalTestCluster.TRANSPORT_CLIENT_PREFIX + EXTERNAL_CLUSTER_PREFIX + counter.getAndIncrement()) - .put("client.transport.ignore_cluster_name", true) - .put(Environment.PATH_HOME_SETTING.getKey(), tempDir); + .put("node.master", false) + .put("node.data", false) + .put("node.ingest", false) + .put("node.name", EXTERNAL_CLUSTER_PREFIX + counter.getAndIncrement()) + .put("cluster.name", clusterName) + .putList("discovery.seed_hosts", + Arrays.stream(transportAddresses).map(TransportAddress::toString).collect(Collectors.toList())); + if (Environment.PATH_HOME_SETTING.exists(additionalSettings) == false) { + clientSettingsBuilder.put(Environment.PATH_HOME_SETTING.getKey(), tempDir); + } boolean addMockTcpTransport = additionalSettings.get(NetworkModule.TRANSPORT_TYPE_KEY) == null; if (addMockTcpTransport) { @@ -88,13 +103,15 @@ public ExternalTestCluster(Path tempDir, Settings additionalSettings, Collection pluginClasses.add(MockNioTransportPlugin.class); } } + pluginClasses = new ArrayList<>(pluginClasses); + pluginClasses.add(MockHttpTransport.TestPlugin.class); Settings clientSettings = clientSettingsBuilder.build(); - MockTransportClient client = new MockTransportClient(clientSettings, pluginClasses); + MockNode node = new MockNode(clientSettings, pluginClasses); + Client client = clientWrapper.apply(node.client()); try { - client.addTransportAddresses(transportAddresses); + node.start(); NodesInfoResponse nodeInfos = client.admin().cluster().prepareNodesInfo().clear().setSettings(true).setHttp(true).get(); httpAddresses = new InetSocketAddress[nodeInfos.getNodes().size()]; - this.clusterName = nodeInfos.getClusterName().value(); int dataNodes = 0; int masterAndDataNodes = 0; for (int i = 0; i < nodeInfos.getNodes().size(); i++) { @@ -110,10 +127,22 @@ public ExternalTestCluster(Path tempDir, Settings additionalSettings, Collection this.numDataNodes = dataNodes; this.numMasterAndDataNodes = masterAndDataNodes; this.client = client; + this.node = node; logger.info("Setup ExternalTestCluster [{}] made of [{}] nodes", nodeInfos.getClusterName().value(), size()); + } catch (NodeValidationException e) { + try { + IOUtils.close(client, node); + } catch (IOException e1) { + e.addSuppressed(e1); + } + throw new ElasticsearchException(e); } catch (Exception e) { - client.close(); + try { + IOUtils.close(client, node); + } catch (IOException e1) { + e.addSuppressed(e1); + } throw e; } } @@ -150,7 +179,7 @@ public InetSocketAddress[] httpAddresses() { @Override public void close() throws IOException { - client.close(); + IOUtils.close(client, node); } @Override @@ -181,12 +210,12 @@ public void ensureEstimatedStats() { @Override public Iterable getClients() { - return Collections.singleton(client); + return List.of(client); } @Override public NamedWriteableRegistry getNamedWriteableRegistry() { - return client.getNamedWriteableRegistry(); + return node.getNamedWriteableRegistry(); } @Override diff --git a/x-pack/docs/en/security/ccs-clients-integrations/java.asciidoc b/x-pack/docs/en/security/ccs-clients-integrations/java.asciidoc deleted file mode 100644 index a19532bdb67c5..0000000000000 --- a/x-pack/docs/en/security/ccs-clients-integrations/java.asciidoc +++ /dev/null @@ -1,203 +0,0 @@ -[[java-clients]] -=== Java Client and security - -deprecated[7.0.0, The `TransportClient` is deprecated in favour of the {java-rest}/java-rest-high.html[Java High Level REST Client] and will be removed in Elasticsearch 8.0. The {java-rest}/java-rest-high-level-migration.html[migration guide] describes all the steps needed to migrate.] - -The {es} {security-features} support the Java http://www.elastic.co/guide/en/elasticsearch/client/java-api/current/transport-client.html[transport client] for Elasticsearch. -The transport client uses the same transport protocol that the cluster nodes use -for inter-node communication. It is very efficient as it does not have to marshall -and unmarshall JSON requests like a typical REST client. - -NOTE: Using the Java Node Client with secured clusters is not recommended or - supported. - -[float] -[[transport-client]] -==== Configuring the Transport Client to work with a Secured Cluster - -To use the transport client with a secured cluster, you need to: - -[[java-transport-client-role]] -. {ref}/setup-xpack-client.html[Configure the {xpack} transport client]. - -. Configure a user with the privileges required to start the transport client. -A default `transport_client` role is built-in to the {es} {security-features}, -which grants the -appropriate cluster permissions for the transport client to work with the secured -cluster. The transport client uses the _Nodes Info API_ to fetch information about -the nodes in the cluster. - -. Set up the transport client. At a minimum, you must configure `xpack.security.user` to -include the name and password of your transport client user in your requests. The -following snippet configures the user credentials globally--every request -submitted with this client includes the `transport_client_user` credentials in -its headers. -+ --- -[source,java] -------------------------------------------------------------------------------------------------- -import org.elasticsearch.xpack.client.PreBuiltXPackTransportClient; -... - -TransportClient client = new PreBuiltXPackTransportClient(Settings.builder() - .put("cluster.name", "myClusterName") - .put("xpack.security.user", "transport_client_user:x-pack-test-password") - ... - .build()) - .addTransportAddress(new TransportAddress("localhost", 9300)) - .addTransportAddress(new TransportAddress("localhost", 9301)); -------------------------------------------------------------------------------------------------- - -WARNING: If you configure a transport client without SSL, passwords are sent in - clear text. - -You can also add an `Authorization` header to each request. If you've configured -global authorization credentials, the `Authorization` header overrides the global -authentication credentials. This is useful when an application has multiple users -who access Elasticsearch using the same client. You can set the global token to -a user that only has the `transport_client` role, and add the `transport_client` -role to the individual users. - -For example, the following snippet adds the `Authorization` header to a search -request: - -[source,java] --------------------------------------------------------------------------------------------------- -import org.elasticsearch.common.settings.SecureString; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.TransportAddress; -import org.elasticsearch.xpack.client.PreBuiltXPackTransportClient; - -import static UsernamePasswordToken.basicAuthHeaderValue; -... - -TransportClient client = new PreBuiltXPackTransportClient(Settings.builder() - .put("cluster.name", "myClusterName") - .put("xpack.security.user", "transport_client_user:x-pack-test-password") - ... - .build()) - .build() - .addTransportAddress(new TransportAddress(InetAddress.getByName("localhost"), 9300)) - .addTransportAddress(new TransportAddress(InetAddress.getByName("localhost"), 9301)) - -String token = basicAuthHeaderValue("test_user", new SecureString("x-pack-test-password".toCharArray())); - -client.filterWithHeader(Collections.singletonMap("Authorization", token)) - .prepareSearch().get(); --------------------------------------------------------------------------------------------------- --- - -. Enable SSL to authenticate clients and encrypt communications. To enable SSL, -you need to: - -.. Configure the paths to the client's key and certificate in addition to the certificate authorities. -Client authentication requires every client to have a certification signed by a trusted CA. -+ --- -NOTE: Client authentication is enabled by default. For information about - disabling client authentication, see <>. - -[source,java] --------------------------------------------------------------------------------------------------- -import org.elasticsearch.xpack.client.PreBuiltXPackTransportClient; -... - -TransportClient client = new PreBuiltXPackTransportClient(Settings.builder() - .put("cluster.name", "myClusterName") - .put("xpack.security.user", "transport_client_user:x-pack-test-password") - .put("xpack.security.transport.ssl.enabled", true) - .put("xpack.security.transport.ssl.key", "/path/to/client.key") - .put("xpack.security.transport.ssl.certificate", "/path/to/client.crt") - .put("xpack.security.transport.ssl.certificate_authorities", "/path/to/ca.crt") - ... - .build()); --------------------------------------------------------------------------------------------------- --- - -.. Enable the SSL transport by setting `xpack.security.transport.ssl.enabled` to `true` in the -client configuration. -+ --- -[source,java] --------------------------------------------------------------------------------------------------- -import org.elasticsearch.xpack.client.PreBuiltXPackTransportClient; -... - -TransportClient client = new PreBuiltXPackTransportClient(Settings.builder() - .put("cluster.name", "myClusterName") - .put("xpack.security.user", "transport_client_user:x-pack-test-password") - .put("xpack.security.transport.ssl.enabled", true) - .put("xpack.security.transport.ssl.key", "/path/to/client.key") - .put("xpack.security.transport.ssl.certificate", "/path/to/client.crt") - .put("xpack.security.transport.ssl.certificate_authorities", "/path/to/ca.crt") - .put("xpack.security.transport.ssl.enabled", "true") - ... - .build()) - .addTransportAddress(new TransportAddress(InetAddress.getByName("localhost"), 9300)) - .addTransportAddress(new TransportAddress(InetAddress.getByName("localhost"), 9301)) --------------------------------------------------------------------------------------------------- --- - -[float] -[[disabling-client-auth]] -===== Disabling client authentication - -If you want to disable client authentication, you can use a client-specific -transport protocol. For more information see <>. - -If you are not using client authentication and sign the Elasticsearch node -certificates with your own CA, you need to provide the path to the CA -certificate in your client configuration. - -[source,java] ------------------------------------------------------------------------------------------------------- -import org.elasticsearch.xpack.client.PreBuiltXPackTransportClient; -... - -TransportClient client = new PreBuiltXPackTransportClient(Settings.builder() - .put("cluster.name", "myClusterName") - .put("xpack.security.user", "test_user:x-pack-test-password") - .put("xpack.security.transport.ssl.certificate_authorities", "/path/to/ca.crt") - .put("xpack.security.transport.ssl.enabled", "true") - ... - .build()) - .addTransportAddress(new TransportAddress("localhost", 9300)) - .addTransportAddress(new TransportAddress("localhost", 9301)); ------------------------------------------------------------------------------------------------------- - -NOTE: If you are using a public CA that is already trusted by the Java runtime, - you do not need to set the `xpack.security.transport.ssl.certificate_authorities`. - -[float] -[[connecting-anonymously]] -===== Connecting anonymously - -To enable the transport client to connect anonymously, you must assign the -anonymous user the privileges defined in the <> -role. Anonymous access must also be enabled, of course. For more information, -see <>. - -[float] -[[security-client]] -==== Security client - -The {stack} {security-features} expose an API through the `SecurityClient` class. -To get a hold of a `SecurityClient` you first need to create the `XPackClient`, -which is a wrapper around the existing {es} clients (any client class implementing -`org.elasticsearch.client.Client`). - -The following example shows how you can clear the realm caches using -the `SecurityClient`: - -[source,java] ------------------------------------------------------------------------------------------------------- -Client client = ... // create the transport client - -XPackClient xpackClient = new XPackClient(client); -SecurityClient securityClient = xpackClient.security(); -ClearRealmCacheResponse response = securityClient.authc().prepareClearRealmCache() - .realms("ldap1", "ad1") <1> - .usernames("rdeniro") - .get(); ------------------------------------------------------------------------------------------------------- -<1> Clears the `ldap1` and `ad1` realm caches for the `rdeniro` user. diff --git a/x-pack/docs/en/watcher/java.asciidoc b/x-pack/docs/en/watcher/java.asciidoc deleted file mode 100644 index 7224196834f9b..0000000000000 --- a/x-pack/docs/en/watcher/java.asciidoc +++ /dev/null @@ -1,130 +0,0 @@ -[[api-java]] -== Java API - -deprecated[7.0.0, The `TransportClient` is deprecated in favour of the {java-rest}/java-rest-high.html[Java High Level REST Client] and will be removed in Elasticsearch 8.0. The {java-rest}/java-rest-high-level-migration.html[migration guide] describes all the steps needed to migrate.] - -{xpack} provides a Java client called `WatcherClient` that adds native Java -support for the {watcher}. - -To obtain a `WatcherClient` instance, make sure you first set up the -`XPackClient`. - -[float] -=== Installing XPackClient - -You first need to make sure the +x-pack-transport-{version}+ JAR file is in the classpath. -You can extract this jar from the downloaded {xpack} bundle. - -If you use Maven to manage dependencies, add the following to the `pom.xml`: - -["source","xml",subs="attributes,callouts"] --------------------------------------------------- - - - - - - elasticsearch-releases - https://artifacts.elastic.co/maven - - true - - - false - - - ... - - ... - - - - - org.elasticsearch.client - x-pack-transport - {version} - - ... - - ... - - --------------------------------------------------- - -If you use Gradle, add the dependencies to `build.gradle`: - -["source","groovy",subs="attributes,callouts"] --------------------------------------------------------------- -repositories { - /* ... Any other repositories ... */ - - // Add the Elasticsearch Maven Repository - maven { - name "elastic" - url "https://artifacts.elastic.co/maven" - } -} - -dependencies { - // Provide the x-pack jar on the classpath for compilation and at runtime - compile "org.elasticsearch.client:x-pack-transport:{version}" - - /* ... */ -} --------------------------------------------------------------- - -You can also download the https://artifacts.elastic.co/maven/org/elasticsearch/client/x-pack-transport/{version}/x-pack-transport-{version}.jar[X-Pack Transport JAR] -manually, directly from our Maven repository. - -[float] -=== Obtaining the `WatcherClient` - -To obtain an instance of the `WatcherClient` you first need to create the -`XPackClient`. The `XPackClient` is a wrapper around the standard Java -Elasticsearch `Client`: - -[source,java] --------------------------------------------------- -import org.elasticsearch.client.transport.TransportClient; -import org.elasticsearch.xpack.client.PreBuiltXPackTransportClient; -import org.elasticsearch.xpack.core.XPackClient; -import org.elasticsearch.xpack.core.XPackPlugin; -import org.elasticsearch.core.watcher.client.WatcherClient; -... - -TransportClient client = new PreBuiltXPackTransportClient(Settings.builder() - .put("cluster.name", "myClusterName") - ... - .build()) - .addTransportAddress(new TransportAddress(InetAddress.getByName("localhost"), 9300)); - -XPackClient xpackClient = new XPackClient(client); -WatcherClient watcherClient = xpackClient.watcher(); --------------------------------------------------- - -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/java/put-watch.asciidoc -include::java/put-watch.asciidoc[] - -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/java/get-watch.asciidoc -include::java/get-watch.asciidoc[] - -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/java/delete-watch.asciidoc -include::java/delete-watch.asciidoc[] - -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/java/execute-watch.asciidoc -include::java/execute-watch.asciidoc[] - -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/java/ack-watch.asciidoc -include::java/ack-watch.asciidoc[] - -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/java/activate-watch.asciidoc -include::java/activate-watch.asciidoc[] - -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/java/deactivate-watch.asciidoc -include::java/deactivate-watch.asciidoc[] - -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/java/stats.asciidoc -include::java/stats.asciidoc[] - -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/java/service.asciidoc -include::java/service.asciidoc[] diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/Ccr.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/Ccr.java index 3eda554a84bd4..c74e39f017a3c 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/Ccr.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/Ccr.java @@ -133,7 +133,6 @@ public class Ccr extends Plugin implements ActionPlugin, PersistentTaskPlugin, E private final SetOnce restoreSourceService = new SetOnce<>(); private final SetOnce ccrSettings = new SetOnce<>(); private Client client; - private final boolean transportClientMode; /** * Construct an instance of the CCR container with the specified settings. @@ -155,7 +154,6 @@ public Ccr(final Settings settings) { this.settings = settings; this.enabled = CCR_ENABLED_SETTING.get(settings); this.ccrLicenseChecker = Objects.requireNonNull(ccrLicenseChecker); - this.transportClientMode = XPackPlugin.transportClientMode(settings); } @Override @@ -340,10 +338,6 @@ public void onIndexModule(IndexModule indexModule) { @Override public Collection createGuiceModules() { - if (transportClientMode) { - return Collections.emptyList(); - } - return Collections.singleton(b -> XPackPlugin.bindFeatureSet(b, CCRFeatureSet.class)); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java index a145569898ee6..6b457ae2fda9e 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java @@ -5,7 +5,6 @@ */ package org.elasticsearch.xpack.core; -import org.elasticsearch.Version; import org.elasticsearch.action.Action; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.cluster.ClusterState; @@ -13,12 +12,9 @@ import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.util.PageCacheRecycler; import org.elasticsearch.common.xcontent.NamedXContentRegistry; -import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.license.DeleteLicenseAction; import org.elasticsearch.license.GetBasicStatusAction; import org.elasticsearch.license.GetLicenseAction; @@ -34,8 +30,6 @@ import org.elasticsearch.plugins.NetworkPlugin; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.tasks.Task; -import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.Transport; import org.elasticsearch.xpack.core.action.TransportFreezeIndexAction; import org.elasticsearch.xpack.core.action.XPackInfoAction; import org.elasticsearch.xpack.core.action.XPackUsageAction; @@ -147,8 +141,6 @@ import org.elasticsearch.xpack.core.rollup.job.RollupJob; import org.elasticsearch.xpack.core.rollup.job.RollupJobStatus; import org.elasticsearch.xpack.core.security.SecurityFeatureSetUsage; -import org.elasticsearch.xpack.core.security.SecurityField; -import org.elasticsearch.xpack.core.security.SecuritySettings; import org.elasticsearch.xpack.core.security.action.CreateApiKeyAction; import org.elasticsearch.xpack.core.security.action.GetApiKeyAction; import org.elasticsearch.xpack.core.security.action.InvalidateApiKeyAction; @@ -178,9 +170,7 @@ import org.elasticsearch.xpack.core.security.authc.support.mapper.expressiondsl.RoleMapperExpression; import org.elasticsearch.xpack.core.security.authz.privilege.ConditionalClusterPrivilege; import org.elasticsearch.xpack.core.security.authz.privilege.ConditionalClusterPrivileges; -import org.elasticsearch.xpack.core.security.transport.netty4.SecurityNetty4Transport; import org.elasticsearch.xpack.core.sql.SqlFeatureSetUsage; -import org.elasticsearch.xpack.core.ssl.SSLService; import org.elasticsearch.xpack.core.ssl.action.GetCertificateInfoAction; import org.elasticsearch.xpack.core.upgrade.actions.IndexUpgradeAction; import org.elasticsearch.xpack.core.upgrade.actions.IndexUpgradeInfoAction; @@ -197,12 +187,10 @@ import java.util.ArrayList; import java.util.Arrays; -import java.util.Collections; import java.util.List; -import java.util.Map; import java.util.Optional; -import java.util.function.Supplier; +// TODO: merge this into XPackPlugin public class XPackClientPlugin extends Plugin implements ActionPlugin, NetworkPlugin { static Optional X_PACK_FEATURE = Optional.of("x-pack"); @@ -235,22 +223,6 @@ public List> getSettings() { return settings; } - @Override - public Settings additionalSettings() { - return additionalSettings(settings, XPackSettings.SECURITY_ENABLED.get(settings), XPackPlugin.transportClientMode(settings)); - } - - static Settings additionalSettings(final Settings settings, final boolean enabled, final boolean transportClientMode) { - if (enabled && transportClientMode) { - return Settings.builder() - .put(SecuritySettings.addTransportSettings(settings)) - .put(SecuritySettings.addUserSettings(settings)) - .build(); - } else { - return Settings.EMPTY; - } - } - @Override public List> getClientActions() { return Arrays.asList( @@ -505,27 +477,4 @@ public List getNamedXContent() { DataFrameTransformState::fromXContent) ); } - - @Override - public Map> getTransports( - final Settings settings, - final ThreadPool threadPool, - final PageCacheRecycler pageCacheRecycler, - final CircuitBreakerService circuitBreakerService, - final NamedWriteableRegistry namedWriteableRegistry, - final NetworkService networkService) { - // this should only be used in the transport layer, so do not add it if it is not in transport mode or we are disabled - if (XPackPlugin.transportClientMode(settings) == false || XPackSettings.SECURITY_ENABLED.get(settings) == false) { - return Collections.emptyMap(); - } - final SSLService sslService; - try { - sslService = new SSLService(settings, null); - } catch (Exception e) { - throw new RuntimeException(e); - } - return Collections.singletonMap(SecurityField.NAME4, () -> new SecurityNetty4Transport(settings, Version.CURRENT, threadPool, - networkService, pageCacheRecycler, namedWriteableRegistry, circuitBreakerService, sslService)); - } - } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackPlugin.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackPlugin.java index 2038b35b4e6e0..ababc3c21289a 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackPlugin.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackPlugin.java @@ -26,7 +26,6 @@ import org.elasticsearch.common.inject.Binder; import org.elasticsearch.common.inject.Module; import org.elasticsearch.common.inject.multibindings.Multibinder; -import org.elasticsearch.common.inject.util.Providers; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.settings.ClusterSettings; @@ -125,7 +124,6 @@ public Void run() { protected final Settings settings; //private final Environment env; - protected boolean transportClientMode; protected final Licensing licensing; // These should not be directly accessed as they cannot be overridden in tests. Please use the getters so they can be overridden. private static final SetOnce licenseState = new SetOnce<>(); @@ -137,8 +135,7 @@ public XPackPlugin( final Path configPath) { super(settings); this.settings = settings; - this.transportClientMode = transportClientMode(settings); - Environment env = transportClientMode ? null : new Environment(settings, configPath); + Environment env = new Environment(settings, configPath); setSslService(new SSLService(settings, env)); setLicenseState(new XPackLicenseState(settings)); @@ -222,12 +219,7 @@ public Settings additionalSettings() { if (settings.get(xpackInstalledNodeAttrSetting) != null) { throw new IllegalArgumentException("Directly setting [" + xpackInstalledNodeAttrSetting + "] is not permitted"); } - - if (transportClientMode) { - return super.additionalSettings(); - } else { - return Settings.builder().put(super.additionalSettings()).put(xpackInstalledNodeAttrSetting, "true").build(); - } + return Settings.builder().put(super.additionalSettings()).put(xpackInstalledNodeAttrSetting, "true").build(); } @Override @@ -236,10 +228,6 @@ public Collection createGuiceModules() { //modules.add(b -> b.bind(Clock.class).toInstance(getClock())); // used to get core up and running, we do not bind the actual feature set here modules.add(b -> XPackPlugin.createFeatureSetMultiBinder(b, EmptyXPackFeatureSet.class)); - - if (transportClientMode) { - modules.add(b -> b.bind(XPackLicenseState.class).toProvider(Providers.of(null))); - } return modules; } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/AbstractLicensesIntegrationTestCase.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/AbstractLicensesIntegrationTestCase.java index 9696ca6e7fde7..2d4991d514027 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/AbstractLicensesIntegrationTestCase.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/AbstractLicensesIntegrationTestCase.java @@ -22,6 +22,7 @@ import java.util.Collection; import java.util.concurrent.CountDownLatch; +@ESIntegTestCase.ClusterScope(transportClientRatio = 0.0) public abstract class AbstractLicensesIntegrationTestCase extends ESIntegTestCase { @Override diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/StartBasicLicenseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/StartBasicLicenseTests.java index 1b7d889d7262a..1f09f959883f3 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/StartBasicLicenseTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/StartBasicLicenseTests.java @@ -25,7 +25,7 @@ import static org.elasticsearch.test.ESIntegTestCase.Scope.SUITE; -@ESIntegTestCase.ClusterScope(scope = SUITE) +@ESIntegTestCase.ClusterScope(scope = SUITE, transportClientRatio = 0.0) public class StartBasicLicenseTests extends AbstractLicensesIntegrationTestCase { @Override diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/StartTrialLicenseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/StartTrialLicenseTests.java index ca1c361a5b99f..eac145dd0ffa8 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/StartTrialLicenseTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/StartTrialLicenseTests.java @@ -24,7 +24,7 @@ import static org.elasticsearch.test.ESIntegTestCase.Scope.SUITE; -@ESIntegTestCase.ClusterScope(scope = SUITE) +@ESIntegTestCase.ClusterScope(scope = SUITE, transportClientRatio = 0.0) public class StartTrialLicenseTests extends AbstractLicensesIntegrationTestCase { @Override diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/snapshots/SourceOnlySnapshotIT.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/snapshots/SourceOnlySnapshotIT.java index 81be978d33103..b03f51d1d195b 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/snapshots/SourceOnlySnapshotIT.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/snapshots/SourceOnlySnapshotIT.java @@ -54,7 +54,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; -@ESIntegTestCase.ClusterScope(numDataNodes = 0) +@ESIntegTestCase.ClusterScope(numDataNodes = 0, transportClientRatio = 0.0) public class SourceOnlySnapshotIT extends ESIntegTestCase { @Override diff --git a/x-pack/plugin/data-frame/qa/multi-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameIntegTestCase.java b/x-pack/plugin/data-frame/qa/multi-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameIntegTestCase.java index 3a6ab2e5b71d2..122cd570ab108 100644 --- a/x-pack/plugin/data-frame/qa/multi-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameIntegTestCase.java +++ b/x-pack/plugin/data-frame/qa/multi-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameIntegTestCase.java @@ -7,16 +7,36 @@ package org.elasticsearch.xpack.dataframe.integration; import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksRequest; -import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; -import org.elasticsearch.action.bulk.BulkRequestBuilder; +import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; +import org.elasticsearch.action.bulk.BulkRequest; import org.elasticsearch.action.bulk.BulkResponse; import org.elasticsearch.action.index.IndexRequest; -import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.client.RequestOptions; +import org.elasticsearch.client.RestHighLevelClient; +import org.elasticsearch.client.core.AcknowledgedResponse; +import org.elasticsearch.client.dataframe.DeleteDataFrameTransformRequest; +import org.elasticsearch.client.dataframe.GetDataFrameTransformStatsRequest; +import org.elasticsearch.client.dataframe.GetDataFrameTransformStatsResponse; +import org.elasticsearch.client.dataframe.PutDataFrameTransformRequest; +import org.elasticsearch.client.dataframe.StartDataFrameTransformRequest; +import org.elasticsearch.client.dataframe.StartDataFrameTransformResponse; +import org.elasticsearch.client.dataframe.StopDataFrameTransformRequest; +import org.elasticsearch.client.dataframe.StopDataFrameTransformResponse; +import org.elasticsearch.client.dataframe.transforms.DataFrameTransformConfig; +import org.elasticsearch.client.dataframe.transforms.DestConfig; +import org.elasticsearch.client.dataframe.transforms.QueryConfig; +import org.elasticsearch.client.dataframe.transforms.SourceConfig; +import org.elasticsearch.client.dataframe.transforms.pivot.AggregationConfig; +import org.elasticsearch.client.dataframe.transforms.pivot.DateHistogramGroupSource; +import org.elasticsearch.client.dataframe.transforms.pivot.GroupConfig; +import org.elasticsearch.client.dataframe.transforms.pivot.PivotConfig; +import org.elasticsearch.client.dataframe.transforms.pivot.SingleGroupSource; +import org.elasticsearch.client.indices.CreateIndexRequest; +import org.elasticsearch.client.indices.CreateIndexResponse; import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.io.PathUtils; -import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.common.xcontent.DeprecationHandler; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.ToXContent; @@ -26,36 +46,15 @@ import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryBuilders; -import org.elasticsearch.plugins.Plugin; import org.elasticsearch.search.SearchModule; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; -import org.elasticsearch.test.ESIntegTestCase; -import org.elasticsearch.test.SecuritySettingsSourceField; -import org.elasticsearch.transport.Netty4Plugin; -import org.elasticsearch.xpack.core.LocalStateCompositeXPackPlugin; -import org.elasticsearch.xpack.core.XPackClientPlugin; -import org.elasticsearch.xpack.core.dataframe.action.DeleteDataFrameTransformAction; -import org.elasticsearch.xpack.core.dataframe.action.GetDataFrameTransformsStatsAction; -import org.elasticsearch.xpack.core.dataframe.action.PutDataFrameTransformAction; -import org.elasticsearch.xpack.core.dataframe.action.StartDataFrameTransformAction; -import org.elasticsearch.xpack.core.dataframe.action.StopDataFrameTransformAction; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformConfig; -import org.elasticsearch.xpack.core.dataframe.transforms.DestConfig; -import org.elasticsearch.xpack.core.dataframe.transforms.QueryConfig; -import org.elasticsearch.xpack.core.dataframe.transforms.SourceConfig; -import org.elasticsearch.xpack.core.dataframe.transforms.pivot.AggregationConfig; -import org.elasticsearch.xpack.core.dataframe.transforms.pivot.DateHistogramGroupSource; -import org.elasticsearch.xpack.core.dataframe.transforms.pivot.GroupConfig; -import org.elasticsearch.xpack.core.dataframe.transforms.pivot.PivotConfig; -import org.elasticsearch.xpack.core.dataframe.transforms.pivot.SingleGroupSource; -import org.elasticsearch.xpack.core.security.SecurityField; - -import java.net.URISyntaxException; -import java.nio.file.Path; +import org.elasticsearch.test.rest.ESRestTestCase; + +import java.io.IOException; +import java.nio.charset.StandardCharsets; import java.time.ZoneId; -import java.util.Arrays; -import java.util.Collection; +import java.util.Base64; import java.util.Collections; import java.util.HashMap; import java.util.Map; @@ -64,18 +63,18 @@ import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.core.Is.is; -abstract class DataFrameIntegTestCase extends ESIntegTestCase { +abstract class DataFrameIntegTestCase extends ESRestTestCase { protected static final String REVIEWS_INDEX_NAME = "data_frame_reviews"; private Map transformConfigs = new HashMap<>(); - protected void cleanUp() { + protected void cleanUp() throws IOException { cleanUpTransforms(); waitForPendingTasks(); } - protected void cleanUpTransforms() { + protected void cleanUpTransforms() throws IOException { for (DataFrameTransformConfig config : transformConfigs.values()) { stopDataFrameTransform(config.getId()); deleteDataFrameTransform(config.getId()); @@ -83,41 +82,42 @@ protected void cleanUpTransforms() { transformConfigs.clear(); } - protected StopDataFrameTransformAction.Response stopDataFrameTransform(String id) { - return client().execute(StopDataFrameTransformAction.INSTANCE, - new StopDataFrameTransformAction.Request(id, true, false, null)).actionGet(); + protected StopDataFrameTransformResponse stopDataFrameTransform(String id) throws IOException { + RestHighLevelClient restClient = new TestRestHighLevelClient(); + return restClient.dataFrame().stopDataFrameTransform(new StopDataFrameTransformRequest(id, true, null), RequestOptions.DEFAULT); } - protected StartDataFrameTransformAction.Response startDataFrameTransform(String id) { - return client().execute(StartDataFrameTransformAction.INSTANCE, - new StartDataFrameTransformAction.Request(id, false)).actionGet(); + protected StartDataFrameTransformResponse startDataFrameTransform(String id, RequestOptions options) throws IOException { + RestHighLevelClient restClient = new TestRestHighLevelClient(); + return restClient.dataFrame().startDataFrameTransform(new StartDataFrameTransformRequest(id), options); } - protected AcknowledgedResponse deleteDataFrameTransform(String id) { - AcknowledgedResponse response = client().execute(DeleteDataFrameTransformAction.INSTANCE, - new DeleteDataFrameTransformAction.Request(id)) - .actionGet(); + protected AcknowledgedResponse deleteDataFrameTransform(String id) throws IOException { + RestHighLevelClient restClient = new TestRestHighLevelClient(); + AcknowledgedResponse response = + restClient.dataFrame().deleteDataFrameTransform(new DeleteDataFrameTransformRequest(id), RequestOptions.DEFAULT); if (response.isAcknowledged()) { transformConfigs.remove(id); } return response; } - protected AcknowledgedResponse putDataFrameTransform(DataFrameTransformConfig config) { + protected AcknowledgedResponse putDataFrameTransform(DataFrameTransformConfig config, RequestOptions options) throws IOException { if (transformConfigs.keySet().contains(config.getId())) { throw new IllegalArgumentException("data frame transform [" + config.getId() + "] is already registered"); } - AcknowledgedResponse response = client().execute(PutDataFrameTransformAction.INSTANCE, - new PutDataFrameTransformAction.Request(config)) - .actionGet(); + RestHighLevelClient restClient = new TestRestHighLevelClient(); + AcknowledgedResponse response = + restClient.dataFrame().putDataFrameTransform(new PutDataFrameTransformRequest(config), options); if (response.isAcknowledged()) { transformConfigs.put(config.getId(), config); } return response; } - protected GetDataFrameTransformsStatsAction.Response getDataFrameTransformStats(String id) { - return client().execute(GetDataFrameTransformsStatsAction.INSTANCE, new GetDataFrameTransformsStatsAction.Request(id)).actionGet(); + protected GetDataFrameTransformStatsResponse getDataFrameTransformStats(String id) throws IOException { + RestHighLevelClient restClient = new TestRestHighLevelClient(); + return restClient.dataFrame().getDataFrameTransformStats(new GetDataFrameTransformStatsRequest(id), RequestOptions.DEFAULT); } protected void waitUntilCheckpoint(String id, long checkpoint) throws Exception { @@ -136,38 +136,40 @@ protected void waitUntilCheckpoint(String id, long checkpoint, TimeValue waitTim } protected DateHistogramGroupSource createDateHistogramGroupSource(String field, long interval, ZoneId zone, String format) { - DateHistogramGroupSource source = new DateHistogramGroupSource(field); - source.setFormat(format); - source.setInterval(interval); - source.setTimeZone(zone); - return source; + DateHistogramGroupSource.Builder builder = DateHistogramGroupSource.builder() + .setField(field) + .setFormat(format) + .setInterval(interval) + .setTimeZone(zone); + return builder.build(); } protected DateHistogramGroupSource createDateHistogramGroupSource(String field, DateHistogramInterval interval, ZoneId zone, String format) { - DateHistogramGroupSource source = new DateHistogramGroupSource(field); - source.setFormat(format); - source.setDateHistogramInterval(interval); - source.setTimeZone(zone); - return source; + DateHistogramGroupSource.Builder builder = DateHistogramGroupSource.builder() + .setField(field) + .setFormat(format) + .setDateHistgramInterval(interval) + .setTimeZone(zone); + return builder.build(); } protected GroupConfig createGroupConfig(Map groups) throws Exception { - Map lazyParsed = new HashMap<>(groups.size()); - for(Map.Entry sgs : groups.entrySet()) { - lazyParsed.put(sgs.getKey(), Collections.singletonMap(sgs.getValue().getType().value(), toLazy(sgs.getValue()))); + GroupConfig.Builder builder = GroupConfig.builder(); + for (Map.Entry sgs : groups.entrySet()) { + builder.groupBy(sgs.getKey(), sgs.getValue()); } - return new GroupConfig(lazyParsed, groups); + return builder.build(); } protected QueryConfig createQueryConfig(QueryBuilder queryBuilder) throws Exception { - return new QueryConfig(toLazy(queryBuilder), queryBuilder); + return new QueryConfig(queryBuilder); } protected AggregationConfig createAggConfig(AggregatorFactories.Builder aggregations) throws Exception { - return new AggregationConfig(toLazy(aggregations), aggregations); + return new AggregationConfig(aggregations); } protected PivotConfig createPivotConfig(Map groups, @@ -178,7 +180,11 @@ protected PivotConfig createPivotConfig(Map groups, protected PivotConfig createPivotConfig(Map groups, AggregatorFactories.Builder aggregations, Integer size) throws Exception { - return new PivotConfig(createGroupConfig(groups), createAggConfig(aggregations), size); + PivotConfig.Builder builder = PivotConfig.builder() + .setGroups(createGroupConfig(groups)) + .setAggregationConfig(createAggConfig(aggregations)) + .setMaxPageSearchSize(size); + return builder.build(); } protected DataFrameTransformConfig createTransformConfig(String id, @@ -195,16 +201,18 @@ protected DataFrameTransformConfig createTransformConfig(String id, String destinationIndex, QueryBuilder queryBuilder, String... sourceIndices) throws Exception { - return new DataFrameTransformConfig(id, - new SourceConfig(sourceIndices, createQueryConfig(queryBuilder)), - new DestConfig(destinationIndex), - Collections.emptyMap(), - createPivotConfig(groups, aggregations), - "Test data frame transform config id: " + id); + return DataFrameTransformConfig.builder() + .setId(id) + .setSource(SourceConfig.builder().setIndex(sourceIndices).setQueryConfig(createQueryConfig(queryBuilder)).build()) + .setDest(new DestConfig(destinationIndex)) + .setPivotConfig(createPivotConfig(groups, aggregations)) + .setDescription("Test data frame transform config id: " + id) + .build(); } protected void createReviewsIndex() throws Exception { final int numDocs = 1000; + RestHighLevelClient restClient = new TestRestHighLevelClient(); // create mapping try (XContentBuilder builder = jsonBuilder()) { @@ -229,16 +237,13 @@ protected void createReviewsIndex() throws Exception { .endObject(); } builder.endObject(); - CreateIndexResponse response = client().admin() - .indices() - .prepareCreate(REVIEWS_INDEX_NAME) - .addMapping("_doc", builder) - .get(); + CreateIndexResponse response = + restClient.indices().create(new CreateIndexRequest(REVIEWS_INDEX_NAME).mapping(builder), RequestOptions.DEFAULT); assertThat(response.isAcknowledged(), is(true)); } // create index - BulkRequestBuilder bulk = client().prepareBulk(REVIEWS_INDEX_NAME, "_doc"); + BulkRequest bulk = new BulkRequest(REVIEWS_INDEX_NAME); int day = 10; for (int i = 0; i < numDocs; i++) { long user = i % 28; @@ -267,15 +272,15 @@ protected void createReviewsIndex() throws Exception { bulk.add(new IndexRequest().source(sourceBuilder.toString(), XContentType.JSON)); if (i % 50 == 0) { - BulkResponse response = client().bulk(bulk.request()).get(); + BulkResponse response = restClient.bulk(bulk, RequestOptions.DEFAULT); assertThat(response.buildFailureMessage(), response.hasFailures(), is(false)); - bulk = client().prepareBulk(REVIEWS_INDEX_NAME, "_doc"); + bulk = new BulkRequest(REVIEWS_INDEX_NAME); day += 1; } } - BulkResponse response = client().bulk(bulk.request()).get(); + BulkResponse response = restClient.bulk(bulk, RequestOptions.DEFAULT); assertThat(response.buildFailureMessage(), response.hasFailures(), is(false)); - client().admin().indices().prepareRefresh(REVIEWS_INDEX_NAME).get(); + restClient.indices().refresh(new RefreshRequest(REVIEWS_INDEX_NAME), RequestOptions.DEFAULT); } protected Map toLazy(ToXContent parsedObject) throws Exception { @@ -293,8 +298,9 @@ private void waitForPendingTasks() { listTasksRequest.setWaitForCompletion(true); listTasksRequest.setDetailed(true); listTasksRequest.setTimeout(TimeValue.timeValueSeconds(10)); + RestHighLevelClient restClient = new TestRestHighLevelClient(); try { - admin().cluster().listTasks(listTasksRequest).get(); + restClient.tasks().list(listTasksRequest, RequestOptions.DEFAULT); } catch (Exception e) { throw new AssertionError("Failed to wait for pending tasks to complete", e); } @@ -307,33 +313,17 @@ protected NamedXContentRegistry xContentRegistry() { } @Override - protected Settings externalClusterClientSettings() { - Path key; - Path certificate; - try { - key = PathUtils.get(getClass().getResource("/testnode.pem").toURI()); - certificate = PathUtils.get(getClass().getResource("/testnode.crt").toURI()); - } catch (URISyntaxException e) { - throw new IllegalStateException("error trying to get keystore path", e); - } - Settings.Builder builder = Settings.builder(); - builder.put(NetworkModule.TRANSPORT_TYPE_KEY, SecurityField.NAME4); - builder.put(SecurityField.USER_SETTING.getKey(), "x_pack_rest_user:" + SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING); - builder.put("xpack.security.transport.ssl.enabled", true); - builder.put("xpack.security.transport.ssl.key", key.toAbsolutePath().toString()); - builder.put("xpack.security.transport.ssl.certificate", certificate.toAbsolutePath().toString()); - builder.put("xpack.security.transport.ssl.key_passphrase", "testnode"); - builder.put("xpack.security.transport.ssl.verification_mode", "certificate"); - return builder.build(); - } - - @Override - protected Collection> nodePlugins() { - return Arrays.asList(LocalStateCompositeXPackPlugin.class, Netty4Plugin.class); + protected Settings restClientSettings() { + final String token = "Basic " + + Base64.getEncoder().encodeToString(("x_pack_rest_user:x-pack-test-password").getBytes(StandardCharsets.UTF_8)); + return Settings.builder() + .put(ThreadContext.PREFIX + ".Authorization", token) + .build(); } - @Override - protected Collection> transportClientPlugins() { - return Arrays.asList(XPackClientPlugin.class, Netty4Plugin.class); + private class TestRestHighLevelClient extends RestHighLevelClient { + TestRestHighLevelClient() { + super(client(), restClient -> {}, Collections.emptyList()); + } } } diff --git a/x-pack/plugin/data-frame/qa/multi-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTransformIT.java b/x-pack/plugin/data-frame/qa/multi-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTransformIT.java index cc2e8c4436e06..363218d1b0f14 100644 --- a/x-pack/plugin/data-frame/qa/multi-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTransformIT.java +++ b/x-pack/plugin/data-frame/qa/multi-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTransformIT.java @@ -6,16 +6,18 @@ package org.elasticsearch.xpack.dataframe.integration; +import org.elasticsearch.client.RequestOptions; +import org.elasticsearch.client.core.IndexerState; +import org.elasticsearch.client.dataframe.transforms.DataFrameTransformConfig; +import org.elasticsearch.client.dataframe.transforms.DataFrameTransformStateAndStats; +import org.elasticsearch.client.dataframe.transforms.pivot.SingleGroupSource; +import org.elasticsearch.client.dataframe.transforms.pivot.TermsGroupSource; import org.elasticsearch.search.aggregations.AggregationBuilders; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformConfig; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformStateAndStats; -import org.elasticsearch.xpack.core.dataframe.transforms.pivot.SingleGroupSource; -import org.elasticsearch.xpack.core.dataframe.transforms.pivot.TermsGroupSource; -import org.elasticsearch.xpack.core.indexing.IndexerState; import org.junit.After; +import java.io.IOException; import java.util.HashMap; import java.util.Map; @@ -24,7 +26,7 @@ public class DataFrameTransformIT extends DataFrameIntegTestCase { @After - public void cleanTransforms() { + public void cleanTransforms() throws IOException { cleanUp(); } @@ -34,8 +36,8 @@ public void testDataFrameTransformCrud() throws Exception { Map groups = new HashMap<>(); groups.put("by-day", createDateHistogramGroupSource("timestamp", DateHistogramInterval.DAY, null, null)); - groups.put("by-user", new TermsGroupSource("user_id")); - groups.put("by-business", new TermsGroupSource("business_id")); + groups.put("by-user", TermsGroupSource.builder().setField("user_id").build()); + groups.put("by-business", TermsGroupSource.builder().setField("business_id").build()); AggregatorFactories.Builder aggs = AggregatorFactories.builder() .addAggregator(AggregationBuilders.avg("review_score").field("stars")) @@ -47,8 +49,10 @@ public void testDataFrameTransformCrud() throws Exception { "reviews-by-user-business-day", REVIEWS_INDEX_NAME); - assertTrue(putDataFrameTransform(config).isAcknowledged()); - assertTrue(startDataFrameTransform(config.getId()).isStarted()); + final RequestOptions options = + expectWarnings("[interval] on [date_histogram] is deprecated, use [fixed_interval] or [calendar_interval] in the future."); + assertTrue(putDataFrameTransform(config, options).isAcknowledged()); + assertTrue(startDataFrameTransform(config.getId(), options).isStarted()); waitUntilCheckpoint(config.getId(), 1L); diff --git a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTransformProgressIT.java b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTransformProgressIT.java index 7d0fb179a2228..d6ef3cc641be2 100644 --- a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTransformProgressIT.java +++ b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTransformProgressIT.java @@ -7,24 +7,23 @@ package org.elasticsearch.xpack.dataframe.integration; import org.apache.lucene.util.LuceneTestCase; -import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; -import org.elasticsearch.action.bulk.BulkRequestBuilder; +import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; +import org.elasticsearch.action.bulk.BulkRequest; import org.elasticsearch.action.bulk.BulkResponse; import org.elasticsearch.action.index.IndexRequest; -import org.elasticsearch.action.support.PlainActionFuture; -import org.elasticsearch.common.network.NetworkModule; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.client.RequestOptions; +import org.elasticsearch.client.RestHighLevelClient; +import org.elasticsearch.client.indices.CreateIndexRequest; +import org.elasticsearch.client.indices.CreateIndexResponse; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.query.QueryBuilders; -import org.elasticsearch.plugins.Plugin; import org.elasticsearch.search.aggregations.AggregationBuilders; import org.elasticsearch.search.aggregations.AggregatorFactories; -import org.elasticsearch.test.ESIntegTestCase; -import org.elasticsearch.test.SecuritySettingsSourceField; -import org.elasticsearch.transport.Netty4Plugin; -import org.elasticsearch.xpack.core.LocalStateCompositeXPackPlugin; -import org.elasticsearch.xpack.core.XPackClientPlugin; +import org.elasticsearch.test.rest.ESRestTestCase; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformConfig; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformProgress; import org.elasticsearch.xpack.core.dataframe.transforms.DestConfig; @@ -34,11 +33,10 @@ import org.elasticsearch.xpack.core.dataframe.transforms.pivot.GroupConfig; import org.elasticsearch.xpack.core.dataframe.transforms.pivot.HistogramGroupSource; import org.elasticsearch.xpack.core.dataframe.transforms.pivot.PivotConfig; -import org.elasticsearch.xpack.core.security.SecurityField; import org.elasticsearch.xpack.dataframe.transforms.TransformProgressGatherer; -import java.util.Arrays; -import java.util.Collection; +import java.nio.charset.StandardCharsets; +import java.util.Base64; import java.util.Collections; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; @@ -47,10 +45,11 @@ import static org.hamcrest.Matchers.is; @LuceneTestCase.AwaitsFix( bugUrl = "https://github.com/elastic/elasticsearch/issues/42344") -public class DataFrameTransformProgressIT extends ESIntegTestCase { +public class DataFrameTransformProgressIT extends ESRestTestCase { protected void createReviewsIndex() throws Exception { final int numDocs = 1000; + final RestHighLevelClient restClient = new TestRestHighLevelClient(); // create mapping try (XContentBuilder builder = jsonBuilder()) { @@ -75,16 +74,13 @@ protected void createReviewsIndex() throws Exception { .endObject(); } builder.endObject(); - CreateIndexResponse response = client().admin() - .indices() - .prepareCreate(REVIEWS_INDEX_NAME) - .addMapping("_doc", builder) - .get(); + CreateIndexResponse response = restClient.indices() + .create(new CreateIndexRequest(REVIEWS_INDEX_NAME).mapping(builder), RequestOptions.DEFAULT); assertThat(response.isAcknowledged(), is(true)); } // create index - BulkRequestBuilder bulk = client().prepareBulk(REVIEWS_INDEX_NAME, "_doc"); + BulkRequest bulk = new BulkRequest(REVIEWS_INDEX_NAME); int day = 10; for (int i = 0; i < numDocs; i++) { long user = i % 28; @@ -113,14 +109,14 @@ protected void createReviewsIndex() throws Exception { bulk.add(new IndexRequest().source(sourceBuilder.toString(), XContentType.JSON)); if (i % 50 == 0) { - BulkResponse response = client().bulk(bulk.request()).get(); + BulkResponse response = restClient.bulk(bulk, RequestOptions.DEFAULT); assertThat(response.buildFailureMessage(), response.hasFailures(), is(false)); - bulk = client().prepareBulk(REVIEWS_INDEX_NAME, "_doc"); + bulk = new BulkRequest(REVIEWS_INDEX_NAME); day += 1; } } - client().bulk(bulk.request()).get(); - client().admin().indices().prepareRefresh(REVIEWS_INDEX_NAME).get(); + restClient.bulk(bulk, RequestOptions.DEFAULT); + restClient.indices().refresh(new RefreshRequest(REVIEWS_INDEX_NAME), RequestOptions.DEFAULT); } public void testGetProgress() throws Exception { @@ -140,10 +136,11 @@ public void testGetProgress() throws Exception { pivotConfig, null); - PlainActionFuture progressFuture = new PlainActionFuture<>(); - TransformProgressGatherer.getInitialProgress(client(), config, progressFuture); + final RestHighLevelClient restClient = new TestRestHighLevelClient(); + SearchResponse response = restClient.search(TransformProgressGatherer.getSearchRequest(config), RequestOptions.DEFAULT); - DataFrameTransformProgress progress = progressFuture.get(); + DataFrameTransformProgress progress = + TransformProgressGatherer.searchResponseToDataFrameTransformProgressFunction().apply(response); assertThat(progress.getTotalDocs(), equalTo(1000L)); assertThat(progress.getRemainingDocs(), equalTo(1000L)); @@ -160,34 +157,28 @@ public void testGetProgress() throws Exception { pivotConfig, null); - - progressFuture = new PlainActionFuture<>(); - - TransformProgressGatherer.getInitialProgress(client(), config, progressFuture); - progress = progressFuture.get(); + response = restClient.search(TransformProgressGatherer.getSearchRequest(config), RequestOptions.DEFAULT); + progress = TransformProgressGatherer.searchResponseToDataFrameTransformProgressFunction().apply(response); assertThat(progress.getTotalDocs(), equalTo(35L)); assertThat(progress.getRemainingDocs(), equalTo(35L)); assertThat(progress.getPercentComplete(), equalTo(0.0)); - client().admin().indices().prepareDelete(REVIEWS_INDEX_NAME).get(); + deleteIndex(REVIEWS_INDEX_NAME); } @Override - protected Settings externalClusterClientSettings() { - Settings.Builder builder = Settings.builder(); - builder.put(NetworkModule.TRANSPORT_TYPE_KEY, SecurityField.NAME4); - builder.put(SecurityField.USER_SETTING.getKey(), "x_pack_rest_user:" + SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING); - return builder.build(); + protected Settings restClientSettings() { + final String token = "Basic " + + Base64.getEncoder().encodeToString(("x_pack_rest_user:x-pack-test-password").getBytes(StandardCharsets.UTF_8)); + return Settings.builder() + .put(ThreadContext.PREFIX + ".Authorization", token) + .build(); } - @Override - protected Collection> nodePlugins() { - return Arrays.asList(LocalStateCompositeXPackPlugin.class, Netty4Plugin.class); - } - - @Override - protected Collection> transportClientPlugins() { - return Arrays.asList(XPackClientPlugin.class, Netty4Plugin.class); + private class TestRestHighLevelClient extends RestHighLevelClient { + TestRestHighLevelClient() { + super(client(), restClient -> {}, Collections.emptyList()); + } } } diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/DataFrame.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/DataFrame.java index 34343e5fe8820..e8206311c012b 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/DataFrame.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/DataFrame.java @@ -92,7 +92,6 @@ public class DataFrame extends Plugin implements ActionPlugin, PersistentTaskPlu private final boolean enabled; private final Settings settings; - private final boolean transportClientMode; private final SetOnce dataFrameTransformsConfigManager = new SetOnce<>(); private final SetOnce dataFrameAuditor = new SetOnce<>(); private final SetOnce dataFrameTransformsCheckpointService = new SetOnce<>(); @@ -100,19 +99,12 @@ public class DataFrame extends Plugin implements ActionPlugin, PersistentTaskPlu public DataFrame(Settings settings) { this.settings = settings; - this.enabled = XPackSettings.DATA_FRAME_ENABLED.get(settings); - this.transportClientMode = XPackPlugin.transportClientMode(settings); } @Override public Collection createGuiceModules() { List modules = new ArrayList<>(); - - if (transportClientMode) { - return modules; - } - modules.add(b -> XPackPlugin.bindFeatureSet(b, DataFrameFeatureSet.class)); return modules; } @@ -159,7 +151,7 @@ public List getRestHandlers(final Settings settings, final RestCont @Override public List> getExecutorBuilders(Settings settings) { - if (false == enabled || transportClientMode) { + if (false == enabled) { return emptyList(); } @@ -173,7 +165,7 @@ public List> getExecutorBuilders(Settings settings) { public Collection createComponents(Client client, ClusterService clusterService, ThreadPool threadPool, ResourceWatcherService resourceWatcherService, ScriptService scriptService, NamedXContentRegistry xContentRegistry, Environment environment, NodeEnvironment nodeEnvironment, NamedWriteableRegistry namedWriteableRegistry) { - if (enabled == false || transportClientMode) { + if (enabled == false) { return emptyList(); } dataFrameAuditor.set(new DataFrameAuditor(client, clusterService.getNodeName())); @@ -203,7 +195,7 @@ public UnaryOperator> getIndexTemplateMetaDat @Override public List> getPersistentTasksExecutor(ClusterService clusterService, ThreadPool threadPool, Client client, SettingsModule settingsModule) { - if (enabled == false || transportClientMode) { + if (enabled == false) { return emptyList(); } diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/TransformProgressGatherer.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/TransformProgressGatherer.java index 23168627d442e..18a341e217294 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/TransformProgressGatherer.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/TransformProgressGatherer.java @@ -11,10 +11,13 @@ import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.client.Client; +import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.xpack.core.ClientHelper; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformConfig; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformProgress; +import java.util.function.Function; + /** * Utility class to gather the progress information for a given config and its cursor position */ @@ -29,17 +32,10 @@ public final class TransformProgressGatherer { public static void getInitialProgress(Client client, DataFrameTransformConfig config, ActionListener progressListener) { - SearchRequest request = client.prepareSearch(config.getSource().getIndex()) - .setSize(0) - .setAllowPartialSearchResults(false) - .setTrackTotalHits(true) - .setQuery(config.getSource().getQueryConfig().getQuery()) - .request(); + SearchRequest request = getSearchRequest(config); ActionListener searchResponseActionListener = ActionListener.wrap( - searchResponse -> { - progressListener.onResponse(new DataFrameTransformProgress(searchResponse.getHits().getTotalHits().value, null)); - }, + searchResponse -> progressListener.onResponse(searchResponseToDataFrameTransformProgressFunction().apply(searchResponse)), progressListener::onFailure ); ClientHelper.executeWithHeadersAsync(config.getHeaders(), @@ -50,4 +46,17 @@ public static void getInitialProgress(Client client, searchResponseActionListener); } + public static SearchRequest getSearchRequest(DataFrameTransformConfig config) { + SearchRequest request = new SearchRequest(config.getSource().getIndex()); + request.allowPartialSearchResults(false); + request.source(new SearchSourceBuilder() + .size(0) + .trackTotalHits(true) + .query(config.getSource().getQueryConfig().getQuery())); + return request; + } + + public static Function searchResponseToDataFrameTransformProgressFunction() { + return searchResponse -> new DataFrameTransformProgress(searchResponse.getHits().getTotalHits().value, null); + } } diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycle.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycle.java index 2e7d2fbbc555d..0b6c6001ca0ef 100644 --- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycle.java +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycle.java @@ -93,12 +93,10 @@ public class IndexLifecycle extends Plugin implements ActionPlugin { private final SetOnce indexLifecycleInitialisationService = new SetOnce<>(); private Settings settings; private boolean enabled; - private boolean transportClientMode; public IndexLifecycle(Settings settings) { this.settings = settings; this.enabled = XPackSettings.INDEX_LIFECYCLE_ENABLED.get(settings); - this.transportClientMode = XPackPlugin.transportClientMode(settings); } // overridable by tests @@ -108,13 +106,7 @@ protected Clock getClock() { public Collection createGuiceModules() { List modules = new ArrayList<>(); - - if (transportClientMode) { - return modules; - } - modules.add(b -> XPackPlugin.bindFeatureSet(b, IndexLifecycleFeatureSet.class)); - return modules; } @@ -132,7 +124,7 @@ public Collection createComponents(Client client, ClusterService cluster ResourceWatcherService resourceWatcherService, ScriptService scriptService, NamedXContentRegistry xContentRegistry, Environment environment, NodeEnvironment nodeEnvironment, NamedWriteableRegistry namedWriteableRegistry) { - if (enabled == false || transportClientMode) { + if (enabled == false) { return emptyList(); } indexLifecycleInitialisationService.set(new IndexLifecycleService(settings, client, clusterService, threadPool, diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleInitialisationTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleInitialisationTests.java index a1a37beb1d129..673c10f885447 100644 --- a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleInitialisationTests.java +++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleInitialisationTests.java @@ -77,7 +77,7 @@ import static org.hamcrest.core.Is.is; import static org.hamcrest.core.IsNull.nullValue; -@ESIntegTestCase.ClusterScope(scope = Scope.TEST, numDataNodes = 0) +@ESIntegTestCase.ClusterScope(scope = Scope.TEST, numDataNodes = 0, transportClientRatio = 0.0) public class IndexLifecycleInitialisationTests extends ESIntegTestCase { private Settings settings; private LifecyclePolicy lifecyclePolicy; @@ -109,29 +109,11 @@ protected boolean ignoreExternalCluster() { return true; } - @Override - protected Settings transportClientSettings() { - Settings.Builder settings = Settings.builder().put(super.transportClientSettings()); - settings.put(XPackSettings.INDEX_LIFECYCLE_ENABLED.getKey(), true); - settings.put(XPackSettings.MACHINE_LEARNING_ENABLED.getKey(), false); - settings.put(XPackSettings.SECURITY_ENABLED.getKey(), false); - settings.put(XPackSettings.WATCHER_ENABLED.getKey(), false); - settings.put(XPackSettings.MONITORING_ENABLED.getKey(), false); - settings.put(XPackSettings.GRAPH_ENABLED.getKey(), false); - settings.put(XPackSettings.LOGSTASH_ENABLED.getKey(), false); - return settings.build(); - } - @Override protected Collection> nodePlugins() { return Arrays.asList(LocalStateCompositeXPackPlugin.class, IndexLifecycle.class, TestILMPlugin.class); } - @Override - protected Collection> transportClientPlugins() { - return nodePlugins(); - } - @Before public void init() { settings = Settings.builder().put(indexSettings()).put(SETTING_NUMBER_OF_SHARDS, 1) diff --git a/x-pack/plugin/logstash/src/main/java/org/elasticsearch/xpack/logstash/Logstash.java b/x-pack/plugin/logstash/src/main/java/org/elasticsearch/xpack/logstash/Logstash.java index d3ca4dd0098b7..9de1c2f56d1fd 100644 --- a/x-pack/plugin/logstash/src/main/java/org/elasticsearch/xpack/logstash/Logstash.java +++ b/x-pack/plugin/logstash/src/main/java/org/elasticsearch/xpack/logstash/Logstash.java @@ -36,21 +36,15 @@ public class Logstash extends Plugin implements ActionPlugin { Pattern.quote("${logstash.template.version}"); private final boolean enabled; - private final boolean transportClientMode; public Logstash(Settings settings) { this.enabled = XPackSettings.LOGSTASH_ENABLED.get(settings); - this.transportClientMode = XPackPlugin.transportClientMode(settings); } boolean isEnabled() { return enabled; } - boolean isTransportClient() { - return transportClientMode; - } - public Collection createGuiceModules() { List modules = new ArrayList<>(); modules.add(b -> { diff --git a/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/MlNativeIntegTestCase.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/MlNativeIntegTestCase.java index 89b2ec81f87ef..128bcce67994b 100644 --- a/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/MlNativeIntegTestCase.java +++ b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/MlNativeIntegTestCase.java @@ -16,7 +16,9 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.env.Environment; import org.elasticsearch.index.reindex.ReindexPlugin; +import org.elasticsearch.license.LicenseService; import org.elasticsearch.persistent.PersistentTaskParams; import org.elasticsearch.persistent.PersistentTaskState; import org.elasticsearch.plugins.Plugin; @@ -24,9 +26,8 @@ import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.SecuritySettingsSourceField; import org.elasticsearch.transport.Netty4Plugin; -import org.elasticsearch.xpack.core.LocalStateCompositeXPackPlugin; -import org.elasticsearch.xpack.core.XPackClientPlugin; import org.elasticsearch.xpack.core.XPackSettings; +import org.elasticsearch.xpack.core.ml.MachineLearningField; import org.elasticsearch.xpack.core.ml.MlMetadata; import org.elasticsearch.xpack.core.ml.MlTasks; import org.elasticsearch.xpack.core.ml.action.OpenJobAction; @@ -35,9 +36,12 @@ import org.elasticsearch.xpack.core.ml.job.config.JobTaskState; import org.elasticsearch.xpack.core.security.SecurityField; import org.elasticsearch.xpack.core.security.authc.TokenMetaData; +import org.elasticsearch.xpack.ml.LocalStateMachineLearning; import java.io.IOException; +import java.io.UncheckedIOException; import java.net.URISyntaxException; +import java.nio.file.Files; import java.nio.file.Path; import java.util.ArrayList; import java.util.Arrays; @@ -45,9 +49,12 @@ import java.util.Collections; import java.util.List; import java.util.Map; +import java.util.function.Function; import static org.elasticsearch.test.XContentTestUtils.convertToMap; import static org.elasticsearch.test.XContentTestUtils.differenceBetweenMapsIgnoringArrayOrder; +import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue; +import static org.elasticsearch.xpack.security.test.SecurityTestUtils.writeFile; /** * Base class of ML integration tests that use a native autodetect process @@ -62,16 +69,34 @@ protected NamedXContentRegistry xContentRegistry() { @Override protected Collection> nodePlugins() { - return Arrays.asList(LocalStateCompositeXPackPlugin.class, Netty4Plugin.class); + return Arrays.asList(LocalStateMachineLearning.class, Netty4Plugin.class, ReindexPlugin.class); } @Override - protected Collection> transportClientPlugins() { - return Arrays.asList(XPackClientPlugin.class, Netty4Plugin.class, ReindexPlugin.class); + protected Function getClientWrapper() { + final Map headers = + Map.of("Authorization", basicAuthHeaderValue("x_pack_rest_user", SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING)); + // we need to wrap node clients because we do not specify a user for nodes and all requests will use the system + // user. This is ok for internal n2n stuff but the test framework does other things like wiping indices, repositories, etc + // that the system user cannot do. so we wrap the node client with a user that can do these things since the client() calls + // are randomized to return both node clients and transport clients + // transport clients do not need to be wrapped since we specify the xpack.security.user setting that sets the default user to be + // used for the transport client. If we did not set a default user then the transport client would not even be allowed + // to connect + return client -> client.filterWithHeader(headers); } - @Override protected Settings externalClusterClientSettings() { + final Path home = createTempDir(); + final Path xpackConf = home.resolve("config"); + try { + Files.createDirectories(xpackConf); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + writeFile(xpackConf, "users", "x_pack_rest_user" + ":" + SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING + "\n"); + writeFile(xpackConf, "users_roles", "superuser:x_pack_rest_user\n"); + Path key; Path certificate; try { @@ -80,10 +105,17 @@ protected Settings externalClusterClientSettings() { } catch (URISyntaxException e) { throw new IllegalStateException("error trying to get keystore path", e); } + Settings.Builder builder = Settings.builder(); + builder.put("node.ml", false); builder.put(NetworkModule.TRANSPORT_TYPE_KEY, SecurityField.NAME4); - builder.put(SecurityField.USER_SETTING.getKey(), "x_pack_rest_user:" + SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING); builder.put(XPackSettings.MACHINE_LEARNING_ENABLED.getKey(), true); + builder.put(XPackSettings.SECURITY_ENABLED.getKey(), true); + builder.put(MachineLearningField.AUTODETECT_PROCESS.getKey(), false); + builder.put(XPackSettings.WATCHER_ENABLED.getKey(), false); + builder.put(XPackSettings.MONITORING_ENABLED.getKey(), false); + builder.put(LicenseService.SELF_GENERATED_LICENSE_TYPE.getKey(), "trial"); + builder.put(Environment.PATH_HOME_SETTING.getKey(), home); builder.put("xpack.security.transport.ssl.enabled", true); builder.put("xpack.security.transport.ssl.key", key.toAbsolutePath().toString()); builder.put("xpack.security.transport.ssl.certificate", certificate.toAbsolutePath().toString()); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java index f679170bc673d..42e945ffec8fd 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java @@ -305,7 +305,6 @@ public class MachineLearning extends Plugin implements ActionPlugin, AnalysisPlu private final Settings settings; private final Environment env; private final boolean enabled; - private final boolean transportClientMode; private final SetOnce autodetectProcessManager = new SetOnce<>(); private final SetOnce datafeedManager = new SetOnce<>(); @@ -314,8 +313,7 @@ public class MachineLearning extends Plugin implements ActionPlugin, AnalysisPlu public MachineLearning(Settings settings, Path configPath) { this.settings = settings; this.enabled = XPackSettings.MACHINE_LEARNING_ENABLED.get(settings); - this.transportClientMode = XPackPlugin.transportClientMode(settings); - this.env = transportClientMode ? null : new Environment(settings, configPath); + this.env = new Environment(settings, configPath); } protected XPackLicenseState getLicenseState() { return XPackPlugin.getSharedLicenseState(); } @@ -349,7 +347,7 @@ public Settings additionalSettings() { String maxOpenJobsPerNodeNodeAttrName = "node.attr." + MAX_OPEN_JOBS_NODE_ATTR; String machineMemoryAttrName = "node.attr." + MACHINE_MEMORY_NODE_ATTR; - if (enabled == false || transportClientMode) { + if (enabled == false) { disallowMlNodeAttributes(mlEnabledNodeAttrName, maxOpenJobsPerNodeNodeAttrName, machineMemoryAttrName); return Settings.EMPTY; } @@ -405,7 +403,7 @@ public Collection createComponents(Client client, ClusterService cluster ResourceWatcherService resourceWatcherService, ScriptService scriptService, NamedXContentRegistry xContentRegistry, Environment environment, NodeEnvironment nodeEnvironment, NamedWriteableRegistry namedWriteableRegistry) { - if (enabled == false || transportClientMode) { + if (enabled == false) { // special holder for @link(MachineLearningFeatureSetUsage) which needs access to job manager, empty if ML is disabled return Collections.singletonList(new JobManagerHolder()); } @@ -506,7 +504,7 @@ public List> getPersistentTasksExecutor(ClusterServic ThreadPool threadPool, Client client, SettingsModule settingsModule) { - if (enabled == false || transportClientMode) { + if (enabled == false) { return emptyList(); } @@ -519,15 +517,9 @@ public List> getPersistentTasksExecutor(ClusterServic public Collection createGuiceModules() { List modules = new ArrayList<>(); - - if (transportClientMode) { - return modules; - } - modules.add(b -> { XPackPlugin.bindFeatureSet(b, MachineLearningFeatureSet.class); }); - return modules; } @@ -650,7 +642,7 @@ public List getRestHandlers(Settings settings, RestController restC @Override public List> getExecutorBuilders(Settings settings) { - if (false == enabled || transportClientMode) { + if (false == enabled) { return emptyList(); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearningFeatureSet.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearningFeatureSet.java index bcfab50c21e00..2a5b4369cdf9a 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearningFeatureSet.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearningFeatureSet.java @@ -22,7 +22,6 @@ import org.elasticsearch.plugins.Platforms; import org.elasticsearch.xpack.core.XPackFeatureSet; import org.elasticsearch.xpack.core.XPackField; -import org.elasticsearch.xpack.core.XPackPlugin; import org.elasticsearch.xpack.core.XPackSettings; import org.elasticsearch.xpack.core.ml.MachineLearningFeatureSetUsage; import org.elasticsearch.xpack.core.ml.action.GetDatafeedsStatsAction; @@ -76,7 +75,7 @@ public MachineLearningFeatureSet(Environment environment, ClusterService cluster // Don't try to get the native code version if ML is disabled - it causes too much controversy // if ML has been disabled because of some OS incompatibility. Also don't try to get the native // code version in the transport client - the controller process won't be running. - if (enabled && XPackPlugin.transportClientMode(environment.settings()) == false) { + if (enabled) { try { if (isRunningOnMlPlatform(true)) { NativeController nativeController = NativeControllerHolder.getNativeController(clusterService.getNodeName(), diff --git a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/Monitoring.java b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/Monitoring.java index e2b137bd95d19..0472d909d0fa9 100644 --- a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/Monitoring.java +++ b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/Monitoring.java @@ -83,11 +83,9 @@ public class Monitoring extends Plugin implements ActionPlugin { protected final Settings settings; private final boolean enabled; - private final boolean transportClientMode; public Monitoring(Settings settings) { this.settings = settings; - this.transportClientMode = XPackPlugin.transportClientMode(settings); this.enabled = XPackSettings.MONITORING_ENABLED.get(settings); } @@ -100,16 +98,12 @@ boolean isEnabled() { return enabled; } - boolean isTransportClient() { - return transportClientMode; - } - @Override public Collection createGuiceModules() { List modules = new ArrayList<>(); modules.add(b -> { XPackPlugin.bindFeatureSet(b, MonitoringFeatureSet.class); - if (transportClientMode || enabled == false) { + if (enabled == false) { b.bind(MonitoringService.class).toProvider(Providers.of(null)); b.bind(Exporters.class).toProvider(Providers.of(null)); } diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/MonitoringPluginClientTests.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/MonitoringPluginClientTests.java index 6fb967c79782e..b4dc9b3112ece 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/MonitoringPluginClientTests.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/MonitoringPluginClientTests.java @@ -6,7 +6,6 @@ package org.elasticsearch.xpack.monitoring; import org.elasticsearch.client.Client; -import org.elasticsearch.client.transport.TransportClient; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.test.ESTestCase; @@ -14,17 +13,6 @@ public class MonitoringPluginClientTests extends ESTestCase { - public void testModulesWithClientSettings() throws Exception { - Settings settings = Settings.builder() - .put("path.home", createTempDir()) - .put(Client.CLIENT_TYPE_SETTING_S.getKey(), TransportClient.CLIENT_TYPE) - .build(); - - Monitoring plugin = new Monitoring(settings); - assertThat(plugin.isEnabled(), is(true)); - assertThat(plugin.isTransportClient(), is(true)); - } - public void testModulesWithNodeSettings() throws Exception { // these settings mimic what ES does when running as a node... Settings settings = Settings.builder() @@ -33,6 +21,5 @@ public void testModulesWithNodeSettings() throws Exception { .build(); Monitoring plugin = new Monitoring(settings); assertThat(plugin.isEnabled(), is(true)); - assertThat(plugin.isTransportClient(), is(false)); } } diff --git a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/Rollup.java b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/Rollup.java index 8ebbf1bccf864..faa713efb7d11 100644 --- a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/Rollup.java +++ b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/Rollup.java @@ -108,12 +108,10 @@ public class Rollup extends Plugin implements ActionPlugin, PersistentTaskPlugin private final SetOnce schedulerEngine = new SetOnce<>(); private final Settings settings; private final boolean enabled; - private final boolean transportClientMode; public Rollup(Settings settings) { this.settings = settings; this.enabled = XPackSettings.ROLLUP_ENABLED.get(settings); - this.transportClientMode = XPackPlugin.transportClientMode(settings); } @Override @@ -127,10 +125,6 @@ public Collection createComponents(Client client, ClusterService cluster @Override public Collection createGuiceModules() { List modules = new ArrayList<>(); - - if (transportClientMode) { - return modules; - } modules.add(b -> XPackPlugin.bindFeatureSet(b, RollupFeatureSet.class)); return modules; } @@ -178,7 +172,7 @@ public List getRestHandlers(Settings settings, RestController restC @Override public List> getExecutorBuilders(Settings settings) { - if (false == enabled || transportClientMode) { + if (false == enabled) { return emptyList(); } @@ -193,7 +187,7 @@ public List> getPersistentTasksExecutor(ClusterServic ThreadPool threadPool, Client client, SettingsModule settingsModule) { - if (enabled == false || transportClientMode ) { + if (enabled == false) { return emptyList(); } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java index a6218522fb7e5..c7ada6e79a9ac 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java @@ -270,7 +270,6 @@ public class Security extends Plugin implements ActionPlugin, IngestPlugin, Netw private final Settings settings; private final Environment env; private final boolean enabled; - private final boolean transportClientMode; /* what a PITA that we need an extra indirection to initialize this. Yet, once we got rid of guice we can thing about how * to fix this or make it simpler. Today we need several service that are created in createComponents but we need to register * an instance of TransportInterceptor way earlier before createComponents is called. */ @@ -293,10 +292,9 @@ public Security(Settings settings, final Path configPath) { Security(Settings settings, final Path configPath, List extensions) { this.settings = settings; - this.transportClientMode = XPackPlugin.transportClientMode(settings); - this.env = transportClientMode ? null : new Environment(settings, configPath); + this.env = new Environment(settings, configPath); this.enabled = XPackSettings.SECURITY_ENABLED.get(settings); - if (enabled && transportClientMode == false) { + if (enabled) { runStartupChecks(settings); // we load them all here otherwise we can't access secure settings since they are closed once the checks are // fetched @@ -327,24 +325,11 @@ private static void runStartupChecks(Settings settings) { @Override public Collection createGuiceModules() { List modules = new ArrayList<>(); - if (enabled == false || transportClientMode) { + if (enabled == false) { modules.add(b -> b.bind(IPFilter.class).toProvider(Providers.of(null))); } - - if (transportClientMode) { - if (enabled == false) { - return modules; - } - modules.add(b -> { - // for transport client we still must inject these ssl classes with guice - b.bind(SSLService.class).toInstance(getSslService()); - }); - - return modules; - } modules.add(b -> XPackPlugin.bindFeatureSet(b, SecurityFeatureSet.class)); - if (enabled == false) { modules.add(b -> { b.bind(Realms.class).toProvider(Providers.of(null)); // for SecurityFeatureSet @@ -568,12 +553,12 @@ private AuthenticationFailureHandler createAuthenticationFailureHandler(final Re @Override public Settings additionalSettings() { - return additionalSettings(settings, enabled, transportClientMode); + return additionalSettings(settings, enabled); } // visible for tests - static Settings additionalSettings(final Settings settings, final boolean enabled, final boolean transportClientMode) { - if (enabled && transportClientMode == false) { + static Settings additionalSettings(final Settings settings, final boolean enabled) { + if (enabled) { final Settings.Builder builder = Settings.builder(); builder.put(SecuritySettings.addTransportSettings(settings)); @@ -606,19 +591,15 @@ static Settings additionalSettings(final Settings settings, final boolean enable @Override public List> getSettings() { - return getSettings(transportClientMode, securityExtensions); + return getSettings(securityExtensions); } /** * Get the {@link Setting setting configuration} for all security components, including those defined in extensions. */ - public static List> getSettings(boolean transportClientMode, List securityExtensions) { + public static List> getSettings(List securityExtensions) { List> settingsList = new ArrayList<>(); - if (transportClientMode) { - return settingsList; - } - // The following just apply in node mode settingsList.add(XPackSettings.FIPS_MODE_ENABLED); @@ -657,9 +638,6 @@ public static List> getSettings(boolean transportClientMode, List getRestHeaders() { - if (transportClientMode) { - return Collections.emptyList(); - } Set headers = new HashSet<>(); headers.add(UsernamePasswordToken.BASIC_AUTH_HEADER); if (XPackSettings.AUDIT_ENABLED.get(settings)) { @@ -773,11 +751,7 @@ public List getActionFilters() { if (enabled == false) { return emptyList(); } - // registering the security filter only for nodes - if (transportClientMode == false) { - return singletonList(securityActionFilter.get()); - } - return emptyList(); + return singletonList(securityActionFilter.get()); } @Override @@ -865,7 +839,7 @@ static void validateRealmSettings(Settings settings) { @Override public List getTransportInterceptors(NamedWriteableRegistry namedWriteableRegistry, ThreadContext threadContext) { - if (transportClientMode || enabled == false) { // don't register anything if we are not enabled + if (enabled == false) { // don't register anything if we are not enabled // interceptors are not installed if we are running on the transport client return Collections.emptyList(); } @@ -890,7 +864,7 @@ public AsyncSender interceptSender(AsyncSender sender) { public Map> getTransports(Settings settings, ThreadPool threadPool, PageCacheRecycler pageCacheRecycler, CircuitBreakerService circuitBreakerService, NamedWriteableRegistry namedWriteableRegistry, NetworkService networkService) { - if (transportClientMode || enabled == false) { // don't register anything if we are not enabled, or in transport client mode + if (enabled == false) { // don't register anything if we are not enabled, or in transport client mode return Collections.emptyMap(); } @@ -944,7 +918,7 @@ public Map> getHttpTransports(Settings set @Override public UnaryOperator getRestHandlerWrapper(ThreadContext threadContext) { - if (enabled == false || transportClientMode) { + if (enabled == false) { return null; } final boolean ssl = HTTP_SSL_ENABLED.get(settings); @@ -955,7 +929,7 @@ public UnaryOperator getRestHandlerWrapper(ThreadContext threadCont @Override public List> getExecutorBuilders(final Settings settings) { - if (enabled && transportClientMode == false) { + if (enabled) { return Collections.singletonList( new FixedExecutorBuilder(settings, TokenService.THREAD_POOL_NAME, 1, 1000, "xpack.security.authc.token.thread_pool")); } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/AbstractSecurityModule.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/AbstractSecurityModule.java deleted file mode 100644 index 0dfb369bc371f..0000000000000 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/AbstractSecurityModule.java +++ /dev/null @@ -1,47 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ -package org.elasticsearch.xpack.security.support; - -import org.elasticsearch.client.Client; -import org.elasticsearch.client.transport.TransportClient; -import org.elasticsearch.common.inject.AbstractModule; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.xpack.core.XPackSettings; - -public abstract class AbstractSecurityModule extends AbstractModule { - - protected final Settings settings; - protected final boolean clientMode; - protected final boolean securityEnabled; - - public AbstractSecurityModule(Settings settings) { - this.settings = settings; - this.clientMode = TransportClient.CLIENT_TYPE.equals(settings.get(Client.CLIENT_TYPE_SETTING_S.getKey())); - this.securityEnabled = XPackSettings.SECURITY_ENABLED.get(settings); - } - - @Override - protected final void configure() { - configure(clientMode); - } - - protected abstract void configure(boolean clientMode); - - public abstract static class Node extends AbstractSecurityModule { - - protected Node(Settings settings) { - super(settings); - } - - @Override - protected final void configure(boolean clientMode) { - assert !clientMode : "[" + getClass().getSimpleName() + "] is a node only module"; - configureNode(); - } - - protected abstract void configureNode(); - } -} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/BulkUpdateTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/BulkUpdateTests.java index bb0036e9f870a..db5a22c5e6e9d 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/BulkUpdateTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/BulkUpdateTests.java @@ -42,7 +42,7 @@ public Settings nodeSettings(int nodeOrdinal) { public void testThatBulkUpdateDoesNotLoseFields() { assertEquals(DocWriteResponse.Result.CREATED, client().prepareIndex("index1", "type").setSource("{\"test\": \"test\"}", XContentType.JSON).setId("1").get().getResult()); - GetResponse getResponse = internalCluster().transportClient().prepareGet("index1", "type", "1").get(); + GetResponse getResponse = client().prepareGet("index1", "type", "1").get(); assertEquals("test", getResponse.getSource().get("test")); if (randomBoolean()) { @@ -50,9 +50,9 @@ public void testThatBulkUpdateDoesNotLoseFields() { } // update with a new field - assertEquals(DocWriteResponse.Result.UPDATED, internalCluster().transportClient().prepareUpdate("index1", "type", "1") + assertEquals(DocWriteResponse.Result.UPDATED, client().prepareUpdate("index1", "type", "1") .setDoc("{\"not test\": \"not test\"}", XContentType.JSON).get().getResult()); - getResponse = internalCluster().transportClient().prepareGet("index1", "type", "1").get(); + getResponse = client().prepareGet("index1", "type", "1").get(); assertEquals("test", getResponse.getSource().get("test")); assertEquals("not test", getResponse.getSource().get("not test")); @@ -61,10 +61,10 @@ public void testThatBulkUpdateDoesNotLoseFields() { flushAndRefresh(); // do it in a bulk - BulkResponse response = internalCluster().transportClient().prepareBulk().add(client().prepareUpdate("index1", "type", "1") + BulkResponse response = client().prepareBulk().add(client().prepareUpdate("index1", "type", "1") .setDoc("{\"bulk updated\": \"bulk updated\"}", XContentType.JSON)).get(); assertEquals(DocWriteResponse.Result.UPDATED, response.getItems()[0].getResponse().getResult()); - getResponse = internalCluster().transportClient().prepareGet("index1", "type", "1").get(); + getResponse = client().prepareGet("index1", "type", "1").get(); assertEquals("test", getResponse.getSource().get("test")); assertEquals("not test", getResponse.getSource().get("not test")); assertEquals("bulk updated", getResponse.getSource().get("bulk updated")); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/ClearRolesCacheTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/ClearRolesCacheTests.java index 6d7eacfe26cfa..2a9f2017c50f3 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/ClearRolesCacheTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/ClearRolesCacheTests.java @@ -74,7 +74,7 @@ protected boolean addMockHttpTransport() { } public void testModifyingViaApiClearsCache() throws Exception { - Client client = internalCluster().transportClient(); + Client client = client(); SecurityClient securityClient = securityClient(client); int modifiedRolesCount = randomIntBetween(1, roles.length); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/FieldLevelSecurityTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/FieldLevelSecurityTests.java index 3055d1b0f456b..5e83ef99563d9 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/FieldLevelSecurityTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/FieldLevelSecurityTests.java @@ -64,7 +64,7 @@ import static org.hamcrest.Matchers.nullValue; // The random usage of meta fields such as _timestamp add noise to the test, so disable random index templates: -@ESIntegTestCase.ClusterScope +@ESIntegTestCase.ClusterScope(transportClientRatio = 0.0) public class FieldLevelSecurityTests extends SecurityIntegTestCase { protected static final SecureString USERS_PASSWD = new SecureString("change_me".toCharArray()); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/MultipleIndicesPermissionsTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/MultipleIndicesPermissionsTests.java index da03e9ffe3d1e..2035a8b6c19dd 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/MultipleIndicesPermissionsTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/MultipleIndicesPermissionsTests.java @@ -125,7 +125,7 @@ public void testSingleRole() throws Exception { refresh(); - Client client = internalCluster().transportClient(); + Client client = client(); // no specifying an index, should replace indices with the permitted ones (test & test1) SearchResponse searchResponse = client.prepareSearch().setQuery(matchAllQuery()).get(); @@ -246,7 +246,7 @@ public void testMultipleRoles() throws Exception { refresh(); - Client client = internalCluster().transportClient(); + Client client = client(); SearchResponse response = client .filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user_a", USERS_PASSWD))) diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/PermissionPrecedenceTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/PermissionPrecedenceTests.java index c07491dc86314..fb85061110e08 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/PermissionPrecedenceTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/PermissionPrecedenceTests.java @@ -72,24 +72,14 @@ protected SecureString nodeClientPassword() { return new SecureString("test123".toCharArray()); } - @Override - protected String transportClientUsername() { - return "admin"; - } - - @Override - protected SecureString transportClientPassword() { - return new SecureString("test123".toCharArray()); - } - public void testDifferentCombinationsOfIndices() throws Exception { - Client client = internalCluster().transportClient(); + Client client = client(); // first lets try with "admin"... all should work AcknowledgedResponse putResponse = client .filterWithHeader(Collections.singletonMap(UsernamePasswordToken.BASIC_AUTH_HEADER, - basicAuthHeaderValue(transportClientUsername(), transportClientPassword()))) + basicAuthHeaderValue(nodeClientUsername(), nodeClientPassword()))) .admin().indices().preparePutTemplate("template1") .setPatterns(Collections.singletonList("test_*")) .get(); @@ -103,7 +93,7 @@ public void testDifferentCombinationsOfIndices() throws Exception { // now lets try with "user" Map auth = Collections.singletonMap(UsernamePasswordToken.BASIC_AUTH_HEADER, basicAuthHeaderValue("user", - transportClientPassword())); + nodeClientPassword())); assertThrowsAuthorizationException(client.filterWithHeader(auth).admin().indices().preparePutTemplate("template1") .setPatterns(Collections.singletonList("test_*"))::get, PutIndexTemplateAction.NAME, "user"); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/SecurityClearScrollTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/SecurityClearScrollTests.java index 9f86887566ac4..4c189e3e7f3da 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/SecurityClearScrollTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/SecurityClearScrollTests.java @@ -93,7 +93,7 @@ public void testThatClearingAllScrollIdsWorks() throws Exception { Map headers = new HashMap<>(); headers.put(SecurityField.USER_SETTING.getKey(), user); headers.put(BASIC_AUTH_HEADER, basicAuth); - ClearScrollResponse clearScrollResponse = internalCluster().transportClient().filterWithHeader(headers) + ClearScrollResponse clearScrollResponse = client().filterWithHeader(headers) .prepareClearScroll() .addScrollId("_all").get(); assertThat(clearScrollResponse.isSucceeded(), is(true)); @@ -107,7 +107,7 @@ public void testThatClearingAllScrollIdsRequirePermissions() throws Exception { Map headers = new HashMap<>(); headers.put(SecurityField.USER_SETTING.getKey(), user); headers.put(BASIC_AUTH_HEADER, basicAuth); - assertThrows(internalCluster().transportClient().filterWithHeader(headers) + assertThrows(client().filterWithHeader(headers) .prepareClearScroll() .addScrollId("_all"), ElasticsearchSecurityException.class, "action [cluster:admin/indices/scroll/clear_all] is unauthorized for user [denied_user]"); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/ShrinkIndexWithSecurityTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/ShrinkIndexWithSecurityTests.java index 87db72bcf0285..349bef3fc3152 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/ShrinkIndexWithSecurityTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/ShrinkIndexWithSecurityTests.java @@ -18,7 +18,7 @@ /** * Integration test that uses multiple data nodes to test that the shrink index api works with security. */ -@ClusterScope(minNumDataNodes = 2) +@ClusterScope(minNumDataNodes = 2, transportClientRatio = 0.0) public class ShrinkIndexWithSecurityTests extends SecurityIntegTestCase { @Override diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/test/NativeRealmIntegTestCase.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/test/NativeRealmIntegTestCase.java index 671a94452fa0a..78d95ecbca0b8 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/test/NativeRealmIntegTestCase.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/test/NativeRealmIntegTestCase.java @@ -46,7 +46,7 @@ public void stopESNativeStores() throws Exception { if (getCurrentClusterScope() == Scope.SUITE) { // Clear the realm cache for all realms since we use a SUITE scoped cluster - SecurityClient client = securityClient(internalCluster().transportClient()); + SecurityClient client = securityClient(client()); client.prepareClearRealmCache().get(); } } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/test/SecurityIntegTestCase.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/test/SecurityIntegTestCase.java index 462e4e26541e6..d862d248976da 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/test/SecurityIntegTestCase.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/test/SecurityIntegTestCase.java @@ -7,7 +7,6 @@ import io.netty.util.ThreadDeathWatcher; import io.netty.util.concurrent.GlobalEventExecutor; - import org.elasticsearch.Version; import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; import org.elasticsearch.action.admin.cluster.node.info.NodeInfo; @@ -26,7 +25,6 @@ import org.elasticsearch.cluster.routing.IndexRoutingTable; import org.elasticsearch.common.Strings; import org.elasticsearch.common.network.NetworkAddress; -import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.settings.MockSecureSettings; import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.Settings; @@ -40,7 +38,6 @@ import org.elasticsearch.plugins.Plugin; import org.elasticsearch.xpack.core.XPackClient; import org.elasticsearch.xpack.core.XPackSettings; -import org.elasticsearch.xpack.core.security.SecurityField; import org.elasticsearch.xpack.core.security.authc.support.Hasher; import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken; import org.elasticsearch.xpack.core.security.client.SecurityClient; @@ -77,6 +74,7 @@ * * @see SecuritySettingsSource */ +@ESIntegTestCase.ClusterScope(transportClientRatio = 0.0) public abstract class SecurityIntegTestCase extends ESIntegTestCase { private static SecuritySettingsSource SECURITY_DEFAULT_SETTINGS; @@ -260,14 +258,6 @@ protected Path nodeConfigPath(int nodeOrdinal) { return customSecuritySettingsSource.nodeConfigPath(nodeOrdinal); } - @Override - protected Settings transportClientSettings() { - return Settings.builder().put(super.transportClientSettings()) - .put(NetworkModule.TRANSPORT_TYPE_KEY, SecurityField.NIO) - .put(customSecuritySettingsSource.transportClientSettings()) - .build(); - } - @Override protected boolean addMockTransportService() { return false; // security has its own transport service @@ -278,19 +268,6 @@ protected Collection> nodePlugins() { return customSecuritySettingsSource.nodePlugins(); } - @Override - protected Collection> transportClientPlugins() { - return customSecuritySettingsSource.transportClientPlugins(); - } - - @Override - protected Settings externalClusterClientSettings() { - return Settings.builder() - .put(SecurityField.USER_SETTING.getKey(), SecuritySettingsSource.TEST_USER_NAME + ":" - + SecuritySettingsSourceField.TEST_PASSWORD) - .build(); - } - /** * Allows to override the users config file when the {@link org.elasticsearch.test.ESIntegTestCase.ClusterScope} is set to * {@link org.elasticsearch.test.ESIntegTestCase.Scope#SUITE} or {@link org.elasticsearch.test.ESIntegTestCase.Scope#TEST} @@ -333,24 +310,6 @@ protected SecureString nodeClientPassword() { return SECURITY_DEFAULT_SETTINGS.nodeClientPassword(); } - /** - * Allows to override the transport client username (used while sending requests to the test cluster) when the - * {@link org.elasticsearch.test.ESIntegTestCase.ClusterScope} is set to - * {@link org.elasticsearch.test.ESIntegTestCase.Scope#SUITE} or {@link org.elasticsearch.test.ESIntegTestCase.Scope#TEST} - */ - protected String transportClientUsername() { - return SECURITY_DEFAULT_SETTINGS.transportClientUsername(); - } - - /** - * Allows to override the transport client password (used while sending requests to the test cluster) when the - * {@link org.elasticsearch.test.ESIntegTestCase.ClusterScope} is set to - * {@link org.elasticsearch.test.ESIntegTestCase.Scope#SUITE} or {@link org.elasticsearch.test.ESIntegTestCase.Scope#TEST} - */ - protected SecureString transportClientPassword() { - return SECURITY_DEFAULT_SETTINGS.transportClientPassword(); - } - /** * Allows to control whether ssl key information is auto generated or not on the transport layer */ @@ -392,16 +351,6 @@ protected String nodeClientUsername() { protected SecureString nodeClientPassword() { return SecurityIntegTestCase.this.nodeClientPassword(); } - - @Override - protected String transportClientUsername() { - return SecurityIntegTestCase.this.transportClientUsername(); - } - - @Override - protected SecureString transportClientPassword() { - return SecurityIntegTestCase.this.transportClientPassword(); - } } protected static void assertGreenClusterState(Client client) { diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java index 8e6e00f32a90e..768bc38813c0b 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java @@ -118,8 +118,8 @@ protected SSLService getSslService() { }; ThreadPool threadPool = mock(ThreadPool.class); ClusterService clusterService = mock(ClusterService.class); - settings = Security.additionalSettings(settings, true, false); - Set> allowedSettings = new HashSet<>(Security.getSettings(false, null)); + settings = Security.additionalSettings(settings, true); + Set> allowedSettings = new HashSet<>(Security.getSettings(null)); allowedSettings.addAll(ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); ClusterSettings clusterSettings = new ClusterSettings(settings, allowedSettings); when(clusterService.getClusterSettings()).thenReturn(clusterSettings); @@ -180,7 +180,7 @@ public void testDisabledByDefault() throws Exception { } public void testHttpSettingDefaults() throws Exception { - final Settings defaultSettings = Security.additionalSettings(Settings.EMPTY, true, false); + final Settings defaultSettings = Security.additionalSettings(Settings.EMPTY, true); assertThat(SecurityField.NAME4, equalTo(NetworkModule.TRANSPORT_TYPE_SETTING.get(defaultSettings))); assertThat(SecurityField.NAME4, equalTo(NetworkModule.HTTP_TYPE_SETTING.get(defaultSettings))); } @@ -189,7 +189,7 @@ public void testTransportSettingNetty4Both() { Settings both4 = Security.additionalSettings(Settings.builder() .put(NetworkModule.TRANSPORT_TYPE_KEY, SecurityField.NAME4) .put(NetworkModule.HTTP_TYPE_KEY, SecurityField.NAME4) - .build(), true, false); + .build(), true); assertFalse(NetworkModule.TRANSPORT_TYPE_SETTING.exists(both4)); assertFalse(NetworkModule.HTTP_TYPE_SETTING.exists(both4)); } @@ -198,13 +198,13 @@ public void testTransportSettingValidation() { final String badType = randomFrom("netty4", "other", "security1"); Settings settingsTransport = Settings.builder().put(NetworkModule.TRANSPORT_TYPE_KEY, badType).build(); IllegalArgumentException badTransport = expectThrows(IllegalArgumentException.class, - () -> Security.additionalSettings(settingsTransport, true, false)); + () -> Security.additionalSettings(settingsTransport, true)); assertThat(badTransport.getMessage(), containsString(SecurityField.NAME4)); assertThat(badTransport.getMessage(), containsString(NetworkModule.TRANSPORT_TYPE_KEY)); Settings settingsHttp = Settings.builder().put(NetworkModule.HTTP_TYPE_KEY, badType).build(); IllegalArgumentException badHttp = expectThrows(IllegalArgumentException.class, - () -> Security.additionalSettings(settingsHttp, true, false)); + () -> Security.additionalSettings(settingsHttp, true)); assertThat(badHttp.getMessage(), containsString(SecurityField.NAME4)); assertThat(badHttp.getMessage(), containsString(NetworkModule.HTTP_TYPE_KEY)); } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/TemplateUpgraderTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/TemplateUpgraderTests.java index f6e5552ddbc53..b04b8c8ac3d36 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/TemplateUpgraderTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/TemplateUpgraderTests.java @@ -32,7 +32,7 @@ * templates when started within security, as this requires certain * system privileges */ -@ClusterScope(maxNumDataNodes = 1, scope = Scope.SUITE, numClientNodes = 0) +@ClusterScope(maxNumDataNodes = 1, scope = Scope.SUITE, numClientNodes = 0, transportClientRatio = 0.0) public class TemplateUpgraderTests extends SecurityIntegTestCase { public void testTemplatesWorkAsExpected() throws Exception { diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/logfile/AuditTrailSettingsUpdateTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/logfile/AuditTrailSettingsUpdateTests.java index 23408f5668ec9..866c52989af6f 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/logfile/AuditTrailSettingsUpdateTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/logfile/AuditTrailSettingsUpdateTests.java @@ -29,7 +29,7 @@ import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; -@ClusterScope(scope = TEST, numDataNodes = 1) +@ClusterScope(scope = TEST, numDataNodes = 1, transportClientRatio = 0.0) public class AuditTrailSettingsUpdateTests extends SecurityIntegTestCase { private static Settings startupFilterSettings; diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/SecurityScrollTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/SecurityScrollTests.java index f507edf97874f..3290aba27e37f 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/SecurityScrollTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/SecurityScrollTests.java @@ -10,7 +10,6 @@ import org.elasticsearch.action.search.SearchPhaseExecutionException; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.ShardSearchFailure; -import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.search.SearchContextMissingException; import org.elasticsearch.test.SecurityIntegTestCase; @@ -99,14 +98,4 @@ public void testSearchAndClearScroll() throws Exception { public void cleanupSecurityIndex() throws Exception { super.deleteSecurityIndex(); } - - @Override - public String transportClientUsername() { - return this.nodeClientUsername(); - } - - @Override - public SecureString transportClientPassword() { - return this.nodeClientPassword(); - } } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/filter/IpFilteringIntegrationTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/filter/IpFilteringIntegrationTests.java index bc17626b1f426..9f0b7863d30e7 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/filter/IpFilteringIntegrationTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/filter/IpFilteringIntegrationTests.java @@ -5,7 +5,6 @@ */ package org.elasticsearch.xpack.security.transport.filter; -import org.elasticsearch.client.Client; import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.TransportAddress; @@ -61,11 +60,6 @@ public void testThatIpFilteringIsIntegratedIntoNettyPipelineViaHttp() throws Exc } } - public void testThatIpFilteringIsNotAppliedForDefaultTransport() throws Exception { - Client client = internalCluster().transportClient(); - assertGreenClusterState(client); - } - public void testThatIpFilteringIsAppliedForProfile() throws Exception { try (Socket socket = new Socket()){ trySocketConnection(socket, getProfileAddress("client")); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/filter/IpFilteringUpdateTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/filter/IpFilteringUpdateTests.java index 65a5fb080cdb0..96922aa8822e4 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/filter/IpFilteringUpdateTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/filter/IpFilteringUpdateTests.java @@ -21,7 +21,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.hamcrest.Matchers.is; -@ClusterScope(scope = TEST, supportsDedicatedMasters = false, numDataNodes = 1) +@ClusterScope(scope = TEST, supportsDedicatedMasters = false, numDataNodes = 1, transportClientRatio = 0.0) public class IpFilteringUpdateTests extends SecurityIntegTestCase { private static int randomClientPort; diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/netty4/IPHostnameVerificationTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/netty4/IPHostnameVerificationTests.java deleted file mode 100644 index fe1b65e851d0f..0000000000000 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/netty4/IPHostnameVerificationTests.java +++ /dev/null @@ -1,88 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ -package org.elasticsearch.xpack.security.transport.netty4; - -import org.elasticsearch.client.Client; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.test.SecurityIntegTestCase; -import org.elasticsearch.test.SecuritySettingsSource; -import org.elasticsearch.transport.TransportSettings; -import org.elasticsearch.xpack.core.ssl.SSLClientAuth; - -import java.nio.file.Files; -import java.nio.file.Path; -import java.util.ArrayList; -import java.util.List; - -import static org.elasticsearch.discovery.SettingsBasedSeedHostsProvider.DISCOVERY_SEED_HOSTS_SETTING; -import static org.hamcrest.CoreMatchers.is; - -// TODO delete this test? -public class IPHostnameVerificationTests extends SecurityIntegTestCase { - private Path certPath; - private Path keyPath; - - @Override - protected boolean transportSSLEnabled() { - return true; - } - - @Override - protected Settings nodeSettings(int nodeOrdinal) { - Settings settings = super.nodeSettings(nodeOrdinal); - Settings.Builder builder = Settings.builder() - .put(settings.filter((s) -> s.startsWith("xpack.security.transport.ssl.") == false), false); - settings = builder.build(); - - // The default Unicast test behavior is to use 'localhost' with the port number. For this test we need to use IP - List newUnicastAddresses = new ArrayList<>(); - for (String address : settings.getAsList(DISCOVERY_SEED_HOSTS_SETTING.getKey())) { - newUnicastAddresses.add(address.replace("localhost", "127.0.0.1")); - } - - Settings.Builder settingsBuilder = Settings.builder() - .put(settings) - .putList(DISCOVERY_SEED_HOSTS_SETTING.getKey(), newUnicastAddresses); - - try { - //Use a cert with a CN of "Elasticsearch Test Node" and IPv4+IPv6 ip addresses as SubjectAlternativeNames - certPath = getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode-ip-only.crt"); - keyPath = getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode-ip-only.pem"); - assertThat(Files.exists(certPath), is(true)); - } catch (Exception e) { - throw new RuntimeException(e); - } - - SecuritySettingsSource.addSecureSettings(settingsBuilder, secureSettings -> { - secureSettings.setString("xpack.security.transport.ssl.secure_key_passphrase", "testnode-ip-only"); - }); - return settingsBuilder.put("xpack.security.transport.ssl.key", keyPath.toAbsolutePath()) - .put("xpack.security.transport.ssl.certificate", certPath.toAbsolutePath()) - .put("xpack.security.transport.ssl.certificate_authorities", certPath.toAbsolutePath()) - .put(TransportSettings.BIND_HOST.getKey(), "127.0.0.1") - .put("network.host", "127.0.0.1") - .put("xpack.security.transport.ssl.client_authentication", SSLClientAuth.NONE) - .put("xpack.security.transport.ssl.verification_mode", "full") - .build(); - } - - @Override - protected Settings transportClientSettings() { - Settings clientSettings = super.transportClientSettings(); - return Settings.builder().put(clientSettings.filter(k -> k.startsWith("xpack.security.transport.ssl.") == false)) - .put("xpack.security.transport.ssl.verification_mode", "certificate") - .put("xpack.security.transport.ssl.key", keyPath.toAbsolutePath()) - .put("xpack.security.transport.ssl.certificate", certPath.toAbsolutePath()) - .put("xpack.security.transport.ssl.key_passphrase", "testnode-ip-only") - .put("xpack.security.transport.ssl.certificate_authorities", certPath) - .build(); - } - - public void testTransportClientConnectionWorksWithIPOnlyHostnameVerification() throws Exception { - Client client = internalCluster().transportClient(); - assertGreenClusterState(client); - } -} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/ssl/SSLClientAuthTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/ssl/SSLClientAuthTests.java index ce0cc5c111265..88d27d4171a19 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/ssl/SSLClientAuthTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/ssl/SSLClientAuthTests.java @@ -119,7 +119,7 @@ public void testThatHttpWorksWithSslClientAuth() throws IOException { try (RestClient restClient = createRestClient(httpClientBuilder -> httpClientBuilder.setSSLStrategy(sessionStrategy), "https")) { Request request = new Request("GET", "/"); RequestOptions.Builder options = request.getOptions().toBuilder(); - options.addHeader("Authorization", basicAuthHeaderValue(transportClientUsername(), transportClientPassword())); + options.addHeader("Authorization", basicAuthHeaderValue(nodeClientUsername(), nodeClientPassword())); request.setOptions(options); Response response = restClient.performRequest(request); assertThat(response.getStatusLine().getStatusCode(), equalTo(200)); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/ssl/SSLTrustRestrictionsTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/ssl/SSLTrustRestrictionsTests.java index 944c3306763a6..9c540f559b688 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/ssl/SSLTrustRestrictionsTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/ssl/SSLTrustRestrictionsTests.java @@ -49,7 +49,7 @@ * * @see RestrictedTrustManager */ -@ESIntegTestCase.ClusterScope(numDataNodes = 1, numClientNodes = 0, supportsDedicatedMasters = false) +@ESIntegTestCase.ClusterScope(numDataNodes = 1, numClientNodes = 0, supportsDedicatedMasters = false, transportClientRatio = 0.0) @TestLogging("org.elasticsearch.xpack.ssl.RestrictedTrustManager:DEBUG") public class SSLTrustRestrictionsTests extends SecurityIntegTestCase { @@ -149,15 +149,6 @@ private void writeRestrictions(String trustedPattern) { runResourceWatcher(); } - @Override - protected Settings transportClientSettings() { - Settings parentSettings = super.transportClientSettings(); - Settings.Builder builder = Settings.builder() - .put(parentSettings.filter((s) -> s.startsWith("xpack.security.transport.ssl.") == false)) - .put(nodeSSL); - return builder.build(); - } - @Override protected boolean transportSSLEnabled() { return true; diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/Watcher.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/Watcher.java index dd7f268e1a6f5..4226ff4fed0d9 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/Watcher.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/Watcher.java @@ -226,16 +226,14 @@ public class Watcher extends Plugin implements ActionPlugin, ScriptPlugin, Reloa private BulkProcessor bulkProcessor; protected final Settings settings; - protected final boolean transportClient; protected final boolean enabled; protected List reloadableServices = new ArrayList<>(); public Watcher(final Settings settings) { this.settings = settings; - this.transportClient = XPackPlugin.transportClientMode(settings); this.enabled = XPackSettings.WATCHER_ENABLED.get(settings); - if (enabled && transportClient == false) { + if (enabled) { validAutoCreateIndex(settings, logger); } } @@ -433,7 +431,7 @@ public Collection createGuiceModules() { modules.add(b -> b.bind(Clock.class).toInstance(getClock())); //currently assuming the only place clock is bound modules.add(b -> { XPackPlugin.bindFeatureSet(b, WatcherFeatureSet.class); - if (transportClient || enabled == false) { + if (enabled == false) { b.bind(WatcherService.class).toProvider(Providers.of(null)); } }); @@ -567,7 +565,7 @@ public List getRestHandlers(Settings settings, RestController restC @Override public void onIndexModule(IndexModule module) { - if (enabled == false || transportClient) { + if (enabled == false) { return; } @@ -676,7 +674,7 @@ public void close() throws IOException { */ @Override public void reload(Settings settings) { - if (enabled == false || transportClient) { + if (enabled == false) { return; } reloadableServices.forEach(s -> s.reload(settings)); diff --git a/x-pack/qa/reindex-tests-with-security/src/test/java/org/elasticsearch/xpack/security/ReindexWithSecurityIT.java b/x-pack/qa/reindex-tests-with-security/src/test/java/org/elasticsearch/xpack/security/ReindexWithSecurityIT.java index 0a75565fbc075..855162a0b86fc 100644 --- a/x-pack/qa/reindex-tests-with-security/src/test/java/org/elasticsearch/xpack/security/ReindexWithSecurityIT.java +++ b/x-pack/qa/reindex-tests-with-security/src/test/java/org/elasticsearch/xpack/security/ReindexWithSecurityIT.java @@ -5,91 +5,175 @@ */ package org.elasticsearch.xpack.security; -import org.elasticsearch.index.reindex.BulkByScrollResponse; -import org.elasticsearch.common.network.NetworkModule; +import org.elasticsearch.ElasticsearchStatusException; +import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest; +import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest.AliasActions; +import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.client.RequestOptions; +import org.elasticsearch.client.RestHighLevelClient; +import org.elasticsearch.common.io.PathUtils; +import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.index.IndexNotFoundException; +import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.index.query.QueryBuilders; -import org.elasticsearch.index.reindex.DeleteByQueryAction; -import org.elasticsearch.index.reindex.DeleteByQueryRequestBuilder; -import org.elasticsearch.index.reindex.ReindexAction; -import org.elasticsearch.index.reindex.ReindexRequestBuilder; -import org.elasticsearch.index.reindex.UpdateByQueryAction; -import org.elasticsearch.index.reindex.UpdateByQueryRequestBuilder; -import org.elasticsearch.test.SecurityIntegTestCase; -import org.elasticsearch.xpack.core.security.SecurityField; - - +import org.elasticsearch.index.reindex.BulkByScrollResponse; +import org.elasticsearch.index.reindex.DeleteByQueryRequest; +import org.elasticsearch.index.reindex.ReindexRequest; +import org.elasticsearch.index.reindex.UpdateByQueryRequest; +import org.elasticsearch.test.rest.ESRestTestCase; +import org.junit.AfterClass; +import org.junit.BeforeClass; + +import java.io.FileNotFoundException; +import java.io.IOException; +import java.net.URL; +import java.nio.file.Path; +import java.util.Collections; + +import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.is; + + +public class ReindexWithSecurityIT extends ESRestTestCase { + + private static final String USER = "test_admin"; + private static final String PASS = "x-pack-test-password"; + + private static Path httpTrustStore; + + @BeforeClass + public static void findTrustStore( ) throws Exception { + final URL resource = ReindexWithSecurityClientYamlTestSuiteIT.class.getResource("/ssl/ca.p12"); + if (resource == null) { + throw new FileNotFoundException("Cannot find classpath resource /ssl/ca.p12"); + } + httpTrustStore = PathUtils.get(resource.toURI()); + } -public class ReindexWithSecurityIT extends SecurityIntegTestCase { + @AfterClass + public static void cleanupStatics() { + httpTrustStore = null; + } @Override - protected Settings externalClusterClientSettings() { - Settings.Builder builder = Settings.builder().put(super.externalClusterClientSettings()); - builder.put(NetworkModule.TRANSPORT_TYPE_KEY, SecurityField.NAME4); - builder.put(SecurityField.USER_SETTING.getKey(), "test_admin:x-pack-test-password"); - return builder.build(); + protected String getProtocol() { + return "https"; } /** - * TODO: this entire class should be removed. SecurityIntegTestCase is meant for tests, but we run against real xpack + * All tests run as a an administrative user but use es-security-runas-user to become a less privileged user. */ @Override - public void doAssertXPackIsInstalled() { - // this assertion doesn't make sense with a real distribution, since there is not currently a way - // from nodes info to see which modules are loaded + protected Settings restClientSettings() { + String token = basicAuthHeaderValue(USER, new SecureString(PASS.toCharArray())); + return Settings.builder() + .put(ThreadContext.PREFIX + ".Authorization", token) + .put(TRUSTSTORE_PATH , httpTrustStore) + .put(TRUSTSTORE_PASSWORD, "password") + .build(); } - public void testDeleteByQuery() { + public void testDeleteByQuery() throws IOException { createIndicesWithRandomAliases("test1", "test2", "test3"); - BulkByScrollResponse response = new DeleteByQueryRequestBuilder(client(), DeleteByQueryAction.INSTANCE) - .source("test1", "test2") - .filter(QueryBuilders.matchAllQuery()) - .get(); + RestHighLevelClient restClient = new TestRestHighLevelClient(); + BulkByScrollResponse response = restClient.deleteByQuery((DeleteByQueryRequest) new DeleteByQueryRequest() + .setQuery(QueryBuilders.matchAllQuery()) + .indices("test1", "test2"), RequestOptions.DEFAULT); assertNotNull(response); - response = new DeleteByQueryRequestBuilder(client(), DeleteByQueryAction.INSTANCE) - .source("test*") - .filter(QueryBuilders.matchAllQuery()) - .get(); + response = restClient.deleteByQuery((DeleteByQueryRequest) new DeleteByQueryRequest() + .setQuery(QueryBuilders.matchAllQuery()) + .indices("test*"), RequestOptions.DEFAULT); assertNotNull(response); - IndexNotFoundException e = expectThrows(IndexNotFoundException.class, - () -> new DeleteByQueryRequestBuilder(client(), DeleteByQueryAction.INSTANCE) - .source("test1", "index1") - .filter(QueryBuilders.matchAllQuery()) - .get()); - assertEquals("no such index [index1]", e.getMessage()); + ElasticsearchStatusException e = expectThrows(ElasticsearchStatusException.class, + () -> restClient.deleteByQuery((DeleteByQueryRequest) new DeleteByQueryRequest() + .setQuery(QueryBuilders.matchAllQuery()) + .indices("test1", "index1"), RequestOptions.DEFAULT)); + assertThat(e.getMessage(), containsString("no such index [index1]")); } - public void testUpdateByQuery() { + public void testUpdateByQuery() throws IOException { createIndicesWithRandomAliases("test1", "test2", "test3"); - BulkByScrollResponse response = new UpdateByQueryRequestBuilder(client(), UpdateByQueryAction.INSTANCE) - .source("test1", "test2").get(); + RestHighLevelClient restClient = new TestRestHighLevelClient(); + BulkByScrollResponse response = + restClient.updateByQuery((UpdateByQueryRequest) new UpdateByQueryRequest().indices("test1", "test2"), RequestOptions.DEFAULT); assertNotNull(response); - response = new UpdateByQueryRequestBuilder(client(), UpdateByQueryAction.INSTANCE).source("test*").get(); + response = restClient.updateByQuery((UpdateByQueryRequest) new UpdateByQueryRequest().indices("test*"), RequestOptions.DEFAULT); assertNotNull(response); - IndexNotFoundException e = expectThrows(IndexNotFoundException.class, - () -> new UpdateByQueryRequestBuilder(client(), UpdateByQueryAction.INSTANCE).source("test1", "index1").get()); - assertEquals("no such index [index1]", e.getMessage()); + ElasticsearchStatusException e = expectThrows(ElasticsearchStatusException.class, + () -> restClient.updateByQuery((UpdateByQueryRequest) new UpdateByQueryRequest().indices("test1", "index1"), + RequestOptions.DEFAULT)); + assertThat(e.getMessage(), containsString("no such index [index1]")); } - public void testReindex() { + public void testReindex() throws IOException { createIndicesWithRandomAliases("test1", "test2", "test3", "dest"); - BulkByScrollResponse response = new ReindexRequestBuilder(client(), ReindexAction.INSTANCE).source("test1", "test2") - .destination("dest").get(); + RestHighLevelClient restClient = new TestRestHighLevelClient(); + BulkByScrollResponse response = restClient.reindex(new ReindexRequest().setSourceIndices("test1", "test2").setDestIndex("dest"), + RequestOptions.DEFAULT); assertNotNull(response); - response = new ReindexRequestBuilder(client(), ReindexAction.INSTANCE).source("test*").destination("dest").get(); + response = restClient.reindex(new ReindexRequest().setSourceIndices("test*").setDestIndex("dest"), + RequestOptions.DEFAULT); assertNotNull(response); - IndexNotFoundException e = expectThrows(IndexNotFoundException.class, - () -> new ReindexRequestBuilder(client(), ReindexAction.INSTANCE).source("test1", "index1").destination("dest").get()); - assertEquals("no such index [index1]", e.getMessage()); + ElasticsearchStatusException e = expectThrows(ElasticsearchStatusException.class, + () -> restClient.reindex(new ReindexRequest().setSourceIndices("test1", "index1").setDestIndex("dest"), + RequestOptions.DEFAULT)); + assertThat(e.getMessage(), containsString("no such index [index1]")); + } + + /** + * Creates the indices provided as argument, randomly associating them with aliases, indexes one dummy document per index + * and refreshes the new indices + */ + private void createIndicesWithRandomAliases(String... indices) throws IOException { + for (String index : indices) { + createIndex(index, Settings.EMPTY); + } + + RestHighLevelClient restClient = new TestRestHighLevelClient(); + if (frequently()) { + boolean aliasAdded = false; + + IndicesAliasesRequest request = new IndicesAliasesRequest(); + for (String index : indices) { + if (frequently()) { + //one alias per index with prefix "alias-" + request.addAliasAction(AliasActions.add().index(index).alias("alias-" + index)); + aliasAdded = true; + } + } + // If we get to this point and we haven't added an alias to the request we need to add one + // or the request will fail so use noAliasAdded to force adding the alias in this case + if (aliasAdded == false || randomBoolean()) { + //one alias pointing to all indices + for (String index : indices) { + request.addAliasAction(AliasActions.add().index(index).alias("alias")); + } + } + AcknowledgedResponse response = restClient.indices().updateAliases(request, RequestOptions.DEFAULT); + assertThat(response.isAcknowledged(), is(true)); + } + + for (String index : indices) { + restClient.index(new IndexRequest(index).source("field", "value"), RequestOptions.DEFAULT); + } + restClient.indices().refresh(new RefreshRequest(indices), RequestOptions.DEFAULT); + } + + private class TestRestHighLevelClient extends RestHighLevelClient { + TestRestHighLevelClient() { + super(client(), restClient -> {}, Collections.emptyList()); + } } } diff --git a/x-pack/qa/security-client-tests/build.gradle b/x-pack/qa/security-client-tests/build.gradle deleted file mode 100644 index 556e36e51467f..0000000000000 --- a/x-pack/qa/security-client-tests/build.gradle +++ /dev/null @@ -1,40 +0,0 @@ -apply plugin: 'elasticsearch.standalone-rest-test' -apply plugin: 'elasticsearch.rest-test' - -dependencies { - testCompile "org.elasticsearch.plugin:x-pack-core:${version}" - testCompile project(path: xpackProject('transport-client').path, configuration: 'runtime') -} - -String outputDir = "${buildDir}/generated-resources/${project.name}" -task copyXPackPluginProps(type: Copy) { - from project(xpackModule('core')).file('src/main/plugin-metadata') - from project(xpackModule('core')).tasks.pluginProperties - into outputDir -} -project.sourceSets.test.output.dir(outputDir, builtBy: copyXPackPluginProps) - -integTestRunner { - systemProperty 'tests.security.manager', 'false' -} - -integTestCluster { - setting 'xpack.ilm.enabled', 'false' - setting 'xpack.security.enabled', 'true' - setting 'xpack.ml.enabled', 'false' - setting 'xpack.license.self_generated.type', 'trial' - setupCommand 'setupDummyUser', - 'bin/elasticsearch-users', 'useradd', 'test_user', '-p', 'x-pack-test-password', '-r', 'superuser' - setupCommand 'setupTransportClientUser', - 'bin/elasticsearch-users', 'useradd', 'transport', '-p', 'x-pack-test-password', '-r', 'transport_client' - waitCondition = { node, ant -> - File tmpFile = new File(node.cwd, 'wait.success') - ant.get(src: "http://${node.httpUri()}/_cluster/health?wait_for_nodes=>=${numNodes}&wait_for_status=yellow", - dest: tmpFile.toString(), - username: 'test_user', - password: 'x-pack-test-password', - ignoreerrors: true, - retries: 10) - return tmpFile.exists() - } -} diff --git a/x-pack/qa/security-client-tests/src/test/java/org/elasticsearch/xpack/security/qa/SecurityTransportClientIT.java b/x-pack/qa/security-client-tests/src/test/java/org/elasticsearch/xpack/security/qa/SecurityTransportClientIT.java deleted file mode 100644 index 519f365d515a0..0000000000000 --- a/x-pack/qa/security-client-tests/src/test/java/org/elasticsearch/xpack/security/qa/SecurityTransportClientIT.java +++ /dev/null @@ -1,125 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ -package org.elasticsearch.xpack.security.qa; - -import org.elasticsearch.ElasticsearchSecurityException; -import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; -import org.elasticsearch.action.admin.cluster.node.info.NodeInfo; -import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse; -import org.elasticsearch.client.transport.TransportClient; -import org.elasticsearch.common.network.NetworkModule; -import org.elasticsearch.common.settings.SecureString; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.TransportAddress; -import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.test.ESIntegTestCase; -import org.elasticsearch.xpack.core.XPackClientPlugin; -import org.elasticsearch.xpack.client.PreBuiltXPackTransportClient; -import org.elasticsearch.xpack.core.security.SecurityField; - -import java.util.Collection; -import java.util.Collections; -import java.util.List; -import java.util.concurrent.TimeUnit; - -import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue; -import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.is; - -/** - * Integration tests that test a transport client with security being loaded that connect to an external cluster - */ -public class SecurityTransportClientIT extends ESIntegTestCase { - static final String ADMIN_USER_PW = "test_user:x-pack-test-password"; - static final String TRANSPORT_USER_PW = "transport:x-pack-test-password"; - - @Override - protected Settings externalClusterClientSettings() { - return Settings.builder() - .put(SecurityField.USER_SETTING.getKey(), ADMIN_USER_PW) - .put(NetworkModule.TRANSPORT_TYPE_KEY, "security4") - .build(); - } - - @Override - protected Collection> transportClientPlugins() { - return Collections.singletonList(XPackClientPlugin.class); - } - - public void testThatTransportClientWithoutAuthenticationDoesNotWork() throws Exception { - try (TransportClient client = transportClient(Settings.EMPTY)) { - boolean connected = awaitBusy(() -> { - return client.connectedNodes().size() > 0; - }, 5L, TimeUnit.SECONDS); - - assertThat(connected, is(false)); - } - } - - public void testThatTransportClientAuthenticationWithTransportClientRole() throws Exception { - Settings settings = Settings.builder() - .put(SecurityField.USER_SETTING.getKey(), TRANSPORT_USER_PW) - .build(); - try (TransportClient client = transportClient(settings)) { - boolean connected = awaitBusy(() -> { - return client.connectedNodes().size() > 0; - }, 5L, TimeUnit.SECONDS); - - assertThat(connected, is(true)); - - // this checks that the transport client is really running in a limited state - try { - client.admin().cluster().prepareHealth().get(); - fail("the transport user should not be be able to get health!"); - } catch (ElasticsearchSecurityException e) { - assertThat(e.toString(), containsString("unauthorized")); - } - } - } - - public void testTransportClientWithAdminUser() throws Exception { - final boolean useTransportUser = randomBoolean(); - Settings settings = Settings.builder() - .put(SecurityField.USER_SETTING.getKey(), useTransportUser ? TRANSPORT_USER_PW : ADMIN_USER_PW) - .build(); - try (TransportClient client = transportClient(settings)) { - boolean connected = awaitBusy(() -> { - return client.connectedNodes().size() > 0; - }, 5L, TimeUnit.SECONDS); - - assertThat(connected, is(true)); - - // this checks that the transport client is really running in a limited state - ClusterHealthResponse response; - if (useTransportUser) { - response = client.filterWithHeader(Collections.singletonMap("Authorization", - basicAuthHeaderValue("test_user", new SecureString("x-pack-test-password".toCharArray())))) - .admin().cluster().prepareHealth().get(); - } else { - response = client.admin().cluster().prepareHealth().get(); - } - - assertThat(response.isTimedOut(), is(false)); - } - } - - TransportClient transportClient(Settings extraSettings) { - NodesInfoResponse nodeInfos = client().admin().cluster().prepareNodesInfo().get(); - List nodes = nodeInfos.getNodes(); - assertTrue(nodes.isEmpty() == false); - TransportAddress publishAddress = randomFrom(nodes).getTransport().address().publishAddress(); - String clusterName = nodeInfos.getClusterName().value(); - - Settings settings = Settings.builder() - .put(extraSettings) - .put("cluster.name", clusterName) - .build(); - - TransportClient client = new PreBuiltXPackTransportClient(settings); - client.addTransportAddress(publishAddress); - return client; - } -} diff --git a/x-pack/qa/security-example-spi-extension/build.gradle b/x-pack/qa/security-example-spi-extension/build.gradle index 1ff65519c367d..4790df3609c35 100644 --- a/x-pack/qa/security-example-spi-extension/build.gradle +++ b/x-pack/qa/security-example-spi-extension/build.gradle @@ -9,7 +9,7 @@ esplugin { dependencies { compileOnly "org.elasticsearch.plugin:x-pack-core:${version}" - testCompile project(path: xpackProject('transport-client').path, configuration: 'runtime') + testCompile "org.elasticsearch.client:elasticsearch-rest-high-level-client:${version}" } diff --git a/x-pack/qa/security-example-spi-extension/src/test/java/org/elasticsearch/example/realm/CustomRealmIT.java b/x-pack/qa/security-example-spi-extension/src/test/java/org/elasticsearch/example/realm/CustomRealmIT.java index 4487187a80b6d..e75c7705ef9bf 100644 --- a/x-pack/qa/security-example-spi-extension/src/test/java/org/elasticsearch/example/realm/CustomRealmIT.java +++ b/x-pack/qa/security-example-spi-extension/src/test/java/org/elasticsearch/example/realm/CustomRealmIT.java @@ -5,60 +5,43 @@ */ package org.elasticsearch.example.realm; -import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; -import org.elasticsearch.action.admin.cluster.node.info.NodeInfo; -import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse; +import org.apache.http.util.EntityUtils; import org.elasticsearch.client.Request; import org.elasticsearch.client.RequestOptions; import org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseException; -import org.elasticsearch.client.transport.NoNodeAvailableException; -import org.elasticsearch.client.transport.TransportClient; -import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.util.concurrent.ThreadContext; -import org.elasticsearch.env.Environment; -import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.test.ESIntegTestCase; -import org.elasticsearch.xpack.client.PreBuiltXPackTransportClient; -import org.elasticsearch.xpack.core.XPackClientPlugin; - -import java.util.Collection; -import java.util.Collections; -import java.util.List; +import org.elasticsearch.test.rest.ESRestTestCase; +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.not; /** * Integration test to test authentication with the custom realm */ -public class CustomRealmIT extends ESIntegTestCase { +public class CustomRealmIT extends ESRestTestCase { @Override - protected Settings externalClusterClientSettings() { + protected Settings restClientSettings() { return Settings.builder() .put(ThreadContext.PREFIX + "." + CustomRealm.USER_HEADER, CustomRealm.KNOWN_USER) .put(ThreadContext.PREFIX + "." + CustomRealm.PW_HEADER, CustomRealm.KNOWN_PW.toString()) - .put(NetworkModule.TRANSPORT_TYPE_KEY, "security4") .build(); } - @Override - protected Collection> transportClientPlugins() { - return Collections.>singleton(XPackClientPlugin.class); - } - - public void testHttpConnectionWithNoAuthentication() throws Exception { - try { - getRestClient().performRequest(new Request("GET", "/")); - fail("request should have failed"); - } catch(ResponseException e) { - Response response = e.getResponse(); - assertThat(response.getStatusLine().getStatusCode(), is(401)); - String value = response.getHeader("WWW-Authenticate"); - assertThat(value, is("custom-challenge")); - } + public void testHttpConnectionWithNoAuthentication() { + Request request = new Request("GET", "/"); + RequestOptions.Builder builder = RequestOptions.DEFAULT.toBuilder(); + builder.addHeader(CustomRealm.USER_HEADER, ""); + builder.addHeader(CustomRealm.PW_HEADER, ""); + request.setOptions(builder); + ResponseException e = expectThrows(ResponseException.class, () -> client().performRequest(request)); + Response response = e.getResponse(); + assertThat(response.getStatusLine().getStatusCode(), is(401)); + String value = response.getHeader("WWW-Authenticate"); + assertThat(value, is("custom-challenge")); } public void testHttpAuthentication() throws Exception { @@ -67,59 +50,16 @@ public void testHttpAuthentication() throws Exception { options.addHeader(CustomRealm.USER_HEADER, CustomRealm.KNOWN_USER); options.addHeader(CustomRealm.PW_HEADER, CustomRealm.KNOWN_PW.toString()); request.setOptions(options); - Response response = getRestClient().performRequest(request); + Response response = client().performRequest(request); assertThat(response.getStatusLine().getStatusCode(), is(200)); } - public void testTransportClient() throws Exception { - NodesInfoResponse nodeInfos = client().admin().cluster().prepareNodesInfo().get(); - List nodes = nodeInfos.getNodes(); - assertTrue(nodes.isEmpty() == false); - TransportAddress publishAddress = randomFrom(nodes).getTransport().address().publishAddress(); - String clusterName = nodeInfos.getClusterName().value(); - - Settings settings = Settings.builder() - .put("cluster.name", clusterName) - .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toAbsolutePath().toString()) - .put(ThreadContext.PREFIX + "." + CustomRealm.USER_HEADER, CustomRealm.KNOWN_USER) - .put(ThreadContext.PREFIX + "." + CustomRealm.PW_HEADER, CustomRealm.KNOWN_PW.toString()) - .build(); - try (TransportClient client = new PreBuiltXPackTransportClient(settings)) { - client.addTransportAddress(publishAddress); - ClusterHealthResponse response = client.admin().cluster().prepareHealth().execute().actionGet(); - assertThat(response.isTimedOut(), is(false)); - } - } - - public void testTransportClientWrongAuthentication() throws Exception { - NodesInfoResponse nodeInfos = client().admin().cluster().prepareNodesInfo().get(); - List nodes = nodeInfos.getNodes(); - assertTrue(nodes.isEmpty() == false); - TransportAddress publishAddress = randomFrom(nodes).getTransport().address().publishAddress(); - String clusterName = nodeInfos.getClusterName().value(); - - Settings settings = Settings.builder() - .put("cluster.name", clusterName) - .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toAbsolutePath().toString()) - .put(ThreadContext.PREFIX + "." + CustomRealm.USER_HEADER, CustomRealm.KNOWN_USER + randomAlphaOfLength(1)) - .put(ThreadContext.PREFIX + "." + CustomRealm.PW_HEADER, CustomRealm.KNOWN_PW.toString()) - .build(); - try (TransportClient client = new PreBuiltXPackTransportClient(settings)) { - client.addTransportAddress(publishAddress); - client.admin().cluster().prepareHealth().execute().actionGet(); - fail("authentication failure should have resulted in a NoNodesAvailableException"); - } catch (NoNodeAvailableException e) { - // expected - } - } - public void testSettingsFiltering() throws Exception { - NodesInfoResponse nodeInfos = client().admin().cluster().prepareNodesInfo().clear().setSettings(true).get(); - for(NodeInfo info : nodeInfos.getNodes()) { - Settings settings = info.getSettings(); - assertNotNull(settings); - assertNull(settings.get("xpack.security.authc.realms.custom.custom.filtered_setting")); - assertEquals("0", settings.get("xpack.security.authc.realms.custom.custom.order")); - } + Request request = new Request("GET", "/_nodes/_all/settings"); + request.addParameter("flat_settings", "true"); + Response response = client().performRequest(request); + String responseString = EntityUtils.toString(response.getEntity()); + assertThat(responseString, not(containsString("xpack.security.authc.realms.custom.custom.filtered_setting"))); + assertThat(responseString, containsString("xpack.security.authc.realms.custom.custom.order")); } } diff --git a/x-pack/qa/security-example-spi-extension/src/test/java/org/elasticsearch/example/role/CustomRolesProviderIT.java b/x-pack/qa/security-example-spi-extension/src/test/java/org/elasticsearch/example/role/CustomRolesProviderIT.java index 57a895848e3a8..3aab2a36562de 100644 --- a/x-pack/qa/security-example-spi-extension/src/test/java/org/elasticsearch/example/role/CustomRolesProviderIT.java +++ b/x-pack/qa/security-example-spi-extension/src/test/java/org/elasticsearch/example/role/CustomRolesProviderIT.java @@ -9,20 +9,20 @@ import org.elasticsearch.client.RequestOptions; import org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseException; -import org.elasticsearch.common.network.NetworkModule; +import org.elasticsearch.client.RestHighLevelClient; +import org.elasticsearch.client.security.PutUserRequest; +import org.elasticsearch.client.security.RefreshPolicy; +import org.elasticsearch.client.security.user.User; import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.example.realm.CustomRealm; -import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.test.ESIntegTestCase; -import org.elasticsearch.xpack.core.XPackClientPlugin; -import org.elasticsearch.xpack.core.security.authc.support.Hasher; +import org.elasticsearch.test.rest.ESRestTestCase; import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken; -import org.elasticsearch.xpack.core.security.client.SecurityClient; -import java.util.Collection; +import java.io.IOException; import java.util.Collections; +import java.util.List; import static org.elasticsearch.example.role.CustomInMemoryRolesProvider.INDEX; import static org.elasticsearch.example.role.CustomInMemoryRolesProvider.ROLE_A; @@ -33,7 +33,7 @@ /** * Integration test for custom roles providers. */ -public class CustomRolesProviderIT extends ESIntegTestCase { +public class CustomRolesProviderIT extends ESRestTestCase { private static final String TEST_USER = "test_user"; private static final String TEST_PWD = "change_me"; @@ -46,22 +46,17 @@ public class CustomRolesProviderIT extends ESIntegTestCase { } @Override - protected Settings externalClusterClientSettings() { + protected Settings restClientSettings() { return Settings.builder() - .put(ThreadContext.PREFIX + "." + CustomRealm.USER_HEADER, CustomRealm.KNOWN_USER) - .put(ThreadContext.PREFIX + "." + CustomRealm.PW_HEADER, CustomRealm.KNOWN_PW.toString()) - .put(NetworkModule.TRANSPORT_TYPE_KEY, "security4") - .build(); + .put(ThreadContext.PREFIX + "." + CustomRealm.USER_HEADER, CustomRealm.KNOWN_USER) + .put(ThreadContext.PREFIX + "." + CustomRealm.PW_HEADER, CustomRealm.KNOWN_PW.toString()) + .build(); } - @Override - protected Collection> transportClientPlugins() { - return Collections.singleton(XPackClientPlugin.class); - } - - public void setupTestUser(String role) { - SecurityClient securityClient = new SecurityClient(client()); - securityClient.preparePutUser(TEST_USER, TEST_PWD.toCharArray(), Hasher.BCRYPT, role).get(); + public void setupTestUser(String role) throws IOException { + new TestRestHighLevelClient().security().putUser( + PutUserRequest.withPassword(new User(TEST_USER, List.of(role)), TEST_PWD.toCharArray(), true, RefreshPolicy.IMMEDIATE), + RequestOptions.DEFAULT); } public void testAuthorizedCustomRoleSucceeds() throws Exception { @@ -69,7 +64,7 @@ public void testAuthorizedCustomRoleSucceeds() throws Exception { // roleB has all permissions on index "foo", so creating "foo" should succeed Request request = new Request("PUT", "/" + INDEX); request.setOptions(AUTH_OPTIONS); - Response response = getRestClient().performRequest(request); + Response response = client().performRequest(request); assertThat(response.getStatusLine().getStatusCode(), is(200)); } @@ -79,27 +74,23 @@ public void testFirstResolvedRoleTakesPrecedence() throws Exception { // the first custom role provider appears first in order, it should take precedence and deny // permission to create the index setupTestUser(ROLE_A); - // roleB has all permissions on index "foo", so creating "foo" should succeed - try { - Request request = new Request("PUT", "/" + INDEX); - request.setOptions(AUTH_OPTIONS); - getRestClient().performRequest(request); - fail(ROLE_A + " should not be authorized to create index " + INDEX); - } catch (ResponseException e) { - assertThat(e.getResponse().getStatusLine().getStatusCode(), is(403)); - } + Request request = new Request("PUT", "/" + INDEX); + request.setOptions(AUTH_OPTIONS); + ResponseException e = expectThrows(ResponseException.class, () -> client().performRequest(request)); + assertThat(e.getResponse().getStatusLine().getStatusCode(), is(403)); } public void testUnresolvedRoleDoesntSucceed() throws Exception { setupTestUser("unknown"); - // roleB has all permissions on index "foo", so creating "foo" should succeed - try { - Request request = new Request("PUT", "/" + INDEX); - request.setOptions(AUTH_OPTIONS); - getRestClient().performRequest(request); - fail(ROLE_A + " should not be authorized to create index " + INDEX); - } catch (ResponseException e) { - assertThat(e.getResponse().getStatusLine().getStatusCode(), is(403)); + Request request = new Request("PUT", "/" + INDEX); + request.setOptions(AUTH_OPTIONS); + ResponseException e = expectThrows(ResponseException.class, () -> client().performRequest(request)); + assertThat(e.getResponse().getStatusLine().getStatusCode(), is(403)); + } + + private class TestRestHighLevelClient extends RestHighLevelClient { + TestRestHighLevelClient() { + super(client(), restClient -> {}, Collections.emptyList()); } } } diff --git a/x-pack/qa/smoke-test-monitoring-with-watcher/src/test/java/org/elasticsearch/smoketest/PreventFailingBuildIT.java b/x-pack/qa/smoke-test-monitoring-with-watcher/src/test/java/org/elasticsearch/smoketest/PreventFailingBuildIT.java index 2c2cdd044aab7..6ac2cdd3fb654 100644 --- a/x-pack/qa/smoke-test-monitoring-with-watcher/src/test/java/org/elasticsearch/smoketest/PreventFailingBuildIT.java +++ b/x-pack/qa/smoke-test-monitoring-with-watcher/src/test/java/org/elasticsearch/smoketest/PreventFailingBuildIT.java @@ -5,9 +5,9 @@ */ package org.elasticsearch.smoketest; -import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.test.rest.ESRestTestCase; -public class PreventFailingBuildIT extends ESIntegTestCase { +public class PreventFailingBuildIT extends ESRestTestCase { public void testSoThatTestsDoNotFail() { // Noop diff --git a/x-pack/qa/smoke-test-plugins-ssl/build.gradle b/x-pack/qa/smoke-test-plugins-ssl/build.gradle index e88eac3028f3d..d4fe2129363c5 100644 --- a/x-pack/qa/smoke-test-plugins-ssl/build.gradle +++ b/x-pack/qa/smoke-test-plugins-ssl/build.gradle @@ -7,6 +7,7 @@ apply plugin: 'elasticsearch.rest-test' dependencies { testCompile "org.elasticsearch.plugin:x-pack-core:${version}" + testCompile project(':client:rest-high-level') } String outputDir = "${buildDir}/generated-resources/${project.name}" diff --git a/x-pack/qa/smoke-test-plugins-ssl/src/test/java/org/elasticsearch/smoketest/SmokeTestMonitoringWithSecurityIT.java b/x-pack/qa/smoke-test-plugins-ssl/src/test/java/org/elasticsearch/smoketest/SmokeTestMonitoringWithSecurityIT.java index 6a49e18ca93ef..5662990580f0f 100644 --- a/x-pack/qa/smoke-test-plugins-ssl/src/test/java/org/elasticsearch/smoketest/SmokeTestMonitoringWithSecurityIT.java +++ b/x-pack/qa/smoke-test-plugins-ssl/src/test/java/org/elasticsearch/smoketest/SmokeTestMonitoringWithSecurityIT.java @@ -7,35 +7,55 @@ import io.netty.util.ThreadDeathWatcher; import io.netty.util.concurrent.GlobalEventExecutor; -import org.elasticsearch.action.admin.cluster.node.info.NodeInfo; -import org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesResponse; -import org.elasticsearch.common.network.NetworkAddress; -import org.elasticsearch.common.network.NetworkModule; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequest; +import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; +import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; +import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse; +import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.client.Request; +import org.elasticsearch.client.RequestOptions; +import org.elasticsearch.client.Response; +import org.elasticsearch.client.RestClient; +import org.elasticsearch.client.RestHighLevelClient; +import org.elasticsearch.client.indices.GetIndexRequest; +import org.elasticsearch.client.indices.GetIndexTemplatesRequest; +import org.elasticsearch.client.indices.GetIndexTemplatesResponse; +import org.elasticsearch.client.xpack.XPackUsageRequest; +import org.elasticsearch.client.xpack.XPackUsageResponse; +import org.elasticsearch.cluster.health.ClusterHealthStatus; +import org.elasticsearch.common.Priority; +import org.elasticsearch.common.io.PathUtils; +import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.test.ESIntegTestCase; -import org.elasticsearch.xpack.core.XPackPlugin; -import org.elasticsearch.xpack.core.action.XPackUsageRequestBuilder; -import org.elasticsearch.xpack.core.action.XPackUsageResponse; -import org.elasticsearch.xpack.core.monitoring.MonitoringFeatureSetUsage; -import org.elasticsearch.xpack.core.security.SecurityField; +import org.elasticsearch.test.rest.ESRestTestCase; +import org.elasticsearch.test.rest.yaml.ObjectPath; import org.junit.After; +import org.junit.AfterClass; import org.junit.Before; +import org.junit.BeforeClass; import org.junit.ClassRule; import org.junit.rules.ExternalResource; -import java.net.InetSocketAddress; -import java.util.Collection; +import java.io.IOException; +import java.net.URISyntaxException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.ArrayList; import java.util.Collections; import java.util.List; -import java.util.Optional; +import java.util.Map; import java.util.concurrent.TimeUnit; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.notNullValue; /** * This test checks that a Monitoring's HTTP exporter correctly exports to a monitoring cluster @@ -46,7 +66,13 @@ * then uses a transport client to check that the data have been correctly received and * indexed in the cluster. */ -public class SmokeTestMonitoringWithSecurityIT extends ESIntegTestCase { +public class SmokeTestMonitoringWithSecurityIT extends ESRestTestCase { + + public class TestRestHighLevelClient extends RestHighLevelClient { + TestRestHighLevelClient() { + super(client(), RestClient::close, Collections.emptyList()); + } + } /** * A JUnit class level rule that runs after the AfterClass method in {@link ESIntegTestCase}, @@ -78,19 +104,45 @@ protected void after() { }; private static final String USER = "test_user"; - private static final String PASS = "x-pack-test-password"; + private static final SecureString PASS = new SecureString("x-pack-test-password".toCharArray()); + private static final String KEYSTORE_PASS = "testnode"; private static final String MONITORING_PATTERN = ".monitoring-*"; + static Path keyStore; + + @BeforeClass + public static void getKeyStore() { + try { + keyStore = PathUtils.get(SmokeTestMonitoringWithSecurityIT.class.getResource("/testnode.jks").toURI()); + } catch (URISyntaxException e) { + throw new ElasticsearchException("exception while reading the store", e); + } + if (!Files.exists(keyStore)) { + throw new IllegalStateException("Keystore file [" + keyStore + "] does not exist."); + } + } + + @AfterClass + public static void clearKeyStore() { + keyStore = null; + } + + RestHighLevelClient newHighLevelClient() { + return new TestRestHighLevelClient(); + } + @Override - protected Collection> transportClientPlugins() { - return Collections.singletonList(XPackPlugin.class); + protected String getProtocol() { + return "https"; } @Override - protected Settings externalClusterClientSettings() { + protected Settings restClientSettings() { + String token = basicAuthHeaderValue(USER, PASS); return Settings.builder() - .put(SecurityField.USER_SETTING.getKey(), USER + ":" + PASS) - .put(NetworkModule.TRANSPORT_TYPE_KEY, SecurityField.NAME4).build(); + .put(ThreadContext.PREFIX + ".Authorization", token) + .put(ESRestTestCase.TRUSTSTORE_PATH, keyStore) + .put(ESRestTestCase.TRUSTSTORE_PASSWORD, KEYSTORE_PASS).build(); } @Before @@ -100,73 +152,92 @@ public void enableExporter() throws Exception { .put("xpack.monitoring.exporters._http.enabled", true) .put("xpack.monitoring.exporters._http.host", "https://" + randomNodeHttpAddress()) .build(); - assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(exporterSettings)); + ClusterUpdateSettingsResponse response = newHighLevelClient().cluster().putSettings( + new ClusterUpdateSettingsRequest().transientSettings(exporterSettings), RequestOptions.DEFAULT); + assertTrue(response.isAcknowledged()); } @After - public void disableExporter() { + public void disableExporter() throws IOException { Settings exporterSettings = Settings.builder() .putNull("xpack.monitoring.collection.enabled") .putNull("xpack.monitoring.exporters._http.enabled") .putNull("xpack.monitoring.exporters._http.host") .build(); - assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(exporterSettings)); + ClusterUpdateSettingsResponse response = newHighLevelClient().cluster().putSettings( + new ClusterUpdateSettingsRequest().transientSettings(exporterSettings), RequestOptions.DEFAULT); + assertTrue(response.isAcknowledged()); } private boolean getMonitoringUsageExportersDefined() throws Exception { - final XPackUsageResponse usageResponse = new XPackUsageRequestBuilder(client()).execute().get(); - final Optional monitoringUsage = - usageResponse.getUsages() - .stream() - .filter(usage -> usage instanceof MonitoringFeatureSetUsage) - .map(usage -> (MonitoringFeatureSetUsage)usage) - .findFirst(); - - assertThat("Monitoring feature set does not exist", monitoringUsage.isPresent(), is(true)); - - return monitoringUsage.get().getExporters().isEmpty() == false; + RestHighLevelClient client = newHighLevelClient(); + final XPackUsageResponse usageResponse = client.xpack().usage(new XPackUsageRequest(), RequestOptions.DEFAULT); + Map monitoringUsage = usageResponse.getUsages().get("monitoring"); + assertThat("Monitoring feature set does not exist", monitoringUsage, notNullValue()); + + @SuppressWarnings("unchecked") + Map exporters = (Map) monitoringUsage.get("enabled_exporters"); + return exporters != null && exporters.isEmpty() == false; } public void testHTTPExporterWithSSL() throws Exception { // Ensures that the exporter is actually on assertBusy(() -> assertThat("[_http] exporter is not defined", getMonitoringUsageExportersDefined(), is(true))); + RestHighLevelClient client = newHighLevelClient(); // Checks that the monitoring index templates have been installed + GetIndexTemplatesRequest templateRequest = new GetIndexTemplatesRequest(MONITORING_PATTERN); assertBusy(() -> { - GetIndexTemplatesResponse response = client().admin().indices().prepareGetTemplates(MONITORING_PATTERN).get(); - assertThat(response.getIndexTemplates().size(), greaterThanOrEqualTo(2)); + try { + GetIndexTemplatesResponse response = client.indices().getIndexTemplate(templateRequest, RequestOptions.DEFAULT); + assertThat(response.getIndexTemplates().size(), greaterThanOrEqualTo(2)); + } catch (Exception e) { + fail("template not ready yet: " + e.getMessage()); + } }); + GetIndexRequest indexRequest = new GetIndexRequest(MONITORING_PATTERN); // Waits for monitoring indices to be created assertBusy(() -> { try { - assertThat(client().admin().indices().prepareExists(MONITORING_PATTERN).get().isExists(), equalTo(true)); + assertThat(client.indices().exists(indexRequest, RequestOptions.DEFAULT), equalTo(true)); } catch (Exception e) { - fail("exception when checking for monitoring documents: " + e.getMessage()); + fail("monitoring index not created yet: " + e.getMessage()); } }); // Waits for indices to be ready - ensureYellowAndNoInitializingShards(MONITORING_PATTERN); + ClusterHealthRequest healthRequest = new ClusterHealthRequest(MONITORING_PATTERN); + healthRequest.waitForStatus(ClusterHealthStatus.YELLOW); + healthRequest.waitForEvents(Priority.LANGUID); + healthRequest.waitForNoRelocatingShards(true); + healthRequest.waitForNoInitializingShards(true); + ClusterHealthResponse response = client.cluster().health(healthRequest, RequestOptions.DEFAULT); + assertThat(response.isTimedOut(), is(false)); // Checks that the HTTP exporter has successfully exported some data + SearchRequest searchRequest = new SearchRequest(new String[] { MONITORING_PATTERN }, new SearchSourceBuilder().size(0)); assertBusy(() -> { try { - assertThat(client().prepareSearch(MONITORING_PATTERN).setSize(0).get().getHits().getTotalHits().value, greaterThan(0L)); + assertThat(client.search(searchRequest, RequestOptions.DEFAULT).getHits().getTotalHits().value, greaterThan(0L)); } catch (Exception e) { - fail("exception when checking for monitoring documents: " + e.getMessage()); + fail("monitoring date not exported yet: " + e.getMessage()); } }); } - private String randomNodeHttpAddress() { - List nodes = client().admin().cluster().prepareNodesInfo().clear().setHttp(true).get().getNodes(); - assertThat(nodes.size(), greaterThan(0)); - - InetSocketAddress[] httpAddresses = new InetSocketAddress[nodes.size()]; - for (int i = 0; i < nodes.size(); i++) { - httpAddresses[i] = nodes.get(i).getHttp().address().publishAddress().address(); + private String randomNodeHttpAddress() throws IOException { + Response response = client().performRequest(new Request("GET", "/_nodes")); + assertOK(response); + ObjectPath objectPath = ObjectPath.createFromResponse(response); + Map nodesAsMap = objectPath.evaluate("nodes"); + List httpAddresses = new ArrayList<>(); + for (Map.Entry entry : nodesAsMap.entrySet()) { + Map nodeDetails = (Map) entry.getValue(); + Map httpInfo = (Map) nodeDetails.get("http"); + httpAddresses.add((String) httpInfo.get("publish_address")); } - return NetworkAddress.format(randomFrom(httpAddresses)); + assertThat(httpAddresses.size(), greaterThan(0)); + return randomFrom(httpAddresses); } } diff --git a/x-pack/qa/transport-client-tests/build.gradle b/x-pack/qa/transport-client-tests/build.gradle deleted file mode 100644 index 5ca96eb0d7a87..0000000000000 --- a/x-pack/qa/transport-client-tests/build.gradle +++ /dev/null @@ -1,22 +0,0 @@ -apply plugin: 'elasticsearch.standalone-rest-test' -apply plugin: 'elasticsearch.rest-test' - -dependencies { - testCompile "org.elasticsearch.plugin:x-pack-core:${version}" - testCompile project(path: xpackProject('transport-client').path, configuration: 'runtime') -} - -integTestCluster { - setting 'xpack.security.enabled', 'false' - setting 'xpack.license.self_generated.type', 'trial' -} - - -testingConventions { - naming.clear() - naming { - IT { - baseClass 'org.elasticsearch.xpack.ml.client.ESXPackSmokeClientTestCase' - } - } -} \ No newline at end of file diff --git a/x-pack/qa/transport-client-tests/src/test/java/org/elasticsearch/xpack/ml/client/ESXPackSmokeClientTestCase.java b/x-pack/qa/transport-client-tests/src/test/java/org/elasticsearch/xpack/ml/client/ESXPackSmokeClientTestCase.java deleted file mode 100644 index 28267614dd36d..0000000000000 --- a/x-pack/qa/transport-client-tests/src/test/java/org/elasticsearch/xpack/ml/client/ESXPackSmokeClientTestCase.java +++ /dev/null @@ -1,153 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ -package org.elasticsearch.xpack.ml.client; - -import org.apache.logging.log4j.Logger; -import org.apache.logging.log4j.LogManager; -import org.apache.lucene.util.LuceneTestCase; -import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; -import org.elasticsearch.client.Client; -import org.elasticsearch.client.transport.TransportClient; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.TransportAddress; -import org.elasticsearch.env.Environment; -import org.elasticsearch.xpack.client.PreBuiltXPackTransportClient; -import org.junit.After; -import org.junit.AfterClass; -import org.junit.Before; -import org.junit.BeforeClass; - -import java.io.IOException; -import java.net.InetAddress; -import java.net.InetSocketAddress; -import java.net.URL; -import java.nio.file.Path; -import java.util.Locale; -import java.util.concurrent.atomic.AtomicInteger; - -import static com.carrotsearch.randomizedtesting.RandomizedTest.randomAsciiOfLength; -import static org.hamcrest.Matchers.notNullValue; - -/** - * An abstract base class to run integration tests against an Elasticsearch - * cluster running outside of the test process. - *

- * You can define a list of transport addresses from where you can reach your - * cluster by setting "tests.cluster" system property. It defaults to - * "localhost:9300". If you run this from `gradle integTest` then it will start - * the clsuter for you and set up the property. - *

- * If you want to debug this module from your IDE, then start an external - * cluster by yourself, maybe with `gradle run`, then run JUnit. If you changed - * the default port, set "-Dtests.cluster=localhost:PORT" when running your - * test. - */ -@LuceneTestCase.SuppressSysoutChecks(bugUrl = "we log a lot on purpose") -public abstract class ESXPackSmokeClientTestCase extends LuceneTestCase { - - /** - * Key used to eventually switch to using an external cluster and provide - * its transport addresses - */ - public static final String TESTS_CLUSTER = "tests.cluster"; - - protected static final Logger logger = LogManager.getLogger(ESXPackSmokeClientTestCase.class); - - private static final AtomicInteger counter = new AtomicInteger(); - private static Client client; - private static String clusterAddresses; - protected String index; - - private static Client startClient(Path tempDir, TransportAddress... transportAddresses) { - Settings.Builder builder = Settings.builder() - .put("node.name", "qa_xpack_smoke_client_" + counter.getAndIncrement()) - .put("client.transport.ignore_cluster_name", true) - .put("xpack.security.enabled", false) - .put(Environment.PATH_HOME_SETTING.getKey(), tempDir); - TransportClient client = new PreBuiltXPackTransportClient(builder.build()) - .addTransportAddresses(transportAddresses); - - logger.info("--> Elasticsearch Java TransportClient started"); - - Exception clientException = null; - try { - ClusterHealthResponse health = client.admin().cluster().prepareHealth().get(); - logger.info("--> connected to [{}] cluster which is running [{}] node(s).", - health.getClusterName(), health.getNumberOfNodes()); - } catch (Exception e) { - logger.error("Error getting cluster health", e); - clientException = e; - } - - assumeNoException("Sounds like your cluster is not running at " + clusterAddresses, - clientException); - - return client; - } - - private static Client startClient() throws IOException { - String[] stringAddresses = clusterAddresses.split(","); - TransportAddress[] transportAddresses = new TransportAddress[stringAddresses.length]; - int i = 0; - for (String stringAddress : stringAddresses) { - URL url = new URL("http://" + stringAddress); - InetAddress inetAddress = InetAddress.getByName(url.getHost()); - transportAddresses[i++] = new TransportAddress( - new InetSocketAddress(inetAddress, url.getPort())); - } - return startClient(createTempDir(), transportAddresses); - } - - public static Client getClient() { - if (client == null) { - try { - client = startClient(); - } catch (IOException e) { - logger.error("can not start the client", e); - } - assertThat(client, notNullValue()); - } - return client; - } - - @BeforeClass - public static void initializeSettings() { - clusterAddresses = System.getProperty(TESTS_CLUSTER); - if (clusterAddresses == null || clusterAddresses.isEmpty()) { - fail("Must specify " + TESTS_CLUSTER + " for smoke client test"); - } - } - - @AfterClass - public static void stopTransportClient() { - if (client != null) { - client.close(); - client = null; - } - } - - @Before - public void defineIndexName() { - doClean(); - index = "qa-xpack-smoke-test-client-" - + randomAsciiOfLength(10).toLowerCase(Locale.getDefault()); - } - - @After - public void cleanIndex() { - doClean(); - } - - private void doClean() { - if (client != null) { - try { - client.admin().indices().prepareDelete(index).get(); - } catch (Exception e) { - // We ignore this cleanup exception - } - } - } -} diff --git a/x-pack/qa/transport-client-tests/src/test/java/org/elasticsearch/xpack/ml/client/MLTransportClientIT.java b/x-pack/qa/transport-client-tests/src/test/java/org/elasticsearch/xpack/ml/client/MLTransportClientIT.java deleted file mode 100644 index 1a4959c0be84a..0000000000000 --- a/x-pack/qa/transport-client-tests/src/test/java/org/elasticsearch/xpack/ml/client/MLTransportClientIT.java +++ /dev/null @@ -1,179 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ -package org.elasticsearch.xpack.ml.client; - -import org.elasticsearch.action.support.master.AcknowledgedResponse; -import org.elasticsearch.client.Client; -import org.elasticsearch.common.bytes.BytesArray; -import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.common.xcontent.XContentType; -import org.elasticsearch.xpack.core.XPackClient; -import org.elasticsearch.xpack.core.ml.action.CloseJobAction; -import org.elasticsearch.xpack.core.ml.action.DeleteJobAction; -import org.elasticsearch.xpack.core.ml.action.FlushJobAction; -import org.elasticsearch.xpack.core.ml.action.GetBucketsAction; -import org.elasticsearch.xpack.core.ml.action.GetDatafeedsAction; -import org.elasticsearch.xpack.core.ml.action.GetJobsAction; -import org.elasticsearch.xpack.core.ml.action.GetModelSnapshotsAction; -import org.elasticsearch.xpack.core.ml.action.OpenJobAction; -import org.elasticsearch.xpack.core.ml.action.PostDataAction; -import org.elasticsearch.xpack.core.ml.action.PutDatafeedAction; -import org.elasticsearch.xpack.core.ml.action.PutJobAction; -import org.elasticsearch.xpack.core.ml.action.StartDatafeedAction; -import org.elasticsearch.xpack.core.ml.action.StopDatafeedAction; -import org.elasticsearch.xpack.core.ml.action.UpdateModelSnapshotAction; -import org.elasticsearch.xpack.core.ml.action.ValidateDetectorAction; -import org.elasticsearch.xpack.core.ml.action.ValidateJobConfigAction; -import org.elasticsearch.xpack.core.ml.client.MachineLearningClient; -import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig; -import org.elasticsearch.xpack.core.ml.job.config.AnalysisConfig; -import org.elasticsearch.xpack.core.ml.job.config.DataDescription; -import org.elasticsearch.xpack.core.ml.job.config.Detector; -import org.elasticsearch.xpack.core.ml.job.config.Job; - -import java.util.ArrayList; -import java.util.Collections; -import java.util.Date; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.notNullValue; - -public class MLTransportClientIT extends ESXPackSmokeClientTestCase { - - public void testMLTransportClient_JobActions() { - Client client = getClient(); - XPackClient xPackClient = new XPackClient(client); - MachineLearningClient mlClient = xPackClient.machineLearning(); - - String jobId = "ml-transport-client-it-job"; - Job.Builder job = createJob(jobId); - - PutJobAction.Response putJobResponse = mlClient.putJob(new PutJobAction.Request(job)).actionGet(); - assertThat(putJobResponse, notNullValue()); - - GetJobsAction.Response getJobResponse = mlClient.getJobs(new GetJobsAction.Request(jobId)).actionGet(); - assertThat(getJobResponse, notNullValue()); - assertThat(getJobResponse.getResponse(), notNullValue()); - assertThat(getJobResponse.getResponse().count(), equalTo(1L)); - - // Open job POST data, flush, close and check a result - AcknowledgedResponse openJobResponse = mlClient.openJob(new OpenJobAction.Request(jobId)).actionGet(); - assertThat(openJobResponse.isAcknowledged(), equalTo(true)); - - String content = "{\"time\":1000, \"msg\": \"some categorical message\"}\n" + - "{\"time\":11000, \"msg\": \"some categorical message in the second bucket\"}\n" + - "{\"time\":21000, \"msg\": \"some categorical message in the third bucket\"}\n"; - PostDataAction.Request postRequest = new PostDataAction.Request(jobId); - postRequest.setContent(new BytesArray(content), XContentType.JSON); - PostDataAction.Response postResponse = mlClient.postData(postRequest).actionGet(); - assertThat(postResponse.getDataCounts(), notNullValue()); - assertThat(postResponse.getDataCounts().getInputFieldCount(), equalTo(3L)); - - FlushJobAction.Response flushResponse = mlClient.flushJob(new FlushJobAction.Request(jobId)).actionGet(); - assertThat(flushResponse.isFlushed(), equalTo(true)); - - CloseJobAction.Response closeResponse = mlClient.closeJob(new CloseJobAction.Request(jobId)).actionGet(); - assertThat(closeResponse.isClosed(), equalTo(true)); - - GetBucketsAction.Response getBucketsResponse = mlClient.getBuckets(new GetBucketsAction.Request(jobId)).actionGet(); - assertThat(getBucketsResponse.getBuckets().count(), equalTo(1L)); - - // Update a model snapshot - GetModelSnapshotsAction.Response getModelSnapshotResponse = - mlClient.getModelSnapshots(new GetModelSnapshotsAction.Request(jobId, null)).actionGet(); - assertThat(getModelSnapshotResponse.getPage().count(), equalTo(1L)); - String snapshotId = getModelSnapshotResponse.getPage().results().get(0).getSnapshotId(); - - UpdateModelSnapshotAction.Request updateModelSnapshotRequest = new UpdateModelSnapshotAction.Request(jobId, snapshotId); - updateModelSnapshotRequest.setDescription("Changed description"); - UpdateModelSnapshotAction.Response updateModelSnapshotResponse = - mlClient.updateModelSnapshot(updateModelSnapshotRequest).actionGet(); - assertThat(updateModelSnapshotResponse.getModel(), notNullValue()); - assertThat(updateModelSnapshotResponse.getModel().getDescription(), equalTo("Changed description")); - - // and delete the job - AcknowledgedResponse deleteJobResponse = mlClient.deleteJob(new DeleteJobAction.Request(jobId)).actionGet(); - assertThat(deleteJobResponse, notNullValue()); - assertThat(deleteJobResponse.isAcknowledged(), equalTo(true)); - } - - public void testMLTransportClient_ValidateActions() { - Client client = getClient(); - XPackClient xPackClient = new XPackClient(client); - MachineLearningClient mlClient = xPackClient.machineLearning(); - - Detector.Builder detector = new Detector.Builder(); - detector.setFunction("count"); - ValidateDetectorAction.Request validateDetectorRequest = new ValidateDetectorAction.Request(detector.build()); - AcknowledgedResponse validateDetectorResponse = mlClient.validateDetector(validateDetectorRequest).actionGet(); - assertThat(validateDetectorResponse.isAcknowledged(), equalTo(true)); - - Job.Builder job = createJob("ml-transport-client-it-validate-job"); - ValidateJobConfigAction.Request validateJobRequest = new ValidateJobConfigAction.Request(job.build(new Date())); - AcknowledgedResponse validateJobResponse = mlClient.validateJobConfig(validateJobRequest).actionGet(); - assertThat(validateJobResponse.isAcknowledged(), equalTo(true)); - } - - - public void testMLTransportClient_DateFeedActions() { - Client client = getClient(); - XPackClient xPackClient = new XPackClient(client); - MachineLearningClient mlClient = xPackClient.machineLearning(); - - String jobId = "ml-transport-client-it-datafeed-job"; - Job.Builder job = createJob(jobId); - - PutJobAction.Response putJobResponse = mlClient.putJob(new PutJobAction.Request(job)).actionGet(); - assertThat(putJobResponse, notNullValue()); - - String datafeedId = "ml-transport-client-it-datafeed"; - DatafeedConfig.Builder datafeed = new DatafeedConfig.Builder(datafeedId, jobId); - String datafeedIndex = "ml-transport-client-test"; - String datatype = "type-bar"; - datafeed.setIndices(Collections.singletonList(datafeedIndex)); - - mlClient.putDatafeed(new PutDatafeedAction.Request(datafeed.build())).actionGet(); - - GetDatafeedsAction.Response getDatafeedResponse = mlClient.getDatafeeds(new GetDatafeedsAction.Request(datafeedId)).actionGet(); - assertThat(getDatafeedResponse.getResponse(), notNullValue()); - - // Open job before starting the datafeed - AcknowledgedResponse openJobResponse = mlClient.openJob(new OpenJobAction.Request(jobId)).actionGet(); - assertThat(openJobResponse.isAcknowledged(), equalTo(true)); - - // create the index for the data feed - Map source = new HashMap<>(); - source.put("time", new Date()); - source.put("message", "some message"); - client.prepareIndex(datafeedIndex, datatype).setSource(source).get(); - - StartDatafeedAction.Request startDatafeedRequest = new StartDatafeedAction.Request(datafeedId, new Date().getTime()); - AcknowledgedResponse startDataFeedResponse = mlClient.startDatafeed(startDatafeedRequest).actionGet(); - assertThat(startDataFeedResponse.isAcknowledged(), equalTo(true)); - - StopDatafeedAction.Response stopDataFeedResponse = mlClient.stopDatafeed(new StopDatafeedAction.Request(datafeedId)).actionGet(); - assertThat(stopDataFeedResponse.isStopped(), equalTo(true)); - } - - private Job.Builder createJob(String jobId) { - Job.Builder job = new Job.Builder(); - job.setId(jobId); - - List detectors = new ArrayList<>(); - Detector.Builder detector = new Detector.Builder(); - detector.setFunction("count"); - detectors.add(detector.build()); - - AnalysisConfig.Builder analysisConfig = new AnalysisConfig.Builder(detectors); - analysisConfig.setBucketSpan(TimeValue.timeValueSeconds(10L)); - job.setAnalysisConfig(analysisConfig); - job.setDataDescription(new DataDescription.Builder()); - return job; - } -} diff --git a/x-pack/transport-client/build.gradle b/x-pack/transport-client/build.gradle deleted file mode 100644 index d764ef897447a..0000000000000 --- a/x-pack/transport-client/build.gradle +++ /dev/null @@ -1,41 +0,0 @@ -apply plugin: 'elasticsearch.build' -apply plugin: 'nebula.maven-base-publish' -apply plugin: 'nebula.maven-scm' - -group = 'org.elasticsearch.client' -archivesBaseName = 'x-pack-transport' - -dependencies { - // this "api" dependency looks weird, but it is correct, as it contains - // all of x-pack for now, and transport client will be going away in the future. - compile "org.elasticsearch.plugin:x-pack-core:${version}" - compile "org.elasticsearch.client:transport:${version}" - testCompile "com.carrotsearch.randomizedtesting:randomizedtesting-runner:${versions.randomizedrunner}" - testCompile "junit:junit:${versions.junit}" - testCompile "org.hamcrest:hamcrest:${versions.hamcrest}" -} - -dependencyLicenses.enabled = false - -forbiddenApisTest { - // we don't use the core test-framework, no lucene classes present so we don't want the es-test-signatures to - // be pulled in - replaceSignatureFiles 'jdk-signatures', 'es-all-signatures' -} - -testingConventions { - naming.clear() - naming { - Tests { - baseClass 'com.carrotsearch.randomizedtesting.RandomizedTest' - } - } -} - -publishing { - publications { - nebula(MavenPublication) { - artifactId = archivesBaseName - } - } -} diff --git a/x-pack/transport-client/src/main/java/org/elasticsearch/xpack/client/PreBuiltXPackTransportClient.java b/x-pack/transport-client/src/main/java/org/elasticsearch/xpack/client/PreBuiltXPackTransportClient.java deleted file mode 100644 index cf4e5db92b00e..0000000000000 --- a/x-pack/transport-client/src/main/java/org/elasticsearch/xpack/client/PreBuiltXPackTransportClient.java +++ /dev/null @@ -1,66 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ -package org.elasticsearch.xpack.client; - -import io.netty.util.ThreadDeathWatcher; -import io.netty.util.concurrent.GlobalEventExecutor; -import org.elasticsearch.client.transport.TransportClient; -import org.elasticsearch.common.network.NetworkModule; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.transport.client.PreBuiltTransportClient; -import org.elasticsearch.xpack.core.XPackClientPlugin; -import org.elasticsearch.xpack.core.XPackPlugin; -import org.elasticsearch.xpack.core.security.SecurityField; - -import java.util.Arrays; -import java.util.Collection; -import java.util.Collections; -import java.util.concurrent.TimeUnit; - -/** - * A builder to create an instance of {@link TransportClient} that pre-installs - * all of the plugins installed by the {@link PreBuiltTransportClient} and the - * {@link XPackPlugin} so that the client may be used with an x-pack enabled - * cluster. - * - * @deprecated {@link TransportClient} is deprecated in favour of the high-level REST client and will be removed in Elasticsearch 8.0 - */ -@SuppressWarnings({"unchecked","varargs"}) -@Deprecated -public class PreBuiltXPackTransportClient extends PreBuiltTransportClient { - - @SafeVarargs - public PreBuiltXPackTransportClient(Settings settings, Class... plugins) { - this(settings, Arrays.asList(plugins)); - } - - public PreBuiltXPackTransportClient(Settings settings, Collection> plugins) { - this(settings, plugins, null); - } - - public PreBuiltXPackTransportClient(Settings settings, Collection> plugins, - HostFailureListener hostFailureListener) { - super(settings, addPlugins(plugins, Collections.singletonList(XPackClientPlugin.class)), hostFailureListener); - } - - @Override - public void close() { - super.close(); - if (NetworkModule.TRANSPORT_TYPE_SETTING.get(settings).equals(SecurityField.NAME4)) { - try { - GlobalEventExecutor.INSTANCE.awaitInactivity(5, TimeUnit.SECONDS); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - } - try { - ThreadDeathWatcher.awaitInactivity(5, TimeUnit.SECONDS); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - } - } - } -} diff --git a/x-pack/transport-client/src/test/java/org/elasticsearch/xpack/client/PreBuiltXPackTransportClientTests.java b/x-pack/transport-client/src/test/java/org/elasticsearch/xpack/client/PreBuiltXPackTransportClientTests.java deleted file mode 100644 index f9808ce54faac..0000000000000 --- a/x-pack/transport-client/src/test/java/org/elasticsearch/xpack/client/PreBuiltXPackTransportClientTests.java +++ /dev/null @@ -1,30 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ -package org.elasticsearch.xpack.client; - -import com.carrotsearch.randomizedtesting.RandomizedTest; -import org.elasticsearch.client.transport.TransportClient; -import org.elasticsearch.common.network.NetworkModule; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.xpack.core.security.SecurityField; -import org.junit.Test; - -import static org.junit.Assert.assertEquals; - -/** - * Unit tests for the {@link PreBuiltXPackTransportClient} - */ -public class PreBuiltXPackTransportClientTests extends RandomizedTest { - - @Test - public void testPluginInstalled() { - try (TransportClient client = new PreBuiltXPackTransportClient(Settings.EMPTY)) { - Settings settings = client.settings(); - assertEquals(SecurityField.NAME4, NetworkModule.TRANSPORT_TYPE_SETTING.get(settings)); - } - } - -} \ No newline at end of file From e46622bb76338207f81e1426f8de1763b6c86453 Mon Sep 17 00:00:00 2001 From: Igor Motov Date: Thu, 23 May 2019 13:24:33 -0400 Subject: [PATCH 073/224] SQL: Add back the single node JDBC tests (#41960) Adds back single node tests that were accidentally removed. From 4bec333f7264d1f154bcb17eb524d40bab68d746 Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Thu, 23 May 2019 13:27:34 -0400 Subject: [PATCH 074/224] Mute accounting circuit breaker check after test (#42448) If we close an engine while a refresh is happening, then we might leak refCount of some SegmentReaders. We need to skip the ram accounting circuit breaker check until we have a new Lucene snapshot which includes the fix for LUCENE-8809. This also adds a test to the engine but left it muted so we won't forget to reenable this check. Closes #30290 --- .../index/engine/InternalEngineTests.java | 42 +++++++++++++++++++ .../index/engine/EngineTestCase.java | 3 ++ .../test/InternalTestCluster.java | 21 ++++++---- 3 files changed, 57 insertions(+), 9 deletions(-) diff --git a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java index db9de3765b1e7..b213da097ce5e 100644 --- a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java +++ b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java @@ -81,6 +81,7 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.TriFunction; import org.elasticsearch.common.UUIDs; +import org.elasticsearch.common.breaker.CircuitBreaker; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.collect.Tuple; @@ -154,6 +155,7 @@ import java.util.concurrent.BrokenBarrierException; import java.util.concurrent.CountDownLatch; import java.util.concurrent.CyclicBarrier; +import java.util.concurrent.Semaphore; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; @@ -5636,4 +5638,44 @@ public void testMaxSeqNoInCommitUserData() throws Exception { rollTranslog.join(); assertMaxSeqNoInCommitUserData(engine); } + + @AwaitsFix(bugUrl = "https://issues.apache.org/jira/browse/LUCENE-8809") + public void testRefreshAndFailEngineConcurrently() throws Exception { + AtomicBoolean stopped = new AtomicBoolean(); + Semaphore indexedDocs = new Semaphore(0); + Thread indexer = new Thread(() -> { + while (stopped.get() == false) { + String id = Integer.toString(randomIntBetween(1, 100)); + try { + engine.index(indexForDoc(createParsedDoc(id, null))); + indexedDocs.release(); + } catch (IOException e) { + throw new AssertionError(e); + } catch (AlreadyClosedException e) { + return; + } + } + }); + + Thread refresher = new Thread(() -> { + while (stopped.get() == false) { + try { + engine.refresh("test", randomFrom(Engine.SearcherScope.values()), randomBoolean()); + } catch (AlreadyClosedException e) { + return; + } + } + }); + indexer.start(); + refresher.start(); + indexedDocs.acquire(randomIntBetween(1, 100)); + try { + engine.failEngine("test", new IOException("simulated error")); + } finally { + stopped.set(true); + indexer.join(); + refresher.join(); + } + assertThat(engine.config().getCircuitBreakerService().getBreaker(CircuitBreaker.ACCOUNTING).getUsed(), equalTo(0L)); + } } diff --git a/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java index afa319af7e1cf..e25217eaccc9b 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java @@ -55,6 +55,7 @@ import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Randomness; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.breaker.CircuitBreaker; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.compress.CompressedXContent; @@ -269,6 +270,8 @@ public void tearDown() throws Exception { assertConsistentHistoryBetweenTranslogAndLuceneIndex(replicaEngine, createMapperService("test")); assertMaxSeqNoInCommitUserData(replicaEngine); } + assertThat(engine.config().getCircuitBreakerService().getBreaker(CircuitBreaker.ACCOUNTING).getUsed(), equalTo(0L)); + assertThat(replicaEngine.config().getCircuitBreakerService().getBreaker(CircuitBreaker.ACCOUNTING).getUsed(), equalTo(0L)); } finally { IOUtils.close(replicaEngine, storeReplica, engine, store, () -> terminate(threadPool)); } diff --git a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java index 2e88a018e5a0d..cc071df9769ca 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java +++ b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java @@ -2334,15 +2334,18 @@ public void ensureEstimatedStats() { final CircuitBreakerService breakerService = getInstanceFromNode(CircuitBreakerService.class, nodeAndClient.node); CircuitBreaker fdBreaker = breakerService.getBreaker(CircuitBreaker.FIELDDATA); assertThat("Fielddata breaker not reset to 0 on node: " + name, fdBreaker.getUsed(), equalTo(0L)); - try { - assertBusy(() -> { - CircuitBreaker acctBreaker = breakerService.getBreaker(CircuitBreaker.ACCOUNTING); - assertThat("Accounting breaker not reset to 0 on node: " + name + ", are there still Lucene indices around?", - acctBreaker.getUsed(), equalTo(0L)); - }); - } catch (Exception e) { - throw new AssertionError("Exception during check for accounting breaker reset to 0", e); - } + + // Mute this assertion until we have a new Lucene snapshot with https://issues.apache.org/jira/browse/LUCENE-8809. + // try { + // assertBusy(() -> { + // CircuitBreaker acctBreaker = breakerService.getBreaker(CircuitBreaker.ACCOUNTING); + // assertThat("Accounting breaker not reset to 0 on node: " + name + ", are there still Lucene indices around?", + // acctBreaker.getUsed(), equalTo(0L)); + // }); + // } catch (Exception e) { + // throw new AssertionError("Exception during check for accounting breaker reset to 0", e); + // } + // Anything that uses transport or HTTP can increase the // request breaker (because they use bigarrays), because of // that the breaker can sometimes be incremented from ping From 8177e7102d9438c0b6d2fe55a351089a5e5b98a2 Mon Sep 17 00:00:00 2001 From: sandmannn Date: Thu, 23 May 2019 19:38:11 +0200 Subject: [PATCH 075/224] Split document and metadata fields in GetResult (#38373) This commit makes creators of GetField split the fields into document fields and metadata fields. It is part of larger refactoring that aims to remove the calls to static methods of MapperService related to metadata fields, as discussed in #24422. --- build.gradle | 4 +- .../PercolateQueryBuilderTests.java | 4 +- .../action/update/UpdateHelper.java | 2 +- .../action/update/UpdateResponse.java | 3 +- .../elasticsearch/index/get/GetResult.java | 141 +++++++++++------- .../index/get/ShardGetService.java | 18 ++- .../action/explain/ExplainResponseTests.java | 4 +- .../action/get/GetResponseTests.java | 10 +- .../action/get/MultiGetResponseTests.java | 2 +- .../action/update/UpdateRequestTests.java | 20 +-- .../action/update/UpdateResponseTests.java | 2 +- .../index/get/GetResultTests.java | 48 +++--- .../query/GeoShapeQueryBuilderTests.java | 2 +- .../index/query/TermsQueryBuilderTests.java | 2 +- .../document/RestGetSourceActionTests.java | 6 +- .../authc/AuthenticationServiceTests.java | 10 +- .../authc/esnative/NativeUsersStoreTests.java | 3 + .../store/NativePrivilegeStoreTests.java | 6 +- .../xpack/security/test/SecurityMocks.java | 3 +- .../execution/ExecutionServiceTests.java | 6 +- .../ack/TransportAckWatchActionTests.java | 2 +- 21 files changed, 184 insertions(+), 114 deletions(-) diff --git a/build.gradle b/build.gradle index 7de02b814da86..037d3242dc4b7 100644 --- a/build.gradle +++ b/build.gradle @@ -162,8 +162,8 @@ task verifyVersions { * after the backport of the backcompat code is complete. */ -boolean bwc_tests_enabled = true -final String bwc_tests_disabled_issue = "" /* place a PR link here when committing bwc changes */ +boolean bwc_tests_enabled = false +final String bwc_tests_disabled_issue = "https://github.com/elastic/elasticsearch/pull/38373" /* place a PR link here when committing bwc changes */ if (bwc_tests_enabled == false) { if (bwc_tests_disabled_issue.isEmpty()) { throw new GradleException("bwc_tests_disabled_issue must be set when bwc_tests_enabled == false") diff --git a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolateQueryBuilderTests.java b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolateQueryBuilderTests.java index e697c2f66eed8..6053a92b54a20 100644 --- a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolateQueryBuilderTests.java +++ b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolateQueryBuilderTests.java @@ -153,12 +153,12 @@ protected GetResponse executeGet(GetRequest getRequest) { if (indexedDocumentExists) { return new GetResponse( new GetResult(indexedDocumentIndex, MapperService.SINGLE_MAPPING_NAME, indexedDocumentId, 0, 1, 0L, true, - documentSource.iterator().next(), Collections.emptyMap()) + documentSource.iterator().next(), Collections.emptyMap(), Collections.emptyMap()) ); } else { return new GetResponse( new GetResult(indexedDocumentIndex, MapperService.SINGLE_MAPPING_NAME, indexedDocumentId, UNASSIGNED_SEQ_NO, 0, -1, - false, null, Collections.emptyMap()) + false, null, Collections.emptyMap(), Collections.emptyMap()) ); } } diff --git a/server/src/main/java/org/elasticsearch/action/update/UpdateHelper.java b/server/src/main/java/org/elasticsearch/action/update/UpdateHelper.java index 54cd38aa0b960..c6e45af0e6a89 100644 --- a/server/src/main/java/org/elasticsearch/action/update/UpdateHelper.java +++ b/server/src/main/java/org/elasticsearch/action/update/UpdateHelper.java @@ -306,7 +306,7 @@ public static GetResult extractGetResult(final UpdateRequest request, String con // TODO when using delete/none, we can still return the source as bytes by generating it (using the sourceContentType) return new GetResult(concreteIndex, request.type(), request.id(), seqNo, primaryTerm, version, true, sourceFilteredAsBytes, - Collections.emptyMap()); + Collections.emptyMap(), Collections.emptyMap()); } public static class Result { diff --git a/server/src/main/java/org/elasticsearch/action/update/UpdateResponse.java b/server/src/main/java/org/elasticsearch/action/update/UpdateResponse.java index 03d721b26fe08..f3afec4f25b29 100644 --- a/server/src/main/java/org/elasticsearch/action/update/UpdateResponse.java +++ b/server/src/main/java/org/elasticsearch/action/update/UpdateResponse.java @@ -164,7 +164,8 @@ public UpdateResponse build() { if (getResult != null) { update.setGetResult(new GetResult(update.getIndex(), update.getType(), update.getId(), getResult.getSeqNo(), getResult.getPrimaryTerm(), update.getVersion(), - getResult.isExists(), getResult.internalSourceRef(), getResult.getFields())); + getResult.isExists(), getResult.internalSourceRef(), getResult.getDocumentFields(), + getResult.getMetadataFields())); } update.setForcedRefresh(forcedRefresh); return update; diff --git a/server/src/main/java/org/elasticsearch/index/get/GetResult.java b/server/src/main/java/org/elasticsearch/index/get/GetResult.java index 5769b659e40b3..ffaa42ce0ad21 100644 --- a/server/src/main/java/org/elasticsearch/index/get/GetResult.java +++ b/server/src/main/java/org/elasticsearch/index/get/GetResult.java @@ -20,6 +20,7 @@ package org.elasticsearch.index.get; import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.Version; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.compress.CompressorFactory; @@ -36,11 +37,9 @@ import org.elasticsearch.search.lookup.SourceLookup; import java.io.IOException; -import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; import java.util.Iterator; -import java.util.List; import java.util.Map; import java.util.Objects; @@ -67,7 +66,8 @@ public class GetResult implements Streamable, Iterable, ToXConten private long seqNo; private long primaryTerm; private boolean exists; - private Map fields; + private Map documentFields; + private Map metaFields; private Map sourceAsMap; private BytesReference source; private byte[] sourceAsBytes; @@ -76,7 +76,7 @@ public class GetResult implements Streamable, Iterable, ToXConten } public GetResult(String index, String type, String id, long seqNo, long primaryTerm, long version, boolean exists, - BytesReference source, Map fields) { + BytesReference source, Map documentFields, Map metaFields) { this.index = index; this.type = type; this.id = id; @@ -89,9 +89,13 @@ public GetResult(String index, String type, String id, long seqNo, long primaryT this.version = version; this.exists = exists; this.source = source; - this.fields = fields; - if (this.fields == null) { - this.fields = emptyMap(); + this.documentFields = documentFields; + if (this.documentFields == null) { + this.documentFields = emptyMap(); + } + this.metaFields = metaFields; + if (this.metaFields == null) { + this.metaFields = emptyMap(); } } @@ -222,20 +226,31 @@ public Map getSource() { return sourceAsMap(); } + + public Map getMetadataFields() { + return metaFields; + } + + public Map getDocumentFields() { + return documentFields; + } + public Map getFields() { + Map fields = new HashMap<>(); + fields.putAll(metaFields); + fields.putAll(documentFields); return fields; } public DocumentField field(String name) { - return fields.get(name); + return getFields().get(name); } @Override public Iterator iterator() { - if (fields == null) { - return Collections.emptyIterator(); - } - return fields.values().iterator(); + // need to join the fields and metadata fields + Map allFields = this.getFields(); + return allFields.values().iterator(); } public XContentBuilder toXContentEmbedded(XContentBuilder builder, Params params) throws IOException { @@ -244,21 +259,7 @@ public XContentBuilder toXContentEmbedded(XContentBuilder builder, Params params builder.field(_PRIMARY_TERM, primaryTerm); } - List metaFields = new ArrayList<>(); - List otherFields = new ArrayList<>(); - if (fields != null && !fields.isEmpty()) { - for (DocumentField field : fields.values()) { - if (field.getValues().isEmpty()) { - continue; - } - if (field.isMetadataField()) { - metaFields.add(field); - } else { - otherFields.add(field); - } - } - } - for (DocumentField field : metaFields) { + for (DocumentField field : metaFields.values()) { // TODO: can we avoid having an exception here? if (field.getName().equals(IgnoredFieldMapper.NAME)) { builder.field(field.getName(), field.getValues()); @@ -273,9 +274,9 @@ public XContentBuilder toXContentEmbedded(XContentBuilder builder, Params params XContentHelper.writeRawField(SourceFieldMapper.NAME, source, builder, params); } - if (!otherFields.isEmpty()) { + if (!documentFields.isEmpty()) { builder.startObject(FIELDS); - for (DocumentField field : otherFields) { + for (DocumentField field : documentFields.values()) { field.toXContent(builder, params); } builder.endObject(); @@ -317,7 +318,8 @@ public static GetResult fromXContentEmbedded(XContentParser parser, String index long primaryTerm = UNASSIGNED_PRIMARY_TERM; Boolean found = null; BytesReference source = null; - Map fields = new HashMap<>(); + Map documentFields = new HashMap<>(); + Map metaFields = new HashMap<>(); while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); @@ -337,7 +339,8 @@ public static GetResult fromXContentEmbedded(XContentParser parser, String index } else if (FOUND.equals(currentFieldName)) { found = parser.booleanValue(); } else { - fields.put(currentFieldName, new DocumentField(currentFieldName, Collections.singletonList(parser.objectText()))); + metaFields.put(currentFieldName, new DocumentField(currentFieldName, + Collections.singletonList(parser.objectText()))); } } else if (token == XContentParser.Token.START_OBJECT) { if (SourceFieldMapper.NAME.equals(currentFieldName)) { @@ -350,20 +353,20 @@ public static GetResult fromXContentEmbedded(XContentParser parser, String index } else if (FIELDS.equals(currentFieldName)) { while(parser.nextToken() != XContentParser.Token.END_OBJECT) { DocumentField getField = DocumentField.fromXContent(parser); - fields.put(getField.getName(), getField); + documentFields.put(getField.getName(), getField); } } else { parser.skipChildren(); // skip potential inner objects for forward compatibility } } else if (token == XContentParser.Token.START_ARRAY) { if (IgnoredFieldMapper.NAME.equals(currentFieldName)) { - fields.put(currentFieldName, new DocumentField(currentFieldName, parser.list())); + metaFields.put(currentFieldName, new DocumentField(currentFieldName, parser.list())); } else { parser.skipChildren(); // skip potential inner arrays for forward compatibility } } } - return new GetResult(index, type, id, seqNo, primaryTerm, version, found, source, fields); + return new GetResult(index, type, id, seqNo, primaryTerm, version, found, source, documentFields, metaFields); } public static GetResult fromXContent(XContentParser parser) throws IOException { @@ -379,6 +382,35 @@ public static GetResult readGetResult(StreamInput in) throws IOException { return result; } + private Map readFields(StreamInput in) throws IOException { + Map fields = null; + int size = in.readVInt(); + if (size == 0) { + fields = new HashMap<>(); + } else { + fields = new HashMap<>(size); + for (int i = 0; i < size; i++) { + DocumentField field = DocumentField.readDocumentField(in); + fields.put(field.getName(), field); + } + } + return fields; + } + + static void splitFieldsByMetadata(Map fields, Map outOther, + Map outMetadata) { + if (fields == null) { + return; + } + for (Map.Entry fieldEntry: fields.entrySet()) { + if (fieldEntry.getValue().isMetadataField()) { + outMetadata.put(fieldEntry.getKey(), fieldEntry.getValue()); + } else { + outOther.put(fieldEntry.getKey(), fieldEntry.getValue()); + } + } + } + @Override public void readFrom(StreamInput in) throws IOException { index = in.readString(); @@ -393,15 +425,14 @@ public void readFrom(StreamInput in) throws IOException { if (source.length() == 0) { source = null; } - int size = in.readVInt(); - if (size == 0) { - fields = emptyMap(); + if (in.getVersion().onOrAfter(Version.V_7_3_0)) { + documentFields = readFields(in); + metaFields = readFields(in); } else { - fields = new HashMap<>(size); - for (int i = 0; i < size; i++) { - DocumentField field = DocumentField.readDocumentField(in); - fields.put(field.getName(), field); - } + Map fields = readFields(in); + documentFields = new HashMap<>(); + metaFields = new HashMap<>(); + splitFieldsByMetadata(fields, documentFields, metaFields); } } } @@ -417,13 +448,22 @@ public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(exists); if (exists) { out.writeBytesReference(source); - if (fields == null) { - out.writeVInt(0); + if (out.getVersion().onOrAfter(Version.V_7_3_0)) { + writeFields(out, documentFields); + writeFields(out, metaFields); } else { - out.writeVInt(fields.size()); - for (DocumentField field : fields.values()) { - field.writeTo(out); - } + writeFields(out, this.getFields()); + } + } + } + + private void writeFields(StreamOutput out, Map fields) throws IOException { + if (fields == null) { + out.writeVInt(0); + } else { + out.writeVInt(fields.size()); + for (DocumentField field : fields.values()) { + field.writeTo(out); } } } @@ -444,13 +484,14 @@ public boolean equals(Object o) { Objects.equals(index, getResult.index) && Objects.equals(type, getResult.type) && Objects.equals(id, getResult.id) && - Objects.equals(fields, getResult.fields) && + Objects.equals(documentFields, getResult.documentFields) && + Objects.equals(metaFields, getResult.metaFields) && Objects.equals(sourceAsMap(), getResult.sourceAsMap()); } @Override public int hashCode() { - return Objects.hash(version, seqNo, primaryTerm, exists, index, type, id, fields, sourceAsMap()); + return Objects.hash(version, seqNo, primaryTerm, exists, index, type, id, documentFields, metaFields, sourceAsMap()); } @Override diff --git a/server/src/main/java/org/elasticsearch/index/get/ShardGetService.java b/server/src/main/java/org/elasticsearch/index/get/ShardGetService.java index 3c85fe40c5ba7..f77fc072c7062 100644 --- a/server/src/main/java/org/elasticsearch/index/get/ShardGetService.java +++ b/server/src/main/java/org/elasticsearch/index/get/ShardGetService.java @@ -118,7 +118,7 @@ public GetResult getForUpdate(String type, String id, long ifSeqNo, long ifPrima public GetResult get(Engine.GetResult engineGetResult, String id, String type, String[] fields, FetchSourceContext fetchSourceContext) { if (!engineGetResult.exists()) { - return new GetResult(shardId.getIndexName(), type, id, UNASSIGNED_SEQ_NO, UNASSIGNED_PRIMARY_TERM, -1, false, null, null); + return new GetResult(shardId.getIndexName(), type, id, UNASSIGNED_SEQ_NO, UNASSIGNED_PRIMARY_TERM, -1, false, null, null, null); } currentMetric.inc(); @@ -174,7 +174,7 @@ private GetResult innerGet(String type, String id, String[] gFields, boolean rea } if (get == null || get.exists() == false) { - return new GetResult(shardId.getIndexName(), type, id, UNASSIGNED_SEQ_NO, UNASSIGNED_PRIMARY_TERM, -1, false, null, null); + return new GetResult(shardId.getIndexName(), type, id, UNASSIGNED_SEQ_NO, UNASSIGNED_PRIMARY_TERM, -1, false, null, null, null); } try { @@ -187,7 +187,8 @@ private GetResult innerGet(String type, String id, String[] gFields, boolean rea private GetResult innerGetLoadFromStoredFields(String type, String id, String[] gFields, FetchSourceContext fetchSourceContext, Engine.GetResult get, MapperService mapperService) { - Map fields = null; + Map documentFields = null; + Map metaDataFields = null; BytesReference source = null; DocIdAndVersion docIdAndVersion = get.docIdAndVersion(); FieldsVisitor fieldVisitor = buildFieldsVisitors(gFields, fetchSourceContext); @@ -201,9 +202,14 @@ private GetResult innerGetLoadFromStoredFields(String type, String id, String[] if (!fieldVisitor.fields().isEmpty()) { fieldVisitor.postProcess(mapperService); - fields = new HashMap<>(fieldVisitor.fields().size()); + documentFields = new HashMap<>(); + metaDataFields = new HashMap<>(); for (Map.Entry> entry : fieldVisitor.fields().entrySet()) { - fields.put(entry.getKey(), new DocumentField(entry.getKey(), entry.getValue())); + if (MapperService.isMetadataField(entry.getKey())) { + metaDataFields.put(entry.getKey(), new DocumentField(entry.getKey(), entry.getValue())); + } else { + documentFields.put(entry.getKey(), new DocumentField(entry.getKey(), entry.getValue())); + } } } } @@ -240,7 +246,7 @@ private GetResult innerGetLoadFromStoredFields(String type, String id, String[] } return new GetResult(shardId.getIndexName(), type, id, get.docIdAndVersion().seqNo, get.docIdAndVersion().primaryTerm, - get.version(), get.exists(), source, fields); + get.version(), get.exists(), source, documentFields, metaDataFields); } private static FieldsVisitor buildFieldsVisitors(String[] fields, FetchSourceContext fetchSourceContext) { diff --git a/server/src/test/java/org/elasticsearch/action/explain/ExplainResponseTests.java b/server/src/test/java/org/elasticsearch/action/explain/ExplainResponseTests.java index 9f1ee08844b66..1b3b9a8afa9f4 100644 --- a/server/src/test/java/org/elasticsearch/action/explain/ExplainResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/explain/ExplainResponseTests.java @@ -70,7 +70,7 @@ protected ExplainResponse createTestInstance() { 0, 1, randomNonNegativeLong(), true, RandomObjects.randomSource(random()), - singletonMap(fieldName, new DocumentField(fieldName, values))); + singletonMap(fieldName, new DocumentField(fieldName, values)), null); return new ExplainResponse(index, type, id, exist, explanation, getResult); } @@ -87,7 +87,7 @@ public void testToXContent() throws IOException { Explanation explanation = Explanation.match(1.0f, "description", Collections.emptySet()); GetResult getResult = new GetResult(null, null, null, 0, 1, -1, true, new BytesArray("{ \"field1\" : " + "\"value1\", \"field2\":\"value2\"}"), singletonMap("field1", new DocumentField("field1", - singletonList("value1")))); + singletonList("value1"))), null); ExplainResponse response = new ExplainResponse(index, type, id, exist, explanation, getResult); XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON); diff --git a/server/src/test/java/org/elasticsearch/action/get/GetResponseTests.java b/server/src/test/java/org/elasticsearch/action/get/GetResponseTests.java index a215a47b89466..359a394b33806 100644 --- a/server/src/test/java/org/elasticsearch/action/get/GetResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/get/GetResponseTests.java @@ -94,14 +94,15 @@ public void testToXContent() { { GetResponse getResponse = new GetResponse(new GetResult("index", "type", "id", 0, 1, 1, true, new BytesArray("{ \"field1\" : " + "\"value1\", \"field2\":\"value2\"}"), Collections.singletonMap("field1", new DocumentField("field1", - Collections.singletonList("value1"))))); + Collections.singletonList("value1"))), null)); String output = Strings.toString(getResponse); assertEquals("{\"_index\":\"index\",\"_type\":\"type\",\"_id\":\"id\",\"_version\":1,\"_seq_no\":0,\"_primary_term\":1," + "\"found\":true,\"_source\":{ \"field1\" : \"value1\", \"field2\":\"value2\"},\"fields\":{\"field1\":[\"value1\"]}}", output); } { - GetResponse getResponse = new GetResponse(new GetResult("index", "type", "id", UNASSIGNED_SEQ_NO, 0, 1, false, null, null)); + GetResponse getResponse = new GetResponse(new GetResult("index", "type", "id", UNASSIGNED_SEQ_NO, + 0, 1, false, null, null, null)); String output = Strings.toString(getResponse); assertEquals("{\"_index\":\"index\",\"_type\":\"type\",\"_id\":\"id\",\"found\":false}", output); } @@ -110,7 +111,7 @@ public void testToXContent() { public void testToString() { GetResponse getResponse = new GetResponse(new GetResult("index", "type", "id", 0, 1, 1, true, new BytesArray("{ \"field1\" : " + "\"value1\", \"field2\":\"value2\"}"), - Collections.singletonMap("field1", new DocumentField("field1", Collections.singletonList("value1"))))); + Collections.singletonMap("field1", new DocumentField("field1", Collections.singletonList("value1"))), null)); assertEquals("{\"_index\":\"index\",\"_type\":\"type\",\"_id\":\"id\",\"_version\":1,\"_seq_no\":0,\"_primary_term\":1," + "\"found\":true,\"_source\":{ \"field1\" : \"value1\", \"field2\":\"value2\"},\"fields\":{\"field1\":[\"value1\"]}}", getResponse.toString()); @@ -123,7 +124,8 @@ public void testEqualsAndHashcode() { public void testFromXContentThrowsParsingException() throws IOException { GetResponse getResponse = - new GetResponse(new GetResult(null, null, null, UNASSIGNED_SEQ_NO, 0, randomIntBetween(1, 5), randomBoolean(), null, null)); + new GetResponse(new GetResult(null, null, null, UNASSIGNED_SEQ_NO, 0, randomIntBetween(1, 5), + randomBoolean(), null, null, null)); XContentType xContentType = randomFrom(XContentType.values()); BytesReference originalBytes = toShuffledXContent(getResponse, xContentType, ToXContent.EMPTY_PARAMS, randomBoolean()); diff --git a/server/src/test/java/org/elasticsearch/action/get/MultiGetResponseTests.java b/server/src/test/java/org/elasticsearch/action/get/MultiGetResponseTests.java index 101313f3001c6..8182e49049052 100644 --- a/server/src/test/java/org/elasticsearch/action/get/MultiGetResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/get/MultiGetResponseTests.java @@ -71,7 +71,7 @@ private static MultiGetResponse createTestInstance() { if (randomBoolean()) { items[i] = new MultiGetItemResponse(new GetResponse(new GetResult( randomAlphaOfLength(4), randomAlphaOfLength(4), randomAlphaOfLength(4), 0, 1, randomNonNegativeLong(), - true, null, null + true, null, null, null )), null); } else { items[i] = new MultiGetItemResponse(null, new MultiGetResponse.Failure(randomAlphaOfLength(4), diff --git a/server/src/test/java/org/elasticsearch/action/update/UpdateRequestTests.java b/server/src/test/java/org/elasticsearch/action/update/UpdateRequestTests.java index 642d14e2258cb..6549c3a8df5e1 100644 --- a/server/src/test/java/org/elasticsearch/action/update/UpdateRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/update/UpdateRequestTests.java @@ -359,7 +359,7 @@ public void testNowInScript() throws IOException { .scriptedUpsert(true); long nowInMillis = randomNonNegativeLong(); // We simulate that the document is not existing yet - GetResult getResult = new GetResult("test", "type1", "2", UNASSIGNED_SEQ_NO, 0, 0, false, null, null); + GetResult getResult = new GetResult("test", "type1", "2", UNASSIGNED_SEQ_NO, 0, 0, false, null, null, null); UpdateHelper.Result result = updateHelper.prepare(new ShardId("test", "_na_", 0), updateRequest, getResult, () -> nowInMillis); Streamable action = result.action(); assertThat(action, instanceOf(IndexRequest.class)); @@ -372,7 +372,7 @@ public void testNowInScript() throws IOException { .script(mockInlineScript("ctx._timestamp = ctx._now")) .scriptedUpsert(true); // We simulate that the document is not existing yet - GetResult getResult = new GetResult("test", "type1", "2", 0, 1, 0, true, new BytesArray("{}"), null); + GetResult getResult = new GetResult("test", "type1", "2", 0, 1, 0, true, new BytesArray("{}"), null, null); UpdateHelper.Result result = updateHelper.prepare(new ShardId("test", "_na_", 0), updateRequest, getResult, () -> 42L); Streamable action = result.action(); assertThat(action, instanceOf(IndexRequest.class)); @@ -381,7 +381,7 @@ public void testNowInScript() throws IOException { public void testIndexTimeout() { final GetResult getResult = - new GetResult("test", "type", "1", 0, 1, 0, true, new BytesArray("{\"f\":\"v\"}"), null); + new GetResult("test", "type", "1", 0, 1, 0, true, new BytesArray("{\"f\":\"v\"}"), null, null); final UpdateRequest updateRequest = new UpdateRequest("test", "type", "1") .script(mockInlineScript("return")) @@ -391,7 +391,7 @@ public void testIndexTimeout() { public void testDeleteTimeout() { final GetResult getResult = - new GetResult("test", "type", "1", 0, 1, 0, true, new BytesArray("{\"f\":\"v\"}"), null); + new GetResult("test", "type", "1", 0, 1, 0, true, new BytesArray("{\"f\":\"v\"}"), null, null); final UpdateRequest updateRequest = new UpdateRequest("test", "type", "1") .script(mockInlineScript("ctx.op = delete")) @@ -402,7 +402,7 @@ public void testDeleteTimeout() { public void testUpsertTimeout() throws IOException { final boolean exists = randomBoolean(); final BytesReference source = exists ? new BytesArray("{\"f\":\"v\"}") : null; - final GetResult getResult = new GetResult("test", "type", "1", UNASSIGNED_SEQ_NO, 0, 0, exists, source, null); + final GetResult getResult = new GetResult("test", "type", "1", UNASSIGNED_SEQ_NO, 0, 0, exists, source, null, null); final XContentBuilder sourceBuilder = jsonBuilder(); sourceBuilder.startObject(); { @@ -546,7 +546,7 @@ public void testValidate() { } public void testRoutingExtraction() throws Exception { - GetResult getResult = new GetResult("test", "type", "1", UNASSIGNED_SEQ_NO, 0, 0, false, null, null); + GetResult getResult = new GetResult("test", "type", "1", UNASSIGNED_SEQ_NO, 0, 0, false, null, null, null); IndexRequest indexRequest = new IndexRequest("test", "type", "1"); // There is no routing and parent because the document doesn't exist @@ -556,7 +556,7 @@ public void testRoutingExtraction() throws Exception { assertNull(UpdateHelper.calculateRouting(getResult, indexRequest)); // Doc exists but has no source or fields - getResult = new GetResult("test", "type", "1", 0, 1, 0, true, null, null); + getResult = new GetResult("test", "type", "1", 0, 1, 0, true, null, null, null); // There is no routing and parent on either request assertNull(UpdateHelper.calculateRouting(getResult, indexRequest)); @@ -565,7 +565,7 @@ public void testRoutingExtraction() throws Exception { fields.put("_routing", new DocumentField("_routing", Collections.singletonList("routing1"))); // Doc exists and has the parent and routing fields - getResult = new GetResult("test", "type", "1", 0, 1, 0, true, null, fields); + getResult = new GetResult("test", "type", "1", 0, 1, 0, true, null, fields, null); // Use the get result parent and routing assertThat(UpdateHelper.calculateRouting(getResult, indexRequest), equalTo("routing1")); @@ -575,7 +575,7 @@ public void testNoopDetection() throws Exception { ShardId shardId = new ShardId("test", "", 0); GetResult getResult = new GetResult("test", "type", "1", 0, 1, 0, true, new BytesArray("{\"body\": \"foo\"}"), - null); + null, null); UpdateRequest request = new UpdateRequest("test", "type1", "1").fromXContent( createParser(JsonXContent.jsonXContent, new BytesArray("{\"doc\": {\"body\": \"foo\"}}"))); @@ -606,7 +606,7 @@ public void testUpdateScript() throws Exception { ShardId shardId = new ShardId("test", "", 0); GetResult getResult = new GetResult("test", "type", "1", 0, 1, 0, true, new BytesArray("{\"body\": \"bar\"}"), - null); + null, null); UpdateRequest request = new UpdateRequest("test", "type1", "1") .script(mockInlineScript("ctx._source.body = \"foo\"")); diff --git a/server/src/test/java/org/elasticsearch/action/update/UpdateResponseTests.java b/server/src/test/java/org/elasticsearch/action/update/UpdateResponseTests.java index 8ec0423b40699..babad0276917d 100644 --- a/server/src/test/java/org/elasticsearch/action/update/UpdateResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/update/UpdateResponseTests.java @@ -74,7 +74,7 @@ public void testToXContent() throws IOException { UpdateResponse updateResponse = new UpdateResponse(new ReplicationResponse.ShardInfo(3, 2), new ShardId("books", "books_uuid", 2), "book", "1", 7, 17, 2, UPDATED); - updateResponse.setGetResult(new GetResult("books", "book", "1",0, 1, 2, true, source, fields)); + updateResponse.setGetResult(new GetResult("books", "book", "1",0, 1, 2, true, source, fields, null)); String output = Strings.toString(updateResponse); assertEquals("{\"_index\":\"books\",\"_type\":\"book\",\"_id\":\"1\",\"_version\":2,\"result\":\"updated\"," + diff --git a/server/src/test/java/org/elasticsearch/index/get/GetResultTests.java b/server/src/test/java/org/elasticsearch/index/get/GetResultTests.java index ad8673d13ea6b..5758fb5bcb971 100644 --- a/server/src/test/java/org/elasticsearch/index/get/GetResultTests.java +++ b/server/src/test/java/org/elasticsearch/index/get/GetResultTests.java @@ -76,14 +76,15 @@ public void testToXContent() throws IOException { { GetResult getResult = new GetResult("index", "type", "id", 0, 1, 1, true, new BytesArray("{ \"field1\" : " + "\"value1\", \"field2\":\"value2\"}"), singletonMap("field1", new DocumentField("field1", - singletonList("value1")))); + singletonList("value1"))), singletonMap("field1", new DocumentField("metafield", + singletonList("metavalue")))); String output = Strings.toString(getResult); assertEquals("{\"_index\":\"index\",\"_type\":\"type\",\"_id\":\"id\",\"_version\":1,\"_seq_no\":0,\"_primary_term\":1," + - "\"found\":true,\"_source\":{ \"field1\" : \"value1\", \"field2\":\"value2\"},\"fields\":{\"field1\":[\"value1\"]}}", - output); + "\"metafield\":\"metavalue\",\"found\":true,\"_source\":{ \"field1\" : \"value1\", \"field2\":\"value2\"}," + + "\"fields\":{\"field1\":[\"value1\"]}}", output); } { - GetResult getResult = new GetResult("index", "type", "id", UNASSIGNED_SEQ_NO, 0, 1, false, null, null); + GetResult getResult = new GetResult("index", "type", "id", UNASSIGNED_SEQ_NO, 0, 1, false, null, null, null); String output = Strings.toString(getResult); assertEquals("{\"_index\":\"index\",\"_type\":\"type\",\"_id\":\"id\",\"found\":false}", output); } @@ -96,7 +97,8 @@ public void testToAndFromXContentEmbedded() throws Exception { // We don't expect to retrieve the index/type/id of the GetResult because they are not rendered // by the toXContentEmbedded method. GetResult expectedGetResult = new GetResult(null, null, null, tuple.v2().getSeqNo(), tuple.v2().getPrimaryTerm(), -1, - tuple.v2().isExists(), tuple.v2().sourceRef(), tuple.v2().getFields()); + tuple.v2().isExists(), tuple.v2().sourceRef(), tuple.v2().getDocumentFields(), + tuple.v2().getMetadataFields()); boolean humanReadable = randomBoolean(); BytesReference originalBytes = toXContentEmbedded(getResult, xContentType, humanReadable); @@ -122,7 +124,7 @@ public void testToXContentEmbedded() throws IOException { fields.put("baz", new DocumentField("baz", Arrays.asList("baz_0", "baz_1"))); GetResult getResult = new GetResult("index", "type", "id", 0, 1, 2, true, - new BytesArray("{\"foo\":\"bar\",\"baz\":[\"baz_0\",\"baz_1\"]}"), fields); + new BytesArray("{\"foo\":\"bar\",\"baz\":[\"baz_0\",\"baz_1\"]}"), fields, null); BytesReference originalBytes = toXContentEmbedded(getResult, XContentType.JSON, false); assertEquals("{\"_seq_no\":0,\"_primary_term\":1,\"found\":true,\"_source\":{\"foo\":\"bar\",\"baz\":[\"baz_0\",\"baz_1\"]}," + @@ -130,7 +132,7 @@ public void testToXContentEmbedded() throws IOException { } public void testToXContentEmbeddedNotFound() throws IOException { - GetResult getResult = new GetResult("index", "type", "id", UNASSIGNED_SEQ_NO, 0, 1, false, null, null); + GetResult getResult = new GetResult("index", "type", "id", UNASSIGNED_SEQ_NO, 0, 1, false, null, null, null); BytesReference originalBytes = toXContentEmbedded(getResult, XContentType.JSON, false); assertEquals("{\"found\":false}", originalBytes.utf8ToString()); @@ -154,33 +156,33 @@ public void testEqualsAndHashcode() { public static GetResult copyGetResult(GetResult getResult) { return new GetResult(getResult.getIndex(), getResult.getType(), getResult.getId(), getResult.getSeqNo(), getResult.getPrimaryTerm(), getResult.getVersion(), - getResult.isExists(), getResult.internalSourceRef(), getResult.getFields()); + getResult.isExists(), getResult.internalSourceRef(), getResult.getDocumentFields(), getResult.getMetadataFields()); } public static GetResult mutateGetResult(GetResult getResult) { List> mutations = new ArrayList<>(); mutations.add(() -> new GetResult(randomUnicodeOfLength(15), getResult.getType(), getResult.getId(), getResult.getSeqNo(), getResult.getPrimaryTerm(), getResult.getVersion(), - getResult.isExists(), getResult.internalSourceRef(), getResult.getFields())); + getResult.isExists(), getResult.internalSourceRef(), getResult.getFields(), null)); mutations.add(() -> new GetResult(getResult.getIndex(), randomUnicodeOfLength(15), getResult.getId(), getResult.getSeqNo(), getResult.getPrimaryTerm(), getResult.getVersion(), - getResult.isExists(), getResult.internalSourceRef(), getResult.getFields())); + getResult.isExists(), getResult.internalSourceRef(), getResult.getFields(), null)); mutations.add(() -> new GetResult(getResult.getIndex(), getResult.getType(), randomUnicodeOfLength(15), getResult.getSeqNo(), getResult.getPrimaryTerm(), getResult.getVersion(), - getResult.isExists(), getResult.internalSourceRef(), getResult.getFields())); + getResult.isExists(), getResult.internalSourceRef(), getResult.getFields(), null)); mutations.add(() -> new GetResult(getResult.getIndex(), getResult.getType(), getResult.getId(), getResult.getSeqNo(), getResult.getPrimaryTerm(), randomNonNegativeLong(), - getResult.isExists(), getResult.internalSourceRef(), getResult.getFields())); + getResult.isExists(), getResult.internalSourceRef(), getResult.getFields(), null)); mutations.add(() -> new GetResult(getResult.getIndex(), getResult.getType(), getResult.getId(), getResult.isExists() ? UNASSIGNED_SEQ_NO : getResult.getSeqNo(), getResult.isExists() ? 0 : getResult.getPrimaryTerm(), - getResult.getVersion(), getResult.isExists() == false, getResult.internalSourceRef(), getResult.getFields())); + getResult.getVersion(), getResult.isExists() == false, getResult.internalSourceRef(), getResult.getFields(), null)); mutations.add(() -> new GetResult(getResult.getIndex(), getResult.getType(), getResult.getId(), getResult.getSeqNo(), getResult.getPrimaryTerm(), getResult.getVersion(), getResult.isExists(), - RandomObjects.randomSource(random()), getResult.getFields())); + RandomObjects.randomSource(random()), getResult.getFields(), null)); mutations.add(() -> new GetResult(getResult.getIndex(), getResult.getType(), getResult.getId(), getResult.getSeqNo(), getResult.getPrimaryTerm(), getResult.getVersion(), - getResult.isExists(), getResult.internalSourceRef(), randomDocumentFields(XContentType.JSON).v1())); + getResult.isExists(), getResult.internalSourceRef(), randomDocumentFields(XContentType.JSON).v1(), null)); return randomFrom(mutations).get(); } @@ -195,6 +197,8 @@ public static Tuple randomGetResult(XContentType xContentT BytesReference source = null; Map fields = null; Map expectedFields = null; + Map metaFields = null; + Map expectedMetaFields = null; if (frequently()) { version = randomNonNegativeLong(); seqNo = randomNonNegativeLong(); @@ -205,8 +209,13 @@ public static Tuple randomGetResult(XContentType xContentT } if (randomBoolean()) { Tuple, Map> tuple = randomDocumentFields(xContentType); - fields = tuple.v1(); - expectedFields = tuple.v2(); + fields = new HashMap<>(); + metaFields = new HashMap<>(); + GetResult.splitFieldsByMetadata(tuple.v1(), fields, metaFields); + + expectedFields = new HashMap<>(); + expectedMetaFields = new HashMap<>(); + GetResult.splitFieldsByMetadata(tuple.v2(), expectedFields, expectedMetaFields); } } else { seqNo = UNASSIGNED_SEQ_NO; @@ -214,8 +223,9 @@ public static Tuple randomGetResult(XContentType xContentT version = -1; exists = false; } - GetResult getResult = new GetResult(index, type, id, seqNo, primaryTerm, version, exists, source, fields); - GetResult expectedGetResult = new GetResult(index, type, id, seqNo, primaryTerm, version, exists, source, expectedFields); + GetResult getResult = new GetResult(index, type, id, seqNo, primaryTerm, version, exists, source, fields, metaFields); + GetResult expectedGetResult = new GetResult(index, type, id, seqNo, primaryTerm, version, exists, source, + expectedFields, expectedMetaFields); return Tuple.tuple(getResult, expectedGetResult); } diff --git a/server/src/test/java/org/elasticsearch/index/query/GeoShapeQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/GeoShapeQueryBuilderTests.java index 4851387b1a497..62cc7a43cd2c2 100644 --- a/server/src/test/java/org/elasticsearch/index/query/GeoShapeQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/GeoShapeQueryBuilderTests.java @@ -151,7 +151,7 @@ protected GetResponse executeGet(GetRequest getRequest) { throw new ElasticsearchException("boom", ex); } return new GetResponse(new GetResult(indexedShapeIndex, indexedType, indexedShapeId, 0, 1, 0, true, new BytesArray(json), - null)); + null, null)); } @After diff --git a/server/src/test/java/org/elasticsearch/index/query/TermsQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/TermsQueryBuilderTests.java index a9080c688f64f..40e32b91d7e55 100644 --- a/server/src/test/java/org/elasticsearch/index/query/TermsQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/TermsQueryBuilderTests.java @@ -210,7 +210,7 @@ public GetResponse executeGet(GetRequest getRequest) { throw new ElasticsearchException("boom", ex); } return new GetResponse(new GetResult(getRequest.index(), getRequest.type(), getRequest.id(), 0, 1, 0, true, - new BytesArray(json), null)); + new BytesArray(json), null, null)); } public void testNumeric() throws IOException { diff --git a/server/src/test/java/org/elasticsearch/rest/action/document/RestGetSourceActionTests.java b/server/src/test/java/org/elasticsearch/rest/action/document/RestGetSourceActionTests.java index f012c1393c9ad..53d78c7d03e84 100644 --- a/server/src/test/java/org/elasticsearch/rest/action/document/RestGetSourceActionTests.java +++ b/server/src/test/java/org/elasticsearch/rest/action/document/RestGetSourceActionTests.java @@ -96,7 +96,7 @@ public void testTypeParameter() { public void testRestGetSourceAction() throws Exception { final BytesReference source = new BytesArray("{\"foo\": \"bar\"}"); final GetResponse response = - new GetResponse(new GetResult("index1", "_doc", "1", UNASSIGNED_SEQ_NO, 0, -1, true, source, emptyMap())); + new GetResponse(new GetResult("index1", "_doc", "1", UNASSIGNED_SEQ_NO, 0, -1, true, source, emptyMap(), null)); final RestResponse restResponse = listener.buildResponse(response); @@ -107,7 +107,7 @@ public void testRestGetSourceAction() throws Exception { public void testRestGetSourceActionWithMissingDocument() { final GetResponse response = - new GetResponse(new GetResult("index1", "_doc", "1", UNASSIGNED_SEQ_NO, 0, -1, false, null, emptyMap())); + new GetResponse(new GetResult("index1", "_doc", "1", UNASSIGNED_SEQ_NO, 0, -1, false, null, emptyMap(), null)); final ResourceNotFoundException exception = expectThrows(ResourceNotFoundException.class, () -> listener.buildResponse(response)); @@ -116,7 +116,7 @@ public void testRestGetSourceActionWithMissingDocument() { public void testRestGetSourceActionWithMissingDocumentSource() { final GetResponse response = - new GetResponse(new GetResult("index1", "_doc", "1", UNASSIGNED_SEQ_NO, 0, -1, true, null, emptyMap())); + new GetResponse(new GetResult("index1", "_doc", "1", UNASSIGNED_SEQ_NO, 0, -1, true, null, emptyMap(), null)); final ResourceNotFoundException exception = expectThrows(ResourceNotFoundException.class, () -> listener.buildResponse(response)); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/AuthenticationServiceTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/AuthenticationServiceTests.java index 67ce5ce2b27af..d8a7d9447946b 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/AuthenticationServiceTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/AuthenticationServiceTests.java @@ -1249,11 +1249,12 @@ public void testApiKeyAuth() { creatorMap.put("realm", "auth realm"); source.put("creator", creatorMap); GetResponse getResponse = new GetResponse(new GetResult(request.index(), request.type(), request.id(), 0, 1, 1L, true, - BytesReference.bytes(JsonXContent.contentBuilder().map(source)), Collections.emptyMap())); + BytesReference.bytes(JsonXContent.contentBuilder().map(source)), Collections.emptyMap(), Collections.emptyMap())); listener.onResponse(getResponse); } else { listener.onResponse(new GetResponse(new GetResult(request.index(), request.type(), request.id(), - SequenceNumbers.UNASSIGNED_SEQ_NO, 1, -1L, false, null, Collections.emptyMap()))); + SequenceNumbers.UNASSIGNED_SEQ_NO, 1, -1L, false, null, + Collections.emptyMap(), Collections.emptyMap()))); } return Void.TYPE; }).when(client).get(any(GetRequest.class), any(ActionListener.class)); @@ -1288,11 +1289,12 @@ public void testExpiredApiKey() { creatorMap.put("realm", "auth realm"); source.put("creator", creatorMap); GetResponse getResponse = new GetResponse(new GetResult(request.index(), request.type(), request.id(), 0, 1, 1L, true, - BytesReference.bytes(JsonXContent.contentBuilder().map(source)), Collections.emptyMap())); + BytesReference.bytes(JsonXContent.contentBuilder().map(source)), Collections.emptyMap(), Collections.emptyMap())); listener.onResponse(getResponse); } else { listener.onResponse(new GetResponse(new GetResult(request.index(), request.type(), request.id(), - SequenceNumbers.UNASSIGNED_SEQ_NO, 1, -1L, false, null, Collections.emptyMap()))); + SequenceNumbers.UNASSIGNED_SEQ_NO, 1, -1L, false, null, + Collections.emptyMap(), Collections.emptyMap()))); } return Void.TYPE; }).when(client).get(any(GetRequest.class), any(ActionListener.class)); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/NativeUsersStoreTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/NativeUsersStoreTests.java index 4cbf5307d3ed6..53eb3fc0bdbc4 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/NativeUsersStoreTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/NativeUsersStoreTests.java @@ -118,6 +118,7 @@ public void testBlankPasswordInIndexImpliesDefaultPassword() throws Exception { 0, 1, 1L, true, BytesReference.bytes(jsonBuilder().map(values)), + Collections.emptyMap(), Collections.emptyMap()); final PlainActionFuture future = new PlainActionFuture<>(); @@ -187,6 +188,7 @@ public void testVerifyNonExistentUser() throws Exception { UNASSIGNED_SEQ_NO, 0, 1L, false, null, + Collections.emptyMap(), Collections.emptyMap()); actionRespond(GetRequest.class, new GetResponse(getResult)); @@ -229,6 +231,7 @@ private void respondToGetUserRequest(String username, SecureString password, Str 0, 1, 1L, true, source, + Collections.emptyMap(), Collections.emptyMap()); actionRespond(GetRequest.class, new GetResponse(getResult)); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/NativePrivilegeStoreTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/NativePrivilegeStoreTests.java index 7f7a262131bb2..d50663b9d7cab 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/NativePrivilegeStoreTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/NativePrivilegeStoreTests.java @@ -132,7 +132,8 @@ public void testGetSinglePrivilegeByName() throws Exception { final String docSource = Strings.toString(sourcePrivilege); listener.get().onResponse(new GetResponse( - new GetResult(request.index(), request.type(), request.id(), 0, 1, 1L, true, new BytesArray(docSource), emptyMap()) + new GetResult(request.index(), request.type(), request.id(), 0, 1, 1L, true, + new BytesArray(docSource), emptyMap(), emptyMap()) )); final ApplicationPrivilegeDescriptor getPrivilege = future.get(1, TimeUnit.SECONDS); assertThat(getPrivilege, equalTo(sourcePrivilege)); @@ -149,7 +150,8 @@ public void testGetMissingPrivilege() throws Exception { assertThat(request.id(), equalTo("application-privilege_myapp:admin")); listener.get().onResponse(new GetResponse( - new GetResult(request.index(), request.type(), request.id(), UNASSIGNED_SEQ_NO, 0, -1, false, null, emptyMap()) + new GetResult(request.index(), request.type(), request.id(), UNASSIGNED_SEQ_NO, 0, -1, + false, null, emptyMap(), emptyMap()) )); final ApplicationPrivilegeDescriptor getPrivilege = future.get(1, TimeUnit.SECONDS); assertThat(getPrivilege, Matchers.nullValue()); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/test/SecurityMocks.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/test/SecurityMocks.java index 3476a3d7c00a3..20108b0114933 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/test/SecurityMocks.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/test/SecurityMocks.java @@ -65,7 +65,8 @@ public static SecurityIndexManager mockSecurityIndexManager(boolean exists, bool } public static void mockGetRequest(Client client, String documentId, BytesReference source) { - GetResult result = new GetResult(SECURITY_MAIN_ALIAS, SINGLE_MAPPING_NAME, documentId, 0, 1, 1, true, source, emptyMap()); + GetResult result = new GetResult(SECURITY_MAIN_ALIAS, SINGLE_MAPPING_NAME, documentId, 0, 1, 1, true, source, + emptyMap(), emptyMap()); mockGetRequest(client, documentId, result); } diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/execution/ExecutionServiceTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/execution/ExecutionServiceTests.java index 80cb657a5762e..fd06045204710 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/execution/ExecutionServiceTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/execution/ExecutionServiceTests.java @@ -1217,7 +1217,8 @@ private void mockGetWatchResponse(Client client, String id, GetResponse response listener.onResponse(response); } else { GetResult notFoundResult = - new GetResult(request.index(), request.type(), request.id(), UNASSIGNED_SEQ_NO, 0, -1, false, null, null); + new GetResult(request.index(), request.type(), request.id(), UNASSIGNED_SEQ_NO, 0, + -1, false, null, null, null); listener.onResponse(new GetResponse(notFoundResult)); } return null; @@ -1232,7 +1233,8 @@ private void mockGetWatchException(Client client, String id, Exception e) { listener.onFailure(e); } else { GetResult notFoundResult = - new GetResult(request.index(), request.type(), request.id(), UNASSIGNED_SEQ_NO, 0, -1, false, null, null); + new GetResult(request.index(), request.type(), request.id(), UNASSIGNED_SEQ_NO, 0, -1, + false, null, null, null); listener.onResponse(new GetResponse(notFoundResult)); } return null; diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/transport/actions/ack/TransportAckWatchActionTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/transport/actions/ack/TransportAckWatchActionTests.java index 726a46799d401..0f7d64527fe26 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/transport/actions/ack/TransportAckWatchActionTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/transport/actions/ack/TransportAckWatchActionTests.java @@ -71,7 +71,7 @@ public void testWatchNotFound() { doAnswer(invocation -> { ActionListener listener = (ActionListener) invocation.getArguments()[1]; listener.onResponse(new GetResponse(new GetResult(Watch.INDEX, MapperService.SINGLE_MAPPING_NAME, watchId, UNASSIGNED_SEQ_NO, - 0, -1, false, BytesArray.EMPTY, Collections.emptyMap()))); + 0, -1, false, BytesArray.EMPTY, Collections.emptyMap(), Collections.emptyMap()))); return null; }).when(client).get(anyObject(), anyObject()); From 81f3b5d4c288cee50d4484d77c3259a84b7a9562 Mon Sep 17 00:00:00 2001 From: Tanguy Leroux Date: Thu, 23 May 2019 20:18:34 +0200 Subject: [PATCH 076/224] Improve Close Index Response (#39687) This changes the `CloseIndexResponse` so that it reports closing result for each index. Shard failures or exception are also reported per index, and the global acknowledgment flag is computed from the index results only. The response looks like: ``` { "acknowledged" : true, "shards_acknowledged" : true, "indices" : { "docs" : { "closed" : true } } } ``` The response reports shard failures like: ``` { "acknowledged" : false, "shards_acknowledged" : false, "indices" : { "docs-1" : { "closed" : true }, "docs-2" : { "closed" : false, "shards" : { "1" : { "failures" : [ { "shard" : 1, "index" : "docs-2", "status" : "BAD_REQUEST", "reason" : { "type" : "index_closed_exception", "reason" : "closed", "index_uuid" : "JFmQwr_aSPiZbkAH_KEF7A", "index" : "docs-2" } } ] } } }, "docs-3" : { "closed" : true } } } ``` Co-authored-by: Tanguy Leroux --- .../test/indices.open/10_basic.yml | 37 +++ .../indices/close/CloseIndexResponse.java | 247 +++++++++++++++++- .../close/TransportCloseIndexAction.java | 4 +- .../metadata/MetaDataIndexStateService.java | 97 +++---- .../close/CloseIndexResponseTests.java | 139 +++++++++- .../MetaDataIndexStateServiceTests.java | 62 ++++- .../MetaDataIndexStateServiceUtils.java | 6 +- .../indices/cluster/ClusterStateChanges.java | 6 +- .../indices/state/CloseIndexIT.java | 52 +++- .../CloseFollowerIndexStepTests.java | 2 +- 10 files changed, 578 insertions(+), 74 deletions(-) diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.open/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.open/10_basic.yml index a8ab29d9feb97..8bc8ce6c4c871 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.open/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.open/10_basic.yml @@ -79,3 +79,40 @@ - is_true: acknowledged - match: { acknowledged: true } - match: { shards_acknowledged: true } +--- +"Close index response with result per index": + - skip: + version: " - 7.99.99" + reason: "close index response reports result per index starting version 8.0.0" + + - do: + indices.create: + index: index_1 + body: + settings: + number_of_replicas: 0 + + - do: + indices.create: + index: index_2 + body: + settings: + number_of_replicas: 0 + + - do: + indices.create: + index: index_3 + body: + settings: + number_of_replicas: 0 + + - do: + indices.close: + index: "index_*" + + - match: { acknowledged: true } + - match: { shards_acknowledged: true } + - match: { indices.index_1.closed: true } + - match: { indices.index_2.closed: true } + - match: { indices.index_3.closed: true } + diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/close/CloseIndexResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/close/CloseIndexResponse.java index ea44ba7a8e46b..ea7d14655c594 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/close/CloseIndexResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/close/CloseIndexResponse.java @@ -18,20 +18,40 @@ */ package org.elasticsearch.action.admin.indices.close; +import org.elasticsearch.ElasticsearchException; import org.elasticsearch.Version; +import org.elasticsearch.action.support.DefaultShardOperationFailedException; import org.elasticsearch.action.support.master.ShardsAcknowledgedResponse; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.ToXContentFragment; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.index.Index; import java.io.IOException; +import java.util.List; +import java.util.Objects; + +import static java.util.Collections.emptyList; +import static java.util.Collections.unmodifiableList; public class CloseIndexResponse extends ShardsAcknowledgedResponse { + private List indices; + CloseIndexResponse() { } - public CloseIndexResponse(final boolean acknowledged, final boolean shardsAcknowledged) { + public CloseIndexResponse(final boolean acknowledged, final boolean shardsAcknowledged, final List indices) { super(acknowledged, shardsAcknowledged); + this.indices = unmodifiableList(Objects.requireNonNull(indices)); + } + + public List getIndices() { + return indices; } @Override @@ -40,6 +60,11 @@ public void readFrom(StreamInput in) throws IOException { if (in.getVersion().onOrAfter(Version.V_7_2_0)) { readShardsAcknowledged(in); } + if (in.getVersion().onOrAfter(Version.V_8_0_0)) { + indices = unmodifiableList(in.readList(IndexResult::new)); + } else { + indices = unmodifiableList(emptyList()); + } } @Override @@ -48,5 +73,225 @@ public void writeTo(StreamOutput out) throws IOException { if (out.getVersion().onOrAfter(Version.V_7_2_0)) { writeShardsAcknowledged(out); } + if (out.getVersion().onOrAfter(Version.V_8_0_0)) { + out.writeList(indices); + } + } + + protected void addCustomFields(final XContentBuilder builder, final Params params) throws IOException { + super.addCustomFields(builder, params); + builder.startObject("indices"); + for (IndexResult index : indices) { + index.toXContent(builder, params); + } + builder.endObject(); + } + + @Override + public String toString() { + return Strings.toString(this); + } + + public static class IndexResult implements Writeable, ToXContentFragment { + + private final Index index; + private final @Nullable Exception exception; + private final @Nullable ShardResult[] shards; + + public IndexResult(final Index index) { + this(index, null, null); + } + + public IndexResult(final Index index, final Exception failure) { + this(index, Objects.requireNonNull(failure), null); + } + + public IndexResult(final Index index, final ShardResult[] shards) { + this(index, null, Objects.requireNonNull(shards)); + } + + private IndexResult(final Index index, @Nullable final Exception exception, @Nullable final ShardResult[] shards) { + this.index = Objects.requireNonNull(index); + this.exception = exception; + this.shards = shards; + } + + IndexResult(final StreamInput in) throws IOException { + this.index = new Index(in); + this.exception = in.readException(); + this.shards = in.readOptionalArray(ShardResult::new, ShardResult[]::new); + } + + @Override + public void writeTo(final StreamOutput out) throws IOException { + index.writeTo(out); + out.writeException(exception); + out.writeOptionalArray(shards); + } + + public Index getIndex() { + return index; + } + + public Exception getException() { + return exception; + } + + public ShardResult[] getShards() { + return shards; + } + + public boolean hasFailures() { + if (exception != null) { + return true; + } + if (shards != null) { + for (ShardResult shard : shards) { + if (shard.hasFailures()) { + return true; + } + } + } + return false; + } + + @Override + public XContentBuilder toXContent(final XContentBuilder builder, final Params params) throws IOException { + builder.startObject(index.getName()); + { + if (hasFailures()) { + builder.field("closed", false); + if (exception != null) { + builder.startObject("exception"); + ElasticsearchException.generateFailureXContent(builder, params, exception, true); + builder.endObject(); + } else { + builder.startObject("failedShards"); + for (ShardResult shard : shards) { + if (shard.hasFailures()) { + shard.toXContent(builder, params); + } + } + builder.endObject(); + } + } else { + builder.field("closed", true); + } + } + return builder.endObject(); + } + + @Override + public String toString() { + return Strings.toString(this); + } + } + + public static class ShardResult implements Writeable, ToXContentFragment { + + private final int id; + private final ShardResult.Failure[] failures; + + public ShardResult(final int id, final Failure[] failures) { + this.id = id; + this.failures = failures; + } + + ShardResult(final StreamInput in) throws IOException { + this.id = in.readVInt(); + this.failures = in.readOptionalArray(Failure::readFailure, ShardResult.Failure[]::new); + } + + @Override + public void writeTo(final StreamOutput out) throws IOException { + out.writeVInt(id); + out.writeOptionalArray(failures); + } + + public boolean hasFailures() { + return failures != null && failures.length > 0; + } + + public int getId() { + return id; + } + + public Failure[] getFailures() { + return failures; + } + + @Override + public XContentBuilder toXContent(final XContentBuilder builder, final Params params) throws IOException { + builder.startObject(String.valueOf(id)); + { + builder.startArray("failures"); + if (failures != null) { + for (Failure failure : failures) { + builder.startObject(); + failure.toXContent(builder, params); + builder.endObject(); + } + } + builder.endArray(); + } + return builder.endObject(); + } + + @Override + public String toString() { + return Strings.toString(this); + } + + public static class Failure extends DefaultShardOperationFailedException implements Writeable { + + private @Nullable String nodeId; + + private Failure() { + } + + public Failure(final String index, final int shardId, final Throwable reason) { + this(index, shardId, reason, null); + } + + public Failure(final String index, final int shardId, final Throwable reason, final String nodeId) { + super(index, shardId, reason); + this.nodeId = nodeId; + } + + public String getNodeId() { + return nodeId; + } + + @Override + public void readFrom(final StreamInput in) throws IOException { + super.readFrom(in); + nodeId = in.readOptionalString(); + } + + @Override + public void writeTo(final StreamOutput out) throws IOException { + super.writeTo(out); + out.writeOptionalString(nodeId); + } + + @Override + public XContentBuilder toXContent(final XContentBuilder builder, final Params params) throws IOException { + if (nodeId != null) { + builder.field("node", nodeId); + } + return super.toXContent(builder, params); + } + + @Override + public String toString() { + return Strings.toString(this); + } + + static Failure readFailure(final StreamInput in) throws IOException { + final Failure failure = new Failure(); + failure.readFrom(in); + return failure; + } + } } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/close/TransportCloseIndexAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/close/TransportCloseIndexAction.java index a6f4b6f3d0c4a..3c231d13845b2 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/close/TransportCloseIndexAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/close/TransportCloseIndexAction.java @@ -40,6 +40,8 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; +import java.util.Collections; + /** * Close index action */ @@ -109,7 +111,7 @@ protected void masterOperation(final Task task, final ActionListener listener) throws Exception { final Index[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, request); if (concreteIndices == null || concreteIndices.length == 0) { - listener.onResponse(new CloseIndexResponse(true, false)); + listener.onResponse(new CloseIndexResponse(true, false, Collections.emptyList())); return; } diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexStateService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexStateService.java index 80be71dadd3d6..ef4583e98e544 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexStateService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexStateService.java @@ -29,10 +29,11 @@ import org.elasticsearch.action.NotifyOnceListener; import org.elasticsearch.action.admin.indices.close.CloseIndexClusterStateUpdateRequest; import org.elasticsearch.action.admin.indices.close.CloseIndexResponse; +import org.elasticsearch.action.admin.indices.close.CloseIndexResponse.IndexResult; +import org.elasticsearch.action.admin.indices.close.CloseIndexResponse.ShardResult; import org.elasticsearch.action.admin.indices.close.TransportVerifyShardBeforeCloseAction; import org.elasticsearch.action.admin.indices.open.OpenIndexClusterStateUpdateRequest; import org.elasticsearch.action.support.ActiveShardsObserver; -import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.action.support.replication.ReplicationResponse; import org.elasticsearch.cluster.AckedClusterStateUpdateTask; import org.elasticsearch.cluster.ClusterState; @@ -52,6 +53,7 @@ import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.ValidationException; import org.elasticsearch.common.collect.ImmutableOpenIntMap; +import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; @@ -72,6 +74,8 @@ import java.util.ArrayList; import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; import java.util.EnumSet; import java.util.HashMap; import java.util.HashSet; @@ -144,27 +148,22 @@ public ClusterState execute(final ClusterState currentState) { public void clusterStateProcessed(final String source, final ClusterState oldState, final ClusterState newState) { if (oldState == newState) { assert blockedIndices.isEmpty() : "List of blocked indices is not empty but cluster state wasn't changed"; - listener.onResponse(new CloseIndexResponse(true, false)); + listener.onResponse(new CloseIndexResponse(true, false, Collections.emptyList())); } else { assert blockedIndices.isEmpty() == false : "List of blocked indices is empty but cluster state was changed"; threadPool.executor(ThreadPool.Names.MANAGEMENT) .execute(new WaitForClosedBlocksApplied(blockedIndices, request, - ActionListener.wrap(results -> + ActionListener.wrap(verifyResults -> clusterService.submitStateUpdateTask("close-indices", new ClusterStateUpdateTask(Priority.URGENT) { - - boolean acknowledged = true; + private final List indices = new ArrayList<>(); @Override public ClusterState execute(final ClusterState currentState) throws Exception { - final ClusterState updatedState = closeRoutingTable(currentState, blockedIndices, results); - for (Map.Entry result : results.entrySet()) { - IndexMetaData updatedMetaData = updatedState.metaData().index(result.getKey()); - if (updatedMetaData != null && updatedMetaData.getState() != IndexMetaData.State.CLOSE) { - acknowledged = false; - break; - } - } - return allocationService.reroute(updatedState, "indices closed"); + Tuple> closingResult = + closeRoutingTable(currentState, blockedIndices, verifyResults); + assert verifyResults.size() == closingResult.v2().size(); + indices.addAll(closingResult.v2()); + return allocationService.reroute(closingResult.v1(), "indices closed"); } @Override @@ -176,27 +175,28 @@ public void onFailure(final String source, final Exception e) { public void clusterStateProcessed(final String source, final ClusterState oldState, final ClusterState newState) { - final String[] indices = results.entrySet().stream() - .filter(result -> result.getValue().isAcknowledged()) - .map(result -> result.getKey().getName()) - .filter(index -> newState.routingTable().hasIndex(index)) + final boolean acknowledged = indices.stream().noneMatch(IndexResult::hasFailures); + final String[] waitForIndices = indices.stream() + .filter(result -> result.hasFailures() == false) + .filter(result -> newState.routingTable().hasIndex(result.getIndex())) + .map(result -> result.getIndex().getName()) .toArray(String[]::new); - if (indices.length > 0) { - activeShardsObserver.waitForActiveShards(indices, request.waitForActiveShards(), + if (waitForIndices.length > 0) { + activeShardsObserver.waitForActiveShards(waitForIndices, request.waitForActiveShards(), request.ackTimeout(), shardsAcknowledged -> { if (shardsAcknowledged == false) { logger.debug("[{}] indices closed, but the operation timed out while waiting " + - "for enough shards to be started.", Arrays.toString(indices)); + "for enough shards to be started.", Arrays.toString(waitForIndices)); } // acknowledged maybe be false but some indices may have been correctly closed, so // we maintain a kind of coherency by overriding the shardsAcknowledged value // (see ShardsAcknowledgedResponse constructor) boolean shardsAcked = acknowledged ? shardsAcknowledged : false; - listener.onResponse(new CloseIndexResponse(acknowledged, shardsAcked)); + listener.onResponse(new CloseIndexResponse(acknowledged, shardsAcked, indices)); }, listener::onFailure); } else { - listener.onResponse(new CloseIndexResponse(acknowledged, false)); + listener.onResponse(new CloseIndexResponse(acknowledged, false, indices)); } } }), @@ -292,11 +292,11 @@ class WaitForClosedBlocksApplied extends AbstractRunnable { private final Map blockedIndices; private final CloseIndexClusterStateUpdateRequest request; - private final ActionListener> listener; + private final ActionListener> listener; private WaitForClosedBlocksApplied(final Map blockedIndices, final CloseIndexClusterStateUpdateRequest request, - final ActionListener> listener) { + final ActionListener> listener) { if (blockedIndices == null || blockedIndices.isEmpty()) { throw new IllegalArgumentException("Cannot wait for closed blocks to be applied, list of blocked indices is empty or null"); } @@ -312,7 +312,7 @@ public void onFailure(final Exception e) { @Override protected void doRun() throws Exception { - final Map results = ConcurrentCollections.newConcurrentMap(); + final Map results = ConcurrentCollections.newConcurrentMap(); final CountDown countDown = new CountDown(blockedIndices.size()); final ClusterState state = clusterService.state(); blockedIndices.forEach((index, block) -> { @@ -325,47 +325,51 @@ protected void doRun() throws Exception { }); } - private void waitForShardsReadyForClosing(final Index index, final ClusterBlock closingBlock, - final ClusterState state, final Consumer onResponse) { + private void waitForShardsReadyForClosing(final Index index, + final ClusterBlock closingBlock, + final ClusterState state, + final Consumer onResponse) { final IndexMetaData indexMetaData = state.metaData().index(index); if (indexMetaData == null) { logger.debug("index {} has been blocked before closing and is now deleted, ignoring", index); - onResponse.accept(new AcknowledgedResponse(true)); + onResponse.accept(new IndexResult(index)); return; } final IndexRoutingTable indexRoutingTable = state.routingTable().index(index); if (indexRoutingTable == null || indexMetaData.getState() == IndexMetaData.State.CLOSE) { assert state.blocks().hasIndexBlock(index.getName(), INDEX_CLOSED_BLOCK); logger.debug("index {} has been blocked before closing and is already closed, ignoring", index); - onResponse.accept(new AcknowledgedResponse(true)); + onResponse.accept(new IndexResult(index)); return; } final ImmutableOpenIntMap shards = indexRoutingTable.getShards(); - final AtomicArray results = new AtomicArray<>(shards.size()); + final AtomicArray results = new AtomicArray<>(shards.size()); final CountDown countDown = new CountDown(shards.size()); for (IntObjectCursor shard : shards) { final IndexShardRoutingTable shardRoutingTable = shard.value; - final ShardId shardId = shardRoutingTable.shardId(); + final int shardId = shardRoutingTable.shardId().id(); sendVerifyShardBeforeCloseRequest(shardRoutingTable, closingBlock, new NotifyOnceListener() { @Override public void innerOnResponse(final ReplicationResponse replicationResponse) { - ReplicationResponse.ShardInfo shardInfo = replicationResponse.getShardInfo(); - results.setOnce(shardId.id(), new AcknowledgedResponse(shardInfo.getFailed() == 0)); + ShardResult.Failure[] failures = Arrays.stream(replicationResponse.getShardInfo().getFailures()) + .map(f -> new ShardResult.Failure(f.index(), f.shardId(), f.getCause(), f.nodeId())) + .toArray(ShardResult.Failure[]::new); + results.setOnce(shardId, new ShardResult(shardId, failures)); processIfFinished(); } @Override public void innerOnFailure(final Exception e) { - results.setOnce(shardId.id(), new AcknowledgedResponse(false)); + ShardResult.Failure failure = new ShardResult.Failure(index.getName(), shardId, e); + results.setOnce(shardId, new ShardResult(shardId, new ShardResult.Failure[]{failure})); processIfFinished(); } private void processIfFinished() { if (countDown.countDown()) { - final boolean acknowledged = results.asList().stream().allMatch(AcknowledgedResponse::isAcknowledged); - onResponse.accept(new AcknowledgedResponse(acknowledged)); + onResponse.accept(new IndexResult(index, results.toArray(new ShardResult[results.length()]))); } } }); @@ -396,9 +400,9 @@ private void sendVerifyShardBeforeCloseRequest(final IndexShardRoutingTable shar /** * Step 3 - Move index states from OPEN to CLOSE in cluster state for indices that are ready for closing. */ - static ClusterState closeRoutingTable(final ClusterState currentState, - final Map blockedIndices, - final Map results) { + static Tuple> closeRoutingTable(final ClusterState currentState, + final Map blockedIndices, + final Map verifyResult) { // Remove the index routing table of closed indices if the cluster is in a mixed version // that does not support the replication of closed indices @@ -409,9 +413,10 @@ static ClusterState closeRoutingTable(final ClusterState currentState, final RoutingTable.Builder routingTable = RoutingTable.builder(currentState.routingTable()); final Set closedIndices = new HashSet<>(); - for (Map.Entry result : results.entrySet()) { + Map closingResults = new HashMap<>(verifyResult); + for (Map.Entry result : verifyResult.entrySet()) { final Index index = result.getKey(); - final boolean acknowledged = result.getValue().isAcknowledged(); + final boolean acknowledged = result.getValue().hasFailures() == false; try { if (acknowledged == false) { logger.debug("verification of shards before closing {} failed", index); @@ -424,7 +429,11 @@ static ClusterState closeRoutingTable(final ClusterState currentState, continue; } final ClusterBlock closingBlock = blockedIndices.get(index); + assert closingBlock != null; if (currentState.blocks().hasIndexBlock(index.getName(), closingBlock) == false) { + // we should report error in this case as the index can be left as open. + closingResults.put(result.getKey(), new IndexResult(result.getKey(), new IllegalStateException( + "verification of shards before closing " + index + " succeeded but block has been removed in the meantime"))); logger.debug("verification of shards before closing {} succeeded but block has been removed in the meantime", index); continue; } @@ -450,9 +459,9 @@ static ClusterState closeRoutingTable(final ClusterState currentState, logger.debug("index {} has been deleted since it was blocked before closing, ignoring", index); } } - logger.info("completed closing of indices {}", closedIndices); - return ClusterState.builder(currentState).blocks(blocks).metaData(metadata).routingTable(routingTable.build()).build(); + return Tuple.tuple(ClusterState.builder(currentState).blocks(blocks).metaData(metadata).routingTable(routingTable.build()).build(), + closingResults.values()); } public void openIndex(final OpenIndexClusterStateUpdateRequest request, diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/close/CloseIndexResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/close/CloseIndexResponseTests.java index f86beff7738e3..40c34af51598d 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/close/CloseIndexResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/close/CloseIndexResponseTests.java @@ -19,14 +19,30 @@ package org.elasticsearch.action.admin.indices.close; +import org.elasticsearch.ElasticsearchException; import org.elasticsearch.Version; +import org.elasticsearch.action.NoShardAvailableActionException; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexNotFoundException; +import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.transport.ActionNotFoundTransportException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + +import static org.elasticsearch.test.VersionUtils.getPreviousVersion; import static org.elasticsearch.test.VersionUtils.randomVersionBetween; +import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.nullValue; public class CloseIndexResponseTests extends ESTestCase { @@ -47,11 +63,12 @@ public void testBwcSerialization() throws Exception { { final CloseIndexResponse response = randomResponse(); try (BytesStreamOutput out = new BytesStreamOutput()) { - out.setVersion(randomVersionBetween(random(), Version.V_7_0_0, Version.V_7_1_0)); + out.setVersion(randomVersionBetween(random(), Version.V_7_0_0, getPreviousVersion(Version.V_7_2_0))); response.writeTo(out); final AcknowledgedResponse deserializedResponse = new AcknowledgedResponse(); try (StreamInput in = out.bytes().streamInput()) { + in.setVersion(out.getVersion()); deserializedResponse.readFrom(in); } assertThat(deserializedResponse.isAcknowledged(), equalTo(response.isAcknowledged())); @@ -64,22 +81,136 @@ public void testBwcSerialization() throws Exception { final CloseIndexResponse deserializedResponse = new CloseIndexResponse(); try (StreamInput in = out.bytes().streamInput()) { - in.setVersion(randomVersionBetween(random(), Version.V_7_0_0, Version.V_7_1_0)); + in.setVersion(randomVersionBetween(random(), Version.V_7_0_0, getPreviousVersion(Version.V_7_2_0))); deserializedResponse.readFrom(in); } assertThat(deserializedResponse.isAcknowledged(), equalTo(response.isAcknowledged())); } } + { + final CloseIndexResponse response = randomResponse(); + try (BytesStreamOutput out = new BytesStreamOutput()) { + Version version = randomVersionBetween(random(), Version.V_7_2_0, Version.CURRENT); + out.setVersion(version); + response.writeTo(out); + final CloseIndexResponse deserializedResponse = new CloseIndexResponse(); + try (StreamInput in = out.bytes().streamInput()) { + in.setVersion(version); + deserializedResponse.readFrom(in); + } + assertThat(deserializedResponse.isAcknowledged(), equalTo(response.isAcknowledged())); + assertThat(deserializedResponse.isShardsAcknowledged(), equalTo(response.isShardsAcknowledged())); + if (version.onOrAfter(Version.V_8_0_0)) { + assertThat(deserializedResponse.getIndices(), hasSize(response.getIndices().size())); + } else { + assertThat(deserializedResponse.getIndices(), empty()); + } + } + } } private CloseIndexResponse randomResponse() { - final boolean acknowledged = randomBoolean(); + boolean acknowledged = true; + final String[] indicesNames = generateRandomStringArray(10, 10, false, true); + + final List indexResults = new ArrayList<>(); + for (String indexName : indicesNames) { + final Index index = new Index(indexName, "_na_"); + if (randomBoolean()) { + indexResults.add(new CloseIndexResponse.IndexResult(index)); + } else { + if (randomBoolean()) { + acknowledged = false; + indexResults.add(new CloseIndexResponse.IndexResult(index, randomException(index, 0))); + } else { + final int nbShards = randomIntBetween(1, 5); + CloseIndexResponse.ShardResult[] shards = new CloseIndexResponse.ShardResult[nbShards]; + for (int i = 0; i < nbShards; i++) { + CloseIndexResponse.ShardResult.Failure[] failures = null; + if (randomBoolean()) { + acknowledged = false; + failures = new CloseIndexResponse.ShardResult.Failure[randomIntBetween(1, 3)]; + for (int j = 0; j < failures.length; j++) { + String nodeId = randomAlphaOfLength(5); + failures[j] = new CloseIndexResponse.ShardResult.Failure(indexName, i, randomException(index, i), nodeId); + } + } + shards[i] = new CloseIndexResponse.ShardResult(i, failures); + } + indexResults.add(new CloseIndexResponse.IndexResult(index, shards)); + } + } + + } + final boolean shardsAcknowledged = acknowledged ? randomBoolean() : false; - return new CloseIndexResponse(acknowledged, shardsAcknowledged); + return new CloseIndexResponse(acknowledged, shardsAcknowledged, indexResults); + } + + private static ElasticsearchException randomException(final Index index, final int id) { + return randomFrom( + new IndexNotFoundException(index), + new ActionNotFoundTransportException("test"), + new NoShardAvailableActionException(new ShardId(index, id))); } private static void assertCloseIndexResponse(final CloseIndexResponse actual, final CloseIndexResponse expected) { assertThat(actual.isAcknowledged(), equalTo(expected.isAcknowledged())); assertThat(actual.isShardsAcknowledged(), equalTo(expected.isShardsAcknowledged())); + + for (int i = 0; i < expected.getIndices().size(); i++) { + CloseIndexResponse.IndexResult expectedIndexResult = expected.getIndices().get(i); + CloseIndexResponse.IndexResult actualIndexResult = actual.getIndices().get(i); + assertThat(actualIndexResult.getIndex(), equalTo(expectedIndexResult.getIndex())); + assertThat(actualIndexResult.hasFailures(), equalTo(expectedIndexResult.hasFailures())); + + if (expectedIndexResult.hasFailures() == false) { + assertThat(actualIndexResult.getException(), nullValue()); + if (actualIndexResult.getShards() != null) { + assertThat(Arrays.stream(actualIndexResult.getShards()) + .allMatch(shardResult -> shardResult.hasFailures() == false), is(true)); + } + } + + if (expectedIndexResult.getException() != null) { + assertThat(actualIndexResult.getShards(), nullValue()); + assertThat(actualIndexResult.getException(), notNullValue()); + assertThat(actualIndexResult.getException().getMessage(), equalTo(expectedIndexResult.getException().getMessage())); + assertThat(actualIndexResult.getException().getClass(), equalTo(expectedIndexResult.getException().getClass())); + assertArrayEquals(actualIndexResult.getException().getStackTrace(), expectedIndexResult.getException().getStackTrace()); + } else { + assertThat(actualIndexResult.getException(), nullValue()); + } + + if (expectedIndexResult.getShards() != null) { + assertThat(actualIndexResult.getShards().length, equalTo(expectedIndexResult.getShards().length)); + + for (int j = 0; j < expectedIndexResult.getShards().length; j++) { + CloseIndexResponse.ShardResult expectedShardResult = expectedIndexResult.getShards()[j]; + CloseIndexResponse.ShardResult actualShardResult = actualIndexResult.getShards()[j]; + assertThat(actualShardResult.getId(), equalTo(expectedShardResult.getId())); + assertThat(actualShardResult.hasFailures(), equalTo(expectedShardResult.hasFailures())); + + if (expectedShardResult.hasFailures()) { + assertThat(actualShardResult.getFailures().length, equalTo(expectedShardResult.getFailures().length)); + + for (int k = 0; k < expectedShardResult.getFailures().length; k++) { + CloseIndexResponse.ShardResult.Failure expectedFailure = expectedShardResult.getFailures()[k]; + CloseIndexResponse.ShardResult.Failure actualFailure = actualShardResult.getFailures()[k]; + assertThat(actualFailure.getNodeId(), equalTo(expectedFailure.getNodeId())); + assertThat(actualFailure.index(), equalTo(expectedFailure.index())); + assertThat(actualFailure.shardId(), equalTo(expectedFailure.shardId())); + assertThat(actualFailure.getCause().getMessage(), equalTo(expectedFailure.getCause().getMessage())); + assertThat(actualFailure.getCause().getClass(), equalTo(expectedFailure.getCause().getClass())); + assertArrayEquals(actualFailure.getCause().getStackTrace(), expectedFailure.getCause().getStackTrace()); + } + } else { + assertThat(actualShardResult.getFailures(), nullValue()); + } + } + } else { + assertThat(actualIndexResult.getShards(), nullValue()); + } + } } } diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataIndexStateServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataIndexStateServiceTests.java index 36bca0be1c2d5..b655a98379553 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataIndexStateServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataIndexStateServiceTests.java @@ -20,7 +20,8 @@ package org.elasticsearch.cluster.metadata; import org.elasticsearch.Version; -import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.action.admin.indices.close.CloseIndexResponse; +import org.elasticsearch.action.admin.indices.close.CloseIndexResponse.IndexResult; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.RestoreInProgress; @@ -50,6 +51,7 @@ import org.elasticsearch.test.ESTestCase; import java.util.Arrays; +import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; @@ -69,6 +71,7 @@ import static org.elasticsearch.cluster.shards.ClusterShardLimitIT.ShardCounts.forDataNodeCount; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; @@ -80,7 +83,7 @@ public class MetaDataIndexStateServiceTests extends ESTestCase { public void testCloseRoutingTable() { final Set nonBlockedIndices = new HashSet<>(); final Map blockedIndices = new HashMap<>(); - final Map results = new HashMap<>(); + final Map results = new HashMap<>(); ClusterState state = ClusterState.builder(new ClusterName("testCloseRoutingTable")).build(); for (int i = 0; i < randomIntBetween(1, 25); i++) { @@ -92,12 +95,17 @@ public void testCloseRoutingTable() { } else { final ClusterBlock closingBlock = MetaDataIndexStateService.createIndexClosingBlock(); state = addBlockedIndex(indexName, randomIntBetween(1, 5), randomIntBetween(0, 5), state, closingBlock); - blockedIndices.put(state.metaData().index(indexName).getIndex(), closingBlock); - results.put(state.metaData().index(indexName).getIndex(), new AcknowledgedResponse(randomBoolean())); + final Index index = state.metaData().index(indexName).getIndex(); + blockedIndices.put(index, closingBlock); + if (randomBoolean()) { + results.put(index, new CloseIndexResponse.IndexResult(index)); + } else { + results.put(index, new CloseIndexResponse.IndexResult(index, new Exception("test"))); + } } } - final ClusterState updatedState = MetaDataIndexStateService.closeRoutingTable(state, blockedIndices, results); + final ClusterState updatedState = MetaDataIndexStateService.closeRoutingTable(state, blockedIndices, results).v1(); assertThat(updatedState.metaData().indices().size(), equalTo(nonBlockedIndices.size() + blockedIndices.size())); for (Index nonBlockedIndex : nonBlockedIndices) { @@ -105,7 +113,7 @@ public void testCloseRoutingTable() { assertThat(updatedState.blocks().hasIndexBlockWithId(nonBlockedIndex.getName(), INDEX_CLOSED_BLOCK_ID), is(false)); } for (Index blockedIndex : blockedIndices.keySet()) { - if (results.get(blockedIndex).isAcknowledged()) { + if (results.get(blockedIndex).hasFailures() == false) { assertIsClosed(blockedIndex.getName(), updatedState); } else { assertIsOpened(blockedIndex.getName(), updatedState); @@ -117,7 +125,7 @@ public void testCloseRoutingTable() { public void testCloseRoutingTableRemovesRoutingTable() { final Set nonBlockedIndices = new HashSet<>(); final Map blockedIndices = new HashMap<>(); - final Map results = new HashMap<>(); + final Map results = new HashMap<>(); final ClusterBlock closingBlock = MetaDataIndexStateService.createIndexClosingBlock(); ClusterState state = ClusterState.builder(new ClusterName("testCloseRoutingTableRemovesRoutingTable")).build(); @@ -129,8 +137,13 @@ public void testCloseRoutingTableRemovesRoutingTable() { nonBlockedIndices.add(state.metaData().index(indexName).getIndex()); } else { state = addBlockedIndex(indexName, randomIntBetween(1, 5), randomIntBetween(0, 5), state, closingBlock); - blockedIndices.put(state.metaData().index(indexName).getIndex(), closingBlock); - results.put(state.metaData().index(indexName).getIndex(), new AcknowledgedResponse(randomBoolean())); + final Index index = state.metaData().index(indexName).getIndex(); + blockedIndices.put(index, closingBlock); + if (randomBoolean()) { + results.put(index, new CloseIndexResponse.IndexResult(index)); + } else { + results.put(index, new CloseIndexResponse.IndexResult(index, new Exception("test"))); + } } } @@ -142,7 +155,7 @@ public void testCloseRoutingTableRemovesRoutingTable() { new HashSet<>(Arrays.asList(DiscoveryNode.Role.values())), Version.V_7_2_0))) .build(); - state = MetaDataIndexStateService.closeRoutingTable(state, blockedIndices, results); + state = MetaDataIndexStateService.closeRoutingTable(state, blockedIndices, results).v1(); assertThat(state.metaData().indices().size(), equalTo(nonBlockedIndices.size() + blockedIndices.size())); for (Index nonBlockedIndex : nonBlockedIndices) { @@ -150,7 +163,7 @@ public void testCloseRoutingTableRemovesRoutingTable() { assertThat(state.blocks().hasIndexBlockWithId(nonBlockedIndex.getName(), INDEX_CLOSED_BLOCK_ID), is(false)); } for (Index blockedIndex : blockedIndices.keySet()) { - if (results.get(blockedIndex).isAcknowledged()) { + if (results.get(blockedIndex).hasFailures() == false) { IndexMetaData indexMetaData = state.metaData().index(blockedIndex); assertThat(indexMetaData.getState(), is(IndexMetaData.State.CLOSE)); Settings indexSettings = indexMetaData.getSettings(); @@ -329,6 +342,33 @@ public void testIsIndexVerifiedBeforeClosed() { } } + public void testCloseFailedIfBlockDisappeared() { + ClusterState state = ClusterState.builder(new ClusterName("failedIfBlockDisappeared")).build(); + Map blockedIndices = new HashMap<>(); + int numIndices = between(1, 10); + Set disappearedIndices = new HashSet<>(); + Map verifyResults = new HashMap<>(); + for (int i = 0; i < numIndices; i++) { + String indexName = "test-" + i; + state = addOpenedIndex(indexName, randomIntBetween(1, 3), randomIntBetween(0, 3), state); + Index index = state.metaData().index(indexName).getIndex(); + state = MetaDataIndexStateService.addIndexClosedBlocks(new Index[]{index}, blockedIndices, state); + if (randomBoolean()) { + state = ClusterState.builder(state) + .blocks(ClusterBlocks.builder().blocks(state.blocks()).removeIndexBlocks(indexName).build()) + .build(); + disappearedIndices.add(index); + } + verifyResults.put(index, new IndexResult(index)); + } + Collection closingResults = + MetaDataIndexStateService.closeRoutingTable(state, blockedIndices, unmodifiableMap(verifyResults)).v2(); + assertThat(closingResults, hasSize(numIndices)); + Set failedIndices = closingResults.stream().filter(IndexResult::hasFailures) + .map(IndexResult::getIndex).collect(Collectors.toSet()); + assertThat(failedIndices, equalTo(disappearedIndices)); + } + public static ClusterState createClusterForShardLimitTest(int nodesInCluster, int openIndexShards, int openIndexReplicas, int closedIndexShards, int closedIndexReplicas, Settings clusterSettings) { ImmutableOpenMap.Builder dataNodes = ImmutableOpenMap.builder(); diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataIndexStateServiceUtils.java b/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataIndexStateServiceUtils.java index 5ee6a7c60da3d..7c94a42bd0cb5 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataIndexStateServiceUtils.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataIndexStateServiceUtils.java @@ -18,7 +18,7 @@ */ package org.elasticsearch.cluster.metadata; -import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.action.admin.indices.close.CloseIndexResponse; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlock; import org.elasticsearch.index.Index; @@ -43,7 +43,7 @@ public static ClusterState addIndexClosedBlocks(final Index[] indices, final Map */ public static ClusterState closeRoutingTable(final ClusterState state, final Map blockedIndices, - final Map results) { - return MetaDataIndexStateService.closeRoutingTable(state, blockedIndices, results); + final Map results) { + return MetaDataIndexStateService.closeRoutingTable(state, blockedIndices, results).v1(); } } diff --git a/server/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java b/server/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java index a8c47f5d3ef39..433662f95d4e0 100644 --- a/server/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java +++ b/server/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java @@ -27,6 +27,7 @@ import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteRequest; import org.elasticsearch.action.admin.cluster.reroute.TransportClusterRerouteAction; import org.elasticsearch.action.admin.indices.close.CloseIndexRequest; +import org.elasticsearch.action.admin.indices.close.CloseIndexResponse; import org.elasticsearch.action.admin.indices.close.TransportCloseIndexAction; import org.elasticsearch.action.admin.indices.close.TransportVerifyShardBeforeCloseAction; import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; @@ -40,7 +41,6 @@ import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.DestructiveOperations; import org.elasticsearch.action.support.PlainActionFuture; -import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.action.support.master.MasterNodeRequest; import org.elasticsearch.action.support.master.TransportMasterNodeAction; import org.elasticsearch.action.support.master.TransportMasterNodeActionUtils; @@ -227,8 +227,8 @@ public ClusterState closeIndices(ClusterState state, CloseIndexRequest request) final Map blockedIndices = new HashMap<>(); ClusterState newState = MetaDataIndexStateServiceUtils.addIndexClosedBlocks(concreteIndices, blockedIndices, state); - newState = MetaDataIndexStateServiceUtils.closeRoutingTable(newState, blockedIndices, blockedIndices.keySet().stream() - .collect(Collectors.toMap(Function.identity(), index -> new AcknowledgedResponse(true)))); + newState = MetaDataIndexStateServiceUtils.closeRoutingTable(newState, blockedIndices, + blockedIndices.keySet().stream().collect(Collectors.toMap(Function.identity(), CloseIndexResponse.IndexResult::new))); return allocationService.reroute(newState, "indices closed"); } diff --git a/server/src/test/java/org/elasticsearch/indices/state/CloseIndexIT.java b/server/src/test/java/org/elasticsearch/indices/state/CloseIndexIT.java index 6f666483b18d0..b39a008de5f4f 100644 --- a/server/src/test/java/org/elasticsearch/indices/state/CloseIndexIT.java +++ b/server/src/test/java/org/elasticsearch/indices/state/CloseIndexIT.java @@ -20,6 +20,7 @@ import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.admin.indices.close.CloseIndexRequestBuilder; import org.elasticsearch.action.admin.indices.close.CloseIndexResponse; import org.elasticsearch.action.support.ActiveShardCount; import org.elasticsearch.client.Client; @@ -45,6 +46,7 @@ import org.elasticsearch.test.InternalTestCluster; import java.util.ArrayList; +import java.util.Arrays; import java.util.List; import java.util.Locale; import java.util.concurrent.CountDownLatch; @@ -64,6 +66,7 @@ import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.nullValue; public class CloseIndexIT extends ESIntegTestCase { @@ -115,7 +118,7 @@ public void testCloseIndex() throws Exception { indexRandom(randomBoolean(), false, randomBoolean(), IntStream.range(0, nbDocs) .mapToObj(i -> client().prepareIndex(indexName, "_doc", String.valueOf(i)).setSource("num", i)).collect(toList())); - assertBusy(() -> assertAcked(client().admin().indices().prepareClose(indexName))); + assertBusy(() -> closeIndices(indexName)); assertIndexIsClosed(indexName); assertAcked(client().admin().indices().prepareOpen(indexName)); @@ -130,13 +133,17 @@ public void testCloseAlreadyClosedIndex() throws Exception { indexRandom(randomBoolean(), false, randomBoolean(), IntStream.range(0, randomIntBetween(1, 10)) .mapToObj(i -> client().prepareIndex(indexName, "_doc", String.valueOf(i)).setSource("num", i)).collect(toList())); } - // First close should be acked - assertBusy(() -> assertAcked(client().admin().indices().prepareClose(indexName))); + // First close should be fully acked + assertBusy(() -> closeIndices(indexName)); assertIndexIsClosed(indexName); // Second close should be acked too final ActiveShardCount activeShardCount = randomFrom(ActiveShardCount.NONE, ActiveShardCount.DEFAULT, ActiveShardCount.ALL); - assertBusy(() -> assertAcked(client().admin().indices().prepareClose(indexName).setWaitForActiveShards(activeShardCount))); + assertBusy(() -> { + CloseIndexResponse response = client().admin().indices().prepareClose(indexName).setWaitForActiveShards(activeShardCount).get(); + assertAcked(response); + assertTrue(response.getIndices().isEmpty()); + }); assertIndexIsClosed(indexName); } @@ -150,7 +157,7 @@ public void testCloseUnassignedIndex() throws Exception { assertThat(clusterState.metaData().indices().get(indexName).getState(), is(IndexMetaData.State.OPEN)); assertThat(clusterState.routingTable().allShards().stream().allMatch(ShardRouting::unassigned), is(true)); - assertBusy(() -> assertAcked(client().admin().indices().prepareClose(indexName).setWaitForActiveShards(ActiveShardCount.NONE))); + assertBusy(() -> closeIndices(client().admin().indices().prepareClose(indexName).setWaitForActiveShards(ActiveShardCount.NONE))); assertIndexIsClosed(indexName); } @@ -198,7 +205,7 @@ public void testCloseWhileIndexingDocuments() throws Exception { indexer.setAssertNoFailuresOnStop(false); waitForDocs(randomIntBetween(10, 50), indexer); - assertBusy(() -> assertAcked(client().admin().indices().prepareClose(indexName))); + assertBusy(() -> closeIndices(indexName)); indexer.stop(); nbDocs += indexer.totalIndexedDocs(); @@ -345,6 +352,9 @@ public void testCloseIndexWaitForActiveShards() throws Exception { assertThat(client().admin().cluster().prepareHealth(indexName).get().getStatus(), is(ClusterHealthStatus.GREEN)); assertTrue(closeIndexResponse.isAcknowledged()); assertTrue(closeIndexResponse.isShardsAcknowledged()); + assertThat(closeIndexResponse.getIndices().get(0), notNullValue()); + assertThat(closeIndexResponse.getIndices().get(0).hasFailures(), is(false)); + assertThat(closeIndexResponse.getIndices().get(0).getIndex().getName(), equalTo(indexName)); assertIndexIsClosed(indexName); } @@ -448,6 +458,36 @@ public void testResyncPropagatePrimaryTerm() throws Exception { } } + private static void closeIndices(final String... indices) { + closeIndices(client().admin().indices().prepareClose(indices)); + } + + private static void closeIndices(final CloseIndexRequestBuilder requestBuilder) { + final CloseIndexResponse response = requestBuilder.get(); + assertThat(response.isAcknowledged(), is(true)); + assertThat(response.isShardsAcknowledged(), is(true)); + + final String[] indices = requestBuilder.request().indices(); + if (indices != null) { + assertThat(response.getIndices().size(), equalTo(indices.length)); + for (String index : indices) { + CloseIndexResponse.IndexResult indexResult = response.getIndices().stream() + .filter(result -> index.equals(result.getIndex().getName())).findFirst().get(); + assertThat(indexResult, notNullValue()); + assertThat(indexResult.hasFailures(), is(false)); + assertThat(indexResult.getException(), nullValue()); + assertThat(indexResult.getShards(), notNullValue()); + Arrays.stream(indexResult.getShards()).forEach(shardResult -> { + assertThat(shardResult.hasFailures(), is(false)); + assertThat(shardResult.getFailures(), notNullValue()); + assertThat(shardResult.getFailures().length, equalTo(0)); + }); + } + } else { + assertThat(response.getIndices().size(), equalTo(0)); + } + } + static void assertIndexIsClosed(final String... indices) { final ClusterState clusterState = client().admin().cluster().prepareState().get().getState(); for (String index : indices) { diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/CloseFollowerIndexStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/CloseFollowerIndexStepTests.java index 368afaa26d0cc..4c00485e631e2 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/CloseFollowerIndexStepTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/CloseFollowerIndexStepTests.java @@ -44,7 +44,7 @@ public void testCloseFollowingIndex() { assertThat(closeIndexRequest.indices()[0], equalTo("follower-index")); @SuppressWarnings("unchecked") ActionListener listener = (ActionListener) invocation.getArguments()[1]; - listener.onResponse(new CloseIndexResponse(true, true)); + listener.onResponse(new CloseIndexResponse(true, true, Collections.emptyList())); return null; }).when(indicesClient).close(Mockito.any(), Mockito.any()); From f6ae6c470a33aed74a4802d891260d442bd51ce2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Przemys=C5=82aw=20Witek?= Date: Thu, 23 May 2019 21:56:13 +0200 Subject: [PATCH 077/224] Implement XContentParser.genericMap and XContentParser.genericMapOrdered methods (#42059) Implement XContentParser.genericMap and XContentParser.genericMapOrdered methods --- .../common/xcontent/XContentParser.java | 15 +++ .../common/xcontent/XContentSubParser.java | 9 ++ .../support/AbstractXContentParser.java | 87 +++++++---------- .../common/xcontent/SimpleStruct.java | 93 +++++++++++++++++++ .../common/xcontent/XContentParserTests.java | 54 +++++++++++ .../xcontent/WatcherXContentParser.java | 8 ++ 6 files changed, 215 insertions(+), 51 deletions(-) create mode 100644 libs/x-content/src/test/java/org/elasticsearch/common/xcontent/SimpleStruct.java diff --git a/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/XContentParser.java b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/XContentParser.java index 81cc39c5793cf..6d4da08bfaa59 100644 --- a/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/XContentParser.java +++ b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/XContentParser.java @@ -19,11 +19,14 @@ package org.elasticsearch.common.xcontent; +import org.elasticsearch.common.CheckedFunction; + import java.io.Closeable; import java.io.IOException; import java.nio.CharBuffer; import java.util.List; import java.util.Map; +import java.util.function.Supplier; /** * Interface for pull - parsing {@link XContent} see {@link XContentType} for supported types. @@ -135,6 +138,18 @@ enum NumberType { Map mapStringsOrdered() throws IOException; + /** + * Returns an instance of {@link Map} holding parsed map. + * Serves as a replacement for the "map", "mapOrdered", "mapStrings" and "mapStringsOrdered" methods above. + * + * @param mapFactory factory for creating new {@link Map} objects + * @param mapValueParser parser for parsing a single map value + * @param map value type + * @return {@link Map} object + */ + Map map( + Supplier> mapFactory, CheckedFunction mapValueParser) throws IOException; + List list() throws IOException; List listOrderedMap() throws IOException; diff --git a/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/XContentSubParser.java b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/XContentSubParser.java index adcbf6ef1bee0..252bfea7ca9c0 100644 --- a/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/XContentSubParser.java +++ b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/XContentSubParser.java @@ -19,10 +19,13 @@ package org.elasticsearch.common.xcontent; +import org.elasticsearch.common.CheckedFunction; + import java.io.IOException; import java.nio.CharBuffer; import java.util.List; import java.util.Map; +import java.util.function.Supplier; /** * Wrapper for a XContentParser that makes a single object/array look like a complete document. @@ -110,6 +113,12 @@ public Map mapStringsOrdered() throws IOException { return parser.mapStringsOrdered(); } + @Override + public Map map( + Supplier> mapFactory, CheckedFunction mapValueParser) throws IOException { + return parser.map(mapFactory, mapValueParser); + } + @Override public List list() throws IOException { return parser.list(); diff --git a/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/support/AbstractXContentParser.java b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/support/AbstractXContentParser.java index fa6ffdd0407f9..68e03e34a1a17 100644 --- a/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/support/AbstractXContentParser.java +++ b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/support/AbstractXContentParser.java @@ -20,6 +20,7 @@ package org.elasticsearch.common.xcontent.support; import org.elasticsearch.common.Booleans; +import org.elasticsearch.common.CheckedFunction; import org.elasticsearch.common.xcontent.DeprecationHandler; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContentParseException; @@ -34,6 +35,7 @@ import java.util.LinkedHashMap; import java.util.List; import java.util.Map; +import java.util.function.Supplier; public abstract class AbstractXContentParser implements XContentParser { @@ -279,6 +281,12 @@ public Map mapStringsOrdered() throws IOException { return readOrderedMapStrings(this); } + @Override + public Map map( + Supplier> mapFactory, CheckedFunction mapValueParser) throws IOException { + return readGenericMap(this, mapFactory, mapValueParser); + } + @Override public List list() throws IOException { return readList(this); @@ -289,21 +297,13 @@ public List listOrderedMap() throws IOException { return readListOrderedMap(this); } - interface MapFactory { - Map newMap(); - } - - interface MapStringsFactory { - Map newMap(); - } - - static final MapFactory SIMPLE_MAP_FACTORY = HashMap::new; + static final Supplier> SIMPLE_MAP_FACTORY = HashMap::new; - static final MapFactory ORDERED_MAP_FACTORY = LinkedHashMap::new; + static final Supplier> ORDERED_MAP_FACTORY = LinkedHashMap::new; - static final MapStringsFactory SIMPLE_MAP_STRINGS_FACTORY = HashMap::new; + static final Supplier> SIMPLE_MAP_STRINGS_FACTORY = HashMap::new; - static final MapStringsFactory ORDERED_MAP_STRINGS_FACTORY = LinkedHashMap::new; + static final Supplier> ORDERED_MAP_STRINGS_FACTORY = LinkedHashMap::new; static Map readMap(XContentParser parser) throws IOException { return readMap(parser, SIMPLE_MAP_FACTORY); @@ -329,28 +329,19 @@ static List readListOrderedMap(XContentParser parser) throws IOException return readList(parser, ORDERED_MAP_FACTORY); } - static Map readMap(XContentParser parser, MapFactory mapFactory) throws IOException { - Map map = mapFactory.newMap(); - XContentParser.Token token = parser.currentToken(); - if (token == null) { - token = parser.nextToken(); - } - if (token == XContentParser.Token.START_OBJECT) { - token = parser.nextToken(); - } - for (; token == XContentParser.Token.FIELD_NAME; token = parser.nextToken()) { - // Must point to field name - String fieldName = parser.currentName(); - // And then the value... - token = parser.nextToken(); - Object value = readValue(parser, mapFactory, token); - map.put(fieldName, value); - } - return map; + static Map readMap(XContentParser parser, Supplier> mapFactory) throws IOException { + return readGenericMap(parser, mapFactory, p -> readValue(p, mapFactory)); } - static Map readMapStrings(XContentParser parser, MapStringsFactory mapStringsFactory) throws IOException { - Map map = mapStringsFactory.newMap(); + static Map readMapStrings(XContentParser parser, Supplier> mapFactory) throws IOException { + return readGenericMap(parser, mapFactory, XContentParser::text); + } + + static Map readGenericMap( + XContentParser parser, + Supplier> mapFactory, + CheckedFunction mapValueParser) throws IOException { + Map map = mapFactory.get(); XContentParser.Token token = parser.currentToken(); if (token == null) { token = parser.nextToken(); @@ -363,13 +354,13 @@ static Map readMapStrings(XContentParser parser, MapStringsFacto String fieldName = parser.currentName(); // And then the value... parser.nextToken(); - String value = parser.text(); + T value = mapValueParser.apply(parser); map.put(fieldName, value); } return map; } - static List readList(XContentParser parser, MapFactory mapFactory) throws IOException { + static List readList(XContentParser parser, Supplier> mapFactory) throws IOException { XContentParser.Token token = parser.currentToken(); if (token == null) { token = parser.nextToken(); @@ -386,28 +377,22 @@ static List readList(XContentParser parser, MapFactory mapFactory) throw ArrayList list = new ArrayList<>(); for (; token != null && token != XContentParser.Token.END_ARRAY; token = parser.nextToken()) { - list.add(readValue(parser, mapFactory, token)); + list.add(readValue(parser, mapFactory)); } return list; } - static Object readValue(XContentParser parser, MapFactory mapFactory, XContentParser.Token token) throws IOException { - if (token == XContentParser.Token.VALUE_NULL) { - return null; - } else if (token == XContentParser.Token.VALUE_STRING) { - return parser.text(); - } else if (token == XContentParser.Token.VALUE_NUMBER) { - return parser.numberValue(); - } else if (token == XContentParser.Token.VALUE_BOOLEAN) { - return parser.booleanValue(); - } else if (token == XContentParser.Token.START_OBJECT) { - return readMap(parser, mapFactory); - } else if (token == XContentParser.Token.START_ARRAY) { - return readList(parser, mapFactory); - } else if (token == XContentParser.Token.VALUE_EMBEDDED_OBJECT) { - return parser.binaryValue(); + static Object readValue(XContentParser parser, Supplier> mapFactory) throws IOException { + switch (parser.currentToken()) { + case VALUE_STRING: return parser.text(); + case VALUE_NUMBER: return parser.numberValue(); + case VALUE_BOOLEAN: return parser.booleanValue(); + case START_OBJECT: return readMap(parser, mapFactory); + case START_ARRAY: return readList(parser, mapFactory); + case VALUE_EMBEDDED_OBJECT: return parser.binaryValue(); + case VALUE_NULL: + default: return null; } - return null; } @Override diff --git a/libs/x-content/src/test/java/org/elasticsearch/common/xcontent/SimpleStruct.java b/libs/x-content/src/test/java/org/elasticsearch/common/xcontent/SimpleStruct.java new file mode 100644 index 0000000000000..72bff3500be35 --- /dev/null +++ b/libs/x-content/src/test/java/org/elasticsearch/common/xcontent/SimpleStruct.java @@ -0,0 +1,93 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.common.xcontent; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; + +import java.io.IOException; +import java.util.Objects; + +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; + +/** + * Simple structure with 3 fields: int, double and String. + * Used for testing parsers. + */ +class SimpleStruct implements ToXContentObject { + + static SimpleStruct fromXContent(XContentParser parser) { + return PARSER.apply(parser, null); + } + + private static final ParseField I = new ParseField("i"); + private static final ParseField D = new ParseField("d"); + private static final ParseField S = new ParseField("s"); + + @SuppressWarnings("unchecked") + private static final ConstructingObjectParser PARSER = + new ConstructingObjectParser<>( + "simple_struct", true, args -> new SimpleStruct((int) args[0], (double) args[1], (String) args[2])); + + static { + PARSER.declareInt(constructorArg(), I); + PARSER.declareDouble(constructorArg(), D); + PARSER.declareString(constructorArg(), S); + } + + private final int i; + private final double d; + private final String s; + + SimpleStruct(int i, double d, String s) { + this.i = i; + this.d = d; + this.s = s; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return builder + .startObject() + .field(I.getPreferredName(), i) + .field(D.getPreferredName(), d) + .field(S.getPreferredName(), s) + .endObject(); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + SimpleStruct other = (SimpleStruct) o; + return i == other.i && d == other.d && Objects.equals(s, other.s); + } + + @Override + public int hashCode() { + return Objects.hash(i, d, s); + } + + @Override + public String toString() { + return Strings.toString(this); + } +} + diff --git a/libs/x-content/src/test/java/org/elasticsearch/common/xcontent/XContentParserTests.java b/libs/x-content/src/test/java/org/elasticsearch/common/xcontent/XContentParserTests.java index 606d019f3c4f7..c519880224ccb 100644 --- a/libs/x-content/src/test/java/org/elasticsearch/common/xcontent/XContentParserTests.java +++ b/libs/x-content/src/test/java/org/elasticsearch/common/xcontent/XContentParserTests.java @@ -30,18 +30,21 @@ import java.util.Arrays; import java.util.Collections; import java.util.HashMap; +import java.util.LinkedHashMap; import java.util.List; import java.util.Map; import static java.util.Collections.emptyMap; import static java.util.Collections.singletonMap; import static org.hamcrest.Matchers.contains; +import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.isIn; import static org.hamcrest.Matchers.nullValue; +import static org.junit.internal.matchers.ThrowableMessageMatcher.hasMessage; public class XContentParserTests extends ESTestCase { @@ -329,6 +332,57 @@ public void testNestedMapInList() throws IOException { } } + public void testGenericMap() throws IOException { + String content = "{" + + "\"c\": { \"i\": 3, \"d\": 0.3, \"s\": \"ccc\" }, " + + "\"a\": { \"i\": 1, \"d\": 0.1, \"s\": \"aaa\" }, " + + "\"b\": { \"i\": 2, \"d\": 0.2, \"s\": \"bbb\" }" + + "}"; + SimpleStruct structA = new SimpleStruct(1, 0.1, "aaa"); + SimpleStruct structB = new SimpleStruct(2, 0.2, "bbb"); + SimpleStruct structC = new SimpleStruct(3, 0.3, "ccc"); + try (XContentParser parser = createParser(JsonXContent.jsonXContent, content)) { + Map actualMap = parser.map(HashMap::new, SimpleStruct::fromXContent); + // Verify map contents, ignore the iteration order. + assertThat(actualMap, equalTo(Map.of("a", structA, "b", structB, "c", structC))); + assertThat(actualMap.values(), containsInAnyOrder(structA, structB, structC)); + assertNull(parser.nextToken()); + } + } + + public void testGenericMapOrdered() throws IOException { + String content = "{" + + "\"c\": { \"i\": 3, \"d\": 0.3, \"s\": \"ccc\" }, " + + "\"a\": { \"i\": 1, \"d\": 0.1, \"s\": \"aaa\" }, " + + "\"b\": { \"i\": 2, \"d\": 0.2, \"s\": \"bbb\" }" + + "}"; + SimpleStruct structA = new SimpleStruct(1, 0.1, "aaa"); + SimpleStruct structB = new SimpleStruct(2, 0.2, "bbb"); + SimpleStruct structC = new SimpleStruct(3, 0.3, "ccc"); + try (XContentParser parser = createParser(JsonXContent.jsonXContent, content)) { + Map actualMap = parser.map(LinkedHashMap::new, SimpleStruct::fromXContent); + // Verify map contents, ignore the iteration order. + assertThat(actualMap, equalTo(Map.of("a", structA, "b", structB, "c", structC))); + // Verify that map's iteration order is the same as the order in which fields appear in JSON. + assertThat(actualMap.values(), contains(structC, structA, structB)); + assertNull(parser.nextToken()); + } + } + + public void testGenericMap_Failure_MapContainingUnparsableValue() throws IOException { + String content = "{" + + "\"a\": { \"i\": 1, \"d\": 0.1, \"s\": \"aaa\" }, " + + "\"b\": { \"i\": 2, \"d\": 0.2, \"s\": 666 }, " + + "\"c\": { \"i\": 3, \"d\": 0.3, \"s\": \"ccc\" }" + + "}"; + try (XContentParser parser = createParser(JsonXContent.jsonXContent, content)) { + XContentParseException exception = expectThrows( + XContentParseException.class, + () -> parser.map(HashMap::new, SimpleStruct::fromXContent)); + assertThat(exception, hasMessage(containsString("s doesn't support values of type: VALUE_NUMBER"))); + } + } + public void testSubParserObject() throws IOException { XContentBuilder builder = XContentFactory.jsonBuilder(); int numberOfTokens; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/support/xcontent/WatcherXContentParser.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/support/xcontent/WatcherXContentParser.java index fcb3802ca6b76..1d155a5f0c02d 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/support/xcontent/WatcherXContentParser.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/support/xcontent/WatcherXContentParser.java @@ -6,6 +6,7 @@ package org.elasticsearch.xpack.core.watcher.support.xcontent; import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.common.CheckedFunction; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.xcontent.DeprecationHandler; import org.elasticsearch.common.xcontent.NamedXContentRegistry; @@ -21,6 +22,7 @@ import java.time.ZonedDateTime; import java.util.List; import java.util.Map; +import java.util.function.Supplier; /** * A xcontent parser that is used by watcher. This is a special parser that is @@ -123,6 +125,12 @@ public Map mapStringsOrdered() throws IOException { return parser.mapStringsOrdered(); } + @Override + public Map map( + Supplier> mapFactory, CheckedFunction mapValueParser) throws IOException { + return parser.map(mapFactory, mapValueParser); + } + @Override public List list() throws IOException { return parser.list(); From 677c391df05ecccb4bb065666775577fd2d4185f Mon Sep 17 00:00:00 2001 From: David Roberts Date: Thu, 23 May 2019 21:04:03 +0100 Subject: [PATCH 078/224] Avoid HashMap construction on Grok non-match (#42444) This change moves the construction of the result HashMap in Grok.captures() into the branch that actually needs it. This probably will not make a measurable difference for ingest pipelines, but it is beneficial to the ML find_file_structure endpoint, as it tries out many Grok patterns that will fail to match. --- libs/grok/src/main/java/org/elasticsearch/grok/Grok.java | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/libs/grok/src/main/java/org/elasticsearch/grok/Grok.java b/libs/grok/src/main/java/org/elasticsearch/grok/Grok.java index c20737998feb9..473e8626a4c42 100644 --- a/libs/grok/src/main/java/org/elasticsearch/grok/Grok.java +++ b/libs/grok/src/main/java/org/elasticsearch/grok/Grok.java @@ -240,7 +240,6 @@ public boolean match(String text) { */ public Map captures(String text) { byte[] textAsBytes = text.getBytes(StandardCharsets.UTF_8); - Map fields = new HashMap<>(); Matcher matcher = compiledExpression.matcher(textAsBytes); int result; try { @@ -256,6 +255,7 @@ public Map captures(String text) { // TODO: I think we should throw an error here? return null; } else if (compiledExpression.numberOfNames() > 0) { + Map fields = new HashMap<>(); Region region = matcher.getEagerRegion(); for (Iterator entry = compiledExpression.namedBackrefIterator(); entry.hasNext();) { NameEntry e = entry.next(); @@ -270,8 +270,10 @@ public Map captures(String text) { } } } + return fields; + } else { + return Collections.emptyMap(); } - return fields; } public static Map getBuiltinPatterns() { From a15f1ee4f64b19670b15a9c1526c7b0b0204ea60 Mon Sep 17 00:00:00 2001 From: David Roberts Date: Thu, 23 May 2019 21:06:47 +0100 Subject: [PATCH 079/224] [ML] Improve file structure finder timestamp format determination (#41948) This change contains a major refactoring of the timestamp format determination code used by the ML find file structure endpoint. Previously timestamp format determination was done separately for each piece of text supplied to the timestamp format finder. This had the drawback that it was not possible to distinguish dd/MM and MM/dd in the case where both numbers were 12 or less. In order to do this sensibly it is best to look across all the available timestamps and see if one of the numbers is greater than 12 in any of them. This necessitates making the timestamp format finder an instantiable class that can accumulate evidence over time. Another problem with the previous approach was that it was only possible to override the timestamp format to one of a limited set of timestamp formats. There was no way out if a file to be analysed had a timestamp that was sane yet not in the supported set. This is now changed to allow any timestamp format that can be parsed by a combination of these Java date/time formats: yy, yyyy, M, MM, MMM, MMMM, d, dd, EEE, EEEE, H, HH, h, mm, ss, a, XX, XXX, zzz Additionally S letter groups (fractional seconds) are supported providing they occur after ss and separated from the ss by a dot, comma or colon. Spacing and punctuation is also permitted with the exception of the question mark, newline and carriage return characters, together with literal text enclosed in single quotes. The full list of changes/improvements in this refactor is: - Make TimestampFormatFinder an instantiable class - Overrides must be specified in Java date/time format - Joda format is no longer accepted - Joda timestamp formats in outputs are now derived from the determined or overridden Java timestamp formats, not stored separately - Functionality for determining the "best" timestamp format in a set of lines has been moved from TextLogFileStructureFinder to TimestampFormatFinder, taking advantage of the fact that TimestampFormatFinder is now an instantiable class with state - The functionality to quickly rule out some possible Grok patterns when looking for timestamp formats has been changed from using simple regular expressions to the much faster approach of using the Shift-And method of sub-string search, but using an "alphabet" consisting of just 1 (representing any digit) and 0 (representing non-digits) - Timestamp format overrides are now much more flexible - Timestamp format overrides that do not correspond to a built-in Grok pattern are mapped to a %{CUSTOM_TIMESTAMP} Grok pattern whose definition is included within the date processor in the ingest pipeline - Grok patterns that correspond to multiple Java date/time patterns are now handled better - the Grok pattern is accepted as matching broadly, and the required set of Java date/time patterns is built up considering all observed samples - As a result of the more flexible acceptance of Grok patterns, when looking for the "best" timestamp in a set of lines timestamps are considered different if they are preceded by a different sequence of punctuation characters (to prevent timestamps far into some lines being considered similar to timestamps near the beginning of other lines) - Out-of-the-box Grok patterns that are considered now include %{DATE} and %{DATESTAMP}, which have indeterminate day/month ordering - The order of day/month in formats with indeterminate day/month order is determined by considering all observed samples (plus the server locale if the observed samples still do not suggest an ordering) Relates #38086 Closes #35137 Closes #35132 --- .../ml/apis/find-file-structure.asciidoc | 157 +- .../DelimitedFileStructureFinder.java | 23 +- .../FileStructureUtils.java | 117 +- .../GrokPatternCreator.java | 133 +- .../NdJsonFileStructureFinder.java | 11 +- .../TextLogFileStructureFinder.java | 125 +- .../TimestampFormatFinder.java | 1699 +++++++++++++---- .../XmlFileStructureFinder.java | 14 +- .../DelimitedFileStructureFinderTests.java | 14 +- .../FileStructureUtilsTests.java | 85 +- .../GrokPatternCreatorTests.java | 188 +- .../TextLogFileStructureFinderTests.java | 227 +-- .../TimestampFormatFinderTests.java | 1293 ++++++++++--- 13 files changed, 2907 insertions(+), 1179 deletions(-) diff --git a/docs/reference/ml/apis/find-file-structure.asciidoc b/docs/reference/ml/apis/find-file-structure.asciidoc index 9c21d2a88b49a..e9d9da479c0f2 100644 --- a/docs/reference/ml/apis/find-file-structure.asciidoc +++ b/docs/reference/ml/apis/find-file-structure.asciidoc @@ -147,57 +147,46 @@ is not compulsory to have a timestamp in the file. -- `timestamp_format`:: - (string) The time format of the timestamp field in the file. + + (string) The Java time format of the timestamp field in the file. + + -- -NOTE: Currently there is a limitation that this format must be one that the -structure finder might choose by itself. The reason for this restriction is that -to consistently set all the fields in the response the structure finder needs a -corresponding Grok pattern name and simple regular expression for each timestamp -format. Therefore, there is little value in specifying this parameter for -structured file formats. If you know which field contains your primary timestamp, -it is as good and less error-prone to just specify `timestamp_field`. - -The valuable use case for this parameter is when the format is semi-structured +NOTE: Only a subset of Java time format letter groups are supported: + +* `a` +* `d` +* `dd` +* `EEE` +* `EEEE` +* `H` +* `HH` +* `h` +* `M` +* `MM` +* `MMM` +* `MMMM` +* `mm` +* `ss` +* `XX` +* `XXX` +* `yy` +* `yyyy` +* `zzz` + +Additionally `S` letter groups (fractional seconds) of length one to nine are +supported providing they occur after `ss` and separated from the `ss` by a `.`, +`,` or `:`. Spacing and punctuation is also permitted with the exception of `?`, +newline and carriage return, together with literal text enclosed in single +quotes. For example, `MM/dd HH.mm.ss,SSSSSS 'in' yyyy` is a valid override +format. + +One valuable use case for this parameter is when the format is semi-structured text, there are multiple timestamp formats in the file, and you know which format corresponds to the primary timestamp, but you do not want to specify the -full `grok_pattern`. - -If this parameter is not specified, the structure finder chooses the best format from -the formats it knows, which are these Java time formats: - -* `dd/MMM/yyyy:HH:mm:ss XX` -* `EEE MMM dd HH:mm zzz yyyy` -* `EEE MMM dd HH:mm:ss yyyy` -* `EEE MMM dd HH:mm:ss zzz yyyy` -* `EEE MMM dd yyyy HH:mm zzz` -* `EEE MMM dd yyyy HH:mm:ss zzz` -* `EEE, dd MMM yyyy HH:mm XX` -* `EEE, dd MMM yyyy HH:mm XXX` -* `EEE, dd MMM yyyy HH:mm:ss XX` -* `EEE, dd MMM yyyy HH:mm:ss XXX` -* `ISO8601` -* `MMM d HH:mm:ss` -* `MMM d HH:mm:ss,SSS` -* `MMM d yyyy HH:mm:ss` -* `MMM dd HH:mm:ss` -* `MMM dd HH:mm:ss,SSS` -* `MMM dd yyyy HH:mm:ss` -* `MMM dd, yyyy h:mm:ss a` -* `TAI64N` -* `UNIX` -* `UNIX_MS` -* `yyyy-MM-dd HH:mm:ss` -* `yyyy-MM-dd HH:mm:ss,SSS` -* `yyyy-MM-dd HH:mm:ss,SSS XX` -* `yyyy-MM-dd HH:mm:ss,SSSXX` -* `yyyy-MM-dd HH:mm:ss,SSSXXX` -* `yyyy-MM-dd HH:mm:ssXX` -* `yyyy-MM-dd HH:mm:ssXXX` -* `yyyy-MM-dd'T'HH:mm:ss,SSS` -* `yyyy-MM-dd'T'HH:mm:ss,SSSXX` -* `yyyy-MM-dd'T'HH:mm:ss,SSSXXX` -* `yyyyMMddHHmmss` +full `grok_pattern`. Another is when the timestamp format is one that the +structure finder does not consider by default. + +If this parameter is not specified, the structure finder chooses the best +format from a built-in set. -- @@ -263,8 +252,18 @@ If the request does not encounter errors, you receive the following result: "charset" : "UTF-8", <4> "has_byte_order_marker" : false, <5> "format" : "ndjson", <6> - "need_client_timezone" : false, <7> - "mappings" : { <8> + "timestamp_field" : "release_date", <7> + "joda_timestamp_formats" : [ <8> + "ISO8601" + ], + "java_timestamp_formats" : [ <9> + "ISO8601" + ], + "need_client_timezone" : true, <10> + "mappings" : { <11> + "@timestamp" : { + "type" : "date" + }, "author" : { "type" : "keyword" }, @@ -275,10 +274,25 @@ If the request does not encounter errors, you receive the following result: "type" : "long" }, "release_date" : { - "type" : "keyword" + "type" : "date", + "format" : "iso8601" } }, - "field_stats" : { <9> + "ingest_pipeline" : { + "description" : "Ingest pipeline created by file structure finder", + "processors" : [ + { + "date" : { + "field" : "release_date", + "timezone" : "{{ beat.timezone }}", + "formats" : [ + "ISO8601" + ] + } + } + ] + }, + "field_stats" : { <12> "author" : { "count" : 24, "cardinality" : 20, @@ -484,17 +498,22 @@ If the request does not encounter errors, you receive the following result: <5> For UTF character encodings, `has_byte_order_marker` indicates whether the file begins with a byte order marker. <6> `format` is one of `ndjson`, `xml`, `delimited` or `semi_structured_text`. -<7> If a timestamp format is detected that does not include a timezone, - `need_client_timezone` will be `true`. The server that parses the file must - therefore be told the correct timezone by the client. -<8> `mappings` contains some suitable mappings for an index into which the data - could be ingested. In this case, the `release_date` field has been given a - `keyword` type as it is not considered specific enough to convert to the - `date` type. -<9> `field_stats` contains the most common values of each field, plus basic - numeric statistics for the numeric `page_count` field. This information - may provide clues that the data needs to be cleaned or transformed prior - to use by other {ml} functionality. +<7> The `timestamp_field` names the field considered most likely to be the + primary timestamp of each document. +<8> `joda_timestamp_formats` are used to tell Logstash how to parse timestamps. +<9> `java_timestamp_formats` are the Java time formats recognized in the time + fields. Elasticsearch mappings and Ingest pipeline use this format. +<10> If a timestamp format is detected that does not include a timezone, + `need_client_timezone` will be `true`. The server that parses the file must + therefore be told the correct timezone by the client. +<11> `mappings` contains some suitable mappings for an index into which the data + could be ingested. In this case, the `release_date` field has been given a + `keyword` type as it is not considered specific enough to convert to the + `date` type. +<12> `field_stats` contains the most common values of each field, plus basic + numeric statistics for the numeric `page_count` field. This information + may provide clues that the data needs to be cleaned or transformed prior + to use by other {ml} functionality. The next example shows how it's possible to find the structure of some New York City yellow cab trip data. The first `curl` command downloads the data, the @@ -526,7 +545,7 @@ If the request does not encounter errors, you receive the following result: "charset" : "UTF-8", "has_byte_order_marker" : false, "format" : "delimited", <2> - "multiline_start_pattern" : "^.*?,\"?\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2}", + "multiline_start_pattern" : "^.*?,\"?\\d{4}-\\d{2}-\\d{2}[T ]\\d{2}:\\d{2}", "exclude_lines_pattern" : "^\"?VendorID\"?,\"?tpep_pickup_datetime\"?,\"?tpep_dropoff_datetime\"?,\"?passenger_count\"?,\"?trip_distance\"?,\"?RatecodeID\"?,\"?store_and_fwd_flag\"?,\"?PULocationID\"?,\"?DOLocationID\"?,\"?payment_type\"?,\"?fare_amount\"?,\"?extra\"?,\"?mta_tax\"?,\"?tip_amount\"?,\"?tolls_amount\"?,\"?improvement_surcharge\"?,\"?total_amount\"?", "column_names" : [ <3> "VendorID", @@ -1361,14 +1380,14 @@ this: "charset" : "UTF-8", "has_byte_order_marker" : false, "format" : "semi_structured_text", <1> - "multiline_start_pattern" : "^\\[\\b\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2},\\d{3}", <2> + "multiline_start_pattern" : "^\\[\\b\\d{4}-\\d{2}-\\d{2}[T ]\\d{2}:\\d{2}", <2> "grok_pattern" : "\\[%{TIMESTAMP_ISO8601:timestamp}\\]\\[%{LOGLEVEL:loglevel}.*", <3> "timestamp_field" : "timestamp", "joda_timestamp_formats" : [ "ISO8601" ], "java_timestamp_formats" : [ - "yyyy-MM-dd'T'HH:mm:ss,SSS" + "ISO8601" ], "need_client_timezone" : true, "mappings" : { @@ -1398,7 +1417,7 @@ this: "field" : "timestamp", "timezone" : "{{ beat.timezone }}", "formats" : [ - "yyyy-MM-dd'T'HH:mm:ss,SSS" + "ISO8601" ] } }, @@ -1515,14 +1534,14 @@ this: "charset" : "UTF-8", "has_byte_order_marker" : false, "format" : "semi_structured_text", - "multiline_start_pattern" : "^\\[\\b\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2},\\d{3}", + "multiline_start_pattern" : "^\\[\\b\\d{4}-\\d{2}-\\d{2}[T ]\\d{2}:\\d{2}", "grok_pattern" : "\\[%{TIMESTAMP_ISO8601:timestamp}\\]\\[%{LOGLEVEL:loglevel} *\\]\\[%{JAVACLASS:class} *\\] \\[%{HOSTNAME:node}\\] %{JAVALOGMESSAGE:message}", <1> "timestamp_field" : "timestamp", "joda_timestamp_formats" : [ "ISO8601" ], "java_timestamp_formats" : [ - "yyyy-MM-dd'T'HH:mm:ss,SSS" + "ISO8601" ], "need_client_timezone" : true, "mappings" : { @@ -1558,7 +1577,7 @@ this: "field" : "timestamp", "timezone" : "{{ beat.timezone }}", "formats" : [ - "yyyy-MM-dd'T'HH:mm:ss,SSS" + "ISO8601" ] } }, diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/DelimitedFileStructureFinder.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/DelimitedFileStructureFinder.java index dd30c0a1f94bc..aa88905962638 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/DelimitedFileStructureFinder.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/DelimitedFileStructureFinder.java @@ -8,7 +8,6 @@ import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.xpack.core.ml.filestructurefinder.FieldStats; import org.elasticsearch.xpack.core.ml.filestructurefinder.FileStructure; -import org.elasticsearch.xpack.ml.filestructurefinder.TimestampFormatFinder.TimestampMatch; import org.supercsv.exception.SuperCsvException; import org.supercsv.io.CsvListReader; import org.supercsv.prefs.CsvPreference; @@ -27,7 +26,6 @@ import java.util.Map; import java.util.Random; import java.util.SortedMap; -import java.util.regex.Pattern; import java.util.stream.Collectors; import java.util.stream.IntStream; @@ -62,7 +60,7 @@ static DelimitedFileStructureFinder makeDelimitedFileStructureFinder(List (field == null) ? null : field.trim()).collect(Collectors.toList()) : row); sampleRecords.add(sampleRecord); sampleMessages.add( - sampleLines.subList(prevMessageEndLineNumber + 1, lineNumbers.get(index)).stream().collect(Collectors.joining("\n"))); + String.join("\n", sampleLines.subList(prevMessageEndLineNumber + 1, lineNumbers.get(index)))); prevMessageEndLineNumber = lineNumber; } - String preamble = Pattern.compile("\n").splitAsStream(sample).limit(lineNumbers.get(1)).collect(Collectors.joining("\n", "", "\n")); + String preamble = String.join("\n", sampleLines.subList(0, lineNumbers.get(1))) + "\n"; + + // null to allow GC before timestamp search + sampleLines = null; char delimiter = (char) csvPreference.getDelimiterChar(); FileStructure.Builder structureBuilder = new FileStructure.Builder(FileStructure.Format.DELIMITED) @@ -107,7 +108,7 @@ static DelimitedFileStructureFinder makeDelimitedFileStructureFinder(List timeField = FileStructureUtils.guessTimestampField(explanation, sampleRecords, overrides, + Tuple timeField = FileStructureUtils.guessTimestampField(explanation, sampleRecords, overrides, timeoutChecker); if (timeField != null) { String timeLineRegex = null; @@ -119,7 +120,7 @@ static DelimitedFileStructureFinder makeDelimitedFileStructureFinder(Listnull if + * @return A tuple of (field name, timestamp format finder) if one can be found, or null if * there is no consistent timestamp. */ - static Tuple guessTimestampField(List explanation, List> sampleRecords, - FileStructureOverrides overrides, TimeoutChecker timeoutChecker) { + static Tuple guessTimestampField(List explanation, List> sampleRecords, + FileStructureOverrides overrides, TimeoutChecker timeoutChecker) { if (sampleRecords.isEmpty()) { return null; } + StringBuilder exceptionMsg = null; + // Accept the first match from the first sample that is compatible with all the other samples - for (Tuple candidate : findCandidates(explanation, sampleRecords, overrides, timeoutChecker)) { + for (Tuple candidate : findCandidates(explanation, sampleRecords, overrides, timeoutChecker)) { + + String fieldName = candidate.v1(); + TimestampFormatFinder timestampFormatFinder = candidate.v2(); boolean allGood = true; for (Map sampleRecord : sampleRecords.subList(1, sampleRecords.size())) { - Object fieldValue = sampleRecord.get(candidate.v1()); + Object fieldValue = sampleRecord.get(fieldName); if (fieldValue == null) { if (overrides.getTimestampField() != null) { throw new IllegalArgumentException("Specified timestamp field [" + overrides.getTimestampField() + "] is not present in record [" + sampleRecord + "]"); } - explanation.add("First sample match [" + candidate.v1() + "] ruled out because record [" + sampleRecord + + explanation.add("First sample match [" + fieldName + "] ruled out because record [" + sampleRecord + "] doesn't have field"); allGood = false; break; @@ -88,15 +91,20 @@ static Tuple guessTimestampField(List explanatio timeoutChecker.check("timestamp field determination"); - TimestampMatch match = TimestampFormatFinder.findFirstFullMatch(fieldValue.toString(), overrides.getTimestampFormat(), - timeoutChecker); - if (match == null || match.candidateIndex != candidate.v2().candidateIndex) { + try { + timestampFormatFinder.addSample(fieldValue.toString()); + } catch (IllegalArgumentException e) { if (overrides.getTimestampFormat() != null) { - throw new IllegalArgumentException("Specified timestamp format [" + overrides.getTimestampFormat() + - "] does not match for record [" + sampleRecord + "]"); + if (exceptionMsg == null) { + exceptionMsg = new StringBuilder("Specified timestamp format [" + overrides.getTimestampFormat() + + "] does not match"); + } else { + exceptionMsg.append(", nor"); + } + exceptionMsg.append(" for record [").append(sampleRecord).append("] in field [").append(fieldName).append("]"); } - explanation.add("First sample match [" + candidate.v1() + "] ruled out because record [" + sampleRecord + - "] matches differently: [" + match + "]"); + explanation.add("First sample match " + timestampFormatFinder.getRawJavaTimestampFormats() + + " ruled out because record [" + sampleRecord + "] does not match"); allGood = false; break; } @@ -104,16 +112,21 @@ static Tuple guessTimestampField(List explanatio if (allGood) { explanation.add(((overrides.getTimestampField() == null) ? "Guessing timestamp" : "Timestamp") + - " field is [" + candidate.v1() + "] with format [" + candidate.v2() + "]"); + " field is [" + fieldName + "] with format " + timestampFormatFinder.getJavaTimestampFormats()); return candidate; } } + if (exceptionMsg != null) { + throw new IllegalArgumentException(exceptionMsg.toString()); + } + return null; } - private static List> findCandidates(List explanation, List> sampleRecords, - FileStructureOverrides overrides, TimeoutChecker timeoutChecker) { + private static List> findCandidates(List explanation, List> sampleRecords, + FileStructureOverrides overrides, + TimeoutChecker timeoutChecker) { assert sampleRecords.isEmpty() == false; Map firstRecord = sampleRecords.get(0); @@ -124,7 +137,7 @@ private static List> findCandidates(List e "] is not present in record [" + firstRecord + "]"); } - List> candidates = new ArrayList<>(); + List> candidates = new ArrayList<>(); // Get candidate timestamps from the possible field(s) of the first sample record for (Map.Entry field : firstRecord.entrySet()) { @@ -132,12 +145,17 @@ private static List> findCandidates(List e if (onlyConsiderField == null || onlyConsiderField.equals(fieldName)) { Object value = field.getValue(); if (value != null) { - TimestampMatch match = TimestampFormatFinder.findFirstFullMatch(value.toString(), overrides.getTimestampFormat(), - timeoutChecker); - if (match != null) { - Tuple candidate = new Tuple<>(fieldName, match); - candidates.add(candidate); - explanation.add("First sample timestamp match [" + candidate + "]"); + // Construct the TimestampFormatFinder outside the no-op catch because an exception + // from the constructor indicates a problem with the overridden format + TimestampFormatFinder timestampFormatFinder = + new TimestampFormatFinder(explanation, overrides.getTimestampFormat(), true, true, true, timeoutChecker); + try { + timestampFormatFinder.addSample(value.toString()); + candidates.add(new Tuple<>(fieldName, timestampFormatFinder)); + explanation.add("First sample timestamp match " + timestampFormatFinder.getRawJavaTimestampFormats() + + " for field [" + fieldName + "]"); + } catch (IllegalArgumentException e) { + // No possible timestamp format found in this particular field - not a problem } } } @@ -231,6 +249,27 @@ private static Stream flatten(Object value) { } } + /** + * Finds the appropriate date mapping for a collection of field values. Throws + * {@link IllegalArgumentException} if no consistent date mapping can be found. + * @param explanation List of reasons for choosing the overall file structure. This list + * may be non-empty when the method is called, and this method may + * append to it. + * @param fieldValues Values of the field for which mappings are to be guessed. The guessed + * mapping will be compatible with all the provided values. Must not be + * empty. + * @param timeoutChecker Will abort the operation if its timeout is exceeded. + * @return The sub-section of the index mappings most appropriate for the field. + */ + static Map findTimestampMapping(List explanation, Collection fieldValues, + TimeoutChecker timeoutChecker) { + assert fieldValues.isEmpty() == false; + + TimestampFormatFinder timestampFormatFinder = new TimestampFormatFinder(explanation, true, true, true, timeoutChecker); + fieldValues.forEach(timestampFormatFinder::addSample); + return timestampFormatFinder.getEsDateMappingTypeWithFormat(); + } + /** * Given some sample values for a field, guess the most appropriate index mapping for the * field. @@ -247,26 +286,17 @@ private static Stream flatten(Object value) { */ static Map guessScalarMapping(List explanation, String fieldName, Collection fieldValues, TimeoutChecker timeoutChecker) { - assert fieldValues.isEmpty() == false; if (fieldValues.stream().allMatch(value -> "true".equals(value) || "false".equals(value))) { return Collections.singletonMap(MAPPING_TYPE_SETTING, "boolean"); } - // This checks if a date mapping would be appropriate, and, if so, finds the correct format - Iterator iter = fieldValues.iterator(); - TimestampMatch timestampMatch = TimestampFormatFinder.findFirstFullMatch(iter.next(), timeoutChecker); - while (timestampMatch != null && iter.hasNext()) { - // To be mapped as type date all the values must match the same timestamp format - it is - // not acceptable for all values to be dates, but with different formats - if (timestampMatch.equals(TimestampFormatFinder.findFirstFullMatch(iter.next(), timestampMatch.candidateIndex, - timeoutChecker)) == false) { - timestampMatch = null; - } - } - if (timestampMatch != null) { - return timestampMatch.getEsDateMappingTypeWithFormat(); + try { + return findTimestampMapping(explanation, fieldValues, timeoutChecker); + } catch (IllegalArgumentException e) { + // To be mapped as type "date" all the values must match the same timestamp format - if + // they don't we'll end up here, and move on to try other possible mappings } if (fieldValues.stream().allMatch(NUMBER_GROK::match)) { @@ -321,6 +351,7 @@ static boolean isMoreLikelyTextThanKeyword(String str) { * Create an ingest pipeline definition appropriate for the file structure. * @param grokPattern The Grok pattern used for parsing semi-structured text formats. null for * fully structured formats. + * @param customGrokPatternDefinitions The definitions for any custom patterns that {@code grokPattern} uses. * @param timestampField The input field containing the timestamp to be parsed into @timestamp. * null if there is no timestamp. * @param timestampFormats Timestamp formats to be used for parsing {@code timestampField}. @@ -328,7 +359,8 @@ static boolean isMoreLikelyTextThanKeyword(String str) { * @param needClientTimezone Is the timezone of the client supplying data to ingest required to uniquely parse the timestamp? * @return The ingest pipeline definition, or null if none is required. */ - public static Map makeIngestPipelineDefinition(String grokPattern, String timestampField, List timestampFormats, + public static Map makeIngestPipelineDefinition(String grokPattern, Map customGrokPatternDefinitions, + String timestampField, List timestampFormats, boolean needClientTimezone) { if (grokPattern == null && timestampField == null) { @@ -344,7 +376,12 @@ public static Map makeIngestPipelineDefinition(String grokPatter Map grokProcessorSettings = new LinkedHashMap<>(); grokProcessorSettings.put("field", "message"); grokProcessorSettings.put("patterns", Collections.singletonList(grokPattern)); + if (customGrokPatternDefinitions.isEmpty() == false) { + grokProcessorSettings.put("pattern_definitions", customGrokPatternDefinitions); + } processors.add(Collections.singletonMap("grok", grokProcessorSettings)); + } else { + assert customGrokPatternDefinitions.isEmpty(); } if (timestampField != null) { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/GrokPatternCreator.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/GrokPatternCreator.java index 6620afcb7145b..7a5c9a48f8757 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/GrokPatternCreator.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/GrokPatternCreator.java @@ -8,7 +8,6 @@ import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.grok.Grok; import org.elasticsearch.xpack.core.ml.filestructurefinder.FieldStats; -import org.elasticsearch.xpack.ml.filestructurefinder.TimestampFormatFinder.TimestampMatch; import java.util.ArrayList; import java.util.Arrays; @@ -18,6 +17,7 @@ import java.util.LinkedHashSet; import java.util.List; import java.util.Map; +import java.util.Objects; import java.util.Set; import java.util.regex.Matcher; import java.util.regex.Pattern; @@ -76,10 +76,12 @@ public final class GrokPatternCreator { new ValueOnlyGrokPatternCandidate("DATESTAMP_RFC2822", "date", "extra_timestamp"), new ValueOnlyGrokPatternCandidate("DATESTAMP_OTHER", "date", "extra_timestamp"), new ValueOnlyGrokPatternCandidate("DATESTAMP_EVENTLOG", "date", "extra_timestamp"), + new ValueOnlyGrokPatternCandidate("HTTPDERROR_DATE", "date", "extra_timestamp"), new ValueOnlyGrokPatternCandidate("SYSLOGTIMESTAMP", "date", "extra_timestamp"), new ValueOnlyGrokPatternCandidate("HTTPDATE", "date", "extra_timestamp"), new ValueOnlyGrokPatternCandidate("CATALINA_DATESTAMP", "date", "extra_timestamp"), new ValueOnlyGrokPatternCandidate("CISCOTIMESTAMP", "date", "extra_timestamp"), + new ValueOnlyGrokPatternCandidate("DATESTAMP", "date", "extra_timestamp"), new ValueOnlyGrokPatternCandidate("LOGLEVEL", "keyword", "loglevel"), new ValueOnlyGrokPatternCandidate("URI", "keyword", "uri"), new ValueOnlyGrokPatternCandidate("UUID", "keyword", "uuid"), @@ -90,7 +92,8 @@ public final class GrokPatternCreator { // TODO: would be nice to have IPORHOST here, but HOSTNAME matches almost all words new ValueOnlyGrokPatternCandidate("IP", "ip", "ipaddress"), new ValueOnlyGrokPatternCandidate("DATE", "date", "date"), - new ValueOnlyGrokPatternCandidate("TIME", "date", "time"), + // A time with no date cannot be stored in a field of type "date", hence "keyword" + new ValueOnlyGrokPatternCandidate("TIME", "keyword", "time"), // This already includes pre/post break conditions new ValueOnlyGrokPatternCandidate("QUOTEDSTRING", "keyword", "field", "", ""), // Disallow +, - and . before numbers, as well as "word" characters, otherwise we'll pick @@ -121,6 +124,7 @@ public final class GrokPatternCreator { */ private final Map mappings; private final Map fieldStats; + private final Map grokPatternDefinitions; private final Map fieldNameCountStore = new HashMap<>(); private final StringBuilder overallGrokPatternBuilder = new StringBuilder(); private final TimeoutChecker timeoutChecker; @@ -131,16 +135,24 @@ public final class GrokPatternCreator { * can be appended by the methods of this class. * @param sampleMessages Sample messages that any Grok pattern found must match. * @param mappings Will be updated with mappings appropriate for the returned pattern, if non-null. - * @param timeoutChecker Will abort the operation if its timeout is exceeded. * @param fieldStats Will be updated with field stats for the fields in the returned pattern, if non-null. + * @param customGrokPatternDefinitions Custom Grok pattern definitions to add to the built-in ones. + * @param timeoutChecker Will abort the operation if its timeout is exceeded. */ public GrokPatternCreator(List explanation, Collection sampleMessages, Map mappings, - Map fieldStats, TimeoutChecker timeoutChecker) { - this.explanation = explanation; + Map fieldStats, Map customGrokPatternDefinitions, + TimeoutChecker timeoutChecker) { + this.explanation = Objects.requireNonNull(explanation); this.sampleMessages = Collections.unmodifiableCollection(sampleMessages); this.mappings = mappings; this.fieldStats = fieldStats; - this.timeoutChecker = timeoutChecker; + if (customGrokPatternDefinitions.isEmpty()) { + grokPatternDefinitions = Grok.getBuiltinPatterns(); + } else { + grokPatternDefinitions = new HashMap<>(Grok.getBuiltinPatterns()); + grokPatternDefinitions.putAll(customGrokPatternDefinitions); + } + this.timeoutChecker = Objects.requireNonNull(timeoutChecker); } /** @@ -171,7 +183,8 @@ public Tuple findFullLineGrokPattern(String timestampField) { */ public void validateFullLineGrokPattern(String grokPattern, String timestampField) { - FullMatchGrokPatternCandidate candidate = FullMatchGrokPatternCandidate.fromGrokPattern(grokPattern, timestampField); + FullMatchGrokPatternCandidate candidate = FullMatchGrokPatternCandidate.fromGrokPattern(grokPattern, timestampField, + grokPatternDefinitions); if (candidate.matchesAll(sampleMessages, timeoutChecker)) { candidate.processMatch(explanation, sampleMessages, mappings, fieldStats, timeoutChecker); } else { @@ -189,7 +202,7 @@ public String createGrokPatternFromExamples(String seedPatternName, String seedF overallGrokPatternBuilder.setLength(0); - GrokPatternCandidate seedCandidate = new NoMappingGrokPatternCandidate(seedPatternName, seedFieldName); + GrokPatternCandidate seedCandidate = new NoMappingGrokPatternCandidate(seedPatternName, seedFieldName, grokPatternDefinitions); processCandidateAndSplit(seedCandidate, true, sampleMessages, false, 0, false, 0); @@ -215,8 +228,8 @@ private void processCandidateAndSplit(GrokPatternCandidate chosenPattern, boolea Collection prefaces = new ArrayList<>(); Collection epilogues = new ArrayList<>(); - String patternBuilderContent = - chosenPattern.processCaptures(fieldNameCountStore, snippets, prefaces, epilogues, mappings, fieldStats, timeoutChecker); + String patternBuilderContent = chosenPattern.processCaptures(explanation, fieldNameCountStore, snippets, prefaces, epilogues, + mappings, fieldStats, timeoutChecker); appendBestGrokMatchForStrings(false, prefaces, ignoreKeyValueCandidateLeft, ignoreValueOnlyCandidatesLeft); overallGrokPatternBuilder.append(patternBuilderContent); appendBestGrokMatchForStrings(isLast, epilogues, ignoreKeyValueCandidateRight, ignoreValueOnlyCandidatesRight); @@ -234,7 +247,7 @@ void appendBestGrokMatchForStrings(boolean isLast, Collection snippets, GrokPatternCandidate bestCandidate = null; if (snippets.isEmpty() == false) { - GrokPatternCandidate kvCandidate = new KeyValueGrokPatternCandidate(explanation); + GrokPatternCandidate kvCandidate = new KeyValueGrokPatternCandidate(); if (ignoreKeyValueCandidate == false && kvCandidate.matchesAll(snippets)) { bestCandidate = kvCandidate; } else { @@ -409,9 +422,9 @@ interface GrokPatternCandidate { * calculate field stats. * @return The string that needs to be incorporated into the overall Grok pattern for the line. */ - String processCaptures(Map fieldNameCountStore, Collection snippets, Collection prefaces, - Collection epilogues, Map mappings, Map fieldStats, - TimeoutChecker timeoutChecker); + String processCaptures(List explanation, Map fieldNameCountStore, Collection snippets, + Collection prefaces, Collection epilogues, Map mappings, + Map fieldStats, TimeoutChecker timeoutChecker); } /** @@ -434,10 +447,22 @@ static class ValueOnlyGrokPatternCandidate implements GrokPatternCandidate { * for the pre and/or post breaks. * * @param grokPatternName Name of the Grok pattern to try to match - must match one defined in Logstash. + * @param mappingType Data type for field in Elasticsearch mappings. * @param fieldName Name of the field to extract from the match. */ ValueOnlyGrokPatternCandidate(String grokPatternName, String mappingType, String fieldName) { - this(grokPatternName, mappingType, fieldName, "\\b", "\\b"); + this(grokPatternName, mappingType, fieldName, "\\b", "\\b", Grok.getBuiltinPatterns()); + } + + /** + * @param grokPatternName Name of the Grok pattern to try to match - must match one defined in Logstash. + * @param mappingType Data type for field in Elasticsearch mappings. + * @param fieldName Name of the field to extract from the match. + * @param grokPatternDefinitions Definitions of Grok patterns to be used. + */ + ValueOnlyGrokPatternCandidate(String grokPatternName, String mappingType, String fieldName, + Map grokPatternDefinitions) { + this(grokPatternName, mappingType, fieldName, "\\b", "\\b", grokPatternDefinitions); } /** @@ -448,11 +473,24 @@ static class ValueOnlyGrokPatternCandidate implements GrokPatternCandidate { * @param postBreak Only consider the match if it's broken from the following text by this. */ ValueOnlyGrokPatternCandidate(String grokPatternName, String mappingType, String fieldName, String preBreak, String postBreak) { + this(grokPatternName, mappingType, fieldName, preBreak, postBreak, Grok.getBuiltinPatterns()); + } + + /** + * @param grokPatternName Name of the Grok pattern to try to match - must match one defined in Logstash. + * @param mappingType Data type for field in Elasticsearch mappings. + * @param fieldName Name of the field to extract from the match. + * @param preBreak Only consider the match if it's broken from the previous text by this. + * @param postBreak Only consider the match if it's broken from the following text by this. + * @param grokPatternDefinitions Definitions of Grok patterns to be used. + */ + ValueOnlyGrokPatternCandidate(String grokPatternName, String mappingType, String fieldName, String preBreak, String postBreak, + Map grokPatternDefinitions) { this.grokPatternName = grokPatternName; this.mappingType = mappingType; this.fieldName = fieldName; // The (?m) here has the Ruby meaning, which is equivalent to (?s) in Java - grok = new Grok(Grok.getBuiltinPatterns(), "(?m)%{DATA:" + PREFACE + "}" + preBreak + + grok = new Grok(grokPatternDefinitions, "(?m)%{DATA:" + PREFACE + "}" + preBreak + "%{" + grokPatternName + ":" + VALUE + "}" + postBreak + "%{GREEDYDATA:" + EPILOGUE + "}", TimeoutChecker.watchdog); } @@ -467,9 +505,9 @@ public boolean matchesAll(Collection snippets) { * bit that matches. */ @Override - public String processCaptures(Map fieldNameCountStore, Collection snippets, Collection prefaces, - Collection epilogues, Map mappings, Map fieldStats, - TimeoutChecker timeoutChecker) { + public String processCaptures(List explanation, Map fieldNameCountStore, Collection snippets, + Collection prefaces, Collection epilogues, Map mappings, + Map fieldStats, TimeoutChecker timeoutChecker) { Collection values = new ArrayList<>(); for (String snippet : snippets) { Map captures = timeoutChecker.grokCaptures(grok, snippet, "full message Grok pattern field extraction"); @@ -485,10 +523,13 @@ public String processCaptures(Map fieldNameCountStore, Collecti if (mappings != null) { Map fullMappingType = Collections.singletonMap(FileStructureUtils.MAPPING_TYPE_SETTING, mappingType); if ("date".equals(mappingType)) { - assert values.isEmpty() == false; - TimestampMatch timestampMatch = TimestampFormatFinder.findFirstFullMatch(values.iterator().next(), timeoutChecker); - if (timestampMatch != null) { - fullMappingType = timestampMatch.getEsDateMappingTypeWithFormat(); + try { + fullMappingType = FileStructureUtils.findTimestampMapping(explanation, values, timeoutChecker); + } catch (IllegalArgumentException e) { + // This feels like it shouldn't happen, but there may be some obscure edge case + // where it does, and in production it will cause less frustration to just return + // a mapping type of "date" with no format than to fail the whole analysis + assert e == null : e.getMessage(); } timeoutChecker.check("mapping determination"); } @@ -509,13 +550,9 @@ public String processCaptures(Map fieldNameCountStore, Collecti */ static class KeyValueGrokPatternCandidate implements GrokPatternCandidate { - private static final Pattern kvFinder = Pattern.compile("\\b(\\w+)=[\\w.-]+"); - private final List explanation; - private String fieldName; + private static final Pattern KV_FINDER = Pattern.compile("\\b(\\w+)=[\\w.-]+"); - KeyValueGrokPatternCandidate(List explanation) { - this.explanation = explanation; - } + private String fieldName; @Override public boolean matchesAll(Collection snippets) { @@ -523,7 +560,7 @@ public boolean matchesAll(Collection snippets) { boolean isFirst = true; for (String snippet : snippets) { if (isFirst) { - Matcher matcher = kvFinder.matcher(snippet); + Matcher matcher = KV_FINDER.matcher(snippet); while (matcher.find()) { candidateNames.add(matcher.group(1)); } @@ -540,9 +577,9 @@ public boolean matchesAll(Collection snippets) { } @Override - public String processCaptures(Map fieldNameCountStore, Collection snippets, Collection prefaces, - Collection epilogues, Map mappings, Map fieldStats, - TimeoutChecker timeoutChecker) { + public String processCaptures(List explanation, Map fieldNameCountStore, Collection snippets, + Collection prefaces, Collection epilogues, Map mappings, + Map fieldStats, TimeoutChecker timeoutChecker) { if (fieldName == null) { throw new IllegalStateException("Cannot process KV matches until a field name has been determined"); } @@ -578,15 +615,15 @@ public String processCaptures(Map fieldNameCountStore, Collecti */ static class NoMappingGrokPatternCandidate extends ValueOnlyGrokPatternCandidate { - NoMappingGrokPatternCandidate(String grokPatternName, String fieldName) { - super(grokPatternName, null, fieldName); + NoMappingGrokPatternCandidate(String grokPatternName, String fieldName, Map grokPatternDefinitions) { + super(grokPatternName, null, fieldName, grokPatternDefinitions); } @Override - public String processCaptures(Map fieldNameCountStore, Collection snippets, Collection prefaces, - Collection epilogues, Map mappings, Map fieldStats, - TimeoutChecker timeoutChecker) { - return super.processCaptures(fieldNameCountStore, snippets, prefaces, epilogues, null, fieldStats, timeoutChecker); + public String processCaptures(List explanation, Map fieldNameCountStore, Collection snippets, + Collection prefaces, Collection epilogues, Map mappings, + Map fieldStats, TimeoutChecker timeoutChecker) { + return super.processCaptures(explanation, fieldNameCountStore, snippets, prefaces, epilogues, null, fieldStats, timeoutChecker); } } @@ -600,17 +637,27 @@ static class FullMatchGrokPatternCandidate { private final Grok grok; static FullMatchGrokPatternCandidate fromGrokPatternName(String grokPatternName, String timeField) { - return new FullMatchGrokPatternCandidate("%{" + grokPatternName + "}", timeField); + return new FullMatchGrokPatternCandidate("%{" + grokPatternName + "}", timeField, Grok.getBuiltinPatterns()); + } + + static FullMatchGrokPatternCandidate fromGrokPatternName(String grokPatternName, String timeField, + Map grokPatternDefinitions) { + return new FullMatchGrokPatternCandidate("%{" + grokPatternName + "}", timeField, grokPatternDefinitions); } static FullMatchGrokPatternCandidate fromGrokPattern(String grokPattern, String timeField) { - return new FullMatchGrokPatternCandidate(grokPattern, timeField); + return new FullMatchGrokPatternCandidate(grokPattern, timeField, Grok.getBuiltinPatterns()); + } + + static FullMatchGrokPatternCandidate fromGrokPattern(String grokPattern, String timeField, + Map grokPatternDefinitions) { + return new FullMatchGrokPatternCandidate(grokPattern, timeField, grokPatternDefinitions); } - private FullMatchGrokPatternCandidate(String grokPattern, String timeField) { + private FullMatchGrokPatternCandidate(String grokPattern, String timeField, Map grokPatternDefinitions) { this.grokPattern = grokPattern; this.timeField = timeField; - grok = new Grok(Grok.getBuiltinPatterns(), grokPattern, TimeoutChecker.watchdog); + grok = new Grok(grokPatternDefinitions, grokPattern, TimeoutChecker.watchdog); } public String getTimeField() { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/NdJsonFileStructureFinder.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/NdJsonFileStructureFinder.java index 33d9ba56b3f53..116de8f7679d2 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/NdJsonFileStructureFinder.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/NdJsonFileStructureFinder.java @@ -11,7 +11,6 @@ import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.xpack.core.ml.filestructurefinder.FieldStats; import org.elasticsearch.xpack.core.ml.filestructurefinder.FileStructure; -import org.elasticsearch.xpack.ml.filestructurefinder.TimestampFormatFinder.TimestampMatch; import java.io.IOException; import java.util.ArrayList; @@ -53,17 +52,17 @@ static NdJsonFileStructureFinder makeNdJsonFileStructureFinder(List expl .setNumLinesAnalyzed(sampleMessages.size()) .setNumMessagesAnalyzed(sampleRecords.size()); - Tuple timeField = + Tuple timeField = FileStructureUtils.guessTimestampField(explanation, sampleRecords, overrides, timeoutChecker); if (timeField != null) { boolean needClientTimeZone = timeField.v2().hasTimezoneDependentParsing(); structureBuilder.setTimestampField(timeField.v1()) - .setJodaTimestampFormats(timeField.v2().jodaTimestampFormats) - .setJavaTimestampFormats(timeField.v2().javaTimestampFormats) + .setJodaTimestampFormats(timeField.v2().getJodaTimestampFormats()) + .setJavaTimestampFormats(timeField.v2().getJavaTimestampFormats()) .setNeedClientTimezone(needClientTimeZone) - .setIngestPipeline(FileStructureUtils.makeIngestPipelineDefinition(null, timeField.v1(), - timeField.v2().javaTimestampFormats, needClientTimeZone)); + .setIngestPipeline(FileStructureUtils.makeIngestPipelineDefinition(null, Collections.emptyMap(), timeField.v1(), + timeField.v2().getJavaTimestampFormats(), needClientTimeZone)); } Tuple, SortedMap> mappingsAndFieldStats = diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/TextLogFileStructureFinder.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/TextLogFileStructureFinder.java index 36e5e91b4326b..d07eea15f973f 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/TextLogFileStructureFinder.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/TextLogFileStructureFinder.java @@ -8,16 +8,12 @@ import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.xpack.core.ml.filestructurefinder.FieldStats; import org.elasticsearch.xpack.core.ml.filestructurefinder.FileStructure; -import org.elasticsearch.xpack.ml.filestructurefinder.TimestampFormatFinder.TimestampMatch; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; -import java.util.HashSet; -import java.util.LinkedHashMap; import java.util.List; import java.util.Map; -import java.util.Set; import java.util.SortedMap; import java.util.TreeMap; import java.util.regex.Pattern; @@ -30,25 +26,33 @@ public class TextLogFileStructureFinder implements FileStructureFinder { static TextLogFileStructureFinder makeTextLogFileStructureFinder(List explanation, String sample, String charsetName, Boolean hasByteOrderMarker, FileStructureOverrides overrides, TimeoutChecker timeoutChecker) { - String[] sampleLines = sample.split("\n"); - Tuple> bestTimestamp = mostLikelyTimestamp(sampleLines, overrides, timeoutChecker); - if (bestTimestamp == null) { - // Is it appropriate to treat a file that is neither structured nor has - // a regular pattern of timestamps as a log file? Probably not... - throw new IllegalArgumentException("Could not find " + - ((overrides.getTimestampFormat() == null) ? "a timestamp" : "the specified timestamp format") + " in the sample provided"); + TimestampFormatFinder timestampFormatFinder = populateTimestampFormatFinder(explanation, sampleLines, overrides, timeoutChecker); + switch (timestampFormatFinder.getNumMatchedFormats()) { + case 0: + // Is it appropriate to treat a file that is neither structured nor has + // a regular pattern of timestamps as a log file? Probably not... + throw new IllegalArgumentException("Could not find " + ((overrides.getTimestampFormat() == null) + ? "a timestamp" + : "the specified timestamp format") + " in the sample provided"); + case 1: + // Simple case + break; + default: + timestampFormatFinder.selectBestMatch(); + break; } - explanation.add(((overrides.getTimestampFormat() == null) ? "Most likely timestamp" : "Timestamp") + " format is [" + - bestTimestamp.v1() + "]"); + explanation.add(((overrides.getTimestampFormat() == null) ? "Most likely timestamp" : "Timestamp") + " format is " + + timestampFormatFinder.getJavaTimestampFormats()); List sampleMessages = new ArrayList<>(); StringBuilder preamble = new StringBuilder(); int linesConsumed = 0; StringBuilder message = null; int linesInMessage = 0; - String multiLineRegex = createMultiLineMessageStartRegex(bestTimestamp.v2(), bestTimestamp.v1().simplePattern.pattern()); + String multiLineRegex = createMultiLineMessageStartRegex(timestampFormatFinder.getPrefaces(), + timestampFormatFinder.getSimplePattern().pattern()); Pattern multiLinePattern = Pattern.compile(multiLineRegex); for (String sampleLine : sampleLines) { if (multiLinePattern.matcher(sampleLine).find()) { @@ -82,6 +86,9 @@ static TextLogFileStructureFinder makeTextLogFileStructureFinder(List ex + "problem is probably that the primary timestamp format has been incorrectly detected, so try overriding it."); } + // null to allow GC before Grok pattern search + sampleLines = null; + FileStructure.Builder structureBuilder = new FileStructure.Builder(FileStructure.Format.SEMI_STRUCTURED_TEXT) .setCharset(charsetName) .setHasByteOrderMarker(hasByteOrderMarker) @@ -97,7 +104,9 @@ static TextLogFileStructureFinder makeTextLogFileStructureFinder(List ex SortedMap fieldStats = new TreeMap<>(); fieldStats.put("message", FileStructureUtils.calculateFieldStats(sampleMessages, timeoutChecker)); - GrokPatternCreator grokPatternCreator = new GrokPatternCreator(explanation, sampleMessages, mappings, fieldStats, timeoutChecker); + Map customGrokPatternDefinitions = timestampFormatFinder.getCustomGrokPatternDefinitions(); + GrokPatternCreator grokPatternCreator = new GrokPatternCreator(explanation, sampleMessages, mappings, fieldStats, + customGrokPatternDefinitions, timeoutChecker); // We can't parse directly into @timestamp using Grok, so parse to some other time field, which the date filter will then remove String interimTimestampField = overrides.getTimestampField(); String grokPattern = overrides.getGrokPattern(); @@ -116,20 +125,22 @@ static TextLogFileStructureFinder makeTextLogFileStructureFinder(List ex if (interimTimestampField == null) { interimTimestampField = "timestamp"; } - grokPattern = grokPatternCreator.createGrokPatternFromExamples(bestTimestamp.v1().grokPatternName, interimTimestampField); + grokPattern = + grokPatternCreator.createGrokPatternFromExamples(timestampFormatFinder.getGrokPatternName(), interimTimestampField); } } - boolean needClientTimeZone = bestTimestamp.v1().hasTimezoneDependentParsing(); + boolean needClientTimeZone = timestampFormatFinder.hasTimezoneDependentParsing(); FileStructure structure = structureBuilder .setTimestampField(interimTimestampField) - .setJodaTimestampFormats(bestTimestamp.v1().jodaTimestampFormats) - .setJavaTimestampFormats(bestTimestamp.v1().javaTimestampFormats) + .setJodaTimestampFormats(timestampFormatFinder.getJodaTimestampFormats()) + .setJavaTimestampFormats(timestampFormatFinder.getJavaTimestampFormats()) .setNeedClientTimezone(needClientTimeZone) .setGrokPattern(grokPattern) - .setIngestPipeline(FileStructureUtils.makeIngestPipelineDefinition(grokPattern, interimTimestampField, - bestTimestamp.v1().javaTimestampFormats, needClientTimeZone)) + .setIngestPipeline(FileStructureUtils.makeIngestPipelineDefinition(grokPattern, + customGrokPatternDefinitions, interimTimestampField, + timestampFormatFinder.getJavaTimestampFormats(), needClientTimeZone)) .setMappings(mappings) .setFieldStats(fieldStats) .setExplanation(explanation) @@ -153,79 +164,23 @@ public FileStructure getStructure() { return structure; } - static Tuple> mostLikelyTimestamp(String[] sampleLines, FileStructureOverrides overrides, - TimeoutChecker timeoutChecker) { + static TimestampFormatFinder populateTimestampFormatFinder(List explanation, String[] sampleLines, + FileStructureOverrides overrides, TimeoutChecker timeoutChecker) { + TimestampFormatFinder timestampFormatFinder = + new TimestampFormatFinder(explanation, overrides.getTimestampFormat(), false, false, false, timeoutChecker); - Map>> timestampMatches = new LinkedHashMap<>(); - - int remainingLines = sampleLines.length; - double differenceBetweenTwoHighestWeights = 0.0; for (String sampleLine : sampleLines) { - TimestampMatch match = TimestampFormatFinder.findFirstMatch(sampleLine, overrides.getTimestampFormat(), timeoutChecker); - if (match != null) { - TimestampMatch pureMatch = new TimestampMatch(match.candidateIndex, "", match.jodaTimestampFormats, - match.javaTimestampFormats, match.simplePattern, match.grokPatternName, ""); - timestampMatches.compute(pureMatch, (k, v) -> { - if (v == null) { - return new Tuple<>(weightForMatch(match.preface), new HashSet<>(Collections.singletonList(match.preface))); - } else { - v.v2().add(match.preface); - return new Tuple<>(v.v1() + weightForMatch(match.preface), v.v2()); - } - }); - differenceBetweenTwoHighestWeights = findDifferenceBetweenTwoHighestWeights(timestampMatches.values()); - } - timeoutChecker.check("timestamp format determination"); - // The highest possible weight is 1, so if the difference between the two highest weights - // is less than the number of lines remaining then the leader cannot possibly be overtaken - if (differenceBetweenTwoHighestWeights > --remainingLines) { - break; - } + timestampFormatFinder.addSample(sampleLine); } - double highestWeight = 0.0; - Tuple> highestWeightMatch = null; - for (Map.Entry>> entry : timestampMatches.entrySet()) { - double weight = entry.getValue().v1(); - if (weight > highestWeight) { - highestWeight = weight; - highestWeightMatch = new Tuple<>(entry.getKey(), entry.getValue().v2()); - } - } - return highestWeightMatch; - } - - /** - * Used to weight a timestamp match according to how far along the line it is found. - * Timestamps at the very beginning of the line are given a weight of 1. The weight - * progressively decreases the more text there is preceding the timestamp match, but - * is always greater than 0. - * @return A weight in the range (0, 1]. - */ - private static double weightForMatch(String preface) { - return Math.pow(1.0 + preface.length() / 15.0, -1.1); - } - - private static double findDifferenceBetweenTwoHighestWeights(Collection>> timestampMatches) { - double highestWeight = 0.0; - double secondHighestWeight = 0.0; - for (Tuple> timestampMatch : timestampMatches) { - double weight = timestampMatch.v1(); - if (weight > highestWeight) { - secondHighestWeight = highestWeight; - highestWeight = weight; - } else if (weight > secondHighestWeight) { - secondHighestWeight = weight; - } - } - return highestWeight - secondHighestWeight; + return timestampFormatFinder; } - static String createMultiLineMessageStartRegex(Collection prefaces, String timestampRegex) { + static String createMultiLineMessageStartRegex(Collection prefaces, String simpleDateRegex) { StringBuilder builder = new StringBuilder("^"); GrokPatternCreator.addIntermediateRegex(builder, prefaces); - builder.append(timestampRegex); + builder.append(simpleDateRegex); if (builder.substring(0, 3).equals("^\\b")) { builder.delete(1, 3); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/TimestampFormatFinder.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/TimestampFormatFinder.java index c19a93a7be99e..0283437d64808 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/TimestampFormatFinder.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/TimestampFormatFinder.java @@ -5,56 +5,106 @@ */ package org.elasticsearch.xpack.ml.filestructurefinder; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.grok.Grok; +import java.time.DateTimeException; +import java.time.Instant; +import java.time.LocalDate; +import java.time.ZoneOffset; +import java.time.format.DateTimeFormatter; +import java.time.format.FormatStyle; +import java.time.format.ResolverStyle; +import java.time.temporal.ChronoField; +import java.time.temporal.TemporalAccessor; +import java.util.ArrayList; import java.util.Arrays; +import java.util.BitSet; import java.util.Collections; +import java.util.HashMap; import java.util.LinkedHashMap; +import java.util.LinkedHashSet; import java.util.List; +import java.util.Locale; import java.util.Map; import java.util.Objects; +import java.util.Set; +import java.util.function.Function; import java.util.regex.Matcher; import java.util.regex.Pattern; import java.util.stream.Collectors; -import java.util.stream.Stream; /** * Used to find the best timestamp format for one of the following situations: * 1. Matching an entire field value * 2. Matching a timestamp found somewhere within a message + * + * This class is not thread safe. Each object of this class should only be used from within a single thread. */ public final class TimestampFormatFinder { private static final String PREFACE = "preface"; private static final String EPILOGUE = "epilogue"; + private static final String PUNCTUATION_THAT_NEEDS_ESCAPING_IN_REGEX = "\\|()[]{}^$.*?"; private static final String FRACTIONAL_SECOND_SEPARATORS = ":.,"; - private static final Pattern FRACTIONAL_SECOND_INTERPRETER = Pattern.compile("([" + FRACTIONAL_SECOND_SEPARATORS + "])(\\d{3,9})"); - private static final char DEFAULT_FRACTIONAL_SECOND_SEPARATOR = ','; - private static final Pattern FRACTIONAL_SECOND_TIMESTAMP_FORMAT_PATTERN = - Pattern.compile("([" + FRACTIONAL_SECOND_SEPARATORS + "]S{3,9})"); - private static final String DEFAULT_FRACTIONAL_SECOND_FORMAT = DEFAULT_FRACTIONAL_SECOND_SEPARATOR + "SSS"; - - /** - * The timestamp patterns are complex and it can be slow to prove they do not - * match anywhere in a long message. Many of the timestamps are similar and - * will never be found in a string if simpler sub-patterns do not exist in the - * string. These sub-patterns can be used to quickly rule out multiple complex - * patterns. These patterns do not need to represent quantities that are - * useful to know the value of, merely character sequences that can be used to - * prove that several more complex patterns cannot possibly match. - */ - private static final List QUICK_RULE_OUT_PATTERNS = Arrays.asList( - // YYYY-MM-dd followed by a space - Pattern.compile("\\b\\d{4}-\\d{2}-\\d{2} "), - // The end of some number (likely year or day) followed by a space then HH:mm - Pattern.compile("\\d \\d{2}:\\d{2}\\b"), - // HH:mm:ss surrounded by spaces - Pattern.compile(" \\d{2}:\\d{2}:\\d{2} "), - // Literal 'T' surrounded by numbers - Pattern.compile("\\dT\\d") - ); + private static final char INDETERMINATE_FIELD_PLACEHOLDER = '?'; + // The ? characters in this must match INDETERMINATE_FIELD_PLACEHOLDER + // above, but they're literals in this regex to aid readability + private static final Pattern INDETERMINATE_FORMAT_INTERPRETER = Pattern.compile("([^?]*)(\\?{1,2})(?:([^?]*)(\\?{1,2})([^?]*))?"); + + /** + * These are the date format letter groups that are supported in custom formats + * + * (Note: Fractional seconds is a special case as they have to follow seconds.) + */ + private static final Map> VALID_LETTER_GROUPS; + static { + Map> validLetterGroups = new HashMap<>(); + validLetterGroups.put("yyyy", new Tuple<>("%{YEAR}", "\\d{4}")); + validLetterGroups.put("yy", new Tuple<>("%{YEAR}", "\\d{2}")); + validLetterGroups.put("M", new Tuple<>("%{MONTHNUM}", "\\d{1,2}")); + validLetterGroups.put("MM", new Tuple<>("%{MONTHNUM2}", "\\d{2}")); + // The simple regex here is based on the fact that the %{MONTH} Grok pattern only matches English and German month names + validLetterGroups.put("MMM", new Tuple<>("%{MONTH}", "[A-Z]\\S{2}")); + validLetterGroups.put("MMMM", new Tuple<>("%{MONTH}", "[A-Z]\\S{2,8}")); + validLetterGroups.put("d", new Tuple<>("%{MONTHDAY}", "\\d{1,2}")); + validLetterGroups.put("dd", new Tuple<>("%{MONTHDAY}", "\\d{2}")); + // The simple regex here is based on the fact that the %{DAY} Grok pattern only matches English and German day names + validLetterGroups.put("EEE", new Tuple<>("%{DAY}", "[A-Z]\\S{2}")); + validLetterGroups.put("EEEE", new Tuple<>("%{DAY}", "[A-Z]\\S{2,8}")); + validLetterGroups.put("H", new Tuple<>("%{HOUR}", "\\d{1,2}")); + validLetterGroups.put("HH", new Tuple<>("%{HOUR}", "\\d{2}")); + validLetterGroups.put("h", new Tuple<>("%{HOUR}", "\\d{1,2}")); + validLetterGroups.put("mm", new Tuple<>("%{MINUTE}", "\\d{2}")); + validLetterGroups.put("ss", new Tuple<>("%{SECOND}", "\\d{2}")); + validLetterGroups.put("a", new Tuple<>("(?:AM|PM)", "[AP]M")); + validLetterGroups.put("XX", new Tuple<>("%{ISO8601_TIMEZONE}", "(?:Z|[+-]\\d{4})")); + validLetterGroups.put("XXX", new Tuple<>("%{ISO8601_TIMEZONE}", "(?:Z|[+-]\\d{2}:\\d{2})")); + validLetterGroups.put("zzz", new Tuple<>("%{TZ}", "[A-Z]{3}")); + VALID_LETTER_GROUPS = Collections.unmodifiableMap(validLetterGroups); + } + + static final String CUSTOM_TIMESTAMP_GROK_NAME = "CUSTOM_TIMESTAMP"; + + /** + * Candidates for the special format strings (ISO8601, UNIX_MS, UNIX and TAI64N) + */ + static final CandidateTimestampFormat ISO8601_CANDIDATE_FORMAT = + new CandidateTimestampFormat(CandidateTimestampFormat::iso8601FormatFromExample, + "\\b\\d{4}-\\d{2}-\\d{2}[T ]\\d{2}:\\d{2}", "\\b%{TIMESTAMP_ISO8601}\\b", "TIMESTAMP_ISO8601", + "1111 11 11 11 11", 0, 19); + static final CandidateTimestampFormat UNIX_MS_CANDIDATE_FORMAT = + new CandidateTimestampFormat(example -> Collections.singletonList("UNIX_MS"), "\\b\\d{13}\\b", "\\b\\d{13}\\b", "POSINT", + "1111111111111", 0, 0); + static final CandidateTimestampFormat UNIX_CANDIDATE_FORMAT = + new CandidateTimestampFormat(example -> Collections.singletonList("UNIX"), "\\b\\d{10}\\b", "\\b\\d{10}(?:\\.\\d{3,9})?\\b", + "NUMBER", "1111111111", 0, 10); + static final CandidateTimestampFormat TAI64N_CANDIDATE_FORMAT = + new CandidateTimestampFormat(example -> Collections.singletonList("TAI64N"), "\\b[0-9A-Fa-f]{24}\\b", "\\b[0-9A-Fa-f]{24}\\b", + "BASE16NUM"); /** * The first match in this list will be chosen, so it needs to be ordered @@ -64,427 +114,1210 @@ public final class TimestampFormatFinder { // The TOMCAT_DATESTAMP format has to come before ISO8601 because it's basically ISO8601 but // with a space before the timezone, and because the timezone is optional in ISO8601 it will // be recognised as that with the timezone missed off if ISO8601 is checked first - new CandidateTimestampFormat("YYYY-MM-dd HH:mm:ss,SSS Z", "yyyy-MM-dd HH:mm:ss,SSS XX", - "\\b\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2},\\d{3}", + new CandidateTimestampFormat(example -> CandidateTimestampFormat.iso8601LikeFormatFromExample(example, " ", " "), + "\\b\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2}[:.,]\\d{3}", "\\b20\\d{2}-%{MONTHNUM}-%{MONTHDAY} %{HOUR}:?%{MINUTE}:(?:[0-5][0-9]|60)[:.,][0-9]{3,9} (?:Z|[+-]%{HOUR}%{MINUTE})\\b", - "TOMCAT_DATESTAMP", Arrays.asList(0, 1)), - // The Elasticsearch ISO8601 parser requires a literal T between the date and time, so - // longhand formats are needed if there's a space instead - new CandidateTimestampFormat("YYYY-MM-dd HH:mm:ss,SSSZ", "yyyy-MM-dd HH:mm:ss,SSSXX", - "\\b\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2},\\d{3}", - "\\b%{YEAR}-%{MONTHNUM}-%{MONTHDAY} %{HOUR}:?%{MINUTE}:(?:[0-5][0-9]|60)[:.,][0-9]{3,9}(?:Z|[+-]%{HOUR}%{MINUTE})\\b", - "TIMESTAMP_ISO8601", Arrays.asList(0, 1)), - new CandidateTimestampFormat("YYYY-MM-dd HH:mm:ss,SSSZZ", "yyyy-MM-dd HH:mm:ss,SSSXXX", - "\\b\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2},\\d{3}", - "\\b%{YEAR}-%{MONTHNUM}-%{MONTHDAY} %{HOUR}:?%{MINUTE}:(?:[0-5][0-9]|60)[:.,][0-9]{3,9}[+-]%{HOUR}:%{MINUTE}\\b", - "TIMESTAMP_ISO8601", Arrays.asList(0, 1)), - new CandidateTimestampFormat("YYYY-MM-dd HH:mm:ss,SSS", "yyyy-MM-dd HH:mm:ss,SSS", - "\\b\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2},\\d{3}", - "\\b%{YEAR}-%{MONTHNUM}-%{MONTHDAY} %{HOUR}:?%{MINUTE}:(?:[0-5][0-9]|60)[:.,][0-9]{3,9}\\b", "TIMESTAMP_ISO8601", - Arrays.asList(0, 1)), - new CandidateTimestampFormat("YYYY-MM-dd HH:mm:ssZ", "yyyy-MM-dd HH:mm:ssXX", "\\b\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2}", - "\\b%{YEAR}-%{MONTHNUM}-%{MONTHDAY} %{HOUR}:?%{MINUTE}:(?:[0-5][0-9]|60)(?:Z|[+-]%{HOUR}%{MINUTE})\\b", "TIMESTAMP_ISO8601", - Arrays.asList(0, 1)), - new CandidateTimestampFormat("YYYY-MM-dd HH:mm:ssZZ", "yyyy-MM-dd HH:mm:ssXXX", "\\b\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2}", - "\\b%{YEAR}-%{MONTHNUM}-%{MONTHDAY} %{HOUR}:?%{MINUTE}:(?:[0-5][0-9]|60)[+-]%{HOUR}:%{MINUTE}\\b", "TIMESTAMP_ISO8601", - Arrays.asList(0, 1)), - new CandidateTimestampFormat("YYYY-MM-dd HH:mm:ss", "yyyy-MM-dd HH:mm:ss", "\\b\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2}", - "\\b%{YEAR}-%{MONTHNUM}-%{MONTHDAY} %{HOUR}:?%{MINUTE}:(?:[0-5][0-9]|60)\\b", "TIMESTAMP_ISO8601", - Arrays.asList(0, 1)), - // When using Java time the Elasticsearch ISO8601 parser for fractional time requires that the fractional - // separator match the current JVM locale, which is too restrictive for arbitrary log file parsing - new CandidateTimestampFormat("ISO8601", "yyyy-MM-dd'T'HH:mm:ss,SSSXX", - "\\b\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2},\\d{3}", - "\\b%{YEAR}-%{MONTHNUM}-%{MONTHDAY}T%{HOUR}:?%{MINUTE}:(?:[0-5][0-9]|60)[:.,][0-9]{3,9}(?:Z|[+-]%{HOUR}%{MINUTE})\\b", - "TIMESTAMP_ISO8601", Collections.singletonList(3)), - new CandidateTimestampFormat("ISO8601", "yyyy-MM-dd'T'HH:mm:ss,SSSXXX", - "\\b\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2},\\d{3}", - "\\b%{YEAR}-%{MONTHNUM}-%{MONTHDAY}T%{HOUR}:?%{MINUTE}:(?:[0-5][0-9]|60)[:.,][0-9]{3,9}[+-]%{HOUR}:%{MINUTE}\\b", - "TIMESTAMP_ISO8601", Collections.singletonList(3)), - new CandidateTimestampFormat("ISO8601", "yyyy-MM-dd'T'HH:mm:ss,SSS", - "\\b\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2},\\d{3}", - "\\b%{YEAR}-%{MONTHNUM}-%{MONTHDAY}T%{HOUR}:?%{MINUTE}:(?:[0-5][0-9]|60)[:.,][0-9]{3,9}\\b", "TIMESTAMP_ISO8601", - Collections.singletonList(3)), - new CandidateTimestampFormat("ISO8601", "ISO8601", "\\b\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2}", "\\b%{TIMESTAMP_ISO8601}\\b", - "TIMESTAMP_ISO8601", Collections.singletonList(3)), - new CandidateTimestampFormat("EEE MMM dd YYYY HH:mm:ss zzz", "EEE MMM dd yyyy HH:mm:ss zzz", - "\\b[A-Z]\\S{2,8} [A-Z]\\S{2,8} \\d{1,2} \\d{4} \\d{2}:\\d{2}:\\d{2} ", - "\\b%{DAY} %{MONTH} %{MONTHDAY} %{YEAR} %{HOUR}:%{MINUTE}:(?:[0-5][0-9]|60) %{TZ}\\b", "DATESTAMP_RFC822", Arrays.asList(1, 2)), - new CandidateTimestampFormat("EEE MMM dd YYYY HH:mm zzz", "EEE MMM dd yyyy HH:mm zzz", - "\\b[A-Z]\\S{2,8} [A-Z]\\S{2,8} \\d{1,2} \\d{4} \\d{2}:\\d{2} ", - "\\b%{DAY} %{MONTH} %{MONTHDAY} %{YEAR} %{HOUR}:%{MINUTE} %{TZ}\\b", "DATESTAMP_RFC822", Collections.singletonList(1)), - new CandidateTimestampFormat("EEE, dd MMM YYYY HH:mm:ss ZZ", "EEE, dd MMM yyyy HH:mm:ss XXX", - "\\b[A-Z]\\S{2,8}, \\d{1,2} [A-Z]\\S{2,8} \\d{4} \\d{2}:\\d{2}:\\d{2} ", - "\\b%{DAY}, %{MONTHDAY} %{MONTH} %{YEAR} %{HOUR}:%{MINUTE}:(?:[0-5][0-9]|60) (?:Z|[+-]%{HOUR}:%{MINUTE})\\b", - "DATESTAMP_RFC2822", Arrays.asList(1, 2)), - new CandidateTimestampFormat("EEE, dd MMM YYYY HH:mm:ss Z", "EEE, dd MMM yyyy HH:mm:ss XX", - "\\b[A-Z]\\S{2,8}, \\d{1,2} [A-Z]\\S{2,8} \\d{4} \\d{2}:\\d{2}:\\d{2} ", - "\\b%{DAY}, %{MONTHDAY} %{MONTH} %{YEAR} %{HOUR}:%{MINUTE}:(?:[0-5][0-9]|60) (?:Z|[+-]%{HOUR}%{MINUTE})\\b", - "DATESTAMP_RFC2822", Arrays.asList(1, 2)), - new CandidateTimestampFormat("EEE, dd MMM YYYY HH:mm ZZ", "EEE, dd MMM yyyy HH:mm XXX", - "\\b[A-Z]\\S{2,8}, \\d{1,2} [A-Z]\\S{2,8} \\d{4} \\d{2}:\\d{2} ", - "\\b%{DAY}, %{MONTHDAY} %{MONTH} %{YEAR} %{HOUR}:%{MINUTE} (?:Z|[+-]%{HOUR}:%{MINUTE})\\b", "DATESTAMP_RFC2822", - Collections.singletonList(1)), - new CandidateTimestampFormat("EEE, dd MMM YYYY HH:mm Z", "EEE, dd MMM yyyy HH:mm XX", - "\\b[A-Z]\\S{2,8}, \\d{1,2} [A-Z]\\S{2,8} \\d{4} \\d{2}:\\d{2} ", - "\\b%{DAY}, %{MONTHDAY} %{MONTH} %{YEAR} %{HOUR}:%{MINUTE} (?:Z|[+-]%{HOUR}%{MINUTE})\\b", "DATESTAMP_RFC2822", - Collections.singletonList(1)), - new CandidateTimestampFormat("EEE MMM dd HH:mm:ss zzz YYYY", "EEE MMM dd HH:mm:ss zzz yyyy", - "\\b[A-Z]\\S{2,8} [A-Z]\\S{2,8} \\d{1,2} \\d{2}:\\d{2}:\\d{2} [A-Z]{3,4} \\d{4}\\b", - "\\b%{DAY} %{MONTH} %{MONTHDAY} %{HOUR}:%{MINUTE}:(?:[0-5][0-9]|60) %{TZ} %{YEAR}\\b", "DATESTAMP_OTHER", - Arrays.asList(1, 2)), - new CandidateTimestampFormat("EEE MMM dd HH:mm zzz YYYY", "EEE MMM dd HH:mm zzz yyyy", - "\\b[A-Z]\\S{2,8} [A-Z]\\S{2,8} \\d{1,2} \\d{2}:\\d{2} [A-Z]{3,4} \\d{4}\\b", - "\\b%{DAY} %{MONTH} %{MONTHDAY} %{HOUR}:%{MINUTE} %{TZ} %{YEAR}\\b", "DATESTAMP_OTHER", Collections.singletonList(1)), - new CandidateTimestampFormat("YYYYMMddHHmmss", "yyyyMMddHHmmss", "\\b\\d{14}\\b", + "TOMCAT_DATESTAMP", "1111 11 11 11 11 11 111", 0, 13), + ISO8601_CANDIDATE_FORMAT, + new CandidateTimestampFormat( + example -> Arrays.asList("EEE MMM dd yy HH:mm:ss zzz", "EEE MMM d yy HH:mm:ss zzz"), + "\\b[A-Z]\\S{2} [A-Z]\\S{2} \\d{1,2} \\d{2} \\d{2}:\\d{2}:\\d{2}\\b", + "\\b%{DAY} %{MONTH} %{MONTHDAY} %{YEAR} %{HOUR}:%{MINUTE}(?::(?:[0-5][0-9]|60)) %{TZ}\\b", "DATESTAMP_RFC822", + Arrays.asList(" 11 11 11 11 11", " 1 11 11 11 11"), 0, 5), + new CandidateTimestampFormat( + example -> CandidateTimestampFormat.adjustTrailingTimezoneFromExample(example, "EEE, dd MMM yyyy HH:mm:ss XX"), + "\\b[A-Z]\\S{2}, \\d{1,2} [A-Z]\\S{2} \\d{4} \\d{2}:\\d{2}:\\d{2}\\b", + "\\b%{DAY}, %{MONTHDAY} %{MONTH} %{YEAR} %{HOUR}:%{MINUTE}(?::(?:[0-5][0-9]|60)) (?:Z|[+-]%{HOUR}:?%{MINUTE})\\b", + "DATESTAMP_RFC2822", Arrays.asList(" 11 1111 11 11 11", " 1 1111 11 11 11"), 0, 7), + new CandidateTimestampFormat( + example -> Arrays.asList("EEE MMM dd HH:mm:ss zzz yyyy", "EEE MMM d HH:mm:ss zzz yyyy"), + "\\b[A-Z]\\S{2,8} [A-Z]\\S{2,8} \\d{1,2} \\d{2}:\\d{2}:\\d{2}\\b", + "\\b%{DAY} %{MONTH} %{MONTHDAY} %{HOUR}:%{MINUTE}(?::(?:[0-5][0-9]|60)) %{TZ} %{YEAR}\\b", "DATESTAMP_OTHER", + Arrays.asList(" 11 11 11 11", " 1 11 11 11"), 12, 10), + new CandidateTimestampFormat(example -> Collections.singletonList("yyyyMMddHHmmss"), "\\b\\d{14}\\b", "\\b20\\d{2}%{MONTHNUM2}(?:(?:0[1-9])|(?:[12][0-9])|(?:3[01]))(?:2[0123]|[01][0-9])%{MINUTE}(?:[0-5][0-9]|60)\\b", - "DATESTAMP_EVENTLOG"), - new CandidateTimestampFormat("EEE MMM dd HH:mm:ss YYYY", "EEE MMM dd HH:mm:ss yyyy", - "\\b[A-Z]\\S{2,8} [A-Z]\\S{2,8} \\d{1,2} \\d{2}:\\d{2}:\\d{2} \\d{4}\\b", - "\\b%{DAY} %{MONTH} %{MONTHDAY} %{HOUR}:%{MINUTE}:(?:[0-5][0-9]|60) %{YEAR}\\b", "HTTPDERROR_DATE", Arrays.asList(1, 2)), - new CandidateTimestampFormat(Arrays.asList("MMM dd HH:mm:ss,SSS", "MMM d HH:mm:ss,SSS"), - Arrays.asList("MMM dd HH:mm:ss,SSS", "MMM d HH:mm:ss,SSS"), - "\\b[A-Z]\\S{2,8} {1,2}\\d{1,2} \\d{2}:\\d{2}:\\d{2},\\d{3}", - "%{MONTH} +%{MONTHDAY} %{HOUR}:%{MINUTE}:(?:[0-5][0-9]|60)[:.,][0-9]{3,9}\\b", "SYSLOGTIMESTAMP", - Collections.singletonList(1)), - new CandidateTimestampFormat(Arrays.asList("MMM dd HH:mm:ss", "MMM d HH:mm:ss"), - Arrays.asList("MMM dd HH:mm:ss", "MMM d HH:mm:ss"), - "\\b[A-Z]\\S{2,8} {1,2}\\d{1,2} \\d{2}:\\d{2}:\\d{2}\\b", "%{MONTH} +%{MONTHDAY} %{HOUR}:%{MINUTE}:(?:[0-5][0-9]|60)\\b", - "SYSLOGTIMESTAMP", Collections.singletonList(1)), - new CandidateTimestampFormat("dd/MMM/YYYY:HH:mm:ss Z", "dd/MMM/yyyy:HH:mm:ss XX", + "DATESTAMP_EVENTLOG", "11111111111111", 0, 0), + new CandidateTimestampFormat(example -> Collections.singletonList("EEE MMM dd HH:mm:ss yyyy"), + "\\b[A-Z]\\S{2} [A-Z]\\S{2} \\d{2} \\d{2}:\\d{2}:\\d{2} \\d{4}\\b", + "\\b%{DAY} %{MONTH} %{MONTHDAY} %{HOUR}:%{MINUTE}:(?:[0-5][0-9]|60) %{YEAR}\\b", "HTTPDERROR_DATE", + " 11 11 11 11 1111", 0, 0), + new CandidateTimestampFormat( + example -> CandidateTimestampFormat.expandDayAndAdjustFractionalSecondsFromExample(example, "MMM dd HH:mm:ss"), + "\\b[A-Z]\\S{2,8} {1,2}\\d{1,2} \\d{2}:\\d{2}:\\d{2}\\b", + "%{MONTH} +%{MONTHDAY} %{HOUR}:%{MINUTE}:(?:[0-5][0-9]|60)(?:[:.,][0-9]{3,9})?\\b", "SYSLOGTIMESTAMP", + Arrays.asList(" 11 11 11 11", " 1 11 11 11"), 4, 10), + new CandidateTimestampFormat(example -> Collections.singletonList("dd/MMM/yyyy:HH:mm:ss XX"), "\\b\\d{2}/[A-Z]\\S{2}/\\d{4}:\\d{2}:\\d{2}:\\d{2} ", - "\\b%{MONTHDAY}/%{MONTH}/%{YEAR}:%{HOUR}:%{MINUTE}:(?:[0-5][0-9]|60) [+-]?%{HOUR}%{MINUTE}\\b", "HTTPDATE"), - new CandidateTimestampFormat("MMM dd, YYYY h:mm:ss a", "MMM dd, yyyy h:mm:ss a", - "\\b[A-Z]\\S{2,8} \\d{1,2}, \\d{4} \\d{1,2}:\\d{2}:\\d{2} [AP]M\\b", - "%{MONTH} %{MONTHDAY}, 20\\d{2} %{HOUR}:%{MINUTE}:(?:[0-5][0-9]|60) (?:AM|PM)\\b", "CATALINA_DATESTAMP"), - new CandidateTimestampFormat(Arrays.asList("MMM dd YYYY HH:mm:ss", "MMM d YYYY HH:mm:ss"), - Arrays.asList("MMM dd yyyy HH:mm:ss", "MMM d yyyy HH:mm:ss"), - "\\b[A-Z]\\S{2,8} {1,2}\\d{1,2} \\d{4} \\d{2}:\\d{2}:\\d{2}\\b", - "%{MONTH} +%{MONTHDAY} %{YEAR} %{HOUR}:%{MINUTE}:(?:[0-5][0-9]|60)\\b", "CISCOTIMESTAMP", Collections.singletonList(1)), - new CandidateTimestampFormat("UNIX_MS", "UNIX_MS", "\\b\\d{13}\\b", "\\b\\d{13}\\b", "POSINT"), - new CandidateTimestampFormat("UNIX", "UNIX", "\\b\\d{10}\\.\\d{3,9}\\b", "\\b\\d{10}\\.(?:\\d{3}){1,3}\\b", "NUMBER"), - new CandidateTimestampFormat("UNIX", "UNIX", "\\b\\d{10}\\b", "\\b\\d{10}\\b", "POSINT"), - new CandidateTimestampFormat("TAI64N", "TAI64N", "\\b[0-9A-Fa-f]{24}\\b", "\\b[0-9A-Fa-f]{24}\\b", "BASE16NUM") + "\\b%{MONTHDAY}/%{MONTH}/%{YEAR}:%{HOUR}:%{MINUTE}:(?:[0-5][0-9]|60) [+-]?%{HOUR}%{MINUTE}\\b", "HTTPDATE", + "11 1111 11 11 11", 0, 6), + new CandidateTimestampFormat(example -> Collections.singletonList("MMM dd, yyyy h:mm:ss a"), + "\\b[A-Z]\\S{2} \\d{2}, \\d{4} \\d{1,2}:\\d{2}:\\d{2} [AP]M\\b", + "%{MONTH} %{MONTHDAY}, 20\\d{2} %{HOUR}:%{MINUTE}:(?:[0-5][0-9]|60) (?:AM|PM)\\b", "CATALINA_DATESTAMP", + Arrays.asList(" 11 1111 1 11 11", " 11 1111 11 11 11"), 0, 3), + new CandidateTimestampFormat(example -> Arrays.asList("MMM dd yyyy HH:mm:ss", "MMM d yyyy HH:mm:ss"), + "\\b[A-Z]\\S{2} {1,2}\\d{1,2} \\d{4} \\d{2}:\\d{2}:\\d{2}\\b", + "%{MONTH} +%{MONTHDAY} %{YEAR} %{HOUR}:%{MINUTE}:(?:[0-5][0-9]|60)\\b", "CISCOTIMESTAMP", + Arrays.asList(" 11 1111 11 11 11", " 1 1111 11 11 11"), 0, 0), + new CandidateTimestampFormat(CandidateTimestampFormat::indeterminateDayMonthFormatFromExample, + "\\b\\d{1,2}[/.-]\\d{1,2}[/.-]\\d{4}[- ]\\d{2}:\\d{2}:\\d{2}\\b", "\\b%{DATESTAMP}\\b", "DATESTAMP", + // In DATESTAMP the month may be 1 or 2 digits, but the day must be 2 + Arrays.asList("11 11 1111 11 11 11", "1 11 1111 11 11 11", "11 1 1111 11 11 11"), 0, 10), + new CandidateTimestampFormat(CandidateTimestampFormat::indeterminateDayMonthFormatFromExample, + "\\b\\d{1,2}[/.-]\\d{1,2}[/.-]\\d{4}\\b", "\\b%{DATE}\\b", "DATE", + // In DATE the month may be 1 or 2 digits, but the day must be 2 + Arrays.asList("11 11 1111", "11 1 1111", "1 11 1111"), 0, 0), + UNIX_MS_CANDIDATE_FORMAT, + UNIX_CANDIDATE_FORMAT, + TAI64N_CANDIDATE_FORMAT, + // This one is an ISO8601 date with no time, but the TIMESTAMP_ISO8601 Grok pattern doesn't cover it + new CandidateTimestampFormat(example -> Collections.singletonList("ISO8601"), + "\\b\\d{4}-\\d{2}-\\d{2}\\b", "\\b%{YEAR}-%{MONTHNUM2}-%{MONTHDAY}\\b", CUSTOM_TIMESTAMP_GROK_NAME, + "1111 11 11", 0, 0) ); - private TimestampFormatFinder() { + /** + * It is expected that the explanation will be shared with other code. + * Both this class and other classes will update it. + */ + private final List explanation; + private final boolean requireFullMatch; + private final boolean errorOnNoTimestamp; + private final boolean errorOnMultiplePatterns; + private final List orderedCandidateFormats; + private final TimeoutChecker timeoutChecker; + private final List matches; + // These two are not volatile because the class is explicitly not for use from multiple threads. + // But if it ever were to be made thread safe, making these volatile would be one required step. + private List matchedFormats; + private List cachedJavaTimestampFormats; + + /** + * Construct without any specific timestamp format override. + * @param explanation List of reasons for making decisions. May contain items when passed and new reasons + * can be appended by the methods of this class. + * @param requireFullMatch Must samples added to this object represent a timestamp in their entirety? + * @param errorOnNoTimestamp Should an exception be thrown if a sample is added that does not contain a recognised timestamp? + * @param errorOnMultiplePatterns Should an exception be thrown if samples are uploaded that require different Grok patterns? + * @param timeoutChecker Will abort the operation if its timeout is exceeded. + */ + public TimestampFormatFinder(List explanation, boolean requireFullMatch, boolean errorOnNoTimestamp, + boolean errorOnMultiplePatterns, TimeoutChecker timeoutChecker) { + this(explanation, null, requireFullMatch, errorOnNoTimestamp, errorOnMultiplePatterns, timeoutChecker); } /** - * Find the first timestamp format that matches part of the supplied value. - * @param text The value that the returned timestamp format must exist within. - * @param timeoutChecker Will abort the operation if its timeout is exceeded. - * @return The timestamp format, or null if none matches. + * Construct with a timestamp format override. + * @param explanation List of reasons for making decisions. May contain items when passed and new reasons + * can be appended by the methods of this class. + * @param overrideFormat A timestamp format that will take precedence when looking for timestamps. If null + * then the effect is to have no such override, i.e. equivalent to calling the other constructor. + * Timestamps will also be matched that have slightly different formats, but match the same Grok + * pattern as is implied by the override format. + * @param requireFullMatch Must samples added to this object represent a timestamp in their entirety? + * @param errorOnNoTimestamp Should an exception be thrown if a sample is added that does not contain a recognised timestamp? + * @param errorOnMultiplePatterns Should an exception be thrown if samples are uploaded that require different Grok patterns? + * @param timeoutChecker Will abort the operation if its timeout is exceeded. */ - public static TimestampMatch findFirstMatch(String text, TimeoutChecker timeoutChecker) { - return findFirstMatch(text, 0, timeoutChecker); + public TimestampFormatFinder(List explanation, @Nullable String overrideFormat, boolean requireFullMatch, + boolean errorOnNoTimestamp, boolean errorOnMultiplePatterns, TimeoutChecker timeoutChecker) { + this.explanation = Objects.requireNonNull(explanation); + this.requireFullMatch = requireFullMatch; + this.errorOnNoTimestamp = errorOnNoTimestamp; + this.errorOnMultiplePatterns = errorOnMultiplePatterns; + this.orderedCandidateFormats = (overrideFormat != null) + ? Collections.singletonList(makeCandidateFromOverrideFormat(overrideFormat, timeoutChecker)) + : ORDERED_CANDIDATE_FORMATS; + this.timeoutChecker = Objects.requireNonNull(timeoutChecker); + this.matches = new ArrayList<>(); + this.matchedFormats = new ArrayList<>(); } /** - * Find the first timestamp format that matches part of the supplied value. - * @param text The value that the returned timestamp format must exist within. - * @param requiredFormat A timestamp format that any returned match must support. - * @param timeoutChecker Will abort the operation if its timeout is exceeded. - * @return The timestamp format, or null if none matches. + * Convert a user supplied Java timestamp format to a Grok pattern and simple regular expression. + * @param overrideFormat A user supplied Java timestamp format. + * @return A tuple where the first value is a Grok pattern and the second is a simple regex. */ - public static TimestampMatch findFirstMatch(String text, String requiredFormat, TimeoutChecker timeoutChecker) { - return findFirstMatch(text, 0, requiredFormat, timeoutChecker); + static Tuple overrideFormatToGrokAndRegex(String overrideFormat) { + + if (overrideFormat.indexOf('\n') >= 0 || overrideFormat.indexOf('\r') >= 0) { + throw new IllegalArgumentException("Multi-line timestamp formats [" + overrideFormat + "] not supported"); + } + + if (overrideFormat.indexOf(INDETERMINATE_FIELD_PLACEHOLDER) >= 0) { + throw new IllegalArgumentException("Timestamp format [" + overrideFormat + "] not supported because it contains [" + + INDETERMINATE_FIELD_PLACEHOLDER + "]"); + } + + StringBuilder grokPatternBuilder = new StringBuilder(); + StringBuilder regexBuilder = new StringBuilder(); + + boolean notQuoted = true; + char prevChar = '\0'; + String prevLetterGroup = null; + int pos = 0; + while (pos < overrideFormat.length()) { + char curChar = overrideFormat.charAt(pos); + + if (curChar == '\'') { + notQuoted = !notQuoted; + } else if (notQuoted && Character.isLetter(curChar)) { + int startPos = pos; + int endPos = startPos + 1; + while (endPos < overrideFormat.length() && overrideFormat.charAt(endPos) == curChar) { + ++endPos; + ++pos; + } + String letterGroup = overrideFormat.substring(startPos, endPos); + Tuple grokPatternAndRegexForGroup = VALID_LETTER_GROUPS.get(letterGroup); + if (grokPatternAndRegexForGroup == null) { + // Special case of fractional seconds + if (curChar != 'S' || FRACTIONAL_SECOND_SEPARATORS.indexOf(prevChar) == -1 || + "ss".equals(prevLetterGroup) == false || endPos - startPos > 9) { + String msg = "Letter group [" + letterGroup + "] in [" + overrideFormat + "] is not supported"; + if (curChar == 'S') { + msg += " because it is not preceeded by [ss] and a separator from [" + FRACTIONAL_SECOND_SEPARATORS + "]"; + } + throw new IllegalArgumentException(msg); + } + // No need to append to the Grok pattern as %{SECOND} already allows for an optional + // fraction, but we need to remove the separator that's included in %{SECOND} + grokPatternBuilder.deleteCharAt(grokPatternBuilder.length() - 1); + regexBuilder.append("\\d{").append(endPos - startPos).append('}'); + } else { + grokPatternBuilder.append(grokPatternAndRegexForGroup.v1()); + if (regexBuilder.length() == 0) { + regexBuilder.append("\\b"); + } + regexBuilder.append(grokPatternAndRegexForGroup.v2()); + } + if (pos + 1 == overrideFormat.length()) { + regexBuilder.append("\\b"); + } + prevLetterGroup = letterGroup; + } else { + if (PUNCTUATION_THAT_NEEDS_ESCAPING_IN_REGEX.indexOf(curChar) >= 0) { + grokPatternBuilder.append('\\'); + regexBuilder.append('\\'); + } + grokPatternBuilder.append(curChar); + regexBuilder.append(curChar); + } + + prevChar = curChar; + ++pos; + } + + if (prevLetterGroup == null) { + throw new IllegalArgumentException("No time format letter groups in override format [" + overrideFormat + "]"); + } + + return new Tuple<>(grokPatternBuilder.toString(), regexBuilder.toString()); } /** - * Find the first timestamp format that matches part of the supplied value, - * excluding a specified number of candidate formats. - * @param text The value that the returned timestamp format must exist within. - * @param ignoreCandidates The number of candidate formats to exclude from the search. + * Given a user supplied Java timestamp format, return an appropriate candidate timestamp object as required by this class. + * The returned candidate might be a built-in one, or might be generated from the supplied format. + * @param overrideFormat A user supplied Java timestamp format. * @param timeoutChecker Will abort the operation if its timeout is exceeded. - * @return The timestamp format, or null if none matches. + * @return An appropriate candidate timestamp object. */ - public static TimestampMatch findFirstMatch(String text, int ignoreCandidates, TimeoutChecker timeoutChecker) { - return findFirstMatch(text, ignoreCandidates, null, timeoutChecker); + static CandidateTimestampFormat makeCandidateFromOverrideFormat(String overrideFormat, TimeoutChecker timeoutChecker) { + + // First check for a special format string + switch (overrideFormat.toUpperCase(Locale.ROOT)) { + case "ISO8601": + return ISO8601_CANDIDATE_FORMAT; + case "UNIX_MS": + return UNIX_MS_CANDIDATE_FORMAT; + case "UNIX": + return UNIX_CANDIDATE_FORMAT; + case "TAI64N": + return TAI64N_CANDIDATE_FORMAT; + } + + // Next check for a built-in candidate that incorporates the override, and prefer this + + // If the override is not a valid format then one or other of these two calls will + // throw, and that is how we'll report the invalid format to the user + Tuple grokPatternAndRegex = overrideFormatToGrokAndRegex(overrideFormat); + DateTimeFormatter javaTimeFormatter = DateTimeFormatter.ofPattern(overrideFormat, Locale.ROOT); + + // This timestamp (2001-02-03T04:05:06,123456789+0545) is chosen such that the month, day and hour all have just 1 digit. + // This means that it will distinguish between formats that do/don't output leading zeroes for month, day and hour. + // Additionally it has the full 9 digits of fractional second precision, to avoid the possibility of truncating the fraction. + String generatedTimestamp = javaTimeFormatter.withZone(ZoneOffset.ofHoursMinutesSeconds(5, 45, 0)) + .format(Instant.ofEpochMilli(981173106123L).plusNanos(456789L)); + for (CandidateTimestampFormat candidate : ORDERED_CANDIDATE_FORMATS) { + + TimestampMatch match = checkCandidate(candidate, generatedTimestamp, null, true, timeoutChecker); + if (match != null) { + return new CandidateTimestampFormat(example -> { + + // Modify the built-in candidate so it prefers to return the user supplied format + // if at all possible, and only falls back to standard logic for other situations + try { + // TODO consider support for overriding the locale too + // But since Grok only supports English and German date words ingest + // via Grok will fall down at an earlier stage for other languages... + javaTimeFormatter.parse(example); + return Collections.singletonList(overrideFormat); + } catch (DateTimeException e) { + return candidate.javaTimestampFormatSupplier.apply(example); + } + }, candidate.simplePattern.pattern(), candidate.strictGrokPattern, candidate.outputGrokPatternName); + } + } + + // None of the out-of-the-box formats were close, so use the built Grok pattern and simple regex for the override + return new CandidateTimestampFormat(example -> Collections.singletonList(overrideFormat), + grokPatternAndRegex.v2(), grokPatternAndRegex.v1(), CUSTOM_TIMESTAMP_GROK_NAME); } /** - * Find the first timestamp format that matches part of the supplied value, - * excluding a specified number of candidate formats. - * @param text The value that the returned timestamp format must exist within. - * @param ignoreCandidates The number of candidate formats to exclude from the search. - * @param requiredFormat A timestamp format that any returned match must support. - * @param timeoutChecker Will abort the operation if its timeout is exceeded. + * Find the first timestamp format that matches part or all of the supplied text. + * @param candidate The timestamp candidate to consider. + * @param text The text that the returned timestamp format must exist within. + * @param numberPosBitSet If not null, each bit must be set to true if and only if the + * corresponding position in {@code text} is a digit. + * @param requireFullMatch Does the candidate have to match the entire text? + * @param timeoutChecker Will abort the operation if its timeout is exceeded. * @return The timestamp format, or null if none matches. */ - public static TimestampMatch findFirstMatch(String text, int ignoreCandidates, String requiredFormat, TimeoutChecker timeoutChecker) { - if (ignoreCandidates >= ORDERED_CANDIDATE_FORMATS.size()) { - return null; - } - Boolean[] quickRuleoutMatches = new Boolean[QUICK_RULE_OUT_PATTERNS.size()]; - int index = ignoreCandidates; - String adjustedRequiredFormat = adjustRequiredFormat(requiredFormat); - for (CandidateTimestampFormat candidate : ORDERED_CANDIDATE_FORMATS.subList(ignoreCandidates, ORDERED_CANDIDATE_FORMATS.size())) { - if (adjustedRequiredFormat == null || candidate.jodaTimestampFormats.contains(adjustedRequiredFormat) || - candidate.javaTimestampFormats.contains(adjustedRequiredFormat)) { - boolean quicklyRuledOut = false; - for (Integer quickRuleOutIndex : candidate.quickRuleOutIndices) { - if (quickRuleoutMatches[quickRuleOutIndex] == null) { - quickRuleoutMatches[quickRuleOutIndex] = QUICK_RULE_OUT_PATTERNS.get(quickRuleOutIndex).matcher(text).find(); + private static TimestampMatch checkCandidate(CandidateTimestampFormat candidate, String text, @Nullable BitSet numberPosBitSet, + boolean requireFullMatch, TimeoutChecker timeoutChecker) { + if (requireFullMatch) { + Map captures = timeoutChecker.grokCaptures(candidate.strictFullMatchGrok, text, + "timestamp format determination"); + if (captures != null) { + return new TimestampMatch(candidate, "", text, ""); + } + } else { + // Since a search in a long string that has sections that nearly match will be very slow, it's + // worth doing an initial sanity check to see if the relative positions of digits necessary to + // get a match exist first + Tuple boundsForCandidate = findBoundsForCandidate(candidate, numberPosBitSet); + if (boundsForCandidate.v1() >= 0) { + assert boundsForCandidate.v2() > boundsForCandidate.v1(); + String matchIn = text.substring(boundsForCandidate.v1(), Math.min(boundsForCandidate.v2(), text.length())); + Map captures = timeoutChecker.grokCaptures(candidate.strictSearchGrok, matchIn, + "timestamp format determination"); + if (captures != null) { + StringBuilder prefaceBuilder = new StringBuilder(); + if (boundsForCandidate.v1() > 0) { + prefaceBuilder.append(text.subSequence(0, boundsForCandidate.v1())); + } + prefaceBuilder.append(captures.getOrDefault(PREFACE, "")); + StringBuilder epilogueBuilder = new StringBuilder(); + epilogueBuilder.append(captures.getOrDefault(EPILOGUE, "")); + if (boundsForCandidate.v2() < text.length()) { + epilogueBuilder.append(text.subSequence(boundsForCandidate.v2(), text.length())); } - if (quickRuleoutMatches[quickRuleOutIndex] == false) { - quicklyRuledOut = true; + return new TimestampMatch(candidate, prefaceBuilder.toString(), text.substring(prefaceBuilder.length(), + text.length() - epilogueBuilder.length()), epilogueBuilder.toString()); + } + } else { + timeoutChecker.check("timestamp format determination"); + } + } + + return null; + } + + /** + * Add a sample value to be considered by the format finder. If {@code requireFullMatch} was set to + * true on construction then the entire sample will be tested to see if it is a timestamp, + * otherwise a timestamp may be detected as just a portion of the sample. An exception will be thrown + * if {@code errorOnNoTimestamp} was set to true on construction, and no timestamp is + * found. An exception will also be thrown if {@code errorOnMultiplePatterns} was set to true + * on construction and a new timestamp format is detected that cannot be merged with a previously detected + * format. + * @param text The sample in which to detect a timestamp. + */ + public void addSample(String text) { + + BitSet numberPosBitSet = requireFullMatch ? null : stringToNumberPosBitSet(text); + + for (CandidateTimestampFormat candidate : orderedCandidateFormats) { + + TimestampMatch match = checkCandidate(candidate, text, numberPosBitSet, requireFullMatch, timeoutChecker); + if (match != null) { + TimestampFormat newFormat = match.timestampFormat; + boolean mustAdd = true; + for (int i = 0; i < matchedFormats.size(); ++i) { + TimestampFormat existingFormat = matchedFormats.get(i); + if (existingFormat.canMergeWith(newFormat)) { + matchedFormats.set(i, existingFormat.mergeWith(newFormat)); + mustAdd = false; + // Sharing formats considerably reduces the memory usage during the analysis + // when there are many samples, so reconstruct the match with a shared format + match = new TimestampMatch(match, matchedFormats.get(i)); break; } } - if (quicklyRuledOut == false) { - Map captures = timeoutChecker.grokCaptures(candidate.strictSearchGrok, text, - "timestamp format determination"); - if (captures != null) { - String preface = captures.getOrDefault(PREFACE, "").toString(); - String epilogue = captures.getOrDefault(EPILOGUE, "").toString(); - return makeTimestampMatch(candidate, index, preface, text.substring(preface.length(), - text.length() - epilogue.length()), epilogue); + if (mustAdd) { + if (errorOnMultiplePatterns && matchedFormats.isEmpty() == false) { + throw new IllegalArgumentException("Multiple timestamp formats found [" + + matchedFormats.get(0) + "] and [" + newFormat + "]"); } + matchedFormats.add(newFormat); } + + matches.add(match); + cachedJavaTimestampFormats = null; + return; } - ++index; } - return null; + + if (errorOnNoTimestamp) { + throw new IllegalArgumentException("No timestamp found in [" + text + "]"); + } } /** - * Find the best timestamp format for matching an entire field value. - * @param text The value that the returned timestamp format must match in its entirety. - * @param timeoutChecker Will abort the operation if its timeout is exceeded. - * @return The timestamp format, or null if none matches. + * Where multiple timestamp formats have been found, select the "best" one, whose details + * will then be returned by methods such as {@link #getGrokPatternName} and + * {@link #getJavaTimestampFormats}. If fewer than two timestamp formats have been found + * then this method does nothing. */ - public static TimestampMatch findFirstFullMatch(String text, TimeoutChecker timeoutChecker) { - return findFirstFullMatch(text, 0, timeoutChecker); + public void selectBestMatch() { + + if (matchedFormats.size() < 2) { + // Nothing to do + return; + } + + double[] weights = calculateMatchWeights(); + timeoutChecker.check("timestamp format determination"); + int highestWeightFormatIndex = findHighestWeightIndex(weights); + timeoutChecker.check("timestamp format determination"); + selectHighestWeightFormat(highestWeightFormatIndex); } /** - * Find the best timestamp format for matching an entire field value. - * @param text The value that the returned timestamp format must match in its entirety. - * @param requiredFormat A timestamp format that any returned match must support. - * @param timeoutChecker Will abort the operation if its timeout is exceeded. - * @return The timestamp format, or null if none matches. + * For each matched format, calculate a weight that can be used to decide which match is best. The + * weight for each matched format is the sum of the weights for all matches that have that format. + * @return An array of weights. There is one entry in the array for each entry in {@link #matchedFormats}, + * in the same order as the entries in {@link #matchedFormats}. */ - public static TimestampMatch findFirstFullMatch(String text, String requiredFormat, TimeoutChecker timeoutChecker) { - return findFirstFullMatch(text, 0, requiredFormat, timeoutChecker); + private double[] calculateMatchWeights() { + + int remainingMatches = matches.size(); + double[] weights = new double[matchedFormats.size()]; + for (TimestampMatch match : matches) { + + for (int matchedFormatIndex = 0; matchedFormatIndex < matchedFormats.size(); ++matchedFormatIndex) { + if (matchedFormats.get(matchedFormatIndex).canMergeWith(match.timestampFormat)) { + weights[matchedFormatIndex] += weightForMatch(match.preface); + break; + } + ++matchedFormatIndex; + } + + // The highest possible weight is 1, so if the difference between the two highest weights + // is less than the number of lines remaining then the leader cannot possibly be overtaken + if (findDifferenceBetweenTwoHighestWeights(weights) > --remainingMatches) { + break; + } + } + + return weights; } /** - * Find the best timestamp format for matching an entire field value, - * excluding a specified number of candidate formats. - * @param text The value that the returned timestamp format must match in its entirety. - * @param ignoreCandidates The number of candidate formats to exclude from the search. - * @param timeoutChecker Will abort the operation if its timeout is exceeded. - * @return The timestamp format, or null if none matches. + * Used to weight a timestamp match according to how far along the line it is found. + * Timestamps at the very beginning of the line are given a weight of 1. The weight + * progressively decreases the more text there is preceding the timestamp match, but + * is always greater than 0. + * @return A weight in the range (0, 1]. */ - public static TimestampMatch findFirstFullMatch(String text, int ignoreCandidates, TimeoutChecker timeoutChecker) { - return findFirstFullMatch(text, ignoreCandidates, null, timeoutChecker); + private static double weightForMatch(String preface) { + return Math.pow(1.0 + preface.length() / 15.0, -1.1); } /** - * Find the best timestamp format for matching an entire field value, - * excluding a specified number of candidate formats. - * @param text The value that the returned timestamp format must match in its entirety. - * @param ignoreCandidates The number of candidate formats to exclude from the search. - * @param requiredFormat A timestamp format that any returned match must support. - * @param timeoutChecker Will abort the operation if its timeout is exceeded. - * @return The timestamp format, or null if none matches. + * Given an array of weights, find the difference between the two highest values. + * @param weights Array of weights. Must have at least two elements. + * @return The difference between the two highest values. */ - public static TimestampMatch findFirstFullMatch(String text, int ignoreCandidates, String requiredFormat, - TimeoutChecker timeoutChecker) { - if (ignoreCandidates >= ORDERED_CANDIDATE_FORMATS.size()) { + private static double findDifferenceBetweenTwoHighestWeights(double[] weights) { + assert weights.length >= 2; + + double highestWeight = 0.0; + double secondHighestWeight = 0.0; + for (double weight : weights) { + if (weight > highestWeight) { + secondHighestWeight = highestWeight; + highestWeight = weight; + } else if (weight > secondHighestWeight) { + secondHighestWeight = weight; + } + } + return highestWeight - secondHighestWeight; + } + + /** + * Given an array of weights, find the index with the highest weight. + * @param weights Array of weights. + * @return The index of the element with the highest weight. + */ + private static int findHighestWeightIndex(double[] weights) { + + double highestWeight = Double.NEGATIVE_INFINITY; + int highestWeightFormatIndex = -1; + for (int index = 0; index < weights.length; ++index) { + double weight = weights[index]; + if (weight > highestWeight) { + highestWeight = weight; + highestWeightFormatIndex = index; + } + } + + return highestWeightFormatIndex; + } + + /** + * Ensure the highest weight matched format is at the beginning of the list of matched formats. + * @param highestWeightFormatIndex The index of the matched format with the highest weight. + */ + private void selectHighestWeightFormat(int highestWeightFormatIndex) { + + assert highestWeightFormatIndex >= 0; + // If the selected format is already at the beginning of the list there's nothing to do + if (highestWeightFormatIndex == 0) { + return; + } + + cachedJavaTimestampFormats = null; + List newMatchedFormats = new ArrayList<>(matchedFormats); + // Swap the selected format with the one that's currently at the beginning of the list + newMatchedFormats.set(0, matchedFormats.get(highestWeightFormatIndex)); + newMatchedFormats.set(highestWeightFormatIndex, matchedFormats.get(0)); + matchedFormats = newMatchedFormats; + } + + /** + * How many different timestamp formats have been matched in the supplied samples? + * @return The number of different timestamp formats that have been matched in the supplied samples. + */ + public int getNumMatchedFormats() { + return matchedFormats.size(); + } + + /** + * Get the Grok pattern name that corresponds to the selected timestamp format. + * @return The Grok pattern name that corresponds to the selected timestamp format. + */ + public String getGrokPatternName() { + if (matchedFormats.isEmpty()) { + // If errorOnNoTimestamp is set and we get here it means no samples have been added, which is likely a programmer mistake + assert errorOnNoTimestamp == false; return null; } - int index = ignoreCandidates; - String adjustedRequiredFormat = adjustRequiredFormat(requiredFormat); - for (CandidateTimestampFormat candidate : ORDERED_CANDIDATE_FORMATS.subList(ignoreCandidates, ORDERED_CANDIDATE_FORMATS.size())) { - if (adjustedRequiredFormat == null || candidate.jodaTimestampFormats.contains(adjustedRequiredFormat) || - candidate.javaTimestampFormats.contains(adjustedRequiredFormat)) { - Map captures = timeoutChecker.grokCaptures(candidate.strictFullMatchGrok, text, - "timestamp format determination"); - if (captures != null) { - return makeTimestampMatch(candidate, index, "", text, ""); + return matchedFormats.get(0).grokPatternName; + } + + /** + * Get the custom Grok pattern definitions derived from the override format, if any. + * @return The custom Grok pattern definitions for the selected timestamp format. + * If there are none an empty map is returned. + */ + public Map getCustomGrokPatternDefinitions() { + if (matchedFormats.isEmpty()) { + // If errorOnNoTimestamp is set and we get here it means no samples have been added, which is likely a programmer mistake + assert errorOnNoTimestamp == false; + return Collections.emptyMap(); + } + return matchedFormats.get(0).customGrokPatternDefinitions; + } + + /** + * Of all the samples added that correspond to the selected format, return + * the portion of the sample that comes before the timestamp. + * @return A list of prefaces from samples that match the selected timestamp format. + */ + public List getPrefaces() { + if (matchedFormats.isEmpty()) { + // If errorOnNoTimestamp is set and we get here it means no samples have been added, which is likely a programmer mistake + assert errorOnNoTimestamp == false; + return Collections.emptyList(); + } + return matches.stream().filter(match -> matchedFormats.size() < 2 || matchedFormats.get(0).canMergeWith(match.timestampFormat)) + .map(match -> match.preface).collect(Collectors.toList()); + } + + /** + * Get the simple regular expression that can be used to identify timestamps + * of the selected format in almost any programming language. + * @return A {@link Pattern} that will match timestamps of the selected format. + */ + public Pattern getSimplePattern() { + if (matchedFormats.isEmpty()) { + // If errorOnNoTimestamp is set and we get here it means no samples have been added, which is likely a programmer mistake + assert errorOnNoTimestamp == false; + return null; + } + return matchedFormats.get(0).simplePattern; + } + + /** + * These are similar to Java timestamp formats but may contain indeterminate day/month + * placeholders if the order of day and month is uncertain. + * @return A list of Java timestamp formats possibly containing indeterminate day/month placeholders. + */ + public List getRawJavaTimestampFormats() { + if (matchedFormats.isEmpty()) { + // If errorOnNoTimestamp is set and we get here it means no samples have been added, which is likely a programmer mistake + assert errorOnNoTimestamp == false; + return Collections.emptyList(); + } + return matchedFormats.get(0).rawJavaTimestampFormats; + } + + /** + * These are used by ingest pipeline and index mappings. + * @return A list of Java timestamp formats to use for parsing documents. + */ + public List getJavaTimestampFormats() { + if (cachedJavaTimestampFormats != null) { + return cachedJavaTimestampFormats; + } + return determiniseJavaTimestampFormats(getRawJavaTimestampFormats(), + // With multiple formats, only consider the matches that correspond to the first + // in the list (which is what we're returning information about via the getters). + // With just one format it's most efficient not to bother checking formats. + (matchedFormats.size() > 1) ? matchedFormats.get(0) : null); + } + + /** + * Given a list of timestamp formats that might contain indeterminate day/month parts, + * return the corresponding pattern with the placeholders replaced with concrete + * day/month formats. + */ + private List determiniseJavaTimestampFormats(List rawJavaTimestampFormats, + @Nullable TimestampFormat onlyConsiderFormat) { + + // This method needs rework if the class is ever made thread safe + + if (rawJavaTimestampFormats.stream().anyMatch(format -> format.indexOf(INDETERMINATE_FIELD_PLACEHOLDER) >= 0)) { + boolean isDayFirst = guessIsDayFirst(rawJavaTimestampFormats, onlyConsiderFormat, Locale.getDefault()); + cachedJavaTimestampFormats = rawJavaTimestampFormats.stream() + .map(format -> determiniseJavaTimestampFormat(format, isDayFirst)).collect(Collectors.toList()); + } else { + cachedJavaTimestampFormats = rawJavaTimestampFormats; + } + return cachedJavaTimestampFormats; + } + + /** + * If timestamp formats where the order of day and month could vary (as in a choice between dd/MM/yyyy + * or MM/dd/yyyy for example), make a guess about whether the day comes first. + * @return true if the day comes first and false if the month comes first. + */ + private boolean guessIsDayFirst(List rawJavaTimestampFormats, @Nullable TimestampFormat onlyConsiderFormat, + Locale localeForFallback) { + + Boolean isDayFirst = guessIsDayFirstFromFormats(rawJavaTimestampFormats); + if (isDayFirst != null) { + return isDayFirst; + } + isDayFirst = guessIsDayFirstFromMatches(onlyConsiderFormat); + if (isDayFirst != null) { + return isDayFirst; + } + return guessIsDayFirstFromLocale(localeForFallback); + } + + /** + * If timestamp formats where the order of day and month could vary (as in a choice between dd/MM/yyyy + * or MM/dd/yyyy for example), make a guess about whether the day comes first based on quirks of the + * built-in Grok patterns. + * @return true if the day comes first, false if the month comes first, and + * null if there is insufficient evidence to decide. + */ + Boolean guessIsDayFirstFromFormats(List rawJavaTimestampFormats) { + + Boolean isDayFirst = null; + + for (String rawJavaTimestampFormat : rawJavaTimestampFormats) { + Matcher matcher = INDETERMINATE_FORMAT_INTERPRETER.matcher(rawJavaTimestampFormat); + if (matcher.matches()) { + String firstNumber = matcher.group(2); + assert firstNumber != null; + String secondNumber = matcher.group(4); + if (secondNumber == null) { + return null; + } + if (firstNumber.length() == 2 && secondNumber.length() == 1) { + if (Boolean.FALSE.equals(isDayFirst)) { + // Inconsistency + return null; + } + isDayFirst = Boolean.TRUE; } + if (firstNumber.length() == 1 && secondNumber.length() == 2) { + if (Boolean.TRUE.equals(isDayFirst)) { + // Inconsistency + return null; + } + isDayFirst = Boolean.FALSE; + } + } + } + + if (isDayFirst != null) { + if (isDayFirst) { + explanation.add("Guessing day precedes month in timestamps as all detected formats have a two digits in the first number " + + "and a single digit in the second number which is what the %{MONTHDAY} and %{MONTHNUM} Grok patterns permit"); + } else { + explanation.add("Guessing month precedes day in timestamps as all detected formats have a single digit in the first number " + + "and two digits in the second number which is what the %{MONTHNUM} and %{MONTHDAY} Grok patterns permit"); } - ++index; } + + return isDayFirst; + } + + /** + * If timestamp formats where the order of day and month could vary (as in a choice between dd/MM/yyyy + * or MM/dd/yyyy for example), make a guess about whether the day comes first based on observed values + * of the first and second numbers. + * @return true if the day comes first, false if the month comes first, and + * null if there is insufficient evidence to decide. + */ + Boolean guessIsDayFirstFromMatches(@Nullable TimestampFormat onlyConsiderFormat) { + + BitSet firstIndeterminateNumbers = new BitSet(); + BitSet secondIndeterminateNumbers = new BitSet(); + + for (TimestampMatch match : matches) { + + if (onlyConsiderFormat == null || onlyConsiderFormat.canMergeWith(match.timestampFormat)) { + + // Valid indeterminate day/month numbers will be in the range 1 to 31. + // -1 is used to mean "not present", and we ignore that here. + + if (match.firstIndeterminateDateNumber > 0) { + assert match.firstIndeterminateDateNumber <= 31; + if (match.firstIndeterminateDateNumber > 12) { + explanation.add("Guessing day precedes month in timestamps as one sample had first number [" + + match.firstIndeterminateDateNumber + "]"); + return Boolean.TRUE; + } + firstIndeterminateNumbers.set(match.firstIndeterminateDateNumber); + } + if (match.secondIndeterminateDateNumber > 0) { + assert match.secondIndeterminateDateNumber <= 31; + if (match.secondIndeterminateDateNumber > 12) { + explanation.add("Guessing month precedes day in timestamps as one sample had second number [" + + match.secondIndeterminateDateNumber + "]"); + return Boolean.FALSE; + } + secondIndeterminateNumbers.set(match.secondIndeterminateDateNumber); + } + } + } + + // If there are many more values of one number than the other then assume that's the day + final int ratioForResult = 3; + int firstCardinality = firstIndeterminateNumbers.cardinality(); + int secondCardinality = secondIndeterminateNumbers.cardinality(); + if (secondCardinality == 0) { + // This happens in the following cases: + // - No indeterminate numbers (in which case the answer is irrelevant) + // - Only one indeterminate number (in which case we favour month over day) + return Boolean.FALSE; + } + // firstCardinality can be 0, but then secondCardinality should have been 0 too + assert firstCardinality > 0; + if (firstCardinality >= ratioForResult * secondCardinality) { + explanation.add("Guessing day precedes month in timestamps as there were [" + + firstCardinality + "] distinct values of the first number but only [" + secondCardinality + "] for the second"); + return Boolean.TRUE; + } + if (secondCardinality >= ratioForResult * firstCardinality) { + explanation.add("Guessing month precedes day in timestamps as there " + (firstCardinality == 1 ? "was" : "were") + " only [" + + firstCardinality + "] distinct " + (firstCardinality == 1 ? "value" : "values") + + " of the first number but [" + secondCardinality + "] for the second"); + return Boolean.FALSE; + } + return null; } /** - * If a required timestamp format contains a fractional seconds component, adjust it to the - * fractional seconds format that's in the candidate timestamp formats, i.e. ",SSS". So, for - * example, "YYYY-MM-dd HH:mm:ss.SSSSSSSSS Z" would get adjusted to "YYYY-MM-dd HH:mm:ss,SSS Z". + * If timestamp formats where the order of day and month could vary (as in a choice between dd/MM/yyyy + * or MM/dd/yyyy for example), make a guess about whether the day comes first based on the default order + * for a given locale. + * @return true if the day comes first and false if the month comes first. */ - static String adjustRequiredFormat(String requiredFormat) { + boolean guessIsDayFirstFromLocale(Locale locale) { - return (requiredFormat == null) ? null : - FRACTIONAL_SECOND_TIMESTAMP_FORMAT_PATTERN.matcher(requiredFormat).replaceFirst(DEFAULT_FRACTIONAL_SECOND_FORMAT); + // Fall back to whether the day comes before the month in the default short date format for the server locale. + // Can't use 1 as that occurs in 1970, so 3rd Feb is the earliest date that will reveal the server default. + String feb3rd1970 = makeShortLocalizedDateTimeFormatterForLocale(locale).format(LocalDate.ofEpochDay(33)); + if (feb3rd1970.indexOf('3') < feb3rd1970.indexOf('2')) { + explanation.add("Guessing day precedes month in timestamps based on server locale [" + + locale.getDisplayName(Locale.ROOT) + "]"); + return true; + } else { + explanation.add("Guessing month precedes day in timestamps based on server locale [" + + locale.getDisplayName(Locale.ROOT) + "]"); + return false; + } } - private static TimestampMatch makeTimestampMatch(CandidateTimestampFormat chosenTimestampFormat, int chosenIndex, - String preface, String matchedDate, String epilogue) { - Tuple fractionalSecondsInterpretation = interpretFractionalSeconds(matchedDate); - List jodaTimestampFormats = chosenTimestampFormat.jodaTimestampFormats; - List javaTimestampFormats = chosenTimestampFormat.javaTimestampFormats; - Pattern simplePattern = chosenTimestampFormat.simplePattern; - char separator = fractionalSecondsInterpretation.v1(); - if (separator != DEFAULT_FRACTIONAL_SECOND_SEPARATOR) { - jodaTimestampFormats = jodaTimestampFormats.stream() - .map(jodaTimestampFormat -> jodaTimestampFormat.replace(DEFAULT_FRACTIONAL_SECOND_SEPARATOR, separator)) - .collect(Collectors.toList()); - javaTimestampFormats = javaTimestampFormats.stream() - .map(javaTimestampFormat -> javaTimestampFormat.replace(DEFAULT_FRACTIONAL_SECOND_SEPARATOR, separator)) - .collect(Collectors.toList()); - if (jodaTimestampFormats.stream().noneMatch(jodaTimestampFormat -> jodaTimestampFormat.startsWith("UNIX"))) { - String patternStr = simplePattern.pattern(); - int separatorPos = patternStr.lastIndexOf(DEFAULT_FRACTIONAL_SECOND_SEPARATOR); - if (separatorPos >= 0) { - StringBuilder newPatternStr = new StringBuilder(patternStr); - newPatternStr.replace(separatorPos, separatorPos + 1, ((separator == '.') ? "\\" : "") + separator); - simplePattern = Pattern.compile(newPatternStr.toString()); + @SuppressForbidden(reason = "DateTimeFormatter.ofLocalizedDate() is forbidden because it uses the default locale, " + + "but here we are explicitly setting the locale on the formatter in a subsequent call") + private static DateTimeFormatter makeShortLocalizedDateTimeFormatterForLocale(Locale locale) { + return DateTimeFormatter.ofLocalizedDate(FormatStyle.SHORT).withLocale(locale).withZone(ZoneOffset.UTC); + } + + /** + * Given a raw timestamp format that might contain indeterminate day/month parts, + * return the corresponding pattern with the placeholders replaced with concrete + * day/month formats. + */ + static String determiniseJavaTimestampFormat(String rawJavaTimestampFormat, boolean isDayFirst) { + + Matcher matcher = INDETERMINATE_FORMAT_INTERPRETER.matcher(rawJavaTimestampFormat); + if (matcher.matches()) { + StringBuilder builder = new StringBuilder(); + for (int groupNum = 1; groupNum <= matcher.groupCount(); ++groupNum) { + switch (groupNum) { + case 2: { + char formatChar = isDayFirst ? 'd' : 'M'; + for (int count = matcher.group(groupNum).length(); count > 0; --count) { + builder.append(formatChar); + } + break; + } + case 4: { + char formatChar = isDayFirst ? 'M' : 'd'; + for (int count = matcher.group(groupNum).length(); count > 0; --count) { + builder.append(formatChar); + } + break; + } + default: + builder.append(matcher.group(groupNum)); + break; } } + return builder.toString(); + } else { + return rawJavaTimestampFormat; } - int numberOfDigitsInFractionalComponent = fractionalSecondsInterpretation.v2(); - if (numberOfDigitsInFractionalComponent > 3) { - String fractionalSecondsFormat = "SSSSSSSSS".substring(0, numberOfDigitsInFractionalComponent); - jodaTimestampFormats = jodaTimestampFormats.stream() - .map(jodaTimestampFormat -> jodaTimestampFormat.replace("SSS", fractionalSecondsFormat)) - .collect(Collectors.toList()); - javaTimestampFormats = javaTimestampFormats.stream() - .map(javaTimestampFormat -> javaTimestampFormat.replace("SSS", fractionalSecondsFormat)) - .collect(Collectors.toList()); + } + + /** + * These are still used by Logstash. + * @return A list of Joda timestamp formats that correspond to the detected Java timestamp formats. + */ + public List getJodaTimestampFormats() { + List javaTimestampFormats = getJavaTimestampFormats(); + return (javaTimestampFormats == null) ? null : javaTimestampFormats.stream() + .map(format -> format.replace("yy", "YY").replace("XXX", "ZZ").replace("XX", "Z")).collect(Collectors.toList()); + } + + /** + * Does the parsing the timestamp produce different results depending on the timezone of the parser? + * I.e., does the textual representation NOT define the timezone? + */ + public boolean hasTimezoneDependentParsing() { + if (matchedFormats.isEmpty()) { + // If errorOnNoTimestamp is set and we get here it means no samples have been added, which is likely a programmer mistake + assert errorOnNoTimestamp == false; + return false; } - return new TimestampMatch(chosenIndex, preface, jodaTimestampFormats, javaTimestampFormats, simplePattern, - chosenTimestampFormat.standardGrokPatternName, epilogue); + return matches.stream().filter(match -> matchedFormats.size() < 2 || matchedFormats.get(0).canMergeWith(match.timestampFormat)) + .anyMatch(match -> match.hasTimezoneDependentParsing); } /** - * Interpret the fractional seconds component of a date to determine two things: - * 1. The separator character - one of colon, comma and dot. - * 2. The number of digits in the fractional component. - * @param date The textual representation of the date for which fractional seconds are to be interpreted. - * @return A tuple of (fractional second separator character, number of digits in fractional component). + * Sometimes Elasticsearch mappings for dates need to include the format. + * This method returns appropriate mappings settings: at minimum "type" : "date", + * and possibly also a "format" setting. */ - static Tuple interpretFractionalSeconds(String date) { + public Map getEsDateMappingTypeWithFormat() { + List javaTimestampFormats = getJavaTimestampFormats(); + if (javaTimestampFormats.contains("TAI64N")) { + // There's no format for TAI64N in the timestamp formats used in mappings + return Collections.singletonMap(FileStructureUtils.MAPPING_TYPE_SETTING, "keyword"); + } + Map mapping = new LinkedHashMap<>(); + mapping.put(FileStructureUtils.MAPPING_TYPE_SETTING, "date"); + String formats = javaTimestampFormats.stream().map(format -> { + switch (format) { + case "ISO8601": + return "iso8601"; + case "UNIX_MS": + return "epoch_millis"; + case "UNIX": + return "epoch_second"; + default: + return format; + } + }).collect(Collectors.joining("||")); + if (formats.isEmpty() == false) { + mapping.put(FileStructureUtils.MAPPING_FORMAT_SETTING, formats); + } + return mapping; + } - Matcher matcher = FRACTIONAL_SECOND_INTERPRETER.matcher(date); - if (matcher.find()) { - return new Tuple<>(matcher.group(1).charAt(0), matcher.group(2).length()); + /** + * Given a timestamp candidate and a bit set showing the positions of digits in a piece of text, find the range + * of indices over which the candidate might possibly match. Searching for Grok patterns that nearly + * match but don't quite is very expensive, so this method allows only a substring of a long string to be + * searched using the full Grok pattern. + * @param candidate The timestamp candidate to consider. + * @param numberPosBitSet If not null, each bit must be set to true if and only if the + * corresponding position in the original text is a digit. + * @return A tuple of the form (start index, end index). If the timestamp candidate cannot possibly match + * anywhere then (-1, -1) is returned. The end index in the returned tuple may be beyond the end of the + * string (because the bit set is not necessarily the same length as the string so it cannot be capped + * by this method), so the caller must cap it before passing to {@link String#substring(int, int)}. + */ + static Tuple findBoundsForCandidate(CandidateTimestampFormat candidate, BitSet numberPosBitSet) { + + if (numberPosBitSet == null || candidate.quickRuleOutBitSets.isEmpty()) { + return new Tuple<>(0, Integer.MAX_VALUE); } - return new Tuple<>(DEFAULT_FRACTIONAL_SECOND_SEPARATOR, 0); + int minFirstMatchStart = -1; + int maxLastMatchEnd = -1; + for (BitSet quickRuleOutBitSet : candidate.quickRuleOutBitSets) { + int currentMatch = findBitPattern(numberPosBitSet, 0, quickRuleOutBitSet); + if (currentMatch >= 0) { + if (minFirstMatchStart == -1 || currentMatch < minFirstMatchStart) { + minFirstMatchStart = currentMatch; + } + do { + int currentMatchEnd = currentMatch + quickRuleOutBitSet.length(); + if (currentMatchEnd > maxLastMatchEnd) { + maxLastMatchEnd = currentMatchEnd; + } + currentMatch = findBitPattern(numberPosBitSet, currentMatch + 1, quickRuleOutBitSet); + } while (currentMatch > 0); + } + } + if (minFirstMatchStart == -1) { + assert maxLastMatchEnd == -1; + return new Tuple<>(-1, -1); + } + int lowerBound = Math.max(0, minFirstMatchStart - candidate.maxCharsBeforeQuickRuleOutMatch); + int upperBound = (Integer.MAX_VALUE - candidate.maxCharsAfterQuickRuleOutMatch - maxLastMatchEnd < 0) ? + Integer.MAX_VALUE : (maxLastMatchEnd + candidate.maxCharsAfterQuickRuleOutMatch); + return new Tuple<>(lowerBound, upperBound); } /** - * Represents a timestamp that has matched a field value or been found within a message. + * This is basically the "Shift-Add" algorithm for string matching from the paper "A New Approach to Text Searching". + * In this case the "alphabet" has just two "characters": 0 and 1 (or false and true in + * some places because of the {@link BitSet} interface). + * @see A New Approach to Text Searching + * @param findIn The binary string to search in; "text" in the terminology of the paper. + * @param beginIndex The index to start searching {@code findIn}. + * @param toFind The binary string to find; "pattern" in the terminology of the paper. + * @return The index (starting from 0) of the first match of {@code toFind} in {@code findIn}, or -1 if no match is found. */ - public static final class TimestampMatch { + static int findBitPattern(BitSet findIn, int beginIndex, BitSet toFind) { - /** - * The index of the corresponding entry in the ORDERED_CANDIDATE_FORMATS list. - */ - public final int candidateIndex; + assert beginIndex >= 0; - /** - * Text that came before the timestamp in the matched field/message. - */ - public final String preface; + // Note that this only compares up to the highest bit that is set, so trailing non digit characters will not participate + // in the comparison. This is not currently a problem for this class, but is something to consider if this functionality + // is ever reused elsewhere. The solution would be to use a wrapper class containing a BitSet and a separate int to store + // the length to compare. + int toFindLength = toFind.length(); + int findInLength = findIn.length(); + if (toFindLength == 0) { + return beginIndex; + } + // 63 here is the largest bit position (starting from 0) in a long + if (toFindLength > Math.min(63, findInLength)) { + // Since we control the input we should avoid the situation + // where the pattern to find has more bits than a single long + assert toFindLength <= 63 : "Length to find was [" + toFindLength + "] - cannot be greater than 63"; + return -1; + } + // ~1L means all bits set except the least significant + long state = ~1L; + // This array has one entry per "character" in the "alphabet" (which for this method consists of just 0 and 1) + // ~0L means all bits set + long[] toFindMask = { ~0L, ~0L }; + for (int i = 0; i < toFindLength; ++i) { + toFindMask[toFind.get(i) ? 1 : 0] &= ~(1L << i); + } + for (int i = beginIndex; i < findInLength; ++i) { + state |= toFindMask[findIn.get(i) ? 1 : 0]; + state <<= 1; + if ((state & (1L << toFindLength)) == 0L) { + return i - toFindLength + 1; + } + } - /** - * Time format specifier(s) that will work with Logstash and Ingest pipeline date parsers. - */ - public final List jodaTimestampFormats; + return -1; + } + + /** + * Converts a string into a {@link BitSet} with one bit per character of the string and bits + * set to 1 if the corresponding character in the string is a digit and 0 if not. (The first + * character of the string corresponds to the least significant bit in the {@link BitSet}, so + * if the {@link BitSet} is printed in natural order it will be reversed compared to the input, + * and then the most significant bit will be printed first. However, in terms of random access + * to individual characters/bits, this "reversal" is by far the most intuitive representation.) + * @param str The string to be mapped. + * @return A {@link BitSet} suitable for use as input to {@link #findBitPattern}. + */ + static BitSet stringToNumberPosBitSet(String str) { + + BitSet result = new BitSet(); + for (int index = 0; index < str.length(); ++index) { + if (Character.isDigit(str.charAt(index))) { + result.set(index); + } + } + return result; + } + + /** + * Represents an overall format matched within the supplied samples. + * Similar {@link TimestampFormat}s can be merged when they can be + * recognised by the same Grok pattern, simple regular expression, and + * punctuation in the preface, but have different Java timestamp formats. + * + * Objects are immutable. Merges that result in changes return new + * objects. + */ + static final class TimestampFormat { /** - * Time format specifier(s) that will work with Logstash and Ingest pipeline date parsers. + * Java time formats that may contain indeterminate day/month patterns. */ - public final List javaTimestampFormats; + final List rawJavaTimestampFormats; /** * A simple regex that will work in many languages to detect whether the timestamp format * exists in a particular line. */ - public final Pattern simplePattern; + final Pattern simplePattern; /** - * Name of an out-of-the-box Grok pattern that will match the timestamp. + * Name of a Grok pattern that will match the timestamp. */ - public final String grokPatternName; + final String grokPatternName; /** - * Text that came after the timestamp in the matched field/message. + * If {@link #grokPatternName} is not an out-of-the-box Grok pattern, then its definition. + */ + final Map customGrokPatternDefinitions; + + /** + * The punctuation characters in the text preceeding the timestamp in the samples. */ - public final String epilogue; + final String prefacePunctuation; + + TimestampFormat(List rawJavaTimestampFormats, Pattern simplePattern, String grokPatternName, + Map customGrokPatternDefinitions, String prefacePunctuation) { + this.rawJavaTimestampFormats = Collections.unmodifiableList(rawJavaTimestampFormats); + this.simplePattern = Objects.requireNonNull(simplePattern); + this.grokPatternName = Objects.requireNonNull(grokPatternName); + this.customGrokPatternDefinitions = Objects.requireNonNull(customGrokPatternDefinitions); + this.prefacePunctuation = prefacePunctuation; + } + + boolean canMergeWith(TimestampFormat other) { + + if (this == other) { + return true; + } - TimestampMatch(int candidateIndex, String preface, String jodaTimestampFormat, String javaTimestampFormat, String simpleRegex, - String grokPatternName, String epilogue) { - this(candidateIndex, preface, Collections.singletonList(jodaTimestampFormat), Collections.singletonList(javaTimestampFormat), - simpleRegex, grokPatternName, epilogue); + return other != null && + this.simplePattern.pattern().equals(other.simplePattern.pattern()) && + this.grokPatternName.equals(other.grokPatternName) && + Objects.equals(this.customGrokPatternDefinitions, other.customGrokPatternDefinitions) && + this.prefacePunctuation.equals(other.prefacePunctuation); } - TimestampMatch(int candidateIndex, String preface, List jodaTimestampFormats, List javaTimestampFormats, - String simpleRegex, String grokPatternName, String epilogue) { - this(candidateIndex, preface, jodaTimestampFormats, javaTimestampFormats, Pattern.compile(simpleRegex), grokPatternName, - epilogue); + TimestampFormat mergeWith(TimestampFormat other) { + + if (canMergeWith(other)) { + if (rawJavaTimestampFormats.equals(other.rawJavaTimestampFormats) == false) { + // Do the merge like this to preserve ordering + Set mergedJavaTimestampFormats = new LinkedHashSet<>(rawJavaTimestampFormats); + if (mergedJavaTimestampFormats.addAll(other.rawJavaTimestampFormats)) { + return new TimestampFormat(new ArrayList<>(mergedJavaTimestampFormats), simplePattern, grokPatternName, + customGrokPatternDefinitions, prefacePunctuation); + } + } + // The merged format is exactly the same as this format, so there's no need to create a new object + return this; + } + + throw new IllegalArgumentException("Cannot merge timestamp format [" + this + "] with [" + other + "]"); + } + + @Override + public int hashCode() { + return Objects.hash(rawJavaTimestampFormats, simplePattern.pattern(), grokPatternName, customGrokPatternDefinitions, + prefacePunctuation); + } + + @Override + public boolean equals(Object other) { + if (this == other) { + return true; + } + if (other == null || getClass() != other.getClass()) { + return false; + } + + TimestampFormat that = (TimestampFormat) other; + return Objects.equals(this.rawJavaTimestampFormats, that.rawJavaTimestampFormats) && + Objects.equals(this.simplePattern.pattern(), that.simplePattern.pattern()) && + Objects.equals(this.grokPatternName, that.grokPatternName) && + Objects.equals(this.customGrokPatternDefinitions, that.customGrokPatternDefinitions) && + Objects.equals(this.prefacePunctuation, that.prefacePunctuation); } - TimestampMatch(int candidateIndex, String preface, List jodaTimestampFormats, List javaTimestampFormats, - Pattern simplePattern, String grokPatternName, - String epilogue) { - this.candidateIndex = candidateIndex; - this.preface = preface; - this.jodaTimestampFormats = Collections.unmodifiableList(jodaTimestampFormats); - this.javaTimestampFormats = Collections.unmodifiableList(javaTimestampFormats); - this.simplePattern = simplePattern; - this.grokPatternName = grokPatternName; - this.epilogue = epilogue; + @Override + public String toString() { + return "Java timestamp formats = " + rawJavaTimestampFormats.stream().collect(Collectors.joining("', '", "[ '", "' ]")) + + ", simple pattern = '" + simplePattern.pattern() + "', grok pattern = '" + grokPatternName + "'" + + (customGrokPatternDefinitions.isEmpty() ? "" : ", custom grok pattern definitions = " + customGrokPatternDefinitions) + + ", preface punctuation = '" + prefacePunctuation + "'"; } + } + + /** + * Represents one match of a timestamp in one added sample. + */ + static final class TimestampMatch { + + // This picks out punctuation that is likely to represent a field separator. It deliberately + // leaves out punctuation that's most likely to vary between field values, such as dots. + private static final Pattern NON_PUNCTUATION_PATTERN = Pattern.compile("[^\\\\/|~:;,<>()\\[\\]{}«»\t]+"); + + // Used for deciding whether an ISO8601 timestamp contains a timezone. + private static final Pattern ISO8601_TIMEZONE_PATTERN = Pattern.compile("(Z|[+-]\\d{2}:?\\d{2})$"); /** - * Does the parsing the timestamp produce different results depending on the timezone of the parser? - * I.e., does the textual representation NOT define the timezone? + * Text that came before the timestamp in the matched field/message. */ - public boolean hasTimezoneDependentParsing() { - return javaTimestampFormats.stream().anyMatch(javaTimestampFormat -> - javaTimestampFormat.indexOf('X') == -1 && javaTimestampFormat.indexOf('z') == -1 && javaTimestampFormat.contains("mm")); - } + final String preface; /** - * Sometimes Elasticsearch mappings for dates need to include the format. - * This method returns appropriate mappings settings: at minimum "type"="date", - * and possibly also a "format" setting. + * Time format specifier(s) that will work with Logstash and Ingest pipeline date parsers. + */ + final TimestampFormat timestampFormat; + + /** + * These store the first and second numbers when the ordering of day and month is unclear, + * for example in 05/05/2019. Where the ordering is obvious they are set to -1. */ - public Map getEsDateMappingTypeWithFormat() { - if (javaTimestampFormats.contains("TAI64N")) { - // There's no format for TAI64N in the timestamp formats used in mappings - return Collections.singletonMap(FileStructureUtils.MAPPING_TYPE_SETTING, "keyword"); + final int firstIndeterminateDateNumber; + final int secondIndeterminateDateNumber; + + final boolean hasTimezoneDependentParsing; + + /** + * Text that came after the timestamp in the matched field/message. + */ + final String epilogue; + + TimestampMatch(CandidateTimestampFormat chosenTimestampFormat, String preface, String matchedDate, String epilogue) { + this.preface = Objects.requireNonNull(preface); + this.timestampFormat = new TimestampFormat(chosenTimestampFormat.javaTimestampFormatSupplier.apply(matchedDate), + chosenTimestampFormat.simplePattern, chosenTimestampFormat.outputGrokPatternName, + chosenTimestampFormat.customGrokPatternDefinitions(), + preface.isEmpty() ? preface : NON_PUNCTUATION_PATTERN.matcher(preface).replaceAll("")); + int[] indeterminateDateNumbers = parseIndeterminateDateNumbers(matchedDate, timestampFormat.rawJavaTimestampFormats); + this.firstIndeterminateDateNumber = indeterminateDateNumbers[0]; + this.secondIndeterminateDateNumber = indeterminateDateNumbers[1]; + this.hasTimezoneDependentParsing = requiresTimezoneDependentParsing(timestampFormat.rawJavaTimestampFormats.get(0), + matchedDate); + this.epilogue = Objects.requireNonNull(epilogue); + } + + TimestampMatch(TimestampMatch toCopyExceptFormat, TimestampFormat timestampFormat) { + this.preface = toCopyExceptFormat.preface; + this.timestampFormat = Objects.requireNonNull(timestampFormat); + this.firstIndeterminateDateNumber = toCopyExceptFormat.firstIndeterminateDateNumber; + this.secondIndeterminateDateNumber = toCopyExceptFormat.secondIndeterminateDateNumber; + this.hasTimezoneDependentParsing = toCopyExceptFormat.hasTimezoneDependentParsing; + this.epilogue = toCopyExceptFormat.epilogue; + } + + static boolean requiresTimezoneDependentParsing(String format, String matchedDate) { + switch (format) { + case "ISO8601": + assert matchedDate.length() > 6; + return ISO8601_TIMEZONE_PATTERN.matcher(matchedDate).find(matchedDate.length() - 6) == false; + case "UNIX_MS": + case "UNIX": + case "TAI64N": + return false; + default: + boolean notQuoted = true; + for (int pos = 0; pos < format.length(); ++pos) { + char curChar = format.charAt(pos); + if (curChar == '\'') { + notQuoted = !notQuoted; + } else if (notQuoted && (curChar == 'X' || curChar == 'z')) { + return false; + } + } + return true; } - Map mapping = new LinkedHashMap<>(); - mapping.put(FileStructureUtils.MAPPING_TYPE_SETTING, "date"); - String formats = javaTimestampFormats.stream().flatMap(format -> { - switch (format) { - case "ISO8601": - return Stream.empty(); - case "UNIX_MS": - return Stream.of("epoch_millis"); - case "UNIX": - return Stream.of("epoch_second"); - default: - return Stream.of(format); + } + + static int[] parseIndeterminateDateNumbers(String matchedDate, List rawJavaTimestampFormats) { + int[] indeterminateDateNumbers = { -1, -1 }; + + for (String rawJavaTimestampFormat : rawJavaTimestampFormats) { + + if (rawJavaTimestampFormat.indexOf(INDETERMINATE_FIELD_PLACEHOLDER) >= 0) { + + try { + // Parse leniently under the assumption the first sequence of hashes is day and the + // second is month - this may not be true but all we do is extract the numbers + String javaTimestampFormat = determiniseJavaTimestampFormat(rawJavaTimestampFormat, true); + + // TODO consider support for overriding the locale too + // But it's not clear-cut as Grok only knows English and German date + // words and for indeterminate formats we're expecting numbers anyway + DateTimeFormatter javaTimeFormatter = DateTimeFormatter.ofPattern(javaTimestampFormat, Locale.ROOT) + .withResolverStyle(ResolverStyle.LENIENT); + TemporalAccessor accessor = javaTimeFormatter.parse(matchedDate); + indeterminateDateNumbers[0] = accessor.get(ChronoField.DAY_OF_MONTH); + + // Now parse again leniently under the assumption the first sequence of hashes is month and the + // second is day - we have to do it twice and extract day as the lenient parser will wrap months > 12 + javaTimestampFormat = determiniseJavaTimestampFormat(rawJavaTimestampFormat, false); + + // TODO consider support for overriding the locale too + // But it's not clear-cut as Grok only knows English and German date + // words and for indeterminate formats we're expecting numbers anyway + javaTimeFormatter = DateTimeFormatter.ofPattern(javaTimestampFormat, Locale.ROOT) + .withResolverStyle(ResolverStyle.LENIENT); + accessor = javaTimeFormatter.parse(matchedDate); + indeterminateDateNumbers[1] = accessor.get(ChronoField.DAY_OF_MONTH); + if (indeterminateDateNumbers[0] > 0 && indeterminateDateNumbers[1] > 0) { + break; + } + } catch (DateTimeException e) { + // Move on to the next format + } } - }).collect(Collectors.joining("||")); - if (formats.isEmpty() == false) { - mapping.put(FileStructureUtils.MAPPING_FORMAT_SETTING, formats); } - return mapping; + + return indeterminateDateNumbers; } @Override public int hashCode() { - return Objects.hash(candidateIndex, preface, jodaTimestampFormats, javaTimestampFormats, simplePattern.pattern(), - grokPatternName, epilogue); + return Objects.hash(preface, timestampFormat, firstIndeterminateDateNumber, secondIndeterminateDateNumber, + hasTimezoneDependentParsing, epilogue); } @Override @@ -497,66 +1330,186 @@ public boolean equals(Object other) { } TimestampMatch that = (TimestampMatch) other; - return this.candidateIndex == that.candidateIndex && - Objects.equals(this.preface, that.preface) && - Objects.equals(this.jodaTimestampFormats, that.jodaTimestampFormats) && - Objects.equals(this.javaTimestampFormats, that.javaTimestampFormats) && - Objects.equals(this.simplePattern.pattern(), that.simplePattern.pattern()) && - Objects.equals(this.grokPatternName, that.grokPatternName) && + return Objects.equals(this.preface, that.preface) && + Objects.equals(this.timestampFormat, that.timestampFormat) && + this.firstIndeterminateDateNumber == that.firstIndeterminateDateNumber && + this.secondIndeterminateDateNumber == that.secondIndeterminateDateNumber && + this.hasTimezoneDependentParsing == that.hasTimezoneDependentParsing && Objects.equals(this.epilogue, that.epilogue); } @Override public String toString() { - return "index = " + candidateIndex + (preface.isEmpty() ? "" : ", preface = '" + preface + "'") + - ", Joda timestamp formats = " + jodaTimestampFormats.stream().collect(Collectors.joining("', '", "[ '", "' ]")) + - ", Java timestamp formats = " + javaTimestampFormats.stream().collect(Collectors.joining("', '", "[ '", "' ]")) + - ", simple pattern = '" + simplePattern.pattern() + "', grok pattern = '" + grokPatternName + "'" + - (epilogue.isEmpty() ? "" : ", epilogue = '" + epilogue + "'"); + return (preface.isEmpty() ? "" : "preface = '" + preface + "', ") + timestampFormat + + ((firstIndeterminateDateNumber > 0 || secondIndeterminateDateNumber > 0) + ? ", indeterminate date numbers = (" + firstIndeterminateDateNumber + "," + secondIndeterminateDateNumber + ")" + : "") + + ", has timezone-dependent parsing = " + hasTimezoneDependentParsing + + (epilogue.isEmpty() ? "" : ", epilogue = '" + epilogue + "'"); } } + /** + * Stores the details of a possible timestamp format to consider when looking for timestamps. + */ static final class CandidateTimestampFormat { - final List jodaTimestampFormats; - final List javaTimestampFormats; + private static final Pattern FRACTIONAL_SECOND_INTERPRETER = Pattern.compile("([" + FRACTIONAL_SECOND_SEPARATORS + "])(\\d{3,9})$"); + // This means that in the case of a literal Z, XXX is preferred + private static final Pattern TRAILING_OFFSET_WITHOUT_COLON_FINDER = Pattern.compile("[+-]\\d{4}$"); + + final Function> javaTimestampFormatSupplier; final Pattern simplePattern; + final String strictGrokPattern; final Grok strictSearchGrok; final Grok strictFullMatchGrok; - final String standardGrokPatternName; - final List quickRuleOutIndices; - - CandidateTimestampFormat(String jodaTimestampFormat, String javaTimestampFormat, String simpleRegex, String strictGrokPattern, - String standardGrokPatternName) { - this(Collections.singletonList(jodaTimestampFormat), Collections.singletonList(javaTimestampFormat), simpleRegex, - strictGrokPattern, standardGrokPatternName); - } + final String outputGrokPatternName; + final List quickRuleOutBitSets; + final int maxCharsBeforeQuickRuleOutMatch; + final int maxCharsAfterQuickRuleOutMatch; - CandidateTimestampFormat(String jodaTimestampFormat, String javaTimestampFormat, String simpleRegex, String strictGrokPattern, - String standardGrokPatternName, List quickRuleOutIndices) { - this(Collections.singletonList(jodaTimestampFormat), Collections.singletonList(javaTimestampFormat), simpleRegex, - strictGrokPattern, standardGrokPatternName, quickRuleOutIndices); + CandidateTimestampFormat(Function> javaTimestampFormatSupplier, String simpleRegex, String strictGrokPattern, + String outputGrokPatternName) { + this(javaTimestampFormatSupplier, simpleRegex, strictGrokPattern, outputGrokPatternName, Collections.emptyList(), + Integer.MAX_VALUE, Integer.MAX_VALUE); } - CandidateTimestampFormat(List jodaTimestampFormats, List javaTimestampFormats, String simpleRegex, - String strictGrokPattern, String standardGrokPatternName) { - this(jodaTimestampFormats, javaTimestampFormats, simpleRegex, strictGrokPattern, standardGrokPatternName, - Collections.emptyList()); + CandidateTimestampFormat(Function> javaTimestampFormatSupplier, String simpleRegex, String strictGrokPattern, + String outputGrokPatternName, String quickRuleOutPattern, int maxCharsBeforeQuickRuleOutMatch, + int maxCharsAfterQuickRuleOutMatch) { + this(javaTimestampFormatSupplier, simpleRegex, strictGrokPattern, outputGrokPatternName, + Collections.singletonList(quickRuleOutPattern), maxCharsBeforeQuickRuleOutMatch, maxCharsAfterQuickRuleOutMatch); } - CandidateTimestampFormat(List jodaTimestampFormats, List javaTimestampFormats, String simpleRegex, - String strictGrokPattern, String standardGrokPatternName, List quickRuleOutIndices) { - this.jodaTimestampFormats = jodaTimestampFormats; - this.javaTimestampFormats = javaTimestampFormats; + CandidateTimestampFormat(Function> javaTimestampFormatSupplier, String simpleRegex, String strictGrokPattern, + String outputGrokPatternName, List quickRuleOutPatterns, int maxCharsBeforeQuickRuleOutMatch, + int maxCharsAfterQuickRuleOutMatch) { + this.javaTimestampFormatSupplier = Objects.requireNonNull(javaTimestampFormatSupplier); this.simplePattern = Pattern.compile(simpleRegex, Pattern.MULTILINE); + this.strictGrokPattern = Objects.requireNonNull(strictGrokPattern); // The (?m) here has the Ruby meaning, which is equivalent to (?s) in Java this.strictSearchGrok = new Grok(Grok.getBuiltinPatterns(), "(?m)%{DATA:" + PREFACE + "}" + strictGrokPattern + "%{GREEDYDATA:" + EPILOGUE + "}", TimeoutChecker.watchdog); this.strictFullMatchGrok = new Grok(Grok.getBuiltinPatterns(), "^" + strictGrokPattern + "$", TimeoutChecker.watchdog); - this.standardGrokPatternName = standardGrokPatternName; - assert quickRuleOutIndices.stream() - .noneMatch(quickRuleOutIndex -> quickRuleOutIndex < 0 || quickRuleOutIndex >= QUICK_RULE_OUT_PATTERNS.size()); - this.quickRuleOutIndices = quickRuleOutIndices; + this.outputGrokPatternName = Objects.requireNonNull(outputGrokPatternName); + this.quickRuleOutBitSets = quickRuleOutPatterns.stream().map(TimestampFormatFinder::stringToNumberPosBitSet) + .collect(Collectors.toList()); + assert maxCharsBeforeQuickRuleOutMatch >= 0; + this.maxCharsBeforeQuickRuleOutMatch = maxCharsBeforeQuickRuleOutMatch; + assert maxCharsAfterQuickRuleOutMatch >= 0; + this.maxCharsAfterQuickRuleOutMatch = maxCharsAfterQuickRuleOutMatch; + } + + Map customGrokPatternDefinitions() { + return CUSTOM_TIMESTAMP_GROK_NAME.equals(outputGrokPatternName) + ? Collections.singletonMap(CUSTOM_TIMESTAMP_GROK_NAME, strictGrokPattern) + : Collections.emptyMap(); + } + + static List iso8601FormatFromExample(String example) { + + // The Elasticsearch ISO8601 parser requires a literal T between the date and time, so + // longhand formats are needed if there's a space instead + return (example.indexOf('T') >= 0) ? Collections.singletonList("ISO8601") : iso8601LikeFormatFromExample(example, " ", ""); + } + + static List iso8601LikeFormatFromExample(String example, String timeSeparator, String timezoneSeparator) { + + StringBuilder builder = new StringBuilder("yyyy-MM-dd"); + builder.append(timeSeparator).append("HH:mm"); + + // Seconds are optional in ISO8601 + if (example.length() > builder.length() && example.charAt(builder.length()) == ':') { + builder.append(":ss"); + } + + if (example.length() > builder.length()) { + + // Add fractional seconds pattern if appropriate + char nextChar = example.charAt(builder.length()); + if (FRACTIONAL_SECOND_SEPARATORS.indexOf(nextChar) >= 0) { + builder.append(nextChar); + for (int pos = builder.length(); pos < example.length(); ++pos) { + if (Character.isDigit(example.charAt(pos))) { + builder.append('S'); + } else { + break; + } + } + } + + // Add timezone if appropriate - in the case of a literal Z, XX is preferred + if (example.length() > builder.length()) { + builder.append(timezoneSeparator).append((example.indexOf(':', builder.length()) > 0) ? "XXX" : "XX"); + } + } else { + // This method should not have been called if the example didn't include the bare minimum of date and time + assert example.length() == builder.length() : "Expected [" + example + "] and [" + builder + "] to be the same length"; + } + + return Collections.singletonList(builder.toString()); + } + + static List adjustTrailingTimezoneFromExample(String example, String formatWithSecondsAndXX) { + return Collections.singletonList( + TRAILING_OFFSET_WITHOUT_COLON_FINDER.matcher(example).find() ? formatWithSecondsAndXX : formatWithSecondsAndXX + "X"); + } + + private static String adjustFractionalSecondsFromEndOfExample(String example, String formatNoFraction) { + + Matcher matcher = FRACTIONAL_SECOND_INTERPRETER.matcher(example); + return matcher.find() + ? (formatNoFraction + matcher.group(1).charAt(0) + "SSSSSSSSS".substring(0, matcher.group(2).length())) + : formatNoFraction; + } + + static List expandDayAndAdjustFractionalSecondsFromExample(String example, String formatWithddAndNoFraction) { + + String formatWithdd = adjustFractionalSecondsFromEndOfExample(example, formatWithddAndNoFraction); + return Arrays.asList(formatWithdd, formatWithdd.replace(" dd", " d")); + } + + static List indeterminateDayMonthFormatFromExample(String example) { + + StringBuilder builder = new StringBuilder(); + int examplePos = 0; + + // INDETERMINATE_FIELD_PLACEHOLDER here could represent either a day number (d) or month number (M) - it + // will get changed later based on evidence from many examples + for (Character patternChar + : Arrays.asList(INDETERMINATE_FIELD_PLACEHOLDER, INDETERMINATE_FIELD_PLACEHOLDER, 'y', 'H', 'm', 's')) { + + boolean foundDigit = false; + while (examplePos < example.length() && Character.isDigit(example.charAt(examplePos))) { + foundDigit = true; + builder.append(patternChar); + ++examplePos; + } + + if (patternChar == 's' || examplePos >= example.length() || foundDigit == false) { + break; + } + + builder.append(example.charAt(examplePos)); + ++examplePos; + } + + String format = builder.toString(); + // The Grok pattern should ensure we got at least as far as the year + assert format.contains("yy") : "Unexpected format [" + format + "] from example [" + example + "]"; + + if (examplePos < example.length()) { + // If we haven't consumed the whole example then we should have got as far as + // the (whole) seconds, and the bit afterwards should be the fractional seconds + assert builder.toString().endsWith("ss") : "Unexpected format [" + format + "] from example [" + example + "]"; + format = adjustFractionalSecondsFromEndOfExample(example, format); + } + + assert Character.isLetter(format.charAt(format.length() - 1)) + : "Unexpected format [" + format + "] from example [" + example + "]"; + assert format.length() == example.length() : "Unexpected format [" + format + "] from example [" + example + "]"; + + return Collections.singletonList(format); } } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/XmlFileStructureFinder.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/XmlFileStructureFinder.java index 53550ebf18dd3..d2572b7fd2085 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/XmlFileStructureFinder.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/XmlFileStructureFinder.java @@ -8,7 +8,6 @@ import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.xpack.core.ml.filestructurefinder.FieldStats; import org.elasticsearch.xpack.core.ml.filestructurefinder.FileStructure; -import org.elasticsearch.xpack.ml.filestructurefinder.TimestampFormatFinder.TimestampMatch; import org.w3c.dom.Document; import org.w3c.dom.NamedNodeMap; import org.w3c.dom.Node; @@ -79,6 +78,9 @@ static XmlFileStructureFinder makeXmlFileStructureFinder(List explanatio ++linesConsumed; } + // null to allow GC before timestamp search + sampleDocEnds = null; + // If we get here the XML parser should have confirmed this assert messagePrefix.charAt(0) == '<'; String topLevelTag = messagePrefix.substring(1); @@ -91,17 +93,17 @@ static XmlFileStructureFinder makeXmlFileStructureFinder(List explanatio .setNumMessagesAnalyzed(sampleRecords.size()) .setMultilineStartPattern("^\\s*<" + topLevelTag); - Tuple timeField = + Tuple timeField = FileStructureUtils.guessTimestampField(explanation, sampleRecords, overrides, timeoutChecker); if (timeField != null) { boolean needClientTimeZone = timeField.v2().hasTimezoneDependentParsing(); structureBuilder.setTimestampField(timeField.v1()) - .setJodaTimestampFormats(timeField.v2().jodaTimestampFormats) - .setJavaTimestampFormats(timeField.v2().javaTimestampFormats) + .setJodaTimestampFormats(timeField.v2().getJodaTimestampFormats()) + .setJavaTimestampFormats(timeField.v2().getJavaTimestampFormats()) .setNeedClientTimezone(needClientTimeZone) - .setIngestPipeline(FileStructureUtils.makeIngestPipelineDefinition(null, topLevelTag + "." + timeField.v1(), - timeField.v2().javaTimestampFormats, needClientTimeZone)); + .setIngestPipeline(FileStructureUtils.makeIngestPipelineDefinition(null, Collections.emptyMap(), + topLevelTag + "." + timeField.v1(), timeField.v2().getJavaTimestampFormats(), needClientTimeZone)); } Tuple, SortedMap> mappingsAndFieldStats = diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/DelimitedFileStructureFinderTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/DelimitedFileStructureFinderTests.java index 10bdf0d16d8eb..280a50324e447 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/DelimitedFileStructureFinderTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/DelimitedFileStructureFinderTests.java @@ -42,7 +42,7 @@ public void testCreateConfigsGivenCompleteCsv() throws Exception { assertEquals(hasByteOrderMarker, structure.getHasByteOrderMarker()); } assertEquals("^\"?time\"?,\"?message\"?", structure.getExcludeLinesPattern()); - assertEquals("^\"?\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2}", structure.getMultilineStartPattern()); + assertEquals("^\"?\\d{4}-\\d{2}-\\d{2}[T ]\\d{2}:\\d{2}", structure.getMultilineStartPattern()); assertEquals(Character.valueOf(','), structure.getDelimiter()); assertEquals(Character.valueOf('"'), structure.getQuote()); assertTrue(structure.getHasHeaderRow()); @@ -77,7 +77,7 @@ public void testCreateConfigsGivenCompleteCsvAndColumnNamesOverride() throws Exc assertEquals(hasByteOrderMarker, structure.getHasByteOrderMarker()); } assertEquals("^\"?time\"?,\"?message\"?", structure.getExcludeLinesPattern()); - assertEquals("^\"?\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2}", structure.getMultilineStartPattern()); + assertEquals("^\"?\\d{4}-\\d{2}-\\d{2}[T ]\\d{2}:\\d{2}", structure.getMultilineStartPattern()); assertEquals(Character.valueOf(','), structure.getDelimiter()); assertEquals(Character.valueOf('"'), structure.getQuote()); assertTrue(structure.getHasHeaderRow()); @@ -147,7 +147,7 @@ public void testCreateConfigsGivenCsvWithIncompleteLastRecord() throws Exception assertEquals(hasByteOrderMarker, structure.getHasByteOrderMarker()); } assertEquals("^\"?message\"?,\"?time\"?,\"?count\"?", structure.getExcludeLinesPattern()); - assertEquals("^.*?,\"?\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2}", structure.getMultilineStartPattern()); + assertEquals("^.*?,\"?\\d{4}-\\d{2}-\\d{2}[T ]\\d{2}:\\d{2}", structure.getMultilineStartPattern()); assertEquals(Character.valueOf(','), structure.getDelimiter()); assertEquals(Character.valueOf('"'), structure.getQuote()); assertTrue(structure.getHasHeaderRow()); @@ -185,7 +185,7 @@ public void testCreateConfigsGivenCsvWithTrailingNulls() throws Exception { "\"?RatecodeID\"?,\"?store_and_fwd_flag\"?,\"?PULocationID\"?,\"?DOLocationID\"?,\"?payment_type\"?,\"?fare_amount\"?," + "\"?extra\"?,\"?mta_tax\"?,\"?tip_amount\"?,\"?tolls_amount\"?,\"?improvement_surcharge\"?,\"?total_amount\"?,\"?\"?,\"?\"?", structure.getExcludeLinesPattern()); - assertEquals("^.*?,\"?\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2}", structure.getMultilineStartPattern()); + assertEquals("^.*?,\"?\\d{4}-\\d{2}-\\d{2}[T ]\\d{2}:\\d{2}", structure.getMultilineStartPattern()); assertEquals(Character.valueOf(','), structure.getDelimiter()); assertEquals(Character.valueOf('"'), structure.getQuote()); assertTrue(structure.getHasHeaderRow()); @@ -230,7 +230,7 @@ public void testCreateConfigsGivenCsvWithTrailingNullsAndOverriddenTimeField() t "\"?RatecodeID\"?,\"?store_and_fwd_flag\"?,\"?PULocationID\"?,\"?DOLocationID\"?,\"?payment_type\"?,\"?fare_amount\"?," + "\"?extra\"?,\"?mta_tax\"?,\"?tip_amount\"?,\"?tolls_amount\"?,\"?improvement_surcharge\"?,\"?total_amount\"?,\"?\"?,\"?\"?", structure.getExcludeLinesPattern()); - assertEquals("^.*?,.*?,\"?\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2}", structure.getMultilineStartPattern()); + assertEquals("^.*?,.*?,\"?\\d{4}-\\d{2}-\\d{2}[T ]\\d{2}:\\d{2}", structure.getMultilineStartPattern()); assertEquals(Character.valueOf(','), structure.getDelimiter()); assertEquals(Character.valueOf('"'), structure.getQuote()); assertTrue(structure.getHasHeaderRow()); @@ -270,7 +270,7 @@ public void testCreateConfigsGivenCsvWithTrailingNullsExceptHeader() throws Exce "\"?RatecodeID\"?,\"?store_and_fwd_flag\"?,\"?PULocationID\"?,\"?DOLocationID\"?,\"?payment_type\"?,\"?fare_amount\"?," + "\"?extra\"?,\"?mta_tax\"?,\"?tip_amount\"?,\"?tolls_amount\"?,\"?improvement_surcharge\"?,\"?total_amount\"?", structure.getExcludeLinesPattern()); - assertEquals("^.*?,\"?\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2}", structure.getMultilineStartPattern()); + assertEquals("^.*?,\"?\\d{4}-\\d{2}-\\d{2}[T ]\\d{2}:\\d{2}", structure.getMultilineStartPattern()); assertEquals(Character.valueOf(','), structure.getDelimiter()); assertEquals(Character.valueOf('"'), structure.getQuote()); assertTrue(structure.getHasHeaderRow()); @@ -317,7 +317,7 @@ public void testCreateConfigsGivenCsvWithTrailingNullsExceptHeaderAndColumnNames "\"?RatecodeID\"?,\"?store_and_fwd_flag\"?,\"?PULocationID\"?,\"?DOLocationID\"?,\"?payment_type\"?,\"?fare_amount\"?," + "\"?extra\"?,\"?mta_tax\"?,\"?tip_amount\"?,\"?tolls_amount\"?,\"?improvement_surcharge\"?,\"?total_amount\"?", structure.getExcludeLinesPattern()); - assertEquals("^.*?,\"?\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2}", structure.getMultilineStartPattern()); + assertEquals("^.*?,\"?\\d{4}-\\d{2}-\\d{2}[T ]\\d{2}:\\d{2}", structure.getMultilineStartPattern()); assertEquals(Character.valueOf(','), structure.getDelimiter()); assertEquals(Character.valueOf('"'), structure.getQuote()); assertTrue(structure.getHasHeaderRow()); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/FileStructureUtilsTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/FileStructureUtilsTests.java index 264521e68fb51..c0adccd0eb477 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/FileStructureUtilsTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/FileStructureUtilsTests.java @@ -7,7 +7,6 @@ import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.xpack.core.ml.filestructurefinder.FieldStats; -import org.elasticsearch.xpack.ml.filestructurefinder.TimestampFormatFinder.TimestampMatch; import java.util.Arrays; import java.util.Collections; @@ -35,12 +34,12 @@ public void testMoreLikelyGivenKeyword() { public void testGuessTimestampGivenSingleSampleSingleField() { Map sample = Collections.singletonMap("field1", "2018-05-24T17:28:31,735"); - Tuple match = FileStructureUtils.guessTimestampField(explanation, Collections.singletonList(sample), + Tuple match = FileStructureUtils.guessTimestampField(explanation, Collections.singletonList(sample), EMPTY_OVERRIDES, NOOP_TIMEOUT_CHECKER); assertNotNull(match); assertEquals("field1", match.v1()); - assertThat(match.v2().javaTimestampFormats, contains("yyyy-MM-dd'T'HH:mm:ss,SSS")); - assertEquals("TIMESTAMP_ISO8601", match.v2().grokPatternName); + assertThat(match.v2().getJavaTimestampFormats(), contains("ISO8601")); + assertEquals("TIMESTAMP_ISO8601", match.v2().getGrokPatternName()); } public void testGuessTimestampGivenSingleSampleSingleFieldAndConsistentTimeFieldOverride() { @@ -48,12 +47,12 @@ public void testGuessTimestampGivenSingleSampleSingleFieldAndConsistentTimeField FileStructureOverrides overrides = FileStructureOverrides.builder().setTimestampField("field1").build(); Map sample = Collections.singletonMap("field1", "2018-05-24T17:28:31,735"); - Tuple match = FileStructureUtils.guessTimestampField(explanation, Collections.singletonList(sample), + Tuple match = FileStructureUtils.guessTimestampField(explanation, Collections.singletonList(sample), overrides, NOOP_TIMEOUT_CHECKER); assertNotNull(match); assertEquals("field1", match.v1()); - assertThat(match.v2().javaTimestampFormats, contains("yyyy-MM-dd'T'HH:mm:ss,SSS")); - assertEquals("TIMESTAMP_ISO8601", match.v2().grokPatternName); + assertThat(match.v2().getJavaTimestampFormats(), contains("ISO8601")); + assertEquals("TIMESTAMP_ISO8601", match.v2().getGrokPatternName()); } public void testGuessTimestampGivenSingleSampleSingleFieldAndImpossibleTimeFieldOverride() { @@ -73,12 +72,12 @@ public void testGuessTimestampGivenSingleSampleSingleFieldAndConsistentTimeForma FileStructureOverrides overrides = FileStructureOverrides.builder().setTimestampFormat("ISO8601").build(); Map sample = Collections.singletonMap("field1", "2018-05-24T17:28:31,735"); - Tuple match = FileStructureUtils.guessTimestampField(explanation, Collections.singletonList(sample), + Tuple match = FileStructureUtils.guessTimestampField(explanation, Collections.singletonList(sample), overrides, NOOP_TIMEOUT_CHECKER); assertNotNull(match); assertEquals("field1", match.v1()); - assertThat(match.v2().javaTimestampFormats, contains("yyyy-MM-dd'T'HH:mm:ss,SSS")); - assertEquals("TIMESTAMP_ISO8601", match.v2().grokPatternName); + assertThat(match.v2().getJavaTimestampFormats(), contains("ISO8601")); + assertEquals("TIMESTAMP_ISO8601", match.v2().getGrokPatternName()); } public void testGuessTimestampGivenSingleSampleSingleFieldAndImpossibleTimeFormatOverride() { @@ -97,18 +96,18 @@ public void testGuessTimestampGivenSingleSampleSingleFieldAndImpossibleTimeForma public void testGuessTimestampGivenSamplesWithSameSingleTimeField() { Map sample1 = Collections.singletonMap("field1", "2018-05-24T17:28:31,735"); Map sample2 = Collections.singletonMap("field1", "2018-05-24T17:33:39,406"); - Tuple match = FileStructureUtils.guessTimestampField(explanation, Arrays.asList(sample1, sample2), + Tuple match = FileStructureUtils.guessTimestampField(explanation, Arrays.asList(sample1, sample2), EMPTY_OVERRIDES, NOOP_TIMEOUT_CHECKER); assertNotNull(match); assertEquals("field1", match.v1()); - assertThat(match.v2().javaTimestampFormats, contains("yyyy-MM-dd'T'HH:mm:ss,SSS")); - assertEquals("TIMESTAMP_ISO8601", match.v2().grokPatternName); + assertThat(match.v2().getJavaTimestampFormats(), contains("ISO8601")); + assertEquals("TIMESTAMP_ISO8601", match.v2().getGrokPatternName()); } public void testGuessTimestampGivenSamplesWithOneSingleTimeFieldDifferentFormat() { Map sample1 = Collections.singletonMap("field1", "2018-05-24T17:28:31,735"); - Map sample2 = Collections.singletonMap("field1", "2018-05-24 17:33:39,406"); - Tuple match = FileStructureUtils.guessTimestampField(explanation, Arrays.asList(sample1, sample2), + Map sample2 = Collections.singletonMap("field1", "Thu May 24 17:33:39 2018"); + Tuple match = FileStructureUtils.guessTimestampField(explanation, Arrays.asList(sample1, sample2), EMPTY_OVERRIDES, NOOP_TIMEOUT_CHECKER); assertNull(match); } @@ -116,7 +115,7 @@ public void testGuessTimestampGivenSamplesWithOneSingleTimeFieldDifferentFormat( public void testGuessTimestampGivenSamplesWithDifferentSingleTimeField() { Map sample1 = Collections.singletonMap("field1", "2018-05-24T17:28:31,735"); Map sample2 = Collections.singletonMap("another_field", "2018-05-24T17:33:39,406"); - Tuple match = FileStructureUtils.guessTimestampField(explanation, Arrays.asList(sample1, sample2), + Tuple match = FileStructureUtils.guessTimestampField(explanation, Arrays.asList(sample1, sample2), EMPTY_OVERRIDES, NOOP_TIMEOUT_CHECKER); assertNull(match); } @@ -126,12 +125,12 @@ public void testGuessTimestampGivenSingleSampleManyFieldsOneTimeFormat() { sample.put("foo", "not a time"); sample.put("time", "2018-05-24 17:28:31,735"); sample.put("bar", 42); - Tuple match = FileStructureUtils.guessTimestampField(explanation, Collections.singletonList(sample), + Tuple match = FileStructureUtils.guessTimestampField(explanation, Collections.singletonList(sample), EMPTY_OVERRIDES, NOOP_TIMEOUT_CHECKER); assertNotNull(match); assertEquals("time", match.v1()); - assertThat(match.v2().javaTimestampFormats, contains("yyyy-MM-dd HH:mm:ss,SSS")); - assertEquals("TIMESTAMP_ISO8601", match.v2().grokPatternName); + assertThat(match.v2().getJavaTimestampFormats(), contains("yyyy-MM-dd HH:mm:ss,SSS")); + assertEquals("TIMESTAMP_ISO8601", match.v2().getGrokPatternName()); } public void testGuessTimestampGivenSamplesWithManyFieldsSameSingleTimeFormat() { @@ -143,12 +142,12 @@ public void testGuessTimestampGivenSamplesWithManyFieldsSameSingleTimeFormat() { sample2.put("foo", "whatever"); sample2.put("time", "2018-05-29 11:53:02,837"); sample2.put("bar", 17); - Tuple match = FileStructureUtils.guessTimestampField(explanation, Arrays.asList(sample1, sample2), + Tuple match = FileStructureUtils.guessTimestampField(explanation, Arrays.asList(sample1, sample2), EMPTY_OVERRIDES, NOOP_TIMEOUT_CHECKER); assertNotNull(match); assertEquals("time", match.v1()); - assertThat(match.v2().javaTimestampFormats, contains("yyyy-MM-dd HH:mm:ss,SSS")); - assertEquals("TIMESTAMP_ISO8601", match.v2().grokPatternName); + assertThat(match.v2().getJavaTimestampFormats(), contains("yyyy-MM-dd HH:mm:ss,SSS")); + assertEquals("TIMESTAMP_ISO8601", match.v2().getGrokPatternName()); } public void testGuessTimestampGivenSamplesWithManyFieldsSameTimeFieldDifferentTimeFormat() { @@ -160,7 +159,7 @@ public void testGuessTimestampGivenSamplesWithManyFieldsSameTimeFieldDifferentTi sample2.put("foo", "whatever"); sample2.put("time", "May 29 2018 11:53:02"); sample2.put("bar", 17); - Tuple match = FileStructureUtils.guessTimestampField(explanation, Arrays.asList(sample1, sample2), + Tuple match = FileStructureUtils.guessTimestampField(explanation, Arrays.asList(sample1, sample2), EMPTY_OVERRIDES, NOOP_TIMEOUT_CHECKER); assertNull(match); } @@ -174,12 +173,12 @@ public void testGuessTimestampGivenSamplesWithManyFieldsSameSingleTimeFormatDist sample2.put("red_herring", "whatever"); sample2.put("time", "2018-05-29 11:53:02,837"); sample2.put("bar", 17); - Tuple match = FileStructureUtils.guessTimestampField(explanation, Arrays.asList(sample1, sample2), + Tuple match = FileStructureUtils.guessTimestampField(explanation, Arrays.asList(sample1, sample2), EMPTY_OVERRIDES, NOOP_TIMEOUT_CHECKER); assertNotNull(match); assertEquals("time", match.v1()); - assertThat(match.v2().javaTimestampFormats, contains("yyyy-MM-dd HH:mm:ss,SSS")); - assertEquals("TIMESTAMP_ISO8601", match.v2().grokPatternName); + assertThat(match.v2().getJavaTimestampFormats(), contains("yyyy-MM-dd HH:mm:ss,SSS")); + assertEquals("TIMESTAMP_ISO8601", match.v2().getGrokPatternName()); } public void testGuessTimestampGivenSamplesWithManyFieldsSameSingleTimeFormatDistractionAfter() { @@ -191,12 +190,12 @@ public void testGuessTimestampGivenSamplesWithManyFieldsSameSingleTimeFormatDist sample2.put("foo", "whatever"); sample2.put("time", "May 29 2018 11:53:02"); sample2.put("red_herring", "17"); - Tuple match = FileStructureUtils.guessTimestampField(explanation, Arrays.asList(sample1, sample2), + Tuple match = FileStructureUtils.guessTimestampField(explanation, Arrays.asList(sample1, sample2), EMPTY_OVERRIDES, NOOP_TIMEOUT_CHECKER); assertNotNull(match); assertEquals("time", match.v1()); - assertThat(match.v2().javaTimestampFormats, contains("MMM dd yyyy HH:mm:ss", "MMM d yyyy HH:mm:ss")); - assertEquals("CISCOTIMESTAMP", match.v2().grokPatternName); + assertThat(match.v2().getJavaTimestampFormats(), contains("MMM dd yyyy HH:mm:ss", "MMM d yyyy HH:mm:ss")); + assertEquals("CISCOTIMESTAMP", match.v2().getGrokPatternName()); } public void testGuessTimestampGivenSamplesWithManyFieldsInconsistentTimeFields() { @@ -208,7 +207,7 @@ public void testGuessTimestampGivenSamplesWithManyFieldsInconsistentTimeFields() sample2.put("foo", "whatever"); sample2.put("time2", "May 29 2018 11:53:02"); sample2.put("bar", 42); - Tuple match = FileStructureUtils.guessTimestampField(explanation, Arrays.asList(sample1, sample2), + Tuple match = FileStructureUtils.guessTimestampField(explanation, Arrays.asList(sample1, sample2), EMPTY_OVERRIDES, NOOP_TIMEOUT_CHECKER); assertNull(match); } @@ -224,12 +223,12 @@ public void testGuessTimestampGivenSamplesWithManyFieldsInconsistentAndConsisten sample2.put("time2", "May 10 2018 11:53:02"); sample2.put("time3", "Thu, May 10 2018 11:53:02"); sample2.put("bar", 42); - Tuple match = FileStructureUtils.guessTimestampField(explanation, Arrays.asList(sample1, sample2), + Tuple match = FileStructureUtils.guessTimestampField(explanation, Arrays.asList(sample1, sample2), EMPTY_OVERRIDES, NOOP_TIMEOUT_CHECKER); assertNotNull(match); assertEquals("time2", match.v1()); - assertThat(match.v2().javaTimestampFormats, contains("MMM dd yyyy HH:mm:ss", "MMM d yyyy HH:mm:ss")); - assertEquals("CISCOTIMESTAMP", match.v2().grokPatternName); + assertThat(match.v2().getJavaTimestampFormats(), contains("MMM dd yyyy HH:mm:ss", "MMM d yyyy HH:mm:ss")); + assertEquals("CISCOTIMESTAMP", match.v2().getGrokPatternName()); } public void testGuessMappingGivenNothing() { @@ -273,7 +272,9 @@ public void testGuessMappingGivenLong() { } public void testGuessMappingGivenDate() { - Map expected = Collections.singletonMap(FileStructureUtils.MAPPING_TYPE_SETTING, "date"); + Map expected = new HashMap<>(); + expected.put(FileStructureUtils.MAPPING_TYPE_SETTING, "date"); + expected.put(FileStructureUtils.MAPPING_FORMAT_SETTING, "iso8601"); assertEquals(expected, guessMapping(explanation, "foo", Arrays.asList("2018-06-11T13:26:47Z", "2018-06-11T13:27:12Z"))); } @@ -347,18 +348,19 @@ public void testGuessMappingsAndCalculateFieldStats() { public void testMakeIngestPipelineDefinitionGivenStructuredWithoutTimestamp() { - assertNull(FileStructureUtils.makeIngestPipelineDefinition(null, null, null, false)); + assertNull(FileStructureUtils.makeIngestPipelineDefinition(null, Collections.emptyMap(), null, null, false)); } @SuppressWarnings("unchecked") public void testMakeIngestPipelineDefinitionGivenStructuredWithTimestamp() { String timestampField = randomAlphaOfLength(10); - List timestampFormats = randomFrom(TimestampFormatFinder.ORDERED_CANDIDATE_FORMATS).javaTimestampFormats; + List timestampFormats = randomFrom(Collections.singletonList("ISO8601"), + Arrays.asList("EEE MMM dd HH:mm:ss yyyy", "EEE MMM d HH:mm:ss yyyy")); boolean needClientTimezone = randomBoolean(); - Map pipeline = - FileStructureUtils.makeIngestPipelineDefinition(null, timestampField, timestampFormats, needClientTimezone); + Map pipeline = FileStructureUtils.makeIngestPipelineDefinition(null, Collections.emptyMap(), timestampField, + timestampFormats, needClientTimezone); assertNotNull(pipeline); assertEquals("Ingest pipeline created by file structure finder", pipeline.remove("description")); @@ -382,11 +384,12 @@ public void testMakeIngestPipelineDefinitionGivenSemiStructured() { String grokPattern = randomAlphaOfLength(100); String timestampField = randomAlphaOfLength(10); - List timestampFormats = randomFrom(TimestampFormatFinder.ORDERED_CANDIDATE_FORMATS).javaTimestampFormats; + List timestampFormats = randomFrom(Collections.singletonList("ISO8601"), + Arrays.asList("EEE MMM dd HH:mm:ss yyyy", "EEE MMM d HH:mm:ss yyyy")); boolean needClientTimezone = randomBoolean(); - Map pipeline = - FileStructureUtils.makeIngestPipelineDefinition(grokPattern, timestampField, timestampFormats, needClientTimezone); + Map pipeline = FileStructureUtils.makeIngestPipelineDefinition(grokPattern, Collections.emptyMap(), timestampField, + timestampFormats, needClientTimezone); assertNotNull(pipeline); assertEquals("Ingest pipeline created by file structure finder", pipeline.remove("description")); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/GrokPatternCreatorTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/GrokPatternCreatorTests.java index dc48662fb35f7..7e6363602dcdd 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/GrokPatternCreatorTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/GrokPatternCreatorTests.java @@ -43,7 +43,7 @@ public void testPopulatePrefacesAndEpiloguesGivenTimestamp() { Collection prefaces = new ArrayList<>(); Collection epilogues = new ArrayList<>(); - candidate.processCaptures(fieldNameCountStore, matchingStrings, prefaces, epilogues, null, null, NOOP_TIMEOUT_CHECKER); + candidate.processCaptures(explanation, fieldNameCountStore, matchingStrings, prefaces, epilogues, null, null, NOOP_TIMEOUT_CHECKER); assertThat(prefaces, containsInAnyOrder("[", "[", "junk [", "[")); assertThat(epilogues, containsInAnyOrder("] DEBUG ", "] ERROR ", "] INFO ", "] DEBUG ")); @@ -60,7 +60,7 @@ public void testPopulatePrefacesAndEpiloguesGivenEmailAddress() { Collection prefaces = new ArrayList<>(); Collection epilogues = new ArrayList<>(); - candidate.processCaptures(fieldNameCountStore, matchingStrings, prefaces, epilogues, null, null, NOOP_TIMEOUT_CHECKER); + candidate.processCaptures(explanation, fieldNameCountStore, matchingStrings, prefaces, epilogues, null, null, NOOP_TIMEOUT_CHECKER); assertThat(prefaces, containsInAnyOrder("before ", "abc ", "")); assertThat(epilogues, containsInAnyOrder(" after", " xyz", "")); @@ -73,7 +73,8 @@ public void testAppendBestGrokMatchForStringsGivenTimestampsAndLogLevels() { "junk [2018-01-22T07:33:23] INFO ", "[2018-01-21T03:33:23] DEBUG "); - GrokPatternCreator grokPatternCreator = new GrokPatternCreator(explanation, snippets, null, null, NOOP_TIMEOUT_CHECKER); + GrokPatternCreator grokPatternCreator = new GrokPatternCreator(explanation, snippets, null, null, Collections.emptyMap(), + NOOP_TIMEOUT_CHECKER); grokPatternCreator.appendBestGrokMatchForStrings(false, snippets, false, 0); assertEquals(".*?\\[%{TIMESTAMP_ISO8601:extra_timestamp}\\] %{LOGLEVEL:loglevel} ", @@ -87,7 +88,8 @@ public void testAppendBestGrokMatchForStringsGivenNumbersInBrackets() { " (4)", " (-5) "); - GrokPatternCreator grokPatternCreator = new GrokPatternCreator(explanation, snippets, null, null, NOOP_TIMEOUT_CHECKER); + GrokPatternCreator grokPatternCreator = new GrokPatternCreator(explanation, snippets, null, null, Collections.emptyMap(), + NOOP_TIMEOUT_CHECKER); grokPatternCreator.appendBestGrokMatchForStrings(false, snippets, false, 0); assertEquals(".*?\\(%{INT:field}\\).*?", grokPatternCreator.getOverallGrokPatternBuilder().toString()); @@ -99,7 +101,8 @@ public void testAppendBestGrokMatchForStringsGivenNegativeNumbersWithoutBreak() "prior to-3", "-4"); - GrokPatternCreator grokPatternCreator = new GrokPatternCreator(explanation, snippets, null, null, NOOP_TIMEOUT_CHECKER); + GrokPatternCreator grokPatternCreator = new GrokPatternCreator(explanation, snippets, null, null, Collections.emptyMap(), + NOOP_TIMEOUT_CHECKER); grokPatternCreator.appendBestGrokMatchForStrings(false, snippets, false, 0); // It seems sensible that we don't detect these suffices as either base 10 or base 16 numbers @@ -113,7 +116,8 @@ public void testAppendBestGrokMatchForStringsGivenHexNumbers() { " -123", "1f is hex"); - GrokPatternCreator grokPatternCreator = new GrokPatternCreator(explanation, snippets, null, null, NOOP_TIMEOUT_CHECKER); + GrokPatternCreator grokPatternCreator = new GrokPatternCreator(explanation, snippets, null, null, Collections.emptyMap(), + NOOP_TIMEOUT_CHECKER); grokPatternCreator.appendBestGrokMatchForStrings(false, snippets, false, 0); assertEquals(".*?%{BASE16NUM:field}.*?", grokPatternCreator.getOverallGrokPatternBuilder().toString()); @@ -124,7 +128,8 @@ public void testAppendBestGrokMatchForStringsGivenHostnamesWithNumbers() { Collection snippets = Arrays.asList(" mappings = new HashMap<>(); - GrokPatternCreator grokPatternCreator = new GrokPatternCreator(explanation, sampleMessages, mappings, null, + GrokPatternCreator grokPatternCreator = new GrokPatternCreator(explanation, sampleMessages, mappings, null, Collections.emptyMap(), NOOP_TIMEOUT_CHECKER); assertEquals("%{SYSLOGTIMESTAMP:timestamp} .*? .*?\\[%{INT:field}\\]: %{LOGLEVEL:loglevel} \\(.*? .*? .*?\\) .*? " + @@ -216,7 +225,7 @@ public void testCreateGrokPatternFromExamplesGivenCatalinaLogs() { "Invalid chunk ignored."); Map mappings = new HashMap<>(); - GrokPatternCreator grokPatternCreator = new GrokPatternCreator(explanation, sampleMessages, mappings, null, + GrokPatternCreator grokPatternCreator = new GrokPatternCreator(explanation, sampleMessages, mappings, null, Collections.emptyMap(), NOOP_TIMEOUT_CHECKER); assertEquals("%{CATALINA_DATESTAMP:timestamp} .*? .*?\\n%{LOGLEVEL:loglevel}: .*", @@ -239,7 +248,7 @@ public void testCreateGrokPatternFromExamplesGivenMultiTimestampLogs() { "Info\tsshd\tsubsystem request for sftp"); Map mappings = new HashMap<>(); - GrokPatternCreator grokPatternCreator = new GrokPatternCreator(explanation, sampleMessages, mappings, null, + GrokPatternCreator grokPatternCreator = new GrokPatternCreator(explanation, sampleMessages, mappings, null, Collections.emptyMap(), NOOP_TIMEOUT_CHECKER); assertEquals("%{INT:field}\\t%{TIMESTAMP_ISO8601:timestamp}\\t%{TIMESTAMP_ISO8601:extra_timestamp}\\t%{INT:field2}\\t.*?\\t" + @@ -247,7 +256,101 @@ public void testCreateGrokPatternFromExamplesGivenMultiTimestampLogs() { grokPatternCreator.createGrokPatternFromExamples("TIMESTAMP_ISO8601", "timestamp")); assertEquals(5, mappings.size()); assertEquals(Collections.singletonMap(FileStructureUtils.MAPPING_TYPE_SETTING, "long"), mappings.get("field")); - assertEquals(Collections.singletonMap(FileStructureUtils.MAPPING_TYPE_SETTING, "date"), mappings.get("extra_timestamp")); + Map expectedDateMapping = new HashMap<>(); + expectedDateMapping.put(FileStructureUtils.MAPPING_TYPE_SETTING, "date"); + expectedDateMapping.put(FileStructureUtils.MAPPING_FORMAT_SETTING, "iso8601"); + assertEquals(expectedDateMapping, mappings.get("extra_timestamp")); + assertEquals(Collections.singletonMap(FileStructureUtils.MAPPING_TYPE_SETTING, "long"), mappings.get("field2")); + assertEquals(Collections.singletonMap(FileStructureUtils.MAPPING_TYPE_SETTING, "ip"), mappings.get("ipaddress")); + assertEquals(Collections.singletonMap(FileStructureUtils.MAPPING_TYPE_SETTING, "keyword"), mappings.get("loglevel")); + } + + public void testCreateGrokPatternFromExamplesGivenMultiTimestampLogsAndIndeterminateFormat() { + + // Two timestamps: one ISO8601, one indeterminate day/month + Collection sampleMessages = Arrays.asList( + "559550912540598297\t2016-04-20T14:06:53\t20/04/2016 21:06:53,123456\t38545844\tserv02nw07\t192.168.114.28\tAuthpriv\t" + + "Info\tsshd\tsubsystem request for sftp", + "559550912548986880\t2016-04-20T14:06:53\t20/04/2016 21:06:53,123456\t9049724\tserv02nw03\t10.120.48.147\tAuthpriv\t" + + "Info\tsshd\tsubsystem request for sftp", + "559550912548986887\t2016-04-20T14:06:53\t20/04/2016 21:06:53,123456\t884343\tserv02tw03\t192.168.121.189\tAuthpriv\t" + + "Info\tsshd\tsubsystem request for sftp", + "559550912603512850\t2016-04-20T14:06:53\t20/04/2016 21:06:53,123456\t8907014\tserv02nw01\t192.168.118.208\tAuthpriv\t" + + "Info\tsshd\tsubsystem request for sftp"); + + Map mappings = new HashMap<>(); + GrokPatternCreator grokPatternCreator = new GrokPatternCreator(explanation, sampleMessages, mappings, null, Collections.emptyMap(), + NOOP_TIMEOUT_CHECKER); + + assertEquals("%{INT:field}\\t%{TIMESTAMP_ISO8601:timestamp}\\t%{DATESTAMP:extra_timestamp}\\t%{INT:field2}\\t.*?\\t" + + "%{IP:ipaddress}\\t.*?\\t%{LOGLEVEL:loglevel}\\t.*", + grokPatternCreator.createGrokPatternFromExamples("TIMESTAMP_ISO8601", "timestamp")); + assertEquals(5, mappings.size()); + assertEquals(Collections.singletonMap(FileStructureUtils.MAPPING_TYPE_SETTING, "long"), mappings.get("field")); + Map expectedDateMapping = new HashMap<>(); + expectedDateMapping.put(FileStructureUtils.MAPPING_TYPE_SETTING, "date"); + expectedDateMapping.put(FileStructureUtils.MAPPING_FORMAT_SETTING, "dd/MM/yyyy HH:mm:ss,SSSSSS"); + assertEquals(expectedDateMapping, mappings.get("extra_timestamp")); + assertEquals(Collections.singletonMap(FileStructureUtils.MAPPING_TYPE_SETTING, "long"), mappings.get("field2")); + assertEquals(Collections.singletonMap(FileStructureUtils.MAPPING_TYPE_SETTING, "ip"), mappings.get("ipaddress")); + assertEquals(Collections.singletonMap(FileStructureUtils.MAPPING_TYPE_SETTING, "keyword"), mappings.get("loglevel")); + } + + public void testCreateGrokPatternFromExamplesGivenMultiTimestampLogsAndCustomDefinition() { + + // Two timestamps: one custom, one built-in + Collection sampleMessages = Arrays.asList( + "559550912540598297\t4/20/2016 2:06PM\t2016-04-20T21:06:53Z\t38545844\tserv02nw07\t192.168.114.28\tAuthpriv\t" + + "Info\tsshd\tsubsystem request for sftp", + "559550912548986880\t4/20/2016 2:06PM\t2016-04-20T21:06:53Z\t9049724\tserv02nw03\t10.120.48.147\tAuthpriv\t" + + "Info\tsshd\tsubsystem request for sftp", + "559550912548986887\t4/20/2016 2:06PM\t2016-04-20T21:06:53Z\t884343\tserv02tw03\t192.168.121.189\tAuthpriv\t" + + "Info\tsshd\tsubsystem request for sftp", + "559550912603512850\t4/20/2016 2:06PM\t2016-04-20T21:06:53Z\t8907014\tserv02nw01\t192.168.118.208\tAuthpriv\t" + + "Info\tsshd\tsubsystem request for sftp"); + + Map mappings = new HashMap<>(); + GrokPatternCreator grokPatternCreator = new GrokPatternCreator(explanation, sampleMessages, mappings, null, + Collections.singletonMap("CUSTOM_TIMESTAMP", "%{MONTHNUM}/%{MONTHDAY}/%{YEAR} %{HOUR}:%{MINUTE}(?:AM|PM)"), + NOOP_TIMEOUT_CHECKER); + + assertEquals("%{INT:field}\\t%{CUSTOM_TIMESTAMP:timestamp}\\t%{TIMESTAMP_ISO8601:extra_timestamp}\\t%{INT:field2}\\t.*?\\t" + + "%{IP:ipaddress}\\t.*?\\t%{LOGLEVEL:loglevel}\\t.*", + grokPatternCreator.createGrokPatternFromExamples("CUSTOM_TIMESTAMP", "timestamp")); + assertEquals(5, mappings.size()); + assertEquals(Collections.singletonMap(FileStructureUtils.MAPPING_TYPE_SETTING, "long"), mappings.get("field")); + Map expectedDateMapping = new HashMap<>(); + expectedDateMapping.put(FileStructureUtils.MAPPING_TYPE_SETTING, "date"); + expectedDateMapping.put(FileStructureUtils.MAPPING_FORMAT_SETTING, "iso8601"); + assertEquals(expectedDateMapping, mappings.get("extra_timestamp")); + assertEquals(Collections.singletonMap(FileStructureUtils.MAPPING_TYPE_SETTING, "long"), mappings.get("field2")); + assertEquals(Collections.singletonMap(FileStructureUtils.MAPPING_TYPE_SETTING, "ip"), mappings.get("ipaddress")); + assertEquals(Collections.singletonMap(FileStructureUtils.MAPPING_TYPE_SETTING, "keyword"), mappings.get("loglevel")); + } + + public void testCreateGrokPatternFromExamplesGivenTimestampAndTimeWithoutDate() { + + // Two timestamps: one with date, one without + Collection sampleMessages = Arrays.asList( + "559550912540598297\t2016-04-20T14:06:53\t21:06:53.123456\t38545844\tserv02nw07\t192.168.114.28\tAuthpriv\t" + + "Info\tsshd\tsubsystem request for sftp", + "559550912548986880\t2016-04-20T14:06:53\t21:06:53.123456\t9049724\tserv02nw03\t10.120.48.147\tAuthpriv\t" + + "Info\tsshd\tsubsystem request for sftp", + "559550912548986887\t2016-04-20T14:06:53\t21:06:53.123456\t884343\tserv02tw03\t192.168.121.189\tAuthpriv\t" + + "Info\tsshd\tsubsystem request for sftp", + "559550912603512850\t2016-04-20T14:06:53\t21:06:53.123456\t8907014\tserv02nw01\t192.168.118.208\tAuthpriv\t" + + "Info\tsshd\tsubsystem request for sftp"); + + Map mappings = new HashMap<>(); + GrokPatternCreator grokPatternCreator = new GrokPatternCreator(explanation, sampleMessages, mappings, null, Collections.emptyMap(), + NOOP_TIMEOUT_CHECKER); + + assertEquals("%{INT:field}\\t%{TIMESTAMP_ISO8601:timestamp}\\t%{TIME:time}\\t%{INT:field2}\\t.*?\\t" + + "%{IP:ipaddress}\\t.*?\\t%{LOGLEVEL:loglevel}\\t.*", + grokPatternCreator.createGrokPatternFromExamples("TIMESTAMP_ISO8601", "timestamp")); + assertEquals(5, mappings.size()); + assertEquals(Collections.singletonMap(FileStructureUtils.MAPPING_TYPE_SETTING, "long"), mappings.get("field")); + assertEquals(Collections.singletonMap(FileStructureUtils.MAPPING_TYPE_SETTING, "keyword"), mappings.get("time")); assertEquals(Collections.singletonMap(FileStructureUtils.MAPPING_TYPE_SETTING, "long"), mappings.get("field2")); assertEquals(Collections.singletonMap(FileStructureUtils.MAPPING_TYPE_SETTING, "ip"), mappings.get("ipaddress")); assertEquals(Collections.singletonMap(FileStructureUtils.MAPPING_TYPE_SETTING, "keyword"), mappings.get("loglevel")); @@ -273,7 +376,7 @@ public void testFindFullLineGrokPatternGivenApacheCombinedLogs() { "AppleWebKit/537.36 (KHTML, like Gecko) Chrome/32.0.1700.77 Safari/537.36\""); Map mappings = new HashMap<>(); - GrokPatternCreator grokPatternCreator = new GrokPatternCreator(explanation, sampleMessages, mappings, null, + GrokPatternCreator grokPatternCreator = new GrokPatternCreator(explanation, sampleMessages, mappings, null, Collections.emptyMap(), NOOP_TIMEOUT_CHECKER); assertEquals(new Tuple<>("timestamp", "%{COMBINEDAPACHELOG}"), @@ -304,7 +407,8 @@ public void testAdjustForPunctuationGivenCommonPrefix() { ",\"rule1\",\"Accept\",\"\",\"\",\"\",\"0000000000000000\"" ); - GrokPatternCreator grokPatternCreator = new GrokPatternCreator(explanation, snippets, null, null, NOOP_TIMEOUT_CHECKER); + GrokPatternCreator grokPatternCreator = new GrokPatternCreator(explanation, snippets, null, null, Collections.emptyMap(), + NOOP_TIMEOUT_CHECKER); Collection adjustedSnippets = grokPatternCreator.adjustForPunctuation(snippets); assertEquals("\",", grokPatternCreator.getOverallGrokPatternBuilder().toString()); @@ -321,7 +425,8 @@ public void testAdjustForPunctuationGivenNoCommonPrefix() { "was added by 'User1'(id:2) to servergroup 'GAME'(id:9)" ); - GrokPatternCreator grokPatternCreator = new GrokPatternCreator(explanation, snippets, null, null, NOOP_TIMEOUT_CHECKER); + GrokPatternCreator grokPatternCreator = new GrokPatternCreator(explanation, snippets, null, null, Collections.emptyMap(), + NOOP_TIMEOUT_CHECKER); Collection adjustedSnippets = grokPatternCreator.adjustForPunctuation(snippets); assertEquals("", grokPatternCreator.getOverallGrokPatternBuilder().toString()); @@ -346,18 +451,61 @@ public void testValidateFullLineGrokPatternGivenValid() { "559550912603512850\t2016-04-20T14:06:53\t2016-04-20T21:06:53Z\t8907014\tserv02nw01\t192.168.118.208\tAuthpriv\t" + "Info\tsshd\tsubsystem request for sftp"); + Map mappings = new HashMap<>(); + GrokPatternCreator grokPatternCreator = new GrokPatternCreator(explanation, sampleMessages, mappings, null, Collections.emptyMap(), + NOOP_TIMEOUT_CHECKER); + + grokPatternCreator.validateFullLineGrokPattern(grokPattern, timestampField); + assertEquals(9, mappings.size()); + assertEquals(Collections.singletonMap(FileStructureUtils.MAPPING_TYPE_SETTING, "long"), mappings.get("serial_no")); + Map expectedDateMapping = new HashMap<>(); + expectedDateMapping.put(FileStructureUtils.MAPPING_TYPE_SETTING, "date"); + expectedDateMapping.put(FileStructureUtils.MAPPING_FORMAT_SETTING, "iso8601"); + assertEquals(expectedDateMapping, mappings.get("local_timestamp")); + assertEquals(Collections.singletonMap(FileStructureUtils.MAPPING_TYPE_SETTING, "long"), mappings.get("user_id")); + assertEquals(Collections.singletonMap(FileStructureUtils.MAPPING_TYPE_SETTING, "keyword"), mappings.get("host")); + assertEquals(Collections.singletonMap(FileStructureUtils.MAPPING_TYPE_SETTING, "ip"), mappings.get("client_ip")); + assertEquals(Collections.singletonMap(FileStructureUtils.MAPPING_TYPE_SETTING, "keyword"), mappings.get("method")); + assertEquals(Collections.singletonMap(FileStructureUtils.MAPPING_TYPE_SETTING, "keyword"), mappings.get("severity")); + assertEquals(Collections.singletonMap(FileStructureUtils.MAPPING_TYPE_SETTING, "keyword"), mappings.get("program")); + assertEquals(Collections.singletonMap(FileStructureUtils.MAPPING_TYPE_SETTING, "keyword"), mappings.get("message")); + } + + public void testValidateFullLineGrokPatternGivenValidAndCustomDefinition() { + + String timestampField = "local_timestamp"; + String grokPattern = "%{INT:serial_no}\\t%{CUSTOM_TIMESTAMP:local_timestamp}\\t%{TIMESTAMP_ISO8601:utc_timestamp}\\t" + + "%{INT:user_id}\\t%{HOSTNAME:host}\\t%{IP:client_ip}\\t%{WORD:method}\\t%{LOGLEVEL:severity}\\t%{PROG:program}\\t" + + "%{GREEDYDATA:message}"; + + // Two timestamps: one local, one UTC + Collection sampleMessages = Arrays.asList( + "559550912540598297\t4/20/2016 2:06PM\t2016-04-20T21:06:53Z\t38545844\tserv02nw07\t192.168.114.28\tAuthpriv\t" + + "Info\tsshd\tsubsystem request for sftp", + "559550912548986880\t4/20/2016 2:06PM\t2016-04-20T21:06:53Z\t9049724\tserv02nw03\t10.120.48.147\tAuthpriv\t" + + "Info\tsshd\tsubsystem request for sftp", + "559550912548986887\t4/20/2016 2:06PM\t2016-04-20T21:06:53Z\t884343\tserv02tw03\t192.168.121.189\tAuthpriv\t" + + "Info\tsshd\tsubsystem request for sftp", + "559550912603512850\t4/20/2016 2:06PM\t2016-04-20T21:06:53Z\t8907014\tserv02nw01\t192.168.118.208\tAuthpriv\t" + + "Info\tsshd\tsubsystem request for sftp"); + Map mappings = new HashMap<>(); GrokPatternCreator grokPatternCreator = new GrokPatternCreator(explanation, sampleMessages, mappings, null, + Collections.singletonMap("CUSTOM_TIMESTAMP", "%{MONTHNUM}/%{MONTHDAY}/%{YEAR} %{HOUR}:%{MINUTE}(?:AM|PM)"), NOOP_TIMEOUT_CHECKER); grokPatternCreator.validateFullLineGrokPattern(grokPattern, timestampField); assertEquals(9, mappings.size()); assertEquals(Collections.singletonMap(FileStructureUtils.MAPPING_TYPE_SETTING, "long"), mappings.get("serial_no")); - assertEquals(Collections.singletonMap(FileStructureUtils.MAPPING_TYPE_SETTING, "date"), mappings.get("local_timestamp")); + Map expectedDateMapping = new HashMap<>(); + expectedDateMapping.put(FileStructureUtils.MAPPING_TYPE_SETTING, "date"); + expectedDateMapping.put(FileStructureUtils.MAPPING_FORMAT_SETTING, "iso8601"); + assertEquals(expectedDateMapping, mappings.get("utc_timestamp")); assertEquals(Collections.singletonMap(FileStructureUtils.MAPPING_TYPE_SETTING, "long"), mappings.get("user_id")); assertEquals(Collections.singletonMap(FileStructureUtils.MAPPING_TYPE_SETTING, "keyword"), mappings.get("host")); assertEquals(Collections.singletonMap(FileStructureUtils.MAPPING_TYPE_SETTING, "ip"), mappings.get("client_ip")); assertEquals(Collections.singletonMap(FileStructureUtils.MAPPING_TYPE_SETTING, "keyword"), mappings.get("method")); + assertEquals(Collections.singletonMap(FileStructureUtils.MAPPING_TYPE_SETTING, "keyword"), mappings.get("severity")); assertEquals(Collections.singletonMap(FileStructureUtils.MAPPING_TYPE_SETTING, "keyword"), mappings.get("program")); assertEquals(Collections.singletonMap(FileStructureUtils.MAPPING_TYPE_SETTING, "keyword"), mappings.get("message")); } @@ -376,7 +524,7 @@ public void testValidateFullLineGrokPatternGivenInvalid() { "Sep 8 11:55:42 linux named[22529]: error (unexpected RCODE REFUSED) resolving 'b.akamaiedge.net/A/IN': 95.110.64.205#53"); Map mappings = new HashMap<>(); - GrokPatternCreator grokPatternCreator = new GrokPatternCreator(explanation, sampleMessages, mappings, null, + GrokPatternCreator grokPatternCreator = new GrokPatternCreator(explanation, sampleMessages, mappings, null, Collections.emptyMap(), NOOP_TIMEOUT_CHECKER); IllegalArgumentException e = expectThrows(IllegalArgumentException.class, diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/TextLogFileStructureFinderTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/TextLogFileStructureFinderTests.java index 6cf4d61cf176c..6ac672f61780e 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/TextLogFileStructureFinderTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/TextLogFileStructureFinderTests.java @@ -5,11 +5,9 @@ */ package org.elasticsearch.xpack.ml.filestructurefinder; -import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.xpack.core.ml.filestructurefinder.FieldStats; import org.elasticsearch.xpack.core.ml.filestructurefinder.FileStructure; -import org.elasticsearch.xpack.ml.filestructurefinder.TimestampFormatFinder.TimestampMatch; import java.util.Collections; import java.util.Set; @@ -20,90 +18,6 @@ public class TextLogFileStructureFinderTests extends FileStructureTestCase { - private static final String EXCEPTION_TRACE_SAMPLE = - "[2018-02-28T14:49:40,517][DEBUG][o.e.a.b.TransportShardBulkAction] [an_index][2] failed to execute bulk item " + - "(index) BulkShardRequest [[an_index][2]] containing [33] requests\n" + - "java.lang.IllegalArgumentException: Document contains at least one immense term in field=\"message.keyword\" (whose UTF8 " + - "encoding is longer than the max length 32766), all of which were skipped. Please correct the analyzer to not produce " + - "such terms. The prefix of the first immense term is: '[60, 83, 79, 65, 80, 45, 69, 78, 86, 58, 69, 110, 118, 101, 108, " + - "111, 112, 101, 32, 120, 109, 108, 110, 115, 58, 83, 79, 65, 80, 45]...', original message: bytes can be at most 32766 " + - "in length; got 49023\n" + - "\tat org.apache.lucene.index.DefaultIndexingChain$PerField.invert(DefaultIndexingChain.java:796) " + - "~[lucene-core-7.2.1.jar:7.2.1 b2b6438b37073bee1fca40374e85bf91aa457c0b - ubuntu - 2018-01-10 00:48:43]\n" + - "\tat org.apache.lucene.index.DefaultIndexingChain.processField(DefaultIndexingChain.java:430) " + - "~[lucene-core-7.2.1.jar:7.2.1 b2b6438b37073bee1fca40374e85bf91aa457c0b - ubuntu - 2018-01-10 00:48:43]\n" + - "\tat org.apache.lucene.index.DefaultIndexingChain.processDocument(DefaultIndexingChain.java:392) " + - "~[lucene-core-7.2.1.jar:7.2.1 b2b6438b37073bee1fca40374e85bf91aa457c0b - ubuntu - 2018-01-10 00:48:43]\n" + - "\tat org.apache.lucene.index.DocumentsWriterPerThread.updateDocument(DocumentsWriterPerThread.java:240) " + - "~[lucene-core-7.2.1.jar:7.2.1 b2b6438b37073bee1fca40374e85bf91aa457c0b - ubuntu - 2018-01-10 00:48:43]\n" + - "\tat org.apache.lucene.index.DocumentsWriter.updateDocument(DocumentsWriter.java:496) " + - "~[lucene-core-7.2.1.jar:7.2.1 b2b6438b37073bee1fca40374e85bf91aa457c0b - ubuntu - 2018-01-10 00:48:43]\n" + - "\tat org.apache.lucene.index.IndexWriter.updateDocument(IndexWriter.java:1729) " + - "~[lucene-core-7.2.1.jar:7.2.1 b2b6438b37073bee1fca40374e85bf91aa457c0b - ubuntu - 2018-01-10 00:48:43]\n" + - "\tat org.apache.lucene.index.IndexWriter.addDocument(IndexWriter.java:1464) " + - "~[lucene-core-7.2.1.jar:7.2.1 b2b6438b37073bee1fca40374e85bf91aa457c0b - ubuntu - 2018-01-10 00:48:43]\n" + - "\tat org.elasticsearch.index.engine.InternalEngine.index(InternalEngine.java:1070) ~[elasticsearch-6.2.1.jar:6.2.1]\n" + - "\tat org.elasticsearch.index.engine.InternalEngine.indexIntoLucene(InternalEngine.java:1012) " + - "~[elasticsearch-6.2.1.jar:6.2.1]\n" + - "\tat org.elasticsearch.index.engine.InternalEngine.index(InternalEngine.java:878) ~[elasticsearch-6.2.1.jar:6.2.1]\n" + - "\tat org.elasticsearch.index.shard.IndexShard.index(IndexShard.java:738) ~[elasticsearch-6.2.1.jar:6.2.1]\n" + - "\tat org.elasticsearch.index.shard.IndexShard.applyIndexOperation(IndexShard.java:707) ~[elasticsearch-6.2.1.jar:6.2.1]\n" + - "\tat org.elasticsearch.index.shard.IndexShard.applyIndexOperationOnPrimary(IndexShard.java:673) " + - "~[elasticsearch-6.2.1.jar:6.2.1]\n" + - "\tat org.elasticsearch.action.bulk.TransportShardBulkAction.executeIndexRequestOnPrimary(TransportShardBulkAction.java:548) " + - "~[elasticsearch-6.2.1.jar:6.2.1]\n" + - "\tat org.elasticsearch.action.bulk.TransportShardBulkAction.executeIndexRequest(TransportShardBulkAction.java:140) " + - "[elasticsearch-6.2.1.jar:6.2.1]\n" + - "\tat org.elasticsearch.action.bulk.TransportShardBulkAction.executeBulkItemRequest(TransportShardBulkAction.java:236) " + - "[elasticsearch-6.2.1.jar:6.2.1]\n" + - "\tat org.elasticsearch.action.bulk.TransportShardBulkAction.performOnPrimary(TransportShardBulkAction.java:123) " + - "[elasticsearch-6.2.1.jar:6.2.1]\n" + - "\tat org.elasticsearch.action.bulk.TransportShardBulkAction.shardOperationOnPrimary(TransportShardBulkAction.java:110) " + - "[elasticsearch-6.2.1.jar:6.2.1]\n" + - "\tat org.elasticsearch.action.bulk.TransportShardBulkAction.shardOperationOnPrimary(TransportShardBulkAction.java:72) " + - "[elasticsearch-6.2.1.jar:6.2.1]\n" + - "\tat org.elasticsearch.action.support.replication.TransportReplicationAction$PrimaryShardReference.perform" + - "(TransportReplicationAction.java:1034) [elasticsearch-6.2.1.jar:6.2.1]\n" + - "\tat org.elasticsearch.action.support.replication.TransportReplicationAction$PrimaryShardReference.perform" + - "(TransportReplicationAction.java:1012) [elasticsearch-6.2.1.jar:6.2.1]\n" + - "\tat org.elasticsearch.action.support.replication.ReplicationOperation.execute(ReplicationOperation.java:103) " + - "[elasticsearch-6.2.1.jar:6.2.1]\n" + - "\tat org.elasticsearch.action.support.replication.TransportReplicationAction$AsyncPrimaryAction.onResponse" + - "(TransportReplicationAction.java:359) [elasticsearch-6.2.1.jar:6.2.1]\n" + - "\tat org.elasticsearch.action.support.replication.TransportReplicationAction$AsyncPrimaryAction.onResponse" + - "(TransportReplicationAction.java:299) [elasticsearch-6.2.1.jar:6.2.1]\n" + - "\tat org.elasticsearch.action.support.replication.TransportReplicationAction$1.onResponse" + - "(TransportReplicationAction.java:975) [elasticsearch-6.2.1.jar:6.2.1]\n" + - "\tat org.elasticsearch.action.support.replication.TransportReplicationAction$1.onResponse" + - "(TransportReplicationAction.java:972) [elasticsearch-6.2.1.jar:6.2.1]\n" + - "\tat org.elasticsearch.index.shard.IndexShardOperationPermits.acquire(IndexShardOperationPermits.java:238) " + - "[elasticsearch-6.2.1.jar:6.2.1]\n" + - "\tat org.elasticsearch.index.shard.IndexShard.acquirePrimaryOperationPermit(IndexShard.java:2220) " + - "[elasticsearch-6.2.1.jar:6.2.1]\n" + - "\tat org.elasticsearch.action.support.replication.TransportReplicationAction.acquirePrimaryShardReference" + - "(TransportReplicationAction.java:984) [elasticsearch-6.2.1.jar:6.2.1]\n" + - "\tat org.elasticsearch.action.support.replication.TransportReplicationAction.access$500(TransportReplicationAction.java:98) " + - "[elasticsearch-6.2.1.jar:6.2.1]\n" + - "\tat org.elasticsearch.action.support.replication.TransportReplicationAction$AsyncPrimaryAction.doRun" + - "(TransportReplicationAction.java:320) [elasticsearch-6.2.1.jar:6.2.1]\n" + - "\tat org.elasticsearch.common.util.concurrent.AbstractRunnable.run(AbstractRunnable.java:37) " + - "[elasticsearch-6.2.1.jar:6.2.1]\n" + - "\tat org.elasticsearch.action.support.replication.TransportReplicationAction$PrimaryOperationTransportHandler" + - ".messageReceived(TransportReplicationAction.java:295) [elasticsearch-6.2.1.jar:6.2.1]\n" + - "\tat org.elasticsearch.action.support.replication.TransportReplicationAction$PrimaryOperationTransportHandler" + - ".messageReceived(TransportReplicationAction.java:282) [elasticsearch-6.2.1.jar:6.2.1]\n" + - "\tat org.elasticsearch.transport.RequestHandlerRegistry.processMessageReceived(RequestHandlerRegistry.java:66) " + - "[elasticsearch-6.2.1.jar:6.2.1]\n" + - "\tat org.elasticsearch.transport.TransportService$7.doRun(TransportService.java:656) " + - "[elasticsearch-6.2.1.jar:6.2.1]\n" + - "\tat org.elasticsearch.common.util.concurrent.ThreadContext$ContextPreservingAbstractRunnable.doRun(ThreadContext.java:635) " + - "[elasticsearch-6.2.1.jar:6.2.1]\n" + - "\tat org.elasticsearch.common.util.concurrent.AbstractRunnable.run(AbstractRunnable.java:37) " + - "[elasticsearch-6.2.1.jar:6.2.1]\n" + - "\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149) [?:1.8.0_144]\n" + - "\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624) [?:1.8.0_144]\n" + - "\tat java.lang.Thread.run(Thread.java:748) [?:1.8.0_144]\n"; - private FileStructureFinderFactory factory = new TextLogFileStructureFinderFactory(); public void testCreateConfigsGivenElasticsearchLog() throws Exception { @@ -124,7 +38,7 @@ public void testCreateConfigsGivenElasticsearchLog() throws Exception { assertEquals(hasByteOrderMarker, structure.getHasByteOrderMarker()); } assertNull(structure.getExcludeLinesPattern()); - assertEquals("^\\[\\b\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2},\\d{3}", structure.getMultilineStartPattern()); + assertEquals("^\\[\\b\\d{4}-\\d{2}-\\d{2}[T ]\\d{2}:\\d{2}", structure.getMultilineStartPattern()); assertNull(structure.getDelimiter()); assertNull(structure.getQuote()); assertNull(structure.getHasHeaderRow()); @@ -139,6 +53,47 @@ public void testCreateConfigsGivenElasticsearchLog() throws Exception { } } + public void testCreateConfigsGivenElasticsearchLogAndTimestampFormatOverride() throws Exception { + + String sample = "12/31/2018 1:40PM INFO foo\n" + + "1/31/2019 11:40AM DEBUG bar\n" + + "2/1/2019 11:00PM INFO foo\n" + + "2/2/2019 1:23AM DEBUG bar\n"; + + FileStructureOverrides overrides = FileStructureOverrides.builder().setTimestampFormat("M/d/yyyy h:mma").build(); + + assertTrue(factory.canCreateFromSample(explanation, sample)); + + String charset = randomFrom(POSSIBLE_CHARSETS); + Boolean hasByteOrderMarker = randomHasByteOrderMarker(charset); + FileStructureFinder structureFinder = factory.createFromSample(explanation, sample, charset, hasByteOrderMarker, overrides, + NOOP_TIMEOUT_CHECKER); + + FileStructure structure = structureFinder.getStructure(); + + assertEquals(FileStructure.Format.SEMI_STRUCTURED_TEXT, structure.getFormat()); + assertEquals(charset, structure.getCharset()); + if (hasByteOrderMarker == null) { + assertNull(structure.getHasByteOrderMarker()); + } else { + assertEquals(hasByteOrderMarker, structure.getHasByteOrderMarker()); + } + assertNull(structure.getExcludeLinesPattern()); + assertEquals("^\\d{1,2}/\\d{1,2}/\\d{4} \\d{1,2}:\\d{2}[AP]M\\b", structure.getMultilineStartPattern()); + assertNull(structure.getDelimiter()); + assertNull(structure.getQuote()); + assertNull(structure.getHasHeaderRow()); + assertNull(structure.getShouldTrimFields()); + assertEquals("%{CUSTOM_TIMESTAMP:timestamp} %{LOGLEVEL:loglevel} .*", structure.getGrokPattern()); + assertEquals("timestamp", structure.getTimestampField()); + assertEquals(Collections.singletonList("M/d/YYYY h:mma"), structure.getJodaTimestampFormats()); + FieldStats messageFieldStats = structure.getFieldStats().get("message"); + assertNotNull(messageFieldStats); + for (String statMessage : messageFieldStats.getTopHits().stream().map(m -> (String) m.get("value")).collect(Collectors.toList())) { + assertThat(structureFinder.getSampleMessages(), hasItem(statMessage)); + } + } + public void testCreateConfigsGivenElasticsearchLogAndTimestampFieldOverride() throws Exception { FileStructureOverrides overrides = FileStructureOverrides.builder().setTimestampField("my_time").build(); @@ -160,7 +115,7 @@ public void testCreateConfigsGivenElasticsearchLogAndTimestampFieldOverride() th assertEquals(hasByteOrderMarker, structure.getHasByteOrderMarker()); } assertNull(structure.getExcludeLinesPattern()); - assertEquals("^\\[\\b\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2},\\d{3}", structure.getMultilineStartPattern()); + assertEquals("^\\[\\b\\d{4}-\\d{2}-\\d{2}[T ]\\d{2}:\\d{2}", structure.getMultilineStartPattern()); assertNull(structure.getDelimiter()); assertNull(structure.getQuote()); assertNull(structure.getHasHeaderRow()); @@ -197,7 +152,7 @@ public void testCreateConfigsGivenElasticsearchLogAndGrokPatternOverride() throw assertEquals(hasByteOrderMarker, structure.getHasByteOrderMarker()); } assertNull(structure.getExcludeLinesPattern()); - assertEquals("^\\[\\b\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2},\\d{3}", structure.getMultilineStartPattern()); + assertEquals("^\\[\\b\\d{4}-\\d{2}-\\d{2}[T ]\\d{2}:\\d{2}", structure.getMultilineStartPattern()); assertNull(structure.getDelimiter()); assertNull(structure.getQuote()); assertNull(structure.getHasHeaderRow()); @@ -303,98 +258,4 @@ public void testCreateMultiLineMessageStartRegexGivenManyPrefacesIncludingEmpty( TextLogFileStructureFinder.createMultiLineMessageStartRegex(prefaces, simpleDateRegex)); } } - - public void testMostLikelyTimestampGivenAllSame() { - String sample = "[2018-06-27T11:59:22,125][INFO ][o.e.n.Node ] [node-0] initializing ...\n" + - "[2018-06-27T11:59:22,201][INFO ][o.e.e.NodeEnvironment ] [node-0] using [1] data paths, mounts [[/ (/dev/disk1)]], " + - "net usable_space [216.1gb], net total_space [464.7gb], types [hfs]\n" + - "[2018-06-27T11:59:22,202][INFO ][o.e.e.NodeEnvironment ] [node-0] heap size [494.9mb], " + - "compressed ordinary object pointers [true]\n" + - "[2018-06-27T11:59:22,204][INFO ][o.e.n.Node ] [node-0] node name [node-0], node ID [Ha1gD8nNSDqjd6PIyu3DJA]\n" + - "[2018-06-27T11:59:22,204][INFO ][o.e.n.Node ] [node-0] version[6.4.0-SNAPSHOT], pid[2785], " + - "build[default/zip/3c60efa/2018-06-26T14:55:15.206676Z], OS[Mac OS X/10.12.6/x86_64], " + - "JVM[\"Oracle Corporation\"/Java HotSpot(TM) 64-Bit Server VM/10/10+46]\n" + - "[2018-06-27T11:59:22,205][INFO ][o.e.n.Node ] [node-0] JVM arguments [-Xms1g, -Xmx1g, " + - "-XX:+UseConcMarkSweepGC, -XX:CMSInitiatingOccupancyFraction=75, -XX:+UseCMSInitiatingOccupancyOnly, " + - "-XX:+AlwaysPreTouch, -Xss1m, -Djava.awt.headless=true, -Dfile.encoding=UTF-8, -Djna.nosys=true, " + - "-XX:-OmitStackTraceInFastThrow, -Dio.netty.noUnsafe=true, -Dio.netty.noKeySetOptimization=true, " + - "-Dio.netty.recycler.maxCapacityPerThread=0, -Dlog4j.shutdownHookEnabled=false, -Dlog4j2.disable.jmx=true, " + - "-Djava.io.tmpdir=/var/folders/k5/5sqcdlps5sg3cvlp783gcz740000h0/T/elasticsearch.nFUyeMH1, " + - "-XX:+HeapDumpOnOutOfMemoryError, -XX:HeapDumpPath=data, -XX:ErrorFile=logs/hs_err_pid%p.log, " + - "-Xlog:gc*,gc+age=trace,safepoint:file=logs/gc.log:utctime,pid,tags:filecount=32,filesize=64m, " + - "-Djava.locale.providers=COMPAT, -Dio.netty.allocator.type=unpooled, -ea, -esa, -Xms512m, -Xmx512m, " + - "-Des.path.home=/Users/dave/elasticsearch/distribution/build/cluster/run node0/elasticsearch-6.4.0-SNAPSHOT, " + - "-Des.path.conf=/Users/dave/elasticsearch/distribution/build/cluster/run node0/elasticsearch-6.4.0-SNAPSHOT/config, " + - "-Des.distribution.flavor=default, -Des.distribution.type=zip]\n" + - "[2018-06-27T11:59:22,205][WARN ][o.e.n.Node ] [node-0] version [6.4.0-SNAPSHOT] is a pre-release version of " + - "Elasticsearch and is not suitable for production\n" + - "[2018-06-27T11:59:23,585][INFO ][o.e.p.PluginsService ] [node-0] loaded module [aggs-matrix-stats]\n" + - "[2018-06-27T11:59:23,586][INFO ][o.e.p.PluginsService ] [node-0] loaded module [analysis-common]\n" + - "[2018-06-27T11:59:23,586][INFO ][o.e.p.PluginsService ] [node-0] loaded module [ingest-common]\n" + - "[2018-06-27T11:59:23,586][INFO ][o.e.p.PluginsService ] [node-0] loaded module [lang-expression]\n" + - "[2018-06-27T11:59:23,586][INFO ][o.e.p.PluginsService ] [node-0] loaded module [lang-mustache]\n" + - "[2018-06-27T11:59:23,586][INFO ][o.e.p.PluginsService ] [node-0] loaded module [lang-painless]\n" + - "[2018-06-27T11:59:23,586][INFO ][o.e.p.PluginsService ] [node-0] loaded module [mapper-extras]\n" + - "[2018-06-27T11:59:23,586][INFO ][o.e.p.PluginsService ] [node-0] loaded module [parent-join]\n" + - "[2018-06-27T11:59:23,586][INFO ][o.e.p.PluginsService ] [node-0] loaded module [percolator]\n" + - "[2018-06-27T11:59:23,586][INFO ][o.e.p.PluginsService ] [node-0] loaded module [rank-eval]\n" + - "[2018-06-27T11:59:23,586][INFO ][o.e.p.PluginsService ] [node-0] loaded module [reindex]\n" + - "[2018-06-27T11:59:23,586][INFO ][o.e.p.PluginsService ] [node-0] loaded module [repository-url]\n" + - "[2018-06-27T11:59:23,587][INFO ][o.e.p.PluginsService ] [node-0] loaded module [transport-netty4]\n" + - "[2018-06-27T11:59:23,587][INFO ][o.e.p.PluginsService ] [node-0] loaded module [x-pack-core]\n" + - "[2018-06-27T11:59:23,587][INFO ][o.e.p.PluginsService ] [node-0] loaded module [x-pack-deprecation]\n" + - "[2018-06-27T11:59:23,587][INFO ][o.e.p.PluginsService ] [node-0] loaded module [x-pack-graph]\n" + - "[2018-06-27T11:59:23,587][INFO ][o.e.p.PluginsService ] [node-0] loaded module [x-pack-logstash]\n" + - "[2018-06-27T11:59:23,587][INFO ][o.e.p.PluginsService ] [node-0] loaded module [x-pack-ml]\n" + - "[2018-06-27T11:59:23,587][INFO ][o.e.p.PluginsService ] [node-0] loaded module [x-pack-monitoring]\n" + - "[2018-06-27T11:59:23,587][INFO ][o.e.p.PluginsService ] [node-0] loaded module [x-pack-rollup]\n" + - "[2018-06-27T11:59:23,587][INFO ][o.e.p.PluginsService ] [node-0] loaded module [x-pack-security]\n" + - "[2018-06-27T11:59:23,587][INFO ][o.e.p.PluginsService ] [node-0] loaded module [x-pack-sql]\n" + - "[2018-06-27T11:59:23,588][INFO ][o.e.p.PluginsService ] [node-0] loaded module [x-pack-upgrade]\n" + - "[2018-06-27T11:59:23,588][INFO ][o.e.p.PluginsService ] [node-0] loaded module [x-pack-watcher]\n" + - "[2018-06-27T11:59:23,588][INFO ][o.e.p.PluginsService ] [node-0] no plugins loaded\n"; - - Tuple> mostLikelyMatch = - TextLogFileStructureFinder.mostLikelyTimestamp(sample.split("\n"), FileStructureOverrides.EMPTY_OVERRIDES, - NOOP_TIMEOUT_CHECKER); - assertNotNull(mostLikelyMatch); - assertEquals(new TimestampMatch(9, "", "ISO8601", "yyyy-MM-dd'T'HH:mm:ss,SSS", - "\\b\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2},\\d{3}", "TIMESTAMP_ISO8601", ""), mostLikelyMatch.v1()); - } - - public void testMostLikelyTimestampGivenExceptionTrace() { - - Tuple> mostLikelyMatch = - TextLogFileStructureFinder.mostLikelyTimestamp(EXCEPTION_TRACE_SAMPLE.split("\n"), FileStructureOverrides.EMPTY_OVERRIDES, - NOOP_TIMEOUT_CHECKER); - assertNotNull(mostLikelyMatch); - - // Even though many lines have a timestamp near the end (in the Lucene version information), - // these are so far along the lines that the weight of the timestamp near the beginning of the - // first line should take precedence - assertEquals(new TimestampMatch(9, "", "ISO8601", "yyyy-MM-dd'T'HH:mm:ss,SSS", - "\\b\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2},\\d{3}", "TIMESTAMP_ISO8601", ""), mostLikelyMatch.v1()); - } - - public void testMostLikelyTimestampGivenExceptionTraceAndTimestampFormatOverride() { - - FileStructureOverrides overrides = FileStructureOverrides.builder().setTimestampFormat("yyyy-MM-dd HH:mm:ss").build(); - - Tuple> mostLikelyMatch = - TextLogFileStructureFinder.mostLikelyTimestamp(EXCEPTION_TRACE_SAMPLE.split("\n"), overrides, NOOP_TIMEOUT_CHECKER); - assertNotNull(mostLikelyMatch); - - // The override should force the seemingly inferior choice of timestamp - assertEquals(new TimestampMatch(6, "", "YYYY-MM-dd HH:mm:ss", "yyyy-MM-dd HH:mm:ss", "\\b\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2}", - "TIMESTAMP_ISO8601", ""), mostLikelyMatch.v1()); - } - - public void testMostLikelyTimestampGivenExceptionTraceAndImpossibleTimestampFormatOverride() { - - FileStructureOverrides overrides = FileStructureOverrides.builder().setTimestampFormat("MMM dd HH:mm:ss").build(); - - Tuple> mostLikelyMatch = - TextLogFileStructureFinder.mostLikelyTimestamp(EXCEPTION_TRACE_SAMPLE.split("\n"), overrides, NOOP_TIMEOUT_CHECKER); - assertNull(mostLikelyMatch); - } } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/TimestampFormatFinderTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/TimestampFormatFinderTests.java index 0374ed6f34175..b80e8a5712aaa 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/TimestampFormatFinderTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/TimestampFormatFinderTests.java @@ -6,9 +6,7 @@ package org.elasticsearch.xpack.ml.filestructurefinder; import org.elasticsearch.common.collect.Tuple; -import org.elasticsearch.common.joda.Joda; import org.elasticsearch.common.time.DateFormatter; -import org.elasticsearch.xpack.ml.filestructurefinder.TimestampFormatFinder.TimestampMatch; import java.time.Instant; import java.time.ZoneId; @@ -18,306 +16,981 @@ import java.time.temporal.TemporalAccessor; import java.time.temporal.TemporalQueries; import java.util.Arrays; +import java.util.BitSet; +import java.util.Collections; import java.util.List; import java.util.Locale; +import java.util.Map; +import java.util.regex.Pattern; public class TimestampFormatFinderTests extends FileStructureTestCase { - public void testFindFirstMatchGivenNoMatch() { - - assertNull(TimestampFormatFinder.findFirstMatch("", NOOP_TIMEOUT_CHECKER)); - assertNull(TimestampFormatFinder.findFirstMatch("no timestamps in here", NOOP_TIMEOUT_CHECKER)); - assertNull(TimestampFormatFinder.findFirstMatch(":::", NOOP_TIMEOUT_CHECKER)); - assertNull(TimestampFormatFinder.findFirstMatch("/+", NOOP_TIMEOUT_CHECKER)); - } - - public void testFindFirstMatchGivenOnlyIso8601() { - - validateTimestampMatch(new TimestampMatch(7, "", "ISO8601", "yyyy-MM-dd'T'HH:mm:ss,SSSXX", - "\\b\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2},\\d{3}", "TIMESTAMP_ISO8601", ""), "2018-05-15T16:14:56,374Z", - 1526400896374L); - validateTimestampMatch(new TimestampMatch(7, "", "ISO8601", "yyyy-MM-dd'T'HH:mm:ss,SSSXX", - "\\b\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2},\\d{3}", "TIMESTAMP_ISO8601", ""), "2018-05-15T17:14:56,374+0100", - 1526400896374L); - validateTimestampMatch(new TimestampMatch(8, "", "ISO8601", "yyyy-MM-dd'T'HH:mm:ss,SSSXXX", - "\\b\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2},\\d{3}", "TIMESTAMP_ISO8601", ""), "2018-05-15T17:14:56,374+01:00", - 1526400896374L); - validateTimestampMatch(new TimestampMatch(9, "", "ISO8601", "yyyy-MM-dd'T'HH:mm:ss,SSS", - "\\b\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2},\\d{3}", "TIMESTAMP_ISO8601", ""), "2018-05-15T17:14:56,374", 1526400896374L); - - TimestampMatch pureIso8601Expected = new TimestampMatch(10, "", "ISO8601", "ISO8601", - "\\b\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2}", "TIMESTAMP_ISO8601", ""); - - validateTimestampMatch(pureIso8601Expected, "2018-05-15T16:14:56Z", 1526400896000L); - validateTimestampMatch(pureIso8601Expected, "2018-05-15T17:14:56+0100", 1526400896000L); - validateTimestampMatch(pureIso8601Expected, "2018-05-15T17:14:56+01:00", 1526400896000L); - validateTimestampMatch(pureIso8601Expected, "2018-05-15T17:14:56", 1526400896000L); - - validateTimestampMatch(new TimestampMatch(1, "", "YYYY-MM-dd HH:mm:ss,SSSZ", "yyyy-MM-dd HH:mm:ss,SSSXX", - "\\b\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2},\\d{3}", "TIMESTAMP_ISO8601", ""), "2018-05-15 16:14:56,374Z", - 1526400896374L); - validateTimestampMatch(new TimestampMatch(1, "", "YYYY-MM-dd HH:mm:ss,SSSZ", "yyyy-MM-dd HH:mm:ss,SSSXX", - "\\b\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2},\\d{3}", "TIMESTAMP_ISO8601", ""), "2018-05-15 17:14:56,374+0100", - 1526400896374L); - validateTimestampMatch(new TimestampMatch(2, "", "YYYY-MM-dd HH:mm:ss,SSSZZ", "yyyy-MM-dd HH:mm:ss,SSSXXX", - "\\b\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2},\\d{3}", "TIMESTAMP_ISO8601", ""), "2018-05-15 17:14:56,374+01:00", - 1526400896374L); - validateTimestampMatch(new TimestampMatch(3, "", "YYYY-MM-dd HH:mm:ss,SSS", "yyyy-MM-dd HH:mm:ss,SSS", - "\\b\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2},\\d{3}", "TIMESTAMP_ISO8601", ""), "2018-05-15 17:14:56,374", 1526400896374L); - validateTimestampMatch(new TimestampMatch(4, "", "YYYY-MM-dd HH:mm:ssZ", "yyyy-MM-dd HH:mm:ssXX", - "\\b\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2}", "TIMESTAMP_ISO8601", ""), "2018-05-15 16:14:56Z", 1526400896000L); - validateTimestampMatch(new TimestampMatch(4, "", "YYYY-MM-dd HH:mm:ssZ", "yyyy-MM-dd HH:mm:ssXX", - "\\b\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2}", "TIMESTAMP_ISO8601", ""), "2018-05-15 17:14:56+0100", 1526400896000L); - validateTimestampMatch(new TimestampMatch(5, "", "YYYY-MM-dd HH:mm:ssZZ", "yyyy-MM-dd HH:mm:ssXXX", - "\\b\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2}", "TIMESTAMP_ISO8601", ""), "2018-05-15 17:14:56+01:00", 1526400896000L); - validateTimestampMatch(new TimestampMatch(6, "", "YYYY-MM-dd HH:mm:ss", "yyyy-MM-dd HH:mm:ss", - "\\b\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2}", "TIMESTAMP_ISO8601", ""), "2018-05-15 17:14:56", 1526400896000L); - } - - public void testFindFirstMatchGivenOnlyKnownTimestampFormat() { + private static final String EXCEPTION_TRACE_SAMPLE = + "[2018-02-28T14:49:40,517][DEBUG][o.e.a.b.TransportShardBulkAction] [an_index][2] failed to execute bulk item " + + "(index) BulkShardRequest [[an_index][2]] containing [33] requests\n" + + "java.lang.IllegalArgumentException: Document contains at least one immense term in field=\"message.keyword\" (whose UTF8 " + + "encoding is longer than the max length 32766), all of which were skipped. Please correct the analyzer to not produce " + + "such terms. The prefix of the first immense term is: '[60, 83, 79, 65, 80, 45, 69, 78, 86, 58, 69, 110, 118, 101, 108, " + + "111, 112, 101, 32, 120, 109, 108, 110, 115, 58, 83, 79, 65, 80, 45]...', original message: bytes can be at most 32766 " + + "in length; got 49023\n" + + "\tat org.apache.lucene.index.DefaultIndexingChain$PerField.invert(DefaultIndexingChain.java:796) " + + "~[lucene-core-7.2.1.jar:7.2.1 b2b6438b37073bee1fca40374e85bf91aa457c0b - ubuntu - 2018-01-10 00:48:43]\n" + + "\tat org.apache.lucene.index.DefaultIndexingChain.processField(DefaultIndexingChain.java:430) " + + "~[lucene-core-7.2.1.jar:7.2.1 b2b6438b37073bee1fca40374e85bf91aa457c0b - ubuntu - 2018-01-10 00:48:43]\n" + + "\tat org.apache.lucene.index.DefaultIndexingChain.processDocument(DefaultIndexingChain.java:392) " + + "~[lucene-core-7.2.1.jar:7.2.1 b2b6438b37073bee1fca40374e85bf91aa457c0b - ubuntu - 2018-01-10 00:48:43]\n" + + "\tat org.apache.lucene.index.DocumentsWriterPerThread.updateDocument(DocumentsWriterPerThread.java:240) " + + "~[lucene-core-7.2.1.jar:7.2.1 b2b6438b37073bee1fca40374e85bf91aa457c0b - ubuntu - 2018-01-10 00:48:43]\n" + + "\tat org.apache.lucene.index.DocumentsWriter.updateDocument(DocumentsWriter.java:496) " + + "~[lucene-core-7.2.1.jar:7.2.1 b2b6438b37073bee1fca40374e85bf91aa457c0b - ubuntu - 2018-01-10 00:48:43]\n" + + "\tat org.apache.lucene.index.IndexWriter.updateDocument(IndexWriter.java:1729) " + + "~[lucene-core-7.2.1.jar:7.2.1 b2b6438b37073bee1fca40374e85bf91aa457c0b - ubuntu - 2018-01-10 00:48:43]\n" + + "\tat org.apache.lucene.index.IndexWriter.addDocument(IndexWriter.java:1464) " + + "~[lucene-core-7.2.1.jar:7.2.1 b2b6438b37073bee1fca40374e85bf91aa457c0b - ubuntu - 2018-01-10 00:48:43]\n" + + "\tat org.elasticsearch.index.engine.InternalEngine.index(InternalEngine.java:1070) ~[elasticsearch-6.2.1.jar:6.2.1]\n" + + "\tat org.elasticsearch.index.engine.InternalEngine.indexIntoLucene(InternalEngine.java:1012) " + + "~[elasticsearch-6.2.1.jar:6.2.1]\n" + + "\tat org.elasticsearch.index.engine.InternalEngine.index(InternalEngine.java:878) ~[elasticsearch-6.2.1.jar:6.2.1]\n" + + "\tat org.elasticsearch.index.shard.IndexShard.index(IndexShard.java:738) ~[elasticsearch-6.2.1.jar:6.2.1]\n" + + "\tat org.elasticsearch.index.shard.IndexShard.applyIndexOperation(IndexShard.java:707) ~[elasticsearch-6.2.1.jar:6.2.1]\n" + + "\tat org.elasticsearch.index.shard.IndexShard.applyIndexOperationOnPrimary(IndexShard.java:673) " + + "~[elasticsearch-6.2.1.jar:6.2.1]\n" + + "\tat org.elasticsearch.action.bulk.TransportShardBulkAction.executeIndexRequestOnPrimary(TransportShardBulkAction.java:548) " + + "~[elasticsearch-6.2.1.jar:6.2.1]\n" + + "\tat org.elasticsearch.action.bulk.TransportShardBulkAction.executeIndexRequest(TransportShardBulkAction.java:140) " + + "[elasticsearch-6.2.1.jar:6.2.1]\n" + + "\tat org.elasticsearch.action.bulk.TransportShardBulkAction.executeBulkItemRequest(TransportShardBulkAction.java:236) " + + "[elasticsearch-6.2.1.jar:6.2.1]\n" + + "\tat org.elasticsearch.action.bulk.TransportShardBulkAction.performOnPrimary(TransportShardBulkAction.java:123) " + + "[elasticsearch-6.2.1.jar:6.2.1]\n" + + "\tat org.elasticsearch.action.bulk.TransportShardBulkAction.shardOperationOnPrimary(TransportShardBulkAction.java:110) " + + "[elasticsearch-6.2.1.jar:6.2.1]\n" + + "\tat org.elasticsearch.action.bulk.TransportShardBulkAction.shardOperationOnPrimary(TransportShardBulkAction.java:72) " + + "[elasticsearch-6.2.1.jar:6.2.1]\n" + + "\tat org.elasticsearch.action.support.replication.TransportReplicationAction$PrimaryShardReference.perform" + + "(TransportReplicationAction.java:1034) [elasticsearch-6.2.1.jar:6.2.1]\n" + + "\tat org.elasticsearch.action.support.replication.TransportReplicationAction$PrimaryShardReference.perform" + + "(TransportReplicationAction.java:1012) [elasticsearch-6.2.1.jar:6.2.1]\n" + + "\tat org.elasticsearch.action.support.replication.ReplicationOperation.execute(ReplicationOperation.java:103) " + + "[elasticsearch-6.2.1.jar:6.2.1]\n" + + "\tat org.elasticsearch.action.support.replication.TransportReplicationAction$AsyncPrimaryAction.onResponse" + + "(TransportReplicationAction.java:359) [elasticsearch-6.2.1.jar:6.2.1]\n" + + "\tat org.elasticsearch.action.support.replication.TransportReplicationAction$AsyncPrimaryAction.onResponse" + + "(TransportReplicationAction.java:299) [elasticsearch-6.2.1.jar:6.2.1]\n" + + "\tat org.elasticsearch.action.support.replication.TransportReplicationAction$1.onResponse" + + "(TransportReplicationAction.java:975) [elasticsearch-6.2.1.jar:6.2.1]\n" + + "\tat org.elasticsearch.action.support.replication.TransportReplicationAction$1.onResponse" + + "(TransportReplicationAction.java:972) [elasticsearch-6.2.1.jar:6.2.1]\n" + + "\tat org.elasticsearch.index.shard.IndexShardOperationPermits.acquire(IndexShardOperationPermits.java:238) " + + "[elasticsearch-6.2.1.jar:6.2.1]\n" + + "\tat org.elasticsearch.index.shard.IndexShard.acquirePrimaryOperationPermit(IndexShard.java:2220) " + + "[elasticsearch-6.2.1.jar:6.2.1]\n" + + "\tat org.elasticsearch.action.support.replication.TransportReplicationAction.acquirePrimaryShardReference" + + "(TransportReplicationAction.java:984) [elasticsearch-6.2.1.jar:6.2.1]\n" + + "\tat org.elasticsearch.action.support.replication.TransportReplicationAction.access$500(TransportReplicationAction.java:98) " + + "[elasticsearch-6.2.1.jar:6.2.1]\n" + + "\tat org.elasticsearch.action.support.replication.TransportReplicationAction$AsyncPrimaryAction.doRun" + + "(TransportReplicationAction.java:320) [elasticsearch-6.2.1.jar:6.2.1]\n" + + "\tat org.elasticsearch.common.util.concurrent.AbstractRunnable.run(AbstractRunnable.java:37) " + + "[elasticsearch-6.2.1.jar:6.2.1]\n" + + "\tat org.elasticsearch.action.support.replication.TransportReplicationAction$PrimaryOperationTransportHandler" + + ".messageReceived(TransportReplicationAction.java:295) [elasticsearch-6.2.1.jar:6.2.1]\n" + + "\tat org.elasticsearch.action.support.replication.TransportReplicationAction$PrimaryOperationTransportHandler" + + ".messageReceived(TransportReplicationAction.java:282) [elasticsearch-6.2.1.jar:6.2.1]\n" + + "\tat org.elasticsearch.transport.RequestHandlerRegistry.processMessageReceived(RequestHandlerRegistry.java:66) " + + "[elasticsearch-6.2.1.jar:6.2.1]\n" + + "\tat org.elasticsearch.transport.TransportService$7.doRun(TransportService.java:656) " + + "[elasticsearch-6.2.1.jar:6.2.1]\n" + + "\tat org.elasticsearch.common.util.concurrent.ThreadContext$ContextPreservingAbstractRunnable.doRun(ThreadContext.java:635) " + + "[elasticsearch-6.2.1.jar:6.2.1]\n" + + "\tat org.elasticsearch.common.util.concurrent.AbstractRunnable.run(AbstractRunnable.java:37) " + + "[elasticsearch-6.2.1.jar:6.2.1]\n" + + "\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149) [?:1.8.0_144]\n" + + "\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624) [?:1.8.0_144]\n" + + "\tat java.lang.Thread.run(Thread.java:748) [?:1.8.0_144]\n"; + + public void testValidOverrideFormatToGrokAndRegex() { + + assertEquals(new Tuple<>("%{YEAR}-%{MONTHNUM2}-%{MONTHDAY}T%{HOUR}:%{MINUTE}:%{SECOND}%{ISO8601_TIMEZONE}", + "\\b\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2},\\d{3}(?:Z|[+-]\\d{4})\\b"), + TimestampFormatFinder.overrideFormatToGrokAndRegex("yyyy-MM-dd'T'HH:mm:ss,SSSXX")); + assertEquals(new Tuple<>("%{MONTHDAY}\\.%{MONTHNUM2}\\.%{YEAR} %{HOUR}:%{MINUTE} (?:AM|PM)", + "\\b\\d{2}\\.\\d{2}\\.\\d{2} \\d{1,2}:\\d{2} [AP]M\\b"), + TimestampFormatFinder.overrideFormatToGrokAndRegex("dd.MM.yy h:mm a")); + assertEquals(new Tuple<>("%{MONTHNUM2}/%{MONTHDAY}/%{YEAR} %{HOUR}:%{MINUTE}:%{SECOND} %{TZ}", + "\\b\\d{2}/\\d{2}/\\d{4} \\d{1,2}:\\d{2}:\\d{2} [A-Z]{3}\\b"), + TimestampFormatFinder.overrideFormatToGrokAndRegex("MM/dd/yyyy H:mm:ss zzz")); + } + + public void testInvalidOverrideFormatToGrokAndRegex() { + + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, + () -> TimestampFormatFinder.overrideFormatToGrokAndRegex("MM/dd/yyyy\nH:mm:ss zzz")); + assertEquals("Multi-line timestamp formats [MM/dd/yyyy\nH:mm:ss zzz] not supported", e.getMessage()); + e = expectThrows(IllegalArgumentException.class, + () -> TimestampFormatFinder.overrideFormatToGrokAndRegex("MM/dd/YYYY H:mm:ss zzz")); + assertEquals("Letter group [YYYY] in [MM/dd/YYYY H:mm:ss zzz] is not supported", e.getMessage()); + e = expectThrows(IllegalArgumentException.class, + () -> TimestampFormatFinder.overrideFormatToGrokAndRegex("MM/dd/yyy H:mm:ss zzz")); + assertEquals("Letter group [yyy] in [MM/dd/yyy H:mm:ss zzz] is not supported", e.getMessage()); + e = expectThrows(IllegalArgumentException.class, + () -> TimestampFormatFinder.overrideFormatToGrokAndRegex("MM/dd/yyyy H:mm:ss+SSSSSS")); + assertEquals("Letter group [SSSSSS] in [MM/dd/yyyy H:mm:ss+SSSSSS] is not supported" + + " because it is not preceeded by [ss] and a separator from [:.,]", e.getMessage()); + e = expectThrows(IllegalArgumentException.class, + () -> TimestampFormatFinder.overrideFormatToGrokAndRegex("MM/dd/yyyy H:mm,SSSSSS")); + assertEquals("Letter group [SSSSSS] in [MM/dd/yyyy H:mm,SSSSSS] is not supported" + + " because it is not preceeded by [ss] and a separator from [:.,]", e.getMessage()); + e = expectThrows(IllegalArgumentException.class, + () -> TimestampFormatFinder.overrideFormatToGrokAndRegex(" 'T' ")); + assertEquals("No time format letter groups in override format [ 'T' ]", e.getMessage()); + } + + public void testMakeCandidateFromOverrideFormat() { + + // Override is a special format + assertSame(TimestampFormatFinder.ISO8601_CANDIDATE_FORMAT, + TimestampFormatFinder.makeCandidateFromOverrideFormat("ISO8601", NOOP_TIMEOUT_CHECKER)); + assertSame(TimestampFormatFinder.UNIX_MS_CANDIDATE_FORMAT, + TimestampFormatFinder.makeCandidateFromOverrideFormat("UNIX_MS", NOOP_TIMEOUT_CHECKER)); + assertSame(TimestampFormatFinder.UNIX_CANDIDATE_FORMAT, + TimestampFormatFinder.makeCandidateFromOverrideFormat("UNIX", NOOP_TIMEOUT_CHECKER)); + assertSame(TimestampFormatFinder.TAI64N_CANDIDATE_FORMAT, + TimestampFormatFinder.makeCandidateFromOverrideFormat("TAI64N", NOOP_TIMEOUT_CHECKER)); + + // Override is covered by a built-in format + TimestampFormatFinder.CandidateTimestampFormat candidate = + TimestampFormatFinder.makeCandidateFromOverrideFormat("yyyy-MM-dd'T'HH:mm:ss.SSS", NOOP_TIMEOUT_CHECKER); + assertEquals(TimestampFormatFinder.ISO8601_CANDIDATE_FORMAT.outputGrokPatternName, candidate.outputGrokPatternName); + assertEquals(TimestampFormatFinder.ISO8601_CANDIDATE_FORMAT.strictGrokPattern, candidate.strictGrokPattern); + // Can't compare Grok objects as Grok doesn't implement equals() + assertEquals(TimestampFormatFinder.ISO8601_CANDIDATE_FORMAT.simplePattern.pattern(), candidate.simplePattern.pattern()); + // Exact format supplied is returned if it matches + assertEquals(Collections.singletonList("yyyy-MM-dd'T'HH:mm:ss.SSS"), + candidate.javaTimestampFormatSupplier.apply("2018-05-15T16:14:56.374")); + // Other supported formats are returned if exact format doesn't match + assertEquals(Collections.singletonList("ISO8601"), candidate.javaTimestampFormatSupplier.apply("2018-05-15T16:14:56,374")); + + // Override is supported but not covered by any built-in format + candidate = + TimestampFormatFinder.makeCandidateFromOverrideFormat("MM/dd/yyyy H:mm:ss zzz", NOOP_TIMEOUT_CHECKER); + assertEquals(TimestampFormatFinder.CUSTOM_TIMESTAMP_GROK_NAME, candidate.outputGrokPatternName); + assertEquals("%{MONTHNUM2}/%{MONTHDAY}/%{YEAR} %{HOUR}:%{MINUTE}:%{SECOND} %{TZ}", candidate.strictGrokPattern); + assertEquals("\\b\\d{2}/\\d{2}/\\d{4} \\d{1,2}:\\d{2}:\\d{2} [A-Z]{3}\\b", candidate.simplePattern.pattern()); + assertEquals(Collections.singletonList("MM/dd/yyyy H:mm:ss zzz"), + candidate.javaTimestampFormatSupplier.apply("05/15/2018 16:14:56 UTC")); + + candidate = + TimestampFormatFinder.makeCandidateFromOverrideFormat("M/d/yyyy H:mm:ss zzz", NOOP_TIMEOUT_CHECKER); + assertEquals(TimestampFormatFinder.CUSTOM_TIMESTAMP_GROK_NAME, candidate.outputGrokPatternName); + assertEquals("%{MONTHNUM}/%{MONTHDAY}/%{YEAR} %{HOUR}:%{MINUTE}:%{SECOND} %{TZ}", candidate.strictGrokPattern); + assertEquals("\\b\\d{1,2}/\\d{1,2}/\\d{4} \\d{1,2}:\\d{2}:\\d{2} [A-Z]{3}\\b", candidate.simplePattern.pattern()); + assertEquals(Collections.singletonList("M/d/yyyy H:mm:ss zzz"), + candidate.javaTimestampFormatSupplier.apply("5/15/2018 16:14:56 UTC")); + } + + public void testRequiresTimezoneDependentParsing() { + + assertTrue(TimestampFormatFinder.TimestampMatch.requiresTimezoneDependentParsing("ISO8601", "2018-05-15T17:14:56")); + assertFalse(TimestampFormatFinder.TimestampMatch.requiresTimezoneDependentParsing("ISO8601", "2018-05-15T17:14:56Z")); + assertFalse(TimestampFormatFinder.TimestampMatch.requiresTimezoneDependentParsing("ISO8601", "2018-05-15T17:14:56-0100")); + assertFalse(TimestampFormatFinder.TimestampMatch.requiresTimezoneDependentParsing("ISO8601", "2018-05-15T17:14:56+01:00")); + + assertFalse(TimestampFormatFinder.TimestampMatch.requiresTimezoneDependentParsing("UNIX_MS", "1526400896374")); + assertFalse(TimestampFormatFinder.TimestampMatch.requiresTimezoneDependentParsing("UNIX", "1526400896")); + assertFalse(TimestampFormatFinder.TimestampMatch.requiresTimezoneDependentParsing("TAI64N", "400000005afb078a164ac980")); + + assertFalse(TimestampFormatFinder.TimestampMatch.requiresTimezoneDependentParsing("EEE, dd MMM yyyy HH:mm:ss XXX", + "Tue, 15 May 2018 17:14:56 +01:00")); + assertTrue(TimestampFormatFinder.TimestampMatch.requiresTimezoneDependentParsing("yyyyMMddHHmmss", "20180515171456")); + assertFalse(TimestampFormatFinder.TimestampMatch.requiresTimezoneDependentParsing("EEE MMM dd yy HH:mm:ss zzz", + "Tue May 15 18 16:14:56 UTC")); + assertFalse(TimestampFormatFinder.TimestampMatch.requiresTimezoneDependentParsing("yyyy-MM-dd HH:mm:ss,SSS XX", + "2018-05-15 17:14:56,374 +0100")); + assertTrue(TimestampFormatFinder.TimestampMatch.requiresTimezoneDependentParsing("MMM dd HH:mm:ss.SSS", "May 15 17:14:56.725")); + + assertTrue(TimestampFormatFinder.TimestampMatch.requiresTimezoneDependentParsing("yyyy.MM.dd'zXz'HH:mm:ss", + "2018.05.15zXz17:14:56")); + assertTrue(TimestampFormatFinder.TimestampMatch.requiresTimezoneDependentParsing("yyyy.MM.dd HH:mm:ss'z'", + "2018.05.15 17:14:56z")); + assertTrue(TimestampFormatFinder.TimestampMatch.requiresTimezoneDependentParsing("'XX'yyyy.MM.dd HH:mm:ss", + "XX2018.05.15 17:14:56")); + assertFalse(TimestampFormatFinder.TimestampMatch.requiresTimezoneDependentParsing("'XX'yyyy.MM.dd HH:mm:ssXX", + "XX2018.05.15 17:14:56Z")); + } + + public void testParseIndeterminateDateNumbers() { + + // Simplest case - nothing is indeterminate + int[] indeterminateDateNumbers = + TimestampFormatFinder.TimestampMatch.parseIndeterminateDateNumbers("2018-05-15T16:14:56,374Z", + Collections.singletonList("yyyy-MM-dd'T'HH:mm:ss,SSSXX")); + assertEquals(2, indeterminateDateNumbers.length); + assertEquals(-1, indeterminateDateNumbers[0]); + assertEquals(-1, indeterminateDateNumbers[1]); + + // US with padding + indeterminateDateNumbers = + TimestampFormatFinder.TimestampMatch.parseIndeterminateDateNumbers("05/15/2018 16:14:56", + Collections.singletonList("??/??/yyyy HH:mm:ss")); + assertEquals(2, indeterminateDateNumbers.length); + assertEquals(5, indeterminateDateNumbers[0]); + assertEquals(15, indeterminateDateNumbers[1]); + + // US without padding + indeterminateDateNumbers = + TimestampFormatFinder.TimestampMatch.parseIndeterminateDateNumbers("5/15/2018 16:14:56", + Collections.singletonList("?/?/yyyy HH:mm:ss")); + assertEquals(2, indeterminateDateNumbers.length); + assertEquals(5, indeterminateDateNumbers[0]); + assertEquals(15, indeterminateDateNumbers[1]); + + // EU with padding + indeterminateDateNumbers = + TimestampFormatFinder.TimestampMatch.parseIndeterminateDateNumbers("15/05/2018 16:14:56", + Collections.singletonList("??/??/yyyy HH:mm:ss")); + assertEquals(2, indeterminateDateNumbers.length); + assertEquals(15, indeterminateDateNumbers[0]); + assertEquals(5, indeterminateDateNumbers[1]); + + // EU without padding + indeterminateDateNumbers = + TimestampFormatFinder.TimestampMatch.parseIndeterminateDateNumbers("15/5/2018 16:14:56", + Collections.singletonList("?/?/yyyy HH:mm:ss")); + assertEquals(2, indeterminateDateNumbers.length); + assertEquals(15, indeterminateDateNumbers[0]); + assertEquals(5, indeterminateDateNumbers[1]); + } + + public void testDeterminiseJavaTimestampFormat() { + + // Indeterminate at the beginning of the pattern + assertEquals("dd/MM/yyyy HH:mm:ss", TimestampFormatFinder.determiniseJavaTimestampFormat("??/??/yyyy HH:mm:ss", true)); + assertEquals("MM/dd/yyyy HH:mm:ss", TimestampFormatFinder.determiniseJavaTimestampFormat("??/??/yyyy HH:mm:ss", false)); + assertEquals("d/M/yyyy HH:mm:ss", TimestampFormatFinder.determiniseJavaTimestampFormat("?/?/yyyy HH:mm:ss", true)); + assertEquals("M/d/yyyy HH:mm:ss", TimestampFormatFinder.determiniseJavaTimestampFormat("?/?/yyyy HH:mm:ss", false)); + // Indeterminate in the middle of the pattern + assertEquals("HH:mm:ss dd/MM/yyyy", TimestampFormatFinder.determiniseJavaTimestampFormat("HH:mm:ss ??/??/yyyy", true)); + assertEquals("HH:mm:ss MM/dd/yyyy", TimestampFormatFinder.determiniseJavaTimestampFormat("HH:mm:ss ??/??/yyyy", false)); + assertEquals("HH:mm:ss d/M/yyyy", TimestampFormatFinder.determiniseJavaTimestampFormat("HH:mm:ss ?/?/yyyy", true)); + assertEquals("HH:mm:ss M/d/yyyy", TimestampFormatFinder.determiniseJavaTimestampFormat("HH:mm:ss ?/?/yyyy", false)); + // No separators + assertEquals("ddMMyyyyHHmmss", TimestampFormatFinder.determiniseJavaTimestampFormat("????yyyyHHmmss", true)); + assertEquals("MMddyyyyHHmmss", TimestampFormatFinder.determiniseJavaTimestampFormat("????yyyyHHmmss", false)); + // It's unreasonable to expect a variable length format like 'd' or 'M' to work without separators + } + + public void testGuessIsDayFirstFromFormats() { + + TimestampFormatFinder timestampFormatFinder = new TimestampFormatFinder(explanation, true, true, true, NOOP_TIMEOUT_CHECKER); + + timestampFormatFinder.addSample("05/5/2018 16:14:56"); + timestampFormatFinder.addSample("06/6/2018 17:14:56"); + timestampFormatFinder.addSample("07/7/2018 18:14:56"); + + // This is based on the fact that %{MONTHNUM} can match a single digit whereas %{MONTHDAY} cannot + assertTrue(timestampFormatFinder.guessIsDayFirstFromFormats(timestampFormatFinder.getRawJavaTimestampFormats())); + + timestampFormatFinder = new TimestampFormatFinder(explanation, true, true, true, NOOP_TIMEOUT_CHECKER); + + timestampFormatFinder.addSample("5/05/2018 16:14:56"); + timestampFormatFinder.addSample("6/06/2018 17:14:56"); + timestampFormatFinder.addSample("7/07/2018 18:14:56"); + + // This is based on the fact that %{MONTHNUM} can match a single digit whereas %{MONTHDAY} cannot + assertFalse(timestampFormatFinder.guessIsDayFirstFromFormats(timestampFormatFinder.getRawJavaTimestampFormats())); + + timestampFormatFinder = new TimestampFormatFinder(explanation, true, true, true, NOOP_TIMEOUT_CHECKER); + + timestampFormatFinder.addSample("5/05/2018 16:14:56"); + timestampFormatFinder.addSample("06/6/2018 17:14:56"); + timestampFormatFinder.addSample("7/07/2018 18:14:56"); + + // Inconsistent so no decision + assertNull(timestampFormatFinder.guessIsDayFirstFromFormats(timestampFormatFinder.getRawJavaTimestampFormats())); + } + + public void testGuessIsDayFirstFromMatchesSingleFormat() { + + TimestampFormatFinder timestampFormatFinder = new TimestampFormatFinder(explanation, true, true, true, NOOP_TIMEOUT_CHECKER); + + timestampFormatFinder.addSample("05/05/2018 16:14:56"); + timestampFormatFinder.addSample("05/15/2018 17:14:56"); + timestampFormatFinder.addSample("05/25/2018 18:14:56"); + + assertFalse(timestampFormatFinder.guessIsDayFirstFromMatches(null)); + assertFalse(timestampFormatFinder.guessIsDayFirstFromMatches(null)); + + timestampFormatFinder = new TimestampFormatFinder(explanation, true, true, true, NOOP_TIMEOUT_CHECKER); + + timestampFormatFinder.addSample("05/05/2018 16:14:56"); + timestampFormatFinder.addSample("15/05/2018 17:14:56"); + timestampFormatFinder.addSample("25/05/2018 18:14:56"); + + assertTrue(timestampFormatFinder.guessIsDayFirstFromMatches(null)); + assertTrue(timestampFormatFinder.guessIsDayFirstFromMatches(null)); + + timestampFormatFinder = new TimestampFormatFinder(explanation, true, true, true, NOOP_TIMEOUT_CHECKER); + + timestampFormatFinder.addSample("05/05/2018 16:14:56"); + timestampFormatFinder.addSample("05/06/2018 17:14:56"); + timestampFormatFinder.addSample("05/07/2018 18:14:56"); + + // Second number has 3 values, first only 1, so guess second is day + assertFalse(timestampFormatFinder.guessIsDayFirstFromMatches(null)); + assertFalse(timestampFormatFinder.guessIsDayFirstFromMatches(null)); + + timestampFormatFinder = new TimestampFormatFinder(explanation, true, true, true, NOOP_TIMEOUT_CHECKER); + + timestampFormatFinder.addSample("05/05/2018 16:14:56"); + timestampFormatFinder.addSample("06/05/2018 17:14:56"); + timestampFormatFinder.addSample("07/05/2018 18:14:56"); + + // First number has 3 values, second only 1, so guess first is day + assertTrue(timestampFormatFinder.guessIsDayFirstFromMatches(null)); + assertTrue(timestampFormatFinder.guessIsDayFirstFromMatches(null)); + + timestampFormatFinder = new TimestampFormatFinder(explanation, true, true, true, NOOP_TIMEOUT_CHECKER); + + timestampFormatFinder.addSample("05/05/2018 16:14:56"); + timestampFormatFinder.addSample("06/06/2018 17:14:56"); + timestampFormatFinder.addSample("07/07/2018 18:14:56"); + + // Insufficient evidence to decide + assertNull(timestampFormatFinder.guessIsDayFirstFromMatches(null)); + assertNull(timestampFormatFinder.guessIsDayFirstFromMatches(null)); + } + + public void testGuessIsDayFirstFromMatchesMultipleFormats() { + + // Similar to the test above, but with the possibility that the secondary + // ISO8601 formats cause confusion - this test proves that they don't + + TimestampFormatFinder.TimestampFormat expectedPrimaryFormat = + new TimestampFormatFinder.TimestampFormat(Collections.singletonList("??/??/yyyy HH:mm:ss"), + Pattern.compile("\\b\\d{1,2}[/.-]\\d{1,2}[/.-]\\d{4}[- ]\\d{2}:\\d{2}:\\d{2}\\b"), "DATESTAMP", Collections.emptyMap(), ""); + + TimestampFormatFinder timestampFormatFinder = new TimestampFormatFinder(explanation, true, true, false, NOOP_TIMEOUT_CHECKER); + + timestampFormatFinder.addSample("05/05/2018 16:14:56"); + timestampFormatFinder.addSample("2018-05-15T17:14:56"); + timestampFormatFinder.addSample("05/15/2018 17:14:56"); + timestampFormatFinder.addSample("2018-05-25T18:14:56"); + timestampFormatFinder.addSample("05/25/2018 18:14:56"); + + assertFalse(timestampFormatFinder.guessIsDayFirstFromMatches(expectedPrimaryFormat)); + assertFalse(timestampFormatFinder.guessIsDayFirstFromMatches(expectedPrimaryFormat)); + + timestampFormatFinder = new TimestampFormatFinder(explanation, true, true, false, NOOP_TIMEOUT_CHECKER); + + timestampFormatFinder.addSample("05/05/2018 16:14:56"); + timestampFormatFinder.addSample("2018-05-15T17:14:56"); + timestampFormatFinder.addSample("15/05/2018 17:14:56"); + timestampFormatFinder.addSample("2018-05-25T18:14:56"); + timestampFormatFinder.addSample("25/05/2018 18:14:56"); + + assertTrue(timestampFormatFinder.guessIsDayFirstFromMatches(expectedPrimaryFormat)); + assertTrue(timestampFormatFinder.guessIsDayFirstFromMatches(expectedPrimaryFormat)); + + timestampFormatFinder = new TimestampFormatFinder(explanation, true, true, false, NOOP_TIMEOUT_CHECKER); + + timestampFormatFinder.addSample("05/05/2018 16:14:56"); + timestampFormatFinder.addSample("2018-05-06T17:14:56"); + timestampFormatFinder.addSample("05/06/2018 17:14:56"); + timestampFormatFinder.addSample("2018-05-07T18:14:56"); + timestampFormatFinder.addSample("05/07/2018 18:14:56"); + + // Second number has 3 values, first only 1, so guess second is day + assertFalse(timestampFormatFinder.guessIsDayFirstFromMatches(expectedPrimaryFormat)); + assertFalse(timestampFormatFinder.guessIsDayFirstFromMatches(expectedPrimaryFormat)); + + timestampFormatFinder = new TimestampFormatFinder(explanation, true, true, false, NOOP_TIMEOUT_CHECKER); + + timestampFormatFinder.addSample("05/05/2018 16:14:56"); + timestampFormatFinder.addSample("2018-05-06T17:14:56"); + timestampFormatFinder.addSample("06/05/2018 17:14:56"); + timestampFormatFinder.addSample("2018-05-07T18:14:56"); + timestampFormatFinder.addSample("07/05/2018 18:14:56"); + + // First number has 3 values, second only 1, so guess first is day + assertTrue(timestampFormatFinder.guessIsDayFirstFromMatches(expectedPrimaryFormat)); + assertTrue(timestampFormatFinder.guessIsDayFirstFromMatches(expectedPrimaryFormat)); + + timestampFormatFinder = new TimestampFormatFinder(explanation, true, true, false, NOOP_TIMEOUT_CHECKER); + + timestampFormatFinder.addSample("05/05/2018 16:14:56"); + timestampFormatFinder.addSample("2018-06-06T17:14:56"); + timestampFormatFinder.addSample("06/06/2018 17:14:56"); + timestampFormatFinder.addSample("2018-07-07T18:14:56"); + timestampFormatFinder.addSample("07/07/2018 18:14:56"); + + // Insufficient evidence to decide + assertNull(timestampFormatFinder.guessIsDayFirstFromMatches(expectedPrimaryFormat)); + assertNull(timestampFormatFinder.guessIsDayFirstFromMatches(expectedPrimaryFormat)); + } + + public void testGuessIsDayFirstFromLocale() { + + TimestampFormatFinder timestampFormatFinder = new TimestampFormatFinder(explanation, true, true, true, NOOP_TIMEOUT_CHECKER); + + // Locale fallback is the only way to decide + assertFalse(timestampFormatFinder.guessIsDayFirstFromLocale(Locale.US)); + assertTrue(timestampFormatFinder.guessIsDayFirstFromLocale(Locale.UK)); + assertTrue(timestampFormatFinder.guessIsDayFirstFromLocale(Locale.FRANCE)); + assertFalse(timestampFormatFinder.guessIsDayFirstFromLocale(Locale.JAPAN)); + } + + public void testStringToNumberPosBitSet() { + + BitSet bitSet = TimestampFormatFinder.stringToNumberPosBitSet(""); + assertTrue(bitSet.isEmpty()); + assertEquals(0, bitSet.length()); + + bitSet = TimestampFormatFinder.stringToNumberPosBitSet(" 1"); + assertEquals(2, bitSet.length()); + assertFalse(bitSet.get(0)); + assertTrue(bitSet.get(1)); + + bitSet = TimestampFormatFinder.stringToNumberPosBitSet("1 1 1"); + assertEquals(5, bitSet.length()); + assertTrue(bitSet.get(0)); + assertFalse(bitSet.get(1)); + assertTrue(bitSet.get(2)); + assertFalse(bitSet.get(3)); + assertTrue(bitSet.get(4)); + + bitSet = TimestampFormatFinder.stringToNumberPosBitSet("05/05/2018 16:14:56"); + assertEquals(19, bitSet.length()); + assertTrue(bitSet.get(0)); + assertTrue(bitSet.get(1)); + assertFalse(bitSet.get(2)); + assertTrue(bitSet.get(3)); + assertTrue(bitSet.get(4)); + assertFalse(bitSet.get(5)); + assertTrue(bitSet.get(6)); + assertTrue(bitSet.get(7)); + assertTrue(bitSet.get(8)); + assertTrue(bitSet.get(9)); + assertFalse(bitSet.get(10)); + assertTrue(bitSet.get(11)); + assertTrue(bitSet.get(12)); + assertFalse(bitSet.get(13)); + assertTrue(bitSet.get(14)); + assertTrue(bitSet.get(15)); + assertFalse(bitSet.get(16)); + assertTrue(bitSet.get(17)); + assertTrue(bitSet.get(18)); + } + + public void testFindBitPattern() { + + BitSet findIn = TimestampFormatFinder.stringToNumberPosBitSet(""); + BitSet toFind = TimestampFormatFinder.stringToNumberPosBitSet(""); + assertEquals(0, TimestampFormatFinder.findBitPattern(findIn, 0, toFind)); + + findIn = TimestampFormatFinder.stringToNumberPosBitSet("1 1 1"); + toFind = TimestampFormatFinder.stringToNumberPosBitSet(""); + assertEquals(0, TimestampFormatFinder.findBitPattern(findIn, 0, toFind)); + assertEquals(1, TimestampFormatFinder.findBitPattern(findIn, 1, toFind)); + assertEquals(2, TimestampFormatFinder.findBitPattern(findIn, 2, toFind)); + + findIn = TimestampFormatFinder.stringToNumberPosBitSet("1 1 1"); + toFind = TimestampFormatFinder.stringToNumberPosBitSet("1"); + assertEquals(0, TimestampFormatFinder.findBitPattern(findIn, 0, toFind)); + assertEquals(2, TimestampFormatFinder.findBitPattern(findIn, 1, toFind)); + assertEquals(2, TimestampFormatFinder.findBitPattern(findIn, 2, toFind)); + + findIn = TimestampFormatFinder.stringToNumberPosBitSet("1 1 1"); + toFind = TimestampFormatFinder.stringToNumberPosBitSet(" 1"); + assertEquals(1, TimestampFormatFinder.findBitPattern(findIn, 0, toFind)); + assertEquals(1, TimestampFormatFinder.findBitPattern(findIn, 1, toFind)); + assertEquals(3, TimestampFormatFinder.findBitPattern(findIn, 2, toFind)); + + findIn = TimestampFormatFinder.stringToNumberPosBitSet("1 1 1"); + toFind = TimestampFormatFinder.stringToNumberPosBitSet("1 1"); + assertEquals(0, TimestampFormatFinder.findBitPattern(findIn, 0, toFind)); + assertEquals(2, TimestampFormatFinder.findBitPattern(findIn, 1, toFind)); + assertEquals(2, TimestampFormatFinder.findBitPattern(findIn, 2, toFind)); + assertEquals(-1, TimestampFormatFinder.findBitPattern(findIn, 3, toFind)); + + findIn = TimestampFormatFinder.stringToNumberPosBitSet("1 11 1 1"); + toFind = TimestampFormatFinder.stringToNumberPosBitSet("11 1"); + assertEquals(2, TimestampFormatFinder.findBitPattern(findIn, 0, toFind)); + assertEquals(2, TimestampFormatFinder.findBitPattern(findIn, 1, toFind)); + assertEquals(2, TimestampFormatFinder.findBitPattern(findIn, 2, toFind)); + assertEquals(-1, TimestampFormatFinder.findBitPattern(findIn, 3, toFind)); + + findIn = TimestampFormatFinder.stringToNumberPosBitSet("1 11 1 1"); + toFind = TimestampFormatFinder.stringToNumberPosBitSet(" 11 1"); + assertEquals(1, TimestampFormatFinder.findBitPattern(findIn, 0, toFind)); + assertEquals(1, TimestampFormatFinder.findBitPattern(findIn, 1, toFind)); + assertEquals(-1, TimestampFormatFinder.findBitPattern(findIn, 2, toFind)); + + findIn = TimestampFormatFinder.stringToNumberPosBitSet("1 11 1 1"); + toFind = TimestampFormatFinder.stringToNumberPosBitSet(" 1 1"); + assertEquals(4, TimestampFormatFinder.findBitPattern(findIn, 0, toFind)); + assertEquals(4, TimestampFormatFinder.findBitPattern(findIn, 4, toFind)); + assertEquals(-1, TimestampFormatFinder.findBitPattern(findIn, 5, toFind)); + } + + public void testFindBoundsForCandidate() { + + final TimestampFormatFinder.CandidateTimestampFormat httpdCandidateFormat = TimestampFormatFinder.ORDERED_CANDIDATE_FORMATS + .stream().filter(candidate -> candidate.outputGrokPatternName.equals("HTTPDATE")).findAny().get(); + + BitSet numberPosBitSet = TimestampFormatFinder.stringToNumberPosBitSet("[2018-05-11T17:07:29,553][INFO ]" + + "[o.e.e.NodeEnvironment ] [node-0] heap size [3.9gb], compressed ordinary object pointers [true]"); + assertEquals(new Tuple<>(1, 36), + TimestampFormatFinder.findBoundsForCandidate(TimestampFormatFinder.ISO8601_CANDIDATE_FORMAT, numberPosBitSet)); + assertEquals(new Tuple<>(-1, -1), TimestampFormatFinder.findBoundsForCandidate(httpdCandidateFormat, numberPosBitSet)); + // TAI64N doesn't necessarily contain digits, so this functionality cannot guarantee that it won't match somewhere in the text + assertEquals(new Tuple<>(0, Integer.MAX_VALUE), + TimestampFormatFinder.findBoundsForCandidate(TimestampFormatFinder.TAI64N_CANDIDATE_FORMAT, numberPosBitSet)); + + numberPosBitSet = TimestampFormatFinder.stringToNumberPosBitSet("192.168.62.101 - - [29/Jun/2016:12:11:31 +0000] " + + "\"POST //apiserv:8080/engine/v2/jobs HTTP/1.1\" 201 42 \"-\" \"curl/7.46.0\" 384"); + assertEquals(new Tuple<>(-1, -1), + TimestampFormatFinder.findBoundsForCandidate(TimestampFormatFinder.ISO8601_CANDIDATE_FORMAT, numberPosBitSet)); + assertEquals(new Tuple<>(20, 46), TimestampFormatFinder.findBoundsForCandidate(httpdCandidateFormat, numberPosBitSet)); + assertEquals(new Tuple<>(0, Integer.MAX_VALUE), + TimestampFormatFinder.findBoundsForCandidate(TimestampFormatFinder.TAI64N_CANDIDATE_FORMAT, numberPosBitSet)); + } + + public void testFindFormatGivenNoMatch() { + + validateNoTimestampMatch(""); + validateNoTimestampMatch("no timestamps in here"); + validateNoTimestampMatch(":::"); + validateNoTimestampMatch("/+"); + } + + public void testFindFormatGivenOnlyIso8601() { + + validateTimestampMatch("2018-05-15T16:14:56,374Z", "TIMESTAMP_ISO8601", "\\b\\d{4}-\\d{2}-\\d{2}[T ]\\d{2}:\\d{2}", + "ISO8601", 1526400896374L); + validateTimestampMatch("2018-05-15T17:14:56,374+0100", "TIMESTAMP_ISO8601", "\\b\\d{4}-\\d{2}-\\d{2}[T ]\\d{2}:\\d{2}", + "ISO8601", 1526400896374L); + validateTimestampMatch("2018-05-15T17:14:56,374+01:00", "TIMESTAMP_ISO8601", "\\b\\d{4}-\\d{2}-\\d{2}[T ]\\d{2}:\\d{2}", + "ISO8601", 1526400896374L); + validateTimestampMatch("2018-05-15T17:14:56,374", "TIMESTAMP_ISO8601", "\\b\\d{4}-\\d{2}-\\d{2}[T ]\\d{2}:\\d{2}", + "ISO8601", 1526400896374L); + + validateTimestampMatch("2018-05-15T16:14:56Z", "TIMESTAMP_ISO8601", "\\b\\d{4}-\\d{2}-\\d{2}[T ]\\d{2}:\\d{2}", + "ISO8601", 1526400896000L); + validateTimestampMatch("2018-05-15T17:14:56+0100", "TIMESTAMP_ISO8601", "\\b\\d{4}-\\d{2}-\\d{2}[T ]\\d{2}:\\d{2}", + "ISO8601", 1526400896000L); + validateTimestampMatch("2018-05-15T17:14:56+01:00", "TIMESTAMP_ISO8601", "\\b\\d{4}-\\d{2}-\\d{2}[T ]\\d{2}:\\d{2}", + "ISO8601", 1526400896000L); + validateTimestampMatch("2018-05-15T17:14:56", "TIMESTAMP_ISO8601", "\\b\\d{4}-\\d{2}-\\d{2}[T ]\\d{2}:\\d{2}", + "ISO8601", 1526400896000L); + + validateTimestampMatch("2018-05-15T16:14Z", "TIMESTAMP_ISO8601", "\\b\\d{4}-\\d{2}-\\d{2}[T ]\\d{2}:\\d{2}", + "ISO8601", 1526400840000L); + validateTimestampMatch("2018-05-15T17:14+0100", "TIMESTAMP_ISO8601", "\\b\\d{4}-\\d{2}-\\d{2}[T ]\\d{2}:\\d{2}", + "ISO8601", 1526400840000L); + validateTimestampMatch("2018-05-15T17:14+01:00", "TIMESTAMP_ISO8601", "\\b\\d{4}-\\d{2}-\\d{2}[T ]\\d{2}:\\d{2}", + "ISO8601", 1526400840000L); + validateTimestampMatch("2018-05-15T17:14", "TIMESTAMP_ISO8601", "\\b\\d{4}-\\d{2}-\\d{2}[T ]\\d{2}:\\d{2}", + "ISO8601", 1526400840000L); + + // TIMESTAMP_ISO8601 doesn't match ISO8601 if it's only a date with no time + validateTimestampMatch("2018-05-15", "CUSTOM_TIMESTAMP", "\\b\\d{4}-\\d{2}-\\d{2}\\b", "ISO8601", 1526338800000L); + + validateTimestampMatch("2018-05-15 16:14:56,374Z", "TIMESTAMP_ISO8601", "\\b\\d{4}-\\d{2}-\\d{2}[T ]\\d{2}:\\d{2}", + "yyyy-MM-dd HH:mm:ss,SSSXX", 1526400896374L); + validateTimestampMatch("2018-05-15 17:14:56,374+0100", "TIMESTAMP_ISO8601", "\\b\\d{4}-\\d{2}-\\d{2}[T ]\\d{2}:\\d{2}", + "yyyy-MM-dd HH:mm:ss,SSSXX", 1526400896374L); + validateTimestampMatch("2018-05-15 17:14:56,374+01:00", "TIMESTAMP_ISO8601", "\\b\\d{4}-\\d{2}-\\d{2}[T ]\\d{2}:\\d{2}", + "yyyy-MM-dd HH:mm:ss,SSSXXX", 1526400896374L); + validateTimestampMatch("2018-05-15 17:14:56,374", "TIMESTAMP_ISO8601", "\\b\\d{4}-\\d{2}-\\d{2}[T ]\\d{2}:\\d{2}", + "yyyy-MM-dd HH:mm:ss,SSS", 1526400896374L); + + validateTimestampMatch("2018-05-15 16:14:56Z", "TIMESTAMP_ISO8601", "\\b\\d{4}-\\d{2}-\\d{2}[T ]\\d{2}:\\d{2}", + "yyyy-MM-dd HH:mm:ssXX", 1526400896000L); + validateTimestampMatch("2018-05-15 17:14:56+0100", "TIMESTAMP_ISO8601", "\\b\\d{4}-\\d{2}-\\d{2}[T ]\\d{2}:\\d{2}", + "yyyy-MM-dd HH:mm:ssXX", 1526400896000L); + validateTimestampMatch("2018-05-15 17:14:56+01:00", "TIMESTAMP_ISO8601", "\\b\\d{4}-\\d{2}-\\d{2}[T ]\\d{2}:\\d{2}", + "yyyy-MM-dd HH:mm:ssXXX", 1526400896000L); + validateTimestampMatch("2018-05-15 17:14:56", "TIMESTAMP_ISO8601", "\\b\\d{4}-\\d{2}-\\d{2}[T ]\\d{2}:\\d{2}", + "yyyy-MM-dd HH:mm:ss", 1526400896000L); + + validateTimestampMatch("2018-05-15 16:14Z", "TIMESTAMP_ISO8601", "\\b\\d{4}-\\d{2}-\\d{2}[T ]\\d{2}:\\d{2}", + "yyyy-MM-dd HH:mmXX", 1526400840000L); + validateTimestampMatch("2018-05-15 17:14+0100", "TIMESTAMP_ISO8601", "\\b\\d{4}-\\d{2}-\\d{2}[T ]\\d{2}:\\d{2}", + "yyyy-MM-dd HH:mmXX", 1526400840000L); + validateTimestampMatch("2018-05-15 17:14+01:00", "TIMESTAMP_ISO8601", "\\b\\d{4}-\\d{2}-\\d{2}[T ]\\d{2}:\\d{2}", + "yyyy-MM-dd HH:mmXXX", 1526400840000L); + validateTimestampMatch("2018-05-15 17:14", "TIMESTAMP_ISO8601", "\\b\\d{4}-\\d{2}-\\d{2}[T ]\\d{2}:\\d{2}", + "yyyy-MM-dd HH:mm", 1526400840000L); + } + + public void testFindFormatGivenOnlyKnownTimestampFormat() { // Note: some of the time formats give millisecond accuracy, some second accuracy and some minute accuracy - validateTimestampMatch(new TimestampMatch(0, "", "YYYY-MM-dd HH:mm:ss,SSS Z", "yyyy-MM-dd HH:mm:ss,SSS XX", - "\\b\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2},\\d{3}", "TOMCAT_DATESTAMP", ""), "2018-05-15 17:14:56,374 +0100", - 1526400896374L); - - validateTimestampMatch(new TimestampMatch(11, "", "EEE MMM dd YYYY HH:mm:ss zzz", "EEE MMM dd yyyy HH:mm:ss zzz", - "\\b[A-Z]\\S{2,8} [A-Z]\\S{2,8} \\d{1,2} \\d{4} \\d{2}:\\d{2}:\\d{2} ", "DATESTAMP_RFC822", ""), - "Tue May 15 2018 16:14:56 UTC", 1526400896000L); - validateTimestampMatch(new TimestampMatch(12, "", "EEE MMM dd YYYY HH:mm zzz", "EEE MMM dd yyyy HH:mm zzz", - "\\b[A-Z]\\S{2,8} [A-Z]\\S{2,8} \\d{1,2} \\d{4} \\d{2}:\\d{2} ", "DATESTAMP_RFC822", ""), - "Tue May 15 2018 16:14 UTC", 1526400840000L); - - validateTimestampMatch(new TimestampMatch(13, "", "EEE, dd MMM YYYY HH:mm:ss ZZ", "EEE, dd MMM yyyy HH:mm:ss XXX", - "\\b[A-Z]\\S{2,8}, \\d{1,2} [A-Z]\\S{2,8} \\d{4} \\d{2}:\\d{2}:\\d{2} ", "DATESTAMP_RFC2822", ""), - "Tue, 15 May 2018 17:14:56 +01:00", 1526400896000L); - validateTimestampMatch(new TimestampMatch(14, "", "EEE, dd MMM YYYY HH:mm:ss Z", "EEE, dd MMM yyyy HH:mm:ss XX", - "\\b[A-Z]\\S{2,8}, \\d{1,2} [A-Z]\\S{2,8} \\d{4} \\d{2}:\\d{2}:\\d{2} ", "DATESTAMP_RFC2822", ""), - "Tue, 15 May 2018 17:14:56 +0100", 1526400896000L); - validateTimestampMatch(new TimestampMatch(15, "", "EEE, dd MMM YYYY HH:mm ZZ", "EEE, dd MMM yyyy HH:mm XXX", - "\\b[A-Z]\\S{2,8}, \\d{1,2} [A-Z]\\S{2,8} \\d{4} \\d{2}:\\d{2} ", "DATESTAMP_RFC2822", ""), - "Tue, 15 May 2018 17:14 +01:00", 1526400840000L); - validateTimestampMatch(new TimestampMatch(16, "", "EEE, dd MMM YYYY HH:mm Z", "EEE, dd MMM yyyy HH:mm XX", - "\\b[A-Z]\\S{2,8}, \\d{1,2} [A-Z]\\S{2,8} \\d{4} \\d{2}:\\d{2} ", "DATESTAMP_RFC2822", ""), "Tue, 15 May 2018 17:14 +0100", - 1526400840000L); - - validateTimestampMatch(new TimestampMatch(17, "", "EEE MMM dd HH:mm:ss zzz YYYY", "EEE MMM dd HH:mm:ss zzz yyyy", - "\\b[A-Z]\\S{2,8} [A-Z]\\S{2,8} \\d{1,2} \\d{2}:\\d{2}:\\d{2} [A-Z]{3,4} \\d{4}\\b", "DATESTAMP_OTHER", ""), - "Tue May 15 16:14:56 UTC 2018", 1526400896000L); - validateTimestampMatch(new TimestampMatch(18, "", "EEE MMM dd HH:mm zzz YYYY", "EEE MMM dd HH:mm zzz yyyy", - "\\b[A-Z]\\S{2,8} [A-Z]\\S{2,8} \\d{1,2} \\d{2}:\\d{2} [A-Z]{3,4} \\d{4}\\b", "DATESTAMP_OTHER", ""), - "Tue May 15 16:14 UTC 2018", 1526400840000L); - - validateTimestampMatch(new TimestampMatch(19, "", "YYYYMMddHHmmss", "yyyyMMddHHmmss", "\\b\\d{14}\\b", - "DATESTAMP_EVENTLOG", ""), - "20180515171456", 1526400896000L); - - validateTimestampMatch(new TimestampMatch(20, "", "EEE MMM dd HH:mm:ss YYYY", "EEE MMM dd HH:mm:ss yyyy", - "\\b[A-Z]\\S{2,8} [A-Z]\\S{2,8} \\d{1,2} \\d{2}:\\d{2}:\\d{2} \\d{4}\\b", "HTTPDERROR_DATE", ""), - "Tue May 15 17:14:56 2018", 1526400896000L); - - validateTimestampMatch(new TimestampMatch(21, "", Arrays.asList("MMM dd HH:mm:ss.SSS", "MMM d HH:mm:ss.SSS"), - Arrays.asList("MMM dd HH:mm:ss.SSS", "MMM d HH:mm:ss.SSS"), - "\\b[A-Z]\\S{2,8} {1,2}\\d{1,2} \\d{2}:\\d{2}:\\d{2}\\.\\d{3}", "SYSLOGTIMESTAMP", ""), "May 15 17:14:56.725", 1526400896725L); - validateTimestampMatch(new TimestampMatch(22, "", Arrays.asList("MMM dd HH:mm:ss", "MMM d HH:mm:ss"), - Arrays.asList("MMM dd HH:mm:ss", "MMM d HH:mm:ss"), - "\\b[A-Z]\\S{2,8} {1,2}\\d{1,2} \\d{2}:\\d{2}:\\d{2}\\b", "SYSLOGTIMESTAMP", ""), "May 15 17:14:56", 1526400896000L); - - validateTimestampMatch(new TimestampMatch(23, "", "dd/MMM/YYYY:HH:mm:ss Z", "dd/MMM/yyyy:HH:mm:ss XX", - "\\b\\d{2}/[A-Z]\\S{2}/\\d{4}:\\d{2}:\\d{2}:\\d{2} ", "HTTPDATE", ""), "15/May/2018:17:14:56 +0100", 1526400896000L); - - validateTimestampMatch(new TimestampMatch(24, "", "MMM dd, YYYY h:mm:ss a", "MMM dd, yyyy h:mm:ss a", - "\\b[A-Z]\\S{2,8} \\d{1,2}, \\d{4} \\d{1,2}:\\d{2}:\\d{2} [AP]M\\b", "CATALINA_DATESTAMP", ""), "May 15, 2018 5:14:56 PM", - 1526400896000L); - - validateTimestampMatch(new TimestampMatch(25, "", Arrays.asList("MMM dd YYYY HH:mm:ss", "MMM d YYYY HH:mm:ss"), - Arrays.asList("MMM dd yyyy HH:mm:ss", "MMM d yyyy HH:mm:ss"), - "\\b[A-Z]\\S{2,8} {1,2}\\d{1,2} \\d{4} \\d{2}:\\d{2}:\\d{2}\\b", "CISCOTIMESTAMP", ""), "May 15 2018 17:14:56", - 1526400896000L); - } - - public void testFindFirstMatchGivenOnlySystemDate() { - - assertEquals(new TimestampMatch(26, "", "UNIX_MS", "UNIX_MS", "\\b\\d{13}\\b", "POSINT", ""), - TimestampFormatFinder.findFirstMatch("1526400896374", NOOP_TIMEOUT_CHECKER)); - assertEquals(new TimestampMatch(26, "", "UNIX_MS", "UNIX_MS", "\\b\\d{13}\\b", "POSINT", ""), - TimestampFormatFinder.findFirstFullMatch("1526400896374", NOOP_TIMEOUT_CHECKER)); - - assertEquals(new TimestampMatch(27, "", "UNIX", "UNIX", "\\b\\d{10}\\.\\d{3,9}\\b", "NUMBER", ""), - TimestampFormatFinder.findFirstMatch("1526400896.736", NOOP_TIMEOUT_CHECKER)); - assertEquals(new TimestampMatch(27, "", "UNIX", "UNIX", "\\b\\d{10}\\.\\d{3,9}\\b", "NUMBER", ""), - TimestampFormatFinder.findFirstFullMatch("1526400896.736", NOOP_TIMEOUT_CHECKER)); - assertEquals(new TimestampMatch(28, "", "UNIX", "UNIX", "\\b\\d{10}\\b", "POSINT", ""), - TimestampFormatFinder.findFirstMatch("1526400896", NOOP_TIMEOUT_CHECKER)); - assertEquals(new TimestampMatch(28, "", "UNIX", "UNIX", "\\b\\d{10}\\b", "POSINT", ""), - TimestampFormatFinder.findFirstFullMatch("1526400896", NOOP_TIMEOUT_CHECKER)); - - assertEquals(new TimestampMatch(29, "", "TAI64N", "TAI64N", "\\b[0-9A-Fa-f]{24}\\b", "BASE16NUM", ""), - TimestampFormatFinder.findFirstMatch("400000005afb159a164ac980", NOOP_TIMEOUT_CHECKER)); - assertEquals(new TimestampMatch(29, "", "TAI64N", "TAI64N", "\\b[0-9A-Fa-f]{24}\\b", "BASE16NUM", ""), - TimestampFormatFinder.findFirstFullMatch("400000005afb159a164ac980", NOOP_TIMEOUT_CHECKER)); - } - - public void testFindFirstMatchGivenRealLogMessages() { - - assertEquals(new TimestampMatch(9, "[", "ISO8601", "yyyy-MM-dd'T'HH:mm:ss,SSS", - "\\b\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2},\\d{3}", "TIMESTAMP_ISO8601", - "][INFO ][o.e.e.NodeEnvironment ] [node-0] heap size [3.9gb], compressed ordinary object pointers [true]"), - TimestampFormatFinder.findFirstMatch("[2018-05-11T17:07:29,553][INFO ][o.e.e.NodeEnvironment ] [node-0] " + - "heap size [3.9gb], compressed ordinary object pointers [true]", NOOP_TIMEOUT_CHECKER)); - - assertEquals(new TimestampMatch(23, "192.168.62.101 - - [", "dd/MMM/YYYY:HH:mm:ss Z", "dd/MMM/yyyy:HH:mm:ss XX", - "\\b\\d{2}/[A-Z]\\S{2}/\\d{4}:\\d{2}:\\d{2}:\\d{2} ", "HTTPDATE", - "] \"POST //apiserv:8080/engine/v2/jobs HTTP/1.1\" 201 42 \"-\" \"curl/7.46.0\" 384"), - TimestampFormatFinder.findFirstMatch("192.168.62.101 - - [29/Jun/2016:12:11:31 +0000] " + - "\"POST //apiserv:8080/engine/v2/jobs HTTP/1.1\" 201 42 \"-\" \"curl/7.46.0\" 384", NOOP_TIMEOUT_CHECKER)); - - assertEquals(new TimestampMatch(24, "", "MMM dd, YYYY h:mm:ss a", "MMM dd, yyyy h:mm:ss a", - "\\b[A-Z]\\S{2,8} \\d{1,2}, \\d{4} \\d{1,2}:\\d{2}:\\d{2} [AP]M\\b", "CATALINA_DATESTAMP", - " org.apache.tomcat.util.http.Parameters processParameters"), - TimestampFormatFinder.findFirstMatch("Aug 29, 2009 12:03:57 AM org.apache.tomcat.util.http.Parameters processParameters", - NOOP_TIMEOUT_CHECKER)); - - assertEquals(new TimestampMatch(22, "", Arrays.asList("MMM dd HH:mm:ss", "MMM d HH:mm:ss"), - Arrays.asList("MMM dd HH:mm:ss", "MMM d HH:mm:ss"), - "\\b[A-Z]\\S{2,8} {1,2}\\d{1,2} \\d{2}:\\d{2}:\\d{2}\\b", "SYSLOGTIMESTAMP", " esxi1.acme.com Vpxa: " + - "[3CB3FB90 verbose 'vpxavpxaInvtVm' opID=WFU-33d82c31] [VpxaInvtVmChangeListener] Guest DiskInfo Changed"), - TimestampFormatFinder.findFirstMatch("Oct 19 17:04:44 esxi1.acme.com Vpxa: [3CB3FB90 verbose 'vpxavpxaInvtVm' " + - "opID=WFU-33d82c31] [VpxaInvtVmChangeListener] Guest DiskInfo Changed", NOOP_TIMEOUT_CHECKER)); - - assertEquals(new TimestampMatch(10, "559550912540598297\t", "ISO8601", "ISO8601", "\\b\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2}", - "TIMESTAMP_ISO8601", - "\t2016-04-20T21:06:53Z\t38545844\tserv02nw07\t192.168.114.28\tAuthpriv\tInfo\tsshd\tsubsystem request for sftp"), - TimestampFormatFinder.findFirstMatch("559550912540598297\t2016-04-20T14:06:53\t2016-04-20T21:06:53Z\t38545844\tserv02nw07\t" + - "192.168.114.28\tAuthpriv\tInfo\tsshd\tsubsystem request for sftp", NOOP_TIMEOUT_CHECKER)); - - assertEquals(new TimestampMatch(22, "", Arrays.asList("MMM dd HH:mm:ss", "MMM d HH:mm:ss"), - Arrays.asList("MMM dd HH:mm:ss", "MMM d HH:mm:ss"), - "\\b[A-Z]\\S{2,8} {1,2}\\d{1,2} \\d{2}:\\d{2}:\\d{2}\\b", "SYSLOGTIMESTAMP", - " dnsserv named[22529]: error (unexpected RCODE REFUSED) resolving 'www.elastic.co/A/IN': 95.110.68.206#53"), - TimestampFormatFinder.findFirstMatch("Sep 8 11:55:35 dnsserv named[22529]: error (unexpected RCODE REFUSED) resolving " + - "'www.elastic.co/A/IN': 95.110.68.206#53", NOOP_TIMEOUT_CHECKER)); - - assertEquals(new TimestampMatch(3, "", "YYYY-MM-dd HH:mm:ss.SSSSSS", "yyyy-MM-dd HH:mm:ss.SSSSSS", - "\\b\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2}\\.\\d{3}", "TIMESTAMP_ISO8601", - "|INFO |VirtualServer |1 |client 'User1'(id:2) was added to channelgroup 'Channel Admin'(id:5) by client " + - "'User1'(id:2) in channel '3er Instanz'(id:2)"), - TimestampFormatFinder.findFirstMatch("2018-01-06 19:22:20.106822|INFO |VirtualServer |1 |client " + - " 'User1'(id:2) was added to channelgroup 'Channel Admin'(id:5) by client 'User1'(id:2) in channel '3er Instanz'(id:2)", - NOOP_TIMEOUT_CHECKER)); + validateTimestampMatch("2018-05-15 17:14:56,374 +0100", "TOMCAT_DATESTAMP", + "\\b\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2}[:.,]\\d{3}", "yyyy-MM-dd HH:mm:ss,SSS XX", 1526400896374L); + + validateTimestampMatch("Tue May 15 18 16:14:56 UTC", "DATESTAMP_RFC822", + "\\b[A-Z]\\S{2} [A-Z]\\S{2} \\d{1,2} \\d{2} \\d{2}:\\d{2}:\\d{2}\\b", + Arrays.asList("EEE MMM dd yy HH:mm:ss zzz", "EEE MMM d yy HH:mm:ss zzz"), 1526400896000L); + + validateTimestampMatch("Tue, 15 May 2018 17:14:56 +01:00", "DATESTAMP_RFC2822", + "\\b[A-Z]\\S{2}, \\d{1,2} [A-Z]\\S{2} \\d{4} \\d{2}:\\d{2}:\\d{2}\\b", "EEE, dd MMM yyyy HH:mm:ss XXX", 1526400896000L); + validateTimestampMatch("Tue, 15 May 2018 17:14:56 +0100", "DATESTAMP_RFC2822", + "\\b[A-Z]\\S{2}, \\d{1,2} [A-Z]\\S{2} \\d{4} \\d{2}:\\d{2}:\\d{2}\\b", "EEE, dd MMM yyyy HH:mm:ss XX", 1526400896000L); + + validateTimestampMatch("Tue May 15 16:14:56 UTC 2018", "DATESTAMP_OTHER", + "\\b[A-Z]\\S{2,8} [A-Z]\\S{2,8} \\d{1,2} \\d{2}:\\d{2}:\\d{2}\\b", + Arrays.asList("EEE MMM dd HH:mm:ss zzz yyyy", "EEE MMM d HH:mm:ss zzz yyyy"), 1526400896000L); + + validateTimestampMatch("20180515171456", "DATESTAMP_EVENTLOG", "\\b\\d{14}\\b", "yyyyMMddHHmmss", 1526400896000L); + + validateTimestampMatch("Tue May 15 17:14:56 2018", "HTTPDERROR_DATE", + "\\b[A-Z]\\S{2} [A-Z]\\S{2} \\d{2} \\d{2}:\\d{2}:\\d{2} \\d{4}\\b", "EEE MMM dd HH:mm:ss yyyy", 1526400896000L); + + validateTimestampMatch("May 15 17:14:56.725", "SYSLOGTIMESTAMP", "\\b[A-Z]\\S{2,8} {1,2}\\d{1,2} \\d{2}:\\d{2}:\\d{2}\\b", + Arrays.asList("MMM dd HH:mm:ss.SSS", "MMM d HH:mm:ss.SSS"), 1526400896725L); + validateTimestampMatch("May 15 17:14:56", "SYSLOGTIMESTAMP", "\\b[A-Z]\\S{2,8} {1,2}\\d{1,2} \\d{2}:\\d{2}:\\d{2}\\b", + Arrays.asList("MMM dd HH:mm:ss", "MMM d HH:mm:ss"), 1526400896000L); + + validateTimestampMatch("15/May/2018:17:14:56 +0100", "HTTPDATE", "\\b\\d{2}/[A-Z]\\S{2}/\\d{4}:\\d{2}:\\d{2}:\\d{2} ", + "dd/MMM/yyyy:HH:mm:ss XX", 1526400896000L); + + validateTimestampMatch("May 15, 2018 5:14:56 PM", "CATALINA_DATESTAMP", + "\\b[A-Z]\\S{2} \\d{2}, \\d{4} \\d{1,2}:\\d{2}:\\d{2} [AP]M\\b", "MMM dd, yyyy h:mm:ss a", 1526400896000L); + + validateTimestampMatch("May 15 2018 17:14:56", "CISCOTIMESTAMP", "\\b[A-Z]\\S{2} {1,2}\\d{1,2} \\d{4} \\d{2}:\\d{2}:\\d{2}\\b", + Arrays.asList("MMM dd yyyy HH:mm:ss", "MMM d yyyy HH:mm:ss"), 1526400896000L); + + validateTimestampMatch("05/15/2018 17:14:56,374", "DATESTAMP", + "\\b\\d{1,2}[/.-]\\d{1,2}[/.-]\\d{4}[- ]\\d{2}:\\d{2}:\\d{2}\\b", "MM/dd/yyyy HH:mm:ss,SSS", 1526400896374L); + validateTimestampMatch("05-15-2018-17:14:56.374", "DATESTAMP", + "\\b\\d{1,2}[/.-]\\d{1,2}[/.-]\\d{4}[- ]\\d{2}:\\d{2}:\\d{2}\\b", "MM-dd-yyyy-HH:mm:ss.SSS", 1526400896374L); + validateTimestampMatch("15/05/2018 17:14:56.374", "DATESTAMP", + "\\b\\d{1,2}[/.-]\\d{1,2}[/.-]\\d{4}[- ]\\d{2}:\\d{2}:\\d{2}\\b", "dd/MM/yyyy HH:mm:ss.SSS", 1526400896374L); + validateTimestampMatch("15-05-2018-17:14:56,374", "DATESTAMP", + "\\b\\d{1,2}[/.-]\\d{1,2}[/.-]\\d{4}[- ]\\d{2}:\\d{2}:\\d{2}\\b", "dd-MM-yyyy-HH:mm:ss,SSS", 1526400896374L); + validateTimestampMatch("15.05.2018 17:14:56.374", "DATESTAMP", + "\\b\\d{1,2}[/.-]\\d{1,2}[/.-]\\d{4}[- ]\\d{2}:\\d{2}:\\d{2}\\b", "dd.MM.yyyy HH:mm:ss.SSS", 1526400896374L); + validateTimestampMatch("05/15/2018 17:14:56", "DATESTAMP", + "\\b\\d{1,2}[/.-]\\d{1,2}[/.-]\\d{4}[- ]\\d{2}:\\d{2}:\\d{2}\\b", "MM/dd/yyyy HH:mm:ss", 1526400896000L); + validateTimestampMatch("05-15-2018-17:14:56", "DATESTAMP", + "\\b\\d{1,2}[/.-]\\d{1,2}[/.-]\\d{4}[- ]\\d{2}:\\d{2}:\\d{2}\\b", "MM-dd-yyyy-HH:mm:ss", 1526400896000L); + validateTimestampMatch("15/05/2018 17:14:56", "DATESTAMP", + "\\b\\d{1,2}[/.-]\\d{1,2}[/.-]\\d{4}[- ]\\d{2}:\\d{2}:\\d{2}\\b", "dd/MM/yyyy HH:mm:ss", 1526400896000L); + validateTimestampMatch("15-05-2018-17:14:56", "DATESTAMP", + "\\b\\d{1,2}[/.-]\\d{1,2}[/.-]\\d{4}[- ]\\d{2}:\\d{2}:\\d{2}\\b", "dd-MM-yyyy-HH:mm:ss", 1526400896000L); + validateTimestampMatch("15.05.2018 17:14:56", "DATESTAMP", + "\\b\\d{1,2}[/.-]\\d{1,2}[/.-]\\d{4}[- ]\\d{2}:\\d{2}:\\d{2}\\b", "dd.MM.yyyy HH:mm:ss", 1526400896000L); + + validateTimestampMatch("05/15/2018", "DATE", "\\b\\d{1,2}[/.-]\\d{1,2}[/.-]\\d{4}\\b", "MM/dd/yyyy", 1526338800000L); + validateTimestampMatch("05-15-2018", "DATE", "\\b\\d{1,2}[/.-]\\d{1,2}[/.-]\\d{4}\\b", "MM-dd-yyyy", 1526338800000L); + validateTimestampMatch("15/05/2018", "DATE", "\\b\\d{1,2}[/.-]\\d{1,2}[/.-]\\d{4}\\b", "dd/MM/yyyy", 1526338800000L); + validateTimestampMatch("15-05-2018", "DATE", "\\b\\d{1,2}[/.-]\\d{1,2}[/.-]\\d{4}\\b", "dd-MM-yyyy", 1526338800000L); + validateTimestampMatch("15.05.2018", "DATE", "\\b\\d{1,2}[/.-]\\d{1,2}[/.-]\\d{4}\\b", "dd.MM.yyyy", 1526338800000L); + } + + public void testFindFormatGivenOnlySystemDate() { + + validateTimestampMatch("1526400896374", "POSINT", "\\b\\d{13}\\b", "UNIX_MS", 1526400896374L); + + validateTimestampMatch("1526400896.736", "NUMBER", "\\b\\d{10}\\b", "UNIX", 1526400896736L); + validateTimestampMatch("1526400896", "NUMBER", "\\b\\d{10}\\b", "UNIX", 1526400896000L); + + validateTimestampMatch("400000005afb078a164ac980", "BASE16NUM", "\\b[0-9A-Fa-f]{24}\\b", "TAI64N", 1526400896374L); + } + + public void testCustomOverrideMatchingBuiltInFormat() { + + String overrideFormat = "yyyy-MM-dd HH:mm:ss,SSS"; + String text = "2018-05-15 17:14:56,374"; + String expectedSimpleRegex = "\\b\\d{4}-\\d{2}-\\d{2}[T ]\\d{2}:\\d{2}"; + String expectedGrokPatternName = "TIMESTAMP_ISO8601"; + + TimestampFormatFinder strictTimestampFormatFinder = new TimestampFormatFinder(explanation, overrideFormat, true, true, true, + NOOP_TIMEOUT_CHECKER); + strictTimestampFormatFinder.addSample(text); + assertEquals(expectedGrokPatternName, strictTimestampFormatFinder.getGrokPatternName()); + assertEquals(Collections.emptyMap(), strictTimestampFormatFinder.getCustomGrokPatternDefinitions()); + assertEquals(expectedSimpleRegex, strictTimestampFormatFinder.getSimplePattern().pattern()); + assertEquals(Collections.singletonList(overrideFormat), strictTimestampFormatFinder.getJavaTimestampFormats()); + assertEquals(1, strictTimestampFormatFinder.getNumMatchedFormats()); + + TimestampFormatFinder lenientTimestampFormatFinder = new TimestampFormatFinder(explanation, overrideFormat, false, false, false, + NOOP_TIMEOUT_CHECKER); + lenientTimestampFormatFinder.addSample(text); + lenientTimestampFormatFinder.selectBestMatch(); + assertEquals(expectedGrokPatternName, lenientTimestampFormatFinder.getGrokPatternName()); + assertEquals(Collections.emptyMap(), lenientTimestampFormatFinder.getCustomGrokPatternDefinitions()); + assertEquals(expectedSimpleRegex, lenientTimestampFormatFinder.getSimplePattern().pattern()); + assertEquals(Collections.singletonList(overrideFormat), lenientTimestampFormatFinder.getJavaTimestampFormats()); + assertEquals(1, lenientTimestampFormatFinder.getNumMatchedFormats()); + } + + public void testCustomOverrideNotMatchingBuiltInFormat() { + + String overrideFormat = "MM/dd HH.mm.ss,SSSSSS 'in' yyyy"; + String text = "05/15 17.14.56,374946 in 2018"; + String expectedSimpleRegex = "\\b\\d{2}/\\d{2} \\d{2}\\.\\d{2}\\.\\d{2},\\d{6} in \\d{4}\\b"; + String expectedGrokPatternName = "CUSTOM_TIMESTAMP"; + Map expectedCustomGrokPatternDefinitions = + Collections.singletonMap(TimestampFormatFinder.CUSTOM_TIMESTAMP_GROK_NAME, + "%{MONTHNUM2}/%{MONTHDAY} %{HOUR}\\.%{MINUTE}\\.%{SECOND} in %{YEAR}"); + + TimestampFormatFinder strictTimestampFormatFinder = new TimestampFormatFinder(explanation, overrideFormat, true, true, true, + NOOP_TIMEOUT_CHECKER); + strictTimestampFormatFinder.addSample(text); + assertEquals(expectedGrokPatternName, strictTimestampFormatFinder.getGrokPatternName()); + assertEquals(expectedCustomGrokPatternDefinitions, strictTimestampFormatFinder.getCustomGrokPatternDefinitions()); + assertEquals(expectedSimpleRegex, strictTimestampFormatFinder.getSimplePattern().pattern()); + assertEquals(Collections.singletonList(overrideFormat), strictTimestampFormatFinder.getJavaTimestampFormats()); + assertEquals(1, strictTimestampFormatFinder.getNumMatchedFormats()); + + TimestampFormatFinder lenientTimestampFormatFinder = new TimestampFormatFinder(explanation, overrideFormat, false, false, false, + NOOP_TIMEOUT_CHECKER); + lenientTimestampFormatFinder.addSample(text); + lenientTimestampFormatFinder.selectBestMatch(); + assertEquals(expectedGrokPatternName, lenientTimestampFormatFinder.getGrokPatternName()); + assertEquals(expectedCustomGrokPatternDefinitions, lenientTimestampFormatFinder.getCustomGrokPatternDefinitions()); + assertEquals(expectedSimpleRegex, lenientTimestampFormatFinder.getSimplePattern().pattern()); + assertEquals(Collections.singletonList(overrideFormat), lenientTimestampFormatFinder.getJavaTimestampFormats()); + assertEquals(1, lenientTimestampFormatFinder.getNumMatchedFormats()); + } + + public void testFindFormatGivenRealLogMessages() { + + validateFindInFullMessage("[2018-05-11T17:07:29,553][INFO ][o.e.e.NodeEnvironment ] [node-0] " + + "heap size [3.9gb], compressed ordinary object pointers [true]", "[", "TIMESTAMP_ISO8601", + "\\b\\d{4}-\\d{2}-\\d{2}[T ]\\d{2}:\\d{2}", "ISO8601"); + + validateFindInFullMessage("192.168.62.101 - - [29/Jun/2016:12:11:31 +0000] " + + "\"POST //apiserv:8080/engine/v2/jobs HTTP/1.1\" 201 42 \"-\" \"curl/7.46.0\" 384", "192.168.62.101 - - [", "HTTPDATE", + "\\b\\d{2}/[A-Z]\\S{2}/\\d{4}:\\d{2}:\\d{2}:\\d{2} ", "dd/MMM/yyyy:HH:mm:ss XX"); + + validateFindInFullMessage("Aug 29, 2009 12:03:57 AM org.apache.tomcat.util.http.Parameters processParameters", "", + "CATALINA_DATESTAMP", "\\b[A-Z]\\S{2} \\d{2}, \\d{4} \\d{1,2}:\\d{2}:\\d{2} [AP]M\\b", "MMM dd, yyyy h:mm:ss a"); + + validateFindInFullMessage("Oct 19 17:04:44 esxi1.acme.com Vpxa: [3CB3FB90 verbose 'vpxavpxaInvtVm' " + + "opID=WFU-33d82c31] [VpxaInvtVmChangeListener] Guest DiskInfo Changed", "", "SYSLOGTIMESTAMP", + "\\b[A-Z]\\S{2,8} {1,2}\\d{1,2} \\d{2}:\\d{2}:\\d{2}\\b", Arrays.asList("MMM dd HH:mm:ss", "MMM d HH:mm:ss")); + + validateFindInFullMessage("559550912540598297\t2016-04-20T14:06:53\t2016-04-20T21:06:53Z\t38545844\tserv02nw07\t" + + "192.168.114.28\tAuthpriv\tInfo\tsshd\tsubsystem request for sftp", "559550912540598297\t", "TIMESTAMP_ISO8601", + "\\b\\d{4}-\\d{2}-\\d{2}[T ]\\d{2}:\\d{2}", "ISO8601"); + + validateFindInFullMessage("Sep 8 11:55:35 dnsserv named[22529]: error (unexpected RCODE REFUSED) resolving " + + "'www.elastic.co/A/IN': 95.110.68.206#53", "", "SYSLOGTIMESTAMP", "\\b[A-Z]\\S{2,8} {1,2}\\d{1,2} \\d{2}:\\d{2}:\\d{2}\\b", + Arrays.asList("MMM dd HH:mm:ss", "MMM d HH:mm:ss")); + + validateFindInFullMessage("10-28-2016 16:22:47.636 +0200 ERROR Network - " + + "Error encountered for connection from src=192.168.0.1:12345. Local side shutting down", "", "DATESTAMP", + "\\b\\d{1,2}[/.-]\\d{1,2}[/.-]\\d{4}[- ]\\d{2}:\\d{2}:\\d{2}\\b", "MM-dd-yyyy HH:mm:ss.SSS"); + + validateFindInFullMessage("2018-01-06 19:22:20.106822|INFO |VirtualServer |1 |client " + + " 'User1'(id:2) was added to channelgroup 'Channel Admin'(id:5) by client 'User1'(id:2) in channel '3er Instanz'(id:2)", "", + "TIMESTAMP_ISO8601", "\\b\\d{4}-\\d{2}-\\d{2}[T ]\\d{2}:\\d{2}", "yyyy-MM-dd HH:mm:ss.SSSSSS"); // Differs from the above as the required format is specified - assertEquals(new TimestampMatch(3, "", "YYYY-MM-dd HH:mm:ss.SSSSSS", "yyyy-MM-dd HH:mm:ss.SSSSSS", - "\\b\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2}\\.\\d{3}", "TIMESTAMP_ISO8601", - "|INFO |VirtualServer |1 |client 'User1'(id:2) was added to channelgroup 'Channel Admin'(id:5) by client " + - "'User1'(id:2) in channel '3er Instanz'(id:2)"), - TimestampFormatFinder.findFirstMatch("2018-01-06 19:22:20.106822|INFO |VirtualServer |1 |client " + - " 'User1'(id:2) was added to channelgroup 'Channel Admin'(id:5) by client 'User1'(id:2) in channel '3er Instanz'(id:2)", - randomFrom("YYYY-MM-dd HH:mm:ss.SSSSSS", "yyyy-MM-dd HH:mm:ss.SSSSSS"), NOOP_TIMEOUT_CHECKER)); + validateFindInFullMessage("yyyy-MM-dd HH:mm:ss.SSSSSS", "2018-01-06 19:22:20.106822|INFO |VirtualServer |1 |client " + + " 'User1'(id:2) was added to channelgroup 'Channel Admin'(id:5) by client 'User1'(id:2) in channel '3er Instanz'(id:2)", "", + "TIMESTAMP_ISO8601", "\\b\\d{4}-\\d{2}-\\d{2}[T ]\\d{2}:\\d{2}", "yyyy-MM-dd HH:mm:ss.SSSSSS"); // Non-matching required format specified - assertNull(TimestampFormatFinder.findFirstMatch("2018-01-06 19:22:20.106822|INFO |VirtualServer |1 |client " + - " 'User1'(id:2) was added to channelgroup 'Channel Admin'(id:5) by client 'User1'(id:2) in channel '3er Instanz'(id:2)", - randomFrom("UNIX", "EEE MMM dd YYYY HH:mm zzz"), NOOP_TIMEOUT_CHECKER)); - } - - public void testAdjustRequiredFormat() { - assertEquals("YYYY-MM-dd HH:mm:ss,SSS Z", TimestampFormatFinder.adjustRequiredFormat("YYYY-MM-dd HH:mm:ss,SSS Z")); - assertEquals("YYYY-MM-dd HH:mm:ss,SSS Z", TimestampFormatFinder.adjustRequiredFormat("YYYY-MM-dd HH:mm:ss,SSSSSS Z")); - assertEquals("YYYY-MM-dd HH:mm:ss,SSS Z", TimestampFormatFinder.adjustRequiredFormat("YYYY-MM-dd HH:mm:ss,SSSSSSSSS Z")); - assertEquals("YYYY-MM-dd HH:mm:ss,SSS Z", TimestampFormatFinder.adjustRequiredFormat("YYYY-MM-dd HH:mm:ss.SSS Z")); - assertEquals("YYYY-MM-dd HH:mm:ss,SSS Z", TimestampFormatFinder.adjustRequiredFormat("YYYY-MM-dd HH:mm:ss.SSSSSS Z")); - assertEquals("YYYY-MM-dd HH:mm:ss,SSS Z", TimestampFormatFinder.adjustRequiredFormat("YYYY-MM-dd HH:mm:ss.SSSSSSSSS Z")); - assertEquals("YYYY-MM-dd HH:mm:ss,SSS", TimestampFormatFinder.adjustRequiredFormat("YYYY-MM-dd HH:mm:ss,SSS")); - assertEquals("YYYY-MM-dd HH:mm:ss,SSS", TimestampFormatFinder.adjustRequiredFormat("YYYY-MM-dd HH:mm:ss,SSSSSS")); - assertEquals("YYYY-MM-dd HH:mm:ss,SSS", TimestampFormatFinder.adjustRequiredFormat("YYYY-MM-dd HH:mm:ss,SSSSSSSSS")); - assertEquals("YYYY-MM-dd HH:mm:ss,SSS", TimestampFormatFinder.adjustRequiredFormat("YYYY-MM-dd HH:mm:ss.SSS")); - assertEquals("YYYY-MM-dd HH:mm:ss,SSS", TimestampFormatFinder.adjustRequiredFormat("YYYY-MM-dd HH:mm:ss.SSSSSS")); - assertEquals("YYYY-MM-dd HH:mm:ss,SSS", TimestampFormatFinder.adjustRequiredFormat("YYYY-MM-dd HH:mm:ss.SSSSSSSSS")); - } - - public void testInterpretFractionalSeconds() { - assertEquals(new Tuple<>(',', 0), TimestampFormatFinder.interpretFractionalSeconds("Sep 8 11:55:35")); - assertEquals(new Tuple<>(',', 0), TimestampFormatFinder.interpretFractionalSeconds("29/Jun/2016:12:11:31 +0000")); - assertEquals(new Tuple<>('.', 6), TimestampFormatFinder.interpretFractionalSeconds("2018-01-06 17:21:25.764368")); - assertEquals(new Tuple<>(',', 9), TimestampFormatFinder.interpretFractionalSeconds("2018-01-06T17:21:25,764363438")); - assertEquals(new Tuple<>(',', 3), TimestampFormatFinder.interpretFractionalSeconds("2018-01-06T17:21:25,764")); - assertEquals(new Tuple<>('.', 3), TimestampFormatFinder.interpretFractionalSeconds("2018-01-06T17:21:25.764")); - assertEquals(new Tuple<>('.', 6), TimestampFormatFinder.interpretFractionalSeconds("2018-01-06 17:21:25.764368Z")); - assertEquals(new Tuple<>(',', 9), TimestampFormatFinder.interpretFractionalSeconds("2018-01-06T17:21:25,764363438Z")); - assertEquals(new Tuple<>(',', 3), TimestampFormatFinder.interpretFractionalSeconds("2018-01-06T17:21:25,764Z")); - assertEquals(new Tuple<>('.', 3), TimestampFormatFinder.interpretFractionalSeconds("2018-01-06T17:21:25.764Z")); - assertEquals(new Tuple<>('.', 6), TimestampFormatFinder.interpretFractionalSeconds("2018-01-06 17:21:25.764368 Z")); - assertEquals(new Tuple<>(',', 9), TimestampFormatFinder.interpretFractionalSeconds("2018-01-06T17:21:25,764363438 Z")); - assertEquals(new Tuple<>(',', 3), TimestampFormatFinder.interpretFractionalSeconds("2018-01-06T17:21:25,764 Z")); - assertEquals(new Tuple<>('.', 3), TimestampFormatFinder.interpretFractionalSeconds("2018-01-06T17:21:25.764 Z")); - } - - private void validateTimestampMatch(TimestampMatch expected, String text, long expectedEpochMs) { - - assertEquals(expected, TimestampFormatFinder.findFirstMatch(text, NOOP_TIMEOUT_CHECKER)); - assertEquals(expected, TimestampFormatFinder.findFirstFullMatch(text, NOOP_TIMEOUT_CHECKER)); - assertEquals(expected, TimestampFormatFinder.findFirstMatch(text, expected.candidateIndex, NOOP_TIMEOUT_CHECKER)); - assertEquals(expected, TimestampFormatFinder.findFirstFullMatch(text, expected.candidateIndex, NOOP_TIMEOUT_CHECKER)); - assertNull(TimestampFormatFinder.findFirstMatch(text, Integer.MAX_VALUE, NOOP_TIMEOUT_CHECKER)); - assertNull(TimestampFormatFinder.findFirstFullMatch(text, Integer.MAX_VALUE, NOOP_TIMEOUT_CHECKER)); - assertEquals(expected, TimestampFormatFinder.findFirstMatch(text, randomFrom(expected.jodaTimestampFormats), NOOP_TIMEOUT_CHECKER)); - assertEquals(expected, TimestampFormatFinder.findFirstFullMatch(text, randomFrom(expected.jodaTimestampFormats), - NOOP_TIMEOUT_CHECKER)); - assertEquals(expected, TimestampFormatFinder.findFirstMatch(text, randomFrom(expected.javaTimestampFormats), NOOP_TIMEOUT_CHECKER)); - assertEquals(expected, TimestampFormatFinder.findFirstFullMatch(text, randomFrom(expected.javaTimestampFormats), - NOOP_TIMEOUT_CHECKER)); - assertNull(TimestampFormatFinder.findFirstMatch(text, "wrong format", NOOP_TIMEOUT_CHECKER)); - assertNull(TimestampFormatFinder.findFirstFullMatch(text, "wrong format", NOOP_TIMEOUT_CHECKER)); - - validateJodaTimestampFormats(expected.jodaTimestampFormats, text, expectedEpochMs); - validateJavaTimestampFormats(expected.javaTimestampFormats, text, expectedEpochMs); - - assertTrue(expected.simplePattern.matcher(text).find()); - } - - // This is because parsing timestamps using Joda formats generates warnings. - // Eventually we'll probably just remove the checks that the Joda formats - // are valid, and at that point this method can be removed too. - protected boolean enableWarningsCheck() { - return false; - } - - // This method is using the Joda BWC layer. When that's removed, this method - // can be deleted - we'll just validate the Java time formats after that. - // Also remove enableWarningsCheck() above if this method is removed. - private void validateJodaTimestampFormats(List jodaTimestampFormats, String text, long expectedEpochMs) { + TimestampFormatFinder timestampFormatFinder = new TimestampFormatFinder(explanation, + randomFrom("UNIX", "EEE MMM dd yyyy HH:mm zzz"), false, false, false, NOOP_TIMEOUT_CHECKER); + timestampFormatFinder.addSample("2018-01-06 19:22:20.106822|INFO |VirtualServer |1 |client " + + " 'User1'(id:2) was added to channelgroup 'Channel Admin'(id:5) by client 'User1'(id:2) in channel '3er Instanz'(id:2)"); + assertEquals(Collections.emptyList(), timestampFormatFinder.getJavaTimestampFormats()); + assertEquals(0, timestampFormatFinder.getNumMatchedFormats()); + } - // All the test times are for Tue May 15 2018 16:14:56 UTC, which is 17:14:56 in London. - // This is the timezone that will be used for any text representations that don't include it. - ZoneId defaultZone = ZoneId.of("Europe/London"); - long actualEpochMs; - for (int i = 0; i < jodaTimestampFormats.size(); ++i) { - try { - String timestampFormat = jodaTimestampFormats.get(i); - switch (timestampFormat) { - case "ISO8601": - actualEpochMs = Joda.forPattern("date_optional_time").withZone(defaultZone).parseMillis(text); - break; - default: - actualEpochMs = Joda.forPattern(timestampFormat).withYear(2018).withZone(defaultZone).parseMillis(text); - break; - } - if (expectedEpochMs == actualEpochMs) { - break; - } - // If the last one isn't right then propagate - if (i == jodaTimestampFormats.size() - 1) { - assertEquals(expectedEpochMs, actualEpochMs); - } - } catch (RuntimeException e) { - // If the last one throws then propagate - if (i == jodaTimestampFormats.size() - 1) { - throw e; - } - } + + public void testSelectBestMatchGivenAllSame() { + String sample = "[2018-06-27T11:59:22,125][INFO ][o.e.n.Node ] [node-0] initializing ...\n" + + "[2018-06-27T11:59:22,201][INFO ][o.e.e.NodeEnvironment ] [node-0] using [1] data paths, mounts [[/ (/dev/disk1)]], " + + "net usable_space [216.1gb], net total_space [464.7gb], types [hfs]\n" + + "[2018-06-27T11:59:22,202][INFO ][o.e.e.NodeEnvironment ] [node-0] heap size [494.9mb], " + + "compressed ordinary object pointers [true]\n" + + "[2018-06-27T11:59:22,204][INFO ][o.e.n.Node ] [node-0] node name [node-0], node ID [Ha1gD8nNSDqjd6PIyu3DJA]\n" + + "[2018-06-27T11:59:22,204][INFO ][o.e.n.Node ] [node-0] version[6.4.0-SNAPSHOT], pid[2785], " + + "build[default/zip/3c60efa/2018-06-26T14:55:15.206676Z], OS[Mac OS X/10.12.6/x86_64], " + + "JVM[\"Oracle Corporation\"/Java HotSpot(TM) 64-Bit Server VM/10/10+46]\n" + + "[2018-06-27T11:59:22,205][INFO ][o.e.n.Node ] [node-0] JVM arguments [-Xms1g, -Xmx1g, " + + "-XX:+UseConcMarkSweepGC, -XX:CMSInitiatingOccupancyFraction=75, -XX:+UseCMSInitiatingOccupancyOnly, " + + "-XX:+AlwaysPreTouch, -Xss1m, -Djava.awt.headless=true, -Dfile.encoding=UTF-8, -Djna.nosys=true, " + + "-XX:-OmitStackTraceInFastThrow, -Dio.netty.noUnsafe=true, -Dio.netty.noKeySetOptimization=true, " + + "-Dio.netty.recycler.maxCapacityPerThread=0, -Dlog4j.shutdownHookEnabled=false, -Dlog4j2.disable.jmx=true, " + + "-Djava.io.tmpdir=/var/folders/k5/5sqcdlps5sg3cvlp783gcz740000h0/T/elasticsearch.nFUyeMH1, " + + "-XX:+HeapDumpOnOutOfMemoryError, -XX:HeapDumpPath=data, -XX:ErrorFile=logs/hs_err_pid%p.log, " + + "-Xlog:gc*,gc+age=trace,safepoint:file=logs/gc.log:utctime,pid,tags:filecount=32,filesize=64m, " + + "-Djava.locale.providers=COMPAT, -Dio.netty.allocator.type=unpooled, -ea, -esa, -Xms512m, -Xmx512m, " + + "-Des.path.home=/Users/dave/elasticsearch/distribution/build/cluster/run node0/elasticsearch-6.4.0-SNAPSHOT, " + + "-Des.path.conf=/Users/dave/elasticsearch/distribution/build/cluster/run node0/elasticsearch-6.4.0-SNAPSHOT/config, " + + "-Des.distribution.flavor=default, -Des.distribution.type=zip]\n" + + "[2018-06-27T11:59:22,205][WARN ][o.e.n.Node ] [node-0] version [6.4.0-SNAPSHOT] is a pre-release version of " + + "Elasticsearch and is not suitable for production\n" + + "[2018-06-27T11:59:23,585][INFO ][o.e.p.PluginsService ] [node-0] loaded module [aggs-matrix-stats]\n" + + "[2018-06-27T11:59:23,586][INFO ][o.e.p.PluginsService ] [node-0] loaded module [analysis-common]\n" + + "[2018-06-27T11:59:23,586][INFO ][o.e.p.PluginsService ] [node-0] loaded module [ingest-common]\n" + + "[2018-06-27T11:59:23,586][INFO ][o.e.p.PluginsService ] [node-0] loaded module [lang-expression]\n" + + "[2018-06-27T11:59:23,586][INFO ][o.e.p.PluginsService ] [node-0] loaded module [lang-mustache]\n" + + "[2018-06-27T11:59:23,586][INFO ][o.e.p.PluginsService ] [node-0] loaded module [lang-painless]\n" + + "[2018-06-27T11:59:23,586][INFO ][o.e.p.PluginsService ] [node-0] loaded module [mapper-extras]\n" + + "[2018-06-27T11:59:23,586][INFO ][o.e.p.PluginsService ] [node-0] loaded module [parent-join]\n" + + "[2018-06-27T11:59:23,586][INFO ][o.e.p.PluginsService ] [node-0] loaded module [percolator]\n" + + "[2018-06-27T11:59:23,586][INFO ][o.e.p.PluginsService ] [node-0] loaded module [rank-eval]\n" + + "[2018-06-27T11:59:23,586][INFO ][o.e.p.PluginsService ] [node-0] loaded module [reindex]\n" + + "[2018-06-27T11:59:23,586][INFO ][o.e.p.PluginsService ] [node-0] loaded module [repository-url]\n" + + "[2018-06-27T11:59:23,587][INFO ][o.e.p.PluginsService ] [node-0] loaded module [transport-netty4]\n" + + "[2018-06-27T11:59:23,587][INFO ][o.e.p.PluginsService ] [node-0] loaded module [x-pack-core]\n" + + "[2018-06-27T11:59:23,587][INFO ][o.e.p.PluginsService ] [node-0] loaded module [x-pack-deprecation]\n" + + "[2018-06-27T11:59:23,587][INFO ][o.e.p.PluginsService ] [node-0] loaded module [x-pack-graph]\n" + + "[2018-06-27T11:59:23,587][INFO ][o.e.p.PluginsService ] [node-0] loaded module [x-pack-logstash]\n" + + "[2018-06-27T11:59:23,587][INFO ][o.e.p.PluginsService ] [node-0] loaded module [x-pack-ml]\n" + + "[2018-06-27T11:59:23,587][INFO ][o.e.p.PluginsService ] [node-0] loaded module [x-pack-monitoring]\n" + + "[2018-06-27T11:59:23,587][INFO ][o.e.p.PluginsService ] [node-0] loaded module [x-pack-rollup]\n" + + "[2018-06-27T11:59:23,587][INFO ][o.e.p.PluginsService ] [node-0] loaded module [x-pack-security]\n" + + "[2018-06-27T11:59:23,587][INFO ][o.e.p.PluginsService ] [node-0] loaded module [x-pack-sql]\n" + + "[2018-06-27T11:59:23,588][INFO ][o.e.p.PluginsService ] [node-0] loaded module [x-pack-upgrade]\n" + + "[2018-06-27T11:59:23,588][INFO ][o.e.p.PluginsService ] [node-0] loaded module [x-pack-watcher]\n" + + "[2018-06-27T11:59:23,588][INFO ][o.e.p.PluginsService ] [node-0] no plugins loaded\n"; + + TimestampFormatFinder timestampFormatFinder = TextLogFileStructureFinder.populateTimestampFormatFinder(explanation, + sample.split("\n"), FileStructureOverrides.EMPTY_OVERRIDES, NOOP_TIMEOUT_CHECKER); + timestampFormatFinder.selectBestMatch(); + assertEquals(Collections.singletonList("ISO8601"), timestampFormatFinder.getJavaTimestampFormats()); + assertEquals("TIMESTAMP_ISO8601", timestampFormatFinder.getGrokPatternName()); + assertEquals("\\b\\d{4}-\\d{2}-\\d{2}[T ]\\d{2}:\\d{2}", timestampFormatFinder.getSimplePattern().pattern()); + for (String preface : timestampFormatFinder.getPrefaces()) { + assertEquals("[", preface); + } + assertEquals(1, timestampFormatFinder.getNumMatchedFormats()); + } + + public void testSelectBestMatchGivenExceptionTrace() { + + TimestampFormatFinder timestampFormatFinder = TextLogFileStructureFinder.populateTimestampFormatFinder(explanation, + EXCEPTION_TRACE_SAMPLE.split("\n"), FileStructureOverrides.EMPTY_OVERRIDES, NOOP_TIMEOUT_CHECKER); + + // Even though many lines have a timestamp near the end (in the Lucene version information), + // these are so far along the lines that the weight of the timestamp near the beginning of the + // first line should take precedence + timestampFormatFinder.selectBestMatch(); + assertEquals(Collections.singletonList("ISO8601"), timestampFormatFinder.getJavaTimestampFormats()); + assertEquals("TIMESTAMP_ISO8601", timestampFormatFinder.getGrokPatternName()); + assertEquals("\\b\\d{4}-\\d{2}-\\d{2}[T ]\\d{2}:\\d{2}", timestampFormatFinder.getSimplePattern().pattern()); + for (String preface : timestampFormatFinder.getPrefaces()) { + assertEquals("[", preface); } + assertEquals(2, timestampFormatFinder.getNumMatchedFormats()); + } + + public void testSelectBestMatchGivenExceptionTraceAndTimestampFormatOverride() { + + FileStructureOverrides overrides = FileStructureOverrides.builder().setTimestampFormat("yyyy-MM-dd HH:mm:ss").build(); + + TimestampFormatFinder timestampFormatFinder = TextLogFileStructureFinder.populateTimestampFormatFinder(explanation, + EXCEPTION_TRACE_SAMPLE.split("\n"), overrides, NOOP_TIMEOUT_CHECKER); + + // The override should force the seemingly inferior choice of timestamp + // TODO - this won't work any more :-( + } + + public void testSelectBestMatchGivenExceptionTraceAndImpossibleTimestampFormatOverride() { + + FileStructureOverrides overrides = FileStructureOverrides.builder().setTimestampFormat("MMM dd HH:mm:ss").build(); + + TimestampFormatFinder timestampFormatFinder = TextLogFileStructureFinder.populateTimestampFormatFinder(explanation, + EXCEPTION_TRACE_SAMPLE.split("\n"), overrides, NOOP_TIMEOUT_CHECKER); + + timestampFormatFinder.selectBestMatch(); + assertEquals(Collections.emptyList(), timestampFormatFinder.getJavaTimestampFormats()); + assertNull(timestampFormatFinder.getGrokPatternName()); + assertNull(timestampFormatFinder.getSimplePattern()); + assertEquals(Collections.emptyList(), timestampFormatFinder.getPrefaces()); + assertEquals(0, timestampFormatFinder.getNumMatchedFormats()); + } + + private void validateNoTimestampMatch(String text) { + + TimestampFormatFinder strictTimestampFormatFinder = new TimestampFormatFinder(explanation, true, true, true, NOOP_TIMEOUT_CHECKER); + expectThrows(IllegalArgumentException.class, () -> strictTimestampFormatFinder.addSample(text)); + assertEquals(0, strictTimestampFormatFinder.getNumMatchedFormats()); + + TimestampFormatFinder lenientTimestampFormatFinder = new TimestampFormatFinder(explanation, false, false, false, + NOOP_TIMEOUT_CHECKER); + lenientTimestampFormatFinder.addSample(text); + lenientTimestampFormatFinder.selectBestMatch(); + assertNull(lenientTimestampFormatFinder.getGrokPatternName()); + assertEquals(0, lenientTimestampFormatFinder.getNumMatchedFormats()); + } + + private void validateTimestampMatch(String text, String expectedGrokPatternName, String expectedSimpleRegex, + String expectedJavaTimestampFormat, long expectedEpochMs) { + validateTimestampMatch(text, expectedGrokPatternName, expectedSimpleRegex, Collections.singletonList(expectedJavaTimestampFormat), + expectedEpochMs); + } + + private void validateTimestampMatch(String text, String expectedGrokPatternName, String expectedSimpleRegex, + List expectedJavaTimestampFormats, long expectedEpochMs) { + + Pattern expectedSimplePattern = Pattern.compile(expectedSimpleRegex); + assertTrue(expectedSimplePattern.matcher(text).find()); + validateJavaTimestampFormats(expectedJavaTimestampFormats, text, expectedEpochMs); + + TimestampFormatFinder strictTimestampFormatFinder = new TimestampFormatFinder(explanation, true, true, true, NOOP_TIMEOUT_CHECKER); + strictTimestampFormatFinder.addSample(text); + assertEquals(expectedGrokPatternName, strictTimestampFormatFinder.getGrokPatternName()); + assertEquals(expectedSimplePattern.pattern(), strictTimestampFormatFinder.getSimplePattern().pattern()); + assertEquals(expectedJavaTimestampFormats, strictTimestampFormatFinder.getJavaTimestampFormats()); + assertEquals(1, strictTimestampFormatFinder.getNumMatchedFormats()); + + TimestampFormatFinder lenientTimestampFormatFinder = new TimestampFormatFinder(explanation, false, false, false, + NOOP_TIMEOUT_CHECKER); + lenientTimestampFormatFinder.addSample(text); + lenientTimestampFormatFinder.selectBestMatch(); + assertEquals(expectedGrokPatternName, lenientTimestampFormatFinder.getGrokPatternName()); + assertEquals(expectedSimplePattern.pattern(), lenientTimestampFormatFinder.getSimplePattern().pattern()); + assertEquals(expectedJavaTimestampFormats, lenientTimestampFormatFinder.getJavaTimestampFormats()); + assertEquals(1, lenientTimestampFormatFinder.getNumMatchedFormats()); + } + + private void validateFindInFullMessage(String message, String expectedPreface, String expectedGrokPatternName, + String expectedSimpleRegex, String expectedJavaTimestampFormat) { + validateFindInFullMessage(message, expectedPreface, expectedGrokPatternName, expectedSimpleRegex, + Collections.singletonList(expectedJavaTimestampFormat)); + } + + private void validateFindInFullMessage(String timestampFormatOverride, String message, String expectedPreface, + String expectedGrokPatternName, String expectedSimpleRegex, + String expectedJavaTimestampFormat) { + validateFindInFullMessage(timestampFormatOverride, message, expectedPreface, expectedGrokPatternName, expectedSimpleRegex, + Collections.singletonList(expectedJavaTimestampFormat)); + } + + private void validateFindInFullMessage(String message, String expectedPreface, String expectedGrokPatternName, + String expectedSimpleRegex, List expectedJavaTimestampFormats) { + validateFindInFullMessage(null, message, expectedPreface, expectedGrokPatternName, expectedSimpleRegex, + expectedJavaTimestampFormats); + } + + private void validateFindInFullMessage(String timestampFormatOverride, String message, String expectedPreface, + String expectedGrokPatternName, String expectedSimpleRegex, + List expectedJavaTimestampFormats) { + + Pattern expectedSimplePattern = Pattern.compile(expectedSimpleRegex); + assertTrue(expectedSimplePattern.matcher(message).find()); + + TimestampFormatFinder timestampFormatFinder = new TimestampFormatFinder(explanation, timestampFormatOverride, false, false, false, + NOOP_TIMEOUT_CHECKER); + timestampFormatFinder.addSample(message); + timestampFormatFinder.selectBestMatch(); + assertEquals(expectedGrokPatternName, timestampFormatFinder.getGrokPatternName()); + assertEquals(expectedSimplePattern.pattern(), timestampFormatFinder.getSimplePattern().pattern()); + assertEquals(expectedJavaTimestampFormats, timestampFormatFinder.getJavaTimestampFormats()); + assertEquals(Collections.singletonList(expectedPreface), timestampFormatFinder.getPrefaces()); + assertEquals(1, timestampFormatFinder.getNumMatchedFormats()); } private void validateJavaTimestampFormats(List javaTimestampFormats, String text, long expectedEpochMs) { @@ -325,18 +998,35 @@ private void validateJavaTimestampFormats(List javaTimestampFormats, Str // All the test times are for Tue May 15 2018 16:14:56 UTC, which is 17:14:56 in London. // This is the timezone that will be used for any text representations that don't include it. ZoneId defaultZone = ZoneId.of("Europe/London"); - TemporalAccessor parsed; + long actualEpochMs; for (int i = 0; i < javaTimestampFormats.size(); ++i) { try { String timestampFormat = javaTimestampFormats.get(i); switch (timestampFormat) { case "ISO8601": - parsed = DateFormatter.forPattern("strict_date_optional_time_nanos").withZone(defaultZone).parse(text); + actualEpochMs = DateFormatter.forPattern("iso8601").withZone(defaultZone).parseMillis(text); + break; + case "UNIX_MS": + actualEpochMs = Long.parseLong(text); + break; + case "UNIX": + actualEpochMs = (long) (Double.parseDouble(text) * 1000.0); + break; + case "TAI64N": + actualEpochMs = parseMillisFromTai64n(text); break; default: - DateTimeFormatter parser = new DateTimeFormatterBuilder() - .appendPattern(timestampFormat).parseDefaulting(ChronoField.YEAR_OF_ERA, 2018) - .toFormatter(Locale.ROOT); + DateTimeFormatterBuilder builder = new DateTimeFormatterBuilder().appendPattern(timestampFormat); + if (timestampFormat.indexOf('y') == -1) { + builder.parseDefaulting(ChronoField.YEAR_OF_ERA, 2018); + } + if (timestampFormat.indexOf('m') == -1) { + // All formats tested have either both or neither of hour and minute + builder.parseDefaulting(ChronoField.HOUR_OF_DAY, 0); + builder.parseDefaulting(ChronoField.MINUTE_OF_HOUR, 0); + // Seconds automatically defaults to 0 + } + DateTimeFormatter parser = builder.toFormatter(Locale.ROOT); // This next line parses the textual date without any default timezone, so if // the text doesn't contain the timezone then the resulting temporal accessor // will be incomplete (i.e. impossible to convert to an Instant). You would @@ -346,15 +1036,15 @@ private void validateJavaTimestampFormats(List javaTimestampFormats, Str // from the text. The solution is to parse twice, once without a default // timezone and then again with a default timezone if the first parse didn't // find one in the text. - parsed = parser.parse(text); + TemporalAccessor parsed = parser.parse(text); if (parsed.query(TemporalQueries.zone()) == null) { // TODO: when Java 8 is no longer supported remove the two // lines and comment above and the closing brace below parsed = parser.withZone(defaultZone).parse(text); } + actualEpochMs = Instant.from(parsed).toEpochMilli(); break; } - long actualEpochMs = Instant.from(parsed).toEpochMilli(); if (expectedEpochMs == actualEpochMs) { break; } @@ -370,4 +1060,17 @@ private void validateJavaTimestampFormats(List javaTimestampFormats, Str } } } + + /** + * Logic copied from {@code org.elasticsearch.ingest.common.DateFormat.Tai64n.parseMillis}. + */ + private long parseMillisFromTai64n(String tai64nDate) { + if (tai64nDate.startsWith("@")) { + tai64nDate = tai64nDate.substring(1); + } + assertEquals(24, tai64nDate.length()); + long seconds = Long.parseLong(tai64nDate.substring(1, 16), 16); + long nanos = Long.parseLong(tai64nDate.substring(16, 24), 16); + return (seconds * 1000) - 10000 + nanos / 1000000; + } } From 6e307d9fee2cf6472a2877116aa7e0e419aec907 Mon Sep 17 00:00:00 2001 From: Lisa Cawley Date: Thu, 23 May 2019 14:05:38 -0700 Subject: [PATCH 080/224] [DOCS] Removes inclusion of java.asciidoc (#42459) --- .../separating-node-client-traffic.asciidoc | 3 +-- .../security/securing-communications/setting-up-ssl.asciidoc | 3 --- docs/reference/setup/setup-xclient.asciidoc | 3 --- x-pack/docs/en/security/ccs-clients-integrations.asciidoc | 4 ---- x-pack/docs/en/watcher/index.asciidoc | 3 --- 5 files changed, 1 insertion(+), 15 deletions(-) diff --git a/docs/reference/security/securing-communications/separating-node-client-traffic.asciidoc b/docs/reference/security/securing-communications/separating-node-client-traffic.asciidoc index 2eab8e0ae5adb..61ebd3e682594 100644 --- a/docs/reference/security/securing-communications/separating-node-client-traffic.asciidoc +++ b/docs/reference/security/securing-communications/separating-node-client-traffic.asciidoc @@ -65,5 +65,4 @@ transport.profiles.client.xpack.security.ssl.client_authentication: none This setting keeps certificate authentication active for node-to-node traffic, but removes the requirement to distribute a signed certificate to transport -clients. For more information, see -{stack-ov}/java-clients.html#transport-client[Configuring the Transport Client to work with a Secured Cluster]. +clients. \ No newline at end of file diff --git a/docs/reference/security/securing-communications/setting-up-ssl.asciidoc b/docs/reference/security/securing-communications/setting-up-ssl.asciidoc index 68eda2cdc3e09..30d206c41b1ba 100644 --- a/docs/reference/security/securing-communications/setting-up-ssl.asciidoc +++ b/docs/reference/security/securing-communications/setting-up-ssl.asciidoc @@ -32,8 +32,5 @@ the {kib} server and to connect to {es} via HTTPS. See . Configure Beats to use encrypted connections. See <>. -. Configure the Java transport client to use encrypted communications. -See <>. - . Configure {es} for Apache Hadoop to use secured transport. See {hadoop-ref}/security.html[{es} for Apache Hadoop Security]. diff --git a/docs/reference/setup/setup-xclient.asciidoc b/docs/reference/setup/setup-xclient.asciidoc index 24cef9c736966..a192aeb6ea39a 100644 --- a/docs/reference/setup/setup-xclient.asciidoc +++ b/docs/reference/setup/setup-xclient.asciidoc @@ -111,6 +111,3 @@ Then in your project's `pom.xml` if using maven, add the following repositories -------------------------------------------------------------- -- - -. If you are using {stack} {security-features}, there are more configuration -steps. See {stack-ov}/java-clients.html[Java Client and Security]. diff --git a/x-pack/docs/en/security/ccs-clients-integrations.asciidoc b/x-pack/docs/en/security/ccs-clients-integrations.asciidoc index 1a52a9dab7a87..e0de25d44ef1c 100644 --- a/x-pack/docs/en/security/ccs-clients-integrations.asciidoc +++ b/x-pack/docs/en/security/ccs-clients-integrations.asciidoc @@ -11,7 +11,6 @@ clusters. You will need to update the configuration for several clients to work with a secured cluster: -* <> * <> @@ -35,9 +34,6 @@ be secured as well, or at least communicate with the cluster in a secured way: :edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/security/ccs-clients-integrations/cross-cluster.asciidoc include::ccs-clients-integrations/cross-cluster.asciidoc[] -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/security/ccs-clients-integrations/java.asciidoc -include::ccs-clients-integrations/java.asciidoc[] - :edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/security/ccs-clients-integrations/http.asciidoc include::ccs-clients-integrations/http.asciidoc[] diff --git a/x-pack/docs/en/watcher/index.asciidoc b/x-pack/docs/en/watcher/index.asciidoc index 5f51c948ebf3a..782f0886affc2 100644 --- a/x-pack/docs/en/watcher/index.asciidoc +++ b/x-pack/docs/en/watcher/index.asciidoc @@ -89,9 +89,6 @@ include::actions.asciidoc[] :edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/transform.asciidoc include::transform.asciidoc[] -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/java.asciidoc -include::java.asciidoc[] - :edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/managing-watches.asciidoc include::managing-watches.asciidoc[] From c87ea81573557783d25a1023abcb92f7e9ec88aa Mon Sep 17 00:00:00 2001 From: Jack Conradson Date: Thu, 23 May 2019 18:59:30 -0400 Subject: [PATCH 081/224] Bug fix to allow access to top level params in reduce script (#42096) --- .../ScriptedMetricAggregatorFactory.java | 9 ++-- .../ScriptedMetricAggregatorTests.java | 41 +++++++++++++++++-- 2 files changed, 40 insertions(+), 10 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricAggregatorFactory.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricAggregatorFactory.java index e08835f0bea14..01084ee0b7f8b 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricAggregatorFactory.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricAggregatorFactory.java @@ -89,7 +89,7 @@ public Aggregator createInternal(Aggregator parent, boolean collectsFromSingleBu final ScriptedMetricAggContexts.CombineScript combineScript = this.combineScript.newInstance( mergeParams(aggParams, combineScriptParams), aggState); - final Script reduceScript = deepCopyScript(this.reduceScript, context); + final Script reduceScript = deepCopyScript(this.reduceScript, context, aggParams); if (initScript != null) { initScript.execute(); CollectionUtils.ensureNoSelfReferences(aggState, "Scripted metric aggs init script"); @@ -99,12 +99,9 @@ public Aggregator createInternal(Aggregator parent, boolean collectsFromSingleBu pipelineAggregators, metaData); } - private static Script deepCopyScript(Script script, SearchContext context) { + private static Script deepCopyScript(Script script, SearchContext context, Map aggParams) { if (script != null) { - Map params = script.getParams(); - if (params != null) { - params = deepCopyParams(params, context); - } + Map params = mergeParams(aggParams, deepCopyParams(script.getParams(), context)); return new Script(script.getType(), script.getLang(), script.getIdOrCode(), params); } else { return null; diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricAggregatorTests.java index 05115a03e300f..5f74937f6610b 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricAggregatorTests.java @@ -71,7 +71,9 @@ public class ScriptedMetricAggregatorTests extends AggregatorTestCase { private static final Script MAP_SCRIPT_PARAMS = new Script(ScriptType.INLINE, MockScriptEngine.NAME, "mapScriptParams", Collections.singletonMap("itemValue", 12)); private static final Script COMBINE_SCRIPT_PARAMS = new Script(ScriptType.INLINE, MockScriptEngine.NAME, "combineScriptParams", - Collections.singletonMap("divisor", 4)); + Collections.singletonMap("multiplier", 4)); + private static final Script REDUCE_SCRIPT_PARAMS = new Script(ScriptType.INLINE, MockScriptEngine.NAME, "reduceScriptParams", + Collections.singletonMap("additional", 2)); private static final String CONFLICTING_PARAM_NAME = "initialValue"; private static final Script INIT_SCRIPT_SELF_REF = new Script(ScriptType.INLINE, MockScriptEngine.NAME, "initScriptSelfRef", @@ -140,9 +142,14 @@ public static void initMockScripts() { }); SCRIPTS.put("combineScriptParams", params -> { Map state = (Map) params.get("state"); - int divisor = ((Integer) params.get("divisor")); - return ((List) state.get("collector")).stream().mapToInt(Integer::intValue).map(i -> i / divisor).sum(); + int multiplier = ((Integer) params.get("multiplier")); + return ((List) state.get("collector")).stream().mapToInt(Integer::intValue).map(i -> i * multiplier).sum(); }); + SCRIPTS.put("reduceScriptParams", params -> + ((List)params.get("states")).stream().mapToInt(i -> (int)i).sum() + + (int)params.get("aggs_param") + (int)params.get("additional") - + ((List)params.get("states")).size()*24*4 + ); SCRIPTS.put("initScriptSelfRef", params -> { Map state = (Map) params.get("state"); @@ -279,7 +286,33 @@ public void testScriptParamsPassedThrough() throws IOException { ScriptedMetric scriptedMetric = search(newSearcher(indexReader, true, true), new MatchAllDocsQuery(), aggregationBuilder); // The result value depends on the script params. - assertEquals(306, scriptedMetric.aggregation()); + assertEquals(4896, scriptedMetric.aggregation()); + } + } + } + + public void testAggParamsPassedToReduceScript() throws IOException { + MockScriptEngine scriptEngine = new MockScriptEngine(MockScriptEngine.NAME, SCRIPTS, Collections.emptyMap()); + Map engines = Collections.singletonMap(scriptEngine.getType(), scriptEngine); + ScriptService scriptService = new ScriptService(Settings.EMPTY, engines, ScriptModule.CORE_CONTEXTS); + + try (Directory directory = newDirectory()) { + try (RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory)) { + for (int i = 0; i < 100; i++) { + indexWriter.addDocument(singleton(new SortedNumericDocValuesField("number", i))); + } + } + + try (IndexReader indexReader = DirectoryReader.open(directory)) { + ScriptedMetricAggregationBuilder aggregationBuilder = new ScriptedMetricAggregationBuilder(AGG_NAME); + aggregationBuilder.params(Collections.singletonMap("aggs_param", 1)) + .initScript(INIT_SCRIPT_PARAMS).mapScript(MAP_SCRIPT_PARAMS) + .combineScript(COMBINE_SCRIPT_PARAMS).reduceScript(REDUCE_SCRIPT_PARAMS); + ScriptedMetric scriptedMetric = searchAndReduce( + newSearcher(indexReader, true, true), new MatchAllDocsQuery(), aggregationBuilder, 0, scriptService); + + // The result value depends on the script params. + assertEquals(4803, scriptedMetric.aggregation()); } } } From ca7b80a7fdc76e7d8263057b3f7ac0c545fa39b5 Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Thu, 23 May 2019 16:54:23 -0700 Subject: [PATCH 082/224] Reenable bwc tests (#42478) This commit reenables bwc tests now that the backport of #38373 is complete. --- build.gradle | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/build.gradle b/build.gradle index 037d3242dc4b7..7de02b814da86 100644 --- a/build.gradle +++ b/build.gradle @@ -162,8 +162,8 @@ task verifyVersions { * after the backport of the backcompat code is complete. */ -boolean bwc_tests_enabled = false -final String bwc_tests_disabled_issue = "https://github.com/elastic/elasticsearch/pull/38373" /* place a PR link here when committing bwc changes */ +boolean bwc_tests_enabled = true +final String bwc_tests_disabled_issue = "" /* place a PR link here when committing bwc changes */ if (bwc_tests_enabled == false) { if (bwc_tests_disabled_issue.isEmpty()) { throw new GradleException("bwc_tests_disabled_issue must be set when bwc_tests_enabled == false") From 5db76677366de5bb588b7ecc4b1b921ac7663603 Mon Sep 17 00:00:00 2001 From: Mark Vieira Date: Thu, 23 May 2019 20:59:35 -0700 Subject: [PATCH 083/224] Gradle init script for enabling remote build cache (#42484) --- .ci/build-cache.gradle | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) create mode 100644 .ci/build-cache.gradle diff --git a/.ci/build-cache.gradle b/.ci/build-cache.gradle new file mode 100644 index 0000000000000..b180314b40f4b --- /dev/null +++ b/.ci/build-cache.gradle @@ -0,0 +1,18 @@ +if (System.getenv('GRADLE_BUILD_CACHE_URL')) { + gradle.settingsEvaluated { settings -> + settings.buildCache { + remote(HttpBuildCache) { + url = System.getenv('GRADLE_BUILD_CACHE_URL') + push = Boolean.valueOf(System.getenv('GRADLE_BUILD_CACHE_PUSH') ?: 'false') + if (System.getenv('GRADLE_BUILD_CACHE_USERNAME') && System.getenv('GRADLE_BUILD_CACHE_PASSWORD')) { + credentials { + username = System.getenv('GRADLE_BUILD_CACHE_USERNAME') + password = System.getenv('GRADLE_BUILD_CACHE_PASSWORD') + } + } + } + } + } +} else { + throw new GradleException("You must supply a value for GRADLE_BUILD_CACHE_URL environment variable when applying build-cache.gradle init script") +} \ No newline at end of file From 2d43dd680b21738dbffd51d29f4b462d125706b5 Mon Sep 17 00:00:00 2001 From: David Turner Date: Fri, 24 May 2019 08:27:06 +0100 Subject: [PATCH 084/224] Add more logging to MockDiskUsagesIT (#42424) This commit adds a log message containing the routing table, emitted on each iteration of the failing assertBusy() in #40174. It also modernizes the code a bit. --- .../allocation/decider/MockDiskUsagesIT.java | 78 ++++++++----------- 1 file changed, 31 insertions(+), 47 deletions(-) diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/MockDiskUsagesIT.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/MockDiskUsagesIT.java index 1ad18c1f69f54..8565beb1b89d7 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/MockDiskUsagesIT.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/MockDiskUsagesIT.java @@ -19,10 +19,9 @@ package org.elasticsearch.cluster.routing.allocation.decider; -import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse; -import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; import org.elasticsearch.cluster.ClusterInfo; import org.elasticsearch.cluster.ClusterInfoService; +import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.DiskUsage; import org.elasticsearch.cluster.MockInternalClusterInfoService; import org.elasticsearch.cluster.routing.RoutingNode; @@ -33,10 +32,9 @@ import org.elasticsearch.test.ESIntegTestCase; import java.util.ArrayList; -import java.util.Arrays; import java.util.Collection; +import java.util.Collections; import java.util.HashMap; -import java.util.Iterator; import java.util.List; import java.util.Map; @@ -50,21 +48,15 @@ public class MockDiskUsagesIT extends ESIntegTestCase { @Override protected Collection> nodePlugins() { // Use the mock internal cluster info service, which has fake-able disk usages - return Arrays.asList(MockInternalClusterInfoService.TestPlugin.class); + return Collections.singletonList(MockInternalClusterInfoService.TestPlugin.class); } public void testRerouteOccursOnDiskPassingHighWatermark() throws Exception { List nodes = internalCluster().startNodes(3); - // Wait for all 3 nodes to be up - assertBusy(() -> { - NodesStatsResponse resp = client().admin().cluster().prepareNodesStats().get(); - assertThat(resp.getNodes().size(), equalTo(3)); - }); - // Start with all nodes at 50% usage final MockInternalClusterInfoService cis = (MockInternalClusterInfoService) - internalCluster().getInstance(ClusterInfoService.class, internalCluster().getMasterName()); + internalCluster().getInstance(ClusterInfoService.class, internalCluster().getMasterName()); cis.setUpdateFrequency(TimeValue.timeValueMillis(200)); cis.onMaster(); cis.setN1Usage(nodes.get(0), new DiskUsage(nodes.get(0), "n1", "/dev/null", 100, 50)); @@ -73,34 +65,32 @@ public void testRerouteOccursOnDiskPassingHighWatermark() throws Exception { final boolean watermarkBytes = randomBoolean(); // we have to consistently use bytes or percentage for the disk watermark settings client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder() - .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey(), watermarkBytes ? "20b" : "80%") - .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(), watermarkBytes ? "10b" : "90%") - .put( - DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_DISK_FLOOD_STAGE_WATERMARK_SETTING.getKey(), - watermarkBytes ? "0b" : "100%") - .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL_SETTING.getKey(), "1ms")).get(); + .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey(), watermarkBytes ? "20b" : "80%") + .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(), watermarkBytes ? "10b" : "90%") + .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_DISK_FLOOD_STAGE_WATERMARK_SETTING.getKey(), + watermarkBytes ? "0b" : "100%") + .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL_SETTING.getKey(), "1ms")).get(); // Create an index with 10 shards so we can check allocation for it prepareCreate("test").setSettings(Settings.builder() - .put("number_of_shards", 10) - .put("number_of_replicas", 0) - .put("index.routing.allocation.exclude._name", "")).get(); + .put("number_of_shards", 10) + .put("number_of_replicas", 0)).get(); ensureGreen("test"); // Block until the "fake" cluster info is retrieved at least once assertBusy(() -> { - ClusterInfo info = cis.getClusterInfo(); + final ClusterInfo info = cis.getClusterInfo(); logger.info("--> got: {} nodes", info.getNodeLeastAvailableDiskUsages().size()); assertThat(info.getNodeLeastAvailableDiskUsages().size(), greaterThan(0)); }); final List realNodeNames = new ArrayList<>(); - ClusterStateResponse resp = client().admin().cluster().prepareState().get(); - Iterator iter = resp.getState().getRoutingNodes().iterator(); - while (iter.hasNext()) { - RoutingNode node = iter.next(); - realNodeNames.add(node.nodeId()); - logger.info("--> node {} has {} shards", - node.nodeId(), resp.getState().getRoutingNodes().node(node.nodeId()).numberOfOwningShards()); + { + final ClusterState clusterState = client().admin().cluster().prepareState().get().getState(); + for (final RoutingNode node : clusterState.getRoutingNodes()) { + realNodeNames.add(node.nodeId()); + logger.info("--> node {} has {} shards", + node.nodeId(), clusterState.getRoutingNodes().node(node.nodeId()).numberOfOwningShards()); + } } // Update the disk usages so one node has now passed the high watermark @@ -108,17 +98,15 @@ public void testRerouteOccursOnDiskPassingHighWatermark() throws Exception { cis.setN2Usage(realNodeNames.get(1), new DiskUsage(nodes.get(1), "n2", "_na_", 100, 50)); cis.setN3Usage(realNodeNames.get(2), new DiskUsage(nodes.get(2), "n3", "_na_", 100, 0)); // nothing free on node3 - // Retrieve the count of shards on each node - final Map nodesToShardCount = new HashMap<>(); - assertBusy(() -> { - ClusterStateResponse resp12 = client().admin().cluster().prepareState().get(); - Iterator iter12 = resp12.getState().getRoutingNodes().iterator(); - while (iter12.hasNext()) { - RoutingNode node = iter12.next(); + final ClusterState clusterState = client().admin().cluster().prepareState().get().getState(); + logger.info("--> {}", clusterState.routingTable()); + + final Map nodesToShardCount = new HashMap<>(); + for (final RoutingNode node : clusterState.getRoutingNodes()) { logger.info("--> node {} has {} shards", - node.nodeId(), resp12.getState().getRoutingNodes().node(node.nodeId()).numberOfOwningShards()); - nodesToShardCount.put(node.nodeId(), resp12.getState().getRoutingNodes().node(node.nodeId()).numberOfOwningShards()); + node.nodeId(), clusterState.getRoutingNodes().node(node.nodeId()).numberOfOwningShards()); + nodesToShardCount.put(node.nodeId(), clusterState.getRoutingNodes().node(node.nodeId()).numberOfOwningShards()); } assertThat("node1 has 5 shards", nodesToShardCount.get(realNodeNames.get(0)), equalTo(5)); assertThat("node2 has 5 shards", nodesToShardCount.get(realNodeNames.get(1)), equalTo(5)); @@ -130,17 +118,13 @@ public void testRerouteOccursOnDiskPassingHighWatermark() throws Exception { cis.setN2Usage(realNodeNames.get(1), new DiskUsage(nodes.get(1), "n2", "_na_", 100, 50)); cis.setN3Usage(realNodeNames.get(2), new DiskUsage(nodes.get(2), "n3", "_na_", 100, 50)); // node3 has free space now - // Retrieve the count of shards on each node - nodesToShardCount.clear(); - assertBusy(() -> { - ClusterStateResponse resp1 = client().admin().cluster().prepareState().get(); - Iterator iter1 = resp1.getState().getRoutingNodes().iterator(); - while (iter1.hasNext()) { - RoutingNode node = iter1.next(); + final Map nodesToShardCount = new HashMap<>(); + final ClusterState clusterState = client().admin().cluster().prepareState().get().getState(); + for (final RoutingNode node : clusterState.getRoutingNodes()) { logger.info("--> node {} has {} shards", - node.nodeId(), resp1.getState().getRoutingNodes().node(node.nodeId()).numberOfOwningShards()); - nodesToShardCount.put(node.nodeId(), resp1.getState().getRoutingNodes().node(node.nodeId()).numberOfOwningShards()); + node.nodeId(), clusterState.getRoutingNodes().node(node.nodeId()).numberOfOwningShards()); + nodesToShardCount.put(node.nodeId(), clusterState.getRoutingNodes().node(node.nodeId()).numberOfOwningShards()); } assertThat("node1 has at least 3 shards", nodesToShardCount.get(realNodeNames.get(0)), greaterThanOrEqualTo(3)); assertThat("node2 has at least 3 shards", nodesToShardCount.get(realNodeNames.get(1)), greaterThanOrEqualTo(3)); From 360939f9d8b7ec8aa2ac1a493e0bd992ae03a0ab Mon Sep 17 00:00:00 2001 From: David Turner Date: Fri, 24 May 2019 08:37:22 +0100 Subject: [PATCH 085/224] Add stack traces to RetentionLeasesIT failures (#42425) Today `RetentionLeaseIT` calls `fail(e.toString())` on some exceptions, losing the stack trace that came with the exception. This commit adjusts this to re-throw the exception wrapped in an `AssertionError` so we can see more details about failures such as #41430. --- .../index/seqno/RetentionLeaseIT.java | 30 ++++++++++++------- 1 file changed, 19 insertions(+), 11 deletions(-) diff --git a/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseIT.java b/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseIT.java index cb40a0726d42f..bbe05accb2813 100644 --- a/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseIT.java +++ b/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseIT.java @@ -108,7 +108,7 @@ public void testRetentionLeasesSyncedOnAdd() throws Exception { final long retainingSequenceNumber = randomLongBetween(0, Long.MAX_VALUE); final String source = randomAlphaOfLength(8); final CountDownLatch latch = new CountDownLatch(1); - final ActionListener listener = ActionListener.wrap(r -> latch.countDown(), e -> fail(e.toString())); + final ActionListener listener = countDownLatchListener(latch); // simulate a peer recovery which locks the soft deletes policy on the primary final Closeable retentionLock = randomBoolean() ? primary.acquireRetentionLock() : () -> {}; currentRetentionLeases.put(id, primary.addRetentionLease(id, retainingSequenceNumber, source, listener)); @@ -155,7 +155,7 @@ public void testRetentionLeaseSyncedOnRemove() throws Exception { final long retainingSequenceNumber = randomLongBetween(0, Long.MAX_VALUE); final String source = randomAlphaOfLength(8); final CountDownLatch latch = new CountDownLatch(1); - final ActionListener listener = ActionListener.wrap(r -> latch.countDown(), e -> fail(e.toString())); + final ActionListener listener = countDownLatchListener(latch); // simulate a peer recovery which locks the soft deletes policy on the primary final Closeable retentionLock = randomBoolean() ? primary.acquireRetentionLock() : () -> {}; currentRetentionLeases.put(id, primary.addRetentionLease(id, retainingSequenceNumber, source, listener)); @@ -166,7 +166,7 @@ public void testRetentionLeaseSyncedOnRemove() throws Exception { for (int i = 0; i < length; i++) { final String id = randomFrom(currentRetentionLeases.keySet()); final CountDownLatch latch = new CountDownLatch(1); - primary.removeRetentionLease(id, ActionListener.wrap(r -> latch.countDown(), e -> fail(e.toString()))); + primary.removeRetentionLease(id, countDownLatchListener(latch)); // simulate a peer recovery which locks the soft deletes policy on the primary final Closeable retentionLock = randomBoolean() ? primary.acquireRetentionLock() : () -> {}; currentRetentionLeases.remove(id); @@ -228,7 +228,7 @@ public void testRetentionLeasesSyncOnExpiration() throws Exception { final long retainingSequenceNumber = randomLongBetween(0, Long.MAX_VALUE); final String source = randomAlphaOfLength(8); final CountDownLatch latch = new CountDownLatch(1); - final ActionListener listener = ActionListener.wrap(r -> latch.countDown(), e -> fail(e.toString())); + final ActionListener listener = countDownLatchListener(latch); final RetentionLease currentRetentionLease = primary.addRetentionLease(id, retainingSequenceNumber, source, listener); final long now = System.nanoTime(); latch.await(); @@ -390,7 +390,7 @@ public void testRetentionLeasesSyncOnRecovery() throws Exception { final long retainingSequenceNumber = randomLongBetween(0, Long.MAX_VALUE); final String source = randomAlphaOfLength(8); final CountDownLatch latch = new CountDownLatch(1); - final ActionListener listener = ActionListener.wrap(r -> latch.countDown(), e -> fail(e.toString())); + final ActionListener listener = countDownLatchListener(latch); currentRetentionLeases.put(id, primary.addRetentionLease(id, retainingSequenceNumber, source, listener)); latch.await(); currentRetentionLeases.put(id, primary.renewRetentionLease(id, retainingSequenceNumber, source)); @@ -479,7 +479,7 @@ public void testCanRenewRetentionLeaseUnderBlock() throws InterruptedException { */ assertBusy(() -> assertThat(primary.loadRetentionLeases().leases(), contains(retentionLease.get()))); } catch (final Exception e) { - fail(e.toString()); + failWithException(e); } }); @@ -516,7 +516,7 @@ private void runUnderBlockTest( final String source = randomAlphaOfLength(8); final CountDownLatch latch = new CountDownLatch(1); - final ActionListener listener = ActionListener.wrap(r -> latch.countDown(), e -> fail(e.toString())); + final ActionListener listener = countDownLatchListener(latch); primary.addRetentionLease(idForInitialRetentionLease, initialRetainingSequenceNumber, source, listener); latch.await(); @@ -545,7 +545,7 @@ public void onResponse(final ReplicationResponse replicationResponse) { @Override public void onFailure(final Exception e) { - fail(e.toString()); + failWithException(e); } }); @@ -598,7 +598,7 @@ public void testCanRenewRetentionLeaseWithoutWaitingForShards() throws Interrupt */ assertBusy(() -> assertThat(primary.loadRetentionLeases().leases(), contains(retentionLease.get()))); } catch (final Exception e) { - fail(e.toString()); + failWithException(e); } }); @@ -637,7 +637,7 @@ private void runWaitForShardsTest( final String source = randomAlphaOfLength(8); final CountDownLatch latch = new CountDownLatch(1); - final ActionListener listener = ActionListener.wrap(r -> latch.countDown(), e -> fail(e.toString())); + final ActionListener listener = countDownLatchListener(latch); primary.addRetentionLease(idForInitialRetentionLease, initialRetainingSequenceNumber, source, listener); latch.await(); @@ -665,7 +665,7 @@ public void onResponse(final ReplicationResponse replicationResponse) { @Override public void onFailure(final Exception e) { - fail(e.toString()); + failWithException(e); } }); @@ -674,4 +674,12 @@ public void onFailure(final Exception e) { afterSync.accept(primary); } + private static void failWithException(Exception e) { + throw new AssertionError("unexpected", e); + } + + private static ActionListener countDownLatchListener(CountDownLatch latch) { + return ActionListener.wrap(r -> latch.countDown(), RetentionLeaseIT::failWithException); + } + } From c1de8c29db228bb633f6766684874ed3321d7200 Mon Sep 17 00:00:00 2001 From: David Turner Date: Fri, 24 May 2019 08:43:16 +0100 Subject: [PATCH 086/224] Cluster state from API should always have a master (#42454) Today the `TransportClusterStateAction` ignores the state passed by the `TransportMasterNodeAction` and obtains its state from the cluster applier. This might be inconsistent, showing a different node as the master or maybe even having no master. This change adjusts the action to use the passed-in state directly, and adds tests showing that the state returned is consistent with our expectations even if there is a concurrent master failover. Fixes #38331 Relates #38432 --- .../state/ClusterStateRequestBuilder.java | 18 ++ .../state/TransportClusterStateAction.java | 78 ++++---- ...ansportClusterStateActionDisruptionIT.java | 182 ++++++++++++++++++ .../test/InternalTestCluster.java | 7 +- 4 files changed, 241 insertions(+), 44 deletions(-) create mode 100644 server/src/test/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateActionDisruptionIT.java diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/state/ClusterStateRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/state/ClusterStateRequestBuilder.java index 35020556b1ed3..da5074b41aa4f 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/state/ClusterStateRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/state/ClusterStateRequestBuilder.java @@ -22,6 +22,7 @@ import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.master.MasterNodeReadOperationRequestBuilder; import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.common.unit.TimeValue; public class ClusterStateRequestBuilder extends MasterNodeReadOperationRequestBuilder { @@ -100,4 +101,21 @@ public ClusterStateRequestBuilder setIndicesOptions(IndicesOptions indicesOption request.indicesOptions(indicesOptions); return this; } + + /** + * Causes the request to wait for the metadata version to advance to at least the given version. + * @param waitForMetaDataVersion The metadata version for which to wait + */ + public ClusterStateRequestBuilder setWaitForMetaDataVersion(long waitForMetaDataVersion) { + request.waitForMetaDataVersion(waitForMetaDataVersion); + return this; + } + + /** + * If {@link ClusterStateRequest#waitForMetaDataVersion()} is set then this determines how long to wait + */ + public ClusterStateRequestBuilder setWaitForTimeOut(TimeValue waitForTimeout) { + request.waitForTimeout(waitForTimeout); + return this; + } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateAction.java index 3248ac167fcbe..cedca2d77e192 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateAction.java @@ -27,6 +27,7 @@ import org.elasticsearch.action.support.master.TransportMasterNodeReadAction; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateObserver; +import org.elasticsearch.cluster.NotMasterException; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; @@ -79,50 +80,50 @@ protected ClusterStateResponse newResponse() { protected void masterOperation(final ClusterStateRequest request, final ClusterState state, final ActionListener listener) throws IOException { - if (request.waitForMetaDataVersion() != null) { - final Predicate metadataVersionPredicate = clusterState -> { - return clusterState.metaData().version() >= request.waitForMetaDataVersion(); - }; - final ClusterStateObserver observer = - new ClusterStateObserver(clusterService, request.waitForTimeout(), logger, threadPool.getThreadContext()); - final ClusterState clusterState = observer.setAndGetObservedState(); - if (metadataVersionPredicate.test(clusterState)) { - buildResponse(request, clusterState, listener); - } else { - observer.waitForNextChange(new ClusterStateObserver.Listener() { - @Override - public void onNewClusterState(ClusterState state) { - try { - buildResponse(request, state, listener); - } catch (Exception e) { - listener.onFailure(e); - } - } + final Predicate acceptableClusterStatePredicate + = request.waitForMetaDataVersion() == null ? clusterState -> true + : clusterState -> clusterState.metaData().version() >= request.waitForMetaDataVersion(); + + final Predicate acceptableClusterStateOrNotMasterPredicate = request.local() + ? acceptableClusterStatePredicate + : acceptableClusterStatePredicate.or(clusterState -> clusterState.nodes().isLocalNodeElectedMaster() == false); - @Override - public void onClusterServiceClose() { - listener.onFailure(new NodeClosedException(clusterService.localNode())); + if (acceptableClusterStatePredicate.test(state)) { + ActionListener.completeWith(listener, () -> buildResponse(request, state)); + } else { + assert acceptableClusterStateOrNotMasterPredicate.test(state) == false; + new ClusterStateObserver(state, clusterService, request.waitForTimeout(), logger, threadPool.getThreadContext()) + .waitForNextChange(new ClusterStateObserver.Listener() { + + @Override + public void onNewClusterState(ClusterState newState) { + if (acceptableClusterStatePredicate.test(newState)) { + ActionListener.completeWith(listener, () -> buildResponse(request, newState)); + } else { + listener.onFailure(new NotMasterException( + "master stepped down waiting for metadata version " + request.waitForMetaDataVersion())); } + } - @Override - public void onTimeout(TimeValue timeout) { - try { - listener.onResponse(new ClusterStateResponse(clusterState.getClusterName(), null, true)); - } catch (Exception e) { - listener.onFailure(e); - } + @Override + public void onClusterServiceClose() { + listener.onFailure(new NodeClosedException(clusterService.localNode())); + } + + @Override + public void onTimeout(TimeValue timeout) { + try { + listener.onResponse(new ClusterStateResponse(state.getClusterName(), null, true)); + } catch (Exception e) { + listener.onFailure(e); } - }, metadataVersionPredicate); - } - } else { - ClusterState currentState = clusterService.state(); - buildResponse(request, currentState, listener); + } + }, acceptableClusterStateOrNotMasterPredicate); } } - private void buildResponse(final ClusterStateRequest request, - final ClusterState currentState, - final ActionListener listener) throws IOException { + private ClusterStateResponse buildResponse(final ClusterStateRequest request, + final ClusterState currentState) { logger.trace("Serving cluster state request using version {}", currentState.version()); ClusterState.Builder builder = ClusterState.builder(currentState.getClusterName()); builder.version(currentState.version()); @@ -184,8 +185,7 @@ private void buildResponse(final ClusterStateRequest request, } } - listener.onResponse(new ClusterStateResponse(currentState.getClusterName(), builder.build(), false)); + return new ClusterStateResponse(currentState.getClusterName(), builder.build(), false); } - } diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateActionDisruptionIT.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateActionDisruptionIT.java new file mode 100644 index 0000000000000..0d51f647ee28c --- /dev/null +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateActionDisruptionIT.java @@ -0,0 +1,182 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.action.admin.cluster.state; + +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.coordination.ClusterBootstrapService; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.discovery.MasterNotDiscoveredException; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.test.transport.MockTransportService; +import org.elasticsearch.transport.TransportService; + +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.stream.Collectors; +import java.util.stream.StreamSupport; + +import static org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.not; + +@ESIntegTestCase.ClusterScope(numDataNodes = 0, scope = ESIntegTestCase.Scope.TEST, transportClientRatio = 0) +public class TransportClusterStateActionDisruptionIT extends ESIntegTestCase { + + @Override + protected Collection> nodePlugins() { + return Collections.singletonList(MockTransportService.TestPlugin.class); + } + + public void testNonLocalRequestAlwaysFindsMaster() throws Exception { + runRepeatedlyWhileChangingMaster(() -> { + final ClusterStateRequestBuilder clusterStateRequestBuilder = client().admin().cluster().prepareState() + .clear().setNodes(true).setMasterNodeTimeout("100ms"); + final ClusterStateResponse clusterStateResponse; + try { + clusterStateResponse = clusterStateRequestBuilder.get(); + } catch (MasterNotDiscoveredException e) { + return; // ok, we hit the disconnected node + } + assertNotNull("should always contain a master node", clusterStateResponse.getState().nodes().getMasterNodeId()); + }); + } + + public void testLocalRequestAlwaysSucceeds() throws Exception { + runRepeatedlyWhileChangingMaster(() -> { + final String node = randomFrom(internalCluster().getNodeNames()); + final DiscoveryNodes discoveryNodes = client(node).admin().cluster().prepareState() + .clear().setLocal(true).setNodes(true).setMasterNodeTimeout("100ms").get().getState().nodes(); + for (DiscoveryNode discoveryNode : discoveryNodes) { + if (discoveryNode.getName().equals(node)) { + return; + } + } + fail("nodes did not contain [" + node + "]: " + discoveryNodes); + }); + } + + public void testNonLocalRequestAlwaysFindsMasterAndWaitsForMetadata() throws Exception { + runRepeatedlyWhileChangingMaster(() -> { + final String node = randomFrom(internalCluster().getNodeNames()); + final long metadataVersion + = internalCluster().getInstance(ClusterService.class, node).getClusterApplierService().state().metaData().version(); + final long waitForMetaDataVersion = randomLongBetween(Math.max(1, metadataVersion - 3), metadataVersion + 5); + final ClusterStateRequestBuilder clusterStateRequestBuilder = client(node).admin().cluster().prepareState() + .clear().setNodes(true).setMetaData(true) + .setMasterNodeTimeout(TimeValue.timeValueMillis(100)).setWaitForTimeOut(TimeValue.timeValueMillis(100)) + .setWaitForMetaDataVersion(waitForMetaDataVersion); + final ClusterStateResponse clusterStateResponse; + try { + clusterStateResponse = clusterStateRequestBuilder.get(); + } catch (MasterNotDiscoveredException e) { + return; // ok, we hit the disconnected node + } + if (clusterStateResponse.isWaitForTimedOut() == false) { + final ClusterState state = clusterStateResponse.getState(); + assertNotNull("should always contain a master node", state.nodes().getMasterNodeId()); + assertThat("waited for metadata version", state.metaData().version(), greaterThanOrEqualTo(waitForMetaDataVersion)); + } + }); + } + + public void testLocalRequestWaitsForMetadata() throws Exception { + runRepeatedlyWhileChangingMaster(() -> { + final String node = randomFrom(internalCluster().getNodeNames()); + final long metadataVersion + = internalCluster().getInstance(ClusterService.class, node).getClusterApplierService().state().metaData().version(); + final long waitForMetaDataVersion = randomLongBetween(Math.max(1, metadataVersion - 3), metadataVersion + 5); + final ClusterStateResponse clusterStateResponse = client(node).admin().cluster() + .prepareState().clear().setLocal(true).setMetaData(true).setWaitForMetaDataVersion(waitForMetaDataVersion) + .setMasterNodeTimeout(TimeValue.timeValueMillis(100)).setWaitForTimeOut(TimeValue.timeValueMillis(100)) + .get(); + if (clusterStateResponse.isWaitForTimedOut() == false) { + final MetaData metaData = clusterStateResponse.getState().metaData(); + assertThat("waited for metadata version " + waitForMetaDataVersion + " with node " + node, + metaData.version(), greaterThanOrEqualTo(waitForMetaDataVersion)); + } + }); + } + + public void runRepeatedlyWhileChangingMaster(Runnable runnable) throws Exception { + internalCluster().startNodes(3); + + assertBusy(() -> assertThat(client().admin().cluster().prepareState().clear().setMetaData(true) + .get().getState().getLastCommittedConfiguration().getNodeIds().stream() + .filter(n -> ClusterBootstrapService.isBootstrapPlaceholder(n) == false).collect(Collectors.toSet()), hasSize(3))); + + final String masterName = internalCluster().getMasterName(); + + final AtomicBoolean shutdown = new AtomicBoolean(); + final Thread assertingThread = new Thread(() -> { + while (shutdown.get() == false) { + runnable.run(); + } + }, "asserting thread"); + + final Thread updatingThread = new Thread(() -> { + String value = "none"; + while (shutdown.get() == false) { + value = "none".equals(value) ? "all" : "none"; + final String nonMasterNode = randomValueOtherThan(masterName, () -> randomFrom(internalCluster().getNodeNames())); + assertAcked(client(nonMasterNode).admin().cluster().prepareUpdateSettings().setPersistentSettings( + Settings.builder().put(CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), value))); + } + }, "updating thread"); + + final List mockTransportServices + = StreamSupport.stream(internalCluster().getInstances(TransportService.class).spliterator(), false) + .map(ts -> (MockTransportService) ts).collect(Collectors.toList()); + + assertingThread.start(); + updatingThread.start(); + + final MockTransportService masterTransportService + = (MockTransportService) internalCluster().getInstance(TransportService.class, masterName); + + for (MockTransportService mockTransportService : mockTransportServices) { + if (masterTransportService != mockTransportService) { + masterTransportService.addFailToSendNoConnectRule(mockTransportService); + mockTransportService.addFailToSendNoConnectRule(masterTransportService); + } + } + + assertBusy(() -> { + final String nonMasterNode = randomValueOtherThan(masterName, () -> randomFrom(internalCluster().getNodeNames())); + final String claimedMasterName = internalCluster().getMasterName(nonMasterNode); + assertThat(claimedMasterName, not(equalTo(masterName))); + }); + + shutdown.set(true); + assertingThread.join(); + updatingThread.join(); + internalCluster().close(); + } + +} diff --git a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java index cc071df9769ca..3b4f8c8f55d4c 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java +++ b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java @@ -149,8 +149,8 @@ import static org.elasticsearch.common.unit.TimeValue.timeValueSeconds; import static org.elasticsearch.discovery.DiscoveryModule.DISCOVERY_TYPE_SETTING; import static org.elasticsearch.discovery.DiscoveryModule.ZEN2_DISCOVERY_TYPE; -import static org.elasticsearch.node.Node.INITIAL_STATE_TIMEOUT_SETTING; import static org.elasticsearch.discovery.FileBasedSeedHostsProvider.UNICAST_HOSTS_FILE; +import static org.elasticsearch.node.Node.INITIAL_STATE_TIMEOUT_SETTING; import static org.elasticsearch.test.ESTestCase.assertBusy; import static org.elasticsearch.test.ESTestCase.awaitBusy; import static org.elasticsearch.test.ESTestCase.getTestTransportType; @@ -161,7 +161,6 @@ import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.nullValue; import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertThat; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; @@ -1884,9 +1883,7 @@ public String getMasterName() { public String getMasterName(@Nullable String viaNode) { try { Client client = viaNode != null ? client(viaNode) : client(); - final DiscoveryNode masterNode = client.admin().cluster().prepareState().get().getState().nodes().getMasterNode(); - assertNotNull(masterNode); - return masterNode.getName(); + return client.admin().cluster().prepareState().get().getState().nodes().getMasterNode().getName(); } catch (Exception e) { logger.warn("Can't fetch cluster state", e); throw new RuntimeException("Can't get master node " + e.getMessage(), e); From 4b21100178cdc584193df32dc6b8e2e6fde902d2 Mon Sep 17 00:00:00 2001 From: Simon Willnauer Date: Fri, 24 May 2019 10:44:59 +0200 Subject: [PATCH 087/224] Remove IndexStore and DirectoryService (#42446) Both of these classes are basically a bloated wrapper around a simple construct that can simply be a DirectoryFactory interface. This change removes both classes and replaces them with a simple stateless interface that creates a new `Directory` per shard. The concept of `index.store` is preserved since it makes sense from a configuration perspective. --- ...ce.java => SmbMmapFsDirectoryFactory.java} | 14 +--- .../store/smbmmapfs/SmbMmapFsIndexStore.java | 37 ---------- ....java => SmbSimpleFsDirectoryFactory.java} | 12 +-- .../smbsimplefs/SmbSimpleFsIndexStore.java | 38 ---------- .../plugin/store/smb/SMBStorePlugin.java | 13 ++-- .../common/settings/IndexScopedSettings.java | 4 +- .../elasticsearch/env/NodeEnvironment.java | 4 +- .../org/elasticsearch/index/IndexModule.java | 35 +++++---- .../org/elasticsearch/index/IndexService.java | 19 +++-- ...ryService.java => FsDirectoryFactory.java} | 18 ++--- .../elasticsearch/index/store/IndexStore.java | 39 ---------- .../elasticsearch/indices/IndicesService.java | 14 ++-- .../java/org/elasticsearch/node/Node.java | 5 +- .../plugins/IndexStorePlugin.java | 30 ++++++-- .../elasticsearch/index/IndexModuleTests.java | 18 +++-- .../index/shard/IndexShardTests.java | 4 +- ...ests.java => FsDirectoryFactoryTests.java} | 56 +++++++++++--- .../index/store/FsDirectoryServiceTests.java | 73 ------------------- .../plugins/IndexStorePluginTests.java | 22 +++--- .../basic/SearchWithRandomIOExceptionsIT.java | 14 ++-- ...rvice.java => MockFSDirectoryFactory.java} | 63 +++++----------- .../test/store/MockFSIndexStore.java | 29 ++------ 22 files changed, 182 insertions(+), 379 deletions(-) rename plugins/store-smb/src/main/java/org/elasticsearch/index/store/smbmmapfs/{SmbMmapFsDirectoryService.java => SmbMmapFsDirectoryFactory.java} (74%) delete mode 100644 plugins/store-smb/src/main/java/org/elasticsearch/index/store/smbmmapfs/SmbMmapFsIndexStore.java rename plugins/store-smb/src/main/java/org/elasticsearch/index/store/smbsimplefs/{SmbSimpleFsDirectoryService.java => SmbSimpleFsDirectoryFactory.java} (76%) delete mode 100644 plugins/store-smb/src/main/java/org/elasticsearch/index/store/smbsimplefs/SmbSimpleFsIndexStore.java rename server/src/main/java/org/elasticsearch/index/store/{FsDirectoryService.java => FsDirectoryFactory.java} (94%) delete mode 100644 server/src/main/java/org/elasticsearch/index/store/IndexStore.java rename server/src/test/java/org/elasticsearch/index/store/{IndexStoreTests.java => FsDirectoryFactoryTests.java} (59%) delete mode 100644 server/src/test/java/org/elasticsearch/index/store/FsDirectoryServiceTests.java rename test/framework/src/main/java/org/elasticsearch/test/store/{MockFSDirectoryService.java => MockFSDirectoryFactory.java} (78%) diff --git a/plugins/store-smb/src/main/java/org/elasticsearch/index/store/smbmmapfs/SmbMmapFsDirectoryService.java b/plugins/store-smb/src/main/java/org/elasticsearch/index/store/smbmmapfs/SmbMmapFsDirectoryFactory.java similarity index 74% rename from plugins/store-smb/src/main/java/org/elasticsearch/index/store/smbmmapfs/SmbMmapFsDirectoryService.java rename to plugins/store-smb/src/main/java/org/elasticsearch/index/store/smbmmapfs/SmbMmapFsDirectoryFactory.java index 1264464cf0071..13b6f9401abc5 100644 --- a/plugins/store-smb/src/main/java/org/elasticsearch/index/store/smbmmapfs/SmbMmapFsDirectoryService.java +++ b/plugins/store-smb/src/main/java/org/elasticsearch/index/store/smbmmapfs/SmbMmapFsDirectoryFactory.java @@ -23,22 +23,16 @@ import org.apache.lucene.store.LockFactory; import org.apache.lucene.store.MMapDirectory; import org.elasticsearch.index.IndexSettings; -import org.elasticsearch.index.shard.ShardPath; -import org.elasticsearch.index.store.FsDirectoryService; +import org.elasticsearch.index.store.FsDirectoryFactory; import org.elasticsearch.index.store.SmbDirectoryWrapper; import java.io.IOException; import java.nio.file.Path; -public class SmbMmapFsDirectoryService extends FsDirectoryService { - - public SmbMmapFsDirectoryService(IndexSettings indexSettings, ShardPath path) { - super(indexSettings, path); - } +public final class SmbMmapFsDirectoryFactory extends FsDirectoryFactory { @Override - protected Directory newFSDirectory(Path location, LockFactory lockFactory) throws IOException { - logger.debug("wrapping MMapDirectory for SMB"); - return new SmbDirectoryWrapper(new MMapDirectory(location, indexSettings.getValue(INDEX_LOCK_FACTOR_SETTING))); + protected Directory newFSDirectory(Path location, LockFactory lockFactory, IndexSettings indexSettings) throws IOException { + return new SmbDirectoryWrapper(new MMapDirectory(location, lockFactory)); } } diff --git a/plugins/store-smb/src/main/java/org/elasticsearch/index/store/smbmmapfs/SmbMmapFsIndexStore.java b/plugins/store-smb/src/main/java/org/elasticsearch/index/store/smbmmapfs/SmbMmapFsIndexStore.java deleted file mode 100644 index 0399348966361..0000000000000 --- a/plugins/store-smb/src/main/java/org/elasticsearch/index/store/smbmmapfs/SmbMmapFsIndexStore.java +++ /dev/null @@ -1,37 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.index.store.smbmmapfs; - -import org.elasticsearch.index.IndexSettings; -import org.elasticsearch.index.shard.ShardPath; -import org.elasticsearch.index.store.DirectoryService; -import org.elasticsearch.index.store.IndexStore; - -public class SmbMmapFsIndexStore extends IndexStore { - - public SmbMmapFsIndexStore(IndexSettings indexSettings) { - super(indexSettings); - } - - @Override - public DirectoryService newDirectoryService(ShardPath path) { - return new SmbMmapFsDirectoryService(indexSettings, path); - } -} diff --git a/plugins/store-smb/src/main/java/org/elasticsearch/index/store/smbsimplefs/SmbSimpleFsDirectoryService.java b/plugins/store-smb/src/main/java/org/elasticsearch/index/store/smbsimplefs/SmbSimpleFsDirectoryFactory.java similarity index 76% rename from plugins/store-smb/src/main/java/org/elasticsearch/index/store/smbsimplefs/SmbSimpleFsDirectoryService.java rename to plugins/store-smb/src/main/java/org/elasticsearch/index/store/smbsimplefs/SmbSimpleFsDirectoryFactory.java index 87e45a02cf6cb..e5e9025f82d85 100644 --- a/plugins/store-smb/src/main/java/org/elasticsearch/index/store/smbsimplefs/SmbSimpleFsDirectoryService.java +++ b/plugins/store-smb/src/main/java/org/elasticsearch/index/store/smbsimplefs/SmbSimpleFsDirectoryFactory.java @@ -23,22 +23,16 @@ import org.apache.lucene.store.LockFactory; import org.apache.lucene.store.SimpleFSDirectory; import org.elasticsearch.index.IndexSettings; -import org.elasticsearch.index.shard.ShardPath; -import org.elasticsearch.index.store.FsDirectoryService; +import org.elasticsearch.index.store.FsDirectoryFactory; import org.elasticsearch.index.store.SmbDirectoryWrapper; import java.io.IOException; import java.nio.file.Path; -public class SmbSimpleFsDirectoryService extends FsDirectoryService { - - public SmbSimpleFsDirectoryService(IndexSettings indexSettings, ShardPath path) { - super(indexSettings, path); - } +public final class SmbSimpleFsDirectoryFactory extends FsDirectoryFactory { @Override - protected Directory newFSDirectory(Path location, LockFactory lockFactory) throws IOException { - logger.debug("wrapping SimpleFSDirectory for SMB"); + protected Directory newFSDirectory(Path location, LockFactory lockFactory, IndexSettings indexSettings) throws IOException { return new SmbDirectoryWrapper(new SimpleFSDirectory(location, lockFactory)); } } diff --git a/plugins/store-smb/src/main/java/org/elasticsearch/index/store/smbsimplefs/SmbSimpleFsIndexStore.java b/plugins/store-smb/src/main/java/org/elasticsearch/index/store/smbsimplefs/SmbSimpleFsIndexStore.java deleted file mode 100644 index 3b6b3c3c8990f..0000000000000 --- a/plugins/store-smb/src/main/java/org/elasticsearch/index/store/smbsimplefs/SmbSimpleFsIndexStore.java +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.index.store.smbsimplefs; - -import org.elasticsearch.index.IndexSettings; -import org.elasticsearch.index.shard.ShardPath; -import org.elasticsearch.index.store.DirectoryService; -import org.elasticsearch.index.store.IndexStore; - -public class SmbSimpleFsIndexStore extends IndexStore { - - public SmbSimpleFsIndexStore(IndexSettings indexSettings) { - super(indexSettings); - } - - @Override - public DirectoryService newDirectoryService(ShardPath path) { - return new SmbSimpleFsDirectoryService(indexSettings, path); - } -} - diff --git a/plugins/store-smb/src/main/java/org/elasticsearch/plugin/store/smb/SMBStorePlugin.java b/plugins/store-smb/src/main/java/org/elasticsearch/plugin/store/smb/SMBStorePlugin.java index 111100a2f1580..bb818e9b53d38 100644 --- a/plugins/store-smb/src/main/java/org/elasticsearch/plugin/store/smb/SMBStorePlugin.java +++ b/plugins/store-smb/src/main/java/org/elasticsearch/plugin/store/smb/SMBStorePlugin.java @@ -19,23 +19,20 @@ package org.elasticsearch.plugin.store.smb; -import org.elasticsearch.index.IndexSettings; -import org.elasticsearch.index.store.IndexStore; -import org.elasticsearch.index.store.smbmmapfs.SmbMmapFsIndexStore; -import org.elasticsearch.index.store.smbsimplefs.SmbSimpleFsIndexStore; +import org.elasticsearch.index.store.smbmmapfs.SmbMmapFsDirectoryFactory; +import org.elasticsearch.index.store.smbsimplefs.SmbSimpleFsDirectoryFactory; import org.elasticsearch.plugins.IndexStorePlugin; import org.elasticsearch.plugins.Plugin; import java.util.Map; -import java.util.function.Function; public class SMBStorePlugin extends Plugin implements IndexStorePlugin { @Override - public Map> getIndexStoreFactories() { + public Map getDirectoryFactories() { return Map.of( - "smb_mmap_fs", SmbMmapFsIndexStore::new, - "smb_simple_fs", SmbSimpleFsIndexStore::new); + "smb_mmap_fs", new SmbMmapFsDirectoryFactory(), + "smb_simple_fs", new SmbSimpleFsDirectoryFactory()); } } diff --git a/server/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java b/server/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java index 2acbbec3f8171..907277b53dde9 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java +++ b/server/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java @@ -38,7 +38,7 @@ import org.elasticsearch.index.mapper.FieldMapper; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.similarity.SimilarityService; -import org.elasticsearch.index.store.FsDirectoryService; +import org.elasticsearch.index.store.FsDirectoryFactory; import org.elasticsearch.index.store.Store; import org.elasticsearch.indices.IndicesRequestCache; @@ -157,7 +157,7 @@ public final class IndexScopedSettings extends AbstractScopedSettings { IndexModule.INDEX_STORE_TYPE_SETTING, IndexModule.INDEX_STORE_PRE_LOAD_SETTING, IndexModule.INDEX_QUERY_CACHE_ENABLED_SETTING, - FsDirectoryService.INDEX_LOCK_FACTOR_SETTING, + FsDirectoryFactory.INDEX_LOCK_FACTOR_SETTING, Store.FORCE_RAM_TERM_DICT, EngineConfig.INDEX_CODEC_SETTING, IndexMetaData.SETTING_WAIT_FOR_ACTIVE_SHARDS, diff --git a/server/src/main/java/org/elasticsearch/env/NodeEnvironment.java b/server/src/main/java/org/elasticsearch/env/NodeEnvironment.java index 497c6a9e06459..4d19dd66732fc 100644 --- a/server/src/main/java/org/elasticsearch/env/NodeEnvironment.java +++ b/server/src/main/java/org/elasticsearch/env/NodeEnvironment.java @@ -52,7 +52,7 @@ import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.shard.ShardPath; -import org.elasticsearch.index.store.FsDirectoryService; +import org.elasticsearch.index.store.FsDirectoryFactory; import org.elasticsearch.monitor.fs.FsInfo; import org.elasticsearch.monitor.fs.FsProbe; import org.elasticsearch.monitor.jvm.JvmInfo; @@ -430,7 +430,7 @@ public static void acquireFSLockForPaths(IndexSettings indexSettings, Path... sh // resolve the directory the shard actually lives in Path p = shardPaths[i].resolve("index"); // open a directory (will be immediately closed) on the shard's location - dirs[i] = new SimpleFSDirectory(p, indexSettings.getValue(FsDirectoryService.INDEX_LOCK_FACTOR_SETTING)); + dirs[i] = new SimpleFSDirectory(p, indexSettings.getValue(FsDirectoryFactory.INDEX_LOCK_FACTOR_SETTING)); // create a lock for the "write.lock" file try { locks[i] = dirs[i].obtainLock(IndexWriter.WRITE_LOCK_NAME); diff --git a/server/src/main/java/org/elasticsearch/index/IndexModule.java b/server/src/main/java/org/elasticsearch/index/IndexModule.java index acec458b8b0cd..ca0f34803cc0c 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexModule.java +++ b/server/src/main/java/org/elasticsearch/index/IndexModule.java @@ -45,7 +45,8 @@ import org.elasticsearch.index.shard.IndexingOperationListener; import org.elasticsearch.index.shard.SearchOperationListener; import org.elasticsearch.index.similarity.SimilarityService; -import org.elasticsearch.index.store.IndexStore; +import org.elasticsearch.index.store.DirectoryService; +import org.elasticsearch.index.store.FsDirectoryFactory; import org.elasticsearch.indices.IndicesQueryCache; import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.indices.fielddata.cache.IndicesFieldDataCache; @@ -75,7 +76,7 @@ * {@link #addSimilarity(String, TriFunction)} while existing Providers can be referenced through Settings under the * {@link IndexModule#SIMILARITY_SETTINGS_PREFIX} prefix along with the "type" value. For example, to reference the * {@link BM25Similarity}, the configuration {@code "index.similarity.my_similarity.type : "BM25"} can be used. - *
  • {@link IndexStore} - Custom {@link IndexStore} instances can be registered via {@link IndexStorePlugin}
  • + *
  • {@link DirectoryService} - Custom {@link DirectoryService} instances can be registered via {@link IndexStorePlugin}
  • *
  • {@link IndexEventListener} - Custom {@link IndexEventListener} instances can be registered via * {@link #addIndexEventListener(IndexEventListener)}
  • *
  • Settings update listener - Custom settings update listener can be registered via @@ -86,6 +87,8 @@ public final class IndexModule { public static final Setting NODE_STORE_ALLOW_MMAP = Setting.boolSetting("node.store.allow_mmap", true, Property.NodeScope); + private static final FsDirectoryFactory DEFAULT_DIRECTORY_FACTORY = new FsDirectoryFactory(); + public static final Setting INDEX_STORE_TYPE_SETTING = new Setting<>("index.store.type", "", Function.identity(), Property.IndexScope, Property.NodeScope); @@ -112,7 +115,7 @@ public final class IndexModule { private SetOnce indexSearcherWrapper = new SetOnce<>(); private final Set indexEventListeners = new HashSet<>(); private final Map> similarities = new HashMap<>(); - private final Map> indexStoreFactories; + private final Map directoryFactories; private final SetOnce> forceQueryCacheProvider = new SetOnce<>(); private final List searchOperationListeners = new ArrayList<>(); private final List indexOperationListeners = new ArrayList<>(); @@ -125,19 +128,19 @@ public final class IndexModule { * @param indexSettings the index settings * @param analysisRegistry the analysis registry * @param engineFactory the engine factory - * @param indexStoreFactories the available store types + * @param directoryFactories the available store types */ public IndexModule( final IndexSettings indexSettings, final AnalysisRegistry analysisRegistry, final EngineFactory engineFactory, - final Map> indexStoreFactories) { + final Map directoryFactories) { this.indexSettings = indexSettings; this.analysisRegistry = analysisRegistry; this.engineFactory = Objects.requireNonNull(engineFactory); this.searchOperationListeners.add(new SearchSlowLog(indexSettings)); this.indexOperationListeners.add(new IndexingSlowLog(indexSettings)); - this.indexStoreFactories = Collections.unmodifiableMap(indexStoreFactories); + this.directoryFactories = Collections.unmodifiableMap(directoryFactories); } /** @@ -384,7 +387,7 @@ public IndexService newIndexService( IndexSearcherWrapperFactory searcherWrapperFactory = indexSearcherWrapper.get() == null ? (shard) -> null : indexSearcherWrapper.get(); eventListener.beforeIndexCreated(indexSettings.getIndex(), indexSettings.getSettings()); - final IndexStore store = getIndexStore(indexSettings, indexStoreFactories); + final IndexStorePlugin.DirectoryFactory directoryFactory = getDirectoryFactory(indexSettings, directoryFactories); final QueryCache queryCache; if (indexSettings.getValue(INDEX_QUERY_CACHE_ENABLED_SETTING)) { BiFunction queryCacheProvider = forceQueryCacheProvider.get(); @@ -399,12 +402,12 @@ public IndexService newIndexService( return new IndexService(indexSettings, indexCreationContext, environment, xContentRegistry, new SimilarityService(indexSettings, scriptService, similarities), shardStoreDeleter, analysisRegistry, engineFactory, circuitBreakerService, bigArrays, threadPool, scriptService, - client, queryCache, store, eventListener, searcherWrapperFactory, mapperRegistry, + client, queryCache, directoryFactory, eventListener, searcherWrapperFactory, mapperRegistry, indicesFieldDataCache, searchOperationListeners, indexOperationListeners, namedWriteableRegistry); } - private static IndexStore getIndexStore( - final IndexSettings indexSettings, final Map> indexStoreFactories) { + private static IndexStorePlugin.DirectoryFactory getDirectoryFactory( + final IndexSettings indexSettings, final Map indexStoreFactories) { final String storeType = indexSettings.getValue(INDEX_STORE_TYPE_SETTING); final Type type; final Boolean allowMmap = NODE_STORE_ALLOW_MMAP.get(indexSettings.getNodeSettings()); @@ -420,20 +423,16 @@ private static IndexStore getIndexStore( if (allowMmap == false && (type == Type.MMAPFS || type == Type.HYBRIDFS)) { throw new IllegalArgumentException("store type [" + storeType + "] is not allowed because mmap is disabled"); } - final IndexStore store; + final IndexStorePlugin.DirectoryFactory factory; if (storeType.isEmpty() || isBuiltinType(storeType)) { - store = new IndexStore(indexSettings); + factory = DEFAULT_DIRECTORY_FACTORY; } else { - Function factory = indexStoreFactories.get(storeType); + factory = indexStoreFactories.get(storeType); if (factory == null) { throw new IllegalArgumentException("Unknown store type [" + storeType + "]"); } - store = factory.apply(indexSettings); - if (store == null) { - throw new IllegalStateException("store must not be null"); - } } - return store; + return factory; } /** diff --git a/server/src/main/java/org/elasticsearch/index/IndexService.java b/server/src/main/java/org/elasticsearch/index/IndexService.java index f5deb99c80d80..2d86a2b436d9d 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexService.java +++ b/server/src/main/java/org/elasticsearch/index/IndexService.java @@ -23,6 +23,7 @@ import org.apache.lucene.index.IndexReader; import org.apache.lucene.search.Sort; import org.apache.lucene.store.AlreadyClosedException; +import org.apache.lucene.store.Directory; import org.apache.lucene.util.Accountable; import org.elasticsearch.Assertions; import org.elasticsearch.Version; @@ -66,14 +67,13 @@ import org.elasticsearch.index.shard.ShardNotFoundException; import org.elasticsearch.index.shard.ShardPath; import org.elasticsearch.index.similarity.SimilarityService; -import org.elasticsearch.index.store.DirectoryService; -import org.elasticsearch.index.store.IndexStore; import org.elasticsearch.index.store.Store; import org.elasticsearch.index.translog.Translog; import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.indices.cluster.IndicesClusterStateService; import org.elasticsearch.indices.fielddata.cache.IndicesFieldDataCache; import org.elasticsearch.indices.mapper.MapperRegistry; +import org.elasticsearch.plugins.IndexStorePlugin; import org.elasticsearch.script.ScriptService; import org.elasticsearch.threadpool.ThreadPool; @@ -103,7 +103,7 @@ public class IndexService extends AbstractIndexComponent implements IndicesClust private final BitsetFilterCache bitsetFilterCache; private final NodeEnvironment nodeEnv; private final ShardStoreDeleter shardStoreDeleter; - private final IndexStore indexStore; + private final IndexStorePlugin.DirectoryFactory directoryFactory; private final IndexSearcherWrapper searcherWrapper; private final IndexCache indexCache; private final MapperService mapperService; @@ -149,7 +149,7 @@ public IndexService( ScriptService scriptService, Client client, QueryCache queryCache, - IndexStore indexStore, + IndexStorePlugin.DirectoryFactory directoryFactory, IndexEventListener eventListener, IndexModule.IndexSearcherWrapperFactory wrapperFactory, MapperRegistry mapperRegistry, @@ -200,7 +200,7 @@ public IndexService( this.client = client; this.eventListener = eventListener; this.nodeEnv = nodeEnv; - this.indexStore = indexStore; + this.directoryFactory = directoryFactory; this.engineFactory = Objects.requireNonNull(engineFactory); // initialize this last -- otherwise if the wrapper requires any other member to be non-null we fail with an NPE this.searcherWrapper = wrapperFactory.newWrapper(this); @@ -401,9 +401,8 @@ public synchronized IndexShard createShard( warmer.warm(searcher, shard, IndexService.this.indexSettings); } }; - // TODO we can remove either IndexStore or DirectoryService. All we need is a simple Supplier - DirectoryService directoryService = indexStore.newDirectoryService(path); - store = new Store(shardId, this.indexSettings, directoryService.newDirectory(), lock, + Directory directory = directoryFactory.newDirectory(this.indexSettings, path); + store = new Store(shardId, this.indexSettings, directory, lock, new StoreCloseListener(shardId, () -> eventListener.onStoreClosed(shardId))); eventListener.onStoreCreated(shardId); indexShard = new IndexShard( @@ -753,8 +752,8 @@ final IndexSearcherWrapper getSearcherWrapper() { return searcherWrapper; } // pkg private for testing - final IndexStore getIndexStore() { - return indexStore; + final IndexStorePlugin.DirectoryFactory getDirectoryFactory() { + return directoryFactory; } // pkg private for testing private void maybeFSyncTranslogs() { diff --git a/server/src/main/java/org/elasticsearch/index/store/FsDirectoryService.java b/server/src/main/java/org/elasticsearch/index/store/FsDirectoryFactory.java similarity index 94% rename from server/src/main/java/org/elasticsearch/index/store/FsDirectoryService.java rename to server/src/main/java/org/elasticsearch/index/store/FsDirectoryFactory.java index a8b50fcc53895..84bb4c49b27d4 100644 --- a/server/src/main/java/org/elasticsearch/index/store/FsDirectoryService.java +++ b/server/src/main/java/org/elasticsearch/index/store/FsDirectoryFactory.java @@ -30,13 +30,13 @@ import org.apache.lucene.store.NativeFSLockFactory; import org.apache.lucene.store.SimpleFSDirectory; import org.apache.lucene.store.SimpleFSLockFactory; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.index.IndexModule; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.shard.ShardPath; +import org.elasticsearch.plugins.IndexStorePlugin; import java.io.IOException; import java.nio.file.Files; @@ -44,7 +44,8 @@ import java.util.HashSet; import java.util.Set; -public class FsDirectoryService extends DirectoryService { +public class FsDirectoryFactory implements IndexStorePlugin.DirectoryFactory { + public static final Setting INDEX_LOCK_FACTOR_SETTING = new Setting<>("index.store.fs.fs_lock", "native", (s) -> { switch (s) { case "native": @@ -56,27 +57,20 @@ public class FsDirectoryService extends DirectoryService { } // can we set on both - node and index level, some nodes might be running on NFS so they might need simple rather than native }, Property.IndexScope, Property.NodeScope); - private final ShardPath path; - - @Inject - public FsDirectoryService(IndexSettings indexSettings, ShardPath path) { - super(path.getShardId(), indexSettings); - this.path = path; - } @Override - public Directory newDirectory() throws IOException { + public Directory newDirectory(IndexSettings indexSettings, ShardPath path) throws IOException { final Path location = path.resolveIndex(); final LockFactory lockFactory = indexSettings.getValue(INDEX_LOCK_FACTOR_SETTING); Files.createDirectories(location); - Directory wrapped = newFSDirectory(location, lockFactory); + Directory wrapped = newFSDirectory(location, lockFactory, indexSettings); Set preLoadExtensions = new HashSet<>( indexSettings.getValue(IndexModule.INDEX_STORE_PRE_LOAD_SETTING)); wrapped = setPreload(wrapped, location, lockFactory, preLoadExtensions); return wrapped; } - protected Directory newFSDirectory(Path location, LockFactory lockFactory) throws IOException { + protected Directory newFSDirectory(Path location, LockFactory lockFactory, IndexSettings indexSettings) throws IOException { final String storeType = indexSettings.getSettings().get(IndexModule.INDEX_STORE_TYPE_SETTING.getKey(), IndexModule.Type.FS.getSettingsKey()); IndexModule.Type type; diff --git a/server/src/main/java/org/elasticsearch/index/store/IndexStore.java b/server/src/main/java/org/elasticsearch/index/store/IndexStore.java deleted file mode 100644 index 0d41b1ac95d18..0000000000000 --- a/server/src/main/java/org/elasticsearch/index/store/IndexStore.java +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.index.store; - -import org.elasticsearch.index.AbstractIndexComponent; -import org.elasticsearch.index.IndexSettings; -import org.elasticsearch.index.shard.ShardPath; - -public class IndexStore extends AbstractIndexComponent { - - public IndexStore(IndexSettings indexSettings) { - super(indexSettings); - } - - /** - * The shard store class that should be used for each shard. - */ - public DirectoryService newDirectoryService(ShardPath path) { - return new FsDirectoryService(indexSettings, path); - } - -} diff --git a/server/src/main/java/org/elasticsearch/indices/IndicesService.java b/server/src/main/java/org/elasticsearch/indices/IndicesService.java index be5e1cae4fa8e..16382d15cd325 100644 --- a/server/src/main/java/org/elasticsearch/indices/IndicesService.java +++ b/server/src/main/java/org/elasticsearch/indices/IndicesService.java @@ -110,13 +110,13 @@ import org.elasticsearch.index.shard.IndexingOperationListener; import org.elasticsearch.index.shard.IndexingStats; import org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.index.store.IndexStore; import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.indices.cluster.IndicesClusterStateService; import org.elasticsearch.indices.fielddata.cache.IndicesFieldDataCache; import org.elasticsearch.indices.mapper.MapperRegistry; import org.elasticsearch.indices.recovery.PeerRecoveryTargetService; import org.elasticsearch.indices.recovery.RecoveryState; +import org.elasticsearch.plugins.IndexStorePlugin; import org.elasticsearch.plugins.PluginsService; import org.elasticsearch.repositories.RepositoriesService; import org.elasticsearch.script.ScriptService; @@ -200,7 +200,7 @@ public class IndicesService extends AbstractLifecycleComponent private final IndicesQueryCache indicesQueryCache; private final MetaStateService metaStateService; private final Collection>> engineFactoryProviders; - private final Map> indexStoreFactories; + private final Map directoryFactories; final AbstractRefCounted indicesRefCount; // pkg-private for testing private final CountDownLatch closeLatch = new CountDownLatch(1); @@ -216,7 +216,7 @@ public IndicesService(Settings settings, PluginsService pluginsService, NodeEnvi IndexScopedSettings indexScopedSettings, CircuitBreakerService circuitBreakerService, BigArrays bigArrays, ScriptService scriptService, Client client, MetaStateService metaStateService, Collection>> engineFactoryProviders, - Map> indexStoreFactories) { + Map directoryFactories) { this.settings = settings; this.threadPool = threadPool; this.pluginsService = pluginsService; @@ -251,13 +251,13 @@ public void onRemoval(ShardId shardId, String fieldName, boolean wasEvicted, lon this.engineFactoryProviders = engineFactoryProviders; // do not allow any plugin-provided index store type to conflict with a built-in type - for (final String indexStoreType : indexStoreFactories.keySet()) { + for (final String indexStoreType : directoryFactories.keySet()) { if (IndexModule.isBuiltinType(indexStoreType)) { throw new IllegalStateException("registered index store type [" + indexStoreType + "] conflicts with a built-in type"); } } - this.indexStoreFactories = indexStoreFactories; + this.directoryFactories = directoryFactories; // doClose() is called when shutting down a node, yet there might still be ongoing requests // that we need to wait for before closing some resources such as the caches. In order to // avoid closing these resources while ongoing requests are still being processed, we use a @@ -547,7 +547,7 @@ private synchronized IndexService createIndexService(IndexService.IndexCreationC idxSettings.getNumberOfReplicas(), indexCreationContext); - final IndexModule indexModule = new IndexModule(idxSettings, analysisRegistry, getEngineFactory(idxSettings), indexStoreFactories); + final IndexModule indexModule = new IndexModule(idxSettings, analysisRegistry, getEngineFactory(idxSettings), directoryFactories); for (IndexingOperationListener operationListener : indexingOperationListeners) { indexModule.addIndexOperationListener(operationListener); } @@ -614,7 +614,7 @@ private EngineFactory getEngineFactory(final IndexSettings idxSettings) { */ public synchronized MapperService createIndexMapperService(IndexMetaData indexMetaData) throws IOException { final IndexSettings idxSettings = new IndexSettings(indexMetaData, this.settings, indexScopedSettings); - final IndexModule indexModule = new IndexModule(idxSettings, analysisRegistry, getEngineFactory(idxSettings), indexStoreFactories); + final IndexModule indexModule = new IndexModule(idxSettings, analysisRegistry, getEngineFactory(idxSettings), directoryFactories); pluginsService.onIndexModule(indexModule); return indexModule.newIndexMapperService(xContentRegistry, mapperRegistry, scriptService); } diff --git a/server/src/main/java/org/elasticsearch/node/Node.java b/server/src/main/java/org/elasticsearch/node/Node.java index fab08ab1c03f7..6e592ba324fb4 100644 --- a/server/src/main/java/org/elasticsearch/node/Node.java +++ b/server/src/main/java/org/elasticsearch/node/Node.java @@ -99,7 +99,6 @@ import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.analysis.AnalysisRegistry; import org.elasticsearch.index.engine.EngineFactory; -import org.elasticsearch.index.store.IndexStore; import org.elasticsearch.indices.IndicesModule; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.analysis.AnalysisModule; @@ -413,10 +412,10 @@ protected Node( .collect(Collectors.toList()); - final Map> indexStoreFactories = + final Map indexStoreFactories = pluginsService.filterPlugins(IndexStorePlugin.class) .stream() - .map(IndexStorePlugin::getIndexStoreFactories) + .map(IndexStorePlugin::getDirectoryFactories) .flatMap(m -> m.entrySet().stream()) .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); diff --git a/server/src/main/java/org/elasticsearch/plugins/IndexStorePlugin.java b/server/src/main/java/org/elasticsearch/plugins/IndexStorePlugin.java index 16eec535e4b4a..2beaf1935e409 100644 --- a/server/src/main/java/org/elasticsearch/plugins/IndexStorePlugin.java +++ b/server/src/main/java/org/elasticsearch/plugins/IndexStorePlugin.java @@ -19,24 +19,40 @@ package org.elasticsearch.plugins; +import org.apache.lucene.store.Directory; import org.elasticsearch.index.IndexSettings; -import org.elasticsearch.index.store.IndexStore; +import org.elasticsearch.index.shard.ShardPath; +import java.io.IOException; import java.util.Map; -import java.util.function.Function; /** - * A plugin that provides alternative index store implementations. + * A plugin that provides alternative directory implementations. */ public interface IndexStorePlugin { /** - * The index store factories for this plugin. When an index is created the store type setting + * An interface that describes how to create a new directory instance per shard. + */ + @FunctionalInterface + interface DirectoryFactory { + /** + * Creates a new directory per shard. This method is called once per shard on shard creation. + * @param indexSettings the shards index settings + * @param shardPath the path the shard is using + * @return a new lucene directory instance + * @throws IOException if an IOException occurs while opening the directory + */ + Directory newDirectory(IndexSettings indexSettings, ShardPath shardPath) throws IOException; + } + + /** + * The {@link DirectoryFactory} mappings for this plugin. When an index is created the store type setting * {@link org.elasticsearch.index.IndexModule#INDEX_STORE_TYPE_SETTING} on the index will be examined and either use the default or a - * built-in type, or looked up among all the index store factories from {@link IndexStore} plugins. + * built-in type, or looked up among all the directory factories from {@link IndexStorePlugin} plugins. * - * @return a map from store type to an index store factory + * @return a map from store type to an directory factory */ - Map> getIndexStoreFactories(); + Map getDirectoryFactories(); } diff --git a/server/src/test/java/org/elasticsearch/index/IndexModuleTests.java b/server/src/test/java/org/elasticsearch/index/IndexModuleTests.java index 351cccdff4aa0..d0f811007a6fa 100644 --- a/server/src/test/java/org/elasticsearch/index/IndexModuleTests.java +++ b/server/src/test/java/org/elasticsearch/index/IndexModuleTests.java @@ -29,6 +29,7 @@ import org.apache.lucene.search.Weight; import org.apache.lucene.search.similarities.BM25Similarity; import org.apache.lucene.search.similarities.Similarity; +import org.apache.lucene.store.Directory; import org.apache.lucene.util.SetOnce.AlreadySetException; import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.IndexMetaData; @@ -60,9 +61,10 @@ import org.elasticsearch.index.shard.IndexingOperationListener; import org.elasticsearch.index.shard.SearchOperationListener; import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.index.shard.ShardPath; import org.elasticsearch.index.similarity.NonNegativeScoresSimilarity; import org.elasticsearch.index.similarity.SimilarityService; -import org.elasticsearch.index.store.IndexStore; +import org.elasticsearch.index.store.FsDirectoryFactory; import org.elasticsearch.indices.IndicesModule; import org.elasticsearch.indices.IndicesQueryCache; import org.elasticsearch.indices.breaker.CircuitBreakerService; @@ -70,6 +72,7 @@ import org.elasticsearch.indices.cluster.IndicesClusterStateService.AllocatedIndices.IndexRemovalReason; import org.elasticsearch.indices.fielddata.cache.IndicesFieldDataCache; import org.elasticsearch.indices.mapper.MapperRegistry; +import org.elasticsearch.plugins.IndexStorePlugin; import org.elasticsearch.script.ScriptService; import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.test.ClusterServiceUtils; @@ -86,7 +89,6 @@ import java.util.Map; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; -import java.util.function.Function; import static java.util.Collections.emptyMap; import static org.elasticsearch.index.IndexService.IndexCreationContext.CREATE_INDEX; @@ -174,11 +176,12 @@ public void testRegisterIndexStore() throws IOException { .put(IndexModule.INDEX_STORE_TYPE_SETTING.getKey(), "foo_store") .build(); final IndexSettings indexSettings = IndexSettingsModule.newIndexSettings(index, settings); - final Map> indexStoreFactories = Collections.singletonMap("foo_store", FooStore::new); + final Map indexStoreFactories = Collections.singletonMap( + "foo_store", new FooFunction()); final IndexModule module = new IndexModule(indexSettings, emptyAnalysisRegistry, new InternalEngineFactory(), indexStoreFactories); final IndexService indexService = newIndexService(module); - assertThat(indexService.getIndexStore(), instanceOf(FooStore.class)); + assertThat(indexService.getDirectoryFactory(), instanceOf(FooFunction.class)); indexService.close("simon says", false); } @@ -444,10 +447,11 @@ public SimScorer scorer(float boost, CollectionStatistics collectionStats, TermS } } - public static final class FooStore extends IndexStore { + public static final class FooFunction implements IndexStorePlugin.DirectoryFactory { - public FooStore(IndexSettings indexSettings) { - super(indexSettings); + @Override + public Directory newDirectory(IndexSettings indexSettings, ShardPath shardPath) throws IOException { + return new FsDirectoryFactory().newDirectory(indexSettings, shardPath); } } diff --git a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java index 1710154f72f94..5187ef37fcdf8 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java @@ -125,7 +125,7 @@ import org.elasticsearch.test.DummyShardLock; import org.elasticsearch.test.FieldMaskingReader; import org.elasticsearch.test.VersionUtils; -import org.elasticsearch.test.store.MockFSDirectoryService; +import org.elasticsearch.test.store.MockFSDirectoryFactory; import org.elasticsearch.threadpool.ThreadPool; import org.junit.Assert; @@ -3819,7 +3819,7 @@ public InternalEngine recoverFromTranslog(TranslogRecoveryRunner translogRecover readyToCloseLatch.await(); shard.close("testing", false); // in integration tests, this is done as a listener on IndexService. - MockFSDirectoryService.checkIndex(logger, shard.store(), shard.shardId); + MockFSDirectoryFactory.checkIndex(logger, shard.store(), shard.shardId); } catch (InterruptedException | IOException e) { throw new AssertionError(e); } finally { diff --git a/server/src/test/java/org/elasticsearch/index/store/IndexStoreTests.java b/server/src/test/java/org/elasticsearch/index/store/FsDirectoryFactoryTests.java similarity index 59% rename from server/src/test/java/org/elasticsearch/index/store/IndexStoreTests.java rename to server/src/test/java/org/elasticsearch/index/store/FsDirectoryFactoryTests.java index 21c8d1c1d78a4..0f24f8f3a5a4f 100644 --- a/server/src/test/java/org/elasticsearch/index/store/IndexStoreTests.java +++ b/server/src/test/java/org/elasticsearch/index/store/FsDirectoryFactoryTests.java @@ -19,10 +19,12 @@ package org.elasticsearch.index.store; import org.apache.lucene.store.Directory; +import org.apache.lucene.store.FileSwitchDirectory; import org.apache.lucene.store.MMapDirectory; import org.apache.lucene.store.NIOFSDirectory; import org.apache.lucene.store.NoLockFactory; import org.apache.lucene.store.SimpleFSDirectory; +import org.apache.lucene.store.SleepingLockWrapper; import org.apache.lucene.util.Constants; import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.IndexMetaData; @@ -36,32 +38,68 @@ import org.elasticsearch.test.IndexSettingsModule; import java.io.IOException; +import java.nio.file.Files; import java.nio.file.Path; +import java.util.Arrays; import java.util.Locale; -public class IndexStoreTests extends ESTestCase { +public class FsDirectoryFactoryTests extends ESTestCase { + + public void testPreload() throws IOException { + doTestPreload(); + doTestPreload("nvd", "dvd", "tim"); + doTestPreload("*"); + } + + private void doTestPreload(String...preload) throws IOException { + Settings build = Settings.builder() + .put(IndexModule.INDEX_STORE_TYPE_SETTING.getKey(), "mmapfs") + .putList(IndexModule.INDEX_STORE_PRE_LOAD_SETTING.getKey(), preload) + .build(); + IndexSettings settings = IndexSettingsModule.newIndexSettings("foo", build); + Path tempDir = createTempDir().resolve(settings.getUUID()).resolve("0"); + Files.createDirectories(tempDir); + ShardPath path = new ShardPath(false, tempDir, tempDir, new ShardId(settings.getIndex(), 0)); + FsDirectoryFactory fsDirectoryFactory = new FsDirectoryFactory(); + Directory directory = fsDirectoryFactory.newDirectory(settings, path); + assertFalse(directory instanceof SleepingLockWrapper); + if (preload.length == 0) { + assertTrue(directory.toString(), directory instanceof MMapDirectory); + assertFalse(((MMapDirectory) directory).getPreload()); + } else if (Arrays.asList(preload).contains("*")) { + assertTrue(directory.toString(), directory instanceof MMapDirectory); + assertTrue(((MMapDirectory) directory).getPreload()); + } else { + assertTrue(directory.toString(), directory instanceof FileSwitchDirectory); + FileSwitchDirectory fsd = (FileSwitchDirectory) directory; + assertTrue(fsd.getPrimaryDir() instanceof MMapDirectory); + assertTrue(((MMapDirectory) fsd.getPrimaryDir()).getPreload()); + assertTrue(fsd.getSecondaryDir() instanceof MMapDirectory); + assertFalse(((MMapDirectory) fsd.getSecondaryDir()).getPreload()); + } + } public void testStoreDirectory() throws IOException { Index index = new Index("foo", "fooUUID"); final Path tempDir = createTempDir().resolve(index.getUUID()).resolve("0"); // default - doTestStoreDirectory(index, tempDir, null, IndexModule.Type.FS); + doTestStoreDirectory(tempDir, null, IndexModule.Type.FS); // explicit directory impls for (IndexModule.Type type : IndexModule.Type.values()) { - doTestStoreDirectory(index, tempDir, type.name().toLowerCase(Locale.ROOT), type); + doTestStoreDirectory(tempDir, type.name().toLowerCase(Locale.ROOT), type); } } - private void doTestStoreDirectory(Index index, Path tempDir, String typeSettingValue, IndexModule.Type type) throws IOException { + private void doTestStoreDirectory(Path tempDir, String typeSettingValue, IndexModule.Type type) throws IOException { Settings.Builder settingsBuilder = Settings.builder() - .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT); + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT); if (typeSettingValue != null) { settingsBuilder.put(IndexModule.INDEX_STORE_TYPE_SETTING.getKey(), typeSettingValue); } Settings settings = settingsBuilder.build(); IndexSettings indexSettings = IndexSettingsModule.newIndexSettings("foo", settings); - FsDirectoryService service = new FsDirectoryService(indexSettings, new ShardPath(false, tempDir, tempDir, new ShardId(index, 0))); - try (Directory directory = service.newFSDirectory(tempDir, NoLockFactory.INSTANCE)) { + FsDirectoryFactory service = new FsDirectoryFactory(); + try (Directory directory = service.newFSDirectory(tempDir, NoLockFactory.INSTANCE, indexSettings)) { switch (type) { case HYBRIDFS: assertHybridDirectory(directory); @@ -91,8 +129,8 @@ private void doTestStoreDirectory(Index index, Path tempDir, String typeSettingV } private void assertHybridDirectory(Directory directory) { - assertTrue(directory.toString(), directory instanceof FsDirectoryService.HybridDirectory); - Directory randomAccessDirectory = ((FsDirectoryService.HybridDirectory) directory).getRandomAccessDirectory(); + assertTrue(directory.toString(), directory instanceof FsDirectoryFactory.HybridDirectory); + Directory randomAccessDirectory = ((FsDirectoryFactory.HybridDirectory) directory).getRandomAccessDirectory(); assertTrue("randomAccessDirectory: " + randomAccessDirectory.toString(), randomAccessDirectory instanceof MMapDirectory); } } diff --git a/server/src/test/java/org/elasticsearch/index/store/FsDirectoryServiceTests.java b/server/src/test/java/org/elasticsearch/index/store/FsDirectoryServiceTests.java deleted file mode 100644 index e84ff3f32841b..0000000000000 --- a/server/src/test/java/org/elasticsearch/index/store/FsDirectoryServiceTests.java +++ /dev/null @@ -1,73 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.elasticsearch.index.store; - -import org.apache.lucene.store.Directory; -import org.apache.lucene.store.FileSwitchDirectory; -import org.apache.lucene.store.MMapDirectory; -import org.apache.lucene.store.SleepingLockWrapper; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.index.IndexModule; -import org.elasticsearch.index.IndexSettings; -import org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.index.shard.ShardPath; -import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.test.IndexSettingsModule; - -import java.io.IOException; -import java.nio.file.Files; -import java.nio.file.Path; -import java.util.Arrays; - -public class FsDirectoryServiceTests extends ESTestCase { - - public void testPreload() throws IOException { - doTestPreload(); - doTestPreload("nvd", "dvd", "tim"); - doTestPreload("*"); - } - - private void doTestPreload(String...preload) throws IOException { - Settings build = Settings.builder() - .put(IndexModule.INDEX_STORE_TYPE_SETTING.getKey(), "mmapfs") - .putList(IndexModule.INDEX_STORE_PRE_LOAD_SETTING.getKey(), preload) - .build(); - IndexSettings settings = IndexSettingsModule.newIndexSettings("foo", build); - Path tempDir = createTempDir().resolve(settings.getUUID()).resolve("0"); - Files.createDirectories(tempDir); - ShardPath path = new ShardPath(false, tempDir, tempDir, new ShardId(settings.getIndex(), 0)); - FsDirectoryService fsDirectoryService = new FsDirectoryService(settings, path); - Directory directory = fsDirectoryService.newDirectory(); - assertFalse(directory instanceof SleepingLockWrapper); - if (preload.length == 0) { - assertTrue(directory.toString(), directory instanceof MMapDirectory); - assertFalse(((MMapDirectory) directory).getPreload()); - } else if (Arrays.asList(preload).contains("*")) { - assertTrue(directory.toString(), directory instanceof MMapDirectory); - assertTrue(((MMapDirectory) directory).getPreload()); - } else { - assertTrue(directory.toString(), directory instanceof FileSwitchDirectory); - FileSwitchDirectory fsd = (FileSwitchDirectory) directory; - assertTrue(fsd.getPrimaryDir() instanceof MMapDirectory); - assertTrue(((MMapDirectory) fsd.getPrimaryDir()).getPreload()); - assertTrue(fsd.getSecondaryDir() instanceof MMapDirectory); - assertFalse(((MMapDirectory) fsd.getSecondaryDir()).getPreload()); - } - } -} diff --git a/server/src/test/java/org/elasticsearch/plugins/IndexStorePluginTests.java b/server/src/test/java/org/elasticsearch/plugins/IndexStorePluginTests.java index d413c0f0be229..fac270172b079 100644 --- a/server/src/test/java/org/elasticsearch/plugins/IndexStorePluginTests.java +++ b/server/src/test/java/org/elasticsearch/plugins/IndexStorePluginTests.java @@ -22,15 +22,13 @@ import org.elasticsearch.bootstrap.JavaVersion; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.IndexModule; -import org.elasticsearch.index.IndexSettings; -import org.elasticsearch.index.store.IndexStore; +import org.elasticsearch.index.store.FsDirectoryFactory; import org.elasticsearch.node.MockNode; import org.elasticsearch.test.ESTestCase; import java.util.Arrays; import java.util.Collections; import java.util.Map; -import java.util.function.Function; import static org.elasticsearch.test.hamcrest.RegexMatcher.matches; import static org.hamcrest.Matchers.containsString; @@ -41,8 +39,8 @@ public class IndexStorePluginTests extends ESTestCase { public static class BarStorePlugin extends Plugin implements IndexStorePlugin { @Override - public Map> getIndexStoreFactories() { - return Collections.singletonMap("store", IndexStore::new); + public Map getDirectoryFactories() { + return Collections.singletonMap("store", new FsDirectoryFactory()); } } @@ -50,8 +48,8 @@ public Map> getIndexStoreFactories() public static class FooStorePlugin extends Plugin implements IndexStorePlugin { @Override - public Map> getIndexStoreFactories() { - return Collections.singletonMap("store", IndexStore::new); + public Map getDirectoryFactories() { + return Collections.singletonMap("store", new FsDirectoryFactory()); } } @@ -65,8 +63,8 @@ public static class ConflictingStorePlugin extends Plugin implements IndexStoreP } @Override - public Map> getIndexStoreFactories() { - return Collections.singletonMap(TYPE, IndexStore::new); + public Map getDirectoryFactories() { + return Collections.singletonMap(TYPE, new FsDirectoryFactory()); } } @@ -86,11 +84,11 @@ public void testDuplicateIndexStoreFactories() { if (JavaVersion.current().compareTo(JavaVersion.parse("9")) >= 0) { assertThat(e, hasToString(matches( "java.lang.IllegalStateException: Duplicate key store \\(attempted merging values " + - "org.elasticsearch.plugins.IndexStorePluginTests\\$BarStorePlugin.* " + - "and org.elasticsearch.plugins.IndexStorePluginTests\\$FooStorePlugin.*\\)"))); + "org.elasticsearch.index.store.FsDirectoryFactory@[\\w\\d]+ " + + "and org.elasticsearch.index.store.FsDirectoryFactory@[\\w\\d]+\\)"))); } else { assertThat(e, hasToString(matches( - "java.lang.IllegalStateException: Duplicate key org.elasticsearch.plugins.IndexStorePluginTests\\$BarStorePlugin.*"))); + "java.lang.IllegalStateException: Duplicate key org.elasticsearch.index.store.FsDirectoryFactory@[\\w\\d]+"))); } } diff --git a/server/src/test/java/org/elasticsearch/search/basic/SearchWithRandomIOExceptionsIT.java b/server/src/test/java/org/elasticsearch/search/basic/SearchWithRandomIOExceptionsIT.java index b90d84e61f183..0a4d3201f5cc3 100644 --- a/server/src/test/java/org/elasticsearch/search/basic/SearchWithRandomIOExceptionsIT.java +++ b/server/src/test/java/org/elasticsearch/search/basic/SearchWithRandomIOExceptionsIT.java @@ -37,7 +37,7 @@ import org.elasticsearch.plugins.Plugin; import org.elasticsearch.search.sort.SortOrder; import org.elasticsearch.test.ESIntegTestCase; -import org.elasticsearch.test.store.MockFSDirectoryService; +import org.elasticsearch.test.store.MockFSDirectoryFactory; import org.elasticsearch.test.store.MockFSIndexStore; import java.io.IOException; import java.util.Arrays; @@ -107,16 +107,16 @@ public void testRandomDirectoryIOExceptions() throws IOException, InterruptedExc client().admin().indices().prepareFlush("test").execute().get(); client().admin().indices().prepareClose("test").execute().get(); client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder() - .put(MockFSDirectoryService.RANDOM_IO_EXCEPTION_RATE_SETTING.getKey(), exceptionRate) - .put(MockFSDirectoryService.RANDOM_IO_EXCEPTION_RATE_ON_OPEN_SETTING.getKey(), exceptionOnOpenRate)); + .put(MockFSDirectoryFactory.RANDOM_IO_EXCEPTION_RATE_SETTING.getKey(), exceptionRate) + .put(MockFSDirectoryFactory.RANDOM_IO_EXCEPTION_RATE_ON_OPEN_SETTING.getKey(), exceptionOnOpenRate)); client().admin().indices().prepareOpen("test").execute().get(); } else { Settings.Builder settings = Settings.builder() .put("index.number_of_replicas", randomIntBetween(0, 1)) .put(MockFSIndexStore.INDEX_CHECK_INDEX_ON_CLOSE_SETTING.getKey(), false) - .put(MockFSDirectoryService.RANDOM_IO_EXCEPTION_RATE_SETTING.getKey(), exceptionRate) + .put(MockFSDirectoryFactory.RANDOM_IO_EXCEPTION_RATE_SETTING.getKey(), exceptionRate) // we cannot expect that the index will be valid - .put(MockFSDirectoryService.RANDOM_IO_EXCEPTION_RATE_ON_OPEN_SETTING.getKey(), exceptionOnOpenRate); + .put(MockFSDirectoryFactory.RANDOM_IO_EXCEPTION_RATE_ON_OPEN_SETTING.getKey(), exceptionOnOpenRate); logger.info("creating index: [test] using settings: [{}]", settings.build()); client().admin().indices().prepareCreate("test") .setSettings(settings) @@ -198,8 +198,8 @@ public void testRandomDirectoryIOExceptions() throws IOException, InterruptedExc // check the index still contains the records that we indexed without errors client().admin().indices().prepareClose("test").execute().get(); client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder() - .put(MockFSDirectoryService.RANDOM_IO_EXCEPTION_RATE_SETTING.getKey(), 0) - .put(MockFSDirectoryService.RANDOM_IO_EXCEPTION_RATE_ON_OPEN_SETTING.getKey(), 0)); + .put(MockFSDirectoryFactory.RANDOM_IO_EXCEPTION_RATE_SETTING.getKey(), 0) + .put(MockFSDirectoryFactory.RANDOM_IO_EXCEPTION_RATE_ON_OPEN_SETTING.getKey(), 0)); client().admin().indices().prepareOpen("test").execute().get(); ensureGreen(); SearchResponse searchResponse = client().prepareSearch().setTypes("type") diff --git a/test/framework/src/main/java/org/elasticsearch/test/store/MockFSDirectoryService.java b/test/framework/src/main/java/org/elasticsearch/test/store/MockFSDirectoryFactory.java similarity index 78% rename from test/framework/src/main/java/org/elasticsearch/test/store/MockFSDirectoryService.java rename to test/framework/src/main/java/org/elasticsearch/test/store/MockFSDirectoryFactory.java index 65a66989cdd97..58e881b296a7d 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/store/MockFSDirectoryService.java +++ b/test/framework/src/main/java/org/elasticsearch/test/store/MockFSDirectoryFactory.java @@ -19,19 +19,16 @@ package org.elasticsearch.test.store; -import com.carrotsearch.randomizedtesting.SeedUtils; import com.carrotsearch.randomizedtesting.generators.RandomPicks; import org.apache.logging.log4j.Logger; import org.apache.lucene.index.CheckIndex; import org.apache.lucene.store.BaseDirectoryWrapper; import org.apache.lucene.store.Directory; -import org.apache.lucene.store.LockFactory; import org.apache.lucene.store.LockObtainFailedException; import org.apache.lucene.store.MockDirectoryWrapper; import org.apache.lucene.util.LuceneTestCase; import org.apache.lucene.util.TestRuleMarkFailure; import org.elasticsearch.cluster.metadata.IndexMetaData; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.settings.Setting; @@ -41,8 +38,9 @@ import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.shard.ShardPath; -import org.elasticsearch.index.store.FsDirectoryService; +import org.elasticsearch.index.store.FsDirectoryFactory; import org.elasticsearch.index.store.Store; +import org.elasticsearch.plugins.IndexStorePlugin; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESTestCase; import org.junit.Assert; @@ -51,11 +49,10 @@ import java.io.IOException; import java.io.PrintStream; import java.nio.charset.StandardCharsets; -import java.nio.file.Path; import java.util.Arrays; import java.util.Random; -public class MockFSDirectoryService extends FsDirectoryService { +public class MockFSDirectoryFactory implements IndexStorePlugin.DirectoryFactory { public static final Setting RANDOM_IO_EXCEPTION_RATE_ON_OPEN_SETTING = Setting.doubleSetting("index.store.mock.random.io_exception_rate_on_open", 0.0d, 0.0d, Property.IndexScope, Property.NodeScope); @@ -64,42 +61,12 @@ public class MockFSDirectoryService extends FsDirectoryService { public static final Setting CRASH_INDEX_SETTING = Setting.boolSetting("index.store.mock.random.crash_index", true, Property.IndexScope, Property.NodeScope); - private final FsDirectoryService delegateService; - private final Random random; - private final double randomIOExceptionRate; - private final double randomIOExceptionRateOnOpen; - private final MockDirectoryWrapper.Throttling throttle; - private final boolean crashIndex; - - @Inject - public MockFSDirectoryService(IndexSettings idxSettings, final ShardPath path) { - super(idxSettings, path); - Settings indexSettings = idxSettings.getSettings(); - final long seed = idxSettings.getValue(ESIntegTestCase.INDEX_TEST_SEED_SETTING); - this.random = new Random(seed); - - randomIOExceptionRate = RANDOM_IO_EXCEPTION_RATE_SETTING.get(indexSettings); - randomIOExceptionRateOnOpen = RANDOM_IO_EXCEPTION_RATE_ON_OPEN_SETTING.get(indexSettings); - random.nextInt(shardId.getId() + 1); // some randomness per shard - throttle = MockDirectoryWrapper.Throttling.NEVER; - crashIndex = CRASH_INDEX_SETTING.get(indexSettings); - - if (logger.isDebugEnabled()) { - logger.debug("Using MockDirWrapper with seed [{}] throttle: [{}] crashIndex: [{}]", SeedUtils.formatSeed(seed), - throttle, crashIndex); - } - delegateService = randomDirectoryService(idxSettings, path); - } - - - @Override - public Directory newDirectory() throws IOException { - return wrap(delegateService.newDirectory()); - } - @Override - protected synchronized Directory newFSDirectory(Path location, LockFactory lockFactory) throws IOException { - throw new UnsupportedOperationException(); + public Directory newDirectory(IndexSettings idxSettings, ShardPath path) throws IOException { + Settings indexSettings = idxSettings.getSettings(); + Random random = new Random(idxSettings.getValue(ESIntegTestCase.INDEX_TEST_SEED_SETTING)); + return wrap(randomDirectoryService(random, idxSettings, path), random, indexSettings, + path.getShardId()); } public static void checkIndex(Logger logger, Store store, ShardId shardId) { @@ -137,8 +104,14 @@ public static void checkIndex(Logger logger, Store store, ShardId shardId) { } } - private Directory wrap(Directory dir) { - final ElasticsearchMockDirectoryWrapper w = new ElasticsearchMockDirectoryWrapper(random, dir, this.crashIndex); + private Directory wrap(Directory dir, Random random, Settings indexSettings, ShardId shardId) { + + double randomIOExceptionRate = RANDOM_IO_EXCEPTION_RATE_SETTING.get(indexSettings); + double randomIOExceptionRateOnOpen = RANDOM_IO_EXCEPTION_RATE_ON_OPEN_SETTING.get(indexSettings); + random.nextInt(shardId.getId() + 1); // some randomness per shard + MockDirectoryWrapper.Throttling throttle = MockDirectoryWrapper.Throttling.NEVER; + boolean crashIndex = CRASH_INDEX_SETTING.get(indexSettings); + final ElasticsearchMockDirectoryWrapper w = new ElasticsearchMockDirectoryWrapper(random, dir, crashIndex); w.setRandomIOExceptionRate(randomIOExceptionRate); w.setRandomIOExceptionRateOnOpen(randomIOExceptionRateOnOpen); w.setThrottling(throttle); @@ -150,7 +123,7 @@ private Directory wrap(Directory dir) { return w; } - private FsDirectoryService randomDirectoryService(IndexSettings indexSettings, ShardPath path) { + private Directory randomDirectoryService(Random random, IndexSettings indexSettings, ShardPath path) throws IOException { final IndexMetaData build = IndexMetaData.builder(indexSettings.getIndexMetaData()) .settings(Settings.builder() // don't use the settings from indexSettings#getSettings() they are merged with node settings and might contain @@ -160,7 +133,7 @@ private FsDirectoryService randomDirectoryService(IndexSettings indexSettings, S RandomPicks.randomFrom(random, IndexModule.Type.values()).getSettingsKey())) .build(); final IndexSettings newIndexSettings = new IndexSettings(build, indexSettings.getNodeSettings()); - return new FsDirectoryService(newIndexSettings, path); + return new FsDirectoryFactory().newDirectory(newIndexSettings, path); } public static final class ElasticsearchMockDirectoryWrapper extends MockDirectoryWrapper { diff --git a/test/framework/src/main/java/org/elasticsearch/test/store/MockFSIndexStore.java b/test/framework/src/main/java/org/elasticsearch/test/store/MockFSIndexStore.java index 1ec5087605539..47a20803f7ac7 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/store/MockFSIndexStore.java +++ b/test/framework/src/main/java/org/elasticsearch/test/store/MockFSIndexStore.java @@ -26,14 +26,10 @@ import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.IndexModule; -import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.shard.IndexEventListener; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.IndexShardState; import org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.index.shard.ShardPath; -import org.elasticsearch.index.store.DirectoryService; -import org.elasticsearch.index.store.IndexStore; import org.elasticsearch.plugins.IndexStorePlugin; import org.elasticsearch.plugins.Plugin; @@ -43,9 +39,8 @@ import java.util.IdentityHashMap; import java.util.List; import java.util.Map; -import java.util.function.Function; -public class MockFSIndexStore extends IndexStore { +public final class MockFSIndexStore { public static final Setting INDEX_CHECK_INDEX_ON_CLOSE_SETTING = Setting.boolSetting("index.store.mock.check_index_on_close", true, Property.IndexScope, Property.NodeScope); @@ -59,14 +54,14 @@ public Settings additionalSettings() { @Override public List> getSettings() { return Arrays.asList(INDEX_CHECK_INDEX_ON_CLOSE_SETTING, - MockFSDirectoryService.CRASH_INDEX_SETTING, - MockFSDirectoryService.RANDOM_IO_EXCEPTION_RATE_SETTING, - MockFSDirectoryService.RANDOM_IO_EXCEPTION_RATE_ON_OPEN_SETTING); + MockFSDirectoryFactory.CRASH_INDEX_SETTING, + MockFSDirectoryFactory.RANDOM_IO_EXCEPTION_RATE_SETTING, + MockFSDirectoryFactory.RANDOM_IO_EXCEPTION_RATE_ON_OPEN_SETTING); } @Override - public Map> getIndexStoreFactories() { - return Collections.singletonMap("mock", MockFSIndexStore::new); + public Map getDirectoryFactories() { + return Collections.singletonMap("mock", new MockFSDirectoryFactory()); } @Override @@ -80,15 +75,6 @@ public void onIndexModule(IndexModule indexModule) { } } - MockFSIndexStore(IndexSettings indexSettings) { - super(indexSettings); - } - - @Override - public DirectoryService newDirectoryService(ShardPath path) { - return new MockFSDirectoryService(indexSettings, path); - } - private static final EnumSet validCheckIndexStates = EnumSet.of( IndexShardState.STARTED, IndexShardState.POST_RECOVERY ); @@ -101,7 +87,7 @@ public void afterIndexShardClosed(ShardId shardId, @Nullable IndexShard indexSha Boolean remove = shardSet.remove(indexShard); if (remove == Boolean.TRUE) { Logger logger = Loggers.getLogger(getClass(), indexShard.shardId()); - MockFSDirectoryService.checkIndex(logger, indexShard.store(), indexShard.shardId()); + MockFSDirectoryFactory.checkIndex(logger, indexShard.store(), indexShard.shardId()); } } } @@ -115,5 +101,4 @@ public void indexShardStateChanged(IndexShard indexShard, @Nullable IndexShardSt } } - } From 93f3d12c759f63c12f6e773cfc514345627a4c16 Mon Sep 17 00:00:00 2001 From: David Kyle Date: Fri, 24 May 2019 10:27:21 +0100 Subject: [PATCH 088/224] [ML] Reenable ml distributed failure test after issue resolution (#42431) Relates to issue #37117 --- .../xpack/ml/integration/MlDistributedFailureIT.java | 2 -- 1 file changed, 2 deletions(-) diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/MlDistributedFailureIT.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/MlDistributedFailureIT.java index 480f85798800b..40249c0bc771e 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/MlDistributedFailureIT.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/MlDistributedFailureIT.java @@ -62,7 +62,6 @@ protected Settings nodeSettings(int nodeOrdinal) { .build(); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/37117") public void testFailOver() throws Exception { internalCluster().ensureAtLeastNumDataNodes(3); ensureStableClusterOnAllNodes(3); @@ -108,7 +107,6 @@ public void testLoseDedicatedMasterNode() throws Exception { }); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/37117") public void testFullClusterRestart() throws Exception { internalCluster().ensureAtLeastNumDataNodes(3); ensureStableClusterOnAllNodes(3); From 306faa41e5337089d6e26d2d4be08d2a55257744 Mon Sep 17 00:00:00 2001 From: David Kyle Date: Fri, 24 May 2019 12:41:49 +0100 Subject: [PATCH 089/224] [ML Data Frame] Reenable muted integration tests (#42373) Reverts muting of tests and simplifies the test teardown so that all data frames will be stopped This reverts commit 4a9438762a562d20e938d2ea82538805f33e85b1 --- .../xpack/dataframe/integration/DataFrameTransformIT.java | 1 - .../xpack/dataframe/integration/DataFrameAuditorIT.java | 2 -- .../integration/DataFrameConfigurationIndexIT.java | 2 -- .../dataframe/integration/DataFrameGetAndGetStatsIT.java | 2 -- .../xpack/dataframe/integration/DataFrameMetaDataIT.java | 2 -- .../xpack/dataframe/integration/DataFramePivotRestIT.java | 2 -- .../dataframe/integration/DataFrameRestTestCase.java | 8 +++++--- .../dataframe/integration/DataFrameTaskFailedStateIT.java | 2 -- .../integration/DataFrameTransformProgressIT.java | 1 - .../xpack/dataframe/integration/DataFrameUsageIT.java | 2 -- 10 files changed, 5 insertions(+), 19 deletions(-) diff --git a/x-pack/plugin/data-frame/qa/multi-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTransformIT.java b/x-pack/plugin/data-frame/qa/multi-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTransformIT.java index 363218d1b0f14..bce4a4a3b503b 100644 --- a/x-pack/plugin/data-frame/qa/multi-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTransformIT.java +++ b/x-pack/plugin/data-frame/qa/multi-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTransformIT.java @@ -30,7 +30,6 @@ public void cleanTransforms() throws IOException { cleanUp(); } - @AwaitsFix( bugUrl = "https://github.com/elastic/elasticsearch/issues/42344") public void testDataFrameTransformCrud() throws Exception { createReviewsIndex(); diff --git a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameAuditorIT.java b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameAuditorIT.java index 7dc79c1ae8fbe..9884c9bb6793b 100644 --- a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameAuditorIT.java +++ b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameAuditorIT.java @@ -6,7 +6,6 @@ package org.elasticsearch.xpack.dataframe.integration; -import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.client.Request; import org.elasticsearch.xpack.dataframe.persistence.DataFrameInternalIndex; import org.junit.Before; @@ -23,7 +22,6 @@ import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.is; -@LuceneTestCase.AwaitsFix( bugUrl = "https://github.com/elastic/elasticsearch/issues/42344") public class DataFrameAuditorIT extends DataFrameRestTestCase { private static final String TEST_USER_NAME = "df_admin_plus_data"; diff --git a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameConfigurationIndexIT.java b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameConfigurationIndexIT.java index d7e12cf2bee4d..681599331c8af 100644 --- a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameConfigurationIndexIT.java +++ b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameConfigurationIndexIT.java @@ -8,7 +8,6 @@ import org.apache.http.entity.ContentType; import org.apache.http.entity.StringEntity; -import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseException; @@ -23,7 +22,6 @@ import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; -@LuceneTestCase.AwaitsFix( bugUrl = "https://github.com/elastic/elasticsearch/issues/42344") public class DataFrameConfigurationIndexIT extends DataFrameRestTestCase { /** diff --git a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameGetAndGetStatsIT.java b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameGetAndGetStatsIT.java index 9bac6ca0b4049..d9927cd09ed8f 100644 --- a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameGetAndGetStatsIT.java +++ b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameGetAndGetStatsIT.java @@ -6,7 +6,6 @@ package org.elasticsearch.xpack.dataframe.integration; -import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.client.Request; import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.xpack.core.dataframe.DataFrameField; @@ -22,7 +21,6 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; -@LuceneTestCase.AwaitsFix( bugUrl = "https://github.com/elastic/elasticsearch/issues/42344") public class DataFrameGetAndGetStatsIT extends DataFrameRestTestCase { private static final String TEST_USER_NAME = "df_user"; diff --git a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameMetaDataIT.java b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameMetaDataIT.java index 5b95d1daead53..26a957ea055c2 100644 --- a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameMetaDataIT.java +++ b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameMetaDataIT.java @@ -6,7 +6,6 @@ package org.elasticsearch.xpack.dataframe.integration; -import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.Version; import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; @@ -16,7 +15,6 @@ import java.io.IOException; import java.util.Map; -@LuceneTestCase.AwaitsFix( bugUrl = "https://github.com/elastic/elasticsearch/issues/42344") public class DataFrameMetaDataIT extends DataFrameRestTestCase { private boolean indicesCreated = false; diff --git a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFramePivotRestIT.java b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFramePivotRestIT.java index dab7e819881d2..770eaec7bd141 100644 --- a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFramePivotRestIT.java +++ b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFramePivotRestIT.java @@ -6,7 +6,6 @@ package org.elasticsearch.xpack.dataframe.integration; -import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.client.Request; import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.junit.Before; @@ -22,7 +21,6 @@ import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; -@LuceneTestCase.AwaitsFix( bugUrl = "https://github.com/elastic/elasticsearch/issues/42344") public class DataFramePivotRestIT extends DataFrameRestTestCase { private static final String TEST_USER_NAME = "df_admin_plus_data"; diff --git a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameRestTestCase.java b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameRestTestCase.java index 7ffa5391b7a4a..23bff163031ce 100644 --- a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameRestTestCase.java +++ b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameRestTestCase.java @@ -301,10 +301,12 @@ public void wipeDataFrameTransforms() throws IOException { request.addParameter("timeout", "10s"); request.addParameter("ignore", "404"); adminClient().performRequest(request); + } + + for (Map transformConfig : transformConfigs) { + String transformId = (String) transformConfig.get("id"); String state = getDataFrameIndexerState(transformId); - if (state != null) { - assertEquals("stopped", getDataFrameIndexerState(transformId)); - } + assertEquals("Transform [" + transformId + "] indexer is not in the stopped state", "stopped", state); } for (Map transformConfig : transformConfigs) { diff --git a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTaskFailedStateIT.java b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTaskFailedStateIT.java index 7b63644dd34ad..96aeeda8755f4 100644 --- a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTaskFailedStateIT.java +++ b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTaskFailedStateIT.java @@ -6,7 +6,6 @@ package org.elasticsearch.xpack.dataframe.integration; -import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.client.ResponseException; import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.rest.RestStatus; @@ -20,7 +19,6 @@ import static org.hamcrest.CoreMatchers.nullValue; import static org.hamcrest.Matchers.equalTo; -@LuceneTestCase.AwaitsFix( bugUrl = "https://github.com/elastic/elasticsearch/issues/42344") public class DataFrameTaskFailedStateIT extends DataFrameRestTestCase { public void testDummy() { diff --git a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTransformProgressIT.java b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTransformProgressIT.java index d6ef3cc641be2..fea225ced3bd9 100644 --- a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTransformProgressIT.java +++ b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTransformProgressIT.java @@ -46,7 +46,6 @@ @LuceneTestCase.AwaitsFix( bugUrl = "https://github.com/elastic/elasticsearch/issues/42344") public class DataFrameTransformProgressIT extends ESRestTestCase { - protected void createReviewsIndex() throws Exception { final int numDocs = 1000; final RestHighLevelClient restClient = new TestRestHighLevelClient(); diff --git a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameUsageIT.java b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameUsageIT.java index f98fa6a271365..4f209c5a9f3f4 100644 --- a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameUsageIT.java +++ b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameUsageIT.java @@ -6,7 +6,6 @@ package org.elasticsearch.xpack.dataframe.integration; -import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; import org.elasticsearch.common.xcontent.support.XContentMapValues; @@ -23,7 +22,6 @@ import static org.elasticsearch.xpack.core.dataframe.DataFrameField.INDEX_DOC_TYPE; import static org.elasticsearch.xpack.dataframe.DataFrameFeatureSet.PROVIDED_STATS; -@LuceneTestCase.AwaitsFix( bugUrl = "https://github.com/elastic/elasticsearch/issues/42344") public class DataFrameUsageIT extends DataFrameRestTestCase { private boolean indicesCreated = false; From 3907a6d1ea5d519fbfcbfde87ba58534b5cae82b Mon Sep 17 00:00:00 2001 From: David Turner Date: Fri, 24 May 2019 14:17:21 +0100 Subject: [PATCH 090/224] Drain master task queue when stabilising (#42504) Today the default stabilisation time is calculated on the assumption that the elected master has no pending tasks to process when it is elected, but this is not a safe assumption to make. This can result in a cluster reaching the end of its stabilisation time without having stabilised. Furthermore in #36943 we increased the probability that each step in `runRandomly()` enqueues another task, vastly increasing the chance that we hit such a situation. This change extends the stabilisation process to allow time for all pending tasks, plus a task that might currently be in flight. Fixes #41967, in which the master entered the stabilisation phase with over 800 tasks to process. --- .../org/elasticsearch/cluster/coordination/Coordinator.java | 2 +- .../cluster/coordination/CoordinatorTests.java | 6 ++++++ .../indices/cluster/FakeThreadPoolMasterService.java | 4 ++++ 3 files changed, 11 insertions(+), 1 deletion(-) diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java b/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java index 6304588e3121a..1e7b38e50d1e9 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java @@ -1231,7 +1231,7 @@ public void run() { @Override public String toString() { - return "scheduled timeout for " + this; + return "scheduled timeout for " + CoordinatorPublication.this; } }, publishTimeout, Names.GENERIC); } diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorTests.java b/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorTests.java index b4d337a1bf57e..5daa863402b2a 100644 --- a/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorTests.java @@ -1515,6 +1515,10 @@ void stabilise(long stabilisationDurationMillis) { final ClusterNode leader = getAnyLeader(); final long leaderTerm = leader.coordinator.getCurrentTerm(); + + final int pendingTaskCount = leader.masterService.getFakeMasterServicePendingTaskCount(); + runFor((pendingTaskCount + 1) * DEFAULT_CLUSTER_STATE_UPDATE_DELAY, "draining task queue"); + final Matcher isEqualToLeaderVersion = equalTo(leader.coordinator.getLastAcceptedState().getVersion()); final String leaderId = leader.getId(); @@ -1527,6 +1531,8 @@ void stabilise(long stabilisationDurationMillis) { assertFalse(nodeId + " should not have an active publication", clusterNode.coordinator.publicationInProgress()); if (clusterNode == leader) { + assertThat(nodeId + " is still the leader", clusterNode.coordinator.getMode(), is(LEADER)); + assertThat(nodeId + " did not change term", clusterNode.coordinator.getCurrentTerm(), is(leaderTerm)); continue; } diff --git a/server/src/test/java/org/elasticsearch/indices/cluster/FakeThreadPoolMasterService.java b/server/src/test/java/org/elasticsearch/indices/cluster/FakeThreadPoolMasterService.java index d535e9e00ee53..e1c7c3fafd274 100644 --- a/server/src/test/java/org/elasticsearch/indices/cluster/FakeThreadPoolMasterService.java +++ b/server/src/test/java/org/elasticsearch/indices/cluster/FakeThreadPoolMasterService.java @@ -84,6 +84,10 @@ public void execute(Runnable command) { }; } + public int getFakeMasterServicePendingTaskCount() { + return pendingTasks.size(); + } + private void scheduleNextTaskIfNecessary() { if (taskInProgress == false && pendingTasks.isEmpty() == false && scheduledNextTask == false) { scheduledNextTask = true; From 631142d5dd088a10de8dcd939b50a14301173283 Mon Sep 17 00:00:00 2001 From: Marios Trivyzas Date: Fri, 24 May 2019 15:20:14 +0200 Subject: [PATCH 091/224] Fix sorting on nested field with unmapped (#42451) Previously sorting on a missing nested field would fail with an Exception: `[nested_field] failed to find nested object under path [nested_path]` despite `unmapped_type` being set on the query. Fixes: #33644 --- .../search/sort/FieldSortBuilder.java | 30 +++++++++++-------- .../search/sort/FieldSortIT.java | 16 ++++++++++ 2 files changed, 33 insertions(+), 13 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/search/sort/FieldSortBuilder.java b/server/src/main/java/org/elasticsearch/search/sort/FieldSortBuilder.java index 2be73a0da9cb6..8abd4b9f40d5c 100644 --- a/server/src/main/java/org/elasticsearch/search/sort/FieldSortBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/sort/FieldSortBuilder.java @@ -373,8 +373,10 @@ public SortFieldAndFormat build(QueryShardContext context) throws IOException { return SORT_DOC; } } else { + boolean isUnmapped = false; MappedFieldType fieldType = context.fieldMapper(fieldName); if (fieldType == null) { + isUnmapped = true; if (unmappedType != null) { fieldType = context.getMapperService().unmappedFieldType(unmappedType); } else { @@ -392,20 +394,22 @@ public SortFieldAndFormat build(QueryShardContext context) throws IOException { localSortMode = reverse ? MultiValueMode.MAX : MultiValueMode.MIN; } - final Nested nested; - if (nestedSort != null) { - if (context.indexVersionCreated().before(Version.V_6_5_0) && nestedSort.getMaxChildren() != Integer.MAX_VALUE) { - throw new QueryShardException(context, - "max_children is only supported on v6.5.0 or higher"); - } - if (nestedSort.getNestedSort() != null && nestedSort.getMaxChildren() != Integer.MAX_VALUE) { - throw new QueryShardException(context, - "max_children is only supported on last level of nested sort"); + Nested nested = null; + if (isUnmapped == false) { + if (nestedSort != null) { + if (context.indexVersionCreated().before(Version.V_6_5_0) && nestedSort.getMaxChildren() != Integer.MAX_VALUE) { + throw new QueryShardException(context, + "max_children is only supported on v6.5.0 or higher"); + } + if (nestedSort.getNestedSort() != null && nestedSort.getMaxChildren() != Integer.MAX_VALUE) { + throw new QueryShardException(context, + "max_children is only supported on last level of nested sort"); + } + // new nested sorts takes priority + nested = resolveNested(context, nestedSort); + } else { + nested = resolveNested(context, nestedPath, nestedFilter); } - // new nested sorts takes priority - nested = resolveNested(context, nestedSort); - } else { - nested = resolveNested(context, nestedPath, nestedFilter); } IndexFieldData fieldData = context.getForField(fieldType); diff --git a/server/src/test/java/org/elasticsearch/search/sort/FieldSortIT.java b/server/src/test/java/org/elasticsearch/search/sort/FieldSortIT.java index 526fe0a48b575..d3f21867ab1d1 100644 --- a/server/src/test/java/org/elasticsearch/search/sort/FieldSortIT.java +++ b/server/src/test/java/org/elasticsearch/search/sort/FieldSortIT.java @@ -903,6 +903,22 @@ public void testIgnoreUnmapped() throws Exception { .addSort(SortBuilders.fieldSort("kkk").unmappedType("keyword")) .get(); assertNoFailures(searchResponse); + + // nested field + searchResponse = client().prepareSearch() + .setQuery(matchAllQuery()) + .addSort(SortBuilders.fieldSort("nested.foo").unmappedType("keyword") + .setNestedSort(new NestedSortBuilder("nested").setNestedSort(new NestedSortBuilder("nested.foo")))) + .get(); + assertNoFailures(searchResponse); + + // nestedQuery + searchResponse = client().prepareSearch() + .setQuery(matchAllQuery()) + .addSort(SortBuilders.fieldSort("nested.foo").unmappedType("keyword") + .setNestedSort(new NestedSortBuilder("nested").setFilter(QueryBuilders.termQuery("nested.foo", "abc")))) + .get(); + assertNoFailures(searchResponse); } public void testSortMVField() throws Exception { From cbf1150d845faafe32e005b0a8749a97803b1b34 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christoph=20B=C3=BCscher?= Date: Fri, 24 May 2019 15:27:10 +0200 Subject: [PATCH 092/224] Small internal AnalysisRegistry changes (#42500) Some internal refactorings to the AnalysisRegistry, spin-off from #40782. --- .../index/analysis/AnalysisRegistry.java | 79 ++++++++----------- 1 file changed, 33 insertions(+), 46 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/index/analysis/AnalysisRegistry.java b/server/src/main/java/org/elasticsearch/index/analysis/AnalysisRegistry.java index d9c4b2c510bc9..684d36c311f8b 100644 --- a/server/src/main/java/org/elasticsearch/index/analysis/AnalysisRegistry.java +++ b/server/src/main/java/org/elasticsearch/index/analysis/AnalysisRegistry.java @@ -25,7 +25,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.env.Environment; -import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.mapper.TextFieldMapper; import org.elasticsearch.indices.analysis.AnalysisModule; @@ -39,6 +38,7 @@ import java.util.Locale; import java.util.Map; import java.util.concurrent.ConcurrentHashMap; +import java.util.function.Function; import java.util.stream.Collectors; import static java.util.Collections.unmodifiableMap; @@ -156,19 +156,18 @@ public void close() throws IOException { * Creates an index-level {@link IndexAnalyzers} from this registry using the given index settings */ public IndexAnalyzers build(IndexSettings indexSettings) throws IOException { - final Map charFilterFactories = buildCharFilterFactories(indexSettings); final Map tokenizerFactories = buildTokenizerFactories(indexSettings); final Map tokenFilterFactories = buildTokenFilterFactories(indexSettings); - final Map> analyzierFactories = buildAnalyzerFactories(indexSettings); + final Map> analyzerFactories = buildAnalyzerFactories(indexSettings); final Map> normalizerFactories = buildNormalizerFactories(indexSettings); - return build(indexSettings, analyzierFactories, normalizerFactories, tokenizerFactories, charFilterFactories, tokenFilterFactories); + return build(indexSettings, analyzerFactories, normalizerFactories, tokenizerFactories, charFilterFactories, tokenFilterFactories); } public Map buildTokenFilterFactories(IndexSettings indexSettings) throws IOException { final Map tokenFiltersSettings = indexSettings.getSettings().getGroups(INDEX_ANALYSIS_FILTER); - return buildMapping(Component.FILTER, indexSettings, tokenFiltersSettings, - Collections.unmodifiableMap(this.tokenFilters), prebuiltAnalysis.preConfiguredTokenFilters); + return buildMapping(Component.FILTER, indexSettings, tokenFiltersSettings, this.tokenFilters, + prebuiltAnalysis.preConfiguredTokenFilters); } public Map buildTokenizerFactories(IndexSettings indexSettings) throws IOException { @@ -202,13 +201,8 @@ public Map> buildNormalizerFactories(IndexSettings i * @return {@link TokenizerFactory} provider or null */ public AnalysisProvider getTokenizerProvider(String tokenizer, IndexSettings indexSettings) { - final Map tokenizerSettings = indexSettings.getSettings().getGroups("index.analysis.tokenizer"); - if (tokenizerSettings.containsKey(tokenizer)) { - Settings currentSettings = tokenizerSettings.get(tokenizer); - return getAnalysisProvider(Component.TOKENIZER, tokenizers, tokenizer, currentSettings.get("type")); - } else { - return getTokenizerProvider(tokenizer); - } + return getProvider(Component.TOKENIZER, tokenizer, indexSettings, "index.analysis.tokenizer", tokenizers, + this::getTokenizerProvider); } /** @@ -220,14 +214,8 @@ public AnalysisProvider getTokenizerProvider(String tokenizer, * @return {@link TokenFilterFactory} provider or null */ public AnalysisProvider getTokenFilterProvider(String tokenFilter, IndexSettings indexSettings) { - final Map tokenFilterSettings = indexSettings.getSettings().getGroups("index.analysis.filter"); - if (tokenFilterSettings.containsKey(tokenFilter)) { - Settings currentSettings = tokenFilterSettings.get(tokenFilter); - String typeName = currentSettings.get("type"); - return getAnalysisProvider(Component.FILTER, tokenFilters, tokenFilter, typeName); - } else { - return getTokenFilterProvider(tokenFilter); - } + return getProvider(Component.FILTER, tokenFilter, indexSettings, "index.analysis.filter", tokenFilters, + this::getTokenFilterProvider); } /** @@ -239,12 +227,18 @@ public AnalysisProvider getTokenFilterProvider(String tokenF * @return {@link CharFilterFactory} provider or null */ public AnalysisProvider getCharFilterProvider(String charFilter, IndexSettings indexSettings) { - final Map tokenFilterSettings = indexSettings.getSettings().getGroups("index.analysis.char_filter"); - if (tokenFilterSettings.containsKey(charFilter)) { - Settings currentSettings = tokenFilterSettings.get(charFilter); - return getAnalysisProvider(Component.CHAR_FILTER, charFilters, charFilter, currentSettings.get("type")); + return getProvider(Component.CHAR_FILTER, charFilter, indexSettings, "index.analysis.char_filter", charFilters, + this::getCharFilterProvider); + } + + private AnalysisProvider getProvider(Component componentType, String componentName, IndexSettings indexSettings, + String componentSettings, Map> providers, Function> providerFunction) { + final Map subSettings = indexSettings.getSettings().getGroups(componentSettings); + if (subSettings.containsKey(componentName)) { + Settings currentSettings = subSettings.get(componentName); + return getAnalysisProvider(componentType, providers, componentName, currentSettings.get("type")); } else { - return getCharFilterProvider(charFilter); + return providerFunction.apply(componentName); } } @@ -323,9 +317,9 @@ private Map buildMapping(Component component, IndexSettings setti } // go over the char filters in the bindings and register the ones that are not configured - for (Map.Entry> entry : providerMap.entrySet()) { + for (Map.Entry> entry : providerMap.entrySet()) { String name = entry.getKey(); - AnalysisModule.AnalysisProvider provider = entry.getValue(); + AnalysisProvider provider = entry.getValue(); // we don't want to re-register one that already exists if (settingsMap.containsKey(name)) { continue; @@ -334,7 +328,7 @@ private Map buildMapping(Component component, IndexSettings setti if (provider.requiresAnalysisSettings()) { continue; } - AnalysisModule.AnalysisProvider defaultProvider = defaultInstance.get(name); + AnalysisProvider defaultProvider = defaultInstance.get(name); final T instance; if (defaultProvider == null) { instance = provider.get(settings, environment, name, defaultSettings); @@ -344,20 +338,15 @@ private Map buildMapping(Component component, IndexSettings setti factories.put(name, instance); } - for (Map.Entry> entry : defaultInstance.entrySet()) { + for (Map.Entry> entry : defaultInstance.entrySet()) { final String name = entry.getKey(); - final AnalysisModule.AnalysisProvider provider = entry.getValue(); - if (factories.containsKey(name) == false) { - final T instance = provider.get(settings, environment, name, defaultSettings); - if (factories.containsKey(name) == false) { - factories.put(name, instance); - } - } + final AnalysisProvider provider = entry.getValue(); + factories.putIfAbsent(name, provider.get(settings, environment, name, defaultSettings)); } return factories; } - private AnalysisProvider getAnalysisProvider(Component component, Map> providerMap, + private static AnalysisProvider getAnalysisProvider(Component component, Map> providerMap, String name, String typeName) { if (typeName == null) { throw new IllegalArgumentException(component + " [" + name + "] must specify either an analyzer type, or a tokenizer"); @@ -371,7 +360,7 @@ private AnalysisProvider getAnalysisProvider(Component component, Map>> analyzerProviderFactories; + final Map>> analyzerProviderFactories; final Map> preConfiguredTokenFilters; final Map> preConfiguredTokenizers; final Map> preConfiguredCharFilterFactories; @@ -396,19 +385,19 @@ private PrebuiltAnalysis( this.preConfiguredTokenizers = preConfiguredTokenizers; } - public AnalysisModule.AnalysisProvider getCharFilterFactory(String name) { + public AnalysisProvider getCharFilterFactory(String name) { return preConfiguredCharFilterFactories.get(name); } - public AnalysisModule.AnalysisProvider getTokenFilterFactory(String name) { + public AnalysisProvider getTokenFilterFactory(String name) { return preConfiguredTokenFilters.get(name); } - public AnalysisModule.AnalysisProvider getTokenizerFactory(String name) { + public AnalysisProvider getTokenizerFactory(String name) { return preConfiguredTokenizers.get(name); } - public AnalysisModule.AnalysisProvider> getAnalyzerProvider(String name) { + public AnalysisProvider> getAnalyzerProvider(String name) { return analyzerProviderFactories.get(name); } @@ -426,8 +415,6 @@ public IndexAnalyzers build(IndexSettings indexSettings, Map charFilterFactoryFactories, Map tokenFilterFactoryFactories) { - Index index = indexSettings.getIndex(); - analyzerProviders = new HashMap<>(analyzerProviders); Map analyzers = new HashMap<>(); Map normalizers = new HashMap<>(); Map whitespaceNormalizers = new HashMap<>(); @@ -458,7 +445,7 @@ public IndexAnalyzers build(IndexSettings indexSettings, if (analyzers.containsKey("default_index")) { throw new IllegalArgumentException("setting [index.analysis.analyzer.default_index] is not supported anymore, use " + - "[index.analysis.analyzer.default] instead for index [" + index.getName() + "]"); + "[index.analysis.analyzer.default] instead for index [" + indexSettings.getIndex().getName() + "]"); } for (Map.Entry analyzer : analyzers.entrySet()) { From da1ba685b16018053b28847cf4618d0ddf9c40fb Mon Sep 17 00:00:00 2001 From: Tal Levy Date: Fri, 24 May 2019 07:44:56 -0700 Subject: [PATCH 093/224] remove 6.3.x constants (#42087) relates to refactoring effort #41164. --- .../client/ml/job/process/ModelSnapshot.java | 2 +- .../index/rankeval/RankEvalRequest.java | 28 +-- .../main/java/org/elasticsearch/Build.java | 35 +-- .../main/java/org/elasticsearch/Version.java | 14 -- .../index/mapper/DynamicTemplate.java | 26 +-- .../index/mapper/RootObjectMapper.java | 2 +- .../indices/flush/SyncedFlushService.java | 13 +- .../search/slice/SliceBuilder.java | 14 +- .../java/org/elasticsearch/BuildTests.java | 2 +- .../java/org/elasticsearch/VersionTests.java | 36 +-- .../health/ClusterHealthRequestTests.java | 5 +- .../common/geo/GeoWKTShapeParserTests.java | 6 +- .../index/mapper/DynamicTemplateTests.java | 19 +- .../mapper/LegacyDynamicMappingTests.java | 67 ------ .../mapper/LegacyMapperServiceTests.java | 92 -------- .../similarity/LegacySimilarityTests.java | 93 -------- .../LegacyUpdateMappingIntegrationIT.java | 212 ------------------ .../search/slice/SliceBuilderTests.java | 15 -- .../rest/yaml/section/SetupSectionTests.java | 3 +- .../license/PostStartTrialRequest.java | 31 +-- .../license/PostStartTrialResponse.java | 60 ++--- .../elasticsearch/xpack/core/XPackPlugin.java | 7 +- .../ml/action/PostCalendarEventsAction.java | 9 - .../core/ml/action/PutCalendarAction.java | 9 - .../core/ml/action/PutDatafeedAction.java | 9 - .../xpack/core/ml/action/PutJobAction.java | 9 - .../ml/action/RevertModelSnapshotAction.java | 9 - .../xpack/core/ml/action/UpdateJobAction.java | 7 - .../xpack/core/ml/job/config/JobUpdate.java | 14 +- .../autodetect/state/ModelSnapshot.java | 2 +- .../monitoring/MonitoringFeatureSetUsage.java | 9 +- .../action/MonitoringBulkResponse.java | 11 +- .../xpack/core/rollup/job/RollupJob.java | 2 +- .../core/security/user/BeatsSystemUser.java | 2 - .../license/XPackLicenseStateTests.java | 2 +- .../xpack/core/XPackPluginTests.java | 4 +- .../xpack/ml/MlConfigMigratorTests.java | 3 +- .../action/TransportOpenJobActionTests.java | 3 +- .../monitoring/MonitoringFeatureSetTests.java | 7 +- .../action/MonitoringBulkResponseTests.java | 7 +- .../authc/esnative/ReservedRealm.java | 24 +- .../support/SecurityIndexManager.java | 6 - .../user/TransportGetUsersActionTests.java | 1 - .../authc/esnative/ReservedRealmTests.java | 11 - .../support/SecurityIndexManagerTests.java | 8 - .../xpack/restart/FullClusterRestartIT.java | 1 - 46 files changed, 110 insertions(+), 841 deletions(-) delete mode 100644 server/src/test/java/org/elasticsearch/index/mapper/LegacyDynamicMappingTests.java delete mode 100644 server/src/test/java/org/elasticsearch/index/mapper/LegacyMapperServiceTests.java delete mode 100644 server/src/test/java/org/elasticsearch/index/similarity/LegacySimilarityTests.java delete mode 100644 server/src/test/java/org/elasticsearch/indices/mapping/LegacyUpdateMappingIntegrationIT.java diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/process/ModelSnapshot.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/process/ModelSnapshot.java index 5d95e091d40b1..6a92eaf019021 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/process/ModelSnapshot.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/process/ModelSnapshot.java @@ -226,7 +226,7 @@ public static class Builder { private String jobId; // Stored snapshot documents created prior to 6.3.0 will have no value for min_version. - private Version minVersion = Version.V_6_3_0; + private Version minVersion = Version.fromString("6.3.0"); private Date timestamp; private String description; diff --git a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalRequest.java b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalRequest.java index 0dbbb9f90f1fa..f02ce8fe23496 100644 --- a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalRequest.java +++ b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalRequest.java @@ -19,7 +19,6 @@ package org.elasticsearch.index.rankeval; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.IndicesRequest; @@ -51,18 +50,8 @@ public RankEvalRequest(RankEvalSpec rankingEvaluationSpec, String[] indices) { RankEvalRequest(StreamInput in) throws IOException { super.readFrom(in); rankingEvaluationSpec = new RankEvalSpec(in); - if (in.getVersion().onOrAfter(Version.V_6_3_0)) { - indices = in.readStringArray(); - indicesOptions = IndicesOptions.readIndicesOptions(in); - } else { - // readStringArray uses readVInt for size, we used readInt in 6.2 - int indicesSize = in.readInt(); - String[] indices = new String[indicesSize]; - for (int i = 0; i < indicesSize; i++) { - indices[i] = in.readString(); - } - // no indices options yet - } + indices = in.readStringArray(); + indicesOptions = IndicesOptions.readIndicesOptions(in); } RankEvalRequest() { @@ -131,17 +120,8 @@ public void readFrom(StreamInput in) throws IOException { public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); rankingEvaluationSpec.writeTo(out); - if (out.getVersion().onOrAfter(Version.V_6_3_0)) { - out.writeStringArray(indices); - indicesOptions.writeIndicesOptions(out); - } else { - // writeStringArray uses writeVInt for size, we used writeInt in 6.2 - out.writeInt(indices.length); - for (String index : indices) { - out.writeString(index); - } - // no indices options yet - } + out.writeStringArray(indices); + indicesOptions.writeIndicesOptions(out); } @Override diff --git a/server/src/main/java/org/elasticsearch/Build.java b/server/src/main/java/org/elasticsearch/Build.java index 1a1ee2744f77a..bc62c3a3ddd27 100644 --- a/server/src/main/java/org/elasticsearch/Build.java +++ b/server/src/main/java/org/elasticsearch/Build.java @@ -224,45 +224,26 @@ public String date() { public static Build readBuild(StreamInput in) throws IOException { final Flavor flavor; final Type type; - if (in.getVersion().onOrAfter(Version.V_6_3_0)) { - // be lenient when reading on the wire, the enumeration values from other versions might be different than what we know - flavor = Flavor.fromDisplayName(in.readString(), false); - } else { - flavor = Flavor.OSS; - } - if (in.getVersion().onOrAfter(Version.V_6_3_0)) { - // be lenient when reading on the wire, the enumeration values from other versions might be different than what we know - type = Type.fromDisplayName(in.readString(), false); - } else { - type = Type.UNKNOWN; - } + // be lenient when reading on the wire, the enumeration values from other versions might be different than what we know + flavor = Flavor.fromDisplayName(in.readString(), false); + // be lenient when reading on the wire, the enumeration values from other versions might be different than what we know + type = Type.fromDisplayName(in.readString(), false); String hash = in.readString(); String date = in.readString(); boolean snapshot = in.readBoolean(); final String version; - if (in.getVersion().onOrAfter(Version.V_7_0_0)) { - version = in.readString(); - } else { - version = in.getVersion().toString(); - } + version = in.readString(); return new Build(flavor, type, hash, date, snapshot, version); } public static void writeBuild(Build build, StreamOutput out) throws IOException { - if (out.getVersion().onOrAfter(Version.V_6_3_0)) { - out.writeString(build.flavor().displayName()); - } - if (out.getVersion().onOrAfter(Version.V_6_3_0)) { - final Type buildType = build.type(); - out.writeString(buildType.displayName()); - } + out.writeString(build.flavor().displayName()); + out.writeString(build.type().displayName()); out.writeString(build.shortHash()); out.writeString(build.date()); out.writeBoolean(build.isSnapshot()); - if (out.getVersion().onOrAfter(Version.V_7_0_0)) { - out.writeString(build.getQualifiedVersion()); - } + out.writeString(build.getQualifiedVersion()); } /** diff --git a/server/src/main/java/org/elasticsearch/Version.java b/server/src/main/java/org/elasticsearch/Version.java index 7f939ca627a95..c685d39c7562f 100644 --- a/server/src/main/java/org/elasticsearch/Version.java +++ b/server/src/main/java/org/elasticsearch/Version.java @@ -46,14 +46,6 @@ public class Version implements Comparable, ToXContentFragment { */ public static final int V_EMPTY_ID = 0; public static final Version V_EMPTY = new Version(V_EMPTY_ID, org.apache.lucene.util.Version.LATEST); - // The below version is missing from the 7.3 JAR - private static final org.apache.lucene.util.Version LUCENE_7_2_1 = org.apache.lucene.util.Version.fromBits(7, 2, 1); - public static final int V_6_3_0_ID = 6030099; - public static final Version V_6_3_0 = new Version(V_6_3_0_ID, org.apache.lucene.util.Version.LUCENE_7_3_1); - public static final int V_6_3_1_ID = 6030199; - public static final Version V_6_3_1 = new Version(V_6_3_1_ID, org.apache.lucene.util.Version.LUCENE_7_3_1); - public static final int V_6_3_2_ID = 6030299; - public static final Version V_6_3_2 = new Version(V_6_3_2_ID, org.apache.lucene.util.Version.LUCENE_7_3_1); public static final int V_6_4_0_ID = 6040099; public static final Version V_6_4_0 = new Version(V_6_4_0_ID, org.apache.lucene.util.Version.LUCENE_7_4_0); public static final int V_6_4_1_ID = 6040199; @@ -152,12 +144,6 @@ public static Version fromId(int id) { return V_6_4_1; case V_6_4_0_ID: return V_6_4_0; - case V_6_3_2_ID: - return V_6_3_2; - case V_6_3_1_ID: - return V_6_3_1; - case V_6_3_0_ID: - return V_6_3_0; case V_EMPTY_ID: return V_EMPTY; default: diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DynamicTemplate.java b/server/src/main/java/org/elasticsearch/index/mapper/DynamicTemplate.java index 30c9606acd928..b271084a0d293 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/DynamicTemplate.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/DynamicTemplate.java @@ -20,7 +20,6 @@ package org.elasticsearch.index.mapper; import org.apache.logging.log4j.LogManager; -import org.elasticsearch.Version; import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.xcontent.ToXContentObject; @@ -160,8 +159,7 @@ public static XContentFieldType fromString(String value) { public abstract String defaultMappingType(); } - public static DynamicTemplate parse(String name, Map conf, - Version indexVersionCreated) throws MapperParsingException { + public static DynamicTemplate parse(String name, Map conf) throws MapperParsingException { String match = null; String pathMatch = null; String unmatch = null; @@ -207,18 +205,16 @@ public static DynamicTemplate parse(String name, Map conf, final MatchType matchType = MatchType.fromString(matchPattern); - if (indexVersionCreated.onOrAfter(Version.V_6_3_0)) { - // Validate that the pattern - for (String regex : new String[] { pathMatch, match, pathUnmatch, unmatch }) { - if (regex == null) { - continue; - } - try { - matchType.matches(regex, ""); - } catch (IllegalArgumentException e) { - throw new IllegalArgumentException("Pattern [" + regex + "] of type [" + matchType - + "] is invalid. Cannot create dynamic template [" + name + "].", e); - } + // Validate that the pattern + for (String regex : new String[] { pathMatch, match, pathUnmatch, unmatch }) { + if (regex == null) { + continue; + } + try { + matchType.matches(regex, ""); + } catch (IllegalArgumentException e) { + throw new IllegalArgumentException("Pattern [" + regex + "] of type [" + matchType + + "] is invalid. Cannot create dynamic template [" + name + "].", e); } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/RootObjectMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/RootObjectMapper.java index 6d2f0fddd86c2..89b1810bf393c 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/RootObjectMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/RootObjectMapper.java @@ -173,7 +173,7 @@ protected boolean processField(RootObjectMapper.Builder builder, String fieldNam Map.Entry entry = tmpl.entrySet().iterator().next(); String templateName = entry.getKey(); Map templateParams = (Map) entry.getValue(); - DynamicTemplate template = DynamicTemplate.parse(templateName, templateParams, indexVersionCreated); + DynamicTemplate template = DynamicTemplate.parse(templateName, templateParams); if (template != null) { templates.add(template); } diff --git a/server/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java b/server/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java index 6291531b7f907..fc8c6fcef98c6 100644 --- a/server/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java +++ b/server/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java @@ -22,7 +22,6 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.StepListener; import org.elasticsearch.action.admin.indices.flush.FlushRequest; @@ -594,18 +593,12 @@ static final class PreSyncedFlushResponse extends TransportResponse { this.existingSyncId = existingSyncId; } - boolean includeExistingSyncId(Version version) { - return version.onOrAfter(Version.V_6_3_0); - } - @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); commitId = new Engine.CommitId(in); numDocs = in.readInt(); - if (includeExistingSyncId(in.getVersion())) { - existingSyncId = in.readOptionalString(); - } + existingSyncId = in.readOptionalString(); } @Override @@ -613,9 +606,7 @@ public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); commitId.writeTo(out); out.writeInt(numDocs); - if (includeExistingSyncId(out.getVersion())) { - out.writeOptionalString(existingSyncId); - } + out.writeOptionalString(existingSyncId); } } diff --git a/server/src/main/java/org/elasticsearch/search/slice/SliceBuilder.java b/server/src/main/java/org/elasticsearch/search/slice/SliceBuilder.java index 08f042aa69650..3c86b21a0873d 100644 --- a/server/src/main/java/org/elasticsearch/search/slice/SliceBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/slice/SliceBuilder.java @@ -54,7 +54,7 @@ /** * A slice builder allowing to split a scroll in multiple partitions. - * If the provided field is the "_uid" it uses a {@link org.elasticsearch.search.slice.TermsSliceQuery} + * If the provided field is the "_id" it uses a {@link org.elasticsearch.search.slice.TermsSliceQuery} * to do the slicing. The slicing is done at the shard level first and then each shard is split into multiple slices. * For instance if the number of shards is equal to 2 and the user requested 4 slices * then the slices 0 and 2 are assigned to the first shard and the slices 1 and 3 are assigned to the second shard. @@ -79,7 +79,7 @@ public class SliceBuilder implements Writeable, ToXContentObject { PARSER.declareInt(SliceBuilder::setMax, MAX_FIELD); } - /** Name of field to slice against (_uid by default) */ + /** Name of field to slice against (_id by default) */ private String field = IdFieldMapper.NAME; /** The id of the slice */ private int id = -1; @@ -249,15 +249,7 @@ public Query toFilter(ClusterService clusterService, ShardSearchRequest request, String field = this.field; boolean useTermQuery = false; - if ("_uid".equals(field)) { - // on new indices, the _id acts as a _uid - field = IdFieldMapper.NAME; - if (context.getIndexSettings().getIndexVersionCreated().onOrAfter(Version.V_7_0_0)) { - throw new IllegalArgumentException("Computing slices on the [_uid] field is illegal for 7.x indices, use [_id] instead"); - } - DEPRECATION_LOG.deprecated("Computing slices on the [_uid] field is deprecated for 6.x indices, use [_id] instead"); - useTermQuery = true; - } else if (IdFieldMapper.NAME.equals(field)) { + if (IdFieldMapper.NAME.equals(field)) { useTermQuery = true; } else if (type.hasDocValues() == false) { throw new IllegalArgumentException("cannot load numeric doc values on " + field); diff --git a/server/src/test/java/org/elasticsearch/BuildTests.java b/server/src/test/java/org/elasticsearch/BuildTests.java index 1945c51d1514f..7a6f581bd7369 100644 --- a/server/src/test/java/org/elasticsearch/BuildTests.java +++ b/server/src/test/java/org/elasticsearch/BuildTests.java @@ -192,7 +192,7 @@ public void testSerialization() { throw new AssertionError(); }); } - + public void testFlavorParsing() { for (final Build.Flavor flavor : Build.Flavor.values()) { // strict or not should not impact parsing at all here diff --git a/server/src/test/java/org/elasticsearch/VersionTests.java b/server/src/test/java/org/elasticsearch/VersionTests.java index 66d7af0a4b20e..e5149b9bce515 100644 --- a/server/src/test/java/org/elasticsearch/VersionTests.java +++ b/server/src/test/java/org/elasticsearch/VersionTests.java @@ -36,8 +36,6 @@ import java.util.Map; import java.util.Set; -import static org.elasticsearch.Version.V_6_3_0; -import static org.elasticsearch.Version.V_7_0_0; import static org.elasticsearch.test.VersionUtils.allVersions; import static org.elasticsearch.test.VersionUtils.randomVersion; import static org.hamcrest.CoreMatchers.equalTo; @@ -50,30 +48,32 @@ public class VersionTests extends ESTestCase { public void testVersionComparison() throws Exception { - assertThat(V_6_3_0.before(V_7_0_0), is(true)); - assertThat(V_6_3_0.before(V_6_3_0), is(false)); - assertThat(V_7_0_0.before(V_6_3_0), is(false)); + Version V_7_2_0 = Version.fromString("7.2.0"); + Version V_8_0_0 = Version.fromString("8.0.0"); + assertThat(V_7_2_0.before(V_8_0_0), is(true)); + assertThat(V_7_2_0.before(V_7_2_0), is(false)); + assertThat(V_8_0_0.before(V_7_2_0), is(false)); - assertThat(V_6_3_0.onOrBefore(V_7_0_0), is(true)); - assertThat(V_6_3_0.onOrBefore(V_6_3_0), is(true)); - assertThat(V_7_0_0.onOrBefore(V_6_3_0), is(false)); + assertThat(V_7_2_0.onOrBefore(V_8_0_0), is(true)); + assertThat(V_7_2_0.onOrBefore(V_7_2_0), is(true)); + assertThat(V_8_0_0.onOrBefore(V_7_2_0), is(false)); - assertThat(V_6_3_0.after(V_7_0_0), is(false)); - assertThat(V_6_3_0.after(V_6_3_0), is(false)); - assertThat(V_7_0_0.after(V_6_3_0), is(true)); + assertThat(V_7_2_0.after(V_8_0_0), is(false)); + assertThat(V_7_2_0.after(V_7_2_0), is(false)); + assertThat(V_8_0_0.after(V_7_2_0), is(true)); - assertThat(V_6_3_0.onOrAfter(V_7_0_0), is(false)); - assertThat(V_6_3_0.onOrAfter(V_6_3_0), is(true)); - assertThat(V_7_0_0.onOrAfter(V_6_3_0), is(true)); + assertThat(V_7_2_0.onOrAfter(V_8_0_0), is(false)); + assertThat(V_7_2_0.onOrAfter(V_7_2_0), is(true)); + assertThat(V_8_0_0.onOrAfter(V_7_2_0), is(true)); assertTrue(Version.fromString("5.0.0-alpha2").onOrAfter(Version.fromString("5.0.0-alpha1"))); assertTrue(Version.fromString("5.0.0").onOrAfter(Version.fromString("5.0.0-beta2"))); assertTrue(Version.fromString("5.0.0-rc1").onOrAfter(Version.fromString("5.0.0-beta24"))); assertTrue(Version.fromString("5.0.0-alpha24").before(Version.fromString("5.0.0-beta0"))); - assertThat(V_6_3_0, is(lessThan(V_7_0_0))); - assertThat(V_6_3_0.compareTo(V_6_3_0), is(0)); - assertThat(V_7_0_0, is(greaterThan(V_6_3_0))); + assertThat(V_7_2_0, is(lessThan(V_8_0_0))); + assertThat(V_7_2_0.compareTo(V_7_2_0), is(0)); + assertThat(V_8_0_0, is(greaterThan(V_7_2_0))); } public void testMin() { @@ -182,7 +182,7 @@ public void testMinCompatVersion() { Version major56x = Version.fromString("5.6.0"); assertThat(Version.V_6_5_0.minimumCompatibilityVersion(), equalTo(major56x)); - assertThat(Version.V_6_3_1.minimumCompatibilityVersion(), equalTo(major56x)); + assertThat(Version.fromString("6.3.1").minimumCompatibilityVersion(), equalTo(major56x)); // from 7.0 on we are supporting the latest minor of the previous major... this might fail once we add a new version ie. 5.x is // released since we need to bump the supported minor in Version#minimumCompatibilityVersion() diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthRequestTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthRequestTests.java index e532d245ec8e3..a901c8c9bc2e1 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthRequestTests.java @@ -27,6 +27,7 @@ import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.VersionUtils; import java.util.Locale; @@ -59,7 +60,7 @@ public void testBwcSerialization() throws Exception { for (int runs = 0; runs < randomIntBetween(5, 20); runs++) { // Generate a random cluster health request in version < 7.2.0 and serializes it final BytesStreamOutput out = new BytesStreamOutput(); - out.setVersion(randomVersionBetween(random(), Version.V_6_3_0, getPreviousVersion(Version.V_7_2_0))); + out.setVersion(randomVersionBetween(random(), VersionUtils.getFirstVersion(), getPreviousVersion(Version.V_7_2_0))); final ClusterHealthRequest expected = randomRequest(); { @@ -114,7 +115,7 @@ public void testBwcSerialization() throws Exception { // Serialize to node in version < 7.2.0 final BytesStreamOutput out = new BytesStreamOutput(); - out.setVersion(randomVersionBetween(random(), Version.V_6_3_0, getPreviousVersion(Version.V_7_2_0))); + out.setVersion(randomVersionBetween(random(), VersionUtils.getFirstVersion(), getPreviousVersion(Version.V_7_2_0))); expected.writeTo(out); // Deserialize and check the cluster health request diff --git a/server/src/test/java/org/elasticsearch/common/geo/GeoWKTShapeParserTests.java b/server/src/test/java/org/elasticsearch/common/geo/GeoWKTShapeParserTests.java index 286e1ce6ee7c5..5835ab6a06c14 100644 --- a/server/src/test/java/org/elasticsearch/common/geo/GeoWKTShapeParserTests.java +++ b/server/src/test/java/org/elasticsearch/common/geo/GeoWKTShapeParserTests.java @@ -342,7 +342,7 @@ public void testParseMixedDimensionPolyWithHoleStoredZ() throws IOException { parser.nextToken(); Settings indexSettings = Settings.builder() - .put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_6_3_0) + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) .put(IndexMetaData.SETTING_INDEX_UUID, UUIDs.randomBase64UUID()).build(); @@ -372,7 +372,7 @@ public void testParsePolyWithStoredZ() throws IOException { parser.nextToken(); Settings indexSettings = Settings.builder() - .put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_6_3_0) + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) .put(IndexMetaData.SETTING_INDEX_UUID, UUIDs.randomBase64UUID()).build(); @@ -393,7 +393,7 @@ public void testParseOpenPolygon() throws IOException { parser.nextToken(); Settings indexSettings = Settings.builder() - .put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_6_3_0) + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) .put(IndexMetaData.SETTING_INDEX_UUID, UUIDs.randomBase64UUID()).build(); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/DynamicTemplateTests.java b/server/src/test/java/org/elasticsearch/index/mapper/DynamicTemplateTests.java index 5604f4240ce53..c41c242317d9c 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/DynamicTemplateTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/DynamicTemplateTests.java @@ -19,7 +19,6 @@ package org.elasticsearch.index.mapper; -import org.elasticsearch.Version; import org.elasticsearch.common.Strings; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -40,7 +39,7 @@ public void testParseUnknownParam() throws Exception { templateDef.put("random_param", "random_value"); IllegalArgumentException e = expectThrows(IllegalArgumentException.class, - () -> DynamicTemplate.parse("my_template", templateDef, Version.CURRENT.minimumIndexCompatibilityVersion())); + () -> DynamicTemplate.parse("my_template", templateDef)); assertEquals("Illegal dynamic template parameter: [random_param]", e.getMessage()); } @@ -50,7 +49,7 @@ public void testParseUnknownMatchType() { templateDef2.put("mapping", Collections.singletonMap("store", true)); // if a wrong match type is specified, we ignore the template IllegalArgumentException e = expectThrows(IllegalArgumentException.class, - () -> DynamicTemplate.parse("my_template", templateDef2, Version.CURRENT.minimumIndexCompatibilityVersion())); + () -> DynamicTemplate.parse("my_template", templateDef2)); assertEquals("No field type matched on [text], possible values are [object, string, long, double, boolean, date, binary]", e.getMessage()); } @@ -63,7 +62,7 @@ public void testParseInvalidRegex() { templateDef.put("match_pattern", "regex"); templateDef.put("mapping", Collections.singletonMap("store", true)); IllegalArgumentException e = expectThrows(IllegalArgumentException.class, - () -> DynamicTemplate.parse("my_template", templateDef, Version.CURRENT.minimumIndexCompatibilityVersion())); + () -> DynamicTemplate.parse("my_template", templateDef)); assertEquals("Pattern [*a] of type [regex] is invalid. Cannot create dynamic template [my_template].", e.getMessage()); } } @@ -72,7 +71,7 @@ public void testMatchAllTemplate() { Map templateDef = new HashMap<>(); templateDef.put("match_mapping_type", "*"); templateDef.put("mapping", Collections.singletonMap("store", true)); - DynamicTemplate template = DynamicTemplate.parse("my_template", templateDef, Version.CURRENT.minimumIndexCompatibilityVersion()); + DynamicTemplate template = DynamicTemplate.parse("my_template", templateDef); assertTrue(template.match("a.b", "b", randomFrom(XContentFieldType.values()))); } @@ -80,7 +79,7 @@ public void testMatchTypeTemplate() { Map templateDef = new HashMap<>(); templateDef.put("match_mapping_type", "string"); templateDef.put("mapping", Collections.singletonMap("store", true)); - DynamicTemplate template = DynamicTemplate.parse("my_template", templateDef, Version.CURRENT.minimumIndexCompatibilityVersion()); + DynamicTemplate template = DynamicTemplate.parse("my_template", templateDef); assertTrue(template.match("a.b", "b", XContentFieldType.STRING)); assertFalse(template.match("a.b", "b", XContentFieldType.BOOLEAN)); } @@ -90,7 +89,7 @@ public void testSerialization() throws Exception { Map templateDef = new HashMap<>(); templateDef.put("match_mapping_type", "string"); templateDef.put("mapping", Collections.singletonMap("store", true)); - DynamicTemplate template = DynamicTemplate.parse("my_template", templateDef, Version.CURRENT.minimumIndexCompatibilityVersion()); + DynamicTemplate template = DynamicTemplate.parse("my_template", templateDef); XContentBuilder builder = JsonXContent.contentBuilder(); template.toXContent(builder, ToXContent.EMPTY_PARAMS); assertEquals("{\"match_mapping_type\":\"string\",\"mapping\":{\"store\":true}}", Strings.toString(builder)); @@ -100,7 +99,7 @@ public void testSerialization() throws Exception { templateDef.put("match", "*name"); templateDef.put("unmatch", "first_name"); templateDef.put("mapping", Collections.singletonMap("store", true)); - template = DynamicTemplate.parse("my_template", templateDef, Version.CURRENT.minimumIndexCompatibilityVersion()); + template = DynamicTemplate.parse("my_template", templateDef); builder = JsonXContent.contentBuilder(); template.toXContent(builder, ToXContent.EMPTY_PARAMS); assertEquals("{\"match\":\"*name\",\"unmatch\":\"first_name\",\"mapping\":{\"store\":true}}", Strings.toString(builder)); @@ -110,7 +109,7 @@ public void testSerialization() throws Exception { templateDef.put("path_match", "*name"); templateDef.put("path_unmatch", "first_name"); templateDef.put("mapping", Collections.singletonMap("store", true)); - template = DynamicTemplate.parse("my_template", templateDef, Version.CURRENT.minimumIndexCompatibilityVersion()); + template = DynamicTemplate.parse("my_template", templateDef); builder = JsonXContent.contentBuilder(); template.toXContent(builder, ToXContent.EMPTY_PARAMS); assertEquals("{\"path_match\":\"*name\",\"path_unmatch\":\"first_name\",\"mapping\":{\"store\":true}}", @@ -121,7 +120,7 @@ public void testSerialization() throws Exception { templateDef.put("match", "^a$"); templateDef.put("match_pattern", "regex"); templateDef.put("mapping", Collections.singletonMap("store", true)); - template = DynamicTemplate.parse("my_template", templateDef, Version.CURRENT.minimumIndexCompatibilityVersion()); + template = DynamicTemplate.parse("my_template", templateDef); builder = JsonXContent.contentBuilder(); template.toXContent(builder, ToXContent.EMPTY_PARAMS); assertEquals("{\"match\":\"^a$\",\"match_pattern\":\"regex\",\"mapping\":{\"store\":true}}", Strings.toString(builder)); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/LegacyDynamicMappingTests.java b/server/src/test/java/org/elasticsearch/index/mapper/LegacyDynamicMappingTests.java deleted file mode 100644 index 42d6aa8951c67..0000000000000 --- a/server/src/test/java/org/elasticsearch/index/mapper/LegacyDynamicMappingTests.java +++ /dev/null @@ -1,67 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.index.mapper; - -import org.elasticsearch.Version; -import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse; -import org.elasticsearch.cluster.metadata.IndexMetaData; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.test.ESSingleNodeTestCase; - -import java.io.IOException; - -import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; - -public class LegacyDynamicMappingTests extends ESSingleNodeTestCase { - - @Override - protected boolean forbidPrivateIndexSettings() { - return false; - } - - public void testTypeNotCreatedOnIndexFailure() throws IOException { - final Settings settings = Settings.builder().put(IndexMetaData.SETTING_INDEX_VERSION_CREATED.getKey(), Version.V_6_3_0).build(); - try (XContentBuilder mapping = jsonBuilder()) { - mapping.startObject(); - { - mapping.startObject("_default_"); - { - mapping.field("dynamic", "strict"); - } - mapping.endObject(); - } - mapping.endObject(); - createIndex("test", settings, "_default_", mapping); - } - try (XContentBuilder sourceBuilder = jsonBuilder().startObject().field("test", "test").endObject()) { - expectThrows(StrictDynamicMappingException.class, () -> client() - .prepareIndex() - .setIndex("test") - .setType("type") - .setSource(sourceBuilder) - .get()); - - GetMappingsResponse getMappingsResponse = client().admin().indices().prepareGetMappings("test").get(); - assertNull(getMappingsResponse.getMappings().get("test").get("type")); - } - } - -} diff --git a/server/src/test/java/org/elasticsearch/index/mapper/LegacyMapperServiceTests.java b/server/src/test/java/org/elasticsearch/index/mapper/LegacyMapperServiceTests.java deleted file mode 100644 index 33f9bd51f33db..0000000000000 --- a/server/src/test/java/org/elasticsearch/index/mapper/LegacyMapperServiceTests.java +++ /dev/null @@ -1,92 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.index.mapper; - -import org.elasticsearch.Version; -import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest; -import org.elasticsearch.cluster.metadata.IndexMetaData; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.compress.CompressedXContent; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentFactory; -import org.elasticsearch.common.xcontent.json.JsonXContent; -import org.elasticsearch.index.IndexService; -import org.elasticsearch.test.ESSingleNodeTestCase; - -import java.io.IOException; - -public class LegacyMapperServiceTests extends ESSingleNodeTestCase { - - @Override - protected boolean forbidPrivateIndexSettings() { - return false; - } - - public void testIndexMetaDataUpdateDoesNotLoseDefaultMapper() throws IOException { - final IndexService indexService = - createIndex("test", Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_6_3_0).build()); - try (XContentBuilder builder = JsonXContent.contentBuilder()) { - builder.startObject(); - { - builder.startObject(MapperService.DEFAULT_MAPPING); - { - builder.field("date_detection", false); - } - builder.endObject(); - } - builder.endObject(); - final PutMappingRequest putMappingRequest = new PutMappingRequest(); - putMappingRequest.indices("test"); - putMappingRequest.type(MapperService.DEFAULT_MAPPING); - putMappingRequest.source(builder); - client().admin().indices().preparePutMapping("test").setType(MapperService.DEFAULT_MAPPING).setSource(builder).get(); - } - assertNotNull(indexService.mapperService().documentMapper(MapperService.DEFAULT_MAPPING)); - final Settings zeroReplicasSettings = Settings.builder().put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0).build(); - client().admin().indices().prepareUpdateSettings("test").setSettings(zeroReplicasSettings).get(); - /* - * This assertion is a guard against a previous bug that would lose the default mapper when applying a metadata update that did not - * update the default mapping. - */ - assertNotNull(indexService.mapperService().documentMapper(MapperService.DEFAULT_MAPPING)); - } - - public void testDefaultMappingIsDeprecatedOn6() throws IOException { - final Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_6_3_0).build(); - final String mapping; - try (XContentBuilder defaultMapping = XContentFactory.jsonBuilder()) { - defaultMapping.startObject(); - { - defaultMapping.startObject("_default_"); - { - - } - defaultMapping.endObject(); - } - defaultMapping.endObject(); - mapping = Strings.toString(defaultMapping); - } - final MapperService mapperService = createIndex("test", settings).mapperService(); - mapperService.merge("_default_", new CompressedXContent(mapping), MapperService.MergeReason.MAPPING_UPDATE); - assertWarnings("[_default_] mapping is deprecated since it is not useful anymore now that indexes cannot have more than one type"); - } - -} diff --git a/server/src/test/java/org/elasticsearch/index/similarity/LegacySimilarityTests.java b/server/src/test/java/org/elasticsearch/index/similarity/LegacySimilarityTests.java deleted file mode 100644 index 13398d8791437..0000000000000 --- a/server/src/test/java/org/elasticsearch/index/similarity/LegacySimilarityTests.java +++ /dev/null @@ -1,93 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.index.similarity; - -import org.apache.lucene.search.similarities.BooleanSimilarity; -import org.apache.lucene.search.similarities.ClassicSimilarity; -import org.apache.lucene.search.similarity.LegacyBM25Similarity; -import org.elasticsearch.Version; -import org.elasticsearch.cluster.metadata.IndexMetaData; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentFactory; -import org.elasticsearch.index.mapper.MapperService; -import org.elasticsearch.test.ESSingleNodeTestCase; - -import java.io.IOException; - -import static org.hamcrest.CoreMatchers.equalTo; -import static org.hamcrest.CoreMatchers.instanceOf; - -public class LegacySimilarityTests extends ESSingleNodeTestCase { - - @Override - protected boolean forbidPrivateIndexSettings() { - return false; - } - - public void testResolveDefaultSimilaritiesOn6xIndex() { - final Settings indexSettings = Settings.builder() - .put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_6_3_0) // otherwise classic is forbidden - .build(); - final SimilarityService similarityService = createIndex("foo", indexSettings).similarityService(); - assertThat(similarityService.getSimilarity("classic").get(), instanceOf(ClassicSimilarity.class)); - assertWarnings("The [classic] similarity is now deprecated in favour of BM25, which is generally " - + "accepted as a better alternative. Use the [BM25] similarity or build a custom [scripted] similarity " - + "instead."); - assertThat(similarityService.getSimilarity("BM25").get(), instanceOf(LegacyBM25Similarity.class)); - assertThat(similarityService.getSimilarity("boolean").get(), instanceOf(BooleanSimilarity.class)); - assertThat(similarityService.getSimilarity("default"), equalTo(null)); - } - - public void testResolveSimilaritiesFromMappingClassic() throws IOException { - try (XContentBuilder mapping = XContentFactory.jsonBuilder()) { - mapping.startObject(); - { - mapping.startObject("type"); - { - mapping.startObject("properties"); - { - mapping.startObject("field1"); - { - mapping.field("type", "text"); - mapping.field("similarity", "my_similarity"); - } - mapping.endObject(); - } - mapping.endObject(); - } - mapping.endObject(); - } - mapping.endObject(); - - final Settings indexSettings = Settings.builder() - .put(IndexMetaData.SETTING_INDEX_VERSION_CREATED.getKey(), Version.V_6_3_0) // otherwise classic is forbidden - .put("index.similarity.my_similarity.type", "classic") - .put("index.similarity.my_similarity.discount_overlaps", false) - .build(); - final MapperService mapperService = createIndex("foo", indexSettings, "type", mapping).mapperService(); - assertThat(mapperService.fullName("field1").similarity().get(), instanceOf(ClassicSimilarity.class)); - - final ClassicSimilarity similarity = (ClassicSimilarity) mapperService.fullName("field1").similarity().get(); - assertThat(similarity.getDiscountOverlaps(), equalTo(false)); - } - } - -} diff --git a/server/src/test/java/org/elasticsearch/indices/mapping/LegacyUpdateMappingIntegrationIT.java b/server/src/test/java/org/elasticsearch/indices/mapping/LegacyUpdateMappingIntegrationIT.java deleted file mode 100644 index 1bf95f612ce9f..0000000000000 --- a/server/src/test/java/org/elasticsearch/indices/mapping/LegacyUpdateMappingIntegrationIT.java +++ /dev/null @@ -1,212 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.indices.mapping; - -import org.elasticsearch.Version; -import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse; -import org.elasticsearch.action.support.master.AcknowledgedResponse; -import org.elasticsearch.cluster.metadata.IndexMetaData; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.json.JsonXContent; -import org.elasticsearch.index.mapper.MapperParsingException; -import org.elasticsearch.index.mapper.MapperService; -import org.elasticsearch.test.ESIntegTestCase; - -import java.util.Map; - -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.hasEntry; -import static org.hamcrest.Matchers.hasKey; -import static org.hamcrest.Matchers.not; - -public class LegacyUpdateMappingIntegrationIT extends ESIntegTestCase { - - @Override - protected boolean forbidPrivateIndexSettings() { - return false; - } - - @SuppressWarnings("unchecked") - public void testUpdateDefaultMappingSettings() throws Exception { - logger.info("Creating index with _default_ mappings"); - try (XContentBuilder defaultMapping = JsonXContent.contentBuilder()) { - defaultMapping.startObject(); - { - defaultMapping.startObject(MapperService.DEFAULT_MAPPING); - { - defaultMapping.field("date_detection", false); - } - defaultMapping.endObject(); - } - defaultMapping.endObject(); - client() - .admin() - .indices() - .prepareCreate("test") - .setSettings(Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_6_3_0).build()) - .addMapping(MapperService.DEFAULT_MAPPING, defaultMapping) - .get(); - } - - { - final GetMappingsResponse getResponse = - client().admin().indices().prepareGetMappings("test").addTypes(MapperService.DEFAULT_MAPPING).get(); - final Map defaultMapping = - getResponse.getMappings().get("test").get(MapperService.DEFAULT_MAPPING).sourceAsMap(); - assertThat(defaultMapping, hasKey("date_detection")); - } - - logger.info("Emptying _default_ mappings"); - // now remove it - try (XContentBuilder mappingBuilder = - JsonXContent.contentBuilder().startObject().startObject(MapperService.DEFAULT_MAPPING).endObject().endObject()) { - final AcknowledgedResponse putResponse = - client() - .admin() - .indices() - .preparePutMapping("test") - .setType(MapperService.DEFAULT_MAPPING) - .setSource(mappingBuilder) - .get(); - assertThat(putResponse.isAcknowledged(), equalTo(true)); - } - logger.info("Done Emptying _default_ mappings"); - - { - final GetMappingsResponse getResponse = - client().admin().indices().prepareGetMappings("test").addTypes(MapperService.DEFAULT_MAPPING).get(); - final Map defaultMapping = - getResponse.getMappings().get("test").get(MapperService.DEFAULT_MAPPING).sourceAsMap(); - assertThat(defaultMapping, not(hasKey("date_detection"))); - } - - // now test you can change stuff that are normally unchangeable - logger.info("Creating _default_ mappings with an analyzed field"); - try (XContentBuilder defaultMapping = JsonXContent.contentBuilder()) { - - defaultMapping.startObject(); - { - defaultMapping.startObject(MapperService.DEFAULT_MAPPING); - { - defaultMapping.startObject("properties"); - { - defaultMapping.startObject("f"); - { - defaultMapping.field("type", "text"); - defaultMapping.field("index", true); - } - defaultMapping.endObject(); - } - defaultMapping.endObject(); - } - defaultMapping.endObject(); - } - defaultMapping.endObject(); - - final AcknowledgedResponse putResponse = - client() - .admin() - .indices() - .preparePutMapping("test") - .setType(MapperService.DEFAULT_MAPPING).setSource(defaultMapping) - .get(); - assertThat(putResponse.isAcknowledged(), equalTo(true)); - } - - logger.info("Changing _default_ mappings field from analyzed to non-analyzed"); - { - try (XContentBuilder mappingBuilder = JsonXContent.contentBuilder()) { - mappingBuilder.startObject(); - { - mappingBuilder.startObject(MapperService.DEFAULT_MAPPING); - { - mappingBuilder.startObject("properties"); - { - mappingBuilder.startObject("f"); - { - mappingBuilder.field("type", "keyword"); - } - mappingBuilder.endObject(); - } - mappingBuilder.endObject(); - } - mappingBuilder.endObject(); - } - mappingBuilder.endObject(); - - final AcknowledgedResponse putResponse = - client() - .admin() - .indices() - .preparePutMapping("test") - .setType(MapperService.DEFAULT_MAPPING) - .setSource(mappingBuilder) - .get(); - assertThat(putResponse.isAcknowledged(), equalTo(true)); - } - } - logger.info("Done changing _default_ mappings field from analyzed to non-analyzed"); - - { - final GetMappingsResponse getResponse = - client().admin().indices().prepareGetMappings("test").addTypes(MapperService.DEFAULT_MAPPING).get(); - final Map defaultMapping = - getResponse.getMappings().get("test").get(MapperService.DEFAULT_MAPPING).sourceAsMap(); - final Map fieldSettings = (Map) ((Map) defaultMapping.get("properties")).get("f"); - assertThat(fieldSettings, hasEntry("type", "keyword")); - } - - // but we still validate the _default_ type - logger.info("Confirming _default_ mappings validation"); - try (XContentBuilder mappingBuilder = JsonXContent.contentBuilder()) { - - mappingBuilder.startObject(); - { - mappingBuilder.startObject(MapperService.DEFAULT_MAPPING); - { - mappingBuilder.startObject("properites"); - { - mappingBuilder.startObject("f"); - { - mappingBuilder.field("type", "non-existent"); - } - mappingBuilder.endObject(); - } - mappingBuilder.endObject(); - } - mappingBuilder.endObject(); - } - mappingBuilder.endObject(); - - expectThrows( - MapperParsingException.class, - () -> client() - .admin() - .indices() - .preparePutMapping("test") - .setType(MapperService.DEFAULT_MAPPING) - .setSource(mappingBuilder) - .get()); - } - - } - -} diff --git a/server/src/test/java/org/elasticsearch/search/slice/SliceBuilderTests.java b/server/src/test/java/org/elasticsearch/search/slice/SliceBuilderTests.java index fffa501cc4be4..6128f8d39fcf6 100644 --- a/server/src/test/java/org/elasticsearch/search/slice/SliceBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/search/slice/SliceBuilderTests.java @@ -440,21 +440,6 @@ public void testInvalidField() throws IOException { } } - public void testToFilterDeprecationMessage() throws IOException { - Directory dir = new RAMDirectory(); - try (IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())))) { - writer.commit(); - } - try (IndexReader reader = DirectoryReader.open(dir)) { - QueryShardContext context = createShardContext(Version.V_6_3_0, reader, "_uid", null, 1,0); - SliceBuilder builder = new SliceBuilder("_uid", 5, 10); - Query query = builder.toFilter(null, createRequest(0), context, Version.CURRENT); - assertThat(query, instanceOf(TermsSliceQuery.class)); - assertThat(builder.toFilter(null, createRequest(0), context, Version.CURRENT), equalTo(query)); - assertWarnings("Computing slices on the [_uid] field is deprecated for 6.x indices, use [_id] instead"); - } - } - public void testToFilterWithRouting() throws IOException { Directory dir = new RAMDirectory(); try (IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())))) { diff --git a/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/SetupSectionTests.java b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/SetupSectionTests.java index bf73f2efba42a..8b6f137443a5d 100644 --- a/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/SetupSectionTests.java +++ b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/SetupSectionTests.java @@ -119,8 +119,7 @@ public void testParseSetupAndSkipSectionNoSkip() throws Exception { assertThat(setupSection.getSkipSection().isEmpty(), equalTo(false)); assertThat(setupSection.getSkipSection(), notNullValue()); assertThat(setupSection.getSkipSection().getLowerVersion(), equalTo(Version.fromString("6.0.0"))); - assertThat(setupSection.getSkipSection().getUpperVersion(), - equalTo(Version.V_6_3_0)); + assertThat(setupSection.getSkipSection().getUpperVersion(), equalTo(Version.fromString("6.3.0"))); assertThat(setupSection.getSkipSection().getReason(), equalTo("Update doesn't return metadata fields, waiting for #3259")); assertThat(setupSection.getExecutableSections().size(), equalTo(2)); assertThat(setupSection.getExecutableSections().get(0), instanceOf(DoSection.class)); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/PostStartTrialRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/PostStartTrialRequest.java index cf94312b6a72b..2420fb68de169 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/PostStartTrialRequest.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/PostStartTrialRequest.java @@ -5,7 +5,6 @@ */ package org.elasticsearch.license; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.support.master.MasterNodeRequest; import org.elasticsearch.common.io.stream.StreamInput; @@ -44,34 +43,14 @@ public boolean isAcknowledged() { @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); - if (in.getVersion().onOrAfter(Version.V_6_3_0)) { - type = in.readString(); - acknowledge = in.readBoolean(); - } else { - type = "trial"; - acknowledge = true; - } + type = in.readString(); + acknowledge = in.readBoolean(); } @Override public void writeTo(StreamOutput out) throws IOException { - Version version = Version.V_6_3_0; - if (out.getVersion().onOrAfter(version)) { - super.writeTo(out); - out.writeString(type); - out.writeBoolean(acknowledge); - } else { - if ("trial".equals(type) == false) { - throw new IllegalArgumentException("All nodes in cluster must be version [" + version - + "] or newer to start trial with a different type than 'trial'. Attempting to write to " + - "a node with version [" + out.getVersion() + "] with trial type [" + type + "]."); - } else if (acknowledge == false) { - throw new IllegalArgumentException("Request must be acknowledged to send to a node with a version " + - "prior to [" + version + "]. Attempting to send request to node with version [" + out.getVersion() + "] " + - "without acknowledgement."); - } else { - super.writeTo(out); - } - } + super.writeTo(out); + out.writeString(type); + out.writeBoolean(acknowledge); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/PostStartTrialResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/PostStartTrialResponse.java index 93aa923483e79..de995096fc7f3 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/PostStartTrialResponse.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/PostStartTrialResponse.java @@ -5,7 +5,6 @@ */ package org.elasticsearch.license; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -18,11 +17,6 @@ class PostStartTrialResponse extends ActionResponse { - // Nodes Prior to 6.3 did not have NEED_ACKNOWLEDGEMENT as part of status - enum Pre63Status { - UPGRADED_TO_TRIAL, - TRIAL_ALREADY_ACTIVATED; - } enum Status { UPGRADED_TO_TRIAL(true, null, RestStatus.OK), TRIAL_ALREADY_ACTIVATED(false, "Operation failed: Trial was already activated.", RestStatus.FORBIDDEN), @@ -76,47 +70,31 @@ public Status getStatus() { @Override public void readFrom(StreamInput in) throws IOException { status = in.readEnum(Status.class); - if (in.getVersion().onOrAfter(Version.V_6_3_0)) { - acknowledgeMessage = in.readOptionalString(); - int size = in.readVInt(); - Map acknowledgeMessages = new HashMap<>(size); - for (int i = 0; i < size; i++) { - String feature = in.readString(); - int nMessages = in.readVInt(); - String[] messages = new String[nMessages]; - for (int j = 0; j < nMessages; j++) { - messages[j] = in.readString(); - } - acknowledgeMessages.put(feature, messages); + acknowledgeMessage = in.readOptionalString(); + int size = in.readVInt(); + Map acknowledgeMessages = new HashMap<>(size); + for (int i = 0; i < size; i++) { + String feature = in.readString(); + int nMessages = in.readVInt(); + String[] messages = new String[nMessages]; + for (int j = 0; j < nMessages; j++) { + messages[j] = in.readString(); } - this.acknowledgeMessages = acknowledgeMessages; - } else { - this.acknowledgeMessages = Collections.emptyMap(); + acknowledgeMessages.put(feature, messages); } + this.acknowledgeMessages = acknowledgeMessages; } @Override public void writeTo(StreamOutput out) throws IOException { - Version version = Version.V_6_3_0; - if (out.getVersion().onOrAfter(version)) { - out.writeEnum(status); - out.writeOptionalString(acknowledgeMessage); - out.writeVInt(acknowledgeMessages.size()); - for (Map.Entry entry : acknowledgeMessages.entrySet()) { - out.writeString(entry.getKey()); - out.writeVInt(entry.getValue().length); - for (String message : entry.getValue()) { - out.writeString(message); - } - } - } else { - if (status == Status.UPGRADED_TO_TRIAL) { - out.writeEnum(Pre63Status.UPGRADED_TO_TRIAL); - } else if (status == Status.TRIAL_ALREADY_ACTIVATED) { - out.writeEnum(Pre63Status.TRIAL_ALREADY_ACTIVATED); - } else { - throw new IllegalArgumentException("Starting trial on node with version [" + Version.CURRENT + "] requires " + - "acknowledgement parameter."); + out.writeEnum(status); + out.writeOptionalString(acknowledgeMessage); + out.writeVInt(acknowledgeMessages.size()); + for (Map.Entry entry : acknowledgeMessages.entrySet()) { + out.writeString(entry.getKey()); + out.writeVInt(entry.getValue().length); + for (String message : entry.getValue()) { + out.writeString(message); } } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackPlugin.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackPlugin.java index ababc3c21289a..696e3a2871fb3 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackPlugin.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackPlugin.java @@ -9,7 +9,6 @@ import org.apache.logging.log4j.Logger; import org.apache.lucene.util.SetOnce; import org.elasticsearch.SpecialPermission; -import org.elasticsearch.Version; import org.elasticsearch.action.Action; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionResponse; @@ -194,11 +193,7 @@ public static List nodesNotReadyForXPackCustomMetadata(ClusterSta // check that all nodes would be capable of deserializing newly added x-pack metadata final List notReadyNodes = StreamSupport.stream(clusterState.nodes().spliterator(), false).filter(node -> { final String xpackInstalledAttr = node.getAttributes().getOrDefault(XPACK_INSTALLED_NODE_ATTR, "false"); - - // The node attribute XPACK_INSTALLED_NODE_ATTR was only introduced in 6.3.0, so when - // we have an older node in this mixed-version cluster without any x-pack metadata, - // we want to prevent x-pack from adding custom metadata - return node.getVersion().before(Version.V_6_3_0) || Booleans.parseBoolean(xpackInstalledAttr) == false; + return Booleans.parseBoolean(xpackInstalledAttr) == false; }).collect(Collectors.toList()); return notReadyNodes; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PostCalendarEventsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PostCalendarEventsAction.java index beff26eb34d82..ae68b2fdb26ab 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PostCalendarEventsAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PostCalendarEventsAction.java @@ -5,7 +5,6 @@ */ package org.elasticsearch.xpack.core.ml.action; -import org.elasticsearch.Version; import org.elasticsearch.action.Action; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestBuilder; @@ -149,20 +148,12 @@ public Response(List scheduledEvents) { @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); - if (in.getVersion().before(Version.V_6_3_0)) { - //the acknowledged flag was removed - in.readBoolean(); - } in.readList(ScheduledEvent::new); } @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - if (out.getVersion().before(Version.V_6_3_0)) { - //the acknowledged flag is no longer supported - out.writeBoolean(true); - } out.writeList(scheduledEvents); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutCalendarAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutCalendarAction.java index 345c4f1a96db4..0314103a3006b 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutCalendarAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutCalendarAction.java @@ -5,7 +5,6 @@ */ package org.elasticsearch.xpack.core.ml.action; -import org.elasticsearch.Version; import org.elasticsearch.action.Action; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestBuilder; @@ -147,10 +146,6 @@ public Response(Calendar calendar) { @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); - if (in.getVersion().before(Version.V_6_3_0)) { - //the acknowledged flag was removed - in.readBoolean(); - } calendar = new Calendar(in); } @@ -158,10 +153,6 @@ public void readFrom(StreamInput in) throws IOException { @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - if (out.getVersion().before(Version.V_6_3_0)) { - //the acknowledged flag is no longer supported - out.writeBoolean(true); - } calendar.writeTo(out); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutDatafeedAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutDatafeedAction.java index 448d826973595..1ac325b864536 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutDatafeedAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutDatafeedAction.java @@ -5,7 +5,6 @@ */ package org.elasticsearch.xpack.core.ml.action; -import org.elasticsearch.Version; import org.elasticsearch.action.Action; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionResponse; @@ -119,20 +118,12 @@ public DatafeedConfig getResponse() { @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); - if (in.getVersion().before(Version.V_6_3_0)) { - //the acknowledged flag was removed - in.readBoolean(); - } datafeed = new DatafeedConfig(in); } @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - if (out.getVersion().before(Version.V_6_3_0)) { - //the acknowledged flag is no longer supported - out.writeBoolean(true); - } datafeed.writeTo(out); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutJobAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutJobAction.java index dc3983644f7b7..2ae19c4f32250 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutJobAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutJobAction.java @@ -5,7 +5,6 @@ */ package org.elasticsearch.xpack.core.ml.action; -import org.elasticsearch.Version; import org.elasticsearch.action.Action; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionResponse; @@ -148,20 +147,12 @@ public Job getResponse() { @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); - if (in.getVersion().before(Version.V_6_3_0)) { - //the acknowledged flag was removed - in.readBoolean(); - } job = new Job(in); } @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - if (out.getVersion().before(Version.V_6_3_0)) { - //the acknowledged flag is no longer supported - out.writeBoolean(true); - } job.writeTo(out); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/RevertModelSnapshotAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/RevertModelSnapshotAction.java index 316598b6ab505..cae1efb7e7a31 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/RevertModelSnapshotAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/RevertModelSnapshotAction.java @@ -5,7 +5,6 @@ */ package org.elasticsearch.xpack.core.ml.action; -import org.elasticsearch.Version; import org.elasticsearch.action.Action; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionResponse; @@ -172,20 +171,12 @@ public ModelSnapshot getModel() { @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); - if (in.getVersion().before(Version.V_6_3_0)) { - //the acknowledged flag was removed - in.readBoolean(); - } model = new ModelSnapshot(in); } @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - if (out.getVersion().before(Version.V_6_3_0)) { - //the acknowledged flag is no longer supported - out.writeBoolean(true); - } model.writeTo(out); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateJobAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateJobAction.java index 6ecee409c30f1..3e1b0ea6b3c55 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateJobAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateJobAction.java @@ -5,7 +5,6 @@ */ package org.elasticsearch.xpack.core.ml.action; -import org.elasticsearch.Version; import org.elasticsearch.action.Action; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.support.master.AcknowledgedRequest; @@ -93,9 +92,6 @@ public void readFrom(StreamInput in) throws IOException { jobId = in.readString(); update = new JobUpdate(in); isInternal = in.readBoolean(); - if (in.getVersion().onOrAfter(Version.V_6_3_0) && in.getVersion().before(Version.V_7_0_0)) { - in.readBoolean(); // was waitForAck - } } @Override @@ -104,9 +100,6 @@ public void writeTo(StreamOutput out) throws IOException { out.writeString(jobId); update.writeTo(out); out.writeBoolean(isInternal); - if (out.getVersion().onOrAfter(Version.V_6_3_0) && out.getVersion().before(Version.V_7_0_0)) { - out.writeBoolean(false); // was waitForAck - } } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/JobUpdate.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/JobUpdate.java index 81a0e017c6584..b50b7d2fa5126 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/JobUpdate.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/JobUpdate.java @@ -127,7 +127,7 @@ public JobUpdate(StreamInput in) throws IOException { } customSettings = in.readMap(); modelSnapshotId = in.readOptionalString(); - if (in.getVersion().onOrAfter(Version.V_6_3_0) && in.readBoolean()) { + if (in.readBoolean()) { jobVersion = Version.readVersion(in); } else { jobVersion = null; @@ -166,13 +166,11 @@ public void writeTo(StreamOutput out) throws IOException { } out.writeMap(customSettings); out.writeOptionalString(modelSnapshotId); - if (out.getVersion().onOrAfter(Version.V_6_3_0)) { - if (jobVersion != null) { - out.writeBoolean(true); - Version.writeVersion(jobVersion, out); - } else { - out.writeBoolean(false); - } + if (jobVersion != null) { + out.writeBoolean(true); + Version.writeVersion(jobVersion, out); + } else { + out.writeBoolean(false); } if (out.getVersion().onOrAfter(Version.V_6_6_0)) { out.writeOptionalBoolean(clearJobFinishTime); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/ModelSnapshot.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/ModelSnapshot.java index e1933ef1a59ef..02bef36c00ab0 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/ModelSnapshot.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/ModelSnapshot.java @@ -336,7 +336,7 @@ public static class Builder { // Stored snapshot documents created prior to 6.3.0 will have no // value for min_version. - private Version minVersion = Version.V_6_3_0; + private Version minVersion = Version.fromString("6.3.0"); private Date timestamp; private String description; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/monitoring/MonitoringFeatureSetUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/monitoring/MonitoringFeatureSetUsage.java index a8cf5b895fb35..90728753dd366 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/monitoring/MonitoringFeatureSetUsage.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/monitoring/MonitoringFeatureSetUsage.java @@ -5,7 +5,6 @@ */ package org.elasticsearch.xpack.core.monitoring; -import org.elasticsearch.Version; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -27,9 +26,7 @@ public class MonitoringFeatureSetUsage extends XPackFeatureSet.Usage { public MonitoringFeatureSetUsage(StreamInput in) throws IOException { super(in); exporters = in.readMap(); - if (in.getVersion().onOrAfter(Version.V_6_3_0)) { - collectionEnabled = in.readOptionalBoolean(); - } + collectionEnabled = in.readOptionalBoolean(); } public MonitoringFeatureSetUsage(boolean available, boolean enabled, @@ -47,9 +44,7 @@ public Map getExporters() { public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeMap(exporters); - if (out.getVersion().onOrAfter(Version.V_6_3_0)) { - out.writeOptionalBoolean(collectionEnabled); - } + out.writeOptionalBoolean(collectionEnabled); } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/monitoring/action/MonitoringBulkResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/monitoring/action/MonitoringBulkResponse.java index 12192da0bb22f..4fda1971a012f 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/monitoring/action/MonitoringBulkResponse.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/monitoring/action/MonitoringBulkResponse.java @@ -7,7 +7,6 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ExceptionsHelper; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -81,10 +80,7 @@ public void readFrom(StreamInput in) throws IOException { super.readFrom(in); tookInMillis = in.readVLong(); error = in.readOptionalWriteable(Error::new); - - if (in.getVersion().onOrAfter(Version.V_6_3_0)) { - ignored = in.readBoolean(); - } + ignored = in.readBoolean(); } @Override @@ -92,10 +88,7 @@ public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeVLong(tookInMillis); out.writeOptionalWriteable(error); - - if (out.getVersion().onOrAfter(Version.V_6_3_0)) { - out.writeBoolean(ignored); - } + out.writeBoolean(ignored); } public static class Error implements Writeable, ToXContentObject { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/RollupJob.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/RollupJob.java index 94306966a34da..4b81b3c288a3c 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/RollupJob.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/RollupJob.java @@ -114,6 +114,6 @@ public int hashCode() { @Override public Version getMinimalSupportedVersion() { - return Version.V_6_3_0; + return Version.CURRENT.minimumCompatibilityVersion(); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/BeatsSystemUser.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/BeatsSystemUser.java index 3daf242d5203f..d65dd243ace47 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/BeatsSystemUser.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/BeatsSystemUser.java @@ -5,7 +5,6 @@ */ package org.elasticsearch.xpack.core.security.user; -import org.elasticsearch.Version; import org.elasticsearch.xpack.core.security.support.MetadataUtils; /** @@ -15,7 +14,6 @@ public class BeatsSystemUser extends User { public static final String NAME = UsernamesField.BEATS_NAME; public static final String ROLE_NAME = UsernamesField.BEATS_ROLE; - public static final Version DEFINED_SINCE = Version.V_6_3_0; public BeatsSystemUser(boolean enabled) { super(NAME, new String[]{ ROLE_NAME }, null, null, MetadataUtils.DEFAULT_RESERVED_METADATA, enabled); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/XPackLicenseStateTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/XPackLicenseStateTests.java index bc8d7817f4d69..7c599e95cc026 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/XPackLicenseStateTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/XPackLicenseStateTests.java @@ -242,7 +242,7 @@ public void testSecurityPlatinumExpired() { public void testNewTrialDefaultsSecurityOff() { XPackLicenseState licenseState = new XPackLicenseState(Settings.EMPTY); - licenseState.update(TRIAL, true, VersionUtils.randomVersionBetween(random(), Version.V_6_3_0, Version.CURRENT)); + licenseState.update(TRIAL, true, VersionUtils.randomCompatibleVersion(random(), Version.CURRENT)); assertThat(licenseState.isSecurityDisabledByLicenseDefaults(), is(true)); assertSecurityNotAllowed(licenseState); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/XPackPluginTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/XPackPluginTests.java index 59731cab71db8..ac1fe54f85abd 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/XPackPluginTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/XPackPluginTests.java @@ -14,7 +14,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.test.VersionUtils; import org.elasticsearch.xpack.core.security.authc.TokenMetaData; import org.elasticsearch.xpack.core.ssl.SSLService; @@ -48,9 +47,8 @@ public void testNodesNotReadyForXPackCustomMetadata() { DiscoveryNodes.Builder discoveryNodes = DiscoveryNodes.builder(); for (int i = 0; i < randomInt(3); i++) { - final Version version = VersionUtils.randomVersion(random()); final Map attributes; - if (randomBoolean() && version.onOrAfter(Version.V_6_3_0)) { + if (randomBoolean()) { attributes = Collections.singletonMap(XPackPlugin.XPACK_INSTALLED_NODE_ATTR, "true"); } else { nodesCompatible = false; diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlConfigMigratorTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlConfigMigratorTests.java index 81d344fd1dd02..802f969d88609 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlConfigMigratorTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlConfigMigratorTests.java @@ -20,6 +20,7 @@ import org.elasticsearch.persistent.PersistentTasksCustomMetaData; import org.elasticsearch.search.SearchModule; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.VersionUtils; import org.elasticsearch.xpack.core.ml.MlMetadata; import org.elasticsearch.xpack.core.ml.MlTasks; import org.elasticsearch.xpack.core.ml.action.OpenJobAction; @@ -144,7 +145,7 @@ public void testStoppedDatafeedConfigs() { public void testUpdateJobForMigration() { Job.Builder oldJob = JobTests.buildJobBuilder("pre-migration"); - Version oldVersion = Version.V_6_3_0; + Version oldVersion = VersionUtils.randomVersion(random()); oldJob.setJobVersion(oldVersion); Job migratedJob = MlConfigMigrator.updateJobForMigration(oldJob.build()); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportOpenJobActionTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportOpenJobActionTests.java index 1065503e091d4..5f1a4050d1f3e 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportOpenJobActionTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportOpenJobActionTests.java @@ -406,8 +406,7 @@ public void testSelectLeastLoadedMlNode_jobWithRulesButNoNodeMeetsRequiredVersio Map nodeAttr = new HashMap<>(); nodeAttr.put(MachineLearning.MAX_OPEN_JOBS_NODE_ATTR, "10"); nodeAttr.put(MachineLearning.MACHINE_MEMORY_NODE_ATTR, "1000000000"); - Version version = VersionUtils.randomVersionBetween(random(), VersionUtils.getFirstVersion(), - VersionUtils.getPreviousVersion(Version.V_6_4_0)); + Version version = Version.fromString("6.3.0"); DiscoveryNodes nodes = DiscoveryNodes.builder() .add(new DiscoveryNode("_node_name1", "_node_id1", new TransportAddress(InetAddress.getLoopbackAddress(), 9300), nodeAttr, Collections.emptySet(), version)) diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/MonitoringFeatureSetTests.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/MonitoringFeatureSetTests.java index d644a63e7bcaa..ff545549e80ef 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/MonitoringFeatureSetTests.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/MonitoringFeatureSetTests.java @@ -63,7 +63,6 @@ public void testEnabledDefault() { } public void testUsage() throws Exception { - // anything prior to 6.3 does not include collection_enabled (so defaults it to null) final Version serializedVersion = VersionUtils.randomCompatibleVersion(random(), Version.CURRENT); final boolean collectionEnabled = randomBoolean(); int localCount = randomIntBetween(0, 5); @@ -116,11 +115,7 @@ public void testUsage() throws Exception { usage.toXContent(builder, ToXContent.EMPTY_PARAMS); source = ObjectPath.createFromXContent(builder.contentType().xContent(), BytesReference.bytes(builder)); } - if (usage == monitoringUsage || serializedVersion.onOrAfter(Version.V_6_3_0)) { - assertThat(source.evaluate("collection_enabled"), is(collectionEnabled)); - } else { - assertThat(source.evaluate("collection_enabled"), is(nullValue())); - } + assertThat(source.evaluate("collection_enabled"), is(collectionEnabled)); assertThat(source.evaluate("enabled_exporters"), is(notNullValue())); if (localCount > 0) { assertThat(source.evaluate("enabled_exporters.local"), is(localCount)); diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/action/MonitoringBulkResponseTests.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/action/MonitoringBulkResponseTests.java index 3ac7f2de63c5e..901025ff2c444 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/action/MonitoringBulkResponseTests.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/action/MonitoringBulkResponseTests.java @@ -79,12 +79,7 @@ public void testSerialization() throws IOException { } else { assertThat(response2.getError(), is(notNullValue())); } - - if (version.onOrAfter(Version.V_6_3_0)) { - assertThat(response2.isIgnored(), is(response.isIgnored())); - } else { - assertThat(response2.isIgnored(), is(false)); - } + assertThat(response2.isIgnored(), is(response.isIgnored())); } } } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/ReservedRealm.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/ReservedRealm.java index 7e498efa4df2e..ac6248f4f30d6 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/ReservedRealm.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/ReservedRealm.java @@ -7,7 +7,6 @@ import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.logging.log4j.util.Supplier; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.common.settings.KeyStoreWrapper; import org.elasticsearch.common.settings.SecureSetting; @@ -199,10 +198,7 @@ public void users(ActionListener> listener) { private void getUserInfo(final String username, ActionListener listener) { - if (userIsDefinedForCurrentSecurityMapping(username) == false) { - logger.debug("Marking user [{}] as disabled because the security mapping is not at the required version", username); - listener.onResponse(disabledDefaultUserInfo.deepClone()); - } else if (securityIndex.indexExists() == false) { + if (securityIndex.indexExists() == false) { listener.onResponse(getDefaultUserInfo(username)); } else { nativeUsersStore.getReservedUserInfo(username, ActionListener.wrap((userInfo) -> { @@ -227,24 +223,6 @@ private ReservedUserInfo getDefaultUserInfo(String username) { } } - private boolean userIsDefinedForCurrentSecurityMapping(String username) { - final Version requiredVersion = getDefinedVersion(username); - return securityIndex.checkMappingVersion(requiredVersion::onOrBefore); - } - - private Version getDefinedVersion(String username) { - switch (username) { - case BeatsSystemUser.NAME: - return BeatsSystemUser.DEFINED_SINCE; - case APMSystemUser.NAME: - return APMSystemUser.DEFINED_SINCE; - case RemoteMonitoringUser.NAME: - return RemoteMonitoringUser.DEFINED_SINCE; - default: - return Version.CURRENT.minimumIndexCompatibilityVersion(); - } - } - public static void addSettings(List> settingsList) { settingsList.add(BOOTSTRAP_ELASTIC_PASSWORD); } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/SecurityIndexManager.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/SecurityIndexManager.java index 0d48cd6b856e1..b62cb44ac028c 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/SecurityIndexManager.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/SecurityIndexManager.java @@ -130,12 +130,6 @@ public SecurityIndexManager freeze() { return new SecurityIndexManager(null, aliasName, internalIndexName, internalIndexFormat, mappingSourceSupplier, indexState); } - public boolean checkMappingVersion(Predicate requiredVersion) { - // pull value into local variable for consistent view - final State currentIndexState = this.indexState; - return currentIndexState.mappingVersion == null || requiredVersion.test(currentIndexState.mappingVersion); - } - public String aliasName() { return aliasName; } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportGetUsersActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportGetUsersActionTests.java index 33cec72060886..3245c064ef07f 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportGetUsersActionTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportGetUsersActionTests.java @@ -158,7 +158,6 @@ public void testReservedUsersOnly() { NativeUsersStore usersStore = mock(NativeUsersStore.class); SecurityIndexManager securityIndex = mock(SecurityIndexManager.class); when(securityIndex.isAvailable()).thenReturn(true); - when(securityIndex.checkMappingVersion(any())).thenReturn(true); ReservedRealmTests.mockGetAllReservedUserInfo(usersStore, Collections.emptyMap()); ReservedRealm reservedRealm = diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/ReservedRealmTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/ReservedRealmTests.java index ea1b6483fd795..42efeebf03f19 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/ReservedRealmTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/ReservedRealmTests.java @@ -32,14 +32,12 @@ import org.elasticsearch.xpack.security.authc.esnative.NativeUsersStore.ReservedUserInfo; import org.elasticsearch.xpack.security.support.SecurityIndexManager; import org.junit.Before; -import org.mockito.ArgumentCaptor; import java.util.Collection; import java.util.Collections; import java.util.Map; import java.util.Map.Entry; import java.util.concurrent.ExecutionException; -import java.util.function.Predicate; import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.containsInAnyOrder; @@ -72,7 +70,6 @@ public void setupMocks() throws Exception { usersStore = mock(NativeUsersStore.class); securityIndex = mock(SecurityIndexManager.class); when(securityIndex.isAvailable()).thenReturn(true); - when(securityIndex.checkMappingVersion(any())).thenReturn(true); mockGetAllReservedUserInfo(usersStore, Collections.emptyMap()); threadPool = mock(ThreadPool.class); when(threadPool.getThreadContext()).thenReturn(new ThreadContext(Settings.EMPTY)); @@ -164,8 +161,6 @@ private void verifySuccessfulAuthentication(boolean enabled) throws Exception { verify(securityIndex, times(2)).indexExists(); verify(usersStore, times(2)).getReservedUserInfo(eq(principal), any(ActionListener.class)); - final ArgumentCaptor predicateCaptor = ArgumentCaptor.forClass(Predicate.class); - verify(securityIndex, times(2)).checkMappingVersion(predicateCaptor.capture()); verifyNoMoreInteractions(usersStore); } @@ -182,9 +177,6 @@ public void testLookup() throws Exception { assertEquals(expectedUser, user); verify(securityIndex).indexExists(); - final ArgumentCaptor predicateCaptor = ArgumentCaptor.forClass(Predicate.class); - verify(securityIndex).checkMappingVersion(predicateCaptor.capture()); - PlainActionFuture future = new PlainActionFuture<>(); reservedRealm.doLookupUser("foobar", future); final User doesntExist = future.actionGet(); @@ -229,9 +221,6 @@ public void testLookupThrows() throws Exception { verify(securityIndex).indexExists(); verify(usersStore).getReservedUserInfo(eq(principal), any(ActionListener.class)); - final ArgumentCaptor predicateCaptor = ArgumentCaptor.forClass(Predicate.class); - verify(securityIndex).checkMappingVersion(predicateCaptor.capture()); - verifyNoMoreInteractions(usersStore); } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/support/SecurityIndexManagerTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/support/SecurityIndexManagerTests.java index 6e7a9806781b5..157e0ffb82013 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/support/SecurityIndexManagerTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/support/SecurityIndexManagerTests.java @@ -394,14 +394,6 @@ private static String loadTemplate(String templateName) { return TemplateUtils.loadTemplate(resource, Version.CURRENT.toString(), TEMPLATE_VERSION_PATTERN); } - public void testMappingVersionMatching() throws IOException { - String templateString = "/" + SECURITY_MAIN_TEMPLATE_7 + ".json"; - ClusterState.Builder clusterStateBuilder = createClusterStateWithMappingAndTemplate(templateString); - manager.clusterChanged(new ClusterChangedEvent("test-event", clusterStateBuilder.build(), EMPTY_CLUSTER_STATE)); - assertTrue(manager.checkMappingVersion(Version.CURRENT.minimumIndexCompatibilityVersion()::before)); - assertFalse(manager.checkMappingVersion(Version.CURRENT.minimumIndexCompatibilityVersion()::after)); - } - public void testMissingVersionMappingThrowsError() throws IOException { String templateString = "/missing-version-security-index-template.json"; ClusterState.Builder clusterStateBuilder = createClusterStateWithMappingAndTemplate(templateString); diff --git a/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java b/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java index a62a23dac70b8..3ac0b20f95d0f 100644 --- a/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java +++ b/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java @@ -203,7 +203,6 @@ public void testWatcher() throws Exception { * Tests that a RollUp job created on a old cluster is correctly restarted after the upgrade. */ public void testRollupAfterRestart() throws Exception { - assumeTrue("Rollup can be tested with 6.3.0 and onwards", getOldClusterVersion().onOrAfter(Version.V_6_3_0)); if (isRunningAgainstOldCluster()) { final int numDocs = 59; final int year = randomIntBetween(1970, 2018); From d992b1da009be22cbcc25b13aadc0e56a77b9d89 Mon Sep 17 00:00:00 2001 From: Henning Andersen <33268011+henningandersen@users.noreply.github.com> Date: Fri, 24 May 2019 16:48:45 +0200 Subject: [PATCH 094/224] Shard CLI tool always check shards (#41480) The shard CLI tool would not do anything if a corruption marker was not present. But a corruption marker is only added if a corruption is detected during indexing/writing, not if a search or other read fails. Changed the tool to always check shards regardless of corruption marker presence. Related to #41298 --- .../RemoveCorruptedLuceneSegmentsAction.java | 17 +--- .../RemoveCorruptedShardDataCommandTests.java | 90 +++++++++++++++---- 2 files changed, 79 insertions(+), 28 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/index/shard/RemoveCorruptedLuceneSegmentsAction.java b/server/src/main/java/org/elasticsearch/index/shard/RemoveCorruptedLuceneSegmentsAction.java index b4b59872758ed..da0257c19e334 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/RemoveCorruptedLuceneSegmentsAction.java +++ b/server/src/main/java/org/elasticsearch/index/shard/RemoveCorruptedLuceneSegmentsAction.java @@ -38,9 +38,7 @@ public Tuple getCleanStatus Lock writeLock, PrintStream printStream, boolean verbose) throws IOException { - if (RemoveCorruptedShardDataCommand.isCorruptMarkerFileIsPresent(indexDirectory) == false) { - return Tuple.tuple(RemoveCorruptedShardDataCommand.CleanStatus.CLEAN, null); - } + boolean markedCorrupted = RemoveCorruptedShardDataCommand.isCorruptMarkerFileIsPresent(indexDirectory); final CheckIndex.Status status; try (CheckIndex checker = new CheckIndex(indexDirectory, writeLock)) { @@ -55,7 +53,9 @@ public Tuple getCleanStatus } return status.clean - ? Tuple.tuple(RemoveCorruptedShardDataCommand.CleanStatus.CLEAN_WITH_CORRUPTED_MARKER, null) + ? Tuple.tuple(markedCorrupted + ? RemoveCorruptedShardDataCommand.CleanStatus.CLEAN_WITH_CORRUPTED_MARKER + : RemoveCorruptedShardDataCommand.CleanStatus.CLEAN, null) : Tuple.tuple(RemoveCorruptedShardDataCommand.CleanStatus.CORRUPTED, "Corrupted Lucene index segments found - " + status.totLoseDocCount + " documents will be lost."); } @@ -67,8 +67,6 @@ public void execute(Terminal terminal, Lock writeLock, PrintStream printStream, boolean verbose) throws IOException { - checkCorruptMarkerFileIsPresent(indexDirectory); - final CheckIndex.Status status; try (CheckIndex checker = new CheckIndex(indexDirectory, writeLock)) { @@ -90,11 +88,4 @@ public void execute(Terminal terminal, } } } - - protected void checkCorruptMarkerFileIsPresent(Directory directory) throws IOException { - if (RemoveCorruptedShardDataCommand.isCorruptMarkerFileIsPresent(directory) == false) { - throw new ElasticsearchException("There is no corruption file marker"); - } - } - } diff --git a/server/src/test/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommandTests.java b/server/src/test/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommandTests.java index c9a7b236d9c8f..c7b1846356363 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommandTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommandTests.java @@ -76,6 +76,9 @@ public class RemoveCorruptedShardDataCommandTests extends IndexShardTestCase { private Path translogPath; private Path indexPath; + private static final Pattern NUM_CORRUPT_DOCS_PATTERN = + Pattern.compile("Corrupted Lucene index segments found -\\s+(?\\d+) documents will be lost."); + @Before public void setup() throws IOException { shardId = new ShardId("index0", "_na_", 0); @@ -154,11 +157,13 @@ public void testCorruptedIndex() throws Exception { final boolean corruptSegments = randomBoolean(); CorruptionUtils.corruptIndex(random(), indexPath, corruptSegments); - // test corrupted shard - final IndexShard corruptedShard = reopenIndexShard(true); - allowShardFailures(); - expectThrows(IndexShardRecoveryException.class, () -> newStartedShard(p -> corruptedShard, true)); - closeShards(corruptedShard); + if (randomBoolean()) { + // test corrupted shard and add corruption marker + final IndexShard corruptedShard = reopenIndexShard(true); + allowShardFailures(); + expectThrows(IndexShardRecoveryException.class, () -> newStartedShard(p -> corruptedShard, true)); + closeShards(corruptedShard); + } final RemoveCorruptedShardDataCommand command = new RemoveCorruptedShardDataCommand(); final MockTerminal t = new MockTerminal(); @@ -196,8 +201,7 @@ public void testCorruptedIndex() throws Exception { final Set shardDocUIDs = getShardDocUIDs(newShard); - final Pattern pattern = Pattern.compile("Corrupted Lucene index segments found -\\s+(?\\d+) documents will be lost."); - final Matcher matcher = pattern.matcher(output); + final Matcher matcher = NUM_CORRUPT_DOCS_PATTERN.matcher(output); assertThat(matcher.find(), equalTo(true)); final int expectedNumDocs = numDocs - Integer.parseInt(matcher.group("docs")); @@ -272,12 +276,13 @@ public void testCorruptedBothIndexAndTranslog() throws Exception { CorruptionUtils.corruptIndex(random(), indexPath, false); - // test corrupted shard - final IndexShard corruptedShard = reopenIndexShard(true); - allowShardFailures(); - expectThrows(IndexShardRecoveryException.class, () -> newStartedShard(p -> corruptedShard, true)); - closeShards(corruptedShard); - + if (randomBoolean()) { + // test corrupted shard and add corruption marker + final IndexShard corruptedShard = reopenIndexShard(true); + allowShardFailures(); + expectThrows(IndexShardRecoveryException.class, () -> newStartedShard(p -> corruptedShard, true)); + closeShards(corruptedShard); + } TestTranslog.corruptRandomTranslogFile(logger, random(), Arrays.asList(translogPath)); final RemoveCorruptedShardDataCommand command = new RemoveCorruptedShardDataCommand(); @@ -313,8 +318,7 @@ public void testCorruptedBothIndexAndTranslog() throws Exception { final Set shardDocUIDs = getShardDocUIDs(newShard); - final Pattern pattern = Pattern.compile("Corrupted Lucene index segments found -\\s+(?\\d+) documents will be lost."); - final Matcher matcher = pattern.matcher(output); + final Matcher matcher = NUM_CORRUPT_DOCS_PATTERN.matcher(output); assertThat(matcher.find(), equalTo(true)); final int expectedNumDocs = numDocsToKeep - Integer.parseInt(matcher.group("docs")); @@ -347,6 +351,62 @@ public void testResolveIndexDirectory() throws Exception { shardPath -> assertThat(shardPath.resolveIndex(), equalTo(indexPath))); } + public void testCleanWithCorruptionMarker() throws Exception { + // index some docs in several segments + final int numDocs = indexDocs(indexShard, true); + + indexShard.store().markStoreCorrupted(null); + + closeShards(indexShard); + + allowShardFailures(); + final IndexShard corruptedShard = reopenIndexShard(true); + expectThrows(IndexShardRecoveryException.class, () -> newStartedShard(p -> corruptedShard, true)); + closeShards(corruptedShard); + + final RemoveCorruptedShardDataCommand command = new RemoveCorruptedShardDataCommand(); + final MockTerminal t = new MockTerminal(); + final OptionParser parser = command.getParser(); + + final OptionSet options = parser.parse("-d", translogPath.toString()); + // run command with dry-run + t.addTextInput("n"); // mean dry run + t.addTextInput("n"); // mean dry run + t.setVerbosity(Terminal.Verbosity.VERBOSE); + try { + command.execute(t, options, environment); + fail(); + } catch (ElasticsearchException e) { + assertThat(e.getMessage(), containsString("aborted by user")); + assertThat(t.getOutput(), containsString("Continue and remove corrupted data from the shard ?")); + assertThat(t.getOutput(), containsString("Lucene index is marked corrupted, but no corruption detected")); + } + + logger.info("--> output:\n{}", t.getOutput()); + + // run command without dry-run + t.reset(); + t.addTextInput("y"); + t.addTextInput("y"); + command.execute(t, options, environment); + + final String output = t.getOutput(); + logger.info("--> output:\n{}", output); + + failOnShardFailures(); + final IndexShard newShard = newStartedShard(p -> reopenIndexShard(false), true); + + final Set shardDocUIDs = getShardDocUIDs(newShard); + assertEquals(numDocs, shardDocUIDs.size()); + + assertThat(t.getOutput(), containsString("This shard has been marked as corrupted but no corruption can now be detected.")); + + final Matcher matcher = NUM_CORRUPT_DOCS_PATTERN.matcher(output); + assertFalse(matcher.find()); + + closeShards(newShard); + } + private IndexShard reopenIndexShard(boolean corrupted) throws IOException { // open shard with the same location final ShardRouting shardRouting = ShardRoutingHelper.initWithSameId(indexShard.routingEntry(), From 43848fc40e4cb3b3c965577ee4088c0f8707c0a2 Mon Sep 17 00:00:00 2001 From: Yannick Welsch Date: Fri, 24 May 2019 17:18:53 +0200 Subject: [PATCH 095/224] Wipe repositories more often (#42511) Fixes an issue where repositories are unintentionally shared among tests (given that the repo contents is captured in a static variable on the test class, to allow "sharing" among nodes) and two tests randomly chose the same snapshot name, leading to a conflict. Closes #42519 --- .../gcs/GoogleCloudStorageBlobStoreRepositoryTests.java | 6 +++--- .../repositories/s3/S3BlobStoreRepositoryTests.java | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreRepositoryTests.java b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreRepositoryTests.java index db166a228b576..0e3ecde69c4f0 100644 --- a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreRepositoryTests.java +++ b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreRepositoryTests.java @@ -27,7 +27,7 @@ import org.elasticsearch.plugins.Plugin; import org.elasticsearch.repositories.Repository; import org.elasticsearch.repositories.blobstore.ESBlobStoreRepositoryIntegTestCase; -import org.junit.AfterClass; +import org.junit.After; import java.util.Collection; import java.util.Collections; @@ -67,8 +67,8 @@ protected void afterCreationCheck(Repository repository) { assertThat(repository, instanceOf(GoogleCloudStorageRepository.class)); } - @AfterClass - public static void wipeRepository() { + @After + public void wipeRepository() { blobs.clear(); } diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java index 61c0328e516b7..e94ea5ef6c9c3 100644 --- a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java +++ b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java @@ -38,7 +38,7 @@ import org.elasticsearch.rest.action.admin.cluster.RestGetRepositoriesAction; import org.elasticsearch.test.rest.FakeRestRequest; import org.elasticsearch.threadpool.ThreadPool; -import org.junit.AfterClass; +import org.junit.After; import org.junit.BeforeClass; import java.util.Collection; @@ -78,8 +78,8 @@ public static void setUpRepositorySettings() { } } - @AfterClass - public static void wipeRepository() { + @After + public void wipeRepository() { blobs.clear(); } From cd324a1b3ae4e68eea3a98d764738be4c3c4eb56 Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Fri, 24 May 2019 11:19:41 -0400 Subject: [PATCH 096/224] Add test to verify force primary allocation on closed indices (#42458) This change adds a test verifying that we can force primary allocation on closed indices. --- .../cluster/routing/PrimaryAllocationIT.java | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/PrimaryAllocationIT.java b/server/src/test/java/org/elasticsearch/cluster/routing/PrimaryAllocationIT.java index 00a2f5e34a791..0e6b24c45d169 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/PrimaryAllocationIT.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/PrimaryAllocationIT.java @@ -23,9 +23,11 @@ import org.elasticsearch.action.DocWriteResponse; import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteRequestBuilder; import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresResponse; +import org.elasticsearch.action.admin.indices.stats.ShardStats; import org.elasticsearch.action.bulk.BulkResponse; import org.elasticsearch.action.index.IndexResponse; import org.elasticsearch.action.support.ActiveShardCount; +import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateListener; import org.elasticsearch.cluster.metadata.IndexMetaData; @@ -65,6 +67,7 @@ import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; +import java.util.stream.Stream; import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS; import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS; @@ -231,7 +234,9 @@ public void testForceStaleReplicaToBePromotedToPrimary() throws Exception { Set historyUUIDs = Arrays.stream(client().admin().indices().prepareStats("test").clear().get().getShards()) .map(shard -> shard.getCommitStats().getUserData().get(Engine.HISTORY_UUID_KEY)).collect(Collectors.toSet()); createStaleReplicaScenario(master); - + if (randomBoolean()) { + assertAcked(client().admin().indices().prepareClose("test").setWaitForActiveShards(0)); + } boolean useStaleReplica = randomBoolean(); // if true, use stale replica, otherwise a completely empty copy logger.info("--> explicitly promote old primary shard"); final String idxName = "test"; @@ -281,15 +286,18 @@ public void testForceStaleReplicaToBePromotedToPrimary() throws Exception { assertBusy(() -> assertTrue(client().admin().cluster().prepareState().get() .getState().routingTable().index(idxName).allPrimaryShardsActive())); } - assertHitCount(client().prepareSearch(idxName).setSize(0).setQuery(matchAllQuery()).get(), useStaleReplica ? 1L : 0L); - + ShardStats[] shardStats = client().admin().indices().prepareStats("test") + .setIndicesOptions(IndicesOptions.LENIENT_EXPAND_OPEN_CLOSED).get().getShards(); + for (ShardStats shardStat : shardStats) { + assertThat(shardStat.getCommitStats().getNumDocs(), equalTo(useStaleReplica ? 1 : 0)); + } // allocation id of old primary was cleaned from the in-sync set final ClusterState state = client().admin().cluster().prepareState().get().getState(); assertEquals(Collections.singleton(state.routingTable().index(idxName).shard(0).primary.allocationId().getId()), state.metaData().index(idxName).inSyncAllocationIds(0)); - Set newHistoryUUIds = Arrays.stream(client().admin().indices().prepareStats("test").clear().get().getShards()) + Set newHistoryUUIds = Stream.of(shardStats) .map(shard -> shard.getCommitStats().getUserData().get(Engine.HISTORY_UUID_KEY)).collect(Collectors.toSet()); assertThat(newHistoryUUIds, everyItem(not(isIn(historyUUIDs)))); assertThat(newHistoryUUIds, hasSize(1)); From dfc3b8e416f35b4d2ea1348f362614ff2ecc4b08 Mon Sep 17 00:00:00 2001 From: Lisa Cawley Date: Fri, 24 May 2019 09:00:38 -0700 Subject: [PATCH 097/224] [DOCS] Removes X-Pack setup (#42481) --- docs/reference/index.asciidoc | 2 -- docs/reference/redirects.asciidoc | 8 ++++++++ docs/reference/setup/setup-xes.asciidoc | 18 ------------------ 3 files changed, 8 insertions(+), 20 deletions(-) delete mode 100644 docs/reference/setup/setup-xes.asciidoc diff --git a/docs/reference/index.asciidoc b/docs/reference/index.asciidoc index 5ed2a93e4bae4..64f5b57d57c17 100644 --- a/docs/reference/index.asciidoc +++ b/docs/reference/index.asciidoc @@ -14,8 +14,6 @@ include::getting-started.asciidoc[] include::setup.asciidoc[] -include::setup/setup-xes.asciidoc[] - include::monitoring/configuring-monitoring.asciidoc[] include::{xes-repo-dir}/security/configuring-es.asciidoc[] diff --git a/docs/reference/redirects.asciidoc b/docs/reference/redirects.asciidoc index b5f0e08a45232..9a8a0c20bb272 100644 --- a/docs/reference/redirects.asciidoc +++ b/docs/reference/redirects.asciidoc @@ -595,3 +595,11 @@ See <>. [role="exclude",id="_faster_prefix_queries_with_literal_index_prefixes_literal.html"] See <>. + +[role="exclude",id="setup-xpack"] +=== Set up {xpack} + +{xpack} is an Elastic Stack extension that provides security, alerting, +monitoring, reporting, machine learning, and many other capabilities. By default, +when you install {es}, {xpack} is installed. + diff --git a/docs/reference/setup/setup-xes.asciidoc b/docs/reference/setup/setup-xes.asciidoc deleted file mode 100644 index 55c1fe8bf42f6..0000000000000 --- a/docs/reference/setup/setup-xes.asciidoc +++ /dev/null @@ -1,18 +0,0 @@ -[role="xpack"] -[[setup-xpack]] -== Set up {xpack} - -{xpack} is an Elastic Stack extension that provides security, alerting, -monitoring, reporting, machine learning, and many other capabilities. By default, -when you install {es}, {xpack} is installed. - -If you want to try all of the {xpack} features, you can -{stack-ov}/license-management.html[start a 30-day trial]. At the end of the -trial period, you can purchase a subscription to keep using the full -functionality of the {xpack} components. For more information, see -https://www.elastic.co/subscriptions. - -* <> -* <> -* <> -* <> From ffa5461b7f6bbc5d6587fd181591b710bd53a543 Mon Sep 17 00:00:00 2001 From: Lisa Cawley Date: Fri, 24 May 2019 09:31:24 -0700 Subject: [PATCH 098/224] [DOCS] Removes X-Pack Java client configuration (#42480) --- docs/reference/index.asciidoc | 2 - docs/reference/redirects.asciidoc | 8 ++ docs/reference/setup/setup-xclient.asciidoc | 113 -------------------- 3 files changed, 8 insertions(+), 115 deletions(-) delete mode 100644 docs/reference/setup/setup-xclient.asciidoc diff --git a/docs/reference/index.asciidoc b/docs/reference/index.asciidoc index 64f5b57d57c17..8e5fea810fb7b 100644 --- a/docs/reference/index.asciidoc +++ b/docs/reference/index.asciidoc @@ -18,8 +18,6 @@ include::monitoring/configuring-monitoring.asciidoc[] include::{xes-repo-dir}/security/configuring-es.asciidoc[] -include::setup/setup-xclient.asciidoc[] - include::setup/bootstrap-checks-xes.asciidoc[] :edit_url: diff --git a/docs/reference/redirects.asciidoc b/docs/reference/redirects.asciidoc index 9a8a0c20bb272..6f68d781f4856 100644 --- a/docs/reference/redirects.asciidoc +++ b/docs/reference/redirects.asciidoc @@ -603,3 +603,11 @@ See <>. monitoring, reporting, machine learning, and many other capabilities. By default, when you install {es}, {xpack} is installed. +[role="exclude",id="setup-xpack-client"] +=== Configuring {xpack} Java Clients + +The `TransportClient` is deprecated in favour of the +{java-rest}/java-rest-high.html[Java High Level REST Client] and was removed in +Elasticsearch 8.0. The +{java-rest}/java-rest-high-level-migration.html[migration guide] describes all +the steps needed to migrate. \ No newline at end of file diff --git a/docs/reference/setup/setup-xclient.asciidoc b/docs/reference/setup/setup-xclient.asciidoc deleted file mode 100644 index a192aeb6ea39a..0000000000000 --- a/docs/reference/setup/setup-xclient.asciidoc +++ /dev/null @@ -1,113 +0,0 @@ -[role="xpack"] -[testenv="basic"] -[[setup-xpack-client]] -== Configuring {xpack} Java Clients - -deprecated[7.0.0, The `TransportClient` is deprecated in favour of the {java-rest}/java-rest-high.html[Java High Level REST Client] and will be removed in Elasticsearch 8.0. The {java-rest}/java-rest-high-level-migration.html[migration guide] describes all the steps needed to migrate.] - -If you want to use a Java {javaclient}/transport-client.html[transport client] with a -cluster where {xpack} is installed, then you must download and configure the -{xpack} transport client. - -. Add the {xpack} transport JAR file to your *CLASSPATH*. You can download the {xpack} -distribution and extract the JAR file manually or you can get it from the -https://artifacts.elastic.co/maven/org/elasticsearch/client/x-pack-transport/{version}/x-pack-transport-{version}.jar[Elasticsearch Maven repository]. -As with any dependency, you will also need its transitive dependencies. Refer to the -https://artifacts.elastic.co/maven/org/elasticsearch/client/x-pack-transport/{version}/x-pack-transport-{version}.pom[X-Pack POM file -for your version] when downloading for offline usage. - -. If you are using Maven, you need to add the {xpack} JAR file as a dependency in -your project's `pom.xml` file: -+ --- -[source,xml] --------------------------------------------------------------- - - - - - elasticsearch-releases - https://artifacts.elastic.co/maven - - true - - - false - - - ... - - ... - - - - - org.elasticsearch.client - x-pack-transport - {version} - - ... - - ... - - --------------------------------------------------------------- --- - -. If you are using Gradle, you need to add the {xpack} JAR file as a dependency in -your `build.gradle` file: -+ --- -[source,groovy] --------------------------------------------------------------- -repositories { - /* ... Any other repositories ... */ - - // Add the Elasticsearch Maven Repository - maven { - name "elastic" - url "https://artifacts.elastic.co/maven" - } -} - -dependencies { - compile "org.elasticsearch.client:x-pack-transport:{version}" - - /* ... */ -} --------------------------------------------------------------- --- - -. If you are using a repository manager such as https://www.sonatype.com/nexus-repository-oss[Nexus OSS] within your -company, you need to add the repository as per the following screenshot: -+ --- -image::security/images/nexus.png["Adding the Elastic repo in Nexus",link="images/nexus.png"] - -Then in your project's `pom.xml` if using maven, add the following repositories and dependencies definitions: - -[source,xml] --------------------------------------------------------------- - - - org.elasticsearch.client - x-pack-transport - {version} - - - - - - local-nexus - Elastic Local Nexus - http://0.0.0.0:8081/repository/elasticsearch/ - - true - - - false - - - --------------------------------------------------------------- --- From a92f3504749dafed90942722ae4e59cf55d1f527 Mon Sep 17 00:00:00 2001 From: Igor Motov Date: Fri, 24 May 2019 13:16:48 -0400 Subject: [PATCH 099/224] SQL: Moves the JTS-based tests suppression to Before (#42526) Moves the test suppression from `ClassRule` to `Before`, where it is properly handled in the CI build. Fixes #42221 --- .../xpack/sql/qa/geo/GeoSqlSpecTestCase.java | 4 +-- .../sql/qa/src/main/resources/geo/geo.csv | 30 +++++++++---------- 2 files changed, 17 insertions(+), 17 deletions(-) diff --git a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/geo/GeoSqlSpecTestCase.java b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/geo/GeoSqlSpecTestCase.java index ec97cab6f10b1..025b04d66ce95 100644 --- a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/geo/GeoSqlSpecTestCase.java +++ b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/geo/GeoSqlSpecTestCase.java @@ -31,8 +31,6 @@ public abstract class GeoSqlSpecTestCase extends SpecBaseIntegrationTestCase { @ClassRule public static LocalH2 H2 = new LocalH2((c) -> { - assumeTrue("JTS inside H2 is using default local for toUpperCase() in string comparison making it fail to parse WKT on certain" + - " locales", "point".toUpperCase(Locale.getDefault()).equals("POINT")); // Load GIS extensions H2GISFunctions.load(c); c.createStatement().execute("RUNSCRIPT FROM 'classpath:/ogc/sqltsch.sql'"); @@ -52,6 +50,8 @@ public static List readScriptSpec() throws Exception { public void setupTestGeoDataIfNeeded() throws Exception { assumeTrue("Cannot support locales that don't use Hindu-Arabic numerals and non-ascii - sign due to H2", "-42".equals(NumberFormat.getInstance(Locale.getDefault()).format(-42))); + assumeTrue("JTS inside H2 is using default local for toUpperCase() in string comparison making it fail to parse WKT on certain" + + " locales", "point".toUpperCase(Locale.getDefault()).equals("POINT")); if (client().performRequest(new Request("HEAD", "/ogc")).getStatusLine().getStatusCode() == 404) { GeoDataLoader.loadOGCDatasetIntoEs(client(), "ogc"); } diff --git a/x-pack/plugin/sql/qa/src/main/resources/geo/geo.csv b/x-pack/plugin/sql/qa/src/main/resources/geo/geo.csv index 8275bd7c884ef..d21ea71c5b949 100644 --- a/x-pack/plugin/sql/qa/src/main/resources/geo/geo.csv +++ b/x-pack/plugin/sql/qa/src/main/resources/geo/geo.csv @@ -1,16 +1,16 @@ city,region,region_point,location,shape -Mountain View,Americas,POINT(-105.2551 54.5260),point (-122.083843 37.386483),point (-122.083843 37.386483) -Chicago,Americas,POINT(-105.2551 54.5260),point (-87.637874 41.888783),point (-87.637874 41.888783) -New York,Americas,POINT(-105.2551 54.5260),point (-73.990027 40.745171),point (-73.990027 40.745171) -San Francisco,Americas,POINT(-105.2551 54.5260),point (-122.394228 37.789541),point (-122.394228 37.789541) -Phoenix,Americas,POINT(-105.2551 54.5260),point (-111.973505 33.376242),point (-111.973505 33.376242) -Amsterdam,Europe,POINT(15.2551 54.5260),point (4.850312 52.347557),point (4.850312 52.347557) -Berlin,Europe,POINT(15.2551 54.5260),point (13.390889 52.486701),point (13.390889 52.486701) -Munich,Europe,POINT(15.2551 54.5260),point (11.537505 48.146321),point (11.537505 48.146321) -London,Europe,POINT(15.2551 54.5260),point (-0.121672 51.510871),point (-0.121672 51.510871) -Paris,Europe,POINT(15.2551 54.5260),point (2.351773 48.845538),point (2.351773 48.845538) -Singapore,Asia,POINT(100.6197 34.0479),point (103.855535 1.295868),point (103.855535 1.295868) -Hong Kong,Asia,POINT(100.6197 34.0479),point (114.183925 22.281397),point (114.183925 22.281397) -Seoul,Asia,POINT(100.6197 34.0479),point (127.060851 37.509132),point (127.060851 37.509132) -Tokyo,Asia,POINT(100.6197 34.0479),point (139.76402225 35.669616),point (139.76402225 35.669616) -Sydney,Asia,POINT(100.6197 34.0479),point (151.208629 -33.863385),point (151.208629 -33.863385) +Mountain View,Americas,POINT(-105.2551 54.5260),POINT (-122.083843 37.386483),POINT (-122.083843 37.386483) +Chicago,Americas,POINT(-105.2551 54.5260),POINT (-87.637874 41.888783),POINT (-87.637874 41.888783) +New York,Americas,POINT(-105.2551 54.5260),POINT (-73.990027 40.745171),POINT (-73.990027 40.745171) +San Francisco,Americas,POINT(-105.2551 54.5260),POINT (-122.394228 37.789541),POINT (-122.394228 37.789541) +Phoenix,Americas,POINT(-105.2551 54.5260),POINT (-111.973505 33.376242),POINT (-111.973505 33.376242) +Amsterdam,Europe,POINT(15.2551 54.5260),POINT (4.850312 52.347557),POINT (4.850312 52.347557) +Berlin,Europe,POINT(15.2551 54.5260),POINT (13.390889 52.486701),POINT (13.390889 52.486701) +Munich,Europe,POINT(15.2551 54.5260),POINT (11.537505 48.146321),POINT (11.537505 48.146321) +London,Europe,POINT(15.2551 54.5260),POINT (-0.121672 51.510871),POINT (-0.121672 51.510871) +Paris,Europe,POINT(15.2551 54.5260),POINT (2.351773 48.845538),POINT (2.351773 48.845538) +Singapore,Asia,POINT(100.6197 34.0479),POINT (103.855535 1.295868),POINT (103.855535 1.295868) +Hong Kong,Asia,POINT(100.6197 34.0479),POINT (114.183925 22.281397),POINT (114.183925 22.281397) +Seoul,Asia,POINT(100.6197 34.0479),POINT (127.060851 37.509132),POINT (127.060851 37.509132) +Tokyo,Asia,POINT(100.6197 34.0479),POINT (139.76402225 35.669616),POINT (139.76402225 35.669616) +Sydney,Asia,POINT(100.6197 34.0479),POINT (151.208629 -33.863385),POINT (151.208629 -33.863385) From 1b6dc178388c4a4163fb908419c20a580e23b46a Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Fri, 24 May 2019 10:30:06 -0700 Subject: [PATCH 100/224] Remove transport client from tests (#42457) This commit removes testing infrastructure for using the transport client. --- .../mustache/MultiSearchTemplateIT.java | 5 - .../join/query/ParentChildTestCase.java | 5 - .../index/rankeval/RankEvalRequestIT.java | 5 - .../documentation/ReindexDocumentationIT.java | 5 - .../index/reindex/ReindexTestCase.java | 5 - .../index/reindex/RetryTests.java | 7 - .../elasticsearch/ESNetty4IntegTestCase.java | 13 - .../netty4/Netty4HttpRequestSizeLimitIT.java | 2 +- .../rest/discovery/Zen2RestApiIT.java | 2 +- .../azure/classic/AzureSimpleTests.java | 1 - .../classic/AzureTwoStartedNodesTests.java | 1 - .../ec2/Ec2DiscoveryUpdateSettingsTests.java | 2 +- .../org/elasticsearch/NioIntegTestCase.java | 13 - .../http/ContextAndHeaderTransportIT.java | 347 -------------- .../elasticsearch/http/HttpSmokeTestCase.java | 13 - .../admin/cluster/node/tasks/TasksIT.java | 7 +- .../admin/indices/exists/IndicesExistsIT.java | 3 +- .../transport/FailAndRetryMockTransport.java | 235 ---------- .../client/transport/NodeDisconnectIT.java | 83 ---- .../TransportClientHeadersTests.java | 190 -------- .../client/transport/TransportClientIT.java | 108 ----- .../TransportClientNodesServiceTests.java | 441 ------------------ .../transport/TransportClientRetryIT.java | 85 ---- .../transport/TransportClientTests.java | 121 ----- .../elasticsearch/cluster/ClusterStateIT.java | 350 -------------- .../coordination/RareClusterStateIT.java | 2 +- .../ClusterDisruptionCleanSettingsIT.java | 2 +- .../discovery/ClusterDisruptionIT.java | 2 +- .../discovery/DiscoveryDisruptionIT.java | 2 +- .../discovery/MasterDisruptionIT.java | 2 +- .../discovery/SnapshotDisruptionIT.java | 2 +- .../discovery/StableMasterDisruptionIT.java | 2 +- .../single/SingleNodeDiscoveryIT.java | 4 +- .../query/plugin/CustomQueryParserIT.java | 5 - .../index/store/ExceptionRetryIT.java | 2 +- .../indices/settings/InternalSettingsIT.java | 5 - .../indices/settings/PrivateSettingsIT.java | 5 - .../PersistentTasksExecutorFullRestartIT.java | 5 - .../persistent/PersistentTasksExecutorIT.java | 5 - .../decider/EnableAssignmentDeciderIT.java | 5 - .../recovery/FullRollingRestartIT.java | 2 +- .../SignificantTermsSignificanceScoreIT.java | 5 - .../search/fetch/FetchSubPhasePluginIT.java | 5 - .../functionscore/FunctionScorePluginIT.java | 5 - .../search/scroll/SearchScrollIT.java | 6 +- .../DedicatedClusterSnapshotRestoreIT.java | 2 +- .../snapshots/SnapshotShardsServiceIT.java | 2 +- .../threadpool/SimpleThreadPoolIT.java | 3 +- .../ConcurrentSeqNoVersioningIT.java | 3 +- .../elasticsearch/test/ESIntegTestCase.java | 127 +---- .../test/InternalTestCluster.java | 110 +---- .../org/elasticsearch/test/TestCluster.java | 7 +- .../test/test/InternalTestClusterTests.java | 14 +- .../elasticsearch/xpack/CcrIntegTestCase.java | 4 +- .../xpack/ccr/CcrDisabledIT.java | 11 - .../AbstractLicensesIntegrationTestCase.java | 13 - .../license/LicenseServiceClusterTests.java | 7 +- .../license/StartBasicLicenseTests.java | 8 +- .../license/StartTrialLicenseTests.java | 8 +- .../snapshots/SourceOnlySnapshotIT.java | 2 +- .../IndexLifecycleInitialisationTests.java | 2 +- .../xpack/ml/support/BaseMlIntegTestCase.java | 19 +- .../monitoring/MultiNodesStatsTests.java | 2 +- .../AbstractIndicesCleanerTestCase.java | 2 +- .../exporter/http/HttpExporterIT.java | 2 +- .../exporter/http/HttpExporterSslIT.java | 2 +- .../local/LocalExporterIntegTests.java | 2 +- .../LocalExporterResourceIntegTests.java | 2 +- .../test/MonitoringIntegTestCase.java | 16 - .../DocumentLevelSecurityTests.java | 5 - .../integration/FieldLevelSecurityTests.java | 8 - .../ShrinkIndexWithSecurityTests.java | 2 +- .../test/SecurityIntegTestCase.java | 3 +- .../SecurityServerTransportServiceTests.java | 9 - .../xpack/security/TemplateUpgraderTests.java | 2 +- .../AuditTrailSettingsUpdateTests.java | 2 +- .../filter/IpFilteringIntegrationTests.java | 2 +- .../filter/IpFilteringUpdateTests.java | 2 +- .../transport/ssl/EllipticCurveSSLTests.java | 15 - .../xpack/ssl/SSLTrustRestrictionsTests.java | 2 +- .../sql/action/AbstractSqlIntegTestCase.java | 7 +- .../xpack/sql/action/SqlDisabledIT.java | 8 - .../AbstractWatcherIntegrationTestCase.java | 18 +- .../test/integration/SingleNodeTests.java | 2 +- 84 files changed, 78 insertions(+), 2501 deletions(-) delete mode 100644 qa/smoke-test-http/src/test/java/org/elasticsearch/http/ContextAndHeaderTransportIT.java delete mode 100644 server/src/test/java/org/elasticsearch/client/transport/FailAndRetryMockTransport.java delete mode 100644 server/src/test/java/org/elasticsearch/client/transport/NodeDisconnectIT.java delete mode 100644 server/src/test/java/org/elasticsearch/client/transport/TransportClientHeadersTests.java delete mode 100644 server/src/test/java/org/elasticsearch/client/transport/TransportClientIT.java delete mode 100644 server/src/test/java/org/elasticsearch/client/transport/TransportClientNodesServiceTests.java delete mode 100644 server/src/test/java/org/elasticsearch/client/transport/TransportClientRetryIT.java delete mode 100644 server/src/test/java/org/elasticsearch/client/transport/TransportClientTests.java delete mode 100644 server/src/test/java/org/elasticsearch/cluster/ClusterStateIT.java diff --git a/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/MultiSearchTemplateIT.java b/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/MultiSearchTemplateIT.java index 0463069609d4c..d7cdea7bd73f9 100644 --- a/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/MultiSearchTemplateIT.java +++ b/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/MultiSearchTemplateIT.java @@ -48,11 +48,6 @@ protected Collection> nodePlugins() { return Collections.singleton(MustachePlugin.class); } - @Override - protected Collection> transportClientPlugins() { - return nodePlugins(); - } - public void testBasic() throws Exception { createIndex("msearch"); final int numDocs = randomIntBetween(10, 100); diff --git a/modules/parent-join/src/test/java/org/elasticsearch/join/query/ParentChildTestCase.java b/modules/parent-join/src/test/java/org/elasticsearch/join/query/ParentChildTestCase.java index 87b16bc448ef1..40d46a88fe2a6 100644 --- a/modules/parent-join/src/test/java/org/elasticsearch/join/query/ParentChildTestCase.java +++ b/modules/parent-join/src/test/java/org/elasticsearch/join/query/ParentChildTestCase.java @@ -50,11 +50,6 @@ protected Collection> nodePlugins() { return Arrays.asList(InternalSettingsPlugin.class, ParentJoinPlugin.class); } - @Override - protected Collection> transportClientPlugins() { - return nodePlugins(); - } - @Override public Settings indexSettings() { Settings.Builder builder = Settings.builder().put(super.indexSettings()) diff --git a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RankEvalRequestIT.java b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RankEvalRequestIT.java index 9fb10251f6325..a132ee5cb5938 100644 --- a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RankEvalRequestIT.java +++ b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RankEvalRequestIT.java @@ -51,11 +51,6 @@ public class RankEvalRequestIT extends ESIntegTestCase { private static final String INDEX_ALIAS = "alias0"; private static final int RELEVANT_RATING_1 = 1; - @Override - protected Collection> transportClientPlugins() { - return Arrays.asList(RankEvalPlugin.class); - } - @Override protected Collection> nodePlugins() { return Arrays.asList(RankEvalPlugin.class); diff --git a/modules/reindex/src/test/java/org/elasticsearch/client/documentation/ReindexDocumentationIT.java b/modules/reindex/src/test/java/org/elasticsearch/client/documentation/ReindexDocumentationIT.java index 8def6dbb40316..7667fbbcf89f8 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/client/documentation/ReindexDocumentationIT.java +++ b/modules/reindex/src/test/java/org/elasticsearch/client/documentation/ReindexDocumentationIT.java @@ -78,11 +78,6 @@ protected Collection> nodePlugins() { return Arrays.asList(ReindexPlugin.class, ReindexCancellationPlugin.class); } - @Override - protected Collection> transportClientPlugins() { - return Collections.singletonList(ReindexPlugin.class); - } - @Before public void setup() { client().admin().indices().prepareCreate(INDEX_NAME).get(); diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexTestCase.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexTestCase.java index 01b5539a23c48..2b53f2842f164 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexTestCase.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexTestCase.java @@ -41,11 +41,6 @@ protected Collection> nodePlugins() { return Arrays.asList(ReindexPlugin.class); } - @Override - protected Collection> transportClientPlugins() { - return Arrays.asList(ReindexPlugin.class); - } - protected ReindexRequestBuilder reindex() { return new ReindexRequestBuilder(client(), ReindexAction.INSTANCE); } diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RetryTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RetryTests.java index 916c18e38f7f5..4a0813a6a7486 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RetryTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RetryTests.java @@ -75,13 +75,6 @@ protected Collection> nodePlugins() { Netty4Plugin.class); } - @Override - protected Collection> transportClientPlugins() { - return Arrays.asList( - ReindexPlugin.class, - Netty4Plugin.class); - } - /** * Lower the queue sizes to be small enough that both bulk and searches will time out and have to be retried. */ diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/ESNetty4IntegTestCase.java b/modules/transport-netty4/src/test/java/org/elasticsearch/ESNetty4IntegTestCase.java index b38cda76c6980..9d8baf9e3f871 100644 --- a/modules/transport-netty4/src/test/java/org/elasticsearch/ESNetty4IntegTestCase.java +++ b/modules/transport-netty4/src/test/java/org/elasticsearch/ESNetty4IntegTestCase.java @@ -52,21 +52,8 @@ protected Settings nodeSettings(int nodeOrdinal) { return builder.build(); } - @Override - protected Settings transportClientSettings() { - Settings.Builder builder = Settings.builder().put(super.transportClientSettings()); - builder.put(NetworkModule.TRANSPORT_TYPE_KEY, Netty4Plugin.NETTY_TRANSPORT_NAME); - return builder.build(); - } - @Override protected Collection> nodePlugins() { return Arrays.asList(Netty4Plugin.class); } - - @Override - protected Collection> transportClientPlugins() { - return Arrays.asList(Netty4Plugin.class); - } - } diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpRequestSizeLimitIT.java b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpRequestSizeLimitIT.java index 52732d5bc1df4..e45f6de92d5fe 100644 --- a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpRequestSizeLimitIT.java +++ b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpRequestSizeLimitIT.java @@ -45,7 +45,7 @@ * As the same setting is also used to limit in-flight requests on transport level, we avoid transport messages by forcing * a single node "cluster". We also force test infrastructure to use the node client instead of the transport client for the same reason. */ -@ClusterScope(scope = Scope.TEST, supportsDedicatedMasters = false, numClientNodes = 0, numDataNodes = 1, transportClientRatio = 0) +@ClusterScope(scope = Scope.TEST, supportsDedicatedMasters = false, numClientNodes = 0, numDataNodes = 1) public class Netty4HttpRequestSizeLimitIT extends ESNetty4IntegTestCase { private static final ByteSizeValue LIMIT = new ByteSizeValue(2, ByteSizeUnit.KB); diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/rest/discovery/Zen2RestApiIT.java b/modules/transport-netty4/src/test/java/org/elasticsearch/rest/discovery/Zen2RestApiIT.java index 83d4c3419ef64..fcb8e75700d0c 100644 --- a/modules/transport-netty4/src/test/java/org/elasticsearch/rest/discovery/Zen2RestApiIT.java +++ b/modules/transport-netty4/src/test/java/org/elasticsearch/rest/discovery/Zen2RestApiIT.java @@ -47,7 +47,7 @@ // These tests are here today so they have access to a proper REST client. They cannot be in :server:integTest since the REST client needs a // proper transport implementation, and they cannot be REST tests today since they need to restart nodes. When #35599 and friends land we // should be able to move these tests to run against a proper cluster instead. TODO do this. -@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, transportClientRatio = 0, autoMinMasterNodes = false) +@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, autoMinMasterNodes = false) public class Zen2RestApiIT extends ESNetty4IntegTestCase { @Override diff --git a/plugins/discovery-azure-classic/src/test/java/org/elasticsearch/discovery/azure/classic/AzureSimpleTests.java b/plugins/discovery-azure-classic/src/test/java/org/elasticsearch/discovery/azure/classic/AzureSimpleTests.java index 1a36e6c55bd0e..3c174de172e2a 100644 --- a/plugins/discovery-azure-classic/src/test/java/org/elasticsearch/discovery/azure/classic/AzureSimpleTests.java +++ b/plugins/discovery-azure-classic/src/test/java/org/elasticsearch/discovery/azure/classic/AzureSimpleTests.java @@ -29,7 +29,6 @@ @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, - transportClientRatio = 0.0, numClientNodes = 0) public class AzureSimpleTests extends AbstractAzureComputeServiceTestCase { diff --git a/plugins/discovery-azure-classic/src/test/java/org/elasticsearch/discovery/azure/classic/AzureTwoStartedNodesTests.java b/plugins/discovery-azure-classic/src/test/java/org/elasticsearch/discovery/azure/classic/AzureTwoStartedNodesTests.java index c454569b7f260..79fced1801c15 100644 --- a/plugins/discovery-azure-classic/src/test/java/org/elasticsearch/discovery/azure/classic/AzureTwoStartedNodesTests.java +++ b/plugins/discovery-azure-classic/src/test/java/org/elasticsearch/discovery/azure/classic/AzureTwoStartedNodesTests.java @@ -27,7 +27,6 @@ @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, - transportClientRatio = 0.0, numClientNodes = 0) public class AzureTwoStartedNodesTests extends AbstractAzureComputeServiceTestCase { diff --git a/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryUpdateSettingsTests.java b/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryUpdateSettingsTests.java index 9802479fe84d3..f9d576874c510 100644 --- a/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryUpdateSettingsTests.java +++ b/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryUpdateSettingsTests.java @@ -34,7 +34,7 @@ * starting. * This test requires AWS to run. */ -@ClusterScope(scope = Scope.TEST, numDataNodes = 0, numClientNodes = 0, transportClientRatio = 0.0) +@ClusterScope(scope = Scope.TEST, numDataNodes = 0, numClientNodes = 0) public class Ec2DiscoveryUpdateSettingsTests extends AbstractAwsTestCase { public void testMinimumMasterNodesStart() { Settings nodeSettings = Settings.builder() diff --git a/plugins/transport-nio/src/test/java/org/elasticsearch/NioIntegTestCase.java b/plugins/transport-nio/src/test/java/org/elasticsearch/NioIntegTestCase.java index 9ed64213cbf5f..6de96d17fe239 100644 --- a/plugins/transport-nio/src/test/java/org/elasticsearch/NioIntegTestCase.java +++ b/plugins/transport-nio/src/test/java/org/elasticsearch/NioIntegTestCase.java @@ -52,21 +52,8 @@ protected Settings nodeSettings(int nodeOrdinal) { return builder.build(); } - @Override - protected Settings transportClientSettings() { - Settings.Builder builder = Settings.builder().put(super.transportClientSettings()); - builder.put(NetworkModule.TRANSPORT_TYPE_KEY, NioTransportPlugin.NIO_TRANSPORT_NAME); - return builder.build(); - } - @Override protected Collection> nodePlugins() { return Collections.singletonList(NioTransportPlugin.class); } - - @Override - protected Collection> transportClientPlugins() { - return Collections.singletonList(NioTransportPlugin.class); - } - } diff --git a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/ContextAndHeaderTransportIT.java b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/ContextAndHeaderTransportIT.java deleted file mode 100644 index 47cce87c4b959..0000000000000 --- a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/ContextAndHeaderTransportIT.java +++ /dev/null @@ -1,347 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.http; - -import org.apache.lucene.util.SetOnce; -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.ActionRequest; -import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; -import org.elasticsearch.action.get.GetRequest; -import org.elasticsearch.action.index.IndexRequest; -import org.elasticsearch.action.search.SearchRequest; -import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.action.support.ActionFilter; -import org.elasticsearch.action.termvectors.MultiTermVectorsRequest; -import org.elasticsearch.client.Client; -import org.elasticsearch.client.Request; -import org.elasticsearch.client.RequestOptions; -import org.elasticsearch.client.Response; -import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.NamedXContentRegistry; -import org.elasticsearch.common.xcontent.XContentType; -import org.elasticsearch.env.Environment; -import org.elasticsearch.env.NodeEnvironment; -import org.elasticsearch.index.query.BoolQueryBuilder; -import org.elasticsearch.index.query.GeoShapeQueryBuilder; -import org.elasticsearch.index.query.MoreLikeThisQueryBuilder; -import org.elasticsearch.index.query.MoreLikeThisQueryBuilder.Item; -import org.elasticsearch.index.query.QueryBuilders; -import org.elasticsearch.index.query.TermsQueryBuilder; -import org.elasticsearch.indices.TermsLookup; -import org.elasticsearch.plugins.ActionPlugin; -import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.script.ScriptService; -import org.elasticsearch.test.ESIntegTestCase.ClusterScope; -import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.watcher.ResourceWatcherService; -import org.junit.After; -import org.junit.Before; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Collection; -import java.util.Collections; -import java.util.List; -import java.util.Locale; -import java.util.Map; -import java.util.concurrent.CopyOnWriteArrayList; - -import static java.util.Collections.singletonList; -import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS; -import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; -import static org.elasticsearch.test.ESIntegTestCase.Scope.SUITE; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.greaterThan; -import static org.hamcrest.Matchers.hasSize; -import static org.hamcrest.Matchers.is; - -@ClusterScope(scope = SUITE) -public class ContextAndHeaderTransportIT extends HttpSmokeTestCase { - private static final List requests = new CopyOnWriteArrayList<>(); - private static final String CUSTOM_HEADER = "SomeCustomHeader"; - private String randomHeaderValue = randomAlphaOfLength(20); - private String queryIndex = "query-" + randomAlphaOfLength(10).toLowerCase(Locale.ROOT); - private String lookupIndex = "lookup-" + randomAlphaOfLength(10).toLowerCase(Locale.ROOT); - - @Override - protected Settings nodeSettings(int nodeOrdinal) { - return Settings.builder() - .put(super.nodeSettings(nodeOrdinal)) - .build(); - } - - @Override - protected Collection> nodePlugins() { - ArrayList> plugins = new ArrayList<>(super.nodePlugins()); - plugins.add(ActionLoggingPlugin.class); - plugins.add(CustomHeadersPlugin.class); - return plugins; - } - - @Before - public void createIndices() throws Exception { - String mapping = Strings.toString(jsonBuilder().startObject().startObject("type") - .startObject("properties") - .startObject("location").field("type", "geo_shape").endObject() - .startObject("name").field("type", "text").endObject() - .endObject() - .endObject().endObject()); - - Settings settings = Settings.builder() - .put(indexSettings()) - .put(SETTING_NUMBER_OF_SHARDS, 1) // A single shard will help to keep the tests repeatable. - .build(); - assertAcked(transportClient().admin().indices().prepareCreate(lookupIndex) - .setSettings(settings).addMapping("type", mapping, XContentType.JSON)); - assertAcked(transportClient().admin().indices().prepareCreate(queryIndex) - .setSettings(settings).addMapping("type", mapping, XContentType.JSON)); - ensureGreen(queryIndex, lookupIndex); - requests.clear(); - } - - @After - public void checkAllRequestsContainHeaders() { - assertRequestsContainHeader(IndexRequest.class); - assertRequestsContainHeader(RefreshRequest.class); - } - - public void testThatTermsLookupGetRequestContainsContextAndHeaders() throws Exception { - transportClient().prepareIndex(lookupIndex, "type", "1") - .setSource(jsonBuilder().startObject().array("followers", "foo", "bar", "baz").endObject()).get(); - transportClient().prepareIndex(queryIndex, "type", "1") - .setSource(jsonBuilder().startObject().field("username", "foo").endObject()).get(); - transportClient().admin().indices().prepareRefresh(queryIndex, lookupIndex).get(); - - TermsLookup termsLookup = new TermsLookup(lookupIndex, "type", "1", "followers"); - TermsQueryBuilder termsLookupFilterBuilder = QueryBuilders.termsLookupQuery("username", termsLookup); - BoolQueryBuilder queryBuilder = QueryBuilders.boolQuery().must(QueryBuilders.matchAllQuery()).must(termsLookupFilterBuilder); - - SearchResponse searchResponse = transportClient() - .prepareSearch(queryIndex) - .setQuery(queryBuilder) - .get(); - assertNoFailures(searchResponse); - assertHitCount(searchResponse, 1); - - assertGetRequestsContainHeaders(); - } - - - - public void testThatGeoShapeQueryGetRequestContainsContextAndHeaders() throws Exception { - transportClient().prepareIndex(lookupIndex, "type", "1").setSource(jsonBuilder().startObject() - .field("name", "Munich Suburban Area") - .startObject("location") - .field("type", "polygon") - .startArray("coordinates").startArray() - .startArray().value(11.34).value(48.25).endArray() - .startArray().value(11.68).value(48.25).endArray() - .startArray().value(11.65).value(48.06).endArray() - .startArray().value(11.37).value(48.13).endArray() - .startArray().value(11.34).value(48.25).endArray() // close the polygon - .endArray().endArray() - .endObject() - .endObject()) - .get(); - // second document - transportClient().prepareIndex(queryIndex, "type", "1").setSource(jsonBuilder().startObject() - .field("name", "Munich Center") - .startObject("location") - .field("type", "point") - .startArray("coordinates").value(11.57).value(48.13).endArray() - .endObject() - .endObject()) - .get(); - transportClient().admin().indices().prepareRefresh(lookupIndex, queryIndex).get(); - - GeoShapeQueryBuilder queryBuilder = QueryBuilders.geoShapeQuery("location", "1") - .indexedShapeIndex(lookupIndex) - .indexedShapePath("location"); - - SearchResponse searchResponse = transportClient() - .prepareSearch(queryIndex) - .setQuery(queryBuilder) - .get(); - assertNoFailures(searchResponse); - assertHitCount(searchResponse, 1); - assertThat(requests, hasSize(greaterThan(0))); - - assertGetRequestsContainHeaders(); - } - - public void testThatMoreLikeThisQueryMultiTermVectorRequestContainsContextAndHeaders() throws Exception { - transportClient().prepareIndex(lookupIndex, "type", "1") - .setSource(jsonBuilder().startObject().field("name", "Star Wars - The new republic").endObject()) - .get(); - transportClient().prepareIndex(queryIndex, "type", "1") - .setSource(jsonBuilder().startObject().field("name", "Jar Jar Binks - A horrible mistake").endObject()) - .get(); - transportClient().prepareIndex(queryIndex, "type", "2") - .setSource(jsonBuilder().startObject().field("name", "Star Wars - Return of the jedi").endObject()) - .get(); - transportClient().admin().indices().prepareRefresh(lookupIndex, queryIndex).get(); - - MoreLikeThisQueryBuilder moreLikeThisQueryBuilder = QueryBuilders.moreLikeThisQuery(new String[]{"name"}, null, - new Item[]{new Item(lookupIndex, "type", "1")}) - .minTermFreq(1) - .minDocFreq(1); - - SearchResponse searchResponse = transportClient() - .prepareSearch(queryIndex) - .setQuery(moreLikeThisQueryBuilder) - .get(); - assertNoFailures(searchResponse); - assertHitCount(searchResponse, 1); - - assertRequestsContainHeader(MultiTermVectorsRequest.class); - } - - public void testThatRelevantHttpHeadersBecomeRequestHeaders() throws IOException { - final String IRRELEVANT_HEADER = "SomeIrrelevantHeader"; - Request request = new Request("GET", "/" + queryIndex + "/_search"); - RequestOptions.Builder options = request.getOptions().toBuilder(); - options.addHeader(CUSTOM_HEADER, randomHeaderValue); - options.addHeader(IRRELEVANT_HEADER, randomHeaderValue); - request.setOptions(options); - Response response = getRestClient().performRequest(request); - assertThat(response.getStatusLine().getStatusCode(), equalTo(200)); - List searchRequests = getRequests(SearchRequest.class); - assertThat(searchRequests, hasSize(greaterThan(0))); - for (RequestAndHeaders requestAndHeaders : searchRequests) { - assertThat(requestAndHeaders.headers.containsKey(CUSTOM_HEADER), is(true)); - // was not specified, thus is not included - assertThat(requestAndHeaders.headers.containsKey(IRRELEVANT_HEADER), is(false)); - } - } - - private List getRequests(Class clazz) { - List results = new ArrayList<>(); - for (RequestAndHeaders request : requests) { - if (request.request.getClass().equals(clazz)) { - results.add(request); - } - } - - return results; - } - - private void assertRequestsContainHeader(Class clazz) { - List classRequests = getRequests(clazz); - for (RequestAndHeaders request : classRequests) { - assertRequestContainsHeader(request.request, request.headers); - } - } - - private void assertGetRequestsContainHeaders() { - assertGetRequestsContainHeaders(this.lookupIndex); - } - - private void assertGetRequestsContainHeaders(String index) { - List getRequests = getRequests(GetRequest.class); - assertThat(getRequests, hasSize(greaterThan(0))); - - for (RequestAndHeaders request : getRequests) { - if (!((GetRequest)request.request).index().equals(index)) { - continue; - } - assertRequestContainsHeader(request.request, request.headers); - } - } - - private void assertRequestContainsHeader(ActionRequest request, Map context) { - String msg = String.format(Locale.ROOT, "Expected header %s to be in request %s", CUSTOM_HEADER, request.getClass().getName()); - if (request instanceof IndexRequest) { - IndexRequest indexRequest = (IndexRequest) request; - msg = String.format(Locale.ROOT, "Expected header %s to be in index request %s/%s/%s", CUSTOM_HEADER, - indexRequest.index(), indexRequest.type(), indexRequest.id()); - } - assertThat(msg, context.containsKey(CUSTOM_HEADER), is(true)); - assertThat(context.get(CUSTOM_HEADER).toString(), is(randomHeaderValue)); - } - - /** - * a transport client that adds our random header - */ - private Client transportClient() { - return internalCluster().transportClient().filterWithHeader(Collections.singletonMap(CUSTOM_HEADER, randomHeaderValue)); - } - - public static class ActionLoggingPlugin extends Plugin implements ActionPlugin { - - private final SetOnce loggingFilter = new SetOnce<>(); - - @Override - public Collection createComponents(Client client, ClusterService clusterService, ThreadPool threadPool, - ResourceWatcherService resourceWatcherService, ScriptService scriptService, - NamedXContentRegistry xContentRegistry, Environment environment, - NodeEnvironment nodeEnvironment, NamedWriteableRegistry namedWriteableRegistry) { - loggingFilter.set(new LoggingFilter(threadPool)); - return Collections.emptyList(); - } - - @Override - public List getActionFilters() { - return singletonList(loggingFilter.get()); - } - - } - - public static class LoggingFilter extends ActionFilter.Simple { - - private final ThreadPool threadPool; - - public LoggingFilter(ThreadPool pool) { - this.threadPool = pool; - } - - @Override - public int order() { - return 999; - } - - @Override - protected boolean apply(String action, ActionRequest request, ActionListener listener) { - requests.add(new RequestAndHeaders(threadPool.getThreadContext().getHeaders(), request)); - return true; - } - } - - private static class RequestAndHeaders { - final Map headers; - final ActionRequest request; - - private RequestAndHeaders(Map headers, ActionRequest request) { - this.headers = headers; - this.request = request; - } - } - - public static class CustomHeadersPlugin extends Plugin implements ActionPlugin { - public Collection getRestHeaders() { - return Collections.singleton(CUSTOM_HEADER); - } - } -} diff --git a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/HttpSmokeTestCase.java b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/HttpSmokeTestCase.java index b40c6f3a1b26f..654cfb5e47129 100644 --- a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/HttpSmokeTestCase.java +++ b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/HttpSmokeTestCase.java @@ -82,19 +82,6 @@ protected Collection> nodePlugins() { return Arrays.asList(getTestTransportPlugin(), Netty4Plugin.class, NioTransportPlugin.class); } - @Override - protected Collection> transportClientPlugins() { - return Arrays.asList(getTestTransportPlugin(), Netty4Plugin.class, NioTransportPlugin.class); - } - - @Override - protected Settings transportClientSettings() { - return Settings.builder() - .put(super.transportClientSettings()) - .put(NetworkModule.TRANSPORT_TYPE_KEY, clientTypeKey) - .build(); - } - @Override protected boolean ignoreExternalCluster() { return true; diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TasksIT.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TasksIT.java index 179fd82cda020..fc758788e6197 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TasksIT.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TasksIT.java @@ -105,7 +105,7 @@ *

    * We need at least 2 nodes so we have a master node a non-master node */ -@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.SUITE, minNumDataNodes = 2, transportClientRatio = 0.0) +@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.SUITE, minNumDataNodes = 2) public class TasksIT extends ESIntegTestCase { private Map, RecordingTaskManagerListener> listeners = new HashMap<>(); @@ -122,11 +122,6 @@ protected Collection> nodePlugins() { return Arrays.asList(MockTransportService.TestPlugin.class, TestTaskPlugin.class); } - @Override - protected Collection> transportClientPlugins() { - return nodePlugins(); - } - @Override protected Settings nodeSettings(int nodeOrdinal) { return Settings.builder() diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/exists/IndicesExistsIT.java b/server/src/test/java/org/elasticsearch/action/admin/indices/exists/IndicesExistsIT.java index 7cfc2ea1f280d..51d3ecc89afc7 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/exists/IndicesExistsIT.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/exists/IndicesExistsIT.java @@ -30,8 +30,7 @@ import java.io.IOException; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertThrows; -@ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, numClientNodes = 0, transportClientRatio = 0.0, - autoMinMasterNodes = false) +@ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, numClientNodes = 0, autoMinMasterNodes = false) public class IndicesExistsIT extends ESIntegTestCase { public void testIndexExistsWithBlocksInPlace() throws IOException { diff --git a/server/src/test/java/org/elasticsearch/client/transport/FailAndRetryMockTransport.java b/server/src/test/java/org/elasticsearch/client/transport/FailAndRetryMockTransport.java deleted file mode 100644 index 5149a0837e908..0000000000000 --- a/server/src/test/java/org/elasticsearch/client/transport/FailAndRetryMockTransport.java +++ /dev/null @@ -1,235 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.client.transport; - -import org.elasticsearch.Version; -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.admin.cluster.node.liveness.LivenessResponse; -import org.elasticsearch.action.admin.cluster.node.liveness.TransportLivenessAction; -import org.elasticsearch.action.admin.cluster.state.ClusterStateAction; -import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; -import org.elasticsearch.cluster.ClusterName; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.common.component.Lifecycle; -import org.elasticsearch.common.component.LifecycleListener; -import org.elasticsearch.common.lease.Releasable; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.BoundTransportAddress; -import org.elasticsearch.common.transport.TransportAddress; -import org.elasticsearch.common.util.Maps; -import org.elasticsearch.transport.CloseableConnection; -import org.elasticsearch.transport.ConnectTransportException; -import org.elasticsearch.transport.ConnectionProfile; -import org.elasticsearch.transport.RequestHandlerRegistry; -import org.elasticsearch.transport.Transport; -import org.elasticsearch.transport.TransportException; -import org.elasticsearch.transport.TransportMessageListener; -import org.elasticsearch.transport.TransportRequest; -import org.elasticsearch.transport.TransportRequestOptions; -import org.elasticsearch.transport.TransportResponse; -import org.elasticsearch.transport.TransportResponseHandler; -import org.elasticsearch.transport.TransportService; -import org.elasticsearch.transport.TransportStats; - -import java.net.UnknownHostException; -import java.util.Collections; -import java.util.Map; -import java.util.Random; -import java.util.Set; -import java.util.concurrent.CopyOnWriteArraySet; -import java.util.concurrent.atomic.AtomicInteger; - -abstract class FailAndRetryMockTransport implements Transport { - - private final Random random; - private final ClusterName clusterName; - private volatile Map requestHandlers = Collections.emptyMap(); - private final Object requestHandlerMutex = new Object(); - private final ResponseHandlers responseHandlers = new ResponseHandlers(); - private TransportMessageListener listener; - - private boolean connectMode = true; - - private final AtomicInteger connectTransportExceptions = new AtomicInteger(); - private final AtomicInteger failures = new AtomicInteger(); - private final AtomicInteger successes = new AtomicInteger(); - private final Set triedNodes = new CopyOnWriteArraySet<>(); - - FailAndRetryMockTransport(Random random, ClusterName clusterName) { - this.random = new Random(random.nextLong()); - this.clusterName = clusterName; - } - - protected abstract ClusterState getMockClusterState(DiscoveryNode node); - - @Override - public Releasable openConnection(DiscoveryNode node, ConnectionProfile profile, ActionListener connectionListener) { - connectionListener.onResponse(new CloseableConnection() { - - @Override - public DiscoveryNode getNode() { - return node; - } - - @Override - public void sendRequest(long requestId, String action, TransportRequest request, TransportRequestOptions options) - throws TransportException { - //we make sure that nodes get added to the connected ones when calling addTransportAddress, by returning proper nodes info - if (connectMode) { - if (TransportLivenessAction.NAME.equals(action)) { - TransportResponseHandler transportResponseHandler = responseHandlers.onResponseReceived(requestId, listener); - ClusterName clusterName = ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY); - transportResponseHandler.handleResponse(new LivenessResponse(clusterName, node)); - } else if (ClusterStateAction.NAME.equals(action)) { - TransportResponseHandler transportResponseHandler = responseHandlers.onResponseReceived(requestId, listener); - ClusterState clusterState = getMockClusterState(node); - transportResponseHandler.handleResponse(new ClusterStateResponse(clusterName, clusterState, false)); - } else if (TransportService.HANDSHAKE_ACTION_NAME.equals(action)) { - TransportResponseHandler transportResponseHandler = responseHandlers.onResponseReceived(requestId, listener); - Version version = node.getVersion(); - transportResponseHandler.handleResponse(new TransportService.HandshakeResponse(node, clusterName, version)); - - } else { - throw new UnsupportedOperationException("Mock transport does not understand action " + action); - } - return; - } - - //once nodes are connected we'll just return errors for each sendRequest call - triedNodes.add(node); - - if (random.nextInt(100) > 10) { - connectTransportExceptions.incrementAndGet(); - throw new ConnectTransportException(node, "node not available"); - } else { - if (random.nextBoolean()) { - failures.incrementAndGet(); - //throw whatever exception that is not a subclass of ConnectTransportException - throw new IllegalStateException(); - } else { - TransportResponseHandler transportResponseHandler = responseHandlers.onResponseReceived(requestId, listener); - if (random.nextBoolean()) { - successes.incrementAndGet(); - transportResponseHandler.handleResponse(newResponse()); - } else { - failures.incrementAndGet(); - transportResponseHandler.handleException(new TransportException("transport exception")); - } - } - } - } - }); - - return () -> {}; - } - - protected abstract Response newResponse(); - - public void endConnectMode() { - this.connectMode = false; - } - - public int connectTransportExceptions() { - return connectTransportExceptions.get(); - } - - public int failures() { - return failures.get(); - } - - public int successes() { - return successes.get(); - } - - public Set triedNodes() { - return triedNodes; - } - - - @Override - public BoundTransportAddress boundAddress() { - return null; - } - - @Override - public TransportAddress[] addressesFromString(String address) throws UnknownHostException { - throw new UnsupportedOperationException(); - } - - @Override - public Lifecycle.State lifecycleState() { - return null; - } - - @Override - public void addLifecycleListener(LifecycleListener listener) { - throw new UnsupportedOperationException(); - } - - @Override - public void removeLifecycleListener(LifecycleListener listener) { - throw new UnsupportedOperationException(); - } - - @Override - public void start() {} - - @Override - public void stop() {} - - @Override - public void close() {} - - @Override - public Map profileBoundAddresses() { - return Collections.emptyMap(); - } - - @Override - public TransportStats getStats() { - throw new UnsupportedOperationException(); - } - - @Override - public void registerRequestHandler(RequestHandlerRegistry reg) { - synchronized (requestHandlerMutex) { - if (requestHandlers.containsKey(reg.getAction())) { - throw new IllegalArgumentException("transport handlers for action " + reg.getAction() + " is already registered"); - } - requestHandlers = Maps.copyMapWithAddedEntry(requestHandlers, reg.getAction(), reg); - } - } - @Override - public ResponseHandlers getResponseHandlers() { - return responseHandlers; - } - - @Override - public RequestHandlerRegistry getRequestHandler(String action) { - return requestHandlers.get(action); - } - - - @Override - public void setMessageListener(TransportMessageListener listener) { - this.listener = listener; - } -} diff --git a/server/src/test/java/org/elasticsearch/client/transport/NodeDisconnectIT.java b/server/src/test/java/org/elasticsearch/client/transport/NodeDisconnectIT.java deleted file mode 100644 index 6fa1848cc84b8..0000000000000 --- a/server/src/test/java/org/elasticsearch/client/transport/NodeDisconnectIT.java +++ /dev/null @@ -1,83 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.elasticsearch.client.transport; - -import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.TransportAddress; -import org.elasticsearch.test.ESIntegTestCase; -import org.elasticsearch.test.ESIntegTestCase.ClusterScope; -import org.elasticsearch.test.ESIntegTestCase.Scope; -import org.elasticsearch.transport.MockTransportClient; -import org.elasticsearch.transport.TransportService; - -import java.io.IOException; -import java.util.Collections; -import java.util.HashSet; -import java.util.Set; -import java.util.stream.Collectors; - -import static org.elasticsearch.client.transport.TransportClient.CLIENT_TRANSPORT_NODES_SAMPLER_INTERVAL; - -@ClusterScope(scope = Scope.TEST) -public class NodeDisconnectIT extends ESIntegTestCase { - - public void testNotifyOnDisconnect() throws IOException { - internalCluster().ensureAtLeastNumDataNodes(2); - - final Set disconnectedNodes = Collections.synchronizedSet(new HashSet<>()); - try (TransportClient client = new MockTransportClient(Settings.builder() - .put("cluster.name", internalCluster().getClusterName()) - .put(CLIENT_TRANSPORT_NODES_SAMPLER_INTERVAL.getKey(), "1h") // disable sniffing for better control - .build(), - Collections.emptySet(), (n, e) -> disconnectedNodes.add(n))) { - for (TransportService service : internalCluster().getInstances(TransportService.class)) { - client.addTransportAddress(service.boundAddress().publishAddress()); - } - internalCluster().stopRandomDataNode(); - for (int i = 0; i < 20; i++) { // fire up requests such that we hit the node and pass it to the listener - client.admin().cluster().prepareState().get(); - } - assertEquals(1, disconnectedNodes.size()); - } - assertEquals(1, disconnectedNodes.size()); - } - - public void testNotifyOnDisconnectInSniffer() throws IOException { - internalCluster().ensureAtLeastNumDataNodes(2); - - final Set disconnectedNodes = Collections.synchronizedSet(new HashSet<>()); - try (TransportClient client = new MockTransportClient(Settings.builder() - .put("cluster.name", internalCluster().getClusterName()).build(), Collections.emptySet(), (n, e) -> disconnectedNodes.add(n))) { - int numNodes = 0; - for (TransportService service : internalCluster().getInstances(TransportService.class)) { - numNodes++; - client.addTransportAddress(service.boundAddress().publishAddress()); - } - Set discoveryNodes = client.connectedNodes().stream().map(n -> n.getAddress()).collect(Collectors.toSet()); - assertEquals(numNodes, discoveryNodes.size()); - assertEquals(0, disconnectedNodes.size()); - internalCluster().stopRandomDataNode(); - client.getNodesService().doSample(); - assertEquals(1, disconnectedNodes.size()); - assertTrue(discoveryNodes.contains(disconnectedNodes.stream().findAny().get().getAddress())); - } - assertEquals(1, disconnectedNodes.size()); - } -} diff --git a/server/src/test/java/org/elasticsearch/client/transport/TransportClientHeadersTests.java b/server/src/test/java/org/elasticsearch/client/transport/TransportClientHeadersTests.java deleted file mode 100644 index e63f3a1d59a29..0000000000000 --- a/server/src/test/java/org/elasticsearch/client/transport/TransportClientHeadersTests.java +++ /dev/null @@ -1,190 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.client.transport; - -import org.elasticsearch.Version; -import org.elasticsearch.action.Action; -import org.elasticsearch.action.admin.cluster.node.liveness.LivenessResponse; -import org.elasticsearch.action.admin.cluster.node.liveness.TransportLivenessAction; -import org.elasticsearch.action.admin.cluster.state.ClusterStateAction; -import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; -import org.elasticsearch.client.AbstractClientHeadersTestCase; -import org.elasticsearch.client.Client; -import org.elasticsearch.cluster.ClusterName; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.cluster.node.DiscoveryNodes; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.common.network.NetworkModule; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.TransportAddress; -import org.elasticsearch.common.util.concurrent.ThreadContext; -import org.elasticsearch.env.Environment; -import org.elasticsearch.plugins.NetworkPlugin; -import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.plugins.PluginsService; -import org.elasticsearch.test.transport.MockTransportService; -import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.MockTransportClient; -import org.elasticsearch.transport.Transport; -import org.elasticsearch.transport.TransportException; -import org.elasticsearch.transport.TransportInterceptor; -import org.elasticsearch.transport.TransportRequest; -import org.elasticsearch.transport.TransportRequestHandler; -import org.elasticsearch.transport.TransportRequestOptions; -import org.elasticsearch.transport.TransportResponse; -import org.elasticsearch.transport.TransportResponseHandler; -import org.elasticsearch.transport.TransportService; - -import java.util.Collections; -import java.util.List; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.TimeUnit; - -public class TransportClientHeadersTests extends AbstractClientHeadersTestCase { - - private MockTransportService transportService; - - @Override - public void tearDown() throws Exception { - try { - // stop this first before we bubble up since - // transportService uses the threadpool that super.tearDown will close - transportService.stop(); - transportService.close(); - } finally { - super.tearDown(); - } - - } - - @Override - protected Client buildClient(Settings headersSettings, Action[] testedActions) { - transportService = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool, null); - transportService.start(); - transportService.acceptIncomingRequests(); - String transport = getTestTransportType(); - TransportClient client = new MockTransportClient(Settings.builder() - .put("client.transport.sniff", false) - .put("cluster.name", "cluster1") - .put("node.name", "transport_client_" + this.getTestName()) - .put(NetworkModule.TRANSPORT_TYPE_SETTING.getKey(), transport) - .put(headersSettings) - .build(), InternalTransportServiceInterceptor.TestPlugin.class); - InternalTransportServiceInterceptor.TestPlugin plugin = client.injector.getInstance(PluginsService.class) - .filterPlugins(InternalTransportServiceInterceptor.TestPlugin.class).stream().findFirst().get(); - plugin.instance.threadPool = client.threadPool(); - plugin.instance.address = transportService.boundAddress().publishAddress(); - client.addTransportAddress(transportService.boundAddress().publishAddress()); - return client; - } - - public void testWithSniffing() throws Exception { - String transport = getTestTransportType(); - try (TransportClient client = new MockTransportClient( - Settings.builder() - .put("client.transport.sniff", true) - .put("cluster.name", "cluster1") - .put("node.name", "transport_client_" + this.getTestName() + "_1") - .put("client.transport.nodes_sampler_interval", "1s") - .put(NetworkModule.TRANSPORT_TYPE_SETTING.getKey(), transport) - .put(HEADER_SETTINGS) - .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()).build(), - InternalTransportServiceInterceptor.TestPlugin.class)) { - InternalTransportServiceInterceptor.TestPlugin plugin = client.injector.getInstance(PluginsService.class) - .filterPlugins(InternalTransportServiceInterceptor.TestPlugin.class).stream().findFirst().get(); - plugin.instance.threadPool = client.threadPool(); - plugin.instance.address = transportService.boundAddress().publishAddress(); - client.addTransportAddress(transportService.boundAddress().publishAddress()); - - if (!plugin.instance.clusterStateLatch.await(5, TimeUnit.SECONDS)) { - fail("takes way too long to get the cluster state"); - } - - assertEquals(1, client.connectedNodes().size()); - assertEquals(client.connectedNodes().get(0).getAddress(), transportService.boundAddress().publishAddress()); - } - } - - public static class InternalTransportServiceInterceptor implements TransportInterceptor { - - ThreadPool threadPool; - TransportAddress address; - - - public static class TestPlugin extends Plugin implements NetworkPlugin { - private InternalTransportServiceInterceptor instance = new InternalTransportServiceInterceptor(); - - @Override - public List getTransportInterceptors(NamedWriteableRegistry namedWriteableRegistry, - ThreadContext threadContext) { - return Collections.singletonList(new TransportInterceptor() { - @Override - public TransportRequestHandler interceptHandler(String action, String executor, - boolean forceExecution, - TransportRequestHandler actualHandler) { - return instance.interceptHandler(action, executor, forceExecution, actualHandler); - } - - @Override - public AsyncSender interceptSender(AsyncSender sender) { - return instance.interceptSender(sender); - } - }); - } - } - - final CountDownLatch clusterStateLatch = new CountDownLatch(1); - - @Override - public AsyncSender interceptSender(AsyncSender sender) { - return new AsyncSender() { - @Override - public void sendRequest(Transport.Connection connection, String action, - TransportRequest request, - TransportRequestOptions options, - TransportResponseHandler handler) { - final ClusterName clusterName = new ClusterName("cluster1"); - if (TransportLivenessAction.NAME.equals(action)) { - assertHeaders(threadPool); - ((TransportResponseHandler) handler).handleResponse( - new LivenessResponse(clusterName, connection.getNode())); - } else if (ClusterStateAction.NAME.equals(action)) { - assertHeaders(threadPool); - ClusterName cluster1 = clusterName; - ClusterState.Builder builder = ClusterState.builder(cluster1); - //the sniffer detects only data nodes - builder.nodes(DiscoveryNodes.builder().add(new DiscoveryNode("node_id", "someId", "some_ephemeralId_id", - address.address().getHostString(), address.getAddress(), address, Collections.emptyMap(), - Collections.singleton(DiscoveryNode.Role.DATA), Version.CURRENT))); - ((TransportResponseHandler) handler) - .handleResponse(new ClusterStateResponse(cluster1, builder.build(), false)); - clusterStateLatch.countDown(); - } else if (TransportService.HANDSHAKE_ACTION_NAME .equals(action)) { - ((TransportResponseHandler) handler).handleResponse( - new TransportService.HandshakeResponse(connection.getNode(), clusterName, connection.getNode().getVersion())); - } else { - handler.handleException(new TransportException("", new InternalException(action))); - } - } - }; - } - } -} diff --git a/server/src/test/java/org/elasticsearch/client/transport/TransportClientIT.java b/server/src/test/java/org/elasticsearch/client/transport/TransportClientIT.java deleted file mode 100644 index dab44b37a3ee9..0000000000000 --- a/server/src/test/java/org/elasticsearch/client/transport/TransportClientIT.java +++ /dev/null @@ -1,108 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.client.transport; - -import org.elasticsearch.Version; -import org.elasticsearch.client.Client; -import org.elasticsearch.cluster.coordination.ClusterBootstrapService; -import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.common.network.NetworkModule; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.TransportAddress; -import org.elasticsearch.env.Environment; -import org.elasticsearch.node.MockNode; -import org.elasticsearch.node.Node; -import org.elasticsearch.node.NodeValidationException; -import org.elasticsearch.test.ESIntegTestCase; -import org.elasticsearch.test.ESIntegTestCase.ClusterScope; -import org.elasticsearch.test.ESIntegTestCase.Scope; -import org.elasticsearch.test.MockHttpTransport; -import org.elasticsearch.transport.MockTransportClient; -import org.elasticsearch.transport.TransportService; - -import java.io.IOException; -import java.util.Arrays; - -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.greaterThanOrEqualTo; -import static org.hamcrest.Matchers.is; -import static org.hamcrest.Matchers.startsWith; - -@ClusterScope(scope = Scope.TEST, numDataNodes = 0, transportClientRatio = 1.0) -public class TransportClientIT extends ESIntegTestCase { - - public void testPickingUpChangesInDiscoveryNode() { - String nodeName = internalCluster().startNode(Settings.builder().put(Node.NODE_DATA_SETTING.getKey(), false)); - - TransportClient client = (TransportClient) internalCluster().client(nodeName); - assertThat(client.connectedNodes().get(0).isDataNode(), equalTo(false)); - - } - - public void testNodeVersionIsUpdated() throws IOException, NodeValidationException { - TransportClient client = (TransportClient) internalCluster().client(); - try (Node node = new MockNode(Settings.builder() - .put(internalCluster().getDefaultSettings()) - .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()) - .put("node.name", "testNodeVersionIsUpdated") - .put("transport.type", getTestTransportType()) - .put(Node.NODE_DATA_SETTING.getKey(), false) - .put("cluster.name", "foobar") - .putList(ClusterBootstrapService.INITIAL_MASTER_NODES_SETTING.getKey(), "testNodeVersionIsUpdated") - .build(), Arrays.asList(getTestTransportPlugin(), MockHttpTransport.TestPlugin.class)).start()) { - TransportAddress transportAddress = node.injector().getInstance(TransportService.class).boundAddress().publishAddress(); - client.addTransportAddress(transportAddress); - // since we force transport clients there has to be one node started that we connect to. - assertThat(client.connectedNodes().size(), greaterThanOrEqualTo(1)); - // connected nodes have updated version - for (DiscoveryNode discoveryNode : client.connectedNodes()) { - assertThat(discoveryNode.getVersion(), equalTo(Version.CURRENT)); - } - - for (DiscoveryNode discoveryNode : client.listedNodes()) { - assertThat(discoveryNode.getId(), startsWith("#transport#-")); - assertThat(discoveryNode.getVersion(), equalTo(Version.CURRENT.minimumCompatibilityVersion())); - } - - assertThat(client.filteredNodes().size(), equalTo(1)); - for (DiscoveryNode discoveryNode : client.filteredNodes()) { - assertThat(discoveryNode.getVersion(), equalTo(Version.CURRENT.minimumCompatibilityVersion())); - } - } - } - - public void testThatTransportClientSettingIsSet() { - TransportClient client = (TransportClient) internalCluster().client(); - Settings settings = client.injector.getInstance(Settings.class); - assertThat(Client.CLIENT_TYPE_SETTING_S.get(settings), is("transport")); - } - - public void testThatTransportClientSettingCannotBeChanged() { - String transport = getTestTransportType(); - Settings baseSettings = Settings.builder() - .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()) - .put(NetworkModule.TRANSPORT_TYPE_SETTING.getKey(), transport) - .build(); - try (TransportClient client = new MockTransportClient(baseSettings)) { - Settings settings = client.injector.getInstance(Settings.class); - assertThat(Client.CLIENT_TYPE_SETTING_S.get(settings), is("transport")); - } - } -} diff --git a/server/src/test/java/org/elasticsearch/client/transport/TransportClientNodesServiceTests.java b/server/src/test/java/org/elasticsearch/client/transport/TransportClientNodesServiceTests.java deleted file mode 100644 index 9e13dbaa89b18..0000000000000 --- a/server/src/test/java/org/elasticsearch/client/transport/TransportClientNodesServiceTests.java +++ /dev/null @@ -1,441 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.client.transport; - -import org.elasticsearch.Version; -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.admin.cluster.node.liveness.LivenessResponse; -import org.elasticsearch.action.admin.cluster.node.liveness.TransportLivenessAction; -import org.elasticsearch.action.admin.cluster.state.ClusterStateAction; -import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest; -import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; -import org.elasticsearch.action.support.PlainActionFuture; -import org.elasticsearch.cluster.ClusterName; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.cluster.node.DiscoveryNodes; -import org.elasticsearch.common.UUIDs; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.TransportAddress; -import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.node.Node; -import org.elasticsearch.tasks.Task; -import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.test.transport.MockTransportService; -import org.elasticsearch.threadpool.TestThreadPool; -import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.Transport; -import org.elasticsearch.transport.TransportChannel; -import org.elasticsearch.transport.TransportException; -import org.elasticsearch.transport.TransportInterceptor; -import org.elasticsearch.transport.TransportRequest; -import org.elasticsearch.transport.TransportRequestHandler; -import org.elasticsearch.transport.TransportRequestOptions; -import org.elasticsearch.transport.TransportResponse; -import org.elasticsearch.transport.TransportResponseHandler; -import org.hamcrest.CustomMatcher; - -import java.io.Closeable; -import java.io.IOException; -import java.util.ArrayList; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.concurrent.CopyOnWriteArrayList; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.atomic.AtomicReference; - -import static org.elasticsearch.test.transport.MockTransportService.createNewService; -import static org.hamcrest.CoreMatchers.equalTo; -import static org.hamcrest.CoreMatchers.everyItem; -import static org.hamcrest.CoreMatchers.hasItem; -import static org.hamcrest.CoreMatchers.instanceOf; -import static org.hamcrest.CoreMatchers.not; -import static org.hamcrest.CoreMatchers.nullValue; -import static org.hamcrest.CoreMatchers.startsWith; -import static org.hamcrest.Matchers.lessThanOrEqualTo; -import static org.hamcrest.Matchers.notNullValue; - -public class TransportClientNodesServiceTests extends ESTestCase { - - private static class TestIteration implements Closeable { - private final ThreadPool threadPool; - private final FailAndRetryMockTransport transport; - private final MockTransportService transportService; - private final TransportClientNodesService transportClientNodesService; - private final int listNodesCount; - private final int sniffNodesCount; - private TransportAddress livenessAddress = buildNewFakeTransportAddress(); - final List listNodeAddresses; - // map for each address of the nodes a cluster state request should respond with - final Map nodeMap; - - TestIteration() { - this(Settings.EMPTY); - } - - TestIteration(Settings extraSettings) { - Settings settings = Settings.builder().put(extraSettings).put("cluster.name", "test").build(); - ClusterName clusterName = ClusterName.CLUSTER_NAME_SETTING.get(settings); - List listNodes = new ArrayList<>(); - Map nodeMap = new HashMap<>(); - this.listNodesCount = randomIntBetween(1, 10); - int sniffNodesCount = 0; - for (int i = 0; i < listNodesCount; i++) { - TransportAddress transportAddress = buildNewFakeTransportAddress(); - listNodes.add(transportAddress); - DiscoveryNodes.Builder discoNodes = DiscoveryNodes.builder(); - discoNodes.add(new DiscoveryNode("#list-node#-" + transportAddress, transportAddress, Version.CURRENT)); - - if (TransportClient.CLIENT_TRANSPORT_SNIFF.get(settings)) { - final int numSniffNodes = randomIntBetween(0, 3); - for (int j = 0; j < numSniffNodes; ++j) { - TransportAddress sniffAddress = buildNewFakeTransportAddress(); - DiscoveryNode sniffNode = new DiscoveryNode("#sniff-node#-" + sniffAddress, sniffAddress, Version.CURRENT); - discoNodes.add(sniffNode); - // also allow sniffing of the sniff node itself - nodeMap.put(sniffAddress, DiscoveryNodes.builder().add(sniffNode).build()); - ++sniffNodesCount; - } - } - nodeMap.put(transportAddress, discoNodes.build()); - } - listNodeAddresses = listNodes; - this.nodeMap = nodeMap; - this.sniffNodesCount = sniffNodesCount; - - threadPool = new TestThreadPool("transport-client-nodes-service-tests"); - transport = new FailAndRetryMockTransport(random(), clusterName) { - @Override - public List getDefaultSeedAddresses() { - return Collections.emptyList(); - } - - @Override - protected TestResponse newResponse() { - return new TestResponse(); - } - - @Override - protected ClusterState getMockClusterState(DiscoveryNode node) { - return ClusterState.builder(clusterName).nodes(TestIteration.this.nodeMap.get(node.getAddress())).build(); - } - }; - - transportService = new MockTransportService(settings, transport, threadPool, new TransportInterceptor() { - @Override - public AsyncSender interceptSender(AsyncSender sender) { - return new AsyncSender() { - @Override - public void sendRequest(Transport.Connection connection, String action, - TransportRequest request, - TransportRequestOptions options, - TransportResponseHandler handler) { - if (TransportLivenessAction.NAME.equals(action)) { - sender.sendRequest(connection, action, request, options, wrapLivenessResponseHandler(handler, - connection.getNode(), clusterName)); - } else { - sender.sendRequest(connection, action, request, options, handler); - } - } - }; - } - }, (addr) -> { - assert addr == null : "boundAddress: " + addr; - return DiscoveryNode.createLocal(settings, buildNewFakeTransportAddress(), UUIDs.randomBase64UUID()); - }, null, Collections.emptySet()); - transportService.addNodeConnectedBehavior((connectionManager, discoveryNode) -> false); - transportService.addGetConnectionBehavior((connectionManager, discoveryNode) -> { - // The FailAndRetryTransport does not use the connection profile - PlainActionFuture future = PlainActionFuture.newFuture(); - transport.openConnection(discoveryNode, null, future); - return future.actionGet(); - }); - transportService.start(); - transportService.acceptIncomingRequests(); - transportClientNodesService = - new TransportClientNodesService(settings, transportService, threadPool, (a, b) -> {}); - transportClientNodesService.addTransportAddresses(listNodeAddresses.toArray(new TransportAddress[0])); - } - - private TransportResponseHandler wrapLivenessResponseHandler(TransportResponseHandler handler, - DiscoveryNode node, - ClusterName clusterName) { - return new TransportResponseHandler() { - @Override - public T read(StreamInput in) throws IOException { - return handler.read(in); - } - - @Override - @SuppressWarnings("unchecked") - public void handleResponse(T response) { - LivenessResponse livenessResponse = new LivenessResponse(clusterName, - new DiscoveryNode(node.getName(), node.getId(), node.getEphemeralId(), "liveness-hostname" + node.getId(), - "liveness-hostaddress" + node.getId(), - livenessAddress, node.getAttributes(), node.getRoles(), - node.getVersion())); - handler.handleResponse((T)livenessResponse); - } - - @Override - public void handleException(TransportException exp) { - handler.handleException(exp); - } - - @Override - public String executor() { - return handler.executor(); - } - }; - } - - @Override - public void close() { - transport.endConnectMode(); - transportService.stop(); - transportClientNodesService.close(); - terminate(threadPool); - } - } - - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/37567") - public void testListenerFailures() throws InterruptedException { - int iters = iterations(10, 100); - for (int i = 0; i finalFailure = new AtomicReference<>(); - final AtomicReference response = new AtomicReference<>(); - ActionListener actionListener = new ActionListener() { - @Override - public void onResponse(TestResponse testResponse) { - response.set(testResponse); - latch.countDown(); - } - - @Override - public void onFailure(Exception e) { - finalFailures.incrementAndGet(); - finalFailure.set(e); - latch.countDown(); - } - }; - - final AtomicInteger preSendFailures = new AtomicInteger(); - - iteration.transportClientNodesService.execute((node, retryListener) -> { - if (rarely()) { - preSendFailures.incrementAndGet(); - //throw whatever exception that is not a subclass of ConnectTransportException - throw new IllegalArgumentException(); - } - - iteration.transportService.sendRequest(node, "action", new TestRequest(), - TransportRequestOptions.EMPTY, new TransportResponseHandler() { - @Override - public TestResponse read(StreamInput in) { - return new TestResponse(in); - } - - @Override - public void handleResponse(TestResponse response1) { - retryListener.onResponse(response1); - } - - @Override - public void handleException(TransportException exp) { - retryListener.onFailure(exp); - } - - @Override - public String executor() { - return randomBoolean() ? ThreadPool.Names.SAME : ThreadPool.Names.GENERIC; - } - }); - }, actionListener); - - latch.await(); - - //there can be only either one failure that causes the request to fail straightaway or success - assertThat(preSendFailures.get() + iteration.transport.failures() + iteration.transport.successes(), lessThanOrEqualTo(1)); - - if (iteration.transport.successes() == 1) { - assertThat(finalFailures.get(), equalTo(0)); - assertThat(finalFailure.get(), nullValue()); - assertThat(response.get(), notNullValue()); - } else { - assertThat(finalFailures.get(), equalTo(1)); - assertThat(finalFailure.get(), notNullValue()); - assertThat(response.get(), nullValue()); - if (preSendFailures.get() == 0 && iteration.transport.failures() == 0) { - assertThat(finalFailure.get(), instanceOf(NoNodeAvailableException.class)); - } - } - - assertThat(iteration.transport.triedNodes().size(), lessThanOrEqualTo(iteration.listNodesCount)); - assertThat(iteration.transport.triedNodes().size(), equalTo(iteration.transport.connectTransportExceptions() + - iteration.transport.failures() + iteration.transport.successes())); - } - } - } - - public void testConnectedNodes() { - int iters = iterations(10, 100); - for (int i = 0; i ("removed address") { - @Override - public boolean matches(Object item) { - return item instanceof DiscoveryNode && ((DiscoveryNode)item).getAddress().equals(addressToRemove); - } - }))); - assertEquals(iteration.listNodesCount + iteration.sniffNodesCount - 1, service.connectedNodes().size()); - } - } - - public void testSniffNodesSamplerClosesConnections() throws Exception { - final TestThreadPool threadPool = new TestThreadPool("testSniffNodesSamplerClosesConnections"); - - Settings remoteSettings = Settings.builder().put(Node.NODE_NAME_SETTING.getKey(), "remote").build(); - try (MockTransportService remoteService = createNewService(remoteSettings, Version.CURRENT, threadPool, null)) { - final MockHandler handler = new MockHandler(remoteService); - remoteService.registerRequestHandler(ClusterStateAction.NAME, ThreadPool.Names.SAME, ClusterStateRequest::new, handler); - remoteService.start(); - remoteService.acceptIncomingRequests(); - - Settings clientSettings = Settings.builder() - .put(TransportClient.CLIENT_TRANSPORT_SNIFF.getKey(), true) - .put(TransportClient.CLIENT_TRANSPORT_PING_TIMEOUT.getKey(), TimeValue.timeValueSeconds(1)) - .put(TransportClient.CLIENT_TRANSPORT_NODES_SAMPLER_INTERVAL.getKey(), TimeValue.timeValueSeconds(30)) - .build(); - - try (MockTransportService clientService = createNewService(clientSettings, Version.CURRENT, threadPool, null)) { - final List establishedConnections = new CopyOnWriteArrayList<>(); - - clientService.addConnectBehavior(remoteService, (transport, discoveryNode, profile, listener) -> - transport.openConnection(discoveryNode, profile, - ActionListener.delegateFailure(listener, (delegatedListener, connection) -> { - establishedConnections.add(connection); - delegatedListener.onResponse(connection); - }))); - - clientService.start(); - clientService.acceptIncomingRequests(); - - try (TransportClientNodesService transportClientNodesService = - new TransportClientNodesService(clientSettings, clientService, threadPool, (a, b) -> {})) { - assertEquals(0, transportClientNodesService.connectedNodes().size()); - assertEquals(0, establishedConnections.size()); - - transportClientNodesService.addTransportAddresses(remoteService.getLocalDiscoNode().getAddress()); - assertEquals(1, transportClientNodesService.connectedNodes().size()); - assertEquals(1, clientService.connectionManager().size()); - - transportClientNodesService.doSample(); - assertEquals(1, clientService.connectionManager().size()); - - establishedConnections.clear(); - handler.failToRespond(); - Thread thread = new Thread(transportClientNodesService::doSample); - thread.start(); - - assertBusy(() -> assertTrue(establishedConnections.size() >= 1)); - assertFalse("Temporary ping connection must be opened", establishedConnections.get(0).isClosed()); - - thread.join(); - - assertTrue(establishedConnections.get(0).isClosed()); - } - } - } finally { - terminate(threadPool); - } - } - - class MockHandler implements TransportRequestHandler { - - private final AtomicBoolean failToRespond = new AtomicBoolean(false); - private final MockTransportService transportService; - - MockHandler(MockTransportService transportService) { - this.transportService = transportService; - } - - @Override - public void messageReceived(ClusterStateRequest request, TransportChannel channel, Task task) throws Exception { - if (failToRespond.get()) { - return; - } - - DiscoveryNodes discoveryNodes = DiscoveryNodes.builder().add(transportService.getLocalDiscoNode()).build(); - ClusterState build = ClusterState.builder(ClusterName.DEFAULT).nodes(discoveryNodes).build(); - channel.sendResponse(new ClusterStateResponse(ClusterName.DEFAULT, build, false)); - } - - void failToRespond() { - if (failToRespond.compareAndSet(false, true) == false) { - throw new AssertionError("Request handler is already marked as failToRespond"); - } - } - } - - public static class TestRequest extends TransportRequest { - - } - - private static class TestResponse extends TransportResponse { - - private TestResponse() {} - private TestResponse(StreamInput in) {} - } -} diff --git a/server/src/test/java/org/elasticsearch/client/transport/TransportClientRetryIT.java b/server/src/test/java/org/elasticsearch/client/transport/TransportClientRetryIT.java deleted file mode 100644 index 8444b3bd1374f..0000000000000 --- a/server/src/test/java/org/elasticsearch/client/transport/TransportClientRetryIT.java +++ /dev/null @@ -1,85 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.client.transport; - -import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest; -import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; -import org.elasticsearch.action.support.PlainActionFuture; -import org.elasticsearch.client.Requests; -import org.elasticsearch.cluster.ClusterName; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.common.network.NetworkModule; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.TransportAddress; -import org.elasticsearch.env.Environment; -import org.elasticsearch.test.ESIntegTestCase; -import org.elasticsearch.test.ESIntegTestCase.ClusterScope; -import org.elasticsearch.test.ESIntegTestCase.Scope; -import org.elasticsearch.transport.MockTransportClient; -import org.elasticsearch.transport.TransportService; - -import java.io.IOException; -import java.util.concurrent.ExecutionException; - -import static org.hamcrest.Matchers.greaterThanOrEqualTo; - -@ClusterScope(scope = Scope.TEST, numClientNodes = 0, supportsDedicatedMasters = false) -public class TransportClientRetryIT extends ESIntegTestCase { - public void testRetry() throws IOException, ExecutionException, InterruptedException { - Iterable instances = internalCluster().getInstances(TransportService.class); - TransportAddress[] addresses = new TransportAddress[internalCluster().size()]; - int i = 0; - for (TransportService instance : instances) { - addresses[i++] = instance.boundAddress().publishAddress(); - } - - String transport = getTestTransportType(); - - Settings.Builder builder = Settings.builder().put("client.transport.nodes_sampler_interval", "1s") - .put("node.name", "transport_client_retry_test") - .put(ClusterName.CLUSTER_NAME_SETTING.getKey(), internalCluster().getClusterName()) - .put(NetworkModule.TRANSPORT_TYPE_SETTING.getKey(),transport) - .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()); - - try (TransportClient client = new MockTransportClient(builder.build())) { - client.addTransportAddresses(addresses); - assertEquals(client.connectedNodes().size(), internalCluster().size()); - - int size = cluster().size(); - //kill all nodes one by one, leaving a single master/data node at the end of the loop - for (int j = 1; j < size; j++) { - internalCluster().stopRandomNode(input -> true); - - ClusterStateRequest clusterStateRequest = Requests.clusterStateRequest().local(true); - ClusterState clusterState; - //use both variants of execute method: with and without listener - if (randomBoolean()) { - clusterState = client.admin().cluster().state(clusterStateRequest).get().getState(); - } else { - PlainActionFuture future = PlainActionFuture.newFuture(); - client.admin().cluster().state(clusterStateRequest, future); - clusterState = future.get().getState(); - } - assertThat(clusterState.nodes().getSize(), greaterThanOrEqualTo(size - j)); - assertThat(client.connectedNodes().size(), greaterThanOrEqualTo(size - j)); - } - } - } -} diff --git a/server/src/test/java/org/elasticsearch/client/transport/TransportClientTests.java b/server/src/test/java/org/elasticsearch/client/transport/TransportClientTests.java deleted file mode 100644 index 03ac1ebc3b67b..0000000000000 --- a/server/src/test/java/org/elasticsearch/client/transport/TransportClientTests.java +++ /dev/null @@ -1,121 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.client.transport; - -import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequest; -import org.elasticsearch.common.io.stream.NamedWriteable; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry.Entry; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.util.concurrent.ThreadContext; -import org.elasticsearch.env.Environment; -import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.transport.MockTransportClient; -import org.elasticsearch.transport.TransportSettings; - -import java.io.IOException; -import java.util.Arrays; -import java.util.List; -import java.util.concurrent.ExecutionException; - -import static org.hamcrest.CoreMatchers.containsString; -import static org.hamcrest.CoreMatchers.hasItem; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.object.HasToString.hasToString; - -public class TransportClientTests extends ESTestCase { - - public void testThatUsingAClosedClientThrowsAnException() throws ExecutionException, InterruptedException { - final TransportClient client = new MockTransportClient(Settings.EMPTY); - client.close(); - final IllegalStateException e = - expectThrows(IllegalStateException.class, () -> client.admin().cluster().health(new ClusterHealthRequest()).get()); - assertThat(e, hasToString(containsString("transport client is closed"))); - } - - /** - * test that when plugins are provided that want to register - * {@link NamedWriteable}, those are also made known to the - * {@link NamedWriteableRegistry} of the transport client - */ - public void testPluginNamedWriteablesRegistered() { - Settings baseSettings = Settings.builder() - .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()) - .build(); - try (TransportClient client = new MockTransportClient(baseSettings, Arrays.asList(MockPlugin.class))) { - assertNotNull(client.namedWriteableRegistry.getReader(MockPlugin.MockNamedWriteable.class, MockPlugin.MockNamedWriteable.NAME)); - } - } - - public void testSettingsContainsTransportClient() { - final Settings baseSettings = Settings.builder() - .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()) - .build(); - try (TransportClient client = new MockTransportClient(baseSettings, Arrays.asList(MockPlugin.class))) { - final Settings settings = TransportSettings.DEFAULT_FEATURES_SETTING.get(client.settings()); - assertThat(settings.keySet(), hasItem("transport_client")); - assertThat(settings.get("transport_client"), equalTo("true")); - } - } - - public void testDefaultHeader() { - final Settings baseSettings = Settings.builder() - .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()) - .build(); - try (TransportClient client = new MockTransportClient(baseSettings, Arrays.asList(MockPlugin.class))) { - final ThreadContext threadContext = client.threadPool().getThreadContext(); - assertEquals("true", threadContext.getHeader("test")); - } - } - - public static class MockPlugin extends Plugin { - - @Override - public List getNamedWriteables() { - return Arrays.asList(new Entry[]{ new Entry(MockNamedWriteable.class, MockNamedWriteable.NAME, MockNamedWriteable::new)}); - } - - @Override - public Settings additionalSettings() { - return Settings.builder().put(ThreadContext.PREFIX + "." + "test", true).build(); - } - - public class MockNamedWriteable implements NamedWriteable { - - static final String NAME = "mockNamedWritable"; - - MockNamedWriteable(StreamInput in) { - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - } - - @Override - public String getWriteableName() { - return NAME; - } - - } - } -} diff --git a/server/src/test/java/org/elasticsearch/cluster/ClusterStateIT.java b/server/src/test/java/org/elasticsearch/cluster/ClusterStateIT.java deleted file mode 100644 index fc917d60deede..0000000000000 --- a/server/src/test/java/org/elasticsearch/cluster/ClusterStateIT.java +++ /dev/null @@ -1,350 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.cluster; - -import org.elasticsearch.Version; -import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; -import org.elasticsearch.client.Client; -import org.elasticsearch.cluster.metadata.IndexGraveyard; -import org.elasticsearch.cluster.metadata.MetaData; -import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.CheckedFunction; -import org.elasticsearch.common.ParseField; -import org.elasticsearch.common.Priority; -import org.elasticsearch.common.collect.ImmutableOpenMap; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.common.xcontent.NamedXContentRegistry; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.env.Environment; -import org.elasticsearch.env.NodeEnvironment; -import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.script.ScriptService; -import org.elasticsearch.test.ESIntegTestCase; -import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.watcher.ResourceWatcherService; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; -import java.util.Collections; -import java.util.EnumSet; -import java.util.HashSet; -import java.util.List; -import java.util.Optional; -import java.util.Set; -import java.util.concurrent.atomic.AtomicBoolean; - -import static org.elasticsearch.gateway.GatewayService.STATE_NOT_RECOVERED_BLOCK; -import static org.elasticsearch.test.ESIntegTestCase.Scope.TEST; -import static org.hamcrest.CoreMatchers.equalTo; -import static org.hamcrest.CoreMatchers.not; -import static org.hamcrest.Matchers.hasItem; -import static org.hamcrest.Matchers.instanceOf; - -/** - * This test suite sets up a situation where the cluster has two plugins installed (node, and node-and-transport-client), and a transport - * client only has node-and-transport-client plugin installed. Each of these plugins inject customs into the cluster state and we want to - * check that the client can de-serialize a cluster state response based on the fact that the response should not contain customs that the - * transport client does not understand based on the fact that it only presents the node-and-transport-client-feature. - */ -@ESIntegTestCase.ClusterScope(scope = TEST) -public class ClusterStateIT extends ESIntegTestCase { - - public abstract static - class Custom implements MetaData.Custom { - - private static final ParseField VALUE = new ParseField("value"); - - private final int value; - - int value() { - return value; - } - - Custom(final int value) { - this.value = value; - } - - Custom(final StreamInput in) throws IOException { - value = in.readInt(); - } - - @Override - public EnumSet context() { - return MetaData.ALL_CONTEXTS; - } - - @Override - public Diff diff(final MetaData.Custom previousState) { - return null; - } - - @Override - public void writeTo(final StreamOutput out) throws IOException { - out.writeInt(value); - } - - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.field(VALUE.getPreferredName(), value); - return builder; - } - - } - - public static class NodeCustom extends Custom { - - public static final String TYPE = "node"; - - NodeCustom(final int value) { - super(value); - } - - NodeCustom(final StreamInput in) throws IOException { - super(in); - } - - @Override - public String getWriteableName() { - return TYPE; - } - - @Override - public Version getMinimalSupportedVersion() { - return Version.CURRENT; - } - - @Override - public Optional getRequiredFeature() { - return Optional.of("node"); - } - - } - - public static class NodeAndTransportClientCustom extends Custom { - - public static final String TYPE = "node-and-transport-client"; - - NodeAndTransportClientCustom(final int value) { - super(value); - } - - public NodeAndTransportClientCustom(final StreamInput in) throws IOException { - super(in); - } - - @Override - public String getWriteableName() { - return TYPE; - } - - @Override - public Version getMinimalSupportedVersion() { - return Version.CURRENT; - } - - /* - * This custom should always be returned yet we randomize whether it has a required feature that the client is expected to have - * versus not requiring any feature. We use a field to make the random choice exactly once. - */ - @SuppressWarnings("OptionalUsedAsFieldOrParameterType") - private final Optional requiredFeature = randomBoolean() ? Optional.empty() : Optional.of("node-and-transport-client"); - - @Override - public Optional getRequiredFeature() { - return requiredFeature; - } - - } - - public abstract static class CustomPlugin extends Plugin { - - private final List namedWritables = new ArrayList<>(); - private final List namedXContents = new ArrayList<>(); - - public CustomPlugin() { - registerBuiltinWritables(); - } - - protected void registerMetaDataCustom( - final String name, final Writeable.Reader reader, final CheckedFunction parser) { - namedWritables.add(new NamedWriteableRegistry.Entry(MetaData.Custom.class, name, reader)); - namedXContents.add(new NamedXContentRegistry.Entry(MetaData.Custom.class, new ParseField(name), parser)); - } - - protected abstract void registerBuiltinWritables(); - - protected abstract String getType(); - - protected abstract Custom getInstance(); - - @Override - public List getNamedWriteables() { - return namedWritables; - } - - @Override - public List getNamedXContent() { - return namedXContents; - } - - private final AtomicBoolean installed = new AtomicBoolean(); - - @Override - public Collection createComponents( - final Client client, - final ClusterService clusterService, - final ThreadPool threadPool, - final ResourceWatcherService resourceWatcherService, - final ScriptService scriptService, - final NamedXContentRegistry xContentRegistry, - final Environment environment, - final NodeEnvironment nodeEnvironment, - final NamedWriteableRegistry namedWriteableRegistry) { - clusterService.addListener(event -> { - final ClusterState state = event.state(); - if (state.getBlocks().hasGlobalBlock(STATE_NOT_RECOVERED_BLOCK)) { - return; - } - - final MetaData metaData = state.metaData(); - if (state.nodes().isLocalNodeElectedMaster()) { - if (metaData.custom(getType()) == null) { - if (installed.compareAndSet(false, true)) { - clusterService.submitStateUpdateTask("install-metadata-custom", new ClusterStateUpdateTask(Priority.URGENT) { - - @Override - public ClusterState execute(ClusterState currentState) { - if (currentState.custom(getType()) == null) { - final MetaData.Builder builder = MetaData.builder(currentState.metaData()); - builder.putCustom(getType(), getInstance()); - return ClusterState.builder(currentState).metaData(builder).build(); - } else { - return currentState; - } - } - - @Override - public void onFailure(String source, Exception e) { - throw new AssertionError(e); - } - - }); - } - } - } - - }); - return Collections.emptyList(); - } - } - - public static class NodePlugin extends CustomPlugin { - - public Optional getFeature() { - return Optional.of("node"); - } - - static final int VALUE = randomInt(); - - @Override - protected void registerBuiltinWritables() { - registerMetaDataCustom( - NodeCustom.TYPE, - NodeCustom::new, - parser -> { - throw new IOException(new UnsupportedOperationException()); - }); - } - - @Override - protected String getType() { - return NodeCustom.TYPE; - } - - @Override - protected Custom getInstance() { - return new NodeCustom(VALUE); - } - - } - - public static class NodeAndTransportClientPlugin extends CustomPlugin { - - @Override - protected Optional getFeature() { - return Optional.of("node-and-transport-client"); - } - - static final int VALUE = randomInt(); - - @Override - protected void registerBuiltinWritables() { - registerMetaDataCustom( - NodeAndTransportClientCustom.TYPE, - NodeAndTransportClientCustom::new, - parser -> { - throw new IOException(new UnsupportedOperationException()); - }); - } - - @Override - protected String getType() { - return NodeAndTransportClientCustom.TYPE; - } - - @Override - protected Custom getInstance() { - return new NodeAndTransportClientCustom(VALUE); - } - - } - - @Override - protected Collection> nodePlugins() { - return Arrays.asList(NodePlugin.class, NodeAndTransportClientPlugin.class); - } - - @Override - protected Collection> transportClientPlugins() { - return Collections.singletonList(NodeAndTransportClientPlugin.class); - } - - public void testOptionalCustoms() throws Exception { - // ensure that the customs are injected into the cluster state - assertBusy(() -> assertTrue(clusterService().state().metaData().customs().containsKey(NodeCustom.TYPE))); - assertBusy(() -> assertTrue(clusterService().state().metaData().customs().containsKey(NodeAndTransportClientCustom.TYPE))); - final ClusterStateResponse state = internalCluster().transportClient().admin().cluster().prepareState().get(); - final ImmutableOpenMap customs = state.getState().metaData().customs(); - final Set keys = new HashSet<>(Arrays.asList(customs.keys().toArray(String.class))); - assertThat(keys, hasItem(IndexGraveyard.TYPE)); - assertThat(keys, not(hasItem(NodeCustom.TYPE))); - assertThat(keys, hasItem(NodeAndTransportClientCustom.TYPE)); - final MetaData.Custom actual = customs.get(NodeAndTransportClientCustom.TYPE); - assertThat(actual, instanceOf(NodeAndTransportClientCustom.class)); - assertThat(((NodeAndTransportClientCustom)actual).value(), equalTo(NodeAndTransportClientPlugin.VALUE)); - } - -} diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/RareClusterStateIT.java b/server/src/test/java/org/elasticsearch/cluster/coordination/RareClusterStateIT.java index c31df3ade71cc..27036680880b2 100644 --- a/server/src/test/java/org/elasticsearch/cluster/coordination/RareClusterStateIT.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/RareClusterStateIT.java @@ -67,7 +67,7 @@ import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.instanceOf; -@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, numClientNodes = 0, transportClientRatio = 0) +@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, numClientNodes = 0) @TestLogging("_root:DEBUG") public class RareClusterStateIT extends ESIntegTestCase { diff --git a/server/src/test/java/org/elasticsearch/discovery/ClusterDisruptionCleanSettingsIT.java b/server/src/test/java/org/elasticsearch/discovery/ClusterDisruptionCleanSettingsIT.java index 2ad951312fa14..6ba738714ee8f 100644 --- a/server/src/test/java/org/elasticsearch/discovery/ClusterDisruptionCleanSettingsIT.java +++ b/server/src/test/java/org/elasticsearch/discovery/ClusterDisruptionCleanSettingsIT.java @@ -35,7 +35,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.hamcrest.Matchers.equalTo; -@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, transportClientRatio = 0) +@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0) public class ClusterDisruptionCleanSettingsIT extends ESIntegTestCase { @Override diff --git a/server/src/test/java/org/elasticsearch/discovery/ClusterDisruptionIT.java b/server/src/test/java/org/elasticsearch/discovery/ClusterDisruptionIT.java index 5bc5efc96c661..3215325e835bf 100644 --- a/server/src/test/java/org/elasticsearch/discovery/ClusterDisruptionIT.java +++ b/server/src/test/java/org/elasticsearch/discovery/ClusterDisruptionIT.java @@ -85,7 +85,7 @@ * Tests various cluster operations (e.g., indexing) during disruptions. */ @TestLogging("_root:DEBUG,org.elasticsearch.cluster.service:TRACE") -@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, transportClientRatio = 0) +@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0) public class ClusterDisruptionIT extends AbstractDisruptionTestCase { private enum ConflictMode { diff --git a/server/src/test/java/org/elasticsearch/discovery/DiscoveryDisruptionIT.java b/server/src/test/java/org/elasticsearch/discovery/DiscoveryDisruptionIT.java index 923af536cbb58..29a75ea744cd8 100644 --- a/server/src/test/java/org/elasticsearch/discovery/DiscoveryDisruptionIT.java +++ b/server/src/test/java/org/elasticsearch/discovery/DiscoveryDisruptionIT.java @@ -48,7 +48,7 @@ * Tests for discovery during disruptions. */ @TestLogging("_root:DEBUG,org.elasticsearch.cluster.service:TRACE") -@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, transportClientRatio = 0) +@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0) public class DiscoveryDisruptionIT extends AbstractDisruptionTestCase { /** diff --git a/server/src/test/java/org/elasticsearch/discovery/MasterDisruptionIT.java b/server/src/test/java/org/elasticsearch/discovery/MasterDisruptionIT.java index aea0c8c5c25f9..37d8efd72c72f 100644 --- a/server/src/test/java/org/elasticsearch/discovery/MasterDisruptionIT.java +++ b/server/src/test/java/org/elasticsearch/discovery/MasterDisruptionIT.java @@ -51,7 +51,7 @@ * Tests relating to the loss of the master. */ @TestLogging("_root:DEBUG,org.elasticsearch.cluster.service:TRACE") -@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, transportClientRatio = 0) +@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0) public class MasterDisruptionIT extends AbstractDisruptionTestCase { /** diff --git a/server/src/test/java/org/elasticsearch/discovery/SnapshotDisruptionIT.java b/server/src/test/java/org/elasticsearch/discovery/SnapshotDisruptionIT.java index b9ac5f33dd911..c8bb9e33c6eac 100644 --- a/server/src/test/java/org/elasticsearch/discovery/SnapshotDisruptionIT.java +++ b/server/src/test/java/org/elasticsearch/discovery/SnapshotDisruptionIT.java @@ -55,7 +55,7 @@ * Tests snapshot operations during disruptions. */ @TestLogging("org.elasticsearch.snapshot:TRACE") -@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, transportClientRatio = 0) +@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0) public class SnapshotDisruptionIT extends ESIntegTestCase { @Override diff --git a/server/src/test/java/org/elasticsearch/discovery/StableMasterDisruptionIT.java b/server/src/test/java/org/elasticsearch/discovery/StableMasterDisruptionIT.java index c4655bcf7ce9a..1bb93123309eb 100644 --- a/server/src/test/java/org/elasticsearch/discovery/StableMasterDisruptionIT.java +++ b/server/src/test/java/org/elasticsearch/discovery/StableMasterDisruptionIT.java @@ -66,7 +66,7 @@ * not detect a master failure too quickly. */ @TestLogging("_root:DEBUG,org.elasticsearch.cluster.service:TRACE") -@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, transportClientRatio = 0) +@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0) public class StableMasterDisruptionIT extends ESIntegTestCase { @Override diff --git a/server/src/test/java/org/elasticsearch/discovery/single/SingleNodeDiscoveryIT.java b/server/src/test/java/org/elasticsearch/discovery/single/SingleNodeDiscoveryIT.java index e0177af1bed39..1b90ea691c1cd 100644 --- a/server/src/test/java/org/elasticsearch/discovery/single/SingleNodeDiscoveryIT.java +++ b/server/src/test/java/org/elasticsearch/discovery/single/SingleNodeDiscoveryIT.java @@ -100,7 +100,7 @@ public Path nodeConfigPath(int nodeOrdinal) { "other", Arrays.asList(getTestTransportPlugin(), MockHttpTransport.TestPlugin.class), Function.identity())) { - other.beforeTest(random(), 0); + other.beforeTest(random()); final ClusterState first = internalCluster().getInstance(ClusterService.class).state(); final ClusterState second = other.getInstance(ClusterService.class).state(); assertThat(first.nodes().getSize(), equalTo(1)); @@ -175,7 +175,7 @@ public Path nodeConfigPath(int nodeOrdinal) { Logger clusterLogger = LogManager.getLogger(JoinHelper.class); Loggers.addAppender(clusterLogger, mockAppender); try { - other.beforeTest(random(), 0); + other.beforeTest(random()); final ClusterState first = internalCluster().getInstance(ClusterService.class).state(); assertThat(first.nodes().getSize(), equalTo(1)); assertBusy(() -> mockAppender.assertAllExpectationsMatched()); diff --git a/server/src/test/java/org/elasticsearch/index/query/plugin/CustomQueryParserIT.java b/server/src/test/java/org/elasticsearch/index/query/plugin/CustomQueryParserIT.java index f1ebcd971741e..b23f709d7350d 100644 --- a/server/src/test/java/org/elasticsearch/index/query/plugin/CustomQueryParserIT.java +++ b/server/src/test/java/org/elasticsearch/index/query/plugin/CustomQueryParserIT.java @@ -37,11 +37,6 @@ protected Collection> nodePlugins() { return Arrays.asList(DummyQueryParserPlugin.class); } - @Override - protected Collection> transportClientPlugins() { - return Arrays.asList(DummyQueryParserPlugin.class); - } - @Override @Before public void setUp() throws Exception { diff --git a/server/src/test/java/org/elasticsearch/index/store/ExceptionRetryIT.java b/server/src/test/java/org/elasticsearch/index/store/ExceptionRetryIT.java index 91e73e53ebc15..409e007790ec4 100644 --- a/server/src/test/java/org/elasticsearch/index/store/ExceptionRetryIT.java +++ b/server/src/test/java/org/elasticsearch/index/store/ExceptionRetryIT.java @@ -58,7 +58,7 @@ import static org.hamcrest.Matchers.greaterThan; @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.SUITE, numDataNodes = 2, - supportsDedicatedMasters = false, numClientNodes = 1, transportClientRatio = 0.0) + supportsDedicatedMasters = false, numClientNodes = 1) public class ExceptionRetryIT extends ESIntegTestCase { @Override diff --git a/server/src/test/java/org/elasticsearch/indices/settings/InternalSettingsIT.java b/server/src/test/java/org/elasticsearch/indices/settings/InternalSettingsIT.java index 1d11fbc79fc71..f7935021c09f2 100644 --- a/server/src/test/java/org/elasticsearch/indices/settings/InternalSettingsIT.java +++ b/server/src/test/java/org/elasticsearch/indices/settings/InternalSettingsIT.java @@ -38,11 +38,6 @@ protected Collection> nodePlugins() { return Collections.singleton(InternalOrPrivateSettingsPlugin.class); } - @Override - protected Collection> transportClientPlugins() { - return Collections.singletonList(InternalOrPrivateSettingsPlugin.class); - } - public void testSetInternalIndexSettingOnCreate() { final Settings settings = Settings.builder().put("index.internal", "internal").build(); createIndex("index", settings); diff --git a/server/src/test/java/org/elasticsearch/indices/settings/PrivateSettingsIT.java b/server/src/test/java/org/elasticsearch/indices/settings/PrivateSettingsIT.java index 08f45eac5be64..c8f0740bc35aa 100644 --- a/server/src/test/java/org/elasticsearch/indices/settings/PrivateSettingsIT.java +++ b/server/src/test/java/org/elasticsearch/indices/settings/PrivateSettingsIT.java @@ -41,11 +41,6 @@ protected Collection> nodePlugins() { return Collections.singletonList(InternalOrPrivateSettingsPlugin.class); } - @Override - protected Collection> transportClientPlugins() { - return Collections.singletonList(InternalOrPrivateSettingsPlugin.class); - } - public void testSetPrivateIndexSettingOnCreate() { final Settings settings = Settings.builder().put("index.private", "private").build(); final Exception e = expectThrows(Exception.class, () -> createIndex("index", settings)); diff --git a/server/src/test/java/org/elasticsearch/persistent/PersistentTasksExecutorFullRestartIT.java b/server/src/test/java/org/elasticsearch/persistent/PersistentTasksExecutorFullRestartIT.java index 195e14f65c004..63ff7f9f97463 100644 --- a/server/src/test/java/org/elasticsearch/persistent/PersistentTasksExecutorFullRestartIT.java +++ b/server/src/test/java/org/elasticsearch/persistent/PersistentTasksExecutorFullRestartIT.java @@ -44,11 +44,6 @@ protected Collection> nodePlugins() { return Collections.singletonList(TestPersistentTasksPlugin.class); } - @Override - protected Collection> transportClientPlugins() { - return nodePlugins(); - } - protected boolean ignoreExternalCluster() { return true; } diff --git a/server/src/test/java/org/elasticsearch/persistent/PersistentTasksExecutorIT.java b/server/src/test/java/org/elasticsearch/persistent/PersistentTasksExecutorIT.java index 4acb391d9c0ee..95e01c79e19d4 100644 --- a/server/src/test/java/org/elasticsearch/persistent/PersistentTasksExecutorIT.java +++ b/server/src/test/java/org/elasticsearch/persistent/PersistentTasksExecutorIT.java @@ -59,11 +59,6 @@ protected Collection> nodePlugins() { return Collections.singletonList(TestPersistentTasksPlugin.class); } - @Override - protected Collection> transportClientPlugins() { - return nodePlugins(); - } - protected boolean ignoreExternalCluster() { return true; } diff --git a/server/src/test/java/org/elasticsearch/persistent/decider/EnableAssignmentDeciderIT.java b/server/src/test/java/org/elasticsearch/persistent/decider/EnableAssignmentDeciderIT.java index 2ea6567c9f8d0..8c6b82d5d4128 100644 --- a/server/src/test/java/org/elasticsearch/persistent/decider/EnableAssignmentDeciderIT.java +++ b/server/src/test/java/org/elasticsearch/persistent/decider/EnableAssignmentDeciderIT.java @@ -48,11 +48,6 @@ protected Collection> nodePlugins() { return singletonList(TestPersistentTasksPlugin.class); } - @Override - protected Collection> transportClientPlugins() { - return nodePlugins(); - } - @Override protected boolean ignoreExternalCluster() { return true; diff --git a/server/src/test/java/org/elasticsearch/recovery/FullRollingRestartIT.java b/server/src/test/java/org/elasticsearch/recovery/FullRollingRestartIT.java index 2c32d6eb33ba8..0506d18ffecbd 100644 --- a/server/src/test/java/org/elasticsearch/recovery/FullRollingRestartIT.java +++ b/server/src/test/java/org/elasticsearch/recovery/FullRollingRestartIT.java @@ -38,7 +38,7 @@ import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; -@ClusterScope(scope = Scope.TEST, numDataNodes = 0, transportClientRatio = 0.0) +@ClusterScope(scope = Scope.TEST, numDataNodes = 0) public class FullRollingRestartIT extends ESIntegTestCase { protected void assertTimeout(ClusterHealthRequestBuilder requestBuilder) { ClusterHealthResponse clusterHealth = requestBuilder.get(); diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/SignificantTermsSignificanceScoreIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/SignificantTermsSignificanceScoreIT.java index d21519fa96754..81ea71621ae3e 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/SignificantTermsSignificanceScoreIT.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/SignificantTermsSignificanceScoreIT.java @@ -94,11 +94,6 @@ protected Collection> nodePlugins() { return Arrays.asList(CustomSignificanceHeuristicPlugin.class); } - @Override - protected Collection> transportClientPlugins() { - return Arrays.asList(CustomSignificanceHeuristicPlugin.class); - } - public String randomExecutionHint() { return randomBoolean() ? null : randomFrom(SignificantTermsAggregatorFactory.ExecutionMode.values()).toString(); } diff --git a/server/src/test/java/org/elasticsearch/search/fetch/FetchSubPhasePluginIT.java b/server/src/test/java/org/elasticsearch/search/fetch/FetchSubPhasePluginIT.java index e6f7ecf6b37e7..f2239d80d7f62 100644 --- a/server/src/test/java/org/elasticsearch/search/fetch/FetchSubPhasePluginIT.java +++ b/server/src/test/java/org/elasticsearch/search/fetch/FetchSubPhasePluginIT.java @@ -64,11 +64,6 @@ protected Collection> nodePlugins() { return Collections.singletonList(FetchTermVectorsPlugin.class); } - @Override - protected Collection> transportClientPlugins() { - return nodePlugins(); - } - @SuppressWarnings("unchecked") public void testPlugin() throws Exception { client().admin() diff --git a/server/src/test/java/org/elasticsearch/search/functionscore/FunctionScorePluginIT.java b/server/src/test/java/org/elasticsearch/search/functionscore/FunctionScorePluginIT.java index cb67481d8ddd0..bfbf04a7f5ad8 100644 --- a/server/src/test/java/org/elasticsearch/search/functionscore/FunctionScorePluginIT.java +++ b/server/src/test/java/org/elasticsearch/search/functionscore/FunctionScorePluginIT.java @@ -59,11 +59,6 @@ protected Collection> nodePlugins() { return Arrays.asList(CustomDistanceScorePlugin.class); } - @Override - protected Collection> transportClientPlugins() { - return Arrays.asList(CustomDistanceScorePlugin.class); - } - public void testPlugin() throws Exception { client().admin() .indices() diff --git a/server/src/test/java/org/elasticsearch/search/scroll/SearchScrollIT.java b/server/src/test/java/org/elasticsearch/search/scroll/SearchScrollIT.java index e0ae78dff3466..7ac48e03be688 100644 --- a/server/src/test/java/org/elasticsearch/search/scroll/SearchScrollIT.java +++ b/server/src/test/java/org/elasticsearch/search/scroll/SearchScrollIT.java @@ -434,9 +434,9 @@ public void testSimpleScrollQueryThenFetchClearAllScrollIds() throws Exception { assertThat(clearResponse.status(), equalTo(RestStatus.OK)); assertToXContentResponse(clearResponse, true, clearResponse.getNumFreed()); - assertThrows(internalCluster().transportClient().prepareSearchScroll(searchResponse1.getScrollId()) + assertThrows(internalCluster().client().prepareSearchScroll(searchResponse1.getScrollId()) .setScroll(TimeValue.timeValueMinutes(2)), RestStatus.NOT_FOUND); - assertThrows(internalCluster().transportClient().prepareSearchScroll(searchResponse2.getScrollId()) + assertThrows(internalCluster().client().prepareSearchScroll(searchResponse2.getScrollId()) .setScroll(TimeValue.timeValueMinutes(2)), RestStatus.NOT_FOUND); } @@ -484,7 +484,7 @@ public void testThatNonExistingScrollIdReturnsCorrectException() throws Exceptio ClearScrollResponse clearScrollResponse = client().prepareClearScroll().addScrollId(searchResponse.getScrollId()).get(); assertThat(clearScrollResponse.isSucceeded(), is(true)); - assertThrows(internalCluster().transportClient().prepareSearchScroll(searchResponse.getScrollId()), RestStatus.NOT_FOUND); + assertThrows(internalCluster().client().prepareSearchScroll(searchResponse.getScrollId()), RestStatus.NOT_FOUND); } public void testStringSortMissingAscTerminates() throws Exception { diff --git a/server/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java b/server/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java index 3345fbd3f248e..4a28b1eeea440 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java +++ b/server/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java @@ -114,7 +114,7 @@ import static org.hamcrest.Matchers.nullValue; import static org.mockito.Mockito.mock; -@ClusterScope(scope = Scope.TEST, numDataNodes = 0, transportClientRatio = 0) +@ClusterScope(scope = Scope.TEST, numDataNodes = 0) public class DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCase { public static class TestCustomMetaDataPlugin extends Plugin { diff --git a/server/src/test/java/org/elasticsearch/snapshots/SnapshotShardsServiceIT.java b/server/src/test/java/org/elasticsearch/snapshots/SnapshotShardsServiceIT.java index 777918a7d5eba..20dd3693f78cd 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/SnapshotShardsServiceIT.java +++ b/server/src/test/java/org/elasticsearch/snapshots/SnapshotShardsServiceIT.java @@ -41,7 +41,7 @@ import static org.hamcrest.Matchers.everyItem; import static org.hamcrest.Matchers.hasSize; -@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, transportClientRatio = 0) +@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0) public class SnapshotShardsServiceIT extends AbstractSnapshotIntegTestCase { @Override diff --git a/server/src/test/java/org/elasticsearch/threadpool/SimpleThreadPoolIT.java b/server/src/test/java/org/elasticsearch/threadpool/SimpleThreadPoolIT.java index 5f286a5ff0ad4..e5febc2bc36ec 100644 --- a/server/src/test/java/org/elasticsearch/threadpool/SimpleThreadPoolIT.java +++ b/server/src/test/java/org/elasticsearch/threadpool/SimpleThreadPoolIT.java @@ -25,7 +25,6 @@ import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESIntegTestCase.ClusterScope; import org.elasticsearch.test.ESIntegTestCase.Scope; -import org.elasticsearch.test.InternalTestCluster; import org.elasticsearch.test.hamcrest.RegexMatcher; import java.lang.management.ManagementFactory; @@ -93,7 +92,7 @@ public void testThreadNames() throws Exception { || threadName.contains("Keep-Alive-Timer")) { continue; } - String nodePrefix = "(" + Pattern.quote(InternalTestCluster.TRANSPORT_CLIENT_PREFIX) + ")?(" + + String nodePrefix = "(" + Pattern.quote(ESIntegTestCase.SUITE_CLUSTER_NODE_PREFIX) + "|" + Pattern.quote(ESIntegTestCase.TEST_CLUSTER_NODE_PREFIX) +")"; assertThat(threadName, RegexMatcher.matches("\\[" + nodePrefix + "\\d+\\]")); diff --git a/server/src/test/java/org/elasticsearch/versioning/ConcurrentSeqNoVersioningIT.java b/server/src/test/java/org/elasticsearch/versioning/ConcurrentSeqNoVersioningIT.java index 416f26e81972a..a6b3865de247b 100644 --- a/server/src/test/java/org/elasticsearch/versioning/ConcurrentSeqNoVersioningIT.java +++ b/server/src/test/java/org/elasticsearch/versioning/ConcurrentSeqNoVersioningIT.java @@ -118,8 +118,7 @@ * stale or dirty, i.e., come from a stale primary or belong to a write that ends up being discarded. * */ -@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, minNumDataNodes = 4, maxNumDataNodes = 6, - transportClientRatio = 0) +@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, minNumDataNodes = 4, maxNumDataNodes = 6) @TestLogging("_root:DEBUG,org.elasticsearch.action.bulk:TRACE,org.elasticsearch.action.get:TRACE," + "org.elasticsearch.discovery:TRACE,org.elasticsearch.action.support.replication:TRACE," + "org.elasticsearch.cluster.service:TRACE,org.elasticsearch.indices.recovery:TRACE," + diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java index 94a8e9b7728ce..0ab0afb6b0e7a 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java @@ -62,7 +62,6 @@ import org.elasticsearch.client.Requests; import org.elasticsearch.client.RestClient; import org.elasticsearch.client.RestClientBuilder; -import org.elasticsearch.client.transport.TransportClient; import org.elasticsearch.cluster.ClusterModule; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.RestoreInProgress; @@ -245,8 +244,6 @@ *

    * This class supports the following system properties (passed with -Dkey=value to the application) *

      - *
    • -D{@value #TESTS_CLIENT_RATIO} - a double value in the interval [0..1] which defines the ration between node - * and transport clients used
    • *
    • -D{@value #TESTS_ENABLE_MOCK_MODULES} - a boolean value to enable or disable mock modules. This is * useful to test the system without asserting modules that to make sure they don't hide any bugs in production.
    • *
    • - a random seed used to initialize the index random context. @@ -279,11 +276,6 @@ public abstract class ESIntegTestCase extends ESTestCase { public static final String SUITE_CLUSTER_NODE_PREFIX = "node_s"; public static final String TEST_CLUSTER_NODE_PREFIX = "node_t"; - /** - * Key used to set the transport client ratio via the commandline -D{@value #TESTS_CLIENT_RATIO} - */ - public static final String TESTS_CLIENT_RATIO = "tests.client.ratio"; - /** * Key used to eventually switch to using an external cluster and provide its transport addresses */ @@ -349,8 +341,6 @@ public abstract class ESIntegTestCase extends ESTestCase { private static TestCluster currentCluster; private static RestClient restClient = null; - private static final double TRANSPORT_CLIENT_RATIO = transportClientRatio(); - private static final Map, TestCluster> clusters = new IdentityHashMap<>(); private static ESIntegTestCase INSTANCE = null; // see @SuiteScope @@ -372,7 +362,7 @@ protected final boolean enableWarningsCheck() { protected final void beforeInternal() throws Exception { final Scope currentClusterScope = getCurrentClusterScope(); Callable setup = () -> { - cluster().beforeTest(random(), getPerTestTransportClientRatio()); + cluster().beforeTest(random()); cluster().wipe(excludeTemplates()); randomIndexTemplate(); return null; @@ -1128,28 +1118,13 @@ protected void ensureClusterStateConsistency() throws IOException { && masterId.equals(localClusterState.nodes().getMasterNodeId())) { try { assertEquals("cluster state UUID does not match", masterClusterState.stateUUID(), localClusterState.stateUUID()); - /* - * The cluster state received by the transport client can miss customs that the client does not understand. This - * means that we only expect equality in the cluster state including customs if the master client and the local - * client are of the same type (both or neither are transport clients). Otherwise, we can only assert equality - * modulo non-core customs. - */ - if (isTransportClient(masterClient) == isTransportClient(client)) { - // We cannot compare serialization bytes since serialization order of maps is not guaranteed - // but we can compare serialization sizes - they should be the same - assertEquals("cluster state size does not match", masterClusterStateSize, localClusterStateSize); - // Compare JSON serialization - assertNull( - "cluster state JSON serialization does not match", - differenceBetweenMapsIgnoringArrayOrder(masterStateMap, localStateMap)); - } else { - // remove non-core customs and compare the cluster states - assertNull( - "cluster state JSON serialization does not match (after removing some customs)", - differenceBetweenMapsIgnoringArrayOrder( - convertToMap(removePluginCustoms(masterClusterState)), - convertToMap(removePluginCustoms(localClusterState)))); - } + // We cannot compare serialization bytes since serialization order of maps is not guaranteed + // but we can compare serialization sizes - they should be the same + assertEquals("cluster state size does not match", masterClusterStateSize, localClusterStateSize); + // Compare JSON serialization + assertNull( + "cluster state JSON serialization does not match", + differenceBetweenMapsIgnoringArrayOrder(masterStateMap, localStateMap)); } catch (final AssertionError error) { logger.error( "Cluster state from master:\n{}\nLocal cluster state:\n{}", @@ -1163,21 +1138,6 @@ protected void ensureClusterStateConsistency() throws IOException { } - /** - * Tests if the client is a transport client or wraps a transport client. - * - * @param client the client to test - * @return true if the client is a transport client or a wrapped transport client - */ - private boolean isTransportClient(final Client client) { - if (TransportClient.class.isAssignableFrom(client.getClass())) { - return true; - } else if (client instanceof RandomizingClient) { - return isTransportClient(((RandomizingClient) client).in()); - } - return false; - } - private static final Set SAFE_METADATA_CUSTOMS = Set.of(IndexGraveyard.TYPE, IngestMetadata.TYPE, RepositoriesMetaData.TYPE, ScriptMetaData.TYPE); @@ -1382,8 +1342,7 @@ protected final void disableAllocation(String... indices) { } /** - * Returns a random admin client. This client can either be a node or a transport client pointing to any of - * the nodes in the cluster. + * Returns a random admin client. This client can be pointing to any of the nodes in the cluster. */ protected AdminClient admin() { return client().admin(); @@ -1658,12 +1617,6 @@ public enum Scope { * negative value means that the number of client nodes will be randomized. */ int numClientNodes() default InternalTestCluster.DEFAULT_NUM_CLIENT_NODES; - - /** - * Returns the transport client ratio. By default this returns -1 which means a random - * ratio in the interval [0..1] is used. - */ - double transportClientRatio() default -1; } private class LatchedActionListener implements ActionListener { @@ -1817,23 +1770,6 @@ protected Collection> nodePlugins() { return Collections.emptyList(); } - /** - * Returns a collection of plugins that should be loaded when creating a transport client. - */ - protected Collection> transportClientPlugins() { - return Collections.emptyList(); - } - - /** - * This method is used to obtain additional settings for clients created by the internal cluster. - * These settings will be applied on the client in addition to some randomized settings defined in - * the cluster. These settings will also override any other settings the internal cluster might - * add by default. - */ - protected Settings transportClientSettings() { - return Settings.EMPTY; - } - private ExternalTestCluster buildExternalCluster(String clusterAddresses, String clusterName) throws IOException { String[] stringAddresses = clusterAddresses.split(","); TransportAddress[] transportAddresses = new TransportAddress[stringAddresses.length]; @@ -1932,22 +1868,6 @@ public Path nodeConfigPath(int nodeOrdinal) { public Collection> nodePlugins() { return ESIntegTestCase.this.nodePlugins(); } - - @Override - public Settings transportClientSettings() { - return Settings.builder().put(initialTransportClientSettings.build()) - .put(ESIntegTestCase.this.transportClientSettings()).build(); - } - - @Override - public Collection> transportClientPlugins() { - Collection> plugins = ESIntegTestCase.this.transportClientPlugins(); - if (plugins.contains(getTestTransportPlugin()) == false) { - plugins = new ArrayList<>(plugins); - plugins.add(getTestTransportPlugin()); - } - return Collections.unmodifiableCollection(plugins); - } }; } @@ -2035,35 +1955,6 @@ public TransportRequestHandler interceptHandler( } } - /** - * Returns the client ratio configured via - */ - private static double transportClientRatio() { - String property = System.getProperty(TESTS_CLIENT_RATIO); - if (property == null || property.isEmpty()) { - return Double.NaN; - } - return Double.parseDouble(property); - } - - /** - * Returns the transport client ratio from the class level annotation or via - * {@link System#getProperty(String)} if available. If both are not available this will - * return a random ratio in the interval {@code [0..1]}. - */ - protected double getPerTestTransportClientRatio() { - final ClusterScope annotation = getAnnotation(this.getClass(), ClusterScope.class); - double perTestRatio = -1; - if (annotation != null) { - perTestRatio = annotation.transportClientRatio(); - } - if (perTestRatio == -1) { - return Double.isNaN(TRANSPORT_CLIENT_RATIO) ? randomDouble() : TRANSPORT_CLIENT_RATIO; - } - assert perTestRatio >= 0.0 && perTestRatio <= 1.0; - return perTestRatio; - } - /** * Returns path to a random directory that can be used to create a temporary file system repo */ diff --git a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java index 3b4f8c8f55d4c..695564690c4b1 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java +++ b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java @@ -38,7 +38,6 @@ import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags; import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags.Flag; import org.elasticsearch.client.Client; -import org.elasticsearch.client.transport.TransportClient; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.action.index.MappingUpdatedAction; @@ -61,12 +60,10 @@ import org.elasticsearch.common.io.FileSystemUtils; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.lease.Releasables; -import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.settings.MockSecureSettings; import org.elasticsearch.common.settings.SecureSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.Settings.Builder; -import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; @@ -105,7 +102,6 @@ import org.elasticsearch.search.SearchService; import org.elasticsearch.test.disruption.ServiceDisruptionScheme; import org.elasticsearch.test.transport.MockTransportService; -import org.elasticsearch.transport.MockTransportClient; import org.elasticsearch.transport.TransportService; import org.elasticsearch.transport.TransportSettings; @@ -153,7 +149,6 @@ import static org.elasticsearch.node.Node.INITIAL_STATE_TIMEOUT_SETTING; import static org.elasticsearch.test.ESTestCase.assertBusy; import static org.elasticsearch.test.ESTestCase.awaitBusy; -import static org.elasticsearch.test.ESTestCase.getTestTransportType; import static org.elasticsearch.test.ESTestCase.randomFrom; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; @@ -170,7 +165,7 @@ * The cluster supports randomized configuration such that nodes started in the cluster will * automatically load asserting services tracking resources like file handles or open searchers. *

      - * The Cluster is bound to a test lifecycle where tests must call {@link #beforeTest(java.util.Random, double)} and + * The Cluster is bound to a test lifecycle where tests must call {@link #beforeTest(java.util.Random)} and * {@link #afterTest()} to initialize and reset the cluster in order to be more reproducible. The term "more" relates * to the async nature of Elasticsearch in combination with randomized testing. Once Threads and asynchronous calls * are involved reproducibility is very limited. This class should only be used through {@link ESIntegTestCase}. @@ -720,7 +715,7 @@ private static String getRoleSuffix(Settings settings) { public synchronized Client client() { ensureOpen(); /* Randomly return a client to one of the nodes in the cluster */ - return getOrBuildRandomNode().client(random); + return getOrBuildRandomNode().client(); } /** @@ -729,7 +724,7 @@ public synchronized Client client() { */ public Client dataNodeClient() { /* Randomly return a client to one of the nodes in the cluster */ - return getRandomNodeAndClient(DATA_NODE_PREDICATE).client(random); + return getRandomNodeAndClient(DATA_NODE_PREDICATE).client(); } /** @@ -762,12 +757,12 @@ public synchronized Client coordOnlyNodeClient() { ensureOpen(); NodeAndClient randomNodeAndClient = getRandomNodeAndClient(NO_DATA_NO_MASTER_PREDICATE); if (randomNodeAndClient != null) { - return randomNodeAndClient.client(random); + return randomNodeAndClient.client(); } int nodeId = nextNodeId.getAndIncrement(); Settings settings = getSettings(nodeId, random.nextLong(), Settings.EMPTY); startCoordinatingOnlyNode(settings); - return getRandomNodeAndClient(NO_DATA_NO_MASTER_PREDICATE).client(random); + return getRandomNodeAndClient(NO_DATA_NO_MASTER_PREDICATE).client(); } public synchronized String startCoordinatingOnlyNode(Settings settings) { @@ -777,26 +772,17 @@ public synchronized String startCoordinatingOnlyNode(Settings settings) { return startNode(builder); } - /** - * Returns a transport client - */ - public synchronized Client transportClient() { - // randomly return a transport client going to one of the nodes in the cluster - return getOrBuildRandomNode().transportClient(); - } - /** * Returns a node client to a given node. */ public Client client(String nodeName) { NodeAndClient nodeAndClient = nodes.get(nodeName); if (nodeAndClient != null) { - return nodeAndClient.client(random); + return nodeAndClient.client(); } throw new AssertionError("No node found with name: [" + nodeName + "]"); } - /** * Returns a "smart" node client to a random node in the cluster */ @@ -830,7 +816,6 @@ private final class NodeAndClient implements Closeable { private MockNode node; private final Settings originalNodeSettings; private Client nodeClient; - private Client transportClient; private final AtomicBoolean closed = new AtomicBoolean(false); private final String name; private final int nodeAndClientId; @@ -862,18 +847,11 @@ public boolean isMasterEligible() { return Node.NODE_MASTER_SETTING.get(node.settings()); } - Client client(Random random) { - double nextDouble = random.nextDouble(); - if (nextDouble < transportClientRatio) { - if (logger.isTraceEnabled()) { - logger.trace("Using transport client for node [{}] sniff: [{}]", node.settings().get("node.name"), false); - } - return getOrBuildTransportClient(); - } else { - return getOrBuildNodeClient(); - } + Client client() { + return getOrBuildNodeClient(); } + // TODO: collapse these together? Client nodeClient() { if (closed.get()) { throw new RuntimeException("already closed"); @@ -881,13 +859,6 @@ Client nodeClient() { return getOrBuildNodeClient(); } - Client transportClient() { - if (closed.get()) { - throw new RuntimeException("already closed"); - } - return getOrBuildTransportClient(); - } - private Client getOrBuildNodeClient() { synchronized (InternalTestCluster.this) { if (closed.get()) { @@ -900,28 +871,10 @@ private Client getOrBuildNodeClient() { } } - private Client getOrBuildTransportClient() { - synchronized (InternalTestCluster.this) { - if (closed.get()) { - throw new RuntimeException("already closed"); - } - if (transportClient == null) { - /* don't sniff client for now - doesn't work will all tests - * since it might throw NoNodeAvailableException if nodes are - * shut down. we first need support of transportClientRatio - * as annotations or so */ - transportClient = new TransportClientFactory(nodeConfigurationSource.transportClientSettings(), - baseDir, nodeConfigurationSource.transportClientPlugins()).client(node, clusterName); - } - return clientWrapper.apply(transportClient); - } - } - void resetClient() { if (closed.get() == false) { - Releasables.close(nodeClient, transportClient); + Releasables.close(nodeClient); nodeClient = null; - transportClient = null; } } @@ -1023,44 +976,9 @@ private void markNodeDataDirsAsNotEligibleForWipe(Node node) { } } - public static final String TRANSPORT_CLIENT_PREFIX = "transport_client_"; - - private static class TransportClientFactory { - private final Settings settings; - private final Path baseDir; - private final Collection> plugins; - - TransportClientFactory(Settings settings, Path baseDir, Collection> plugins) { - this.settings = settings != null ? settings : Settings.EMPTY; - this.baseDir = baseDir; - this.plugins = plugins; - } - - public Client client(Node node, String clusterName) { - TransportAddress addr = node.injector().getInstance(TransportService.class).boundAddress().publishAddress(); - Settings nodeSettings = node.settings(); - Builder builder = Settings.builder() - .put("client.transport.nodes_sampler_interval", "1s") - .put(Environment.PATH_HOME_SETTING.getKey(), baseDir) - .put("node.name", TRANSPORT_CLIENT_PREFIX + node.settings().get("node.name")) - .put(ClusterName.CLUSTER_NAME_SETTING.getKey(), clusterName).put("client.transport.sniff", false) - .put("logger.prefix", nodeSettings.get("logger.prefix", "")) - .put("logger.level", nodeSettings.get("logger.level", "INFO")) - .put(settings); - if (NetworkModule.TRANSPORT_TYPE_SETTING.exists(settings)) { - builder.put(NetworkModule.TRANSPORT_TYPE_SETTING.getKey(), NetworkModule.TRANSPORT_TYPE_SETTING.get(settings)); - } else { - builder.put(NetworkModule.TRANSPORT_TYPE_SETTING.getKey(), getTestTransportType()); - } - TransportClient client = new MockTransportClient(builder.build(), plugins); - client.addTransportAddress(addr); - return client; - } - } - @Override - public synchronized void beforeTest(Random random, double transportClientRatio) throws IOException, InterruptedException { - super.beforeTest(random, transportClientRatio); + public synchronized void beforeTest(Random random) throws IOException, InterruptedException { + super.beforeTest(random); reset(true); } @@ -1807,7 +1725,7 @@ private void removeExclusions(Set excludedNodeIds) { if (excludedNodeIds.isEmpty() == false) { logger.info("removing voting config exclusions for {} after restart/shutdown", excludedNodeIds); try { - Client client = getRandomNodeAndClient(node -> excludedNodeIds.contains(node.name) == false).client(random); + Client client = getRandomNodeAndClient(node -> excludedNodeIds.contains(node.name) == false).client(); client.execute(ClearVotingConfigExclusionsAction.INSTANCE, new ClearVotingConfigExclusionsRequest()).get(); } catch (InterruptedException | ExecutionException e) { throw new AssertionError("unexpected", e); @@ -2253,7 +2171,7 @@ public boolean hasNext() { @Override public Client next() { - return iterator.next().client(random); + return iterator.next().client(); } @Override diff --git a/test/framework/src/main/java/org/elasticsearch/test/TestCluster.java b/test/framework/src/main/java/org/elasticsearch/test/TestCluster.java index b5aa26a38549e..a469088bcb6c3 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/TestCluster.java +++ b/test/framework/src/main/java/org/elasticsearch/test/TestCluster.java @@ -52,8 +52,6 @@ public abstract class TestCluster implements Closeable { protected Random random; - protected double transportClientRatio = 0.0; - public TestCluster(long seed) { this.seed = seed; } @@ -65,10 +63,7 @@ public long seed() { /** * This method should be executed before each test to reset the cluster to its initial state. */ - public void beforeTest(Random random, double transportClientRatio) throws IOException, InterruptedException { - assert transportClientRatio >= 0.0 && transportClientRatio <= 1.0; - logger.debug("Reset test cluster with transport client ratio: [{}]", transportClientRatio); - this.transportClientRatio = transportClientRatio; + public void beforeTest(Random random) throws IOException, InterruptedException { this.random = new Random(random.nextLong()); } diff --git a/test/framework/src/test/java/org/elasticsearch/test/test/InternalTestClusterTests.java b/test/framework/src/test/java/org/elasticsearch/test/test/InternalTestClusterTests.java index a690e4bbbdd21..6fffd246ec245 100644 --- a/test/framework/src/test/java/org/elasticsearch/test/test/InternalTestClusterTests.java +++ b/test/framework/src/test/java/org/elasticsearch/test/test/InternalTestClusterTests.java @@ -192,11 +192,11 @@ public Settings transportClientSettings() { try { { Random random = new Random(seed); - cluster0.beforeTest(random, random.nextDouble()); + cluster0.beforeTest(random); } { Random random = new Random(seed); - cluster1.beforeTest(random, random.nextDouble()); + cluster1.beforeTest(random); } assertArrayEquals(cluster0.getNodeNames(), cluster1.getNodeNames()); Iterator iterator1 = cluster1.getClients().iterator(); @@ -248,7 +248,7 @@ public Settings transportClientSettings() { true, minNumDataNodes, maxNumDataNodes, clusterName1, nodeConfigurationSource, numClientNodes, nodePrefix, mockPlugins(), Function.identity()); try { - cluster.beforeTest(random(), 0.0); + cluster.beforeTest(random()); final int originalMasterCount = cluster.numMasterNodes(); final Map shardNodePaths = new HashMap<>(); for (String name: cluster.getNodeNames()) { @@ -279,7 +279,7 @@ public Settings transportClientSettings() { Files.createDirectories(newTestMarker); final String newNode3 = cluster.startNode(poorNodeDataPathSettings); assertThat(getNodePaths(cluster, newNode3)[0], equalTo(dataPath)); - cluster.beforeTest(random(), 0.0); + cluster.beforeTest(random()); assertFileNotExists(newTestMarker); // the cluster should be reset for a new test, cleaning up the extra path we made assertFileNotExists(testMarker); // a new unknown node used this path, it should be cleaned assertFileExists(stableTestMarker); // but leaving the structure of existing, reused nodes @@ -287,7 +287,7 @@ public Settings transportClientSettings() { assertThat("data paths for " + name + " changed", getNodePaths(cluster, name), equalTo(shardNodePaths.get(name))); } - cluster.beforeTest(random(), 0.0); + cluster.beforeTest(random()); assertFileExists(stableTestMarker); // but leaving the structure of existing, reused nodes for (String name: cluster.getNodeNames()) { assertThat("data paths for " + name + " changed", getNodePaths(cluster, name), @@ -336,7 +336,7 @@ public Settings transportClientSettings() { .put(NetworkModule.TRANSPORT_TYPE_KEY, transportClient).build(); } }, 0, "", mockPlugins(), Function.identity()); - cluster.beforeTest(random(), 0.0); + cluster.beforeTest(random()); List roles = new ArrayList<>(); for (int i = 0; i < numNodes; i++) { final DiscoveryNode.Role role = i == numNodes - 1 && roles.contains(MASTER) == false ? @@ -426,7 +426,7 @@ public Settings transportClientSettings() { "test", nodeConfigurationSource, 0, nodePrefix, plugins, Function.identity()); try { - cluster.beforeTest(random(), 0.0); + cluster.beforeTest(random()); switch (randomInt(2)) { case 0: cluster.stopRandomDataNode(); diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/CcrIntegTestCase.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/CcrIntegTestCase.java index eba3532f063bf..fd7c8bd4086ef 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/CcrIntegTestCase.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/CcrIntegTestCase.java @@ -154,7 +154,7 @@ public final void startClusters() throws Exception { InternalTestCluster leaderCluster = new InternalTestCluster(randomLong(), createTempDir(), true, true, numberOfNodesPerCluster(), numberOfNodesPerCluster(), "leader_cluster", createNodeConfigurationSource(null, true), 0, "leader", mockPlugins, Function.identity()); - leaderCluster.beforeTest(random(), 0.0D); + leaderCluster.beforeTest(random()); leaderCluster.ensureAtLeastNumDataNodes(numberOfNodesPerCluster()); assertBusy(() -> { ClusterService clusterService = leaderCluster.getInstance(ClusterService.class); @@ -167,7 +167,7 @@ public final void startClusters() throws Exception { mockPlugins, Function.identity()); clusterGroup = new ClusterGroup(leaderCluster, followerCluster); - followerCluster.beforeTest(random(), 0.0D); + followerCluster.beforeTest(random()); followerCluster.ensureAtLeastNumDataNodes(numberOfNodesPerCluster()); assertBusy(() -> { ClusterService clusterService = followerCluster.getInstance(ClusterService.class); diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CcrDisabledIT.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CcrDisabledIT.java index 92e0ea06a30e7..3e182ef20d699 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CcrDisabledIT.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CcrDisabledIT.java @@ -9,7 +9,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; -import org.elasticsearch.xpack.core.XPackClientPlugin; import org.elasticsearch.xpack.core.XPackSettings; import java.util.Collection; @@ -28,18 +27,8 @@ protected Settings nodeSettings(int nodeOrdinal) { .put(XPackSettings.SECURITY_ENABLED.getKey(), false).build(); } - @Override - protected Settings transportClientSettings() { - return Settings.builder().put(super.transportClientSettings()).put(XPackSettings.SECURITY_ENABLED.getKey(), false).build(); - } - @Override protected Collection> nodePlugins() { return Collections.singletonList(LocalStateCcr.class); } - - @Override - protected Collection> transportClientPlugins() { - return Collections.singletonList(XPackClientPlugin.class); - } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/AbstractLicensesIntegrationTestCase.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/AbstractLicensesIntegrationTestCase.java index 2d4991d514027..58caf0c512b08 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/AbstractLicensesIntegrationTestCase.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/AbstractLicensesIntegrationTestCase.java @@ -15,14 +15,12 @@ import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.xpack.core.LocalStateCompositeXPackPlugin; -import org.elasticsearch.xpack.core.XPackClientPlugin; import org.elasticsearch.xpack.core.XPackSettings; import java.util.Arrays; import java.util.Collection; import java.util.concurrent.CountDownLatch; -@ESIntegTestCase.ClusterScope(transportClientRatio = 0.0) public abstract class AbstractLicensesIntegrationTestCase extends ESIntegTestCase { @Override @@ -35,17 +33,6 @@ protected Collection> nodePlugins() { return Arrays.asList(LocalStateCompositeXPackPlugin.class, CommonAnalysisPlugin.class); } - @Override - protected Collection> transportClientPlugins() { - return Arrays.asList(XPackClientPlugin.class, CommonAnalysisPlugin.class); - } - - @Override - protected Settings transportClientSettings() { - // Plugin should be loaded on the transport client as well - return nodeSettings(0); - } - protected void putLicense(final License license) throws InterruptedException { final CountDownLatch latch = new CountDownLatch(1); ClusterService clusterService = internalCluster().getInstance(ClusterService.class, internalCluster().getMasterName()); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicenseServiceClusterTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicenseServiceClusterTests.java index 00d1c47cdedaa..10a441526400d 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicenseServiceClusterTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicenseServiceClusterTests.java @@ -25,7 +25,7 @@ import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.CoreMatchers.nullValue; -@ClusterScope(scope = TEST, numDataNodes = 0, numClientNodes = 0, maxNumDataNodes = 0, transportClientRatio = 0) +@ClusterScope(scope = TEST, numDataNodes = 0, numClientNodes = 0, maxNumDataNodes = 0) public class LicenseServiceClusterTests extends AbstractLicensesIntegrationTestCase { @Override @@ -50,11 +50,6 @@ protected Collection> nodePlugins() { return Arrays.asList(LocalStateCompositeXPackPlugin.class, CommonAnalysisPlugin.class, Netty4Plugin.class); } - @Override - protected Collection> transportClientPlugins() { - return nodePlugins(); - } - public void testClusterRestartWithLicense() throws Exception { wipeAllLicenses(); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/StartBasicLicenseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/StartBasicLicenseTests.java index 1f09f959883f3..9a2cb24e48b72 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/StartBasicLicenseTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/StartBasicLicenseTests.java @@ -16,7 +16,6 @@ import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.transport.Netty4Plugin; import org.elasticsearch.xpack.core.LocalStateCompositeXPackPlugin; -import org.elasticsearch.xpack.core.XPackClientPlugin; import java.io.InputStreamReader; import java.nio.charset.StandardCharsets; @@ -25,7 +24,7 @@ import static org.elasticsearch.test.ESIntegTestCase.Scope.SUITE; -@ESIntegTestCase.ClusterScope(scope = SUITE, transportClientRatio = 0.0) +@ESIntegTestCase.ClusterScope(scope = SUITE) public class StartBasicLicenseTests extends AbstractLicensesIntegrationTestCase { @Override @@ -46,11 +45,6 @@ protected Collection> nodePlugins() { return Arrays.asList(LocalStateCompositeXPackPlugin.class, Netty4Plugin.class); } - @Override - protected Collection> transportClientPlugins() { - return Arrays.asList(XPackClientPlugin.class, Netty4Plugin.class); - } - public void testStartBasicLicense() throws Exception { LicensingClient licensingClient = new LicensingClient(client()); License license = TestUtils.generateSignedLicense("trial", License.VERSION_CURRENT, -1, TimeValue.timeValueHours(24)); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/StartTrialLicenseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/StartTrialLicenseTests.java index eac145dd0ffa8..537df2a4a51ed 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/StartTrialLicenseTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/StartTrialLicenseTests.java @@ -15,7 +15,6 @@ import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.transport.Netty4Plugin; import org.elasticsearch.xpack.core.LocalStateCompositeXPackPlugin; -import org.elasticsearch.xpack.core.XPackClientPlugin; import java.io.InputStreamReader; import java.nio.charset.StandardCharsets; @@ -24,7 +23,7 @@ import static org.elasticsearch.test.ESIntegTestCase.Scope.SUITE; -@ESIntegTestCase.ClusterScope(scope = SUITE, transportClientRatio = 0.0) +@ESIntegTestCase.ClusterScope(scope = SUITE) public class StartTrialLicenseTests extends AbstractLicensesIntegrationTestCase { @Override @@ -45,11 +44,6 @@ protected Collection> nodePlugins() { return Arrays.asList(LocalStateCompositeXPackPlugin.class, Netty4Plugin.class); } - @Override - protected Collection> transportClientPlugins() { - return Arrays.asList(XPackClientPlugin.class, Netty4Plugin.class); - } - public void testStartTrial() throws Exception { LicensingClient licensingClient = new LicensingClient(client()); ensureStartingWithBasic(); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/snapshots/SourceOnlySnapshotIT.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/snapshots/SourceOnlySnapshotIT.java index b03f51d1d195b..81be978d33103 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/snapshots/SourceOnlySnapshotIT.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/snapshots/SourceOnlySnapshotIT.java @@ -54,7 +54,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; -@ESIntegTestCase.ClusterScope(numDataNodes = 0, transportClientRatio = 0.0) +@ESIntegTestCase.ClusterScope(numDataNodes = 0) public class SourceOnlySnapshotIT extends ESIntegTestCase { @Override diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleInitialisationTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleInitialisationTests.java index 673c10f885447..cc23579547ed0 100644 --- a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleInitialisationTests.java +++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleInitialisationTests.java @@ -77,7 +77,7 @@ import static org.hamcrest.core.Is.is; import static org.hamcrest.core.IsNull.nullValue; -@ESIntegTestCase.ClusterScope(scope = Scope.TEST, numDataNodes = 0, transportClientRatio = 0.0) +@ESIntegTestCase.ClusterScope(scope = Scope.TEST, numDataNodes = 0) public class IndexLifecycleInitialisationTests extends ESIntegTestCase { private Settings settings; private LifecyclePolicy lifecyclePolicy; diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/support/BaseMlIntegTestCase.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/support/BaseMlIntegTestCase.java index b73bb7c5ad499..aae9b0d89c2a0 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/support/BaseMlIntegTestCase.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/support/BaseMlIntegTestCase.java @@ -67,8 +67,7 @@ * Note for other type of integration tests you should use the external test cluster created by the Gradle integTest task. * For example tests extending this base class test with the non native autodetect process. */ -@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, numClientNodes = 0, - transportClientRatio = 0, supportsDedicatedMasters = false) +@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, numClientNodes = 0, supportsDedicatedMasters = false) public abstract class BaseMlIntegTestCase extends ESIntegTestCase { @Override @@ -89,28 +88,12 @@ protected Settings nodeSettings(int nodeOrdinal) { return settings.build(); } - @Override - protected Settings transportClientSettings() { - Settings.Builder settings = Settings.builder().put(super.transportClientSettings()); - settings.put(XPackSettings.MACHINE_LEARNING_ENABLED.getKey(), true); - settings.put(XPackSettings.SECURITY_ENABLED.getKey(), false); - settings.put(XPackSettings.WATCHER_ENABLED.getKey(), false); - settings.put(XPackSettings.MONITORING_ENABLED.getKey(), false); - settings.put(XPackSettings.GRAPH_ENABLED.getKey(), false); - return settings.build(); - } - @Override protected Collection> nodePlugins() { return Arrays.asList(LocalStateMachineLearning.class, CommonAnalysisPlugin.class, ReindexPlugin.class); } - @Override - protected Collection> transportClientPlugins() { - return nodePlugins(); - } - @Override protected Collection> getMockPlugins() { return Arrays.asList(TestSeedPlugin.class, MockHttpTransport.TestPlugin.class); diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/MultiNodesStatsTests.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/MultiNodesStatsTests.java index 057c6d8f3a225..3aef6dad5889f 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/MultiNodesStatsTests.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/MultiNodesStatsTests.java @@ -24,7 +24,7 @@ import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.instanceOf; -@ClusterScope(scope = Scope.TEST, numDataNodes = 0, numClientNodes = 0, transportClientRatio = 0.0) +@ClusterScope(scope = Scope.TEST, numDataNodes = 0, numClientNodes = 0) public class MultiNodesStatsTests extends MonitoringIntegTestCase { @Override diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/cleaner/AbstractIndicesCleanerTestCase.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/cleaner/AbstractIndicesCleanerTestCase.java index 42d89608efd46..588882e91f854 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/cleaner/AbstractIndicesCleanerTestCase.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/cleaner/AbstractIndicesCleanerTestCase.java @@ -21,7 +21,7 @@ import static org.elasticsearch.test.ESIntegTestCase.Scope.TEST; -@ClusterScope(scope = TEST, numDataNodes = 0, numClientNodes = 0, transportClientRatio = 0.0) +@ClusterScope(scope = TEST, numDataNodes = 0, numClientNodes = 0) public abstract class AbstractIndicesCleanerTestCase extends MonitoringIntegTestCase { static final DateFormatter DATE_FORMATTER = DateFormatter.forPattern("yyyy.MM.dd").withZone(ZoneOffset.UTC); diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExporterIT.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExporterIT.java index d4ea017ca8b3d..aa58b9fa60660 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExporterIT.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExporterIT.java @@ -81,7 +81,7 @@ import static org.hamcrest.Matchers.notNullValue; @ESIntegTestCase.ClusterScope(scope = Scope.TEST, - numDataNodes = 1, numClientNodes = 0, transportClientRatio = 0.0, supportsDedicatedMasters = false) + numDataNodes = 1, numClientNodes = 0, supportsDedicatedMasters = false) public class HttpExporterIT extends MonitoringIntegTestCase { private final List clusterAlertBlacklist = diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExporterSslIT.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExporterSslIT.java index 2621a43ccee4d..34885563c87ac 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExporterSslIT.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExporterSslIT.java @@ -40,7 +40,7 @@ import static org.hamcrest.Matchers.notNullValue; @ESIntegTestCase.ClusterScope(scope = Scope.SUITE, - numDataNodes = 1, numClientNodes = 0, transportClientRatio = 0.0, supportsDedicatedMasters = false) + numDataNodes = 1, numClientNodes = 0, supportsDedicatedMasters = false) public class HttpExporterSslIT extends MonitoringIntegTestCase { private final Settings globalSettings = Settings.builder().put("path.home", createTempDir()).build(); diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/local/LocalExporterIntegTests.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/local/LocalExporterIntegTests.java index ed5d3ef40ae24..5db71f72cf6ef 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/local/LocalExporterIntegTests.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/local/LocalExporterIntegTests.java @@ -58,7 +58,7 @@ import static org.hamcrest.Matchers.lessThanOrEqualTo; @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.SUITE, - numDataNodes = 1, numClientNodes = 0, transportClientRatio = 0.0, supportsDedicatedMasters = false) + numDataNodes = 1, numClientNodes = 0, supportsDedicatedMasters = false) public class LocalExporterIntegTests extends LocalExporterIntegTestCase { private final String indexTimeFormat = randomFrom("yy", "yyyy", "yyyy.MM", "yyyy-MM", "MM.yyyy", "MM", null); diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/local/LocalExporterResourceIntegTests.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/local/LocalExporterResourceIntegTests.java index 4af080b7fabde..2604aad151cd3 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/local/LocalExporterResourceIntegTests.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/local/LocalExporterResourceIntegTests.java @@ -28,7 +28,7 @@ import static org.hamcrest.Matchers.notNullValue; @ESIntegTestCase.ClusterScope(scope = TEST, - numDataNodes = 1, numClientNodes = 0, transportClientRatio = 0.0, supportsDedicatedMasters = false) + numDataNodes = 1, numClientNodes = 0, supportsDedicatedMasters = false) public class LocalExporterResourceIntegTests extends LocalExporterIntegTestCase { public LocalExporterResourceIntegTests() throws Exception { diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/test/MonitoringIntegTestCase.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/test/MonitoringIntegTestCase.java index d716e2e479f4a..c350b9a374ab2 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/test/MonitoringIntegTestCase.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/test/MonitoringIntegTestCase.java @@ -20,8 +20,6 @@ import org.elasticsearch.test.store.MockFSIndexStore; import org.elasticsearch.test.transport.MockTransportService; import org.elasticsearch.xpack.core.XPackClient; -import org.elasticsearch.xpack.core.XPackClientPlugin; -import org.elasticsearch.xpack.core.XPackSettings; import org.elasticsearch.xpack.core.monitoring.client.MonitoringClient; import org.elasticsearch.xpack.core.monitoring.exporter.MonitoringTemplateUtils; import org.elasticsearch.xpack.core.monitoring.test.MockPainlessScriptEngine; @@ -63,14 +61,6 @@ protected Settings nodeSettings(int nodeOrdinal) { return builder.build(); } - @Override - protected Settings transportClientSettings() { - return Settings.builder().put(super.transportClientSettings()) -// .put(XPackSettings.SECURITY_ENABLED.getKey(), false) - .put(XPackSettings.WATCHER_ENABLED.getKey(), false) - .build(); - } - @Override protected Collection> getMockPlugins() { Set> plugins = new HashSet<>(super.getMockPlugins()); @@ -85,12 +75,6 @@ protected Collection> nodePlugins() { MockIngestPlugin.class, CommonAnalysisPlugin.class); } - @Override - protected Collection> transportClientPlugins() { - return Arrays.asList(XPackClientPlugin.class, MockPainlessScriptEngine.TestPlugin.class, - MockIngestPlugin.class, CommonAnalysisPlugin.class); - } - protected MonitoringClient monitoringClient() { return randomBoolean() ? new XPackClient(client()).monitoring() : new MonitoringClient(client()); } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/DocumentLevelSecurityTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/DocumentLevelSecurityTests.java index 77f5b6c57b4c3..dc8f93a0cdcf7 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/DocumentLevelSecurityTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/DocumentLevelSecurityTests.java @@ -86,11 +86,6 @@ protected Collection> nodePlugins() { return Arrays.asList(LocalStateSecurity.class, CommonAnalysisPlugin.class, ParentJoinPlugin.class, InternalSettingsPlugin.class); } - @Override - protected Collection> transportClientPlugins() { - return nodePlugins(); - } - @Override protected String configUsers() { final String usersPasswdHashed = new String(getFastStoredHashAlgoForTests().hash(USERS_PASSWD)); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/FieldLevelSecurityTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/FieldLevelSecurityTests.java index 5e83ef99563d9..c5ff8242b6bec 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/FieldLevelSecurityTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/FieldLevelSecurityTests.java @@ -35,7 +35,6 @@ import org.elasticsearch.search.aggregations.bucket.terms.Terms; import org.elasticsearch.search.fetch.subphase.highlight.HighlightBuilder; import org.elasticsearch.search.sort.SortOrder; -import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.InternalSettingsPlugin; import org.elasticsearch.test.SecurityIntegTestCase; import org.elasticsearch.xpack.core.XPackSettings; @@ -63,8 +62,6 @@ import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.nullValue; -// The random usage of meta fields such as _timestamp add noise to the test, so disable random index templates: -@ESIntegTestCase.ClusterScope(transportClientRatio = 0.0) public class FieldLevelSecurityTests extends SecurityIntegTestCase { protected static final SecureString USERS_PASSWD = new SecureString("change_me".toCharArray()); @@ -75,11 +72,6 @@ protected Collection> nodePlugins() { InternalSettingsPlugin.class); } - @Override - protected Collection> transportClientPlugins() { - return nodePlugins(); - } - @Override protected String configUsers() { final String usersPasswHashed = new String(getFastStoredHashAlgoForTests().hash(USERS_PASSWD)); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/ShrinkIndexWithSecurityTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/ShrinkIndexWithSecurityTests.java index 349bef3fc3152..87db72bcf0285 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/ShrinkIndexWithSecurityTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/ShrinkIndexWithSecurityTests.java @@ -18,7 +18,7 @@ /** * Integration test that uses multiple data nodes to test that the shrink index api works with security. */ -@ClusterScope(minNumDataNodes = 2, transportClientRatio = 0.0) +@ClusterScope(minNumDataNodes = 2) public class ShrinkIndexWithSecurityTests extends SecurityIntegTestCase { @Override diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/test/SecurityIntegTestCase.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/test/SecurityIntegTestCase.java index d862d248976da..a3b75b188a278 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/test/SecurityIntegTestCase.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/test/SecurityIntegTestCase.java @@ -74,7 +74,6 @@ * * @see SecuritySettingsSource */ -@ESIntegTestCase.ClusterScope(transportClientRatio = 0.0) public abstract class SecurityIntegTestCase extends ESIntegTestCase { private static SecuritySettingsSource SECURITY_DEFAULT_SETTINGS; @@ -84,7 +83,7 @@ public abstract class SecurityIntegTestCase extends ESIntegTestCase { * Settings used when the {@link org.elasticsearch.test.ESIntegTestCase.ClusterScope} is set to * {@link org.elasticsearch.test.ESIntegTestCase.Scope#SUITE} or {@link org.elasticsearch.test.ESIntegTestCase.Scope#TEST} * so that some of the configuration parameters can be overridden through test instance methods, similarly - * to how {@link #nodeSettings(int)} and {@link #transportClientSettings()} work. + * to how {@link #nodeSettings(int)} works. */ private static CustomSecuritySettingsSource customSecuritySettingsSource = null; diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/transport/SecurityServerTransportServiceTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/transport/SecurityServerTransportServiceTests.java index c8238ab49b146..0367fc4f74be7 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/transport/SecurityServerTransportServiceTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/transport/SecurityServerTransportServiceTests.java @@ -5,20 +5,11 @@ */ package org.elasticsearch.transport; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.test.SecurityIntegTestCase; -import org.elasticsearch.xpack.core.XPackSettings; import org.elasticsearch.xpack.security.transport.SecurityServerTransportInterceptor; // this class sits in org.elasticsearch.transport so that TransportService.requestHandlers is visible public class SecurityServerTransportServiceTests extends SecurityIntegTestCase { - @Override - protected Settings transportClientSettings() { - return Settings.builder() - .put(super.transportClientSettings()) - .put(XPackSettings.SECURITY_ENABLED.getKey(), true) - .build(); - } public void testSecurityServerTransportServiceWrapsAllHandlers() { for (TransportService transportService : internalCluster().getInstances(TransportService.class)) { diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/TemplateUpgraderTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/TemplateUpgraderTests.java index b04b8c8ac3d36..f6e5552ddbc53 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/TemplateUpgraderTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/TemplateUpgraderTests.java @@ -32,7 +32,7 @@ * templates when started within security, as this requires certain * system privileges */ -@ClusterScope(maxNumDataNodes = 1, scope = Scope.SUITE, numClientNodes = 0, transportClientRatio = 0.0) +@ClusterScope(maxNumDataNodes = 1, scope = Scope.SUITE, numClientNodes = 0) public class TemplateUpgraderTests extends SecurityIntegTestCase { public void testTemplatesWorkAsExpected() throws Exception { diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/logfile/AuditTrailSettingsUpdateTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/logfile/AuditTrailSettingsUpdateTests.java index 866c52989af6f..23408f5668ec9 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/logfile/AuditTrailSettingsUpdateTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/logfile/AuditTrailSettingsUpdateTests.java @@ -29,7 +29,7 @@ import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; -@ClusterScope(scope = TEST, numDataNodes = 1, transportClientRatio = 0.0) +@ClusterScope(scope = TEST, numDataNodes = 1) public class AuditTrailSettingsUpdateTests extends SecurityIntegTestCase { private static Settings startupFilterSettings; diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/filter/IpFilteringIntegrationTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/filter/IpFilteringIntegrationTests.java index 9f0b7863d30e7..2284a920eac11 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/filter/IpFilteringIntegrationTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/filter/IpFilteringIntegrationTests.java @@ -25,7 +25,7 @@ import static org.hamcrest.Matchers.is; // no client nodes, no transport clients, as they all get rejected on network connections -@ClusterScope(scope = Scope.SUITE, numDataNodes = 0, numClientNodes = 0, transportClientRatio = 0.0) +@ClusterScope(scope = Scope.SUITE, numDataNodes = 0, numClientNodes = 0) public class IpFilteringIntegrationTests extends SecurityIntegTestCase { private static int randomClientPort; diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/filter/IpFilteringUpdateTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/filter/IpFilteringUpdateTests.java index 96922aa8822e4..65a5fb080cdb0 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/filter/IpFilteringUpdateTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/filter/IpFilteringUpdateTests.java @@ -21,7 +21,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.hamcrest.Matchers.is; -@ClusterScope(scope = TEST, supportsDedicatedMasters = false, numDataNodes = 1, transportClientRatio = 0.0) +@ClusterScope(scope = TEST, supportsDedicatedMasters = false, numDataNodes = 1) public class IpFilteringUpdateTests extends SecurityIntegTestCase { private static int randomClientPort; diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/ssl/EllipticCurveSSLTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/ssl/EllipticCurveSSLTests.java index 71b3d7a1990b4..24e6a5bca009b 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/ssl/EllipticCurveSSLTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/ssl/EllipticCurveSSLTests.java @@ -54,21 +54,6 @@ protected Settings nodeSettings(int nodeOrdinal) { .build(); } - @Override - protected Settings transportClientSettings() { - final Path keyPath = getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/prime256v1-key.pem"); - final Path certPath = getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/prime256v1-cert.pem"); - return Settings.builder() - .put(super.transportClientSettings().filter(s -> s.startsWith("xpack.security.transport.ssl") == false)) - .put("xpack.security.transport.ssl.enabled", true) - .put("xpack.security.transport.ssl.key", keyPath) - .put("xpack.security.transport.ssl.certificate", certPath) - .put("xpack.security.transport.ssl.certificate_authorities", certPath) - // disable hostname verificate since these certs aren't setup for that - .put("xpack.security.transport.ssl.verification_mode", "certificate") - .build(); - } - @Override protected boolean transportSSLEnabled() { return true; diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/ssl/SSLTrustRestrictionsTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/ssl/SSLTrustRestrictionsTests.java index 9c540f559b688..e2df24643aec3 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/ssl/SSLTrustRestrictionsTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/ssl/SSLTrustRestrictionsTests.java @@ -49,7 +49,7 @@ * * @see RestrictedTrustManager */ -@ESIntegTestCase.ClusterScope(numDataNodes = 1, numClientNodes = 0, supportsDedicatedMasters = false, transportClientRatio = 0.0) +@ESIntegTestCase.ClusterScope(numDataNodes = 1, numClientNodes = 0, supportsDedicatedMasters = false) @TestLogging("org.elasticsearch.xpack.ssl.RestrictedTrustManager:DEBUG") public class SSLTrustRestrictionsTests extends SecurityIntegTestCase { diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/action/AbstractSqlIntegTestCase.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/action/AbstractSqlIntegTestCase.java index c741667ba9ebf..58af33172da6e 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/action/AbstractSqlIntegTestCase.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/action/AbstractSqlIntegTestCase.java @@ -16,7 +16,7 @@ import static org.elasticsearch.test.ESIntegTestCase.Scope.SUITE; -@ESIntegTestCase.ClusterScope(scope = SUITE, numDataNodes = 0, numClientNodes = 0, maxNumDataNodes = 0, transportClientRatio = 0) +@ESIntegTestCase.ClusterScope(scope = SUITE, numDataNodes = 0, numClientNodes = 0, maxNumDataNodes = 0) public abstract class AbstractSqlIntegTestCase extends ESIntegTestCase { @Override @@ -35,10 +35,5 @@ protected Settings nodeSettings(int nodeOrdinal) { protected Collection> nodePlugins() { return Collections.singletonList(LocalStateSQLXPackPlugin.class); } - - @Override - protected Collection> transportClientPlugins() { - return nodePlugins(); - } } diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/action/SqlDisabledIT.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/action/SqlDisabledIT.java index 51be147005173..335ab8bc84928 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/action/SqlDisabledIT.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/action/SqlDisabledIT.java @@ -31,14 +31,6 @@ protected Settings nodeSettings(int nodeOrdinal) { .build(); } - @Override - protected Settings transportClientSettings() { - return Settings.builder() - .put(super.transportClientSettings()) - .put(XPackSettings.SQL_ENABLED.getKey(), randomBoolean()) - .build(); - } - public void testSqlAction() { Throwable throwable = expectThrows(Throwable.class, () -> new SqlQueryRequestBuilder(client(), SqlQueryAction.INSTANCE).query("SHOW tables").get()); diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/AbstractWatcherIntegrationTestCase.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/AbstractWatcherIntegrationTestCase.java index 8c44ba831b359..5b11b444db3ca 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/AbstractWatcherIntegrationTestCase.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/AbstractWatcherIntegrationTestCase.java @@ -21,7 +21,6 @@ import org.elasticsearch.cluster.metadata.MappingMetaData; import org.elasticsearch.cluster.routing.IndexRoutingTable; import org.elasticsearch.common.collect.Tuple; -import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; @@ -44,7 +43,6 @@ import org.elasticsearch.test.transport.MockTransportService; import org.elasticsearch.xpack.core.XPackClient; import org.elasticsearch.xpack.core.XPackSettings; -import org.elasticsearch.xpack.core.security.SecurityField; import org.elasticsearch.xpack.core.watcher.WatcherState; import org.elasticsearch.xpack.core.watcher.client.WatcherClient; import org.elasticsearch.xpack.core.watcher.execution.ExecutionState; @@ -97,7 +95,7 @@ import static org.hamcrest.core.Is.is; import static org.hamcrest.core.IsNot.not; -@ClusterScope(scope = SUITE, numClientNodes = 0, transportClientRatio = 0, maxNumDataNodes = 3) +@ClusterScope(scope = SUITE, numClientNodes = 0, maxNumDataNodes = 3) public abstract class AbstractWatcherIntegrationTestCase extends ESIntegTestCase { public static final String WATCHER_LANG = Script.DEFAULT_SCRIPT_LANG; @@ -119,15 +117,6 @@ protected Settings nodeSettings(int nodeOrdinal) { .build(); } - @Override - protected Settings transportClientSettings() { - return Settings.builder() - .put("client.transport.sniff", false) - .put(NetworkModule.TRANSPORT_TYPE_KEY, SecurityField.NAME4) - .put(NetworkModule.HTTP_TYPE_KEY, SecurityField.NAME4) - .build(); - } - @Override protected Set excludeTemplates() { Set excludes = new HashSet<>(); @@ -152,11 +141,6 @@ protected Collection> nodePlugins() { return pluginTypes(); } - @Override - protected Collection> transportClientPlugins() { - return nodePlugins(); - } - protected List> pluginTypes() { List> types = new ArrayList<>(); diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/SingleNodeTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/SingleNodeTests.java index b03d75af113af..318d1f1a8b1a1 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/SingleNodeTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/SingleNodeTests.java @@ -29,7 +29,7 @@ import static org.hamcrest.Matchers.is; @LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/36782") -@ClusterScope(scope = SUITE, numClientNodes = 0, transportClientRatio = 0, maxNumDataNodes = 1, supportsDedicatedMasters = false) +@ClusterScope(scope = SUITE, numClientNodes = 0, maxNumDataNodes = 1, supportsDedicatedMasters = false) public class SingleNodeTests extends AbstractWatcherIntegrationTestCase { @Override From 9b800a5801606b0bb24313dc2a99ccb1e256ff0d Mon Sep 17 00:00:00 2001 From: James Rodewig Date: Fri, 24 May 2019 13:39:29 -0400 Subject: [PATCH 101/224] [DOCS] Fix nested def list for Asciidoctor (#42353) --- .../settings/notification-settings.asciidoc | 69 ++++++++++--------- 1 file changed, 35 insertions(+), 34 deletions(-) diff --git a/docs/reference/settings/notification-settings.asciidoc b/docs/reference/settings/notification-settings.asciidoc index 400b55271f975..77f755b09e285 100644 --- a/docs/reference/settings/notification-settings.asciidoc +++ b/docs/reference/settings/notification-settings.asciidoc @@ -304,38 +304,39 @@ Specifies account information for sending notifications via PagerDuty. You can specify the following PagerDuty account attributes: + -- - `name`;; - A name for the PagerDuty account associated with the API key you - are using to access PagerDuty. Required. - - `secure_service_api_key` (<>);; - The https://developer.pagerduty.com/documentation/rest/authentication[ - PagerDuty API key] to use to access PagerDuty. Required. - - - `event_defaults`;; - Default values for {xpack-ref}/actions-pagerduty.html#pagerduty-event-trigger-incident-attributes[ - PagerDuty event attributes]. Optional. - + - `description`:: - A string that contains the default description for PagerDuty events. - If no default is configured, each PagerDuty action must specify a - `description`. - + - `incident_key`:: - A string that contains the default incident key to use when sending - PagerDuty events. - + - `client`:: - A string that specifies the default monitoring client. - + - `client_url`:: - The URL of the default monitoring client. - + - `event_type`:: - The default event type. Valid values: `trigger`,`resolve`, `acknowledge`. - + - `attach_payload`:: - Whether or not to provide the watch payload as context for - the event by default. Valid values: `true`, `false`. +`name`;; +A name for the PagerDuty account associated with the API key you +are using to access PagerDuty. Required. + +`secure_service_api_key` (<>);; +The https://developer.pagerduty.com/documentation/rest/authentication[ +PagerDuty API key] to use to access PagerDuty. Required. +-- ++ +`event_defaults`;; +Default values for {xpack-ref}/actions-pagerduty.html#pagerduty-event-trigger-incident-attributes[ +PagerDuty event attributes]. Optional. ++ +-- +`description`:: +A string that contains the default description for PagerDuty events. +If no default is configured, each PagerDuty action must specify a +`description`. + +`incident_key`:: +A string that contains the default incident key to use when sending +PagerDuty events. + +`client`:: +A string that specifies the default monitoring client. + +`client_url`:: +The URL of the default monitoring client. + +`event_type`:: +The default event type. Valid values: `trigger`,`resolve`, `acknowledge`. + +`attach_payload`:: +Whether or not to provide the watch payload as context for +the event by default. Valid values: `true`, `false`. -- \ No newline at end of file From 37be0a164f006d074d4675abc55b215828f27fa8 Mon Sep 17 00:00:00 2001 From: David Roberts Date: Fri, 24 May 2019 18:44:51 +0100 Subject: [PATCH 102/224] [DOCS] Adding ML-specific prerequisites to setup docs (#42529) --- docs/reference/settings/ml-settings.asciidoc | 5 +++++ docs/reference/setup/install/windows.asciidoc | 8 ++++++++ docs/reference/setup/install/zip-windows.asciidoc | 8 ++++++++ 3 files changed, 21 insertions(+) diff --git a/docs/reference/settings/ml-settings.asciidoc b/docs/reference/settings/ml-settings.asciidoc index dbc11223f40ce..09fb8adad8523 100644 --- a/docs/reference/settings/ml-settings.asciidoc +++ b/docs/reference/settings/ml-settings.asciidoc @@ -8,6 +8,11 @@ You do not need to configure any settings to use {ml}. It is enabled by default. +IMPORTANT: {ml-cap} uses SSE4.2 instructions, so will only work on machines whose +CPUs https://en.wikipedia.org/wiki/SSE4#Supporting_CPUs[support] SSE4.2. If you +run {es} on older hardware you must disable {ml} (by setting `xpack.ml.enabled` +to `false`). + All of these settings can be added to the `elasticsearch.yml` configuration file. The dynamic settings can also be updated across a cluster with the <>. diff --git a/docs/reference/setup/install/windows.asciidoc b/docs/reference/setup/install/windows.asciidoc index 83d1251148c4a..e53e8d4122070 100644 --- a/docs/reference/setup/install/windows.asciidoc +++ b/docs/reference/setup/install/windows.asciidoc @@ -12,6 +12,14 @@ You can continue using the `.zip` approach if you prefer. include::license.asciidoc[] +NOTE: On Windows the Elasticsearch {ml} feature requires the Microsoft Universal +C Runtime library. This is built into Windows 10, Windows Server 2016 and more +recent versions of Windows. For older versions of Windows it can be installed +via Windows Update, or from a +https://support.microsoft.com/en-us/help/2999226/update-for-universal-c-runtime-in-windows[separate download]. +If you cannot install the Microsoft Universal C Runtime library you can still +use the rest of Elasticsearch if you disable the {ml} feature. + The latest stable version of Elasticsearch can be found on the link:/downloads/elasticsearch[Download Elasticsearch] page. Other versions can be found on the diff --git a/docs/reference/setup/install/zip-windows.asciidoc b/docs/reference/setup/install/zip-windows.asciidoc index efed4b613c54b..669e3c72ea8b8 100644 --- a/docs/reference/setup/install/zip-windows.asciidoc +++ b/docs/reference/setup/install/zip-windows.asciidoc @@ -11,6 +11,14 @@ experience for Windows. You can continue using the `.zip` approach if you prefer include::license.asciidoc[] +NOTE: On Windows the Elasticsearch {ml} feature requires the Microsoft Universal +C Runtime library. This is built into Windows 10, Windows Server 2016 and more +recent versions of Windows. For older versions of Windows it can be installed +via Windows Update, or from a +https://support.microsoft.com/en-us/help/2999226/update-for-universal-c-runtime-in-windows[separate download]. +If you cannot install the Microsoft Universal C Runtime library you can still +use the rest of Elasticsearch if you disable the {ml} feature. + The latest stable version of Elasticsearch can be found on the link:/downloads/elasticsearch[Download Elasticsearch] page. Other versions can be found on the From e87c8b22c6f2e256a72e417e1436b12be2ccab40 Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Fri, 24 May 2019 10:53:24 -0700 Subject: [PATCH 103/224] Fix compilation This test was added while a PR removing transportClientRatio was in flight. --- .../cluster/state/TransportClusterStateActionDisruptionIT.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateActionDisruptionIT.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateActionDisruptionIT.java index 0d51f647ee28c..3b2b7d997d708 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateActionDisruptionIT.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateActionDisruptionIT.java @@ -46,7 +46,7 @@ import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.not; -@ESIntegTestCase.ClusterScope(numDataNodes = 0, scope = ESIntegTestCase.Scope.TEST, transportClientRatio = 0) +@ESIntegTestCase.ClusterScope(numDataNodes = 0, scope = ESIntegTestCase.Scope.TEST) public class TransportClusterStateActionDisruptionIT extends ESIntegTestCase { @Override From 44c15512ffc92c785874ab44876f0ec79056d523 Mon Sep 17 00:00:00 2001 From: Hendrik Muhs Date: Fri, 24 May 2019 19:54:06 +0200 Subject: [PATCH 104/224] [ML-DataFrame] add support for fixed_interval, calendar_interval, remove interval (#42427) * add support for fixed_interval, calendar_interval, remove interval * adapt HLRC * checkstyle * add a hlrc to server test * adapt yml test * improve naming and doc * improve interface and add test code for hlrc to server * address review comments * repair merge conflict * fix date patterns * address review comments * remove assert for warning * improve exception message * use constants --- .../pivot/DateHistogramGroupSource.java | 231 ++++++++++++----- .../pivot/DateHistogramGroupSourceTests.java | 17 +- .../hlrc/DateHistogramGroupSourceTests.java | 79 ++++++ .../pivot/DateHistogramGroupSource.java | 235 ++++++++++++++---- .../pivot/DateHistogramGroupSourceTests.java | 10 +- .../integration/DataFrameIntegTestCase.java | 25 +- .../integration/DataFrameTransformIT.java | 8 +- .../integration/DataFramePivotRestIT.java | 19 +- .../test/data_frame/preview_transforms.yml | 15 +- 9 files changed, 476 insertions(+), 163 deletions(-) create mode 100644 client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/pivot/hlrc/DateHistogramGroupSourceTests.java diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/pivot/DateHistogramGroupSource.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/pivot/DateHistogramGroupSource.java index 71e7e258c5c8b..d880bfd82140b 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/pivot/DateHistogramGroupSource.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/pivot/DateHistogramGroupSource.java @@ -20,9 +20,9 @@ package org.elasticsearch.client.dataframe.transforms.pivot; import org.elasticsearch.common.ParseField; -import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.ToXContentFragment; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; @@ -31,7 +31,11 @@ import java.io.IOException; import java.time.ZoneId; import java.time.ZoneOffset; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashSet; import java.util.Objects; +import java.util.Set; import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg; @@ -43,32 +47,164 @@ public class DateHistogramGroupSource extends SingleGroupSource implements ToXCo private static final ParseField TIME_ZONE = new ParseField("time_zone"); private static final ParseField FORMAT = new ParseField("format"); + // From DateHistogramAggregationBuilder in core, transplanted and modified to a set + // so we don't need to import a dependency on the class + private static final Set DATE_FIELD_UNITS = Collections.unmodifiableSet(new HashSet<>(Arrays.asList( + "year", + "1y", + "quarter", + "1q", + "month", + "1M", + "week", + "1w", + "day", + "1d", + "hour", + "1h", + "minute", + "1m", + "second", + "1s"))); + + /** + * Interval can be specified in 2 ways: + * + * fixed_interval fixed intervals like 1h, 1m, 1d + * calendar_interval calendar aware intervals like 1M, 1Y, ... + * + * Note: data frames do not support the deprecated interval option + */ + public interface Interval extends ToXContentFragment { + String getName(); + DateHistogramInterval getInterval(); + } + + public static class FixedInterval implements Interval { + private static final String NAME = "fixed_interval"; + private final DateHistogramInterval interval; + + public FixedInterval(DateHistogramInterval interval) { + this.interval = interval; + } + + @Override + public String getName() { + return NAME; + } + + @Override + public DateHistogramInterval getInterval() { + return interval; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.field(NAME); + interval.toXContent(builder, params); + return builder; + } + + @Override + public boolean equals(Object other) { + if (this == other) { + return true; + } + + if (other == null || getClass() != other.getClass()) { + return false; + } + + final FixedInterval that = (FixedInterval) other; + return Objects.equals(this.interval, that.interval); + } + + @Override + public int hashCode() { + return Objects.hash(interval); + } + } + + public static class CalendarInterval implements Interval { + private static final String NAME = "calendar_interval"; + private final DateHistogramInterval interval; + + public CalendarInterval(DateHistogramInterval interval) { + this.interval = interval; + if (DATE_FIELD_UNITS.contains(interval.toString()) == false) { + throw new IllegalArgumentException("The supplied interval [" + interval + "] could not be parsed " + + "as a calendar interval."); + } + } + + @Override + public String getName() { + return NAME; + } + + @Override + public DateHistogramInterval getInterval() { + return interval; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.field(NAME); + interval.toXContent(builder, params); + return builder; + } + + @Override + public boolean equals(Object other) { + if (this == other) { + return true; + } + + if (other == null || getClass() != other.getClass()) { + return false; + } + + final CalendarInterval that = (CalendarInterval) other; + return Objects.equals(this.interval, that.interval); + } + + @Override + public int hashCode() { + return Objects.hash(interval); + } + } + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>("date_histogram_group_source", true, (args) -> { String field = (String)args[0]; - long interval = 0; - DateHistogramInterval dateHistogramInterval = null; - if (args[1] instanceof Long) { - interval = (Long)args[1]; + String fixedInterval = (String) args[1]; + String calendarInterval = (String) args[2]; + + Interval interval = null; + + if (fixedInterval != null && calendarInterval != null) { + throw new IllegalArgumentException("You must specify either fixed_interval or calendar_interval, found both"); + } else if (fixedInterval != null) { + interval = new FixedInterval(new DateHistogramInterval(fixedInterval)); + } else if (calendarInterval != null) { + interval = new CalendarInterval(new DateHistogramInterval(calendarInterval)); } else { - dateHistogramInterval = (DateHistogramInterval) args[1]; + throw new IllegalArgumentException("You must specify either fixed_interval or calendar_interval, found none"); } - ZoneId zoneId = (ZoneId) args[2]; - String format = (String) args[3]; - return new DateHistogramGroupSource(field, interval, dateHistogramInterval, format, zoneId); + + ZoneId zoneId = (ZoneId) args[3]; + String format = (String) args[4]; + return new DateHistogramGroupSource(field, interval, format, zoneId); }); static { PARSER.declareString(optionalConstructorArg(), FIELD); - PARSER.declareField(optionalConstructorArg(), p -> { - if (p.currentToken() == XContentParser.Token.VALUE_NUMBER) { - return p.longValue(); - } else { - return new DateHistogramInterval(p.text()); - } - }, HistogramGroupSource.INTERVAL, ObjectParser.ValueType.LONG); + + PARSER.declareString(optionalConstructorArg(), new ParseField(FixedInterval.NAME)); + PARSER.declareString(optionalConstructorArg(), new ParseField(CalendarInterval.NAME)); + PARSER.declareField(optionalConstructorArg(), p -> { if (p.currentToken() == XContentParser.Token.VALUE_STRING) { return ZoneId.of(p.text()); @@ -84,15 +220,13 @@ public static DateHistogramGroupSource fromXContent(final XContentParser parser) return PARSER.apply(parser, null); } - private final long interval; - private final DateHistogramInterval dateHistogramInterval; + private final Interval interval; private final String format; private final ZoneId timeZone; - DateHistogramGroupSource(String field, long interval, DateHistogramInterval dateHistogramInterval, String format, ZoneId timeZone) { + DateHistogramGroupSource(String field, Interval interval, String format, ZoneId timeZone) { super(field); this.interval = interval; - this.dateHistogramInterval = dateHistogramInterval; this.format = format; this.timeZone = timeZone; } @@ -102,14 +236,10 @@ public Type getType() { return Type.DATE_HISTOGRAM; } - public long getInterval() { + public Interval getInterval() { return interval; } - public DateHistogramInterval getDateHistogramInterval() { - return dateHistogramInterval; - } - public String getFormat() { return format; } @@ -124,11 +254,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws if (field != null) { builder.field(FIELD.getPreferredName(), field); } - if (dateHistogramInterval == null) { - builder.field(HistogramGroupSource.INTERVAL.getPreferredName(), interval); - } else { - builder.field(HistogramGroupSource.INTERVAL.getPreferredName(), dateHistogramInterval.toString()); - } + interval.toXContent(builder, params); if (timeZone != null) { builder.field(TIME_ZONE.getPreferredName(), timeZone.toString()); } @@ -152,15 +278,14 @@ public boolean equals(Object other) { final DateHistogramGroupSource that = (DateHistogramGroupSource) other; return Objects.equals(this.field, that.field) && - Objects.equals(interval, that.interval) && - Objects.equals(dateHistogramInterval, that.dateHistogramInterval) && - Objects.equals(timeZone, that.timeZone) && - Objects.equals(format, that.format); + Objects.equals(this.interval, that.interval) && + Objects.equals(this.timeZone, that.timeZone) && + Objects.equals(this.format, that.format); } @Override public int hashCode() { - return Objects.hash(field, interval, dateHistogramInterval, timeZone, format); + return Objects.hash(field, interval, timeZone, format); } public static Builder builder() { @@ -170,8 +295,7 @@ public static Builder builder() { public static class Builder { private String field; - private long interval = 0; - private DateHistogramInterval dateHistogramInterval; + private Interval interval; private String format; private ZoneId timeZone; @@ -187,41 +311,14 @@ public Builder setField(String field) { /** * Set the interval for the DateHistogram grouping - * @param interval the time interval in milliseconds + * @param interval a fixed or calendar interval * @return the {@link Builder} with the interval set. */ - public Builder setInterval(long interval) { - if (interval < 1) { - throw new IllegalArgumentException("[interval] must be greater than or equal to 1."); - } + public Builder setInterval(Interval interval) { this.interval = interval; return this; } - /** - * Set the interval for the DateHistogram grouping - * @param timeValue The time value to use as the interval - * @return the {@link Builder} with the interval set. - */ - public Builder setInterval(TimeValue timeValue) { - return setInterval(timeValue.getMillis()); - } - - /** - * Sets the interval of the DateHistogram grouping - * - * If this DateHistogramInterval is set, it supersedes the #{@link DateHistogramGroupSource#getInterval()} - * @param dateHistogramInterval the DateHistogramInterval to set - * @return The {@link Builder} with the dateHistogramInterval set. - */ - public Builder setDateHistgramInterval(DateHistogramInterval dateHistogramInterval) { - if (dateHistogramInterval == null) { - throw new IllegalArgumentException("[dateHistogramInterval] must not be null"); - } - this.dateHistogramInterval = dateHistogramInterval; - return this; - } - /** * Set the optional String formatting for the time interval. * @param format The format of the output for the time interval key @@ -243,7 +340,7 @@ public Builder setTimeZone(ZoneId timeZone) { } public DateHistogramGroupSource build() { - return new DateHistogramGroupSource(field, interval, dateHistogramInterval, format, timeZone); + return new DateHistogramGroupSource(field, interval, format, timeZone); } } } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/pivot/DateHistogramGroupSourceTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/pivot/DateHistogramGroupSourceTests.java index c6a160d9b8b8d..32605f5c286ad 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/pivot/DateHistogramGroupSourceTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/pivot/DateHistogramGroupSourceTests.java @@ -27,15 +27,20 @@ public class DateHistogramGroupSourceTests extends AbstractXContentTestCase { + public static DateHistogramGroupSource.Interval randomDateHistogramInterval() { + if (randomBoolean()) { + return new DateHistogramGroupSource.FixedInterval(new DateHistogramInterval(randomPositiveTimeValue())); + } else { + return new DateHistogramGroupSource.CalendarInterval(new DateHistogramInterval(randomTimeValue(1, 1, "m", "h", "d", "w"))); + } + } + public static DateHistogramGroupSource randomDateHistogramGroupSource() { String field = randomAlphaOfLengthBetween(1, 20); - boolean setInterval = randomBoolean(); return new DateHistogramGroupSource(field, - setInterval ? randomLongBetween(1, 10_000) : 0, - setInterval ? null : randomFrom(DateHistogramInterval.days(10), - DateHistogramInterval.minutes(1), DateHistogramInterval.weeks(1)), - randomBoolean() ? randomAlphaOfLength(10) : null, - randomBoolean() ? randomZone() : null); + randomDateHistogramInterval(), + randomBoolean() ? randomAlphaOfLength(10) : null, + randomBoolean() ? randomZone() : null); } @Override diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/pivot/hlrc/DateHistogramGroupSourceTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/pivot/hlrc/DateHistogramGroupSourceTests.java new file mode 100644 index 0000000000000..dc31004607dcd --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/pivot/hlrc/DateHistogramGroupSourceTests.java @@ -0,0 +1,79 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.dataframe.transforms.pivot.hlrc; + +import org.elasticsearch.client.AbstractResponseTestCase; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; +import org.elasticsearch.xpack.core.dataframe.transforms.pivot.DateHistogramGroupSource; + +import static org.hamcrest.Matchers.equalTo; + +public class DateHistogramGroupSourceTests extends AbstractResponseTestCase< + DateHistogramGroupSource, + org.elasticsearch.client.dataframe.transforms.pivot.DateHistogramGroupSource> { + + public static DateHistogramGroupSource randomDateHistogramGroupSource() { + String field = randomAlphaOfLengthBetween(1, 20); + DateHistogramGroupSource dateHistogramGroupSource; // = new DateHistogramGroupSource(field); + if (randomBoolean()) { + dateHistogramGroupSource = new DateHistogramGroupSource(field, new DateHistogramGroupSource.FixedInterval( + new DateHistogramInterval(randomPositiveTimeValue()))); + } else { + dateHistogramGroupSource = new DateHistogramGroupSource(field, new DateHistogramGroupSource.CalendarInterval( + new DateHistogramInterval(randomTimeValue(1,1, "m", "h", "d", "w")))); + } + + if (randomBoolean()) { + dateHistogramGroupSource.setTimeZone(randomZone()); + } + if (randomBoolean()) { + dateHistogramGroupSource.setFormat(randomAlphaOfLength(10)); + } + return dateHistogramGroupSource; + } + + @Override + protected DateHistogramGroupSource createServerTestInstance() { + return randomDateHistogramGroupSource(); + } + + @Override + protected org.elasticsearch.client.dataframe.transforms.pivot.DateHistogramGroupSource doParseToClientInstance(XContentParser parser) { + return org.elasticsearch.client.dataframe.transforms.pivot.DateHistogramGroupSource.fromXContent(parser); + } + + @Override + protected void assertInstances(DateHistogramGroupSource serverTestInstance, + org.elasticsearch.client.dataframe.transforms.pivot.DateHistogramGroupSource clientInstance) { + assertThat(serverTestInstance.getField(), equalTo(clientInstance.getField())); + assertThat(serverTestInstance.getFormat(), equalTo(clientInstance.getFormat())); + assertSameInterval(serverTestInstance.getInterval(), clientInstance.getInterval()); + assertThat(serverTestInstance.getTimeZone(), equalTo(clientInstance.getTimeZone())); + assertThat(serverTestInstance.getType().name(), equalTo(clientInstance.getType().name())); + } + + private void assertSameInterval(DateHistogramGroupSource.Interval serverTestInstance, + org.elasticsearch.client.dataframe.transforms.pivot.DateHistogramGroupSource.Interval clientInstance) { + assertEquals(serverTestInstance.getName(), clientInstance.getName()); + assertEquals(serverTestInstance.getInterval(), clientInstance.getInterval()); + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/pivot/DateHistogramGroupSource.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/pivot/DateHistogramGroupSource.java index f4bf094235ae4..a3861ef65f648 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/pivot/DateHistogramGroupSource.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/pivot/DateHistogramGroupSource.java @@ -8,10 +8,13 @@ import org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.ToXContentFragment; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; import java.io.IOException; @@ -19,28 +22,186 @@ import java.time.ZoneOffset; import java.util.Objects; +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg; + public class DateHistogramGroupSource extends SingleGroupSource { + private static final int CALENDAR_INTERVAL_ID = 1; + private static final int FIXED_INTERVAL_ID = 0; + + /** + * Interval can be specified in 2 ways: + * + * fixed_interval fixed intervals like 1h, 1m, 1d + * calendar_interval calendar aware intervals like 1M, 1Y, ... + * + * Note: data frames do not support the deprecated interval option + */ + public interface Interval extends Writeable, ToXContentFragment { + String getName(); + DateHistogramInterval getInterval(); + byte getIntervalTypeId(); + } + + public static class FixedInterval implements Interval { + private static final String NAME = "fixed_interval"; + private final DateHistogramInterval interval; + + public FixedInterval(DateHistogramInterval interval) { + this.interval = interval; + } + + public FixedInterval(StreamInput in) throws IOException { + this.interval = new DateHistogramInterval(in); + } + + @Override + public String getName() { + return NAME; + } + + @Override + public DateHistogramInterval getInterval() { + return interval; + } + + @Override + public byte getIntervalTypeId() { + return FIXED_INTERVAL_ID; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.field(NAME); + interval.toXContent(builder, params); + return builder; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + interval.writeTo(out); + } + + @Override + public boolean equals(Object other) { + if (this == other) { + return true; + } + + if (other == null || getClass() != other.getClass()) { + return false; + } + + final FixedInterval that = (FixedInterval) other; + return Objects.equals(this.interval, that.interval); + } + + @Override + public int hashCode() { + return Objects.hash(interval); + } + } + + public static class CalendarInterval implements Interval { + private static final String NAME = "calendar_interval"; + private final DateHistogramInterval interval; + + public CalendarInterval(DateHistogramInterval interval) { + this.interval = interval; + if (DateHistogramAggregationBuilder.DATE_FIELD_UNITS.get(interval.toString()) == null) { + throw new IllegalArgumentException("The supplied interval [" + interval + "] could not be parsed " + + "as a calendar interval."); + } + } + + public CalendarInterval(StreamInput in) throws IOException { + this.interval = new DateHistogramInterval(in); + } + + @Override + public String getName() { + return NAME; + } + + @Override + public DateHistogramInterval getInterval() { + return interval; + } + + @Override + public byte getIntervalTypeId() { + return CALENDAR_INTERVAL_ID; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.field(NAME); + interval.toXContent(builder, params); + return builder; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + interval.writeTo(out); + } + + @Override + public boolean equals(Object other) { + if (this == other) { + return true; + } + + if (other == null || getClass() != other.getClass()) { + return false; + } + + final CalendarInterval that = (CalendarInterval) other; + return Objects.equals(this.interval, that.interval); + } + + @Override + public int hashCode() { + return Objects.hash(interval); + } + } + + private Interval readInterval(StreamInput in) throws IOException { + byte id = in.readByte(); + switch (id) { + case FIXED_INTERVAL_ID: + return new FixedInterval(in); + case CALENDAR_INTERVAL_ID: + return new CalendarInterval(in); + default: + throw new IllegalArgumentException("unknown interval type [" + id + "]"); + } + } + + private void writeInterval(Interval interval, StreamOutput out) throws IOException { + out.write(interval.getIntervalTypeId()); + interval.writeTo(out); + } + private static final String NAME = "data_frame_date_histogram_group"; private static final ParseField TIME_ZONE = new ParseField("time_zone"); private static final ParseField FORMAT = new ParseField("format"); private static final ConstructingObjectParser STRICT_PARSER = createParser(false); private static final ConstructingObjectParser LENIENT_PARSER = createParser(true); - private long interval = 0; - private DateHistogramInterval dateHistogramInterval; + + private final Interval interval; private String format; private ZoneId timeZone; - public DateHistogramGroupSource(String field) { + public DateHistogramGroupSource(String field, Interval interval) { super(field); + this.interval = interval; } public DateHistogramGroupSource(StreamInput in) throws IOException { super(in); - this.interval = in.readLong(); - this.dateHistogramInterval = in.readOptionalWriteable(DateHistogramInterval::new); + this.interval = readInterval(in); this.timeZone = in.readOptionalZoneId(); this.format = in.readOptionalString(); } @@ -48,24 +209,28 @@ public DateHistogramGroupSource(StreamInput in) throws IOException { private static ConstructingObjectParser createParser(boolean lenient) { ConstructingObjectParser parser = new ConstructingObjectParser<>(NAME, lenient, (args) -> { String field = (String) args[0]; - return new DateHistogramGroupSource(field); - }); + String fixedInterval = (String) args[1]; + String calendarInterval = (String) args[2]; - declareValuesSourceFields(parser); + Interval interval = null; - parser.declareField((histogram, interval) -> { - if (interval instanceof Long) { - histogram.setInterval((long) interval); + if (fixedInterval != null && calendarInterval != null) { + throw new IllegalArgumentException("You must specify either fixed_interval or calendar_interval, found both"); + } else if (fixedInterval != null) { + interval = new FixedInterval(new DateHistogramInterval(fixedInterval)); + } else if (calendarInterval != null) { + interval = new CalendarInterval(new DateHistogramInterval(calendarInterval)); } else { - histogram.setDateHistogramInterval((DateHistogramInterval) interval); + throw new IllegalArgumentException("You must specify either fixed_interval or calendar_interval, found none"); } - }, p -> { - if (p.currentToken() == XContentParser.Token.VALUE_NUMBER) { - return p.longValue(); - } else { - return new DateHistogramInterval(p.text()); - } - }, HistogramGroupSource.INTERVAL, ObjectParser.ValueType.LONG); + + return new DateHistogramGroupSource(field, interval); + }); + + declareValuesSourceFields(parser); + + parser.declareString(optionalConstructorArg(), new ParseField(FixedInterval.NAME)); + parser.declareString(optionalConstructorArg(), new ParseField(CalendarInterval.NAME)); parser.declareField(DateHistogramGroupSource::setTimeZone, p -> { if (p.currentToken() == XContentParser.Token.VALUE_STRING) { @@ -88,28 +253,10 @@ public Type getType() { return Type.DATE_HISTOGRAM; } - public long getInterval() { + public Interval getInterval() { return interval; } - public void setInterval(long interval) { - if (interval < 1) { - throw new IllegalArgumentException("[interval] must be greater than or equal to 1."); - } - this.interval = interval; - } - - public DateHistogramInterval getDateHistogramInterval() { - return dateHistogramInterval; - } - - public void setDateHistogramInterval(DateHistogramInterval dateHistogramInterval) { - if (dateHistogramInterval == null) { - throw new IllegalArgumentException("[dateHistogramInterval] must not be null"); - } - this.dateHistogramInterval = dateHistogramInterval; - } - public String getFormat() { return format; } @@ -129,8 +276,7 @@ public void setTimeZone(ZoneId timeZone) { @Override public void writeTo(StreamOutput out) throws IOException { out.writeOptionalString(field); - out.writeLong(interval); - out.writeOptionalWriteable(dateHistogramInterval); + writeInterval(interval, out); out.writeOptionalZoneId(timeZone); out.writeOptionalString(format); } @@ -141,11 +287,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws if (field != null) { builder.field(FIELD.getPreferredName(), field); } - if (dateHistogramInterval == null) { - builder.field(HistogramGroupSource.INTERVAL.getPreferredName(), interval); - } else { - builder.field(HistogramGroupSource.INTERVAL.getPreferredName(), dateHistogramInterval.toString()); - } + interval.toXContent(builder, params); if (timeZone != null) { builder.field(TIME_ZONE.getPreferredName(), timeZone.toString()); } @@ -170,13 +312,12 @@ public boolean equals(Object other) { return Objects.equals(this.field, that.field) && Objects.equals(interval, that.interval) && - Objects.equals(dateHistogramInterval, that.dateHistogramInterval) && Objects.equals(timeZone, that.timeZone) && Objects.equals(format, that.format); } @Override public int hashCode() { - return Objects.hash(field, interval, dateHistogramInterval, timeZone, format); + return Objects.hash(field, interval, timeZone, format); } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/pivot/DateHistogramGroupSourceTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/pivot/DateHistogramGroupSourceTests.java index e9d989d5e5f38..7ce0374331323 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/pivot/DateHistogramGroupSourceTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/pivot/DateHistogramGroupSourceTests.java @@ -17,13 +17,15 @@ public class DateHistogramGroupSourceTests extends AbstractSerializingTestCase groups = new HashMap<>(); - groups.put("by-day", createDateHistogramGroupSource("timestamp", DateHistogramInterval.DAY, null, null)); + groups.put("by-day", createDateHistogramGroupSourceWithCalendarInterval("timestamp", DateHistogramInterval.DAY, null, null)); groups.put("by-user", TermsGroupSource.builder().setField("user_id").build()); groups.put("by-business", TermsGroupSource.builder().setField("business_id").build()); @@ -48,10 +48,8 @@ public void testDataFrameTransformCrud() throws Exception { "reviews-by-user-business-day", REVIEWS_INDEX_NAME); - final RequestOptions options = - expectWarnings("[interval] on [date_histogram] is deprecated, use [fixed_interval] or [calendar_interval] in the future."); - assertTrue(putDataFrameTransform(config, options).isAcknowledged()); - assertTrue(startDataFrameTransform(config.getId(), options).isStarted()); + assertTrue(putDataFrameTransform(config, RequestOptions.DEFAULT).isAcknowledged()); + assertTrue(startDataFrameTransform(config.getId(), RequestOptions.DEFAULT).isStarted()); waitUntilCheckpoint(config.getId(), 1L); diff --git a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFramePivotRestIT.java b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFramePivotRestIT.java index 770eaec7bd141..22586a7b37d27 100644 --- a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFramePivotRestIT.java +++ b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFramePivotRestIT.java @@ -216,7 +216,7 @@ public void testDateHistogramPivot() throws Exception { + " \"group_by\": {" + " \"by_hr\": {" + " \"date_histogram\": {" - + " \"interval\": \"1h\",\"field\":\"timestamp\",\"format\":\"yyyy-MM-DD_HH\"" + + " \"fixed_interval\": \"1h\",\"field\":\"timestamp\",\"format\":\"yyyy-MM-dd_HH\"" + " } } }," + " \"aggregations\": {" + " \"avg_rating\": {" @@ -226,14 +226,11 @@ public void testDateHistogramPivot() throws Exception { + "}"; createDataframeTransformRequest.setJsonEntity(config); - createDataframeTransformRequest.setOptions(expectWarnings("[interval] on [date_histogram] is deprecated, " + - "use [fixed_interval] or [calendar_interval] in the future.")); Map createDataframeTransformResponse = entityAsMap(client().performRequest(createDataframeTransformRequest)); assertThat(createDataframeTransformResponse.get("acknowledged"), equalTo(Boolean.TRUE)); - startAndWaitForTransform(transformId, dataFrameIndex, BASIC_AUTH_VALUE_DATA_FRAME_ADMIN_WITH_SOME_DATA_ACCESS, - "[interval] on [date_histogram] is deprecated, use [fixed_interval] or [calendar_interval] in the future."); + startAndWaitForTransform(transformId, dataFrameIndex, BASIC_AUTH_VALUE_DATA_FRAME_ADMIN_WITH_SOME_DATA_ACCESS); assertTrue(indexExists(dataFrameIndex)); Map indexStats = getAsMap(dataFrameIndex + "/_stats"); @@ -253,7 +250,7 @@ public void testPreviewTransform() throws Exception { config += " \"pivot\": {" + " \"group_by\": {" + " \"reviewer\": {\"terms\": { \"field\": \"user_id\" }}," - + " \"by_day\": {\"date_histogram\": {\"interval\": \"1d\",\"field\":\"timestamp\",\"format\":\"yyyy-MM-DD\"}}}," + + " \"by_day\": {\"date_histogram\": {\"fixed_interval\": \"1d\",\"field\":\"timestamp\",\"format\":\"yyyy-MM-dd\"}}}," + " \"aggregations\": {" + " \"avg_rating\": {" + " \"avg\": {" @@ -261,8 +258,6 @@ public void testPreviewTransform() throws Exception { + " } } } }" + "}"; createPreviewRequest.setJsonEntity(config); - createPreviewRequest.setOptions(expectWarnings("[interval] on [date_histogram] is deprecated, " + - "use [fixed_interval] or [calendar_interval] in the future.")); Map previewDataframeResponse = entityAsMap(client().performRequest(createPreviewRequest)); List> preview = (List>)previewDataframeResponse.get("preview"); @@ -290,7 +285,7 @@ public void testPivotWithMaxOnDateField() throws Exception { config +=" \"pivot\": { \n" + " \"group_by\": {\n" + " \"by_day\": {\"date_histogram\": {\n" + - " \"interval\": \"1d\",\"field\":\"timestamp\",\"format\":\"yyyy-MM-DD\"\n" + + " \"fixed_interval\": \"1d\",\"field\":\"timestamp\",\"format\":\"yyyy-MM-dd\"\n" + " }}\n" + " },\n" + " \n" + @@ -305,13 +300,11 @@ public void testPivotWithMaxOnDateField() throws Exception { + "}"; createDataframeTransformRequest.setJsonEntity(config); - createDataframeTransformRequest.setOptions(expectWarnings("[interval] on [date_histogram] is deprecated, " + - "use [fixed_interval] or [calendar_interval] in the future.")); + Map createDataframeTransformResponse = entityAsMap(client().performRequest(createDataframeTransformRequest)); assertThat(createDataframeTransformResponse.get("acknowledged"), equalTo(Boolean.TRUE)); - startAndWaitForTransform(transformId, dataFrameIndex, BASIC_AUTH_VALUE_DATA_FRAME_ADMIN_WITH_SOME_DATA_ACCESS, - "[interval] on [date_histogram] is deprecated, use [fixed_interval] or [calendar_interval] in the future."); + startAndWaitForTransform(transformId, dataFrameIndex, BASIC_AUTH_VALUE_DATA_FRAME_ADMIN_WITH_SOME_DATA_ACCESS); assertTrue(indexExists(dataFrameIndex)); // we expect 21 documents as there shall be 21 days worth of docs diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/preview_transforms.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/preview_transforms.yml index 1d4a190b24e14..5e58048b3bf0f 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/preview_transforms.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/preview_transforms.yml @@ -67,12 +67,7 @@ setup: --- "Test preview transform": - - skip: - reason: date histo interval is deprecated - features: "warnings" - do: - warnings: - - "[interval] on [date_histogram] is deprecated, use [fixed_interval] or [calendar_interval] in the future." data_frame.preview_data_frame_transform: body: > { @@ -80,7 +75,7 @@ setup: "pivot": { "group_by": { "airline": {"terms": {"field": "airline"}}, - "by-hour": {"date_histogram": {"interval": "1h", "field": "time", "format": "yyyy-MM-DD HH"}}}, + "by-hour": {"date_histogram": {"fixed_interval": "1h", "field": "time", "format": "yyyy-MM-dd HH"}}}, "aggs": { "avg_response": {"avg": {"field": "responsetime"}}, "time.max": {"max": {"field": "time"}}, @@ -89,17 +84,17 @@ setup: } } - match: { preview.0.airline: foo } - - match: { preview.0.by-hour: "2017-02-49 00" } + - match: { preview.0.by-hour: "2017-02-18 00" } - match: { preview.0.avg_response: 1.0 } - match: { preview.0.time.max: "2017-02-18T00:30:00.000Z" } - match: { preview.0.time.min: "2017-02-18T00:00:00.000Z" } - match: { preview.1.airline: bar } - - match: { preview.1.by-hour: "2017-02-49 01" } + - match: { preview.1.by-hour: "2017-02-18 01" } - match: { preview.1.avg_response: 42.0 } - match: { preview.1.time.max: "2017-02-18T01:00:00.000Z" } - match: { preview.1.time.min: "2017-02-18T01:00:00.000Z" } - match: { preview.2.airline: foo } - - match: { preview.2.by-hour: "2017-02-49 01" } + - match: { preview.2.by-hour: "2017-02-18 01" } - match: { preview.2.avg_response: 42.0 } - match: { preview.2.time.max: "2017-02-18T01:01:00.000Z" } - match: { preview.2.time.min: "2017-02-18T01:01:00.000Z" } @@ -128,7 +123,7 @@ setup: "pivot": { "group_by": { "airline": {"terms": {"field": "airline"}}, - "by-hour": {"date_histogram": {"interval": "1h", "field": "time", "format": "yyyy-MM-DD HH"}}}, + "by-hour": {"date_histogram": {"fixed_interval": "1h", "field": "time", "format": "yyyy-MM-dd HH"}}}, "aggs": {"avg_response": {"avg": {"field": "responsetime"}}} } } From 8f030336353dc76e83ff6ddd069cae05f8a2e8d5 Mon Sep 17 00:00:00 2001 From: James Rodewig Date: Fri, 24 May 2019 15:03:11 -0400 Subject: [PATCH 105/224] [DOCS] Move callouts to end of line for Asciidoctor migration (#42356) --- docs/reference/sql/functions/geo.asciidoc | 29 +++++++++++++++++------ 1 file changed, 22 insertions(+), 7 deletions(-) diff --git a/docs/reference/sql/functions/geo.asciidoc b/docs/reference/sql/functions/geo.asciidoc index 112ddfffce6ed..72f69af85529f 100644 --- a/docs/reference/sql/functions/geo.asciidoc +++ b/docs/reference/sql/functions/geo.asciidoc @@ -27,7 +27,9 @@ interchangeably with the following exceptions: .Synopsis: [source, sql] -------------------------------------------------- -ST_AsWKT(geometry<1>) +ST_AsWKT( + geometry <1> +) -------------------------------------------------- *Input*: @@ -52,7 +54,9 @@ include-tagged::{sql-specs}/docs/geo.csv-spec[aswkt] .Synopsis: [source, sql] -------------------------------------------------- -ST_WKTToSQL(string<1>) +ST_WKTToSQL( + string <1> +) -------------------------------------------------- *Input*: @@ -78,7 +82,9 @@ include-tagged::{sql-specs}/docs/geo.csv-spec[aswkt] .Synopsis: [source, sql] -------------------------------------------------- -ST_GeometryType(geometry<1>) +ST_GeometryType( + geometry <1> +) -------------------------------------------------- *Input*: @@ -102,7 +108,9 @@ include-tagged::{sql-specs}/docs/geo.csv-spec[geometrytype] .Synopsis: [source, sql] -------------------------------------------------- -ST_X(geometry<1>) +ST_X( + geometry <1> +) -------------------------------------------------- *Input*: @@ -126,7 +134,9 @@ include-tagged::{sql-specs}/docs/geo.csv-spec[x] .Synopsis: [source, sql] -------------------------------------------------- -ST_Y(geometry<1>) +ST_Y( + geometry <1> +) -------------------------------------------------- *Input*: @@ -150,7 +160,9 @@ include-tagged::{sql-specs}/docs/geo.csv-spec[y] .Synopsis: [source, sql] -------------------------------------------------- -ST_Z(geometry<1>) +ST_Z( + geometry <1> +) -------------------------------------------------- *Input*: @@ -174,7 +186,10 @@ include-tagged::{sql-specs}/docs/geo.csv-spec[z] .Synopsis: [source, sql] -------------------------------------------------- -ST_Distance(geometry<1>, geometry<2>) +ST_Distance( + geometry, <1> + geometry <2> +) -------------------------------------------------- *Input*: From 5eb38ec51782651a2ed88f96d514ce6b3f253bb7 Mon Sep 17 00:00:00 2001 From: David Roberts Date: Fri, 24 May 2019 20:04:05 +0100 Subject: [PATCH 106/224] [ML] Fix possible race condition when closing an opening job (#42506) This change fixes a race condition that would result in an in-memory data structure becoming out-of-sync with persistent tasks in cluster state. If repeated often enough this could result in it being impossible to open any ML jobs on the affected node, as the master node would think the node had capacity to open another job but the chosen node would error during the open sequence due to its in-memory data structure being full. The race could be triggered by opening a job and then closing it a tiny fraction of a second later. It is unlikely a user of the UI could open and close the job that fast, but a script or program calling the REST API could. The nasty thing is, from the externally observable states and stats everything would appear to be fine - the fast open then close sequence would appear to leave the job in the closed state. It's only later that the leftovers in the in-memory data structure might build up and cause a problem. --- .../autodetect/AutodetectProcessManager.java | 34 +++++++++++-------- 1 file changed, 19 insertions(+), 15 deletions(-) diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcessManager.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcessManager.java index 0fd8d1b5b7411..cbcaf54b46b9e 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcessManager.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcessManager.java @@ -401,16 +401,12 @@ protected void doRun() { logger.debug("Aborted opening job [{}] as it has been closed", jobId); return; } - if (processContext.getState() != ProcessContext.ProcessStateName.NOT_RUNNING) { - logger.debug("Cannot open job [{}] when its state is [{}]", - jobId, processContext.getState().getClass().getName()); - return; - } try { - createProcessAndSetRunning(processContext, job, params, closeHandler); - processContext.getAutodetectCommunicator().restoreState(params.modelSnapshot()); - setJobState(jobTask, JobState.OPENED); + if (createProcessAndSetRunning(processContext, job, params, closeHandler)) { + processContext.getAutodetectCommunicator().restoreState(params.modelSnapshot()); + setJobState(jobTask, JobState.OPENED); + } } catch (Exception e1) { // No need to log here as the persistent task framework will log it try { @@ -447,19 +443,25 @@ protected void doRun() { ElasticsearchMappings::resultsMapping, client, clusterState, resultsMappingUpdateHandler); } - private void createProcessAndSetRunning(ProcessContext processContext, - Job job, - AutodetectParams params, - BiConsumer handler) throws IOException { + private boolean createProcessAndSetRunning(ProcessContext processContext, + Job job, + AutodetectParams params, + BiConsumer handler) throws IOException { // At this point we lock the process context until the process has been started. // The reason behind this is to ensure closing the job does not happen before // the process is started as that can result to the job getting seemingly closed // but the actual process is hanging alive. processContext.tryLock(); try { + if (processContext.getState() != ProcessContext.ProcessStateName.NOT_RUNNING) { + logger.debug("Cannot open job [{}] when its state is [{}]", + job.getId(), processContext.getState().getClass().getName()); + return false; + } AutodetectCommunicator communicator = create(processContext.getJobTask(), job, params, handler); communicator.writeHeader(); processContext.setRunning(communicator); + return true; } finally { // Now that the process is running and we have updated its state we can unlock. // It is important to unlock before we initialize the communicator (ie. load the model state) @@ -592,6 +594,8 @@ public void closeJob(JobTask jobTask, boolean restart, String reason) { try { if (processContext.setDying() == false) { logger.debug("Cannot close job [{}] as it has been marked as dying", jobId); + // The only way we can get here is if 2 close requests are made very close together. + // The other close has done the work so it's safe to return here without doing anything. return; } @@ -605,10 +609,10 @@ public void closeJob(JobTask jobTask, boolean restart, String reason) { if (communicator == null) { logger.debug("Job [{}] is being closed before its process is started", jobId); jobTask.markAsCompleted(); - return; + } else { + communicator.close(restart, reason); } - communicator.close(restart, reason); processByAllocation.remove(allocationId); } catch (Exception e) { // If the close failed because the process has explicitly been killed by us then just pass on that exception @@ -628,7 +632,7 @@ public void closeJob(JobTask jobTask, boolean restart, String reason) { try { nativeStorageProvider.cleanupLocalTmpStorage(jobTask.getDescription()); } catch (IOException e) { - logger.error(new ParameterizedMessage("[{}]Failed to delete temporary files", jobId), e); + logger.error(new ParameterizedMessage("[{}] Failed to delete temporary files", jobId), e); } } From 5720a329ad9bd38fa0ce330b1642d0ffe42538ea Mon Sep 17 00:00:00 2001 From: David Roberts Date: Fri, 24 May 2019 20:05:15 +0100 Subject: [PATCH 107/224] [ML] Use map and filter instead of flatMap in find_file_structure (#42534) Using map and filter avoids the garbage from all the Stream.of calls that flatMap necessitated. Performance is better when there are masses of fields. --- .../xpack/ml/filestructurefinder/FileStructureUtils.java | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/FileStructureUtils.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/FileStructureUtils.java index 24a29f11e4355..90cc74c8d259c 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/FileStructureUtils.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/FileStructureUtils.java @@ -187,11 +187,8 @@ static Tuple, SortedMap> guessMapp for (String fieldName : uniqueFieldNames) { - List fieldValues = sampleRecords.stream().flatMap(record -> { - Object fieldValue = record.get(fieldName); - return (fieldValue == null) ? Stream.empty() : Stream.of(fieldValue); - } - ).collect(Collectors.toList()); + List fieldValues = sampleRecords.stream().map(record -> record.get(fieldName)).filter(fieldValue -> fieldValue != null) + .collect(Collectors.toList()); Tuple, FieldStats> mappingAndFieldStats = guessMappingAndCalculateFieldStats(explanation, fieldName, fieldValues, timeoutChecker); From eda3da31dace56623e84de78df183483ee2c8309 Mon Sep 17 00:00:00 2001 From: Mark Vieira Date: Fri, 24 May 2019 12:10:32 -0700 Subject: [PATCH 108/224] Improve build configuration time (#41392) This commit moves the expensive configuration-time calculation of Java runtime version information to runtime instead and also makes that work cacheable. This roughly equates to about a 50% reduction in project configuration time. --- build.gradle | 9 +- buildSrc/build.gradle | 34 +- .../elasticsearch/gradle/BuildPlugin.groovy | 737 +++++++----------- .../gradle/plugin/PluginBuildPlugin.groovy | 5 +- .../gradle/precommit/PrecommitTasks.groovy | 31 +- .../gradle/test/ClusterFormationTasks.groovy | 27 +- .../gradle/test/RestIntegTestTask.groovy | 41 +- .../test/StandaloneRestTestPlugin.groovy | 8 +- .../elasticsearch/gradle/JdkJarHellCheck.java | 0 .../gradle/LazyFileOutputStream.java | 0 .../org/elasticsearch/gradle/LoggedExec.java | 0 .../org/elasticsearch/gradle/Version.java | 0 .../gradle/VersionProperties.java | 0 .../info/GenerateGlobalBuildInfoTask.java | 276 +++++++ .../gradle/info/GlobalBuildInfoPlugin.java | 198 +++++ .../gradle/info/GlobalInfoExtension.java | 12 + .../elasticsearch/gradle/info/JavaHome.java | 35 + .../gradle/info/PrintGlobalBuildInfoTask.java | 84 ++ .../gradle/precommit/ThirdPartyAuditTask.java | 11 +- ...elasticsearch.global-build-info.properties | 1 + distribution/tools/plugin-cli/build.gradle | 6 +- modules/transport-netty4/build.gradle | 14 +- plugins/ingest-attachment/build.gradle | 10 +- plugins/transport-nio/build.gradle | 14 +- server/build.gradle | 6 +- x-pack/plugin/ccr/qa/restart/build.gradle | 2 +- x-pack/plugin/security/cli/build.gradle | 26 +- .../sql/qa/security/with-ssl/build.gradle | 14 +- x-pack/qa/full-cluster-restart/build.gradle | 32 +- .../reindex-tests-with-security/build.gradle | 6 +- x-pack/qa/rolling-upgrade/build.gradle | 32 +- 31 files changed, 1022 insertions(+), 649 deletions(-) rename buildSrc/src/main/{minimumRuntime => java}/org/elasticsearch/gradle/JdkJarHellCheck.java (100%) rename buildSrc/src/main/{minimumRuntime => java}/org/elasticsearch/gradle/LazyFileOutputStream.java (100%) rename buildSrc/src/main/{minimumRuntime => java}/org/elasticsearch/gradle/LoggedExec.java (100%) rename buildSrc/src/main/{minimumRuntime => java}/org/elasticsearch/gradle/Version.java (100%) rename buildSrc/src/main/{minimumRuntime => java}/org/elasticsearch/gradle/VersionProperties.java (100%) create mode 100644 buildSrc/src/main/java/org/elasticsearch/gradle/info/GenerateGlobalBuildInfoTask.java create mode 100644 buildSrc/src/main/java/org/elasticsearch/gradle/info/GlobalBuildInfoPlugin.java create mode 100644 buildSrc/src/main/java/org/elasticsearch/gradle/info/GlobalInfoExtension.java create mode 100644 buildSrc/src/main/java/org/elasticsearch/gradle/info/JavaHome.java create mode 100644 buildSrc/src/main/java/org/elasticsearch/gradle/info/PrintGlobalBuildInfoTask.java create mode 100644 buildSrc/src/main/resources/META-INF/gradle-plugins/elasticsearch.global-build-info.properties diff --git a/build.gradle b/build.gradle index 7de02b814da86..1de3f919d9c49 100644 --- a/build.gradle +++ b/build.gradle @@ -31,6 +31,7 @@ import org.gradle.plugins.ide.eclipse.model.SourceFolder plugins { id 'com.gradle.build-scan' version '2.2.1' id 'base' + id 'elasticsearch.global-build-info' } if (Boolean.valueOf(project.findProperty('org.elasticsearch.acceptScanTOS') ?: "false")) { buildScan { @@ -262,7 +263,7 @@ allprojects { } project.afterEvaluate { - configurations.all { + configurations.matching { it.canBeResolved }.all { resolutionStrategy.dependencySubstitution { DependencySubstitutions subs -> projectSubstitutions.each { k,v -> subs.substitute(subs.module(k)).with(subs.project(v)) @@ -336,7 +337,7 @@ gradle.projectsEvaluated { if (tasks.findByPath('test') != null && tasks.findByPath('integTest') != null) { integTest.mustRunAfter test } - configurations.all { Configuration configuration -> + configurations.matching { it.canBeResolved }.all { Configuration configuration -> dependencies.all { Dependency dep -> Project upstreamProject = dependencyToProject(dep) if (upstreamProject != null) { @@ -617,7 +618,3 @@ allprojects { } } } - - - - diff --git a/buildSrc/build.gradle b/buildSrc/build.gradle index 737bbca4cafb9..f239427330c58 100644 --- a/buildSrc/build.gradle +++ b/buildSrc/build.gradle @@ -69,37 +69,10 @@ processResources { if (JavaVersion.current() < JavaVersion.VERSION_11) { throw new GradleException('At least Java 11 is required to build elasticsearch gradle tools') } -// Gradle 4.10 does not support setting this to 11 yet -targetCompatibility = "10" -sourceCompatibility = "10" - -// We have a few classes that need to be compiled for older java versions because these are used to run checks against -// those -sourceSets { - minimumRuntime { - // We only want Java here, but the Groovy doesn't configure javadoc correctly if we don't define this as groovy - groovy { - srcDirs = ['src/main/minimumRuntime'] - } - } -} -compileMinimumRuntimeGroovy { - targetCompatibility = 8 - sourceCompatibility = 8 -} -dependencies { - if (project.ext.has("isEclipse") == false || project.ext.isEclipse == false) { - // eclipse is confused if this is set explicitly - compile sourceSets.minimumRuntime.output - } - minimumRuntimeCompile "junit:junit:${props.getProperty('junit')}" - minimumRuntimeCompile localGroovy() - minimumRuntimeCompile gradleApi() -} -jar { - from sourceSets.minimumRuntime.output -} +// Keep compatibility with Java 8 for external users of build-tools that haven't migrated to Java 11 +targetCompatibility = '8' +sourceCompatibility = '8' /***************************************************************************** * Dependencies used by the entire build * @@ -164,7 +137,6 @@ if (project != rootProject) { dependenciesInfo.enabled = false forbiddenApisMain.enabled = false forbiddenApisTest.enabled = false - forbiddenApisMinimumRuntime.enabled = false jarHell.enabled = false thirdPartyAudit.enabled = false diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy index 51300ffc628c9..3a058ca9310df 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy @@ -19,15 +19,24 @@ package org.elasticsearch.gradle import com.github.jengelman.gradle.plugins.shadow.ShadowPlugin +import com.github.jengelman.gradle.plugins.shadow.tasks.ShadowJar +import groovy.transform.CompileDynamic +import groovy.transform.CompileStatic import org.apache.commons.io.IOUtils -import org.apache.tools.ant.taskdefs.condition.Os import org.eclipse.jgit.lib.Constants import org.eclipse.jgit.lib.RepositoryBuilder +import org.elasticsearch.gradle.info.GlobalBuildInfoPlugin +import org.elasticsearch.gradle.info.GlobalInfoExtension +import org.elasticsearch.gradle.info.JavaHome +import org.elasticsearch.gradle.precommit.DependencyLicensesTask import org.elasticsearch.gradle.precommit.PrecommitTasks import org.elasticsearch.gradle.test.ErrorReportingTestListener +import org.elasticsearch.gradle.testclusters.ElasticsearchCluster +import org.gradle.api.Action import org.gradle.api.GradleException import org.gradle.api.InvalidUserDataException import org.gradle.api.JavaVersion +import org.gradle.api.NamedDomainObjectContainer import org.gradle.api.Plugin import org.gradle.api.Project import org.gradle.api.Task @@ -41,22 +50,34 @@ import org.gradle.api.artifacts.ResolvedArtifact import org.gradle.api.artifacts.dsl.RepositoryHandler import org.gradle.api.artifacts.repositories.ArtifactRepository import org.gradle.api.artifacts.repositories.IvyArtifactRepository +import org.gradle.api.artifacts.repositories.IvyPatternRepositoryLayout import org.gradle.api.artifacts.repositories.MavenArtifactRepository import org.gradle.api.credentials.HttpHeaderCredentials import org.gradle.api.execution.TaskActionListener import org.gradle.api.execution.TaskExecutionGraph +import org.gradle.api.file.CopySpec +import org.gradle.api.plugins.BasePlugin +import org.gradle.api.plugins.BasePluginConvention +import org.gradle.api.plugins.ExtraPropertiesExtension import org.gradle.api.plugins.JavaPlugin +import org.gradle.api.plugins.JavaPluginExtension +import org.gradle.api.publish.PublishingExtension import org.gradle.api.publish.maven.MavenPublication import org.gradle.api.publish.maven.plugins.MavenPublishPlugin import org.gradle.api.publish.maven.tasks.GenerateMavenPom import org.gradle.api.tasks.SourceSet +import org.gradle.api.tasks.SourceSetContainer import org.gradle.api.tasks.bundling.Jar import org.gradle.api.tasks.compile.GroovyCompile import org.gradle.api.tasks.compile.JavaCompile import org.gradle.api.tasks.javadoc.Javadoc import org.gradle.api.tasks.testing.Test +import org.gradle.api.tasks.testing.logging.TestLoggingContainer import org.gradle.authentication.http.HttpHeaderAuthentication +import org.gradle.external.javadoc.CoreJavadocOptions import org.gradle.internal.jvm.Jvm +import org.gradle.language.base.plugins.LifecycleBasePlugin +import org.gradle.process.CommandLineArgumentProvider import org.gradle.process.ExecResult import org.gradle.process.ExecSpec import org.gradle.util.GradleVersion @@ -64,18 +85,19 @@ import org.gradle.util.GradleVersion import java.nio.charset.StandardCharsets import java.time.ZoneOffset import java.time.ZonedDateTime -import java.util.concurrent.ExecutorService -import java.util.concurrent.Executors -import java.util.concurrent.Future import java.util.regex.Matcher /** * Encapsulates build configuration for elasticsearch projects. */ +@CompileStatic class BuildPlugin implements Plugin { @Override void apply(Project project) { + // make sure the global build info plugin is applied to the root project + project.rootProject.pluginManager.apply(GlobalBuildInfoPlugin) + if (project.pluginManager.hasPlugin('elasticsearch.standalone-rest-test')) { throw new InvalidUserDataException('elasticsearch.standalone-test, ' + 'elasticsearch.standalone-rest-test, and elasticsearch.build ' @@ -105,9 +127,8 @@ class BuildPlugin implements Plugin { project.getTasks().create("buildResources", ExportElasticsearchBuildResourcesTask) setupSeed(project) - globalBuildInfo(project) configureRepositories(project) - project.ext.versions = VersionProperties.versions + project.extensions.getByType(ExtraPropertiesExtension).set('versions', VersionProperties.versions) configureSourceSets(project) configureCompile(project) configureJavadoc(project) @@ -118,175 +139,37 @@ class BuildPlugin implements Plugin { configureDependenciesInfo(project) // Common config when running with a FIPS-140 runtime JVM - // Need to do it here to support external plugins - if (project.ext.inFipsJvm) { - project.tasks.withType(Test) { - systemProperty 'javax.net.ssl.trustStorePassword', 'password' - systemProperty 'javax.net.ssl.keyStorePassword', 'password' - } - project.pluginManager.withPlugin("elasticsearch.testclusters") { - project.testClusters.all { - systemProperty 'javax.net.ssl.trustStorePassword', 'password' - systemProperty 'javax.net.ssl.keyStorePassword', 'password' - } - } - } - - } - - - - /** Performs checks on the build environment and prints information about the build environment. */ - static void globalBuildInfo(Project project) { - if (project.rootProject.ext.has('buildChecksDone') == false) { - JavaVersion minimumRuntimeVersion = JavaVersion.toVersion( - BuildPlugin.class.getClassLoader().getResourceAsStream("minimumRuntimeVersion").text.trim() - ) - JavaVersion minimumCompilerVersion = JavaVersion.toVersion( - BuildPlugin.class.getClassLoader().getResourceAsStream("minimumCompilerVersion").text.trim() - ) - String compilerJavaHome = findCompilerJavaHome() - String runtimeJavaHome = findRuntimeJavaHome(compilerJavaHome) - File gradleJavaHome = Jvm.current().javaHome - - String javaVendor = System.getProperty('java.vendor') - String gradleJavaVersion = System.getProperty('java.version') - String gradleJavaVersionDetails = "${javaVendor} ${gradleJavaVersion}" + - " [${System.getProperty('java.vm.name')} ${System.getProperty('java.vm.version')}]" - - String compilerJavaVersionDetails = gradleJavaVersionDetails - JavaVersion compilerJavaVersionEnum = JavaVersion.current() - if (new File(compilerJavaHome).canonicalPath != gradleJavaHome.canonicalPath) { - compilerJavaVersionDetails = findJavaVersionDetails(project, compilerJavaHome) - compilerJavaVersionEnum = JavaVersion.toVersion(findJavaSpecificationVersion(project, compilerJavaHome)) - } - - String runtimeJavaVersionDetails = gradleJavaVersionDetails - JavaVersion runtimeJavaVersionEnum = JavaVersion.current() - if (new File(runtimeJavaHome).canonicalPath != gradleJavaHome.canonicalPath) { - runtimeJavaVersionDetails = findJavaVersionDetails(project, runtimeJavaHome) - runtimeJavaVersionEnum = JavaVersion.toVersion(findJavaSpecificationVersion(project, runtimeJavaHome)) - } - - boolean inFipsJvm = false - if (new File(runtimeJavaHome).canonicalPath != gradleJavaHome.canonicalPath) { - // We don't expect Gradle to be running in a FIPS JVM - String inFipsJvmScript = 'print(java.security.Security.getProviders()[0].name.toLowerCase().contains("fips"));' - inFipsJvm = Boolean.parseBoolean(runJavaAsScript(project, runtimeJavaHome, inFipsJvmScript)) - } - - // Build debugging info - println '=======================================' - println 'Elasticsearch Build Hamster says Hello!' - println " Gradle Version : ${project.gradle.gradleVersion}" - println " OS Info : ${System.getProperty('os.name')} ${System.getProperty('os.version')} (${System.getProperty('os.arch')})" - if (gradleJavaVersionDetails != compilerJavaVersionDetails || gradleJavaVersionDetails != runtimeJavaVersionDetails) { - println " Compiler JDK Version : ${compilerJavaVersionEnum} (${compilerJavaVersionDetails})" - println " Compiler java.home : ${compilerJavaHome}" - println " Runtime JDK Version : ${runtimeJavaVersionEnum} (${runtimeJavaVersionDetails})" - println " Runtime java.home : ${runtimeJavaHome}" - println " Gradle JDK Version : ${JavaVersion.toVersion(gradleJavaVersion)} (${gradleJavaVersionDetails})" - println " Gradle java.home : ${gradleJavaHome}" - } else { - println " JDK Version : ${JavaVersion.toVersion(gradleJavaVersion)} (${gradleJavaVersionDetails})" - println " JAVA_HOME : ${gradleJavaHome}" - } - println " Random Testing Seed : ${project.testSeed}" - println '=======================================' - - // enforce Java version - if (compilerJavaVersionEnum < minimumCompilerVersion) { - final String message = - "the compiler java.home must be set to a JDK installation directory for Java ${minimumCompilerVersion}" + - " but is [${compilerJavaHome}] corresponding to [${compilerJavaVersionEnum}]" - throw new GradleException(message) - } - - if (runtimeJavaVersionEnum < minimumRuntimeVersion) { - final String message = - "the runtime java.home must be set to a JDK installation directory for Java ${minimumRuntimeVersion}" + - " but is [${runtimeJavaHome}] corresponding to [${runtimeJavaVersionEnum}]" - throw new GradleException(message) - } - - final Map javaVersions = [:] - for (int version = 8; version <= Integer.parseInt(minimumCompilerVersion.majorVersion); version++) { - if(System.getenv(getJavaHomeEnvVarName(version.toString())) != null) { - javaVersions.put(version, findJavaHome(version.toString())); - } - } - - final int numberOfPhysicalCores = numberOfPhysicalCores(project.rootProject) - if (javaVersions.isEmpty() == false) { - - ExecutorService exec = Executors.newFixedThreadPool(numberOfPhysicalCores) - Set> results = new HashSet<>() - - javaVersions.entrySet().stream() - .filter { it.getValue() != null } - .forEach { javaVersionEntry -> - results.add(exec.submit { - final String javaHome = javaVersionEntry.getValue() - final int version = javaVersionEntry.getKey() - if (project.file(javaHome).exists() == false) { - throw new GradleException("Invalid JAVA${version}_HOME=${javaHome} location does not exist") + // Need to do it here to support external plugins + if (project == project.rootProject) { + GlobalInfoExtension globalInfo = project.extensions.getByType(GlobalInfoExtension) + + // wait until global info is populated because we don't know if we are running in a fips jvm until execution time + globalInfo.ready { + project.subprojects { Project subproject -> + ExtraPropertiesExtension ext = subproject.extensions.getByType(ExtraPropertiesExtension) + // Common config when running with a FIPS-140 runtime JVM + if (ext.has('inFipsJvm') && ext.get('inFipsJvm')) { + subproject.tasks.withType(Test) { Test task -> + task.systemProperty 'javax.net.ssl.trustStorePassword', 'password' + task.systemProperty 'javax.net.ssl.keyStorePassword', 'password' } - - JavaVersion javaVersionEnum = JavaVersion.toVersion(findJavaSpecificationVersion(project, javaHome)) - final JavaVersion expectedJavaVersionEnum = version < 9 ? - JavaVersion.toVersion("1." + version) : - JavaVersion.toVersion(Integer.toString(version)) - - if (javaVersionEnum != expectedJavaVersionEnum) { - final String message = - "the environment variable JAVA" + version + "_HOME must be set to a JDK installation directory for Java" + - " ${expectedJavaVersionEnum} but is [${javaHome}] corresponding to [${javaVersionEnum}]" - throw new GradleException(message) + project.pluginManager.withPlugin("elasticsearch.testclusters") { + NamedDomainObjectContainer testClusters = subproject.extensions.getByName('testClusters') as NamedDomainObjectContainer + testClusters.all { ElasticsearchCluster cluster -> + cluster.systemProperty 'javax.net.ssl.trustStorePassword', 'password' + cluster.systemProperty 'javax.net.ssl.keyStorePassword', 'password' + } } - }) - } - - project.gradle.taskGraph.whenReady { - try { - results.forEach { it.get() } - } finally { - exec.shutdown(); } } } - - project.rootProject.ext.compilerJavaHome = compilerJavaHome - project.rootProject.ext.runtimeJavaHome = runtimeJavaHome - project.rootProject.ext.compilerJavaVersion = compilerJavaVersionEnum - project.rootProject.ext.runtimeJavaVersion = runtimeJavaVersionEnum - project.rootProject.ext.isRuntimeJavaHomeSet = compilerJavaHome.equals(runtimeJavaHome) == false - project.rootProject.ext.javaVersions = javaVersions - project.rootProject.ext.buildChecksDone = true - project.rootProject.ext.minimumCompilerVersion = minimumCompilerVersion - project.rootProject.ext.minimumRuntimeVersion = minimumRuntimeVersion - project.rootProject.ext.inFipsJvm = inFipsJvm - project.rootProject.ext.gradleJavaVersion = JavaVersion.toVersion(gradleJavaVersion) - project.rootProject.ext.java9Home = "${-> findJavaHome("9")}" - project.rootProject.ext.defaultParallel = numberOfPhysicalCores } - - project.targetCompatibility = project.rootProject.ext.minimumRuntimeVersion - project.sourceCompatibility = project.rootProject.ext.minimumRuntimeVersion - - // set java home for each project, so they dont have to find it in the root project - project.ext.compilerJavaHome = project.rootProject.ext.compilerJavaHome - project.ext.runtimeJavaHome = project.rootProject.ext.runtimeJavaHome - project.ext.compilerJavaVersion = project.rootProject.ext.compilerJavaVersion - project.ext.runtimeJavaVersion = project.rootProject.ext.runtimeJavaVersion - project.ext.isRuntimeJavaHomeSet = project.rootProject.ext.isRuntimeJavaHomeSet - project.ext.javaVersions = project.rootProject.ext.javaVersions - project.ext.inFipsJvm = project.rootProject.ext.inFipsJvm - project.ext.gradleJavaVersion = project.rootProject.ext.gradleJavaVersion - project.ext.java9Home = project.rootProject.ext.java9Home } static void requireDocker(final Task task) { final Project rootProject = task.project.rootProject + ExtraPropertiesExtension ext = rootProject.extensions.getByType(ExtraPropertiesExtension) + if (rootProject.hasProperty('requiresDocker') == false) { /* * This is our first time encountering a task that requires Docker. We will add an extension that will let us track the tasks @@ -314,11 +197,11 @@ class BuildPlugin implements Plugin { throw new IllegalArgumentException( "expected build.docker to be unset or one of \"true\" or \"false\" but was [" + buildDockerProperty + "]") } - rootProject.rootProject.ext.buildDocker = buildDocker - rootProject.rootProject.ext.requiresDocker = [] + + ext.set('buildDocker', buildDocker) + ext.set('requiresDocker', []) rootProject.gradle.taskGraph.whenReady { TaskExecutionGraph taskGraph -> - final List tasks = - ((List)rootProject.requiresDocker).findAll { taskGraph.hasTask(it) }.collect { " ${it.path}".toString()} + final List tasks = taskGraph.allTasks.intersect(ext.get('requiresDocker') as List).collect { " ${it.path}".toString()} if (tasks.isEmpty() == false) { /* * There are tasks in the task graph that require Docker. Now we are failing because either the Docker binary does not @@ -371,8 +254,9 @@ class BuildPlugin implements Plugin { } } } - if (rootProject.buildDocker) { - rootProject.requiresDocker.add(task) + + if (ext.get('buildDocker')) { + (ext.get('requiresDocker') as List).add(task) } else { task.enabled = false } @@ -400,130 +284,48 @@ class BuildPlugin implements Plugin { + "or by passing -Dbuild.docker=false") } - private static String findCompilerJavaHome() { - String compilerJavaHome = System.getenv('JAVA_HOME') - final String compilerJavaProperty = System.getProperty('compiler.java') - if (compilerJavaProperty != null) { - compilerJavaHome = findJavaHome(compilerJavaProperty) - } - if (compilerJavaHome == null) { - // if JAVA_HOME does not set,so we use the JDK that Gradle was run with. - return Jvm.current().javaHome - } - return compilerJavaHome - } - - private static String findJavaHome(String version) { - String versionedVarName = getJavaHomeEnvVarName(version) - String versionedJavaHome = System.getenv(versionedVarName); - if (versionedJavaHome == null) { - throw new GradleException( - "$versionedVarName must be set to build Elasticsearch. " + - "Note that if the variable was just set you might have to run `./gradlew --stop` for " + - "it to be picked up. See https://github.com/elastic/elasticsearch/issues/31399 details." - ) - } - return versionedJavaHome - } - - private static String getJavaHomeEnvVarName(String version) { - return 'JAVA' + version + '_HOME' - } - /** Add a check before gradle execution phase which ensures java home for the given java version is set. */ static void requireJavaHome(Task task, int version) { - Project rootProject = task.project.rootProject // use root project for global accounting + // use root project for global accounting + Project rootProject = task.project.rootProject + ExtraPropertiesExtension ext = rootProject.extensions.getByType(ExtraPropertiesExtension) + if (rootProject.hasProperty('requiredJavaVersions') == false) { - rootProject.rootProject.ext.requiredJavaVersions = [:] - rootProject.gradle.taskGraph.whenReady { TaskExecutionGraph taskGraph -> + ext.set('requiredJavaVersions', [:]) + rootProject.gradle.taskGraph.whenReady({ TaskExecutionGraph taskGraph -> List messages = [] - for (entry in rootProject.requiredJavaVersions) { - if (rootProject.javaVersions.get(entry.key) != null) { + Map> requiredJavaVersions = (Map>) ext.get('requiredJavaVersions') + for (Map.Entry> entry : requiredJavaVersions) { + List javaVersions = ext.get('javaVersions') as List + if (javaVersions.find { it.version == entry.key } != null) { continue } - List tasks = entry.value.findAll { taskGraph.hasTask(it) }.collect { " ${it.path}" } + List tasks = entry.value.findAll { taskGraph.hasTask(it) }.collect { " ${it.path}".toString() } if (tasks.isEmpty() == false) { - messages.add("JAVA${entry.key}_HOME required to run tasks:\n${tasks.join('\n')}") + messages.add("JAVA${entry.key}_HOME required to run tasks:\n${tasks.join('\n')}".toString()) } } if (messages.isEmpty() == false) { throw new GradleException(messages.join('\n')) } - rootProject.rootProject.ext.requiredJavaVersions = null // reset to null to indicate the pre-execution checks have executed - } - } else if (rootProject.rootProject.requiredJavaVersions == null) { + ext.set('requiredJavaVersions', null) // reset to null to indicate the pre-execution checks have executed + }) + } else if (ext.has('requiredJavaVersions') == false || ext.get('requiredJavaVersions') == null) { // check directly if the version is present since we are already executing - if (rootProject.javaVersions.get(version) == null) { + List javaVersions = ext.get('javaVersions') as List + if (javaVersions.find { it.version == version } == null) { throw new GradleException("JAVA${version}_HOME required to run task:\n${task}") } } else { - rootProject.requiredJavaVersions.getOrDefault(version, []).add(task) + (ext.get('requiredJavaVersions') as Map>).getOrDefault(version, []).add(task) } } /** A convenience method for getting java home for a version of java and requiring that version for the given task to execute */ static String getJavaHome(final Task task, final int version) { requireJavaHome(task, version) - return task.project.javaVersions.get(version) - } - - private static String findRuntimeJavaHome(final String compilerJavaHome) { - String runtimeJavaProperty = System.getProperty("runtime.java") - if (runtimeJavaProperty != null) { - return findJavaHome(runtimeJavaProperty) - } - return System.getenv('RUNTIME_JAVA_HOME') ?: compilerJavaHome - } - - /** Finds printable java version of the given JAVA_HOME */ - private static String findJavaVersionDetails(Project project, String javaHome) { - String versionInfoScript = 'print(' + - 'java.lang.System.getProperty("java.vendor") + " " + java.lang.System.getProperty("java.version") + ' + - '" [" + java.lang.System.getProperty("java.vm.name") + " " + java.lang.System.getProperty("java.vm.version") + "]");' - return runJavaAsScript(project, javaHome, versionInfoScript).trim() - } - - /** Finds the parsable java specification version */ - private static String findJavaSpecificationVersion(Project project, String javaHome) { - String versionScript = 'print(java.lang.System.getProperty("java.specification.version"));' - return runJavaAsScript(project, javaHome, versionScript) - } - - private static String findJavaVendor(Project project, String javaHome) { - String vendorScript = 'print(java.lang.System.getProperty("java.vendor"));' - return runJavaAsScript(project, javaHome, vendorScript) - } - - /** Finds the parsable java specification version */ - private static String findJavaVersion(Project project, String javaHome) { - String versionScript = 'print(java.lang.System.getProperty("java.version"));' - return runJavaAsScript(project, javaHome, versionScript) - } - - /** Runs the given javascript using jjs from the jdk, and returns the output */ - private static String runJavaAsScript(Project project, String javaHome, String script) { - ByteArrayOutputStream stdout = new ByteArrayOutputStream() - ByteArrayOutputStream stderr = new ByteArrayOutputStream() - if (Os.isFamily(Os.FAMILY_WINDOWS)) { - // gradle/groovy does not properly escape the double quote for windows - script = script.replace('"', '\\"') - } - File jrunscriptPath = new File(javaHome, 'bin/jrunscript') - ExecResult result = project.exec { - executable = jrunscriptPath - args '-e', script - standardOutput = stdout - errorOutput = stderr - ignoreExitValue = true - } - if (result.exitValue != 0) { - project.logger.error("STDOUT:") - stdout.toString('UTF-8').eachLine { line -> project.logger.error(line) } - project.logger.error("STDERR:") - stderr.toString('UTF-8').eachLine { line -> project.logger.error(line) } - result.rethrowFailure() - } - return stdout.toString('UTF-8').trim() + List javaVersions = task.project.property('javaVersions') as List + return javaVersions.find { it.version == version }.javaHome.absolutePath } /** Return the configuration name used for finding transitive deps of the given dependency. */ @@ -549,7 +351,7 @@ class BuildPlugin implements Plugin { */ static void configureConfigurations(Project project) { // we want to test compileOnly deps! - project.configurations.testCompile.extendsFrom(project.configurations.compileOnly) + project.configurations.getByName(JavaPlugin.TEST_COMPILE_CONFIGURATION_NAME).extendsFrom(project.configurations.getByName(JavaPlugin.COMPILE_ONLY_CONFIGURATION_NAME)) // we are not shipping these jars, we act like dumb consumers of these things if (project.path.startsWith(':test:fixtures') || project.path == ':build-tools') { @@ -587,9 +389,9 @@ class BuildPlugin implements Plugin { } } - project.configurations.compile.dependencies.all(disableTransitiveDeps) - project.configurations.testCompile.dependencies.all(disableTransitiveDeps) - project.configurations.compileOnly.dependencies.all(disableTransitiveDeps) + project.configurations.getByName(JavaPlugin.COMPILE_CONFIGURATION_NAME).dependencies.all(disableTransitiveDeps) + project.configurations.getByName(JavaPlugin.TEST_COMPILE_CONFIGURATION_NAME).dependencies.all(disableTransitiveDeps) + project.configurations.getByName(JavaPlugin.COMPILE_ONLY_CONFIGURATION_NAME).dependencies.all(disableTransitiveDeps) project.plugins.withType(ShadowPlugin).whenPluginAdded { Configuration bundle = project.configurations.create('bundle') @@ -603,46 +405,45 @@ class BuildPlugin implements Plugin { if (repository instanceof MavenArtifactRepository) { final MavenArtifactRepository maven = (MavenArtifactRepository) repository assertRepositoryURIUsesHttps(maven, project, maven.getUrl()) - repository.getArtifactUrls().each { uri -> assertRepositoryURIUsesHttps(project, uri) } + repository.getArtifactUrls().each { uri -> assertRepositoryURIUsesHttps(maven, project, uri) } } else if (repository instanceof IvyArtifactRepository) { final IvyArtifactRepository ivy = (IvyArtifactRepository) repository assertRepositoryURIUsesHttps(ivy, project, ivy.getUrl()) } } RepositoryHandler repos = project.repositories - if (System.getProperty("repos.mavenLocal") != null) { + if (System.getProperty('repos.mavenLocal') != null) { // with -Drepos.mavenLocal=true we can force checking the local .m2 repo which is // useful for development ie. bwc tests where we install stuff in the local repository // such that we don't have to pass hardcoded files to gradle repos.mavenLocal() } repos.jcenter() - repos.ivy { - name "elasticsearch" - url "https://artifacts.elastic.co/downloads" - patternLayout { - artifact "elasticsearch/[module]-[revision](-[classifier]).[ext]" + repos.ivy { IvyArtifactRepository repo -> + repo.name = 'elasticsearch' + repo.url = 'https://artifacts.elastic.co/downloads' + repo.patternLayout { IvyPatternRepositoryLayout layout -> + layout.artifact 'elasticsearch/[module]-[revision](-[classifier]).[ext]' } // this header is not a credential but we hack the capability to send this header to avoid polluting our download stats - credentials(HttpHeaderCredentials) { - name = "X-Elastic-No-KPI" - value = "1" - } - authentication { - header(HttpHeaderAuthentication) - } + repo.credentials(HttpHeaderCredentials, { HttpHeaderCredentials creds -> + creds.name = 'X-Elastic-No-KPI' + creds.value = '1' + } as Action) + repo.authentication.create('header', HttpHeaderAuthentication) } - repos.maven { - name "elastic" - url "https://artifacts.elastic.co/maven" + repos.maven { MavenArtifactRepository repo -> + repo.name = 'elastic' + repo.url = 'https://artifacts.elastic.co/maven' } String luceneVersion = VersionProperties.lucene if (luceneVersion.contains('-snapshot')) { // extract the revision number from the version with a regex matcher - String revision = (luceneVersion =~ /\w+-snapshot-([a-z0-9]+)/)[0][1] - repos.maven { - name 'lucene-snapshots' - url "https://s3.amazonaws.com/download.elasticsearch.org/lucenesnapshots/${revision}" + List matches = (luceneVersion =~ /\w+-snapshot-([a-z0-9]+)/).getAt(0) as List + String revision = matches.get(1) + repos.maven { MavenArtifactRepository repo -> + repo.name = 'lucene-snapshots' + repo.url = "https://s3.amazonaws.com/download.elasticsearch.org/lucenesnapshots/${revision}" } } } @@ -664,6 +465,7 @@ class BuildPlugin implements Plugin { *
    • Set compile time deps back to compile from runtime (known issue with maven-publish plugin)
    • * */ + @CompileDynamic private static Closure fixupDependencies(Project project) { return { XmlProvider xml -> // first find if we have dependencies at all, and grab the node @@ -724,21 +526,22 @@ class BuildPlugin implements Plugin { } /**Configuration generation of maven poms. */ - public static void configurePomGeneration(Project project) { + static void configurePomGeneration(Project project) { // Only works with `enableFeaturePreview('STABLE_PUBLISHING')` // https://github.com/gradle/gradle/issues/5696#issuecomment-396965185 project.tasks.withType(GenerateMavenPom.class) { GenerateMavenPom generatePOMTask -> // The GenerateMavenPom task is aggressive about setting the destination, instead of fighting it, // just make a copy. - generatePOMTask.ext.pomFileName = null - doLast { - project.copy { - from generatePOMTask.destination - into "${project.buildDir}/distributions" - rename { - generatePOMTask.ext.pomFileName == null ? - "${project.archivesBaseName}-${project.version}.pom" : - generatePOMTask.ext.pomFileName + ExtraPropertiesExtension ext = generatePOMTask.extensions.getByType(ExtraPropertiesExtension) + ext.set('pomFileName', null) + generatePOMTask.doLast { + project.copy { CopySpec spec -> + spec.from generatePOMTask.destination + spec.into "${project.buildDir}/distributions" + spec.rename { + ext.has('pomFileName') && ext.get('pomFileName') == null ? + "${project.convention.getPlugin(BasePluginConvention).archivesBaseName}-${project.version}.pom" : + ext.get('pomFileName') } } } @@ -748,22 +551,16 @@ class BuildPlugin implements Plugin { assemble.dependsOn(generatePOMTask) } } - project.plugins.withType(MavenPublishPlugin.class).whenPluginAdded { - project.publishing { - publications { - all { MavenPublication publication -> // we only deal with maven - // add exclusions to the pom directly, for each of the transitive deps of this project's deps - publication.pom.withXml(fixupDependencies(project)) - } - } + project.plugins.withType(MavenPublishPlugin).whenPluginAdded { + PublishingExtension publishing = project.extensions.getByType(PublishingExtension) + publishing.publications.all { MavenPublication publication -> // we only deal with maven + // add exclusions to the pom directly, for each of the transitive deps of this project's deps + publication.pom.withXml(fixupDependencies(project)) } project.plugins.withType(ShadowPlugin).whenPluginAdded { - project.publishing { - publications { - nebula(MavenPublication) { - artifacts = [ project.tasks.shadowJar ] - } - } + MavenPublication publication = publishing.publications.maybeCreate('nebula', MavenPublication) + publication.with { + artifacts = [ project.tasks.getByName('shadowJar') ] } } } @@ -775,9 +572,9 @@ class BuildPlugin implements Plugin { static void configureSourceSets(Project project) { project.plugins.withType(ShadowPlugin).whenPluginAdded { ['main', 'test'].each {name -> - SourceSet sourceSet = project.sourceSets.findByName(name) + SourceSet sourceSet = project.extensions.getByType(SourceSetContainer).findByName(name) if (sourceSet != null) { - sourceSet.compileClasspath += project.configurations.bundle + sourceSet.compileClasspath += project.configurations.getByName('bundle') } } } @@ -785,17 +582,23 @@ class BuildPlugin implements Plugin { /** Adds compiler settings to the project */ static void configureCompile(Project project) { - project.ext.compactProfile = 'full' + ExtraPropertiesExtension ext = project.extensions.getByType(ExtraPropertiesExtension) + ext.set('compactProfile', 'full') + + project.extensions.getByType(JavaPluginExtension).sourceCompatibility = ext.get('minimumRuntimeVersion') as JavaVersion + project.extensions.getByType(JavaPluginExtension).targetCompatibility = ext.get('minimumRuntimeVersion') as JavaVersion + project.afterEvaluate { - project.tasks.withType(JavaCompile) { - final JavaVersion targetCompatibilityVersion = JavaVersion.toVersion(it.targetCompatibility) - final compilerJavaHomeFile = new File(project.compilerJavaHome) + File compilerJavaHome = ext.get('compilerJavaHome') as File + + project.tasks.withType(JavaCompile) { JavaCompile compileTask -> + final JavaVersion targetCompatibilityVersion = JavaVersion.toVersion(compileTask.targetCompatibility) // we only fork if the Gradle JDK is not the same as the compiler JDK - if (compilerJavaHomeFile.canonicalPath == Jvm.current().javaHome.canonicalPath) { - options.fork = false + if (compilerJavaHome.canonicalPath == Jvm.current().javaHome.canonicalPath) { + compileTask.options.fork = false } else { - options.fork = true - options.forkOptions.javaHome = compilerJavaHomeFile + compileTask.options.fork = true + compileTask.options.forkOptions.javaHome = compilerJavaHome } /* * -path because gradle will send in paths that don't always exist. @@ -804,29 +607,28 @@ class BuildPlugin implements Plugin { */ // don't even think about passing args with -J-xxx, oracle will ask you to submit a bug report :) // fail on all javac warnings - options.compilerArgs << '-Werror' << '-Xlint:all,-path,-serial,-options,-deprecation,-try' << '-Xdoclint:all' << '-Xdoclint:-missing' + compileTask.options.compilerArgs << '-Werror' << '-Xlint:all,-path,-serial,-options,-deprecation,-try' << '-Xdoclint:all' << '-Xdoclint:-missing' // either disable annotation processor completely (default) or allow to enable them if an annotation processor is explicitly defined - if (options.compilerArgs.contains("-processor") == false) { - options.compilerArgs << '-proc:none' + if (compileTask.options.compilerArgs.contains("-processor") == false) { + compileTask.options.compilerArgs << '-proc:none' } - options.encoding = 'UTF-8' - options.incremental = true + compileTask.options.encoding = 'UTF-8' + compileTask.options.incremental = true // TODO: use native Gradle support for --release when available (cf. https://github.com/gradle/gradle/issues/2510) - options.compilerArgs << '--release' << targetCompatibilityVersion.majorVersion + compileTask.options.compilerArgs << '--release' << targetCompatibilityVersion.majorVersion } // also apply release flag to groovy, which is used in build-tools - project.tasks.withType(GroovyCompile) { - final compilerJavaHomeFile = new File(project.compilerJavaHome) + project.tasks.withType(GroovyCompile) { GroovyCompile compileTask -> // we only fork if the Gradle JDK is not the same as the compiler JDK - if (compilerJavaHomeFile.canonicalPath == Jvm.current().javaHome.canonicalPath) { - options.fork = false + if (compilerJavaHome.canonicalPath == Jvm.current().javaHome.canonicalPath) { + compileTask.options.fork = false } else { - options.fork = true - options.forkOptions.javaHome = compilerJavaHomeFile - options.compilerArgs << '--release' << JavaVersion.toVersion(it.targetCompatibility).majorVersion + compileTask.options.fork = true + compileTask.options.forkOptions.javaHome = compilerJavaHome + compileTask.options.compilerArgs << '--release' << JavaVersion.toVersion(compileTask.targetCompatibility).majorVersion } } } @@ -835,11 +637,12 @@ class BuildPlugin implements Plugin { static void configureJavadoc(Project project) { // remove compiled classes from the Javadoc classpath: http://mail.openjdk.java.net/pipermail/javadoc-dev/2018-January/000400.html final List classes = new ArrayList<>() - project.tasks.withType(JavaCompile) { javaCompile -> + project.tasks.withType(JavaCompile) { JavaCompile javaCompile -> classes.add(javaCompile.destinationDir) } - project.tasks.withType(Javadoc) { javadoc -> - javadoc.executable = new File(project.compilerJavaHome, 'bin/javadoc') + project.tasks.withType(Javadoc) { Javadoc javadoc -> + File compilerJavaHome = project.extensions.getByType(ExtraPropertiesExtension).get('compilerJavaHome') as File + javadoc.executable = new File(compilerJavaHome, 'bin/javadoc') javadoc.classpath = javadoc.getClasspath().filter { f -> return classes.contains(f) == false } @@ -847,34 +650,35 @@ class BuildPlugin implements Plugin { * Generate docs using html5 to suppress a warning from `javadoc` * that the default will change to html5 in the future. */ - javadoc.options.addBooleanOption('html5', true) + (javadoc.options as CoreJavadocOptions).addBooleanOption('html5', true) } configureJavadocJar(project) } /** Adds a javadocJar task to generate a jar containing javadocs. */ static void configureJavadocJar(Project project) { - Jar javadocJarTask = project.task('javadocJar', type: Jar) + Jar javadocJarTask = project.tasks.create('javadocJar', Jar) javadocJarTask.classifier = 'javadoc' javadocJarTask.group = 'build' javadocJarTask.description = 'Assembles a jar containing javadocs.' javadocJarTask.from(project.tasks.getByName(JavaPlugin.JAVADOC_TASK_NAME)) - project.assemble.dependsOn(javadocJarTask) + project.tasks.getByName(BasePlugin.ASSEMBLE_TASK_NAME).dependsOn(javadocJarTask) } static void configureSourcesJar(Project project) { - Jar sourcesJarTask = project.task('sourcesJar', type: Jar) + Jar sourcesJarTask = project.tasks.create('sourcesJar', Jar) sourcesJarTask.classifier = 'sources' sourcesJarTask.group = 'build' sourcesJarTask.description = 'Assembles a jar containing source files.' - sourcesJarTask.from(project.sourceSets.main.allSource) - project.assemble.dependsOn(sourcesJarTask) + sourcesJarTask.from(project.extensions.getByType(SourceSetContainer).getByName(SourceSet.MAIN_SOURCE_SET_NAME).allSource) + project.tasks.getByName(BasePlugin.ASSEMBLE_TASK_NAME).dependsOn(sourcesJarTask) } /** Adds additional manifest info to jars */ static void configureJars(Project project) { - project.ext.licenseFile = null - project.ext.noticeFile = null + ExtraPropertiesExtension ext = project.extensions.getByType(ExtraPropertiesExtension) + ext.set('licenseFile', null) + ext.set('noticeFile', null) project.tasks.withType(Jar) { Jar jarTask -> // we put all our distributable files under distributions jarTask.destinationDir = new File(project.buildDir, 'distributions') @@ -882,14 +686,15 @@ class BuildPlugin implements Plugin { jarTask.doFirst { // this doFirst is added before the info plugin, therefore it will run // after the doFirst added by the info plugin, and we can override attributes + JavaVersion compilerJavaVersion = ext.get('compilerJavaVersion') as JavaVersion jarTask.manifest.attributes( 'X-Compile-Elasticsearch-Version': VersionProperties.elasticsearch, 'X-Compile-Lucene-Version': VersionProperties.lucene, 'X-Compile-Elasticsearch-Snapshot': VersionProperties.isElasticsearchSnapshot(), 'Build-Date': ZonedDateTime.now(ZoneOffset.UTC), - 'Build-Java-Version': project.compilerJavaVersion) + 'Build-Java-Version': compilerJavaVersion) if (jarTask.manifest.attributes.containsKey('Change') == false) { - logger.warn('Building without git revision id.') + jarTask.logger.warn('Building without git revision id.') jarTask.manifest.attributes('Change': 'Unknown') } else { /* @@ -908,19 +713,24 @@ class BuildPlugin implements Plugin { jarTask.manifest.getAttributes().clear() } } + // add license/notice files project.afterEvaluate { - if (project.licenseFile == null || project.noticeFile == null) { + if (ext.has('licenseFile') == false || ext.get('licenseFile') == null || ext.has('noticeFile') == false || ext.get('noticeFile') == null) { throw new GradleException("Must specify license and notice file for project ${project.path}") } - jarTask.metaInf { - from(project.licenseFile.parent) { - include project.licenseFile.name - rename { 'LICENSE.txt' } + + File licenseFile = ext.get('licenseFile') as File + File noticeFile = ext.get('noticeFile') as File + + jarTask.metaInf { CopySpec spec -> + spec.from(licenseFile.parent) { CopySpec from -> + from.include licenseFile.name + from.rename { 'LICENSE.txt' } } - from(project.noticeFile.parent) { - include project.noticeFile.name - rename { 'NOTICE.txt' } + spec.from(noticeFile.parent) { CopySpec from -> + from.include noticeFile.name + from.rename { 'NOTICE.txt' } } } } @@ -931,35 +741,35 @@ class BuildPlugin implements Plugin { * normal jar with the shadow jar so we no longer want to run * the jar task. */ - project.tasks.jar.enabled = false - project.tasks.shadowJar { + project.tasks.getByName(JavaPlugin.JAR_TASK_NAME).enabled = false + project.tasks.getByName('shadowJar').configure { ShadowJar shadowJar -> /* * Replace the default "shadow" classifier with null * which will leave the classifier off of the file name. */ - classifier = null + shadowJar.classifier = null /* * Not all cases need service files merged but it is * better to be safe */ - mergeServiceFiles() + shadowJar.mergeServiceFiles() /* * Bundle dependencies of the "bundled" configuration. */ - configurations = [project.configurations.bundle] + shadowJar.configurations = [project.configurations.getByName('bundle')] } // Make sure we assemble the shadow jar - project.tasks.assemble.dependsOn project.tasks.shadowJar - project.artifacts { - apiElements project.tasks.shadowJar - } + project.tasks.getByName(BasePlugin.ASSEMBLE_TASK_NAME).dependsOn project.tasks.getByName('shadowJar') + project.artifacts.add('apiElements', project.tasks.getByName('shadowJar')) } } static void configureTestTasks(Project project) { + ExtraPropertiesExtension ext = project.extensions.getByType(ExtraPropertiesExtension) + // Default test task should run only unit tests - project.tasks.withType(Test).matching { it.name == 'test' }.all { - include '**/*Tests.class' + project.tasks.withType(Test).matching { Test task -> task.name == 'test' }.all { Test task -> + task.include '**/*Tests.class' } // none of this stuff is applicable to the `:buildSrc` project tests @@ -969,150 +779,127 @@ class BuildPlugin implements Plugin { project.tasks.withType(Test) { Test test -> File testOutputDir = new File(test.reports.junitXml.getDestination(), "output") - doFirst { + ErrorReportingTestListener listener = new ErrorReportingTestListener(test.testLogging, testOutputDir) + test.extensions.add(ErrorReportingTestListener, 'errorReportingTestListener', listener) + test.addTestOutputListener(listener) + test.addTestListener(listener) + + /* + * We use lazy-evaluated strings in order to configure system properties whose value will not be known until + * execution time (e.g. cluster port numbers). Adding these via the normal DSL doesn't work as these get treated + * as task inputs and therefore Gradle attempts to snapshot them before/after task execution. This fails due + * to the GStrings containing references to non-serializable objects. + * + * We bypass this by instead passing this system properties vi a CommandLineArgumentProvider. This has the added + * side-effect that these properties are NOT treated as inputs, therefore they don't influence things like the + * build cache key or up to date checking. + */ + SystemPropertyCommandLineArgumentProvider nonInputProperties = new SystemPropertyCommandLineArgumentProvider() + + test.doFirst { project.mkdir(testOutputDir) project.mkdir(heapdumpDir) project.mkdir(test.workingDir) + + if (project.property('inFipsJvm')) { + nonInputProperties.systemProperty('runtime.java', "${-> (ext.get('runtimeJavaVersion') as JavaVersion).getMajorVersion()}FIPS") + } else { + nonInputProperties.systemProperty('runtime.java', "${-> (ext.get('runtimeJavaVersion') as JavaVersion).getMajorVersion()}") + } } - def listener = new ErrorReportingTestListener(test.testLogging, testOutputDir) - test.extensions.add(ErrorReportingTestListener, 'errorReportingTestListener', listener) - addTestOutputListener(listener) - addTestListener(listener) + test.jvmArgumentProviders.add(nonInputProperties) + test.extensions.getByType(ExtraPropertiesExtension).set('nonInputProperties', nonInputProperties) - executable = "${project.runtimeJavaHome}/bin/java" - workingDir = project.file("${project.buildDir}/testrun/${test.name}") - maxParallelForks = project.rootProject.ext.defaultParallel + test.executable = "${ext.get('runtimeJavaHome')}/bin/java" + test.workingDir = project.file("${project.buildDir}/testrun/${test.name}") + test.maxParallelForks = project.rootProject.extensions.getByType(ExtraPropertiesExtension).get('defaultParallel') as Integer - exclude '**/*$*.class' + test.exclude '**/*$*.class' - jvmArgs "-Xmx${System.getProperty('tests.heap.size', '512m')}", + test.jvmArgs "-Xmx${System.getProperty('tests.heap.size', '512m')}", "-Xms${System.getProperty('tests.heap.size', '512m')}", '-XX:+HeapDumpOnOutOfMemoryError', - "-XX:HeapDumpPath=$heapdumpDir" + "-XX:HeapDumpPath=$heapdumpDir", + '--illegal-access=warn' - if (project.runtimeJavaVersion >= JavaVersion.VERSION_1_9) { - jvmArgs '--illegal-access=warn' - } if (System.getProperty('tests.jvm.argline')) { - jvmArgs System.getProperty('tests.jvm.argline').split(" ") + test.jvmArgs System.getProperty('tests.jvm.argline').split(" ") } if (Boolean.parseBoolean(System.getProperty('tests.asserts', 'true'))) { - jvmArgs '-ea', '-esa' + test.jvmArgs '-ea', '-esa' } // we use './temp' since this is per JVM and tests are forbidden from writing to CWD - systemProperties 'gradle.dist.lib': new File(project.class.location.toURI()).parent, + test.systemProperties 'gradle.dist.lib': new File(project.class.location.toURI()).parent, 'gradle.worker.jar': "${project.gradle.getGradleUserHomeDir()}/caches/${project.gradle.gradleVersion}/workerMain/gradle-worker.jar", 'gradle.user.home': project.gradle.getGradleUserHomeDir(), 'java.io.tmpdir': './temp', 'java.awt.headless': 'true', 'tests.gradle': 'true', 'tests.artifact': project.name, - 'tests.task': path, + 'tests.task': test.path, 'tests.security.manager': 'true', - 'tests.seed': project.testSeed, - 'jna.nosys': 'true', - 'compiler.java': project.ext.compilerJavaVersion.getMajorVersion() + 'tests.seed': project.property('testSeed'), + 'jna.nosys': 'true' + + nonInputProperties.systemProperty('compiler.java', "${-> (ext.get('compilerJavaVersion') as JavaVersion).getMajorVersion()}") - if (project.ext.inFipsJvm) { - systemProperty 'runtime.java', project.ext.runtimeJavaVersion.getMajorVersion() + "FIPS" - } else { - systemProperty 'runtime.java', project.ext.runtimeJavaVersion.getMajorVersion() - } // TODO: remove setting logging level via system property - systemProperty 'tests.logger.level', 'WARN' + test.systemProperty 'tests.logger.level', 'WARN' System.getProperties().each { key, value -> - if ((key.startsWith('tests.') || key.startsWith('es.'))) { - systemProperty key, value + if ((key.toString().startsWith('tests.') || key.toString().startsWith('es.'))) { + test.systemProperty key.toString(), value } } // TODO: remove this once ctx isn't added to update script params in 7.0 - systemProperty 'es.scripting.update.ctx_in_params', 'false' + test.systemProperty 'es.scripting.update.ctx_in_params', 'false' - testLogging { - showExceptions = true - showCauses = true - exceptionFormat = 'full' + test.testLogging { TestLoggingContainer logging -> + logging.showExceptions = true + logging.showCauses = true + logging.exceptionFormat = 'full' } project.plugins.withType(ShadowPlugin).whenPluginAdded { // Test against a shadow jar if we made one - classpath -= project.tasks.compileJava.outputs.files - classpath += project.tasks.shadowJar.outputs.files - - dependsOn project.tasks.shadowJar - } - } - } - } + test.classpath -= project.tasks.getByName('compileJava').outputs.files + test.classpath += project.tasks.getByName('shadowJar').outputs.files - private static int numberOfPhysicalCores(Project project) { - if (project.file("/proc/cpuinfo").exists()) { - // Count physical cores on any Linux distro ( don't count hyper-threading ) - Map socketToCore = [:] - String currentID = "" - project.file("/proc/cpuinfo").readLines().forEach({ line -> - if (line.contains(":")) { - List parts = line.split(":", 2).collect({it.trim()}) - String name = parts[0], value = parts[1] - // the ID of the CPU socket - if (name == "physical id") { - currentID = value - } - // number of cores not including hyper-threading - if (name == "cpu cores") { - assert currentID.isEmpty() == false - socketToCore[currentID] = Integer.valueOf(value) - currentID = "" - } + test.dependsOn project.tasks.getByName('shadowJar') } - }) - return socketToCore.values().sum() - } else if ('Mac OS X'.equals(System.getProperty('os.name'))) { - // Ask macOS to count physical CPUs for us - ByteArrayOutputStream stdout = new ByteArrayOutputStream() - project.exec { - executable 'sysctl' - args '-n', 'hw.physicalcpu' - standardOutput = stdout } - return Integer.parseInt(stdout.toString('UTF-8').trim()) - } else { - // guess that it is half the number of processors (which is wrong on systems that do not have simultaneous multi-threading) - // TODO: implement this on Windows - return Runtime.getRuntime().availableProcessors() / 2 } } private static configurePrecommit(Project project) { Task precommit = PrecommitTasks.create(project, true) - project.check.dependsOn(precommit) - project.test.mustRunAfter(precommit) + project.tasks.getByName(LifecycleBasePlugin.CHECK_TASK_NAME).dependsOn(precommit) + project.tasks.getByName(JavaPlugin.TEST_TASK_NAME).mustRunAfter(precommit) // only require dependency licenses for non-elasticsearch deps - project.dependencyLicenses.dependencies = project.configurations.runtime.fileCollection { - it.group.startsWith('org.elasticsearch') == false - } - project.configurations.compileOnly + (project.tasks.getByName('dependencyLicenses') as DependencyLicensesTask).dependencies = project.configurations.getByName(JavaPlugin.RUNTIME_CONFIGURATION_NAME).fileCollection { Dependency dependency -> + dependency.group.startsWith('org.elasticsearch') == false + } - project.configurations.getByName(JavaPlugin.COMPILE_ONLY_CONFIGURATION_NAME) project.plugins.withType(ShadowPlugin).whenPluginAdded { - project.dependencyLicenses.dependencies += project.configurations.bundle.fileCollection { - it.group.startsWith('org.elasticsearch') == false + (project.tasks.getByName('dependencyLicenses') as DependencyLicensesTask).dependencies += project.configurations.getByName('bundle').fileCollection { Dependency dependency -> + dependency.group.startsWith('org.elasticsearch') == false } } } private static configureDependenciesInfo(Project project) { - Task deps = project.tasks.create("dependenciesInfo", DependenciesInfoTask.class) - deps.runtimeConfiguration = project.configurations.runtime + DependenciesInfoTask deps = project.tasks.create("dependenciesInfo", DependenciesInfoTask) + deps.runtimeConfiguration = project.configurations.getByName(JavaPlugin.RUNTIME_CONFIGURATION_NAME) project.plugins.withType(ShadowPlugin).whenPluginAdded { deps.runtimeConfiguration = project.configurations.create('infoDeps') - deps.runtimeConfiguration.extendsFrom(project.configurations.runtime, project.configurations.bundle) + deps.runtimeConfiguration.extendsFrom(project.configurations.getByName(JavaPlugin.RUNTIME_CONFIGURATION_NAME), project.configurations.getByName('bundle')) } - deps.compileOnlyConfiguration = project.configurations.compileOnly + deps.compileOnlyConfiguration = project.configurations.getByName(JavaPlugin.COMPILE_ONLY_CONFIGURATION_NAME) project.afterEvaluate { - deps.mappings = project.dependencyLicenses.mappings + deps.mappings = (project.tasks.getByName('dependencyLicenses') as DependencyLicensesTask).mappings } } @@ -1124,11 +911,12 @@ class BuildPlugin implements Plugin { * the reproduction line from one run be useful on another run. */ static String setupSeed(Project project) { - if (project.rootProject.ext.has('testSeed')) { + ExtraPropertiesExtension ext = project.rootProject.extensions.getByType(ExtraPropertiesExtension) + if (ext.has('testSeed')) { /* Skip this if we've already pinned the testSeed. It is important * that this checks the rootProject so that we know we've only ever * initialized one time. */ - return project.rootProject.ext.testSeed + return ext.get('testSeed') } String testSeed = System.getProperty('tests.seed') @@ -1137,7 +925,7 @@ class BuildPlugin implements Plugin { testSeed = Long.toUnsignedString(seed, 16).toUpperCase(Locale.ROOT) } - project.rootProject.ext.testSeed = testSeed + ext.set('testSeed', testSeed) return testSeed } @@ -1169,4 +957,19 @@ class BuildPlugin implements Plugin { }) } } + + private static class SystemPropertyCommandLineArgumentProvider implements CommandLineArgumentProvider { + private final Map systemProperties = [:] + + void systemProperty(String key, Object value) { + systemProperties.put(key, value) + } + + @Override + Iterable asArguments() { + return systemProperties.collect { key, value -> + "-D${key}=${value.toString()}".toString() + } + } + } } diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy index d5bdd2117023c..e04d0966c412d 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy @@ -28,6 +28,7 @@ import org.elasticsearch.gradle.test.RestIntegTestTask import org.elasticsearch.gradle.test.RunTask import org.elasticsearch.gradle.testclusters.TestClustersPlugin import org.gradle.api.InvalidUserDataException +import org.gradle.api.Plugin import org.gradle.api.Project import org.gradle.api.Task import org.gradle.api.publish.maven.MavenPublication @@ -43,13 +44,13 @@ import java.util.regex.Pattern /** * Encapsulates build configuration for an Elasticsearch plugin. */ -class PluginBuildPlugin extends BuildPlugin { +class PluginBuildPlugin implements Plugin { public static final String PLUGIN_EXTENSION_NAME = 'esplugin' @Override void apply(Project project) { - super.apply(project) + project.pluginManager.apply(BuildPlugin) PluginPropertiesExtension extension = project.extensions.create(PLUGIN_EXTENSION_NAME, PluginPropertiesExtension, project) configureDependencies(project) diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy index e14a8f97ba81d..e5e4f021507f9 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy @@ -118,15 +118,13 @@ class PrecommitTasks { } private static Task configureThirdPartyAudit(Project project) { - ThirdPartyAuditTask thirdPartyAuditTask = project.tasks.create('thirdPartyAudit', ThirdPartyAuditTask.class) ExportElasticsearchBuildResourcesTask buildResources = project.tasks.getByName('buildResources') - thirdPartyAuditTask.configure { - dependsOn(buildResources) - signatureFile = buildResources.copy("forbidden/third-party-audit.txt") - javaHome = project.runtimeJavaHome - targetCompatibility = project.runtimeJavaVersion + return project.tasks.create('thirdPartyAudit', ThirdPartyAuditTask.class) { task -> + task.dependsOn(buildResources) + task.signatureFile = buildResources.copy("forbidden/third-party-audit.txt") + task.javaHome = project.runtimeJavaHome + task.targetCompatibility.set(project.provider({ project.runtimeJavaVersion })) } - return thirdPartyAuditTask } private static Task configureForbiddenApisCli(Project project) { @@ -134,15 +132,18 @@ class PrecommitTasks { ExportElasticsearchBuildResourcesTask buildResources = project.tasks.getByName('buildResources') project.tasks.withType(CheckForbiddenApis) { dependsOn(buildResources) - targetCompatibility = project.runtimeJavaVersion.getMajorVersion() - if (project.runtimeJavaVersion > JavaVersion.VERSION_11) { - doLast { - project.logger.info( - "Forbidden APIs does not support java version past 11. Will use the signatures from 11 for ", - project.runtimeJavaVersion - ) + doFirst { + // we need to defer this configuration since we don't know the runtime java version until execution time + targetCompatibility = project.runtimeJavaVersion.getMajorVersion() + if (project.runtimeJavaVersion > JavaVersion.VERSION_11) { + doLast { + project.logger.info( + "Forbidden APIs does not support java version past 11. Will use the signatures from 11 for ", + project.runtimeJavaVersion + ) + } + targetCompatibility = JavaVersion.VERSION_11.getMajorVersion() } - targetCompatibility = JavaVersion.VERSION_11.getMajorVersion() } bundledSignatures = [ "jdk-unsafe", "jdk-deprecated", "jdk-non-portable", "jdk-system-out" diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy index c0bf2a5dccee5..bc5c7ff0871bb 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy @@ -300,12 +300,6 @@ class ClusterFormationTasks { // its run after plugins have been installed, as the extra config files may belong to plugins setup = configureExtraConfigFilesTask(taskName(prefix, node, 'extraConfig'), project, setup, node) - // If the node runs in a FIPS 140-2 JVM, the BCFKS default keystore will be password protected - if (project.inFipsJvm){ - node.config.systemProperties.put('javax.net.ssl.trustStorePassword', 'password') - node.config.systemProperties.put('javax.net.ssl.keyStorePassword', 'password') - } - // extra setup commands for (Map.Entry command : node.config.setupCommands.entrySet()) { // the first argument is the actual script name, relative to home @@ -402,16 +396,17 @@ class ClusterFormationTasks { if (node.nodeVersion.major >= 7) { esConfig['indices.breaker.total.use_real_memory'] = false } - for (Map.Entry setting : node.config.settings) { - if (setting.value == null) { - esConfig.remove(setting.key) - } else { - esConfig.put(setting.key, setting.value) - } - } Task writeConfig = project.tasks.create(name: name, type: DefaultTask, dependsOn: setup) writeConfig.doFirst { + for (Map.Entry setting : node.config.settings) { + if (setting.value == null) { + esConfig.remove(setting.key) + } else { + esConfig.put(setting.key, setting.value) + } + } + esConfig = configFilter.call(esConfig) File configFile = new File(node.pathConf, 'elasticsearch.yml') logger.info("Configuring ${configFile}") @@ -732,6 +727,12 @@ class ClusterFormationTasks { } start.doLast(elasticsearchRunner) start.doFirst { + // If the node runs in a FIPS 140-2 JVM, the BCFKS default keystore will be password protected + if (project.inFipsJvm){ + node.config.systemProperties.put('javax.net.ssl.trustStorePassword', 'password') + node.config.systemProperties.put('javax.net.ssl.keyStorePassword', 'password') + } + // Configure ES JAVA OPTS - adds system properties, assertion flags, remote debug etc List esJavaOpts = [node.env.get('ES_JAVA_OPTS', '')] String collectedSystemProperties = node.config.systemProperties.collect { key, value -> "-D${key}=${value}" }.join(" ") diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestIntegTestTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestIntegTestTask.groovy index 52c498aa98d79..40cefdcc25fb9 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestIntegTestTask.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestIntegTestTask.groovy @@ -86,52 +86,25 @@ class RestIntegTestTask extends DefaultTask { runner.include('**/*IT.class') runner.systemProperty('tests.rest.load_packaged', 'false') - /* - * We use lazy-evaluated strings in order to configure system properties whose value will not be known until - * execution time (e.g. cluster port numbers). Adding these via the normal DSL doesn't work as these get treated - * as task inputs and therefore Gradle attempts to snapshot them before/after task execution. This fails due - * to the GStrings containing references to non-serializable objects. - * - * We bypass this by instead passing this system properties vi a CommandLineArgumentProvider. This has the added - * side-effect that these properties are NOT treated as inputs, therefore they don't influence things like the - * build cache key or up to date checking. - */ - def nonInputProperties = new CommandLineArgumentProvider() { - private final Map systemProperties = [:] - - void systemProperty(String key, Object value) { - systemProperties.put(key, value) - } - - @Override - Iterable asArguments() { - return systemProperties.collect { key, value -> - "-D${key}=${value.toString()}".toString() - } - } - } - runner.jvmArgumentProviders.add(nonInputProperties) - runner.ext.nonInputProperties = nonInputProperties - if (System.getProperty("tests.rest.cluster") == null) { if (System.getProperty("tests.cluster") != null || System.getProperty("tests.clustername") != null) { throw new IllegalArgumentException("tests.rest.cluster, tests.cluster, and tests.clustername must all be null or non-null") } if (usesTestclusters == true) { ElasticsearchCluster cluster = project.testClusters."${name}" - nonInputProperties.systemProperty('tests.rest.cluster', "${-> cluster.allHttpSocketURI.join(",") }") - nonInputProperties.systemProperty('tests.cluster', "${-> cluster.transportPortURI }") - nonInputProperties.systemProperty('tests.clustername', "${-> cluster.getName() }") + runner.nonInputProperties.systemProperty('tests.rest.cluster', "${-> cluster.allHttpSocketURI.join(",") }") + runner.nonInputProperties.systemProperty('tests.cluster', "${-> cluster.transportPortURI }") + runner.nonInputProperties.systemProperty('tests.clustername', "${-> cluster.getName() }") } else { // we pass all nodes to the rest cluster to allow the clients to round-robin between them // this is more realistic than just talking to a single node - nonInputProperties.systemProperty('tests.rest.cluster', "${-> nodes.collect { it.httpUri() }.join(",")}") - nonInputProperties.systemProperty('tests.config.dir', "${-> nodes[0].pathConf}") + runner.nonInputProperties.systemProperty('tests.rest.cluster', "${-> nodes.collect { it.httpUri() }.join(",")}") + runner.nonInputProperties.systemProperty('tests.config.dir', "${-> nodes[0].pathConf}") // TODO: our "client" qa tests currently use the rest-test plugin. instead they should have their own plugin // that sets up the test cluster and passes this transport uri instead of http uri. Until then, we pass // both as separate sysprops - nonInputProperties.systemProperty('tests.cluster', "${-> nodes[0].transportUri()}") - nonInputProperties.systemProperty('tests.clustername', "${-> nodes[0].clusterName}") + runner.nonInputProperties.systemProperty('tests.cluster', "${-> nodes[0].transportUri()}") + runner.nonInputProperties.systemProperty('tests.clustername', "${-> nodes[0].clusterName}") // dump errors and warnings from cluster log on failure TaskExecutionAdapter logDumpListener = new TaskExecutionAdapter() { diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/StandaloneRestTestPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/StandaloneRestTestPlugin.groovy index 2a858206ebd72..6d895abaa97c7 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/StandaloneRestTestPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/StandaloneRestTestPlugin.groovy @@ -27,11 +27,14 @@ import org.elasticsearch.gradle.ExportElasticsearchBuildResourcesTask import org.elasticsearch.gradle.VersionProperties import org.elasticsearch.gradle.precommit.PrecommitTasks import org.gradle.api.InvalidUserDataException +import org.gradle.api.JavaVersion import org.gradle.api.Plugin import org.gradle.api.Project import org.gradle.api.artifacts.Configuration +import org.gradle.api.plugins.ExtraPropertiesExtension import org.gradle.api.plugins.JavaBasePlugin import org.gradle.api.plugins.JavaPlugin +import org.gradle.api.plugins.JavaPluginExtension import org.gradle.api.tasks.SourceSet import org.gradle.api.tasks.SourceSetContainer import org.gradle.api.tasks.compile.JavaCompile @@ -57,10 +60,13 @@ class StandaloneRestTestPlugin implements Plugin { project.pluginManager.apply(JavaBasePlugin) project.getTasks().create("buildResources", ExportElasticsearchBuildResourcesTask) - BuildPlugin.globalBuildInfo(project) BuildPlugin.configureRepositories(project) BuildPlugin.configureTestTasks(project) + ExtraPropertiesExtension ext = project.extensions.getByType(ExtraPropertiesExtension) + project.extensions.getByType(JavaPluginExtension).sourceCompatibility = ext.get('minimumRuntimeVersion') as JavaVersion + project.extensions.getByType(JavaPluginExtension).targetCompatibility = ext.get('minimumRuntimeVersion') as JavaVersion + // only setup tests to build SourceSetContainer sourceSets = project.extensions.getByType(SourceSetContainer) SourceSet testSourceSet = sourceSets.create('test') diff --git a/buildSrc/src/main/minimumRuntime/org/elasticsearch/gradle/JdkJarHellCheck.java b/buildSrc/src/main/java/org/elasticsearch/gradle/JdkJarHellCheck.java similarity index 100% rename from buildSrc/src/main/minimumRuntime/org/elasticsearch/gradle/JdkJarHellCheck.java rename to buildSrc/src/main/java/org/elasticsearch/gradle/JdkJarHellCheck.java diff --git a/buildSrc/src/main/minimumRuntime/org/elasticsearch/gradle/LazyFileOutputStream.java b/buildSrc/src/main/java/org/elasticsearch/gradle/LazyFileOutputStream.java similarity index 100% rename from buildSrc/src/main/minimumRuntime/org/elasticsearch/gradle/LazyFileOutputStream.java rename to buildSrc/src/main/java/org/elasticsearch/gradle/LazyFileOutputStream.java diff --git a/buildSrc/src/main/minimumRuntime/org/elasticsearch/gradle/LoggedExec.java b/buildSrc/src/main/java/org/elasticsearch/gradle/LoggedExec.java similarity index 100% rename from buildSrc/src/main/minimumRuntime/org/elasticsearch/gradle/LoggedExec.java rename to buildSrc/src/main/java/org/elasticsearch/gradle/LoggedExec.java diff --git a/buildSrc/src/main/minimumRuntime/org/elasticsearch/gradle/Version.java b/buildSrc/src/main/java/org/elasticsearch/gradle/Version.java similarity index 100% rename from buildSrc/src/main/minimumRuntime/org/elasticsearch/gradle/Version.java rename to buildSrc/src/main/java/org/elasticsearch/gradle/Version.java diff --git a/buildSrc/src/main/minimumRuntime/org/elasticsearch/gradle/VersionProperties.java b/buildSrc/src/main/java/org/elasticsearch/gradle/VersionProperties.java similarity index 100% rename from buildSrc/src/main/minimumRuntime/org/elasticsearch/gradle/VersionProperties.java rename to buildSrc/src/main/java/org/elasticsearch/gradle/VersionProperties.java diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/info/GenerateGlobalBuildInfoTask.java b/buildSrc/src/main/java/org/elasticsearch/gradle/info/GenerateGlobalBuildInfoTask.java new file mode 100644 index 0000000000000..8537775ee129b --- /dev/null +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/info/GenerateGlobalBuildInfoTask.java @@ -0,0 +1,276 @@ +package org.elasticsearch.gradle.info; + +import org.elasticsearch.gradle.OS; +import org.gradle.api.DefaultTask; +import org.gradle.api.GradleException; +import org.gradle.api.JavaVersion; +import org.gradle.api.file.RegularFileProperty; +import org.gradle.api.model.ObjectFactory; +import org.gradle.api.tasks.CacheableTask; +import org.gradle.api.tasks.Input; +import org.gradle.api.tasks.InputDirectory; +import org.gradle.api.tasks.Nested; +import org.gradle.api.tasks.OutputFile; +import org.gradle.api.tasks.PathSensitive; +import org.gradle.api.tasks.PathSensitivity; +import org.gradle.api.tasks.TaskAction; +import org.gradle.internal.jvm.Jvm; +import org.gradle.process.ExecResult; + +import javax.inject.Inject; +import java.io.BufferedWriter; +import java.io.ByteArrayOutputStream; +import java.io.File; +import java.io.FileWriter; +import java.io.IOException; +import java.io.UncheckedIOException; +import java.io.Writer; +import java.nio.file.Files; +import java.util.Arrays; +import java.util.List; + +import static java.nio.charset.StandardCharsets.UTF_8; + +@CacheableTask +public class GenerateGlobalBuildInfoTask extends DefaultTask { + private JavaVersion minimumCompilerVersion; + private JavaVersion minimumRuntimeVersion; + private File compilerJavaHome; + private File runtimeJavaHome; + private List javaVersions; + private final RegularFileProperty outputFile; + private final RegularFileProperty compilerVersionFile; + private final RegularFileProperty runtimeVersionFile; + private final RegularFileProperty fipsJvmFile; + + @Inject + public GenerateGlobalBuildInfoTask(ObjectFactory objectFactory) { + this.outputFile = objectFactory.fileProperty(); + this.compilerVersionFile = objectFactory.fileProperty(); + this.runtimeVersionFile = objectFactory.fileProperty(); + this.fipsJvmFile = objectFactory.fileProperty(); + } + + @Input + public JavaVersion getMinimumCompilerVersion() { + return minimumCompilerVersion; + } + + public void setMinimumCompilerVersion(JavaVersion minimumCompilerVersion) { + this.minimumCompilerVersion = minimumCompilerVersion; + } + + @Input + public JavaVersion getMinimumRuntimeVersion() { + return minimumRuntimeVersion; + } + + public void setMinimumRuntimeVersion(JavaVersion minimumRuntimeVersion) { + this.minimumRuntimeVersion = minimumRuntimeVersion; + } + + @InputDirectory + @PathSensitive(PathSensitivity.RELATIVE) + public File getCompilerJavaHome() { + return compilerJavaHome; + } + + public void setCompilerJavaHome(File compilerJavaHome) { + this.compilerJavaHome = compilerJavaHome; + } + + @InputDirectory + @PathSensitive(PathSensitivity.RELATIVE) + public File getRuntimeJavaHome() { + return runtimeJavaHome; + } + + public void setRuntimeJavaHome(File runtimeJavaHome) { + this.runtimeJavaHome = runtimeJavaHome; + } + + @Nested + public List getJavaVersions() { + return javaVersions; + } + + public void setJavaVersions(List javaVersions) { + this.javaVersions = javaVersions; + } + + @OutputFile + public RegularFileProperty getOutputFile() { + return outputFile; + } + + @OutputFile + public RegularFileProperty getCompilerVersionFile() { + return compilerVersionFile; + } + + @OutputFile + public RegularFileProperty getRuntimeVersionFile() { + return runtimeVersionFile; + } + + @OutputFile + public RegularFileProperty getFipsJvmFile() { + return fipsJvmFile; + } + + @TaskAction + public void generate() { + String javaVendor = System.getProperty("java.vendor"); + String gradleJavaVersion = System.getProperty("java.version"); + String gradleJavaVersionDetails = javaVendor + " " + gradleJavaVersion + " [" + System.getProperty("java.vm.name") + + " " + System.getProperty("java.vm.version") + "]"; + + String compilerJavaVersionDetails = gradleJavaVersionDetails; + JavaVersion compilerJavaVersionEnum = JavaVersion.current(); + String runtimeJavaVersionDetails = gradleJavaVersionDetails; + JavaVersion runtimeJavaVersionEnum = JavaVersion.current(); + File gradleJavaHome = Jvm.current().getJavaHome(); + boolean inFipsJvm = false; + + try { + if (Files.isSameFile(compilerJavaHome.toPath(), gradleJavaHome.toPath()) == false) { + if (compilerJavaHome.exists()) { + compilerJavaVersionDetails = findJavaVersionDetails(compilerJavaHome); + compilerJavaVersionEnum = JavaVersion.toVersion(findJavaSpecificationVersion(compilerJavaHome)); + } else { + throw new RuntimeException("Compiler Java home path of '" + compilerJavaHome + "' does not exist"); + } + } + + if (Files.isSameFile(runtimeJavaHome.toPath(), gradleJavaHome.toPath()) == false) { + if (runtimeJavaHome.exists()) { + runtimeJavaVersionDetails = findJavaVersionDetails(runtimeJavaHome); + runtimeJavaVersionEnum = JavaVersion.toVersion(findJavaSpecificationVersion(runtimeJavaHome)); + + // We don't expect Gradle to be running in a FIPS JVM + String inFipsJvmScript = "print(java.security.Security.getProviders()[0].name.toLowerCase().contains(\"fips\"));"; + inFipsJvm = Boolean.parseBoolean(runJavaAsScript(runtimeJavaHome, inFipsJvmScript)); + } else { + throw new RuntimeException("Runtime Java home path of '" + compilerJavaHome + "' does not exist"); + } + } + } catch (IOException e) { + throw new UncheckedIOException(e); + } + + try (BufferedWriter writer = new BufferedWriter(new FileWriter(outputFile.getAsFile().get()))) { + writer.write(" Gradle Version : " + getProject().getGradle().getGradleVersion() + "\n"); + writer.write(" OS Info : " + System.getProperty("os.name") + " " + System.getProperty("os.version") + + " (" + System.getProperty("os.arch") + ")\n"); + if (gradleJavaVersionDetails.equals(compilerJavaVersionDetails) == false + || gradleJavaVersionDetails.equals(runtimeJavaVersionDetails) == false) { + writer.write(" Compiler JDK Version : " + compilerJavaVersionEnum + " (" + compilerJavaVersionDetails + ")\n"); + writer.write(" Compiler java.home : " + compilerJavaHome + "\n"); + writer.write(" Runtime JDK Version : " + runtimeJavaVersionEnum + " (" + runtimeJavaVersionDetails + ")\n"); + writer.write(" Runtime java.home : " + runtimeJavaHome + "\n"); + writer.write(" Gradle JDK Version : " + JavaVersion.toVersion(gradleJavaVersion) + + " (" + gradleJavaVersionDetails + ")\n"); + writer.write(" Gradle java.home : " + gradleJavaHome); + } else { + writer.write(" JDK Version : " + JavaVersion.toVersion(gradleJavaVersion) + + " (" + gradleJavaVersionDetails + ")\n"); + writer.write(" JAVA_HOME : " + gradleJavaHome); + } + } catch (IOException e) { + throw new UncheckedIOException(e); + } + + // enforce Java version + if (compilerJavaVersionEnum.compareTo(minimumCompilerVersion) < 0) { + String message = "The compiler java.home must be set to a JDK installation directory for Java " + minimumCompilerVersion + + " but is [" + compilerJavaHome + "] corresponding to [" + compilerJavaVersionEnum + "]"; + throw new GradleException(message); + } + + if (runtimeJavaVersionEnum.compareTo(minimumRuntimeVersion) < 0) { + String message = "The runtime java.home must be set to a JDK installation directory for Java " + minimumRuntimeVersion + + " but is [" + runtimeJavaHome + "] corresponding to [" + runtimeJavaVersionEnum + "]"; + throw new GradleException(message); + } + + for (JavaHome javaVersion : javaVersions) { + File javaHome = javaVersion.getJavaHome(); + if (javaHome == null) { + continue; + } + JavaVersion javaVersionEnum = JavaVersion.toVersion(findJavaSpecificationVersion(javaHome)); + JavaVersion expectedJavaVersionEnum; + int version = javaVersion.getVersion(); + if (version < 9) { + expectedJavaVersionEnum = JavaVersion.toVersion("1." + version); + } else { + expectedJavaVersionEnum = JavaVersion.toVersion(Integer.toString(version)); + } + if (javaVersionEnum != expectedJavaVersionEnum) { + String message = "The environment variable JAVA" + version + "_HOME must be set to a JDK installation directory for Java " + + expectedJavaVersionEnum + " but is [" + javaHome + "] corresponding to [" + javaVersionEnum + "]"; + throw new GradleException(message); + } + } + + writeToFile(compilerVersionFile.getAsFile().get(), compilerJavaVersionEnum.name()); + writeToFile(runtimeVersionFile.getAsFile().get(), runtimeJavaVersionEnum.name()); + writeToFile(fipsJvmFile.getAsFile().get(), Boolean.toString(inFipsJvm)); + } + + private void writeToFile(File file, String content) { + try (Writer writer = new FileWriter(file)) { + writer.write(content); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } + + /** + * Finds printable java version of the given JAVA_HOME + */ + private String findJavaVersionDetails(File javaHome) { + String versionInfoScript = "print(" + + "java.lang.System.getProperty(\"java.vendor\") + \" \" + java.lang.System.getProperty(\"java.version\") + " + + "\" [\" + java.lang.System.getProperty(\"java.vm.name\") + \" \" + java.lang.System.getProperty(\"java.vm.version\") + \"]\");"; + return runJavaAsScript(javaHome, versionInfoScript).trim(); + } + + /** + * Finds the parsable java specification version + */ + private String findJavaSpecificationVersion(File javaHome) { + String versionScript = "print(java.lang.System.getProperty(\"java.specification.version\"));"; + return runJavaAsScript(javaHome, versionScript); + } + + /** + * Runs the given javascript using jjs from the jdk, and returns the output + */ + private String runJavaAsScript(File javaHome, String script) { + ByteArrayOutputStream stdout = new ByteArrayOutputStream(); + ByteArrayOutputStream stderr = new ByteArrayOutputStream(); + if (OS.current() == OS.WINDOWS) { + // gradle/groovy does not properly escape the double quote for windows + script = script.replace("\"", "\\\""); + } + File jrunscriptPath = new File(javaHome, "bin/jrunscript"); + String finalScript = script; + ExecResult result = getProject().exec(spec -> { + spec.setExecutable(jrunscriptPath); + spec.args("-e", finalScript); + spec.setStandardOutput(stdout); + spec.setErrorOutput(stderr); + spec.setIgnoreExitValue(true); + }); + + if (result.getExitValue() != 0) { + getLogger().error("STDOUT:"); + Arrays.stream(stdout.toString(UTF_8).split(System.getProperty("line.separator"))).forEach(getLogger()::error); + getLogger().error("STDERR:"); + Arrays.stream(stderr.toString(UTF_8).split(System.getProperty("line.separator"))).forEach(getLogger()::error); + result.rethrowFailure(); + } + return stdout.toString(UTF_8).trim(); + } +} diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/info/GlobalBuildInfoPlugin.java b/buildSrc/src/main/java/org/elasticsearch/gradle/info/GlobalBuildInfoPlugin.java new file mode 100644 index 0000000000000..f0f34e84261c6 --- /dev/null +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/info/GlobalBuildInfoPlugin.java @@ -0,0 +1,198 @@ +package org.elasticsearch.gradle.info; + +import org.elasticsearch.gradle.OS; +import org.gradle.api.GradleException; +import org.gradle.api.JavaVersion; +import org.gradle.api.Plugin; +import org.gradle.api.Project; +import org.gradle.api.plugins.ExtraPropertiesExtension; +import org.gradle.internal.jvm.Jvm; + +import java.io.BufferedReader; +import java.io.ByteArrayOutputStream; +import java.io.File; +import java.io.FileReader; +import java.io.IOException; +import java.io.InputStreamReader; +import java.io.UncheckedIOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; + +public class GlobalBuildInfoPlugin implements Plugin { + private static final String GLOBAL_INFO_EXTENSION_NAME = "globalInfo"; + private static Integer _defaultParallel = null; + + @Override + public void apply(Project project) { + if (project != project.getRootProject()) { + throw new IllegalStateException(this.getClass().getName() + " can only be applied to the root project."); + } + + GlobalInfoExtension extension = project.getExtensions().create(GLOBAL_INFO_EXTENSION_NAME, GlobalInfoExtension.class); + + JavaVersion minimumCompilerVersion = JavaVersion.toVersion(getResourceContents("/minimumCompilerVersion")); + JavaVersion minimumRuntimeVersion = JavaVersion.toVersion(getResourceContents("/minimumRuntimeVersion")); + + File compilerJavaHome = findCompilerJavaHome(); + File runtimeJavaHome = findRuntimeJavaHome(compilerJavaHome); + + final List javaVersions = new ArrayList<>(); + for (int version = 8; version <= Integer.parseInt(minimumCompilerVersion.getMajorVersion()); version++) { + if (System.getenv(getJavaHomeEnvVarName(Integer.toString(version))) != null) { + javaVersions.add(JavaHome.of(version, new File(findJavaHome(Integer.toString(version))))); + } + } + + GenerateGlobalBuildInfoTask generateTask = project.getTasks().create("generateGlobalBuildInfo", + GenerateGlobalBuildInfoTask.class, task -> { + task.setJavaVersions(javaVersions); + task.setMinimumCompilerVersion(minimumCompilerVersion); + task.setMinimumRuntimeVersion(minimumRuntimeVersion); + task.setCompilerJavaHome(compilerJavaHome); + task.setRuntimeJavaHome(runtimeJavaHome); + task.getOutputFile().set(new File(project.getBuildDir(), "global-build-info")); + task.getCompilerVersionFile().set(new File(project.getBuildDir(), "java-compiler-version")); + task.getRuntimeVersionFile().set(new File(project.getBuildDir(), "java-runtime-version")); + task.getFipsJvmFile().set(new File(project.getBuildDir(), "in-fips-jvm")); + }); + + PrintGlobalBuildInfoTask printTask = project.getTasks().create("printGlobalBuildInfo", PrintGlobalBuildInfoTask.class, task -> { + task.getBuildInfoFile().set(generateTask.getOutputFile()); + task.getCompilerVersionFile().set(generateTask.getCompilerVersionFile()); + task.getRuntimeVersionFile().set(generateTask.getRuntimeVersionFile()); + task.getFipsJvmFile().set(generateTask.getFipsJvmFile()); + task.setGlobalInfoListeners(extension.listeners); + }); + + project.getExtensions().getByType(ExtraPropertiesExtension.class).set("defaultParallel", findDefaultParallel(project)); + + project.allprojects(p -> { + // Make sure than any task execution generates and prints build info + p.getTasks().all(task -> { + if (task != generateTask && task != printTask) { + task.dependsOn(printTask); + } + }); + + ExtraPropertiesExtension ext = p.getExtensions().getByType(ExtraPropertiesExtension.class); + + ext.set("compilerJavaHome", compilerJavaHome); + ext.set("runtimeJavaHome", runtimeJavaHome); + ext.set("isRuntimeJavaHomeSet", compilerJavaHome.equals(runtimeJavaHome) == false); + ext.set("javaVersions", javaVersions); + ext.set("minimumCompilerVersion", minimumCompilerVersion); + ext.set("minimumRuntimeVersion", minimumRuntimeVersion); + ext.set("gradleJavaVersion", Jvm.current().getJavaVersion()); + }); + } + + private static File findCompilerJavaHome() { + String compilerJavaHome = System.getenv("JAVA_HOME"); + String compilerJavaProperty = System.getProperty("compiler.java"); + + if (compilerJavaProperty != null) { + compilerJavaHome = findJavaHome(compilerJavaProperty); + } + + // if JAVA_HOME is not set,so we use the JDK that Gradle was run with. + return compilerJavaHome == null ? Jvm.current().getJavaHome() : new File(compilerJavaHome); + } + + private static File findRuntimeJavaHome(final File compilerJavaHome) { + String runtimeJavaProperty = System.getProperty("runtime.java"); + + if (runtimeJavaProperty != null) { + return new File(findJavaHome(runtimeJavaProperty)); + } + + return System.getenv("RUNTIME_JAVA_HOME") == null ? compilerJavaHome : new File(System.getenv("RUNTIME_JAVA_HOME")); + } + + private static String findJavaHome(String version) { + String versionedJavaHome = System.getenv(getJavaHomeEnvVarName(version)); + if (versionedJavaHome == null) { + throw new GradleException( + "$versionedVarName must be set to build Elasticsearch. " + + "Note that if the variable was just set you might have to run `./gradlew --stop` for " + + "it to be picked up. See https://github.com/elastic/elasticsearch/issues/31399 details." + ); + } + return versionedJavaHome; + } + + private static String getJavaHomeEnvVarName(String version) { + return "JAVA" + version + "_HOME"; + } + + private static String getResourceContents(String resourcePath) { + try (BufferedReader reader = new BufferedReader( + new InputStreamReader(GlobalBuildInfoPlugin.class.getResourceAsStream(resourcePath)) + )) { + StringBuilder b = new StringBuilder(); + for (String line = reader.readLine(); line != null; line = reader.readLine()) { + if (b.length() != 0) { + b.append('\n'); + } + b.append(line); + } + + return b.toString(); + } catch (IOException e) { + throw new UncheckedIOException("Error trying to read classpath resource: " + resourcePath, e); + } + } + + private static int findDefaultParallel(Project project) { + // Since it costs IO to compute this, and is done at configuration time we want to cache this if possible + // It's safe to store this in a static variable since it's just a primitive so leaking memory isn't an issue + if (_defaultParallel == null) { + File cpuInfoFile = new File("/proc/cpuinfo"); + if (cpuInfoFile.exists()) { + // Count physical cores on any Linux distro ( don't count hyper-threading ) + Map socketToCore = new HashMap<>(); + String currentID = ""; + + try (BufferedReader reader = new BufferedReader(new FileReader(cpuInfoFile))) { + for (String line = reader.readLine(); line != null; line = reader.readLine()) { + if (line.contains(":")) { + List parts = Arrays.stream(line.split(":", 2)).map(String::trim).collect(Collectors.toList()); + String name = parts.get(0); + String value = parts.get(1); + // the ID of the CPU socket + if (name.equals("physical id")) { + currentID = value; + } + // Number of cores not including hyper-threading + if (name.equals("cpu cores")) { + assert currentID.isEmpty() == false; + socketToCore.put("currentID", Integer.valueOf(value)); + currentID = ""; + } + } + } + } catch (IOException e) { + throw new UncheckedIOException(e); + } + _defaultParallel = socketToCore.values().stream().mapToInt(i -> i).sum(); + } else if (OS.current() == OS.MAC) { + // Ask macOS to count physical CPUs for us + ByteArrayOutputStream stdout = new ByteArrayOutputStream(); + project.exec(spec -> { + spec.setExecutable("sysctl"); + spec.args("-n", "hw.physicalcpu"); + spec.setStandardOutput(stdout); + }); + + _defaultParallel = Integer.parseInt(stdout.toString().trim()); + } + + _defaultParallel = Runtime.getRuntime().availableProcessors() / 2; + } + + return _defaultParallel; + } +} diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/info/GlobalInfoExtension.java b/buildSrc/src/main/java/org/elasticsearch/gradle/info/GlobalInfoExtension.java new file mode 100644 index 0000000000000..a2daa4a5767c0 --- /dev/null +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/info/GlobalInfoExtension.java @@ -0,0 +1,12 @@ +package org.elasticsearch.gradle.info; + +import java.util.ArrayList; +import java.util.List; + +public class GlobalInfoExtension { + final List listeners = new ArrayList<>(); + + public void ready(Runnable block) { + listeners.add(block); + } +} diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/info/JavaHome.java b/buildSrc/src/main/java/org/elasticsearch/gradle/info/JavaHome.java new file mode 100644 index 0000000000000..29ca2bafc79dc --- /dev/null +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/info/JavaHome.java @@ -0,0 +1,35 @@ +package org.elasticsearch.gradle.info; + +import org.gradle.api.tasks.Input; +import org.gradle.api.tasks.InputDirectory; +import org.gradle.api.tasks.Optional; +import org.gradle.api.tasks.PathSensitive; +import org.gradle.api.tasks.PathSensitivity; + +import java.io.File; + +public class JavaHome { + private Integer version; + private File javaHome; + + private JavaHome(int version, File javaHome) { + this.version = version; + this.javaHome = javaHome; + } + + public static JavaHome of(int version, File javaHome) { + return new JavaHome(version, javaHome); + } + + @Input + public Integer getVersion() { + return version; + } + + @InputDirectory + @Optional + @PathSensitive(PathSensitivity.RELATIVE) + public File getJavaHome() { + return javaHome; + } +} diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/info/PrintGlobalBuildInfoTask.java b/buildSrc/src/main/java/org/elasticsearch/gradle/info/PrintGlobalBuildInfoTask.java new file mode 100644 index 0000000000000..b83fe29b073a6 --- /dev/null +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/info/PrintGlobalBuildInfoTask.java @@ -0,0 +1,84 @@ +package org.elasticsearch.gradle.info; + +import org.gradle.api.DefaultTask; +import org.gradle.api.JavaVersion; +import org.gradle.api.file.RegularFileProperty; +import org.gradle.api.model.ObjectFactory; +import org.gradle.api.plugins.ExtraPropertiesExtension; +import org.gradle.api.resources.TextResource; +import org.gradle.api.tasks.InputFile; +import org.gradle.api.tasks.TaskAction; + +import javax.inject.Inject; +import java.util.ArrayList; +import java.util.List; + +public class PrintGlobalBuildInfoTask extends DefaultTask { + private final RegularFileProperty buildInfoFile; + private final RegularFileProperty compilerVersionFile; + private final RegularFileProperty runtimeVersionFile; + private final RegularFileProperty fipsJvmFile; + private List globalInfoListeners = new ArrayList<>(); + + @Inject + public PrintGlobalBuildInfoTask(ObjectFactory objectFactory) { + this.buildInfoFile = objectFactory.fileProperty(); + this.compilerVersionFile = objectFactory.fileProperty(); + this.runtimeVersionFile = objectFactory.fileProperty(); + this.fipsJvmFile = objectFactory.fileProperty(); + } + + @InputFile + public RegularFileProperty getBuildInfoFile() { + return buildInfoFile; + } + + @InputFile + public RegularFileProperty getCompilerVersionFile() { + return compilerVersionFile; + } + + @InputFile + public RegularFileProperty getRuntimeVersionFile() { + return runtimeVersionFile; + } + + @InputFile + public RegularFileProperty getFipsJvmFile() { + return fipsJvmFile; + } + + public void setGlobalInfoListeners(List globalInfoListeners) { + this.globalInfoListeners = globalInfoListeners; + } + + @TaskAction + public void print() { + getLogger().quiet("======================================="); + getLogger().quiet("Elasticsearch Build Hamster says Hello!"); + getLogger().quiet(getFileText(getBuildInfoFile()).asString()); + getLogger().quiet(" Random Testing Seed : " + getProject().property("testSeed")); + getLogger().quiet("======================================="); + + setGlobalProperties(); + globalInfoListeners.forEach(Runnable::run); + + // Since all tasks depend on this task, and it always runs for every build, this makes sure that lifecycle tasks will still + // correctly report as UP-TO-DATE, since the convention is a lifecycle task (i.e. assemble, build, etc) will only be marked as + // UP-TO-DATE if all upstream tasks were also UP-TO-DATE. + setDidWork(false); + } + + private TextResource getFileText(RegularFileProperty regularFileProperty) { + return getProject().getResources().getText().fromFile(regularFileProperty.getAsFile().get()); + } + + private void setGlobalProperties() { + getProject().getRootProject().allprojects(p -> { + ExtraPropertiesExtension ext = p.getExtensions().getByType(ExtraPropertiesExtension.class); + ext.set("compilerJavaVersion", JavaVersion.valueOf(getFileText(getCompilerVersionFile()).asString())); + ext.set("runtimeJavaVersion", JavaVersion.valueOf(getFileText(getRuntimeVersionFile()).asString())); + ext.set("inFipsJvm", Boolean.valueOf(getFileText(getFipsJvmFile()).asString())); + }); + } +} diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/ThirdPartyAuditTask.java b/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/ThirdPartyAuditTask.java index e73a9d1e585e3..7ddec2b887ec6 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/ThirdPartyAuditTask.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/ThirdPartyAuditTask.java @@ -26,6 +26,7 @@ import org.gradle.api.artifacts.Configuration; import org.gradle.api.artifacts.Dependency; import org.gradle.api.file.FileTree; +import org.gradle.api.provider.Property; import org.gradle.api.specs.Spec; import org.gradle.api.tasks.CacheableTask; import org.gradle.api.tasks.Classpath; @@ -79,17 +80,13 @@ public class ThirdPartyAuditTask extends DefaultTask { private String javaHome; - private JavaVersion targetCompatibility; + private final Property targetCompatibility = getProject().getObjects().property(JavaVersion.class); @Input - public JavaVersion getTargetCompatibility() { + public Property getTargetCompatibility() { return targetCompatibility; } - public void setTargetCompatibility(JavaVersion targetCompatibility) { - this.targetCompatibility = targetCompatibility; - } - @InputFiles @PathSensitive(PathSensitivity.NAME_ONLY) public Configuration getForbiddenAPIsConfiguration() { @@ -287,7 +284,7 @@ private void extractJars(Set jars) { // pther version specific implementation of said classes. IntStream.rangeClosed( Integer.parseInt(JavaVersion.VERSION_1_9.getMajorVersion()), - Integer.parseInt(targetCompatibility.getMajorVersion()) + Integer.parseInt(targetCompatibility.get().getMajorVersion()) ).forEach(majorVersion -> getProject().copy(spec -> { spec.from(getProject().zipTree(jar)); spec.into(jarExpandDir); diff --git a/buildSrc/src/main/resources/META-INF/gradle-plugins/elasticsearch.global-build-info.properties b/buildSrc/src/main/resources/META-INF/gradle-plugins/elasticsearch.global-build-info.properties new file mode 100644 index 0000000000000..7428707877242 --- /dev/null +++ b/buildSrc/src/main/resources/META-INF/gradle-plugins/elasticsearch.global-build-info.properties @@ -0,0 +1 @@ +implementation-class=org.elasticsearch.gradle.info.GlobalBuildInfoPlugin \ No newline at end of file diff --git a/distribution/tools/plugin-cli/build.gradle b/distribution/tools/plugin-cli/build.gradle index 61e3546ed8919..48bc899cd29b4 100644 --- a/distribution/tools/plugin-cli/build.gradle +++ b/distribution/tools/plugin-cli/build.gradle @@ -40,8 +40,8 @@ test { systemProperty 'tests.security.manager', 'false' } -if (project.inFipsJvm) { +thirdPartyAudit.onlyIf { // FIPS JVM includes manny classes from bouncycastle which count as jar hell for the third party audit, // rather than provide a long list of exclusions, disable the check on FIPS. - thirdPartyAudit.enabled = false -} + project.inFipsJvm == false +} \ No newline at end of file diff --git a/modules/transport-netty4/build.gradle b/modules/transport-netty4/build.gradle index 23de6a7f93b4f..d64e0aff7749d 100644 --- a/modules/transport-netty4/build.gradle +++ b/modules/transport-netty4/build.gradle @@ -172,10 +172,12 @@ thirdPartyAudit { ) } -if (project.inFipsJvm == false) { - // BouncyCastleFIPS provides this class, so the exclusion is invalid when running CI in - // a FIPS JVM with BouncyCastleFIPS Provider - thirdPartyAudit.ignoreMissingClasses ( - 'org.bouncycastle.asn1.x500.X500Name' - ) +rootProject.globalInfo.ready { + if (project.inFipsJvm == false) { + // BouncyCastleFIPS provides this class, so the exclusion is invalid when running CI in + // a FIPS JVM with BouncyCastleFIPS Provider + thirdPartyAudit.ignoreMissingClasses( + 'org.bouncycastle.asn1.x500.X500Name' + ) + } } diff --git a/plugins/ingest-attachment/build.gradle b/plugins/ingest-attachment/build.gradle index cbe417708d778..835147c255c8b 100644 --- a/plugins/ingest-attachment/build.gradle +++ b/plugins/ingest-attachment/build.gradle @@ -84,8 +84,8 @@ thirdPartyAudit{ ignoreMissingClasses() } -if (project.inFipsJvm) { - // FIPS JVM includes manny classes from bouncycastle which count as jar hell for the third party audit, - // rather than provide a long list of exclusions, disable the check on FIPS. - thirdPartyAudit.enabled = false -} +thirdPartyAudit.onlyIf { + // FIPS JVM includes manny classes from bouncycastle which count as jar hell for the third party audit, + // rather than provide a long list of exclusions, disable the check on FIPS. + project.inFipsJvm == false +} \ No newline at end of file diff --git a/plugins/transport-nio/build.gradle b/plugins/transport-nio/build.gradle index 9f93d18a0e15e..7800ff6951a89 100644 --- a/plugins/transport-nio/build.gradle +++ b/plugins/transport-nio/build.gradle @@ -149,10 +149,12 @@ thirdPartyAudit { 'io.netty.handler.ssl.util.OpenJdkSelfSignedCertGenerator' ) } -if (project.inFipsJvm == false) { - // BouncyCastleFIPS provides this class, so the exclusion is invalid when running CI in - // a FIPS JVM with BouncyCastleFIPS Provider - thirdPartyAudit.ignoreMissingClasses ( - 'org.bouncycastle.asn1.x500.X500Name' - ) +rootProject.globalInfo.ready { + if (project.inFipsJvm == false) { + // BouncyCastleFIPS provides this class, so the exclusion is invalid when running CI in + // a FIPS JVM with BouncyCastleFIPS Provider + thirdPartyAudit.ignoreMissingClasses( + 'org.bouncycastle.asn1.x500.X500Name' + ) + } } diff --git a/server/build.gradle b/server/build.gradle index ce5e4cc807fdf..391fdf46469f0 100644 --- a/server/build.gradle +++ b/server/build.gradle @@ -56,8 +56,10 @@ if (!isEclipse && !isIdea) { } forbiddenApisJava12 { - if (project.runtimeJavaVersion < JavaVersion.VERSION_12) { - targetCompatibility = JavaVersion.VERSION_12.getMajorVersion() + doFirst { + if (project.runtimeJavaVersion < JavaVersion.VERSION_12) { + targetCompatibility = JavaVersion.VERSION_12.getMajorVersion() + } } } diff --git a/x-pack/plugin/ccr/qa/restart/build.gradle b/x-pack/plugin/ccr/qa/restart/build.gradle index 8501de714fae6..cace98d97b015 100644 --- a/x-pack/plugin/ccr/qa/restart/build.gradle +++ b/x-pack/plugin/ccr/qa/restart/build.gradle @@ -41,7 +41,7 @@ followClusterTestRunner { task followClusterRestartTest(type: RestIntegTestTask) {} followClusterRestartTestCluster { - dependsOn followClusterTestRunner + dependsOn followClusterTestRunner, 'followClusterTestCluster#stop' numNodes = 1 clusterName = 'follow-cluster' dataDir = { nodeNumber -> followClusterTest.nodes[0].dataDir } diff --git a/x-pack/plugin/security/cli/build.gradle b/x-pack/plugin/security/cli/build.gradle index 29f278b95defa..5a95594b292ed 100644 --- a/x-pack/plugin/security/cli/build.gradle +++ b/x-pack/plugin/security/cli/build.gradle @@ -22,16 +22,18 @@ dependencyLicenses { mapping from: /bc.*/, to: 'bouncycastle' } -if (project.inFipsJvm) { - test.enabled = false - testingConventions.enabled = false - // Forbiden APIs non-portable checks fail because bouncy castle classes being used from the FIPS JDK since those are - // not part of the Java specification - all of this is as designed, so we have to relax this check for FIPS. - tasks.withType(CheckForbiddenApis) { - bundledSignatures -= "jdk-non-portable" - } - // FIPS JVM includes many classes from bouncycastle which count as jar hell for the third party audit, - // rather than provide a long list of exclusions, disable the check on FIPS. - thirdPartyAudit.enabled = false +rootProject.globalInfo.ready { + if (project.inFipsJvm) { + test.enabled = false + testingConventions.enabled = false + // Forbiden APIs non-portable checks fail because bouncy castle classes being used from the FIPS JDK since those are + // not part of the Java specification - all of this is as designed, so we have to relax this check for FIPS. + tasks.withType(CheckForbiddenApis) { + bundledSignatures -= "jdk-non-portable" + } + // FIPS JVM includes many classes from bouncycastle which count as jar hell for the third party audit, + // rather than provide a long list of exclusions, disable the check on FIPS. + thirdPartyAudit.enabled = false -} + } +} \ No newline at end of file diff --git a/x-pack/plugin/sql/qa/security/with-ssl/build.gradle b/x-pack/plugin/sql/qa/security/with-ssl/build.gradle index de4e173463612..19459bade97a8 100644 --- a/x-pack/plugin/sql/qa/security/with-ssl/build.gradle +++ b/x-pack/plugin/sql/qa/security/with-ssl/build.gradle @@ -207,18 +207,16 @@ integTestCluster { return tmpFile.exists() } } -Closure notRunningFips = { - Boolean.parseBoolean(BuildPlugin.runJavaAsScript(project, project.runtimeJavaHome, - 'print(java.security.Security.getProviders()[0].name.toLowerCase().contains("fips"));')) == false -} // Do not attempt to form a cluster in a FIPS JVM, as doing so with a JKS keystore will fail. // TODO Revisit this when SQL CLI client can handle key/certificate instead of only Keystores. // https://github.com/elastic/elasticsearch/issues/32306 -tasks.matching({ it.name == "integTestCluster#init" }).all { onlyIf notRunningFips } -tasks.matching({ it.name == "integTestCluster#start" }).all { onlyIf notRunningFips } -tasks.matching({ it.name == "integTestCluster#wait" }).all { onlyIf notRunningFips } -tasks.matching({ it.name == "integTestRunner" }).all { onlyIf notRunningFips } +tasks.matching { it.name in ["integTestCluster#init", "integTestCluster#start", "integTestCluster#wait", "integTestRunner"] }.all { + onlyIf { + project.inFipsJvm == false + } +} + /** A lazy evaluator to find the san to use for certificate generation. */ class SanEvaluator { diff --git a/x-pack/qa/full-cluster-restart/build.gradle b/x-pack/qa/full-cluster-restart/build.gradle index 7f0e14d2a53bf..70767faf33499 100644 --- a/x-pack/qa/full-cluster-restart/build.gradle +++ b/x-pack/qa/full-cluster-restart/build.gradle @@ -116,13 +116,15 @@ for (Version version : bwcVersions.indexCompatible) { setting 'xpack.security.enabled', 'true' setting 'xpack.security.transport.ssl.enabled', 'true' - if (project.inFipsJvm) { - setting 'xpack.security.transport.ssl.key', 'testnode.pem' - setting 'xpack.security.transport.ssl.certificate', 'testnode.crt' - keystoreSetting 'xpack.security.transport.ssl.secure_key_passphrase', 'testnode' - } else { - setting 'xpack.security.transport.ssl.keystore.path', 'testnode.jks' - setting 'xpack.security.transport.ssl.keystore.password', 'testnode' + rootProject.globalInfo.ready { + if (project.inFipsJvm) { + setting 'xpack.security.transport.ssl.key', 'testnode.pem' + setting 'xpack.security.transport.ssl.certificate', 'testnode.crt' + keystoreSetting 'xpack.security.transport.ssl.secure_key_passphrase', 'testnode' + } else { + setting 'xpack.security.transport.ssl.keystore.path', 'testnode.jks' + setting 'xpack.security.transport.ssl.keystore.password', 'testnode' + } } setting 'xpack.license.self_generated.type', 'trial' dependsOn copyTestNodeKeyMaterial @@ -160,13 +162,15 @@ for (Version version : bwcVersions.indexCompatible) { // some tests rely on the translog not being flushed setting 'indices.memory.shard_inactive_time', '20m' setting 'xpack.security.enabled', 'true' - if (project.inFipsJvm) { - setting 'xpack.security.transport.ssl.key', 'testnode.pem' - setting 'xpack.security.transport.ssl.certificate', 'testnode.crt' - keystoreSetting 'xpack.security.transport.ssl.secure_key_passphrase', 'testnode' - } else { - setting 'xpack.security.transport.ssl.keystore.path', 'testnode.jks' - setting 'xpack.security.transport.ssl.keystore.password', 'testnode' + rootProject.globalInfo.ready { + if (project.inFipsJvm) { + setting 'xpack.security.transport.ssl.key', 'testnode.pem' + setting 'xpack.security.transport.ssl.certificate', 'testnode.crt' + keystoreSetting 'xpack.security.transport.ssl.secure_key_passphrase', 'testnode' + } else { + setting 'xpack.security.transport.ssl.keystore.path', 'testnode.jks' + setting 'xpack.security.transport.ssl.keystore.password', 'testnode' + } } setting 'xpack.license.self_generated.type', 'trial' dependsOn copyTestNodeKeyMaterial diff --git a/x-pack/qa/reindex-tests-with-security/build.gradle b/x-pack/qa/reindex-tests-with-security/build.gradle index 5a201832e7c39..7f878e6356b73 100644 --- a/x-pack/qa/reindex-tests-with-security/build.gradle +++ b/x-pack/qa/reindex-tests-with-security/build.gradle @@ -41,8 +41,10 @@ integTestCluster { setting 'reindex.ssl.truststore.password', 'password' // Workaround for JDK-8212885 - if (project.ext.runtimeJavaVersion.isJava12Compatible() == false) { - setting 'reindex.ssl.supported_protocols', 'TLSv1.2' + rootProject.globalInfo.ready { + if (project.ext.runtimeJavaVersion.isJava12Compatible() == false) { + setting 'reindex.ssl.supported_protocols', 'TLSv1.2' + } } extraConfigFile 'roles.yml', 'roles.yml' diff --git a/x-pack/qa/rolling-upgrade/build.gradle b/x-pack/qa/rolling-upgrade/build.gradle index 8d5b133454a73..d75ecbd7a55ed 100644 --- a/x-pack/qa/rolling-upgrade/build.gradle +++ b/x-pack/qa/rolling-upgrade/build.gradle @@ -123,13 +123,15 @@ for (Version version : bwcVersions.wireCompatible) { setting 'xpack.security.authc.token.timeout', '60m' setting 'logger.org.elasticsearch.xpack.security.authc.TokenService', 'trace' setting 'xpack.security.audit.enabled', 'true' - if (project.inFipsJvm) { - setting 'xpack.security.transport.ssl.key', 'testnode.pem' - setting 'xpack.security.transport.ssl.certificate', 'testnode.crt' - keystoreSetting 'xpack.security.transport.ssl.secure_key_passphrase', 'testnode' - } else { - setting 'xpack.security.transport.ssl.keystore.path', 'testnode.jks' - setting 'xpack.security.transport.ssl.keystore.password', 'testnode' + rootProject.globalInfo.ready { + if (project.inFipsJvm) { + setting 'xpack.security.transport.ssl.key', 'testnode.pem' + setting 'xpack.security.transport.ssl.certificate', 'testnode.crt' + keystoreSetting 'xpack.security.transport.ssl.secure_key_passphrase', 'testnode' + } else { + setting 'xpack.security.transport.ssl.keystore.path', 'testnode.jks' + setting 'xpack.security.transport.ssl.keystore.password', 'testnode' + } } dependsOn copyTestNodeKeyMaterial extraConfigFile 'testnode.jks', new File(outputDir + '/testnode.jks') @@ -188,13 +190,15 @@ for (Version version : bwcVersions.wireCompatible) { setting 'xpack.security.transport.ssl.enabled', 'true' setting 'xpack.security.authc.token.timeout', '60m' setting 'logger.org.elasticsearch.xpack.security.authc.TokenService', 'trace' - if (project.inFipsJvm) { - setting 'xpack.security.transport.ssl.key', 'testnode.pem' - setting 'xpack.security.transport.ssl.certificate', 'testnode.crt' - keystoreSetting 'xpack.security.transport.ssl.secure_key_passphrase', 'testnode' - } else { - setting 'xpack.security.transport.ssl.keystore.path', 'testnode.jks' - setting 'xpack.security.transport.ssl.keystore.password', 'testnode' + rootProject.globalInfo.ready { + if (project.inFipsJvm) { + setting 'xpack.security.transport.ssl.key', 'testnode.pem' + setting 'xpack.security.transport.ssl.certificate', 'testnode.crt' + keystoreSetting 'xpack.security.transport.ssl.secure_key_passphrase', 'testnode' + } else { + setting 'xpack.security.transport.ssl.keystore.path', 'testnode.jks' + setting 'xpack.security.transport.ssl.keystore.password', 'testnode' + } } setting 'node.attr.upgraded', 'true' setting 'xpack.security.authc.token.enabled', 'true' From 5f3d0e4ab16c45221e406839d009f1f16bb7aab2 Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Fri, 24 May 2019 15:42:59 -0400 Subject: [PATCH 109/224] Adjust load SplitIndexIT#testSplitIndexPrimaryTerm (#42477) SplitIndexIT#testSplitIndexPrimaryTerm sometimes timeout due to relocating many shards. This change adjusts loads and increases the timeout. --- .../admin/indices/create/SplitIndexIT.java | 32 ++++--------------- 1 file changed, 7 insertions(+), 25 deletions(-) diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/create/SplitIndexIT.java b/server/src/test/java/org/elasticsearch/action/admin/indices/create/SplitIndexIT.java index 05d1c5dcd803f..0fecff449f9b5 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/create/SplitIndexIT.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/create/SplitIndexIT.java @@ -45,8 +45,8 @@ import org.elasticsearch.cluster.routing.Murmur3HashFunction; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider; -import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexService; @@ -62,7 +62,6 @@ import java.io.UncheckedIOException; import java.util.Arrays; import java.util.HashSet; -import java.util.List; import java.util.Set; import java.util.function.BiFunction; import java.util.stream.IntStream; @@ -75,7 +74,6 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.greaterThanOrEqualTo; public class SplitIndexIT extends ESIntegTestCase { @@ -184,9 +182,6 @@ private void splitToN(int sourceShards, int firstSplitShards, int secondSplitSha } } - ImmutableOpenMap dataNodes = client().admin().cluster().prepareState().get().getState().nodes() - .getDataNodes(); - assertTrue("at least 2 nodes but was: " + dataNodes.size(), dataNodes.size() >= 2); ensureYellow(); client().admin().indices().prepareUpdateSettings("source") .setSettings(Settings.builder() @@ -287,19 +282,13 @@ public void assertAllUniqueDocs(SearchResponse response, int numDocs) { } public void testSplitIndexPrimaryTerm() throws Exception { - final List factors = Arrays.asList(1, 2, 4, 8); - final List numberOfShardsFactors = randomSubsetOf(scaledRandomIntBetween(1, factors.size()), factors); - final int numberOfShards = randomSubsetOf(numberOfShardsFactors).stream().reduce(1, (x, y) -> x * y); - final int numberOfTargetShards = numberOfShardsFactors.stream().reduce(2, (x, y) -> x * y); + int numberOfTargetShards = randomIntBetween(2, 20); + int numberOfShards = randomValueOtherThanMany(n -> numberOfTargetShards % n != 0, () -> between(1, numberOfTargetShards - 1)); internalCluster().ensureAtLeastNumDataNodes(2); prepareCreate("source").setSettings(Settings.builder().put(indexSettings()) .put("number_of_shards", numberOfShards) .put("index.number_of_routing_shards", numberOfTargetShards)).get(); - - final ImmutableOpenMap dataNodes = - client().admin().cluster().prepareState().get().getState().nodes().getDataNodes(); - assertThat(dataNodes.size(), greaterThanOrEqualTo(2)); - ensureYellow(); + ensureGreen(TimeValue.timeValueSeconds(120)); // needs more than the default to allocate many shards // fail random primary shards to force primary terms to increase final Index source = resolveIndex("source"); @@ -352,7 +341,7 @@ public void testSplitIndexPrimaryTerm() throws Exception { .setResizeType(ResizeType.SPLIT) .setSettings(splitSettings).get()); - ensureGreen(); + ensureGreen(TimeValue.timeValueSeconds(120)); // needs more than the default to relocate many shards final IndexMetaData aftersplitIndexMetaData = indexMetaData(client(), "target"); for (int shardId = 0; shardId < numberOfTargetShards; shardId++) { @@ -365,9 +354,7 @@ private static IndexMetaData indexMetaData(final Client client, final String ind return clusterStateResponse.getState().metaData().index(index); } - public void testCreateSplitIndex() { - internalCluster().ensureAtLeastNumDataNodes(2); - + public void testCreateSplitIndex() throws Exception { Version version = VersionUtils.randomIndexCompatibleVersion(random()); prepareCreate("source").setSettings(Settings.builder().put(indexSettings()) .put("number_of_shards", 1) @@ -378,9 +365,7 @@ public void testCreateSplitIndex() { client().prepareIndex("source", "type") .setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", XContentType.JSON).get(); } - ImmutableOpenMap dataNodes = - client().admin().cluster().prepareState().get().getState().nodes().getDataNodes(); - assertTrue("at least 2 nodes but was: " + dataNodes.size(), dataNodes.size() >= 2); + internalCluster().ensureAtLeastNumDataNodes(2); // ensure all shards are allocated otherwise the ensure green below might not succeed since we require the merge node // if we change the setting too quickly we will end up with one replica unassigned which can't be assigned anymore due // to the require._name below. @@ -486,9 +471,6 @@ public void testCreateSplitWithIndexSort() throws Exception { client().prepareIndex("source", "type", Integer.toString(i)) .setSource("{\"foo\" : \"bar\", \"id\" : " + i + "}", XContentType.JSON).get(); } - ImmutableOpenMap dataNodes = client().admin().cluster().prepareState().get().getState().nodes() - .getDataNodes(); - assertTrue("at least 2 nodes but was: " + dataNodes.size(), dataNodes.size() >= 2); // ensure all shards are allocated otherwise the ensure green below might not succeed since we require the merge node // if we change the setting too quickly we will end up with one replica unassigned which can't be assigned anymore due // to the require._name below. From 8bbd3e3096295ce20d79b210dcd04a851b0f2736 Mon Sep 17 00:00:00 2001 From: Jake Landis Date: Fri, 24 May 2019 15:01:26 -0500 Subject: [PATCH 110/224] Address test failures for SmokeTestWatcherWithSecurityIT (#42092) * Address test failures for SmokeTestWatcherWithSecurityIT There are likely multiple root causes to the seemingly random failures generated by SmokeTestWatcherWithSecurityIT. This commit un-mutes this this test, address one known cause and adds debug logging for this test. The known root cause for one failure is that we can have a Watch running that is reading data from an index. Before we stop Watcher we delete that index. If Watcher happens to execute after deletion of the index but before the stop of Watcher the test can fail. The fix here is to simply move the index deletion after the stop of Watcher. Related #35361 Related #30777 Related #33291 Related #29893 --- x-pack/qa/smoke-test-watcher-with-security/build.gradle | 2 ++ .../smoketest/SmokeTestWatcherWithSecurityIT.java | 5 ++--- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/x-pack/qa/smoke-test-watcher-with-security/build.gradle b/x-pack/qa/smoke-test-watcher-with-security/build.gradle index 0b622fc446b38..fc66785d47957 100644 --- a/x-pack/qa/smoke-test-watcher-with-security/build.gradle +++ b/x-pack/qa/smoke-test-watcher-with-security/build.gradle @@ -24,6 +24,8 @@ integTestCluster { setting 'xpack.notification.email.account._email.smtp.user', '_user' keystoreSetting 'xpack.notification.email.account._email.smtp.secure_password', '_passwd' setting 'xpack.license.self_generated.type', 'trial' + setting 'logger.org.elasticsearch.xpack.watcher', 'debug' + setting 'logger.org.elasticsearch.xpack.core.watcher', 'debug' extraConfigFile 'roles.yml', 'roles.yml' setupCommand 'setupTestAdminUser', 'bin/elasticsearch-users', 'useradd', 'test_admin', '-p', 'x-pack-test-password', '-r', 'superuser' diff --git a/x-pack/qa/smoke-test-watcher-with-security/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherWithSecurityIT.java b/x-pack/qa/smoke-test-watcher-with-security/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherWithSecurityIT.java index bf53dfa83103e..e184ef19596af 100644 --- a/x-pack/qa/smoke-test-watcher-with-security/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherWithSecurityIT.java +++ b/x-pack/qa/smoke-test-watcher-with-security/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherWithSecurityIT.java @@ -6,7 +6,6 @@ package org.elasticsearch.smoketest; import org.apache.http.util.EntityUtils; -import org.apache.lucene.util.LuceneTestCase.AwaitsFix; import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; import org.elasticsearch.common.Strings; @@ -32,7 +31,6 @@ import static org.hamcrest.Matchers.hasEntry; import static org.hamcrest.Matchers.is; -@AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/35361") public class SmokeTestWatcherWithSecurityIT extends ESRestTestCase { private static final String TEST_ADMIN_USERNAME = "test_admin"; @@ -91,7 +89,6 @@ public void startWatcher() throws Exception { @After public void stopWatcher() throws Exception { - adminClient().performRequest(new Request("DELETE", "/my_test_index")); assertBusy(() -> { try { @@ -119,6 +116,8 @@ public void stopWatcher() throws Exception { throw new AssertionError(e); } }); + + adminClient().performRequest(new Request("DELETE", "/my_test_index")); } @Override From a7cf2994cec34f5131f8004da4e5123d2ae98aeb Mon Sep 17 00:00:00 2001 From: Mark Vieira Date: Fri, 24 May 2019 13:19:56 -0700 Subject: [PATCH 111/224] Fix issue with using runtime JDK of Java12 --- .../gradle/precommit/PrecommitTasks.groovy | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy index e5e4f021507f9..7e8fbd0658698 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy @@ -136,12 +136,10 @@ class PrecommitTasks { // we need to defer this configuration since we don't know the runtime java version until execution time targetCompatibility = project.runtimeJavaVersion.getMajorVersion() if (project.runtimeJavaVersion > JavaVersion.VERSION_11) { - doLast { - project.logger.info( - "Forbidden APIs does not support java version past 11. Will use the signatures from 11 for ", - project.runtimeJavaVersion - ) - } + project.logger.info( + "Forbidden APIs does not support java version past 11. Will use the signatures from 11 for ", + project.runtimeJavaVersion + ) targetCompatibility = JavaVersion.VERSION_11.getMajorVersion() } } From 9772574f9d0b942a1ee8dba5ff503b4cd286e36c Mon Sep 17 00:00:00 2001 From: Mark Vieira Date: Fri, 24 May 2019 13:46:59 -0700 Subject: [PATCH 112/224] Use reproducible method of generating properties file for better caching (#42539) --- buildSrc/build.gradle | 22 +++++++++------------- 1 file changed, 9 insertions(+), 13 deletions(-) diff --git a/buildSrc/build.gradle b/buildSrc/build.gradle index f239427330c58..d3a16f55277d7 100644 --- a/buildSrc/build.gradle +++ b/buildSrc/build.gradle @@ -45,21 +45,17 @@ if (project == rootProject) { // we update the version property to reflect if we are building a snapshot or a release build // we write this back out below to load it in the Build.java which will be shown in rest main action // to indicate this being a snapshot build or a release build. -File propsFile = project.file('version.properties') -Properties props = VersionPropertiesLoader.loadBuildSrcVersion(propsFile) +Properties props = VersionPropertiesLoader.loadBuildSrcVersion(project.file('version.properties')) version = props.getProperty("elasticsearch") + +task generateVersionProperties(type: WriteProperties) { + outputFile = "${buildDir}/version.properties" + comment = 'Generated version properties' + properties(props) +} + processResources { - inputs.file(propsFile) - // We need to be explicit with the version because we add snapshot and qualifier to it based on properties - inputs.property("dynamic_elasticsearch_version", props.getProperty("elasticsearch")) - doLast { - Writer writer = file("$destinationDir/version.properties").newWriter() - try { - props.store(writer, "Generated version properties") - } finally { - writer.close() - } - } + from(generateVersionProperties) } /***************************************************************************** From 5b0b98b7c79ef1de84d216ba57ceb852bfa86c08 Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Tue, 21 May 2019 17:20:27 -0400 Subject: [PATCH 113/224] Recovery with syncId should verify seqno infos (#41265) This change verifies and aborts recovery if source and target have the same syncId but different sequenceId. This commit also adds an upgrade test to ensure that we always utilize syncId. --- .../upgrades/AbstractRollingTestCase.java | 1 + .../elasticsearch/upgrades/RecoveryIT.java | 127 +++++++++++++++--- .../recovery/RecoverySourceHandler.java | 46 ++++--- .../recovery/RecoverySourceHandlerTests.java | 39 ++++++ 4 files changed, 174 insertions(+), 39 deletions(-) diff --git a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/AbstractRollingTestCase.java b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/AbstractRollingTestCase.java index 1c57be7abbaa1..f6041bc2af754 100644 --- a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/AbstractRollingTestCase.java +++ b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/AbstractRollingTestCase.java @@ -42,6 +42,7 @@ public static ClusterType parse(String value) { } protected static final ClusterType CLUSTER_TYPE = ClusterType.parse(System.getProperty("tests.rest.suite")); + protected static final boolean firstMixedRound = Boolean.parseBoolean(System.getProperty("tests.first_round", "false")); @Override protected final boolean preserveIndicesUponCompletion() { diff --git a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RecoveryIT.java b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RecoveryIT.java index bbc6d27472467..32ddc77113bc8 100644 --- a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RecoveryIT.java +++ b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RecoveryIT.java @@ -33,6 +33,7 @@ import org.elasticsearch.index.IndexSettings; import org.elasticsearch.rest.action.document.RestIndexAction; import org.elasticsearch.test.rest.yaml.ObjectPath; +import org.hamcrest.Matcher; import java.io.IOException; import java.util.ArrayList; @@ -40,6 +41,7 @@ import java.util.List; import java.util.Locale; import java.util.Map; +import java.util.Objects; import java.util.concurrent.Future; import java.util.concurrent.TimeUnit; import java.util.function.Predicate; @@ -172,6 +174,25 @@ public void testRecoveryWithConcurrentIndexing() throws Exception { } } + private void assertDocCountOnAllCopies(String index, int expectedCount) throws Exception { + assertBusy(() -> { + Map state = entityAsMap(client().performRequest(new Request("GET", "/_cluster/state"))); + String xpath = "routing_table.indices." + index + ".shards.0.node"; + @SuppressWarnings("unchecked") List assignedNodes = (List) XContentMapValues.extractValue(xpath, state); + assertNotNull(state.toString(), assignedNodes); + for (String assignedNode : assignedNodes) { + try { + assertCount(index, "_only_nodes:" + assignedNode, expectedCount); + } catch (ResponseException e) { + if (e.getMessage().contains("no data nodes with criteria [" + assignedNode + "found for shard: [" + index + "][0]")) { + throw new AssertionError(e); // shard is relocating - ask assert busy to retry + } + throw e; + } + } + }); + } + private void assertCount(final String index, final String preference, final int expectedCount) throws IOException { final int actualDocs; try { @@ -275,34 +296,52 @@ public void testRelocationWithConcurrentIndexing() throws Exception { } } + /** + * This test ensures that peer recovery won't get stuck in a situation where the recovery target and recovery source + * have an identical sync id but different local checkpoint in the commit in particular the target does not have + * sequence numbers yet. This is possible if the primary is on 6.x while the replica was on 5.x and some write + * operations with sequence numbers have taken place. If this is not the case, then peer recovery should utilize + * syncId and skip copying files. + */ public void testRecoverSyncedFlushIndex() throws Exception { final String index = "recover_synced_flush_index"; if (CLUSTER_TYPE == ClusterType.OLD) { Settings.Builder settings = Settings.builder() .put(IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1) - .put(IndexMetaData.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 1) - // if the node with the replica is the first to be restarted, while a replica is still recovering - // then delayed allocation will kick in. When the node comes back, the master will search for a copy - // but the recovering copy will be seen as invalid and the cluster health won't return to GREEN - // before timing out - .put(INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), "100ms") - .put(SETTING_ALLOCATION_MAX_RETRY.getKey(), "0"); // fail faster + .put(IndexMetaData.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 2); + if (randomBoolean()) { + settings.put(IndexSettings.INDEX_TRANSLOG_RETENTION_AGE_SETTING.getKey(), "-1") + .put(IndexSettings.INDEX_TRANSLOG_RETENTION_SIZE_SETTING.getKey(), "-1") + .put(IndexSettings.INDEX_TRANSLOG_GENERATION_THRESHOLD_SIZE_SETTING.getKey(), "256b"); + } createIndex(index, settings.build()); - indexDocs(index, 0, randomInt(5)); - // We have to spin synced-flush requests here because we fire the global checkpoint sync for the last write operation. - // A synced-flush request considers the global checkpoint sync as an going operation because it acquires a shard permit. - assertBusy(() -> { - try { - Response resp = client().performRequest(new Request("POST", index + "/_flush/synced")); - Map result = ObjectPath.createFromResponse(resp).evaluate("_shards"); - assertThat(result.get("successful"), equalTo(result.get("total"))); - assertThat(result.get("failed"), equalTo(0)); - } catch (ResponseException ex) { - throw new AssertionError(ex); // cause assert busy to retry + ensureGreen(index); + indexDocs(index, 0, 40); + syncedFlush(index); + } else if (CLUSTER_TYPE == ClusterType.MIXED) { + ensureGreen(index); + if (firstMixedRound) { + assertPeerRecoveredFiles("peer recovery with syncId should not copy files", index, "upgraded-node-0", equalTo(0)); + assertDocCountOnAllCopies(index, 40); + indexDocs(index, 40, 50); + syncedFlush(index); + } else { + assertPeerRecoveredFiles("peer recovery with syncId should not copy files", index, "upgraded-node-1", equalTo(0)); + assertDocCountOnAllCopies(index, 90); + indexDocs(index, 90, 60); + syncedFlush(index); + // exclude node-2 from allocation-filter so we can trim translog on the primary before node-2 starts recover + if (randomBoolean()) { + updateIndexSettings(index, Settings.builder().put("index.routing.allocation.include._name", "upgraded-*")); } - }); + } + } else { + final int docsAfterUpgraded = randomIntBetween(0, 100); + indexDocs(index, 150, docsAfterUpgraded); + ensureGreen(index); + assertPeerRecoveredFiles("peer recovery with syncId should not copy files", index, "upgraded-node-2", equalTo(0)); + assertDocCountOnAllCopies(index, 150 + docsAfterUpgraded); } - ensureGreen(index); } public void testRecoveryWithSoftDeletes() throws Exception { @@ -480,4 +519,52 @@ private void assertClosedIndex(final String index, final boolean checkRoutingTab assertThat(XContentMapValues.extractValue("index.verified_before_close", settings), nullValue()); } } + + private void syncedFlush(String index) throws Exception { + // We have to spin synced-flush requests here because we fire the global checkpoint sync for the last write operation. + // A synced-flush request considers the global checkpoint sync as an going operation because it acquires a shard permit. + assertBusy(() -> { + try { + Response resp = client().performRequest(new Request("POST", index + "/_flush/synced")); + Map result = ObjectPath.createFromResponse(resp).evaluate("_shards"); + assertThat(result.get("failed"), equalTo(0)); + } catch (ResponseException ex) { + throw new AssertionError(ex); // cause assert busy to retry + } + }); + // ensure the global checkpoint is synced; otherwise we might trim the commit with syncId + ensureGlobalCheckpointSynced(index); + } + + @SuppressWarnings("unchecked") + private void assertPeerRecoveredFiles(String reason, String index, String targetNode, Matcher sizeMatcher) throws IOException { + Map recoveryStats = entityAsMap(client().performRequest(new Request("GET", index + "/_recovery"))); + List> shards = (List>) XContentMapValues.extractValue(index + "." + "shards", recoveryStats); + for (Map shard : shards) { + if (Objects.equals(XContentMapValues.extractValue("type", shard), "PEER")) { + if (Objects.equals(XContentMapValues.extractValue("target.name", shard), targetNode)) { + Integer recoveredFileSize = (Integer) XContentMapValues.extractValue("index.files.recovered", shard); + assertThat(reason + " target node [" + targetNode + "] stats [" + recoveryStats + "]", recoveredFileSize, sizeMatcher); + } + } + } + } + + @SuppressWarnings("unchecked") + private void ensureGlobalCheckpointSynced(String index) throws Exception { + assertBusy(() -> { + Map stats = entityAsMap(client().performRequest(new Request("GET", index + "/_stats?level=shards"))); + List> shardStats = (List>) XContentMapValues.extractValue("indices." + index + ".shards.0", stats); + shardStats.stream() + .map(shard -> (Map) XContentMapValues.extractValue("seq_no", shard)) + .filter(Objects::nonNull) + .forEach(seqNoStat -> { + long globalCheckpoint = ((Number) XContentMapValues.extractValue("global_checkpoint", seqNoStat)).longValue(); + long localCheckpoint = ((Number) XContentMapValues.extractValue("local_checkpoint", seqNoStat)).longValue(); + long maxSeqNo = ((Number) XContentMapValues.extractValue("max_seq_no", seqNoStat)).longValue(); + assertThat(shardStats.toString(), localCheckpoint, equalTo(maxSeqNo)); + assertThat(shardStats.toString(), globalCheckpoint, equalTo(maxSeqNo)); + }); + }, 60, TimeUnit.SECONDS); + } } diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java b/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java index 4e82798e34128..4b89e75691a76 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java @@ -359,25 +359,10 @@ public SendFileResult phase1(final IndexCommit snapshot, final long globalCheckp recoverySourceMetadata.asMap().size() + " files", name); } } - // Generate a "diff" of all the identical, different, and missing - // segment files on the target node, using the existing files on - // the source node - String recoverySourceSyncId = recoverySourceMetadata.getSyncId(); - String recoveryTargetSyncId = request.metadataSnapshot().getSyncId(); - final boolean recoverWithSyncId = recoverySourceSyncId != null && - recoverySourceSyncId.equals(recoveryTargetSyncId); - if (recoverWithSyncId) { - final long numDocsTarget = request.metadataSnapshot().getNumDocs(); - final long numDocsSource = recoverySourceMetadata.getNumDocs(); - if (numDocsTarget != numDocsSource) { - throw new IllegalStateException("try to recover " + request.shardId() + " from primary shard with sync id but number " + - "of docs differ: " + numDocsSource + " (" + request.sourceNode().getName() + ", primary) vs " + numDocsTarget - + "(" + request.targetNode().getName() + ")"); - } - // we shortcut recovery here because we have nothing to copy. but we must still start the engine on the target. - // so we don't return here - logger.trace("skipping [phase1]- identical sync id [{}] found on both source and target", recoverySourceSyncId); - } else { + if (canSkipPhase1(recoverySourceMetadata, request.metadataSnapshot()) == false) { + // Generate a "diff" of all the identical, different, and missing + // segment files on the target node, using the existing files on + // the source node final Store.RecoveryDiff diff = recoverySourceMetadata.recoveryDiff(request.metadataSnapshot()); for (StoreFileMetaData md : diff.identical) { phase1ExistingFileNames.add(md.name()); @@ -458,6 +443,9 @@ public SendFileResult phase1(final IndexCommit snapshot, final long globalCheckp throw targetException; } } + } else { + logger.trace("skipping [phase1]- identical sync id [{}] found on both source and target", + recoverySourceMetadata.getSyncId()); } final TimeValue took = stopWatch.totalTime(); logger.trace("recovery [phase1]: took [{}]", took); @@ -470,6 +458,26 @@ public SendFileResult phase1(final IndexCommit snapshot, final long globalCheckp } } + boolean canSkipPhase1(Store.MetadataSnapshot source, Store.MetadataSnapshot target) { + if (source.getSyncId() == null || source.getSyncId().equals(target.getSyncId()) == false) { + return false; + } + if (source.getNumDocs() != target.getNumDocs()) { + throw new IllegalStateException("try to recover " + request.shardId() + " from primary shard with sync id but number " + + "of docs differ: " + source.getNumDocs() + " (" + request.sourceNode().getName() + ", primary) vs " + target.getNumDocs() + + "(" + request.targetNode().getName() + ")"); + } + SequenceNumbers.CommitInfo sourceSeqNos = SequenceNumbers.loadSeqNoInfoFromLuceneCommit(source.getCommitUserData().entrySet()); + SequenceNumbers.CommitInfo targetSeqNos = SequenceNumbers.loadSeqNoInfoFromLuceneCommit(target.getCommitUserData().entrySet()); + if (sourceSeqNos.localCheckpoint != targetSeqNos.localCheckpoint || targetSeqNos.maxSeqNo != sourceSeqNos.maxSeqNo) { + final String message = "try to recover " + request.shardId() + " with sync id but " + + "seq_no stats are mismatched: [" + source.getCommitUserData() + "] vs [" + target.getCommitUserData() + "]"; + assert false : message; + throw new IllegalStateException(message); + } + return true; + } + void prepareTargetForTranslog(boolean fileBasedRecovery, int totalTranslogOps, ActionListener listener) { StopWatch stopWatch = new StopWatch().start(); final ActionListener wrappedListener = ActionListener.wrap( diff --git a/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java b/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java index b49bef57aceb1..b00e89575ccd5 100644 --- a/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java +++ b/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java @@ -89,7 +89,9 @@ import java.util.Arrays; import java.util.Collections; import java.util.Comparator; +import java.util.HashMap; import java.util.List; +import java.util.Map; import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; @@ -649,6 +651,43 @@ public void writeFileChunk(StoreFileMetaData md, long position, BytesReference c store.close(); } + public void testVerifySeqNoStatsWhenRecoverWithSyncId() throws Exception { + IndexShard shard = mock(IndexShard.class); + when(shard.state()).thenReturn(IndexShardState.STARTED); + RecoverySourceHandler handler = new RecoverySourceHandler( + shard, new TestRecoveryTargetHandler(), getStartRecoveryRequest(), between(1, 16), between(1, 4)); + + String syncId = UUIDs.randomBase64UUID(); + int numDocs = between(0, 1000); + long localCheckpoint = randomLongBetween(SequenceNumbers.NO_OPS_PERFORMED, Long.MAX_VALUE); + long maxSeqNo = randomLongBetween(SequenceNumbers.NO_OPS_PERFORMED, Long.MAX_VALUE); + assertTrue(handler.canSkipPhase1( + newMetadataSnapshot(syncId, Long.toString(localCheckpoint), Long.toString(maxSeqNo), numDocs), + newMetadataSnapshot(syncId, Long.toString(localCheckpoint), Long.toString(maxSeqNo), numDocs))); + + AssertionError error = expectThrows(AssertionError.class, () -> { + long localCheckpointOnTarget = randomValueOtherThan(localCheckpoint, + () -> randomLongBetween(SequenceNumbers.NO_OPS_PERFORMED, Long.MAX_VALUE)); + long maxSeqNoOnTarget = randomValueOtherThan(maxSeqNo, + () -> randomLongBetween(SequenceNumbers.NO_OPS_PERFORMED, Long.MAX_VALUE)); + handler.canSkipPhase1(newMetadataSnapshot(syncId, Long.toString(localCheckpoint), Long.toString(maxSeqNo), numDocs), + newMetadataSnapshot(syncId, Long.toString(localCheckpointOnTarget), Long.toString(maxSeqNoOnTarget), numDocs)); + }); + assertThat(error.getMessage(), containsString("try to recover [index][1] with sync id but seq_no stats are mismatched:")); + } + + private Store.MetadataSnapshot newMetadataSnapshot(String syncId, String localCheckpoint, String maxSeqNo, int numDocs) { + Map userData = new HashMap<>(); + userData.put(Engine.SYNC_COMMIT_ID, syncId); + if (localCheckpoint != null) { + userData.put(SequenceNumbers.LOCAL_CHECKPOINT_KEY, localCheckpoint); + } + if (maxSeqNo != null) { + userData.put(SequenceNumbers.LOCAL_CHECKPOINT_KEY, maxSeqNo); + } + return new Store.MetadataSnapshot(Collections.emptyMap(), userData, numDocs); + } + private Store newStore(Path path) throws IOException { return newStore(path, true); } From 6362328b93d0bb972d5c9ffd1fcceee9a3a47404 Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Fri, 24 May 2019 15:39:27 -0700 Subject: [PATCH 114/224] Remove renewal in retention lease recovery test (#42536) This commit removes the act of renewing some retention leases during a retention lease recovery test. Having renewal does not add anything extra to this test, but does allow for some situations where the test can fail spuriously (i.e., in a way that does not indicate that production code is broken). --- .../java/org/elasticsearch/index/seqno/RetentionLeaseIT.java | 3 --- 1 file changed, 3 deletions(-) diff --git a/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseIT.java b/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseIT.java index bbe05accb2813..debb6d219a5f1 100644 --- a/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseIT.java +++ b/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseIT.java @@ -36,7 +36,6 @@ import org.elasticsearch.indices.recovery.PeerRecoveryTargetService; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; -import org.elasticsearch.test.junit.annotations.TestLogging; import org.elasticsearch.test.transport.MockTransportService; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; @@ -356,7 +355,6 @@ public void testRetentionLeasesBackgroundSyncWithSoftDeletesDisabled() throws Ex assertFalse("retention leases background sync must be a noop if soft deletes is disabled", backgroundSyncRequestSent.get()); } - @TestLogging(value = "org.elasticsearch.index:debug,org.elasticsearch.indices.recovery:trace") public void testRetentionLeasesSyncOnRecovery() throws Exception { final int numberOfReplicas = 2 - scaledRandomIntBetween(0, 2); internalCluster().ensureAtLeastNumDataNodes(1 + numberOfReplicas); @@ -393,7 +391,6 @@ public void testRetentionLeasesSyncOnRecovery() throws Exception { final ActionListener listener = countDownLatchListener(latch); currentRetentionLeases.put(id, primary.addRetentionLease(id, retainingSequenceNumber, source, listener)); latch.await(); - currentRetentionLeases.put(id, primary.renewRetentionLease(id, retainingSequenceNumber, source)); } logger.info("finished adding [{}] retention leases", length); From d5281fc96f6fb2f022c87699bdad64d88614e04c Mon Sep 17 00:00:00 2001 From: Mark Vieira Date: Fri, 24 May 2019 18:08:51 -0700 Subject: [PATCH 115/224] Ignore JAR manifests when snapshotting runtime classpaths (#42548) --- .../elasticsearch/gradle/BuildPlugin.groovy | 8 +++++ .../test/StandaloneRestTestPlugin.groovy | 1 + .../org/elasticsearch/gradle/LoggedExec.java | 36 +++++++++++-------- plugins/discovery-azure-classic/build.gradle | 14 ++++++-- 4 files changed, 41 insertions(+), 18 deletions(-) diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy index 3a058ca9310df..b5c69a418cceb 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy @@ -129,6 +129,7 @@ class BuildPlugin implements Plugin { setupSeed(project) configureRepositories(project) project.extensions.getByType(ExtraPropertiesExtension).set('versions', VersionProperties.versions) + configureInputNormalization(project) configureSourceSets(project) configureCompile(project) configureJavadoc(project) @@ -580,6 +581,13 @@ class BuildPlugin implements Plugin { } } + /** + * Apply runtime classpath input normalization so that changes in JAR manifests don't break build cacheability + */ + static void configureInputNormalization(Project project) { + project.normalization.runtimeClasspath.ignore('META-INF/MANIFEST.MF') + } + /** Adds compiler settings to the project */ static void configureCompile(Project project) { ExtraPropertiesExtension ext = project.extensions.getByType(ExtraPropertiesExtension) diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/StandaloneRestTestPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/StandaloneRestTestPlugin.groovy index 6d895abaa97c7..c9a26eb74b54d 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/StandaloneRestTestPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/StandaloneRestTestPlugin.groovy @@ -62,6 +62,7 @@ class StandaloneRestTestPlugin implements Plugin { project.getTasks().create("buildResources", ExportElasticsearchBuildResourcesTask) BuildPlugin.configureRepositories(project) BuildPlugin.configureTestTasks(project) + BuildPlugin.configureInputNormalization(project) ExtraPropertiesExtension ext = project.extensions.getByType(ExtraPropertiesExtension) project.extensions.getByType(JavaPluginExtension).sourceCompatibility = ext.get('minimumRuntimeVersion') as JavaVersion diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/LoggedExec.java b/buildSrc/src/main/java/org/elasticsearch/gradle/LoggedExec.java index c71b7ba183562..0921c611895fa 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/LoggedExec.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/LoggedExec.java @@ -3,6 +3,7 @@ import org.gradle.api.Action; import org.gradle.api.GradleException; import org.gradle.api.Project; +import org.gradle.api.Task; import org.gradle.api.logging.Logger; import org.gradle.api.tasks.Exec; import org.gradle.api.tasks.Internal; @@ -34,22 +35,27 @@ public LoggedExec() { if (getLogger().isInfoEnabled() == false) { setIgnoreExitValue(true); setSpoolOutput(false); - doLast(task -> { - if (getExecResult().getExitValue() != 0) { - try { - getLogger().error("Output for " + getExecutable() + ":"); - outputLogger.accept(getLogger()); - } catch (Exception e) { - throw new GradleException("Failed to read exec output", e); + // We use an anonymous inner class here because Gradle cannot properly snapshot this input for the purposes of + // incremental build if we use a lambda. This ensures LoggedExec tasks that declare output can be UP-TO-DATE. + doLast(new Action() { + @Override + public void execute(Task task) { + if (LoggedExec.this.getExecResult().getExitValue() != 0) { + try { + LoggedExec.this.getLogger().error("Output for " + LoggedExec.this.getExecutable() + ":"); + outputLogger.accept(LoggedExec.this.getLogger()); + } catch (Exception e) { + throw new GradleException("Failed to read exec output", e); + } + throw new GradleException( + String.format( + "Process '%s %s' finished with non-zero exit value %d", + LoggedExec.this.getExecutable(), + LoggedExec.this.getArgs(), + LoggedExec.this.getExecResult().getExitValue() + ) + ); } - throw new GradleException( - String.format( - "Process '%s %s' finished with non-zero exit value %d", - getExecutable(), - getArgs(), - getExecResult().getExitValue() - ) - ); } }); } diff --git a/plugins/discovery-azure-classic/build.gradle b/plugins/discovery-azure-classic/build.gradle index d7847259defaf..cb024d8ac00d7 100644 --- a/plugins/discovery-azure-classic/build.gradle +++ b/plugins/discovery-azure-classic/build.gradle @@ -56,7 +56,7 @@ dependencies { } // needed to be consistent with ssl host checking -String host = InetAddress.getLoopbackAddress().getHostAddress(); +String host = InetAddress.getLoopbackAddress().getHostAddress() // location of keystore and files to generate it File keystore = new File(project.buildDir, 'keystore/test-node.jks') @@ -67,6 +67,7 @@ task createKey(type: LoggedExec) { project.delete(keystore.parentFile) keystore.parentFile.mkdirs() } + outputs.file(keystore).withPropertyName('keystoreFile') executable = new File(project.runtimeJavaHome, 'bin/keytool') standardInput = new ByteArrayInputStream('FirstName LastName\nUnit\nOrganization\nCity\nState\nNL\nyes\n\n'.getBytes('UTF-8')) args '-genkey', @@ -81,8 +82,15 @@ task createKey(type: LoggedExec) { } // add keystore to test classpath: it expects it there -sourceSets.test.resources.srcDir(keystore.parentFile) -processTestResources.dependsOn(createKey) +processTestResources { + from createKey +} + +normalization { + runtimeClasspath { + ignore 'test-node.jks' + } +} dependencyLicenses { mapping from: /azure-.*/, to: 'azure' From 0bb46d73cb89016ab9d96e76693bb0d7cee267a1 Mon Sep 17 00:00:00 2001 From: Mark Vieira Date: Fri, 24 May 2019 18:29:40 -0700 Subject: [PATCH 116/224] Make LoggerUsageTask cacheable (#42550) --- .../gradle/precommit/PrecommitTasks.groovy | 1 - .../gradle/precommit/LoggerUsageTask.java | 27 +++++++------------ .../gradle/precommit/PrecommitTask.java | 1 - 3 files changed, 10 insertions(+), 19 deletions(-) diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy index 7e8fbd0658698..3693609b4de70 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy @@ -236,7 +236,6 @@ class PrecommitTasks { "org.elasticsearch.test:logger-usage:${VersionProperties.elasticsearch}") return project.tasks.create('loggerUsageCheck', LoggerUsageTask.class) { classpath = project.configurations.loggerUsagePlugin - javaHome = project.runtimeJavaHome } } } diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/LoggerUsageTask.java b/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/LoggerUsageTask.java index fb1831bda4dcd..a730e069d5a7f 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/LoggerUsageTask.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/LoggerUsageTask.java @@ -22,10 +22,13 @@ import org.elasticsearch.gradle.LoggedExec; import org.gradle.api.file.FileCollection; import org.gradle.api.plugins.JavaPluginConvention; +import org.gradle.api.tasks.CacheableTask; import org.gradle.api.tasks.Classpath; -import org.gradle.api.tasks.Input; import org.gradle.api.tasks.InputFiles; +import org.gradle.api.tasks.PathSensitive; +import org.gradle.api.tasks.PathSensitivity; import org.gradle.api.tasks.SkipWhenEmpty; +import org.gradle.api.tasks.SourceSet; import org.gradle.api.tasks.TaskAction; import java.io.File; @@ -33,13 +36,13 @@ /** * Runs LoggerUsageCheck on a set of directories. */ +@CacheableTask public class LoggerUsageTask extends PrecommitTask { + private FileCollection classpath; + public LoggerUsageTask() { setDescription("Runs LoggerUsageCheck on output directories of all source sets"); - getProject().getConvention().getPlugin(JavaPluginConvention.class).getSourceSets().all(sourceSet -> { - dependsOn(sourceSet.getClassesTaskName()); - }); } @TaskAction @@ -47,7 +50,6 @@ public void runLoggerUsageTask() { LoggedExec.javaexec(getProject(), spec -> { spec.setMain("org.elasticsearch.test.loggerusage.ESLoggerUsageChecker"); spec.classpath(getClasspath()); - spec.executable(getJavaHome() + "/bin/java"); getClassDirectories().forEach(spec::args); }); } @@ -62,26 +64,17 @@ public void setClasspath(FileCollection classpath) { } @InputFiles + @PathSensitive(PathSensitivity.RELATIVE) @SkipWhenEmpty public FileCollection getClassDirectories() { return getProject().getConvention().getPlugin(JavaPluginConvention.class).getSourceSets().stream() // Don't pick up all source sets like the java9 ones as logger-check doesn't support the class format - .filter(sourceSet -> sourceSet.getName().equals("main") || sourceSet.getName().equals("test")) + .filter(sourceSet -> sourceSet.getName().equals(SourceSet.MAIN_SOURCE_SET_NAME) + || sourceSet.getName().equals(SourceSet.TEST_SOURCE_SET_NAME)) .map(sourceSet -> sourceSet.getOutput().getClassesDirs()) .reduce(FileCollection::plus) .orElse(getProject().files()) .filter(File::exists); } - @Input - public Object getJavaHome() { - return javaHome; - } - - public void setJavaHome(Object javaHome) { - this.javaHome = javaHome; - } - - private FileCollection classpath; - private Object javaHome; } diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/PrecommitTask.java b/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/PrecommitTask.java index 6f99e901ec47a..5a29c4a4a3570 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/PrecommitTask.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/PrecommitTask.java @@ -36,7 +36,6 @@ public File getSuccessMarker() { @TaskAction public void writeMarker() throws IOException { - getSuccessMarker().getParentFile().mkdirs(); Files.write(getSuccessMarker().toPath(), new byte[]{}, StandardOpenOption.CREATE); } From 1ed7b616afa99582d88dd82389ce510e27971119 Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Fri, 24 May 2019 21:14:12 -0400 Subject: [PATCH 117/224] Adjust bwc version Relates #39687 --- .../action/admin/indices/close/CloseIndexResponse.java | 4 ++-- .../action/admin/indices/close/CloseIndexResponseTests.java | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/close/CloseIndexResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/close/CloseIndexResponse.java index ea7d14655c594..c653c264e95db 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/close/CloseIndexResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/close/CloseIndexResponse.java @@ -60,7 +60,7 @@ public void readFrom(StreamInput in) throws IOException { if (in.getVersion().onOrAfter(Version.V_7_2_0)) { readShardsAcknowledged(in); } - if (in.getVersion().onOrAfter(Version.V_8_0_0)) { + if (in.getVersion().onOrAfter(Version.V_7_3_0)) { indices = unmodifiableList(in.readList(IndexResult::new)); } else { indices = unmodifiableList(emptyList()); @@ -73,7 +73,7 @@ public void writeTo(StreamOutput out) throws IOException { if (out.getVersion().onOrAfter(Version.V_7_2_0)) { writeShardsAcknowledged(out); } - if (out.getVersion().onOrAfter(Version.V_8_0_0)) { + if (out.getVersion().onOrAfter(Version.V_7_3_0)) { out.writeList(indices); } } diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/close/CloseIndexResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/close/CloseIndexResponseTests.java index 40c34af51598d..aa1c0b3bbb445 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/close/CloseIndexResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/close/CloseIndexResponseTests.java @@ -100,7 +100,7 @@ public void testBwcSerialization() throws Exception { } assertThat(deserializedResponse.isAcknowledged(), equalTo(response.isAcknowledged())); assertThat(deserializedResponse.isShardsAcknowledged(), equalTo(response.isShardsAcknowledged())); - if (version.onOrAfter(Version.V_8_0_0)) { + if (version.onOrAfter(Version.V_7_3_0)) { assertThat(deserializedResponse.getIndices(), hasSize(response.getIndices().size())); } else { assertThat(deserializedResponse.getIndices(), empty()); From 5ca06a5cef5950ee3839321320d098f420293042 Mon Sep 17 00:00:00 2001 From: Mark Vieira Date: Fri, 24 May 2019 19:28:22 -0700 Subject: [PATCH 118/224] Make JarHell task cacheable (#42551) --- .../gradle/precommit/PrecommitTasks.groovy | 12 ++++----- .../gradle/precommit/JarHellTask.java | 25 +++++++------------ 2 files changed, 14 insertions(+), 23 deletions(-) diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy index 3693609b4de70..f656f177ce67b 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy @@ -107,14 +107,12 @@ class PrecommitTasks { } private static Task configureJarHell(Project project) { - Task task = project.tasks.create('jarHell', JarHellTask.class) - task.classpath = project.sourceSets.test.runtimeClasspath - if (project.plugins.hasPlugin(ShadowPlugin)) { - task.classpath += project.configurations.bundle + return project.tasks.create('jarHell', JarHellTask) { task -> + task.classpath = project.sourceSets.test.runtimeClasspath + if (project.plugins.hasPlugin(ShadowPlugin)) { + task.classpath += project.configurations.bundle + } } - task.dependsOn(project.sourceSets.test.classesTaskName) - task.javaHome = project.runtimeJavaHome - return task } private static Task configureThirdPartyAudit(Project project) { diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/JarHellTask.java b/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/JarHellTask.java index fd5b0c5790773..c9152486a1c51 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/JarHellTask.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/JarHellTask.java @@ -21,19 +21,20 @@ import org.elasticsearch.gradle.LoggedExec; import org.gradle.api.file.FileCollection; -import org.gradle.api.tasks.Classpath; -import org.gradle.api.tasks.Input; +import org.gradle.api.tasks.CacheableTask; +import org.gradle.api.tasks.CompileClasspath; import org.gradle.api.tasks.TaskAction; +import java.io.File; + /** * Runs CheckJarHell on a classpath. */ +@CacheableTask public class JarHellTask extends PrecommitTask { private FileCollection classpath; - private Object javaHome; - public JarHellTask() { setDescription("Runs CheckJarHell on the configured classpath"); } @@ -42,23 +43,15 @@ public JarHellTask() { public void runJarHellCheck() { LoggedExec.javaexec(getProject(), spec -> { spec.classpath(getClasspath()); - spec.executable(getJavaHome() + "/bin/java"); spec.setMain("org.elasticsearch.bootstrap.JarHell"); }); } - @Input - public Object getJavaHome() { - return javaHome; - } - - public void setJavaHome(Object javaHome) { - this.javaHome = javaHome; - } - - @Classpath + // We use compile classpath normalization here because class implementation changes are irrelevant for the purposes of jar hell. + // We only care about the runtime classpath ABI here. + @CompileClasspath public FileCollection getClasspath() { - return classpath.filter(file -> file.exists()); + return classpath.filter(File::exists); } public void setClasspath(FileCollection classpath) { From 13af50a3f4152995eaf4365cd1f969086eb76844 Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Fri, 24 May 2019 22:13:48 -0400 Subject: [PATCH 119/224] Enable recoveries trace log in CcrRetentionLeaseIT Tracked #41679 --- .../xpack/ccr/CcrRetentionLeaseIT.java | 27 +++++++++---------- 1 file changed, 13 insertions(+), 14 deletions(-) diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CcrRetentionLeaseIT.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CcrRetentionLeaseIT.java index 2cf6e3bdaf332..9595485e2721c 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CcrRetentionLeaseIT.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CcrRetentionLeaseIT.java @@ -88,7 +88,7 @@ import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.hasSize; -@TestLogging(value = "org.elasticsearch.xpack.ccr:trace") +@TestLogging(value = "org.elasticsearch.xpack.ccr:trace,org.elasticsearch.indices.recovery:trace") public class CcrRetentionLeaseIT extends CcrIntegTestCase { public static final class RetentionLeaseRenewIntervalSettingPlugin extends Plugin { @@ -192,7 +192,7 @@ public void testRetentionLeaseIsTakenAtTheStartOfRecovery() throws Exception { final List shardsStats = getShardsStats(stats); for (int i = 0; i < numberOfShards * (1 + numberOfReplicas); i++) { final RetentionLeases currentRetentionLeases = shardsStats.get(i).getRetentionLeaseStats().retentionLeases(); - assertThat(currentRetentionLeases.leases(), hasSize(1)); + assertThat(Strings.toString(shardsStats.get(i)), currentRetentionLeases.leases(), hasSize(1)); final RetentionLease retentionLease = currentRetentionLeases.leases().iterator().next(); assertThat(retentionLease.id(), equalTo(getRetentionLeaseId(followerIndex, leaderIndex))); @@ -268,7 +268,6 @@ public void testRetentionLeaseIsRenewedDuringRecovery() throws Exception { } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/39331") public void testRetentionLeasesAreNotBeingRenewedAfterRecoveryCompletes() throws Exception { final String leaderIndex = "leader"; final int numberOfShards = randomIntBetween(1, 3); @@ -316,7 +315,7 @@ public void testRetentionLeasesAreNotBeingRenewedAfterRecoveryCompletes() throws final List shardsStats = getShardsStats(stats); for (int i = 0; i < numberOfShards * (1 + numberOfReplicas); i++) { final RetentionLeases currentRetentionLeases = shardsStats.get(i).getRetentionLeaseStats().retentionLeases(); - assertThat(currentRetentionLeases.leases(), hasSize(1)); + assertThat(Strings.toString(shardsStats.get(i)), currentRetentionLeases.leases(), hasSize(1)); final ClusterStateResponse followerIndexClusterState = followerClient().admin().cluster().prepareState().clear().setMetaData(true).setIndices(followerIndex).get(); final String followerUUID = followerIndexClusterState.getState().metaData().index(followerIndex).getIndexUUID(); @@ -354,7 +353,7 @@ public void testRetentionLeasesAreNotBeingRenewedAfterRecoveryCompletes() throws continue; } final RetentionLeases currentRetentionLeases = shardsStats.get(i).getRetentionLeaseStats().retentionLeases(); - assertThat(currentRetentionLeases.leases(), hasSize(1)); + assertThat(Strings.toString(shardsStats.get(i)), currentRetentionLeases.leases(), hasSize(1)); final ClusterStateResponse followerIndexClusterState = followerClient().admin().cluster().prepareState().clear().setMetaData(true).setIndices(followerIndex).get(); final String followerUUID = followerIndexClusterState.getState().metaData().index(followerIndex).getIndexUUID(); @@ -392,7 +391,7 @@ public void testUnfollowRemovesRetentionLeases() throws Exception { leaderClient().admin().indices().stats(new IndicesStatsRequest().clear().indices(leaderIndex)).actionGet(); final List shardsStats = getShardsStats(stats); for (final ShardStats shardStats : shardsStats) { - assertThat(shardStats.getRetentionLeaseStats().retentionLeases().leases(), hasSize(1)); + assertThat(Strings.toString(shardStats), shardStats.getRetentionLeaseStats().retentionLeases().leases(), hasSize(1)); assertThat( shardStats.getRetentionLeaseStats().retentionLeases().leases().iterator().next().id(), equalTo(retentionLeaseId)); @@ -454,7 +453,7 @@ public void testUnfollowRemovesRetentionLeases() throws Exception { leaderClient().admin().indices().stats(new IndicesStatsRequest().clear().indices(leaderIndex)).actionGet(); final List afterUnfollowShardsStats = getShardsStats(afterUnfollowStats); for (final ShardStats shardStats : afterUnfollowShardsStats) { - assertThat(shardStats.getRetentionLeaseStats().retentionLeases().leases(), empty()); + assertThat(Strings.toString(shardStats), shardStats.getRetentionLeaseStats().retentionLeases().leases(), empty()); } } finally { for (final ObjectCursor senderNode : followerClusterState.getState().nodes().getDataNodes().values()) { @@ -605,7 +604,7 @@ public void testRetentionLeaseAdvancesWhileFollowing() throws Exception { final List shardsStats = getShardsStats(stats); for (int i = 0; i < numberOfShards * (1 + numberOfReplicas); i++) { final RetentionLeases currentRetentionLeases = shardsStats.get(i).getRetentionLeaseStats().retentionLeases(); - assertThat(currentRetentionLeases.leases(), hasSize(1)); + assertThat(Strings.toString(shardsStats.get(i)), currentRetentionLeases.leases(), hasSize(1)); final RetentionLease retentionLease = currentRetentionLeases.leases().iterator().next(); assertThat(retentionLease.id(), equalTo(getRetentionLeaseId(followerIndex, leaderIndex))); @@ -668,7 +667,7 @@ public void testRetentionLeaseRenewalIsCancelledWhenFollowingIsPaused() throws E final List shardsStats = getShardsStats(stats); for (int i = 0; i < numberOfShards * (1 + numberOfReplicas); i++) { final RetentionLeases currentRetentionLeases = shardsStats.get(i).getRetentionLeaseStats().retentionLeases(); - assertThat(currentRetentionLeases.leases(), hasSize(1)); + assertThat(Strings.toString(shardsStats.get(i)), currentRetentionLeases.leases(), hasSize(1)); final ClusterStateResponse followerIndexClusterState = followerClient().admin().cluster().prepareState().clear().setMetaData(true).setIndices(followerIndex).get(); final String followerUUID = followerIndexClusterState.getState().metaData().index(followerIndex).getIndexUUID(); @@ -706,7 +705,7 @@ public void testRetentionLeaseRenewalIsCancelledWhenFollowingIsPaused() throws E continue; } final RetentionLeases currentRetentionLeases = shardsStats.get(i).getRetentionLeaseStats().retentionLeases(); - assertThat(currentRetentionLeases.leases(), hasSize(1)); + assertThat(Strings.toString(shardsStats.get(i)), currentRetentionLeases.leases(), hasSize(1)); final ClusterStateResponse followerIndexClusterState = followerClient().admin().cluster().prepareState().clear().setMetaData(true).setIndices(followerIndex).get(); final String followerUUID = followerIndexClusterState.getState().metaData().index(followerIndex).getIndexUUID(); @@ -912,7 +911,7 @@ public void onResponseReceived( leaderClient().admin().indices().stats(new IndicesStatsRequest().clear().indices(leaderIndex)).actionGet(); final List afterUnfollowShardsStats = getShardsStats(afterUnfollowStats); for (final ShardStats shardStats : afterUnfollowShardsStats) { - assertThat(shardStats.getRetentionLeaseStats().retentionLeases().leases(), empty()); + assertThat(Strings.toString(shardStats), shardStats.getRetentionLeaseStats().retentionLeases().leases(), empty()); } } finally { for (final ObjectCursor senderNode : followerClusterState.getState().nodes().getDataNodes().values()) { @@ -962,7 +961,7 @@ public void testForgetFollower() throws Exception { leaderClient().admin().indices().stats(new IndicesStatsRequest().clear().indices(leaderIndex)).actionGet(); final List afterForgetFollowerShardsStats = getShardsStats(afterForgetFollowerStats); for (final ShardStats shardStats : afterForgetFollowerShardsStats) { - assertThat(shardStats.getRetentionLeaseStats().retentionLeases().leases(), empty()); + assertThat(Strings.toString(shardStats), shardStats.getRetentionLeaseStats().retentionLeases().leases(), empty()); } } @@ -982,7 +981,7 @@ private void assertRetentionLeaseRenewal( final List shardsStats = getShardsStats(stats); for (int i = 0; i < numberOfShards * (1 + numberOfReplicas); i++) { final RetentionLeases currentRetentionLeases = shardsStats.get(i).getRetentionLeaseStats().retentionLeases(); - assertThat(currentRetentionLeases.leases(), hasSize(1)); + assertThat(Strings.toString(shardsStats.get(i)), currentRetentionLeases.leases(), hasSize(1)); final RetentionLease retentionLease = currentRetentionLeases.leases().iterator().next(); assertThat(retentionLease.id(), equalTo(getRetentionLeaseId(followerIndex, leaderIndex))); @@ -999,7 +998,7 @@ private void assertRetentionLeaseRenewal( final List shardsStats = getShardsStats(stats); for (int i = 0; i < numberOfShards * (1 + numberOfReplicas); i++) { final RetentionLeases currentRetentionLeases = shardsStats.get(i).getRetentionLeaseStats().retentionLeases(); - assertThat(currentRetentionLeases.leases(), hasSize(1)); + assertThat(Strings.toString(shardsStats.get(i)), currentRetentionLeases.leases(), hasSize(1)); final RetentionLease retentionLease = currentRetentionLeases.leases().iterator().next(); assertThat(retentionLease.id(), equalTo(getRetentionLeaseId(followerIndex, leaderIndex))); From d6d032d0c708d3ef31fe3c8ff391af42aa4afeee Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Sat, 25 May 2019 05:51:39 +0200 Subject: [PATCH 120/224] Fix Test Failures from MockNioTransport Logger (#42545) * This call can fail when it tries to re-schedule the timeout check after the threadpool was shut down already failing tests with RejectedExecutionException --- .../java/org/elasticsearch/transport/nio/MockNioTransport.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/framework/src/main/java/org/elasticsearch/transport/nio/MockNioTransport.java b/test/framework/src/main/java/org/elasticsearch/transport/nio/MockNioTransport.java index 42dae39146605..9470b7548adfb 100644 --- a/test/framework/src/main/java/org/elasticsearch/transport/nio/MockNioTransport.java +++ b/test/framework/src/main/java/org/elasticsearch/transport/nio/MockNioTransport.java @@ -372,7 +372,7 @@ private void logLongRunningExecutions() { } } if (stopped == false) { - threadPool.schedule(this::logLongRunningExecutions, CHECK_INTERVAL, ThreadPool.Names.GENERIC); + threadPool.scheduleUnlessShuttingDown(CHECK_INTERVAL, ThreadPool.Names.GENERIC, this::logLongRunningExecutions); } } From f95071d0af299c3fda326cb1a3f7db18f4ddc3db Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Sat, 25 May 2019 08:53:50 +0200 Subject: [PATCH 121/224] Upgrade to Netty 4.1.36 (#42543) --- buildSrc/version.properties | 2 +- .../licenses/netty-buffer-4.1.35.Final.jar.sha1 | 1 - .../licenses/netty-buffer-4.1.36.Final.jar.sha1 | 1 + .../transport-netty4/licenses/netty-codec-4.1.35.Final.jar.sha1 | 1 - .../transport-netty4/licenses/netty-codec-4.1.36.Final.jar.sha1 | 1 + .../licenses/netty-codec-http-4.1.35.Final.jar.sha1 | 1 - .../licenses/netty-codec-http-4.1.36.Final.jar.sha1 | 1 + .../licenses/netty-common-4.1.35.Final.jar.sha1 | 1 - .../licenses/netty-common-4.1.36.Final.jar.sha1 | 1 + .../licenses/netty-handler-4.1.35.Final.jar.sha1 | 1 - .../licenses/netty-handler-4.1.36.Final.jar.sha1 | 1 + .../licenses/netty-resolver-4.1.35.Final.jar.sha1 | 1 - .../licenses/netty-resolver-4.1.36.Final.jar.sha1 | 1 + .../licenses/netty-transport-4.1.35.Final.jar.sha1 | 1 - .../licenses/netty-transport-4.1.36.Final.jar.sha1 | 1 + .../transport-nio/licenses/netty-buffer-4.1.35.Final.jar.sha1 | 1 - .../transport-nio/licenses/netty-buffer-4.1.36.Final.jar.sha1 | 1 + .../transport-nio/licenses/netty-codec-4.1.35.Final.jar.sha1 | 1 - .../transport-nio/licenses/netty-codec-4.1.36.Final.jar.sha1 | 1 + .../licenses/netty-codec-http-4.1.35.Final.jar.sha1 | 1 - .../licenses/netty-codec-http-4.1.36.Final.jar.sha1 | 1 + .../transport-nio/licenses/netty-common-4.1.35.Final.jar.sha1 | 1 - .../transport-nio/licenses/netty-common-4.1.36.Final.jar.sha1 | 1 + .../transport-nio/licenses/netty-handler-4.1.35.Final.jar.sha1 | 1 - .../transport-nio/licenses/netty-handler-4.1.36.Final.jar.sha1 | 1 + .../transport-nio/licenses/netty-resolver-4.1.35.Final.jar.sha1 | 1 - .../transport-nio/licenses/netty-resolver-4.1.36.Final.jar.sha1 | 1 + .../licenses/netty-transport-4.1.35.Final.jar.sha1 | 1 - .../licenses/netty-transport-4.1.36.Final.jar.sha1 | 1 + 29 files changed, 15 insertions(+), 15 deletions(-) delete mode 100644 modules/transport-netty4/licenses/netty-buffer-4.1.35.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-buffer-4.1.36.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-codec-4.1.35.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-codec-4.1.36.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-codec-http-4.1.35.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-codec-http-4.1.36.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-common-4.1.35.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-common-4.1.36.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-handler-4.1.35.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-handler-4.1.36.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-resolver-4.1.35.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-resolver-4.1.36.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-transport-4.1.35.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-transport-4.1.36.Final.jar.sha1 delete mode 100644 plugins/transport-nio/licenses/netty-buffer-4.1.35.Final.jar.sha1 create mode 100644 plugins/transport-nio/licenses/netty-buffer-4.1.36.Final.jar.sha1 delete mode 100644 plugins/transport-nio/licenses/netty-codec-4.1.35.Final.jar.sha1 create mode 100644 plugins/transport-nio/licenses/netty-codec-4.1.36.Final.jar.sha1 delete mode 100644 plugins/transport-nio/licenses/netty-codec-http-4.1.35.Final.jar.sha1 create mode 100644 plugins/transport-nio/licenses/netty-codec-http-4.1.36.Final.jar.sha1 delete mode 100644 plugins/transport-nio/licenses/netty-common-4.1.35.Final.jar.sha1 create mode 100644 plugins/transport-nio/licenses/netty-common-4.1.36.Final.jar.sha1 delete mode 100644 plugins/transport-nio/licenses/netty-handler-4.1.35.Final.jar.sha1 create mode 100644 plugins/transport-nio/licenses/netty-handler-4.1.36.Final.jar.sha1 delete mode 100644 plugins/transport-nio/licenses/netty-resolver-4.1.35.Final.jar.sha1 create mode 100644 plugins/transport-nio/licenses/netty-resolver-4.1.36.Final.jar.sha1 delete mode 100644 plugins/transport-nio/licenses/netty-transport-4.1.35.Final.jar.sha1 create mode 100644 plugins/transport-nio/licenses/netty-transport-4.1.36.Final.jar.sha1 diff --git a/buildSrc/version.properties b/buildSrc/version.properties index a3214c789a47d..cbcbf00fc6d01 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -20,7 +20,7 @@ slf4j = 1.6.2 # when updating the JNA version, also update the version in buildSrc/build.gradle jna = 4.5.1 -netty = 4.1.35.Final +netty = 4.1.36.Final joda = 2.10.2 # when updating this version, you need to ensure compatibility with: diff --git a/modules/transport-netty4/licenses/netty-buffer-4.1.35.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-buffer-4.1.35.Final.jar.sha1 deleted file mode 100644 index 6112faf2d0103..0000000000000 --- a/modules/transport-netty4/licenses/netty-buffer-4.1.35.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a244722975cddaef5f9bbd45e7a44d0db5f058d8 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-buffer-4.1.36.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-buffer-4.1.36.Final.jar.sha1 new file mode 100644 index 0000000000000..90895a5e168c9 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-buffer-4.1.36.Final.jar.sha1 @@ -0,0 +1 @@ +7f2db0921dd57df4db076229830ab09bba713aeb \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-4.1.35.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-4.1.35.Final.jar.sha1 deleted file mode 100644 index 811797decc1e8..0000000000000 --- a/modules/transport-netty4/licenses/netty-codec-4.1.35.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b86f6b9eedbe38d6fa0bbbefa961d566e293e13e \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-4.1.36.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-4.1.36.Final.jar.sha1 new file mode 100644 index 0000000000000..efd6e5a327745 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-codec-4.1.36.Final.jar.sha1 @@ -0,0 +1 @@ +8462116d327bb3d1ec24258071f2e7345a73dbfc \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-http-4.1.35.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http-4.1.35.Final.jar.sha1 deleted file mode 100644 index 3b0f1f7daa3c5..0000000000000 --- a/modules/transport-netty4/licenses/netty-codec-http-4.1.35.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f7a38b0a3ee2fff3d9dd2bb44f5e16140b70b354 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-http-4.1.36.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http-4.1.36.Final.jar.sha1 new file mode 100644 index 0000000000000..4e86fef0e121a --- /dev/null +++ b/modules/transport-netty4/licenses/netty-codec-http-4.1.36.Final.jar.sha1 @@ -0,0 +1 @@ +62b73d439dbddf3c0dde092b048580139695ab46 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-common-4.1.35.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-common-4.1.35.Final.jar.sha1 deleted file mode 100644 index 26576f8e9ccdd..0000000000000 --- a/modules/transport-netty4/licenses/netty-common-4.1.35.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c776487b782046e1399b00cd40c63ef51d26e953 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-common-4.1.36.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-common-4.1.36.Final.jar.sha1 new file mode 100644 index 0000000000000..d9d50d776e9ba --- /dev/null +++ b/modules/transport-netty4/licenses/netty-common-4.1.36.Final.jar.sha1 @@ -0,0 +1 @@ +f6f38fde652a70ea579897edc80e52353e487ae6 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-handler-4.1.35.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-handler-4.1.35.Final.jar.sha1 deleted file mode 100644 index 0956313b2aa40..0000000000000 --- a/modules/transport-netty4/licenses/netty-handler-4.1.35.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b23efe31416942718ac46ad00bb3e91e4b3f6ab7 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-handler-4.1.36.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-handler-4.1.36.Final.jar.sha1 new file mode 100644 index 0000000000000..d943140f3634c --- /dev/null +++ b/modules/transport-netty4/licenses/netty-handler-4.1.36.Final.jar.sha1 @@ -0,0 +1 @@ +1c38a5920a10c01b1cce4cdc964447ec76abf1b5 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-resolver-4.1.35.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-resolver-4.1.35.Final.jar.sha1 deleted file mode 100644 index cdd335d059196..0000000000000 --- a/modules/transport-netty4/licenses/netty-resolver-4.1.35.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d60c4f4e12f0703dff477c9bf595f37a41ecacbc \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-resolver-4.1.36.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-resolver-4.1.36.Final.jar.sha1 new file mode 100644 index 0000000000000..1499233b60d33 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-resolver-4.1.36.Final.jar.sha1 @@ -0,0 +1 @@ +e4d243fbf4e6837fa294f892bf97149e18129100 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-transport-4.1.35.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-4.1.35.Final.jar.sha1 deleted file mode 100644 index 8f52a39c4f7a0..0000000000000 --- a/modules/transport-netty4/licenses/netty-transport-4.1.35.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -526b2646885c57adb54e2f89b2e2b80bebce3962 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-transport-4.1.36.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-4.1.36.Final.jar.sha1 new file mode 100644 index 0000000000000..f36c1b17d74e0 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-transport-4.1.36.Final.jar.sha1 @@ -0,0 +1 @@ +8546e6be47be587acab86bbd106ca023678f07d9 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-buffer-4.1.35.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-buffer-4.1.35.Final.jar.sha1 deleted file mode 100644 index 6112faf2d0103..0000000000000 --- a/plugins/transport-nio/licenses/netty-buffer-4.1.35.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a244722975cddaef5f9bbd45e7a44d0db5f058d8 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-buffer-4.1.36.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-buffer-4.1.36.Final.jar.sha1 new file mode 100644 index 0000000000000..90895a5e168c9 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-buffer-4.1.36.Final.jar.sha1 @@ -0,0 +1 @@ +7f2db0921dd57df4db076229830ab09bba713aeb \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-codec-4.1.35.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-4.1.35.Final.jar.sha1 deleted file mode 100644 index 811797decc1e8..0000000000000 --- a/plugins/transport-nio/licenses/netty-codec-4.1.35.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b86f6b9eedbe38d6fa0bbbefa961d566e293e13e \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-codec-4.1.36.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-4.1.36.Final.jar.sha1 new file mode 100644 index 0000000000000..efd6e5a327745 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-codec-4.1.36.Final.jar.sha1 @@ -0,0 +1 @@ +8462116d327bb3d1ec24258071f2e7345a73dbfc \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-codec-http-4.1.35.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-http-4.1.35.Final.jar.sha1 deleted file mode 100644 index 3b0f1f7daa3c5..0000000000000 --- a/plugins/transport-nio/licenses/netty-codec-http-4.1.35.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f7a38b0a3ee2fff3d9dd2bb44f5e16140b70b354 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-codec-http-4.1.36.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-http-4.1.36.Final.jar.sha1 new file mode 100644 index 0000000000000..4e86fef0e121a --- /dev/null +++ b/plugins/transport-nio/licenses/netty-codec-http-4.1.36.Final.jar.sha1 @@ -0,0 +1 @@ +62b73d439dbddf3c0dde092b048580139695ab46 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-common-4.1.35.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-common-4.1.35.Final.jar.sha1 deleted file mode 100644 index 26576f8e9ccdd..0000000000000 --- a/plugins/transport-nio/licenses/netty-common-4.1.35.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c776487b782046e1399b00cd40c63ef51d26e953 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-common-4.1.36.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-common-4.1.36.Final.jar.sha1 new file mode 100644 index 0000000000000..d9d50d776e9ba --- /dev/null +++ b/plugins/transport-nio/licenses/netty-common-4.1.36.Final.jar.sha1 @@ -0,0 +1 @@ +f6f38fde652a70ea579897edc80e52353e487ae6 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-handler-4.1.35.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-handler-4.1.35.Final.jar.sha1 deleted file mode 100644 index 0956313b2aa40..0000000000000 --- a/plugins/transport-nio/licenses/netty-handler-4.1.35.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b23efe31416942718ac46ad00bb3e91e4b3f6ab7 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-handler-4.1.36.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-handler-4.1.36.Final.jar.sha1 new file mode 100644 index 0000000000000..d943140f3634c --- /dev/null +++ b/plugins/transport-nio/licenses/netty-handler-4.1.36.Final.jar.sha1 @@ -0,0 +1 @@ +1c38a5920a10c01b1cce4cdc964447ec76abf1b5 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-resolver-4.1.35.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-resolver-4.1.35.Final.jar.sha1 deleted file mode 100644 index cdd335d059196..0000000000000 --- a/plugins/transport-nio/licenses/netty-resolver-4.1.35.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d60c4f4e12f0703dff477c9bf595f37a41ecacbc \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-resolver-4.1.36.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-resolver-4.1.36.Final.jar.sha1 new file mode 100644 index 0000000000000..1499233b60d33 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-resolver-4.1.36.Final.jar.sha1 @@ -0,0 +1 @@ +e4d243fbf4e6837fa294f892bf97149e18129100 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-transport-4.1.35.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-transport-4.1.35.Final.jar.sha1 deleted file mode 100644 index 8f52a39c4f7a0..0000000000000 --- a/plugins/transport-nio/licenses/netty-transport-4.1.35.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -526b2646885c57adb54e2f89b2e2b80bebce3962 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-transport-4.1.36.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-transport-4.1.36.Final.jar.sha1 new file mode 100644 index 0000000000000..f36c1b17d74e0 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-transport-4.1.36.Final.jar.sha1 @@ -0,0 +1 @@ +8546e6be47be587acab86bbd106ca023678f07d9 \ No newline at end of file From 5d837fa312b0e41a77a65462667a2d92d1114567 Mon Sep 17 00:00:00 2001 From: David Kyle Date: Sat, 25 May 2019 07:51:21 +0100 Subject: [PATCH 122/224] [ML Data Frame] Mute Data Frame tests Relates to https://github.com/elastic/elasticsearch/issues/42344 --- .../xpack/dataframe/integration/DataFrameTransformIT.java | 1 + .../xpack/dataframe/integration/DataFrameAuditorIT.java | 2 ++ .../dataframe/integration/DataFrameConfigurationIndexIT.java | 2 ++ .../xpack/dataframe/integration/DataFrameGetAndGetStatsIT.java | 2 ++ .../xpack/dataframe/integration/DataFrameMetaDataIT.java | 2 ++ .../xpack/dataframe/integration/DataFramePivotRestIT.java | 2 ++ .../xpack/dataframe/integration/DataFrameTaskFailedStateIT.java | 2 ++ .../xpack/dataframe/integration/DataFrameUsageIT.java | 2 ++ 8 files changed, 15 insertions(+) diff --git a/x-pack/plugin/data-frame/qa/multi-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTransformIT.java b/x-pack/plugin/data-frame/qa/multi-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTransformIT.java index b98367979bff9..486ea5e5d7403 100644 --- a/x-pack/plugin/data-frame/qa/multi-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTransformIT.java +++ b/x-pack/plugin/data-frame/qa/multi-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTransformIT.java @@ -30,6 +30,7 @@ public void cleanTransforms() throws IOException { cleanUp(); } + @AwaitsFix( bugUrl = "https://github.com/elastic/elasticsearch/issues/42344") public void testDataFrameTransformCrud() throws Exception { createReviewsIndex(); diff --git a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameAuditorIT.java b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameAuditorIT.java index 9884c9bb6793b..7dc79c1ae8fbe 100644 --- a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameAuditorIT.java +++ b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameAuditorIT.java @@ -6,6 +6,7 @@ package org.elasticsearch.xpack.dataframe.integration; +import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.client.Request; import org.elasticsearch.xpack.dataframe.persistence.DataFrameInternalIndex; import org.junit.Before; @@ -22,6 +23,7 @@ import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.is; +@LuceneTestCase.AwaitsFix( bugUrl = "https://github.com/elastic/elasticsearch/issues/42344") public class DataFrameAuditorIT extends DataFrameRestTestCase { private static final String TEST_USER_NAME = "df_admin_plus_data"; diff --git a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameConfigurationIndexIT.java b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameConfigurationIndexIT.java index 681599331c8af..d7e12cf2bee4d 100644 --- a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameConfigurationIndexIT.java +++ b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameConfigurationIndexIT.java @@ -8,6 +8,7 @@ import org.apache.http.entity.ContentType; import org.apache.http.entity.StringEntity; +import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseException; @@ -22,6 +23,7 @@ import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +@LuceneTestCase.AwaitsFix( bugUrl = "https://github.com/elastic/elasticsearch/issues/42344") public class DataFrameConfigurationIndexIT extends DataFrameRestTestCase { /** diff --git a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameGetAndGetStatsIT.java b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameGetAndGetStatsIT.java index d9927cd09ed8f..9bac6ca0b4049 100644 --- a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameGetAndGetStatsIT.java +++ b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameGetAndGetStatsIT.java @@ -6,6 +6,7 @@ package org.elasticsearch.xpack.dataframe.integration; +import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.client.Request; import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.xpack.core.dataframe.DataFrameField; @@ -21,6 +22,7 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; +@LuceneTestCase.AwaitsFix( bugUrl = "https://github.com/elastic/elasticsearch/issues/42344") public class DataFrameGetAndGetStatsIT extends DataFrameRestTestCase { private static final String TEST_USER_NAME = "df_user"; diff --git a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameMetaDataIT.java b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameMetaDataIT.java index 26a957ea055c2..5b95d1daead53 100644 --- a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameMetaDataIT.java +++ b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameMetaDataIT.java @@ -6,6 +6,7 @@ package org.elasticsearch.xpack.dataframe.integration; +import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.Version; import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; @@ -15,6 +16,7 @@ import java.io.IOException; import java.util.Map; +@LuceneTestCase.AwaitsFix( bugUrl = "https://github.com/elastic/elasticsearch/issues/42344") public class DataFrameMetaDataIT extends DataFrameRestTestCase { private boolean indicesCreated = false; diff --git a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFramePivotRestIT.java b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFramePivotRestIT.java index 22586a7b37d27..a0bec6ec13c34 100644 --- a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFramePivotRestIT.java +++ b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFramePivotRestIT.java @@ -6,6 +6,7 @@ package org.elasticsearch.xpack.dataframe.integration; +import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.client.Request; import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.junit.Before; @@ -21,6 +22,7 @@ import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; +@LuceneTestCase.AwaitsFix( bugUrl = "https://github.com/elastic/elasticsearch/issues/42344") public class DataFramePivotRestIT extends DataFrameRestTestCase { private static final String TEST_USER_NAME = "df_admin_plus_data"; diff --git a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTaskFailedStateIT.java b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTaskFailedStateIT.java index 96aeeda8755f4..7b63644dd34ad 100644 --- a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTaskFailedStateIT.java +++ b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTaskFailedStateIT.java @@ -6,6 +6,7 @@ package org.elasticsearch.xpack.dataframe.integration; +import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.client.ResponseException; import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.rest.RestStatus; @@ -19,6 +20,7 @@ import static org.hamcrest.CoreMatchers.nullValue; import static org.hamcrest.Matchers.equalTo; +@LuceneTestCase.AwaitsFix( bugUrl = "https://github.com/elastic/elasticsearch/issues/42344") public class DataFrameTaskFailedStateIT extends DataFrameRestTestCase { public void testDummy() { diff --git a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameUsageIT.java b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameUsageIT.java index 4f209c5a9f3f4..f98fa6a271365 100644 --- a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameUsageIT.java +++ b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameUsageIT.java @@ -6,6 +6,7 @@ package org.elasticsearch.xpack.dataframe.integration; +import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; import org.elasticsearch.common.xcontent.support.XContentMapValues; @@ -22,6 +23,7 @@ import static org.elasticsearch.xpack.core.dataframe.DataFrameField.INDEX_DOC_TYPE; import static org.elasticsearch.xpack.dataframe.DataFrameFeatureSet.PROVIDED_STATS; +@LuceneTestCase.AwaitsFix( bugUrl = "https://github.com/elastic/elasticsearch/issues/42344") public class DataFrameUsageIT extends DataFrameRestTestCase { private boolean indicesCreated = false; From 0291f9464ebbcd4306932a459a6d99813ae72624 Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Sun, 26 May 2019 11:18:39 -0400 Subject: [PATCH 123/224] Unmute FullClusterRestartIT#testClosedIndices Fixed in #39566 Closes #39576 --- .../java/org/elasticsearch/upgrades/FullClusterRestartIT.java | 1 - 1 file changed, 1 deletion(-) diff --git a/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java b/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java index 50eee32810adc..190523a3bc7c6 100644 --- a/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java +++ b/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java @@ -961,7 +961,6 @@ public void testSoftDeletes() throws Exception { * This test creates an index in the old cluster and then closes it. When the cluster is fully restarted in a newer version, * it verifies that the index exists and is replicated if the old version supports replication. */ - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/39576") public void testClosedIndices() throws Exception { if (isRunningAgainstOldCluster()) { createIndex(index, Settings.builder() From 70b4f67747c1de0c25ce737e0232cbb106072046 Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Sun, 26 May 2019 16:03:42 -0400 Subject: [PATCH 124/224] Add debug log for retention leases (#42557) We need more information to understand why CcrRetentionLeaseIT is failing. This commit adds some debug log to retention leases and enables them in CcrRetentionLeaseIT. --- .../org/elasticsearch/index/seqno/ReplicationTracker.java | 7 ++++++- .../org/elasticsearch/xpack/ccr/CcrRetentionLeaseIT.java | 2 +- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/index/seqno/ReplicationTracker.java b/server/src/main/java/org/elasticsearch/index/seqno/ReplicationTracker.java index 892056674019f..cf0fe6a5d25e1 100644 --- a/server/src/main/java/org/elasticsearch/index/seqno/ReplicationTracker.java +++ b/server/src/main/java/org/elasticsearch/index/seqno/ReplicationTracker.java @@ -221,12 +221,15 @@ public synchronized Tuple getRetentionLeases(final boo .leases() .stream() .collect(Collectors.groupingBy(lease -> currentTimeMillis - lease.timestamp() > retentionLeaseMillis)); - if (partitionByExpiration.get(true) == null) { + final Collection expiredLeases = partitionByExpiration.get(true); + if (expiredLeases == null) { // early out as no retention leases have expired + logger.debug("no retention leases are expired from current retention leases [{}]", retentionLeases); return Tuple.tuple(false, retentionLeases); } final Collection nonExpiredLeases = partitionByExpiration.get(false) != null ? partitionByExpiration.get(false) : Collections.emptyList(); + logger.debug("expiring retention leases [{}] from current retention leases [{}]", expiredLeases, retentionLeases); retentionLeases = new RetentionLeases(operationPrimaryTerm, retentionLeases.version() + 1, nonExpiredLeases); return Tuple.tuple(true, retentionLeases); } @@ -255,6 +258,7 @@ public RetentionLease addRetentionLease( throw new RetentionLeaseAlreadyExistsException(id); } retentionLease = new RetentionLease(id, retainingSequenceNumber, currentTimeMillisSupplier.getAsLong(), source); + logger.debug("adding new retention lease [{}] to current retention leases [{}]", retentionLease, retentionLeases); retentionLeases = new RetentionLeases( operationPrimaryTerm, retentionLeases.version() + 1, @@ -312,6 +316,7 @@ public void removeRetentionLease(final String id, final ActionListener Date: Mon, 27 May 2019 09:21:30 +0200 Subject: [PATCH 125/224] Improve how internal representation of pipelines are updated (#42257) If a single pipeline is updated then the internal representation of all pipelines was updated. With this change, only the internal representation of the pipelines that have been modified will be updated. Prior to this change the IngestMetadata of the previous and current cluster was used to determine whether the internal representation of pipelines should be updated. If applying the previous cluster state change failed then subsequent cluster state changes that have no changes to IngestMetadata will not attempt to update the internal representation of the pipelines. This commit, changes how the IngestService updates the internal representation by keeping track of the underlying configuration and use that to detect against the new IngestMetadata whether a pipeline configuration has been changed and if so, then the internal pipeline representation will be updated. --- .../elasticsearch/ingest/IngestService.java | 219 +++++++++++------- .../ingest/IngestServiceTests.java | 70 +++++- 2 files changed, 203 insertions(+), 86 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/ingest/IngestService.java b/server/src/main/java/org/elasticsearch/ingest/IngestService.java index b2143d72ae65f..9e7d1b7b5bdbd 100644 --- a/server/src/main/java/org/elasticsearch/ingest/IngestService.java +++ b/server/src/main/java/org/elasticsearch/ingest/IngestService.java @@ -80,7 +80,7 @@ public class IngestService implements ClusterStateApplier { // We know of all the processor factories when a node with all its plugin have been initialized. Also some // processor factories rely on other node services. Custom metadata is statically registered when classes // are loaded, so in the cluster state we just save the pipeline config and here we keep the actual pipelines around. - private volatile Map pipelines = new HashMap<>(); + private volatile Map pipelines = Map.of(); private final ThreadPool threadPool; private final IngestMetric totalMetrics = new IngestMetric(); @@ -236,7 +236,12 @@ public ClusterState execute(ClusterState currentState) { * Returns the pipeline by the specified id */ public Pipeline getPipeline(String id) { - return pipelines.get(id); + PipelineHolder holder = pipelines.get(id); + if (holder != null) { + return holder.pipeline; + } else { + return null; + } } public Map getProcessorFactories() { @@ -252,52 +257,10 @@ public IngestInfo info() { return new IngestInfo(processorInfoList); } - Map pipelines() { + Map pipelines() { return pipelines; } - @Override - public void applyClusterState(final ClusterChangedEvent event) { - ClusterState state = event.state(); - Map originalPipelines = pipelines; - try { - innerUpdatePipelines(event.previousState(), state); - } catch (ElasticsearchParseException e) { - logger.warn("failed to update ingest pipelines", e); - } - //pipelines changed, so add the old metrics to the new metrics - if (originalPipelines != pipelines) { - pipelines.forEach((id, pipeline) -> { - Pipeline originalPipeline = originalPipelines.get(id); - if (originalPipeline != null) { - pipeline.getMetrics().add(originalPipeline.getMetrics()); - List> oldPerProcessMetrics = new ArrayList<>(); - List> newPerProcessMetrics = new ArrayList<>(); - getProcessorMetrics(originalPipeline.getCompoundProcessor(), oldPerProcessMetrics); - getProcessorMetrics(pipeline.getCompoundProcessor(), newPerProcessMetrics); - //Best attempt to populate new processor metrics using a parallel array of the old metrics. This is not ideal since - //the per processor metrics may get reset when the arrays don't match. However, to get to an ideal model, unique and - //consistent id's per processor and/or semantic equals for each processor will be needed. - if (newPerProcessMetrics.size() == oldPerProcessMetrics.size()) { - Iterator> oldMetricsIterator = oldPerProcessMetrics.iterator(); - for (Tuple compositeMetric : newPerProcessMetrics) { - String type = compositeMetric.v1().getType(); - IngestMetric metric = compositeMetric.v2(); - if (oldMetricsIterator.hasNext()) { - Tuple oldCompositeMetric = oldMetricsIterator.next(); - String oldType = oldCompositeMetric.v1().getType(); - IngestMetric oldMetric = oldCompositeMetric.v2(); - if (type.equals(oldType)) { - metric.add(oldMetric); - } - } - } - } - } - }); - } - } - /** * Recursive method to obtain all of the non-failure processors for given compoundProcessor. Since conditionals are implemented as * wrappers to the actual processor, always prefer the actual processor's metric over the conditional processor's metric. @@ -324,25 +287,6 @@ private static List> getProcessorMetrics(Compound return processorMetrics; } - private static Pipeline substitutePipeline(String id, ElasticsearchParseException e) { - String tag = e.getHeaderKeys().contains("processor_tag") ? e.getHeader("processor_tag").get(0) : null; - String type = e.getHeaderKeys().contains("processor_type") ? e.getHeader("processor_type").get(0) : "unknown"; - String errorMessage = "pipeline with id [" + id + "] could not be loaded, caused by [" + e.getDetailedMessage() + "]"; - Processor failureProcessor = new AbstractProcessor(tag) { - @Override - public IngestDocument execute(IngestDocument ingestDocument) { - throw new IllegalStateException(errorMessage); - } - - @Override - public String getType() { - return type; - } - }; - String description = "this is a place holder pipeline, because pipeline with id [" + id + "] could not be loaded"; - return new Pipeline(id, description, null, new CompoundProcessor(failureProcessor)); - } - static ClusterState innerPut(PutPipelineRequest request, ClusterState currentState) { IngestMetadata currentIngestMetadata = currentState.metaData().custom(IngestMetadata.TYPE); Map pipelines; @@ -403,10 +347,11 @@ protected void doRun() { String pipelineId = indexRequest.getPipeline(); if (NOOP_PIPELINE_NAME.equals(pipelineId) == false) { try { - Pipeline pipeline = pipelines.get(pipelineId); - if (pipeline == null) { + PipelineHolder holder = pipelines.get(pipelineId); + if (holder == null) { throw new IllegalArgumentException("pipeline with id [" + pipelineId + "] does not exist"); } + Pipeline pipeline = holder.pipeline; innerExecute(indexRequest, pipeline, itemDroppedHandler); //this shouldn't be needed here but we do it for consistency with index api // which requires it to prevent double execution @@ -424,7 +369,8 @@ protected void doRun() { public IngestStats stats() { IngestStats.Builder statsBuilder = new IngestStats.Builder(); statsBuilder.addTotalMetrics(totalMetrics); - pipelines.forEach((id, pipeline) -> { + pipelines.forEach((id, holder) -> { + Pipeline pipeline = holder.pipeline; CompoundProcessor rootProcessor = pipeline.getCompoundProcessor(); statsBuilder.addPipelineMetrics(id, pipeline.getMetrics()); List> processorMetrics = new ArrayList<>(); @@ -503,37 +449,146 @@ private void innerExecute(IndexRequest indexRequest, Pipeline pipeline, Consumer } } - private void innerUpdatePipelines(ClusterState previousState, ClusterState state) { + @Override + public void applyClusterState(final ClusterChangedEvent event) { + ClusterState state = event.state(); if (state.blocks().hasGlobalBlock(GatewayService.STATE_NOT_RECOVERED_BLOCK)) { return; } - IngestMetadata ingestMetadata = state.getMetaData().custom(IngestMetadata.TYPE); - IngestMetadata previousIngestMetadata = previousState.getMetaData().custom(IngestMetadata.TYPE); - if (Objects.equals(ingestMetadata, previousIngestMetadata)) { + IngestMetadata newIngestMetadata = state.getMetaData().custom(IngestMetadata.TYPE); + if (newIngestMetadata == null) { return; } - Map pipelines = new HashMap<>(); - List exceptions = new ArrayList<>(); - for (PipelineConfiguration pipeline : ingestMetadata.getPipelines().values()) { + try { + innerUpdatePipelines(newIngestMetadata); + } catch (ElasticsearchParseException e) { + logger.warn("failed to update ingest pipelines", e); + } + } + + void innerUpdatePipelines(IngestMetadata newIngestMetadata) { + Map existingPipelines = this.pipelines; + + // Lazy initialize these variables in order to favour the most like scenario that there are no pipeline changes: + Map newPipelines = null; + List exceptions = null; + // Iterate over pipeline configurations in ingest metadata and constructs a new pipeline if there is no pipeline + // or the pipeline configuration has been modified + for (PipelineConfiguration newConfiguration : newIngestMetadata.getPipelines().values()) { + PipelineHolder previous = existingPipelines.get(newConfiguration.getId()); + if (previous != null && previous.configuration.equals(newConfiguration)) { + continue; + } + + if (newPipelines == null) { + newPipelines = new HashMap<>(existingPipelines); + } try { - pipelines.put( - pipeline.getId(), - Pipeline.create(pipeline.getId(), pipeline.getConfigAsMap(), processorFactories, scriptService) + Pipeline newPipeline = + Pipeline.create(newConfiguration.getId(), newConfiguration.getConfigAsMap(), processorFactories, scriptService); + newPipelines.put( + newConfiguration.getId(), + new PipelineHolder(newConfiguration, newPipeline) ); + + if (previous == null) { + continue; + } + Pipeline oldPipeline = previous.pipeline; + newPipeline.getMetrics().add(oldPipeline.getMetrics()); + List> oldPerProcessMetrics = new ArrayList<>(); + List> newPerProcessMetrics = new ArrayList<>(); + getProcessorMetrics(oldPipeline.getCompoundProcessor(), oldPerProcessMetrics); + getProcessorMetrics(newPipeline.getCompoundProcessor(), newPerProcessMetrics); + //Best attempt to populate new processor metrics using a parallel array of the old metrics. This is not ideal since + //the per processor metrics may get reset when the arrays don't match. However, to get to an ideal model, unique and + //consistent id's per processor and/or semantic equals for each processor will be needed. + if (newPerProcessMetrics.size() == oldPerProcessMetrics.size()) { + Iterator> oldMetricsIterator = oldPerProcessMetrics.iterator(); + for (Tuple compositeMetric : newPerProcessMetrics) { + String type = compositeMetric.v1().getType(); + IngestMetric metric = compositeMetric.v2(); + if (oldMetricsIterator.hasNext()) { + Tuple oldCompositeMetric = oldMetricsIterator.next(); + String oldType = oldCompositeMetric.v1().getType(); + IngestMetric oldMetric = oldCompositeMetric.v2(); + if (type.equals(oldType)) { + metric.add(oldMetric); + } + } + } + } } catch (ElasticsearchParseException e) { - pipelines.put(pipeline.getId(), substitutePipeline(pipeline.getId(), e)); + Pipeline pipeline = substitutePipeline(newConfiguration.getId(), e); + newPipelines.put(newConfiguration.getId(), new PipelineHolder(newConfiguration, pipeline)); + if (exceptions == null) { + exceptions = new ArrayList<>(); + } exceptions.add(e); } catch (Exception e) { ElasticsearchParseException parseException = new ElasticsearchParseException( - "Error updating pipeline with id [" + pipeline.getId() + "]", e); - pipelines.put(pipeline.getId(), substitutePipeline(pipeline.getId(), parseException)); + "Error updating pipeline with id [" + newConfiguration.getId() + "]", e); + Pipeline pipeline = substitutePipeline(newConfiguration.getId(), parseException); + newPipelines.put(newConfiguration.getId(), new PipelineHolder(newConfiguration, pipeline)); + if (exceptions == null) { + exceptions = new ArrayList<>(); + } exceptions.add(parseException); } } - this.pipelines = Collections.unmodifiableMap(pipelines); - ExceptionsHelper.rethrowAndSuppress(exceptions); + + // Iterate over the current active pipelines and check whether they are missing in the pipeline configuration and + // if so delete the pipeline from new Pipelines map: + for (Map.Entry entry : existingPipelines.entrySet()) { + if (newIngestMetadata.getPipelines().get(entry.getKey()) == null) { + if (newPipelines == null) { + newPipelines = new HashMap<>(existingPipelines); + } + newPipelines.remove(entry.getKey()); + } + } + + if (newPipelines != null) { + // Update the pipelines: + this.pipelines = Map.copyOf(newPipelines); + + // Rethrow errors that may have occurred during creating new pipeline instances: + if (exceptions != null) { + ExceptionsHelper.rethrowAndSuppress(exceptions); + } + } + } + + private static Pipeline substitutePipeline(String id, ElasticsearchParseException e) { + String tag = e.getHeaderKeys().contains("processor_tag") ? e.getHeader("processor_tag").get(0) : null; + String type = e.getHeaderKeys().contains("processor_type") ? e.getHeader("processor_type").get(0) : "unknown"; + String errorMessage = "pipeline with id [" + id + "] could not be loaded, caused by [" + e.getDetailedMessage() + "]"; + Processor failureProcessor = new AbstractProcessor(tag) { + @Override + public IngestDocument execute(IngestDocument ingestDocument) { + throw new IllegalStateException(errorMessage); + } + + @Override + public String getType() { + return type; + } + }; + String description = "this is a place holder pipeline, because pipeline with id [" + id + "] could not be loaded"; + return new Pipeline(id, description, null, new CompoundProcessor(failureProcessor)); + } + + static class PipelineHolder { + + final PipelineConfiguration configuration; + final Pipeline pipeline; + + PipelineHolder(PipelineConfiguration configuration, Pipeline pipeline) { + this.configuration = Objects.requireNonNull(configuration); + this.pipeline = Objects.requireNonNull(pipeline); + } } } diff --git a/server/src/test/java/org/elasticsearch/ingest/IngestServiceTests.java b/server/src/test/java/org/elasticsearch/ingest/IngestServiceTests.java index e5aea1f5d5ce1..43e2a8a584979 100644 --- a/server/src/test/java/org/elasticsearch/ingest/IngestServiceTests.java +++ b/server/src/test/java/org/elasticsearch/ingest/IngestServiceTests.java @@ -153,10 +153,72 @@ public void testUpdatePipelines() { .build(); ingestService.applyClusterState(new ClusterChangedEvent("", clusterState, previousClusterState)); assertThat(ingestService.pipelines().size(), is(1)); - assertThat(ingestService.pipelines().get("_id").getId(), equalTo("_id")); - assertThat(ingestService.pipelines().get("_id").getDescription(), nullValue()); - assertThat(ingestService.pipelines().get("_id").getProcessors().size(), equalTo(1)); - assertThat(ingestService.pipelines().get("_id").getProcessors().get(0).getType(), equalTo("set")); + assertThat(ingestService.pipelines().get("_id").pipeline.getId(), equalTo("_id")); + assertThat(ingestService.pipelines().get("_id").pipeline.getDescription(), nullValue()); + assertThat(ingestService.pipelines().get("_id").pipeline.getProcessors().size(), equalTo(1)); + assertThat(ingestService.pipelines().get("_id").pipeline.getProcessors().get(0).getType(), equalTo("set")); + } + + public void testInnerUpdatePipelines() { + IngestService ingestService = createWithProcessors(); + assertThat(ingestService.pipelines().size(), is(0)); + + PipelineConfiguration pipeline1 = new PipelineConfiguration("_id1", new BytesArray("{\"processors\": []}"), XContentType.JSON); + IngestMetadata ingestMetadata = new IngestMetadata(Map.of("_id1", pipeline1)); + + ingestService.innerUpdatePipelines(ingestMetadata); + assertThat(ingestService.pipelines().size(), is(1)); + assertThat(ingestService.pipelines().get("_id1").pipeline.getId(), equalTo("_id1")); + assertThat(ingestService.pipelines().get("_id1").pipeline.getProcessors().size(), equalTo(0)); + + PipelineConfiguration pipeline2 = new PipelineConfiguration("_id2", new BytesArray("{\"processors\": []}"), XContentType.JSON); + ingestMetadata = new IngestMetadata(Map.of("_id1", pipeline1, "_id2", pipeline2)); + + ingestService.innerUpdatePipelines(ingestMetadata); + assertThat(ingestService.pipelines().size(), is(2)); + assertThat(ingestService.pipelines().get("_id1").pipeline.getId(), equalTo("_id1")); + assertThat(ingestService.pipelines().get("_id1").pipeline.getProcessors().size(), equalTo(0)); + assertThat(ingestService.pipelines().get("_id2").pipeline.getId(), equalTo("_id2")); + assertThat(ingestService.pipelines().get("_id2").pipeline.getProcessors().size(), equalTo(0)); + + PipelineConfiguration pipeline3 = new PipelineConfiguration("_id3", new BytesArray("{\"processors\": []}"), XContentType.JSON); + ingestMetadata = new IngestMetadata(Map.of("_id1", pipeline1, "_id2", pipeline2, "_id3", pipeline3)); + + ingestService.innerUpdatePipelines(ingestMetadata); + assertThat(ingestService.pipelines().size(), is(3)); + assertThat(ingestService.pipelines().get("_id1").pipeline.getId(), equalTo("_id1")); + assertThat(ingestService.pipelines().get("_id1").pipeline.getProcessors().size(), equalTo(0)); + assertThat(ingestService.pipelines().get("_id2").pipeline.getId(), equalTo("_id2")); + assertThat(ingestService.pipelines().get("_id2").pipeline.getProcessors().size(), equalTo(0)); + assertThat(ingestService.pipelines().get("_id3").pipeline.getId(), equalTo("_id3")); + assertThat(ingestService.pipelines().get("_id3").pipeline.getProcessors().size(), equalTo(0)); + + ingestMetadata = new IngestMetadata(Map.of("_id1", pipeline1, "_id3", pipeline3)); + + ingestService.innerUpdatePipelines(ingestMetadata); + assertThat(ingestService.pipelines().size(), is(2)); + assertThat(ingestService.pipelines().get("_id1").pipeline.getId(), equalTo("_id1")); + assertThat(ingestService.pipelines().get("_id1").pipeline.getProcessors().size(), equalTo(0)); + assertThat(ingestService.pipelines().get("_id3").pipeline.getId(), equalTo("_id3")); + assertThat(ingestService.pipelines().get("_id3").pipeline.getProcessors().size(), equalTo(0)); + + pipeline3 = new PipelineConfiguration( + "_id3",new BytesArray("{\"processors\": [{\"set\" : {\"field\": \"_field\", \"value\": \"_value\"}}]}"), XContentType.JSON + ); + ingestMetadata = new IngestMetadata(Map.of("_id1", pipeline1, "_id3", pipeline3)); + + ingestService.innerUpdatePipelines(ingestMetadata); + assertThat(ingestService.pipelines().size(), is(2)); + assertThat(ingestService.pipelines().get("_id1").pipeline.getId(), equalTo("_id1")); + assertThat(ingestService.pipelines().get("_id1").pipeline.getProcessors().size(), equalTo(0)); + assertThat(ingestService.pipelines().get("_id3").pipeline.getId(), equalTo("_id3")); + assertThat(ingestService.pipelines().get("_id3").pipeline.getProcessors().size(), equalTo(1)); + assertThat(ingestService.pipelines().get("_id3").pipeline.getProcessors().get(0).getType(), equalTo("set")); + + // Perform an update with no changes: + Map pipelines = ingestService.pipelines(); + ingestService.innerUpdatePipelines(ingestMetadata); + assertThat(ingestService.pipelines(), sameInstance(pipelines)); } public void testDelete() { From cdb482eaae0517f202efdfaf445e8847917182f1 Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Mon, 27 May 2019 11:31:42 +0200 Subject: [PATCH 126/224] Fix RareClusterStateIT (#42430) * It looks like we might be cancelling a previous publication instead of the one triggered by the given request with a very low likelihood. * Fixed by adding a wait for no in-progress publications * Also added debug logging that would've identified this problem * Closes #36813 --- .../cluster/coordination/Coordinator.java | 10 +++++++--- .../cluster/coordination/RareClusterStateIT.java | 4 ++++ 2 files changed, 11 insertions(+), 3 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java b/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java index 1e7b38e50d1e9..376dd640c56b2 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java @@ -1165,9 +1165,13 @@ public Iterable getFoundPeers() { */ boolean cancelCommittedPublication() { synchronized (mutex) { - if (currentPublication.isPresent() && currentPublication.get().isCommitted()) { - currentPublication.get().cancel("cancelCommittedPublication"); - return true; + if (currentPublication.isPresent()) { + final CoordinatorPublication publication = currentPublication.get(); + if (publication.isCommitted()) { + publication.cancel("cancelCommittedPublication"); + logger.debug("Cancelled publication of [{}].", publication); + return true; + } } return false; } diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/RareClusterStateIT.java b/server/src/test/java/org/elasticsearch/cluster/coordination/RareClusterStateIT.java index 27036680880b2..62491724b9221 100644 --- a/server/src/test/java/org/elasticsearch/cluster/coordination/RareClusterStateIT.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/RareClusterStateIT.java @@ -141,6 +141,10 @@ public void onFailure(String source, Exception e) { private ActionFuture executeAndCancelCommittedPublication( ActionRequestBuilder req) throws Exception { + // Wait for no publication in progress to not accidentally cancel a publication different from the one triggered by the given + // request. + assertBusy( + () -> assertFalse(((Coordinator) internalCluster().getCurrentMasterNodeInstance(Discovery.class)).publicationInProgress())); ActionFuture future = req.execute(); assertBusy( () -> assertTrue(((Coordinator)internalCluster().getCurrentMasterNodeInstance(Discovery.class)).cancelCommittedPublication())); From 71e978bcc03b568afa945570315a37d346a2d364 Mon Sep 17 00:00:00 2001 From: bellengao Date: Mon, 27 May 2019 17:47:46 +0800 Subject: [PATCH 127/224] Update script-fields.asciidoc (#42490) --- docs/reference/search/request/script-fields.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/reference/search/request/script-fields.asciidoc b/docs/reference/search/request/script-fields.asciidoc index da5868ea7d65e..1bd61e0048182 100644 --- a/docs/reference/search/request/script-fields.asciidoc +++ b/docs/reference/search/request/script-fields.asciidoc @@ -33,7 +33,7 @@ GET /_search // CONSOLE // TEST[setup:sales] -Script fields can work on fields that are not stored (`my_field_name` in +Script fields can work on fields that are not stored (`price` in the above case), and allow to return custom values to be returned (the evaluated value of the script). From 50ed840e5275f89e868b554c387f6b2e76093370 Mon Sep 17 00:00:00 2001 From: Travis Steel Date: Mon, 27 May 2019 04:53:51 -0500 Subject: [PATCH 128/224] Fixed typo in docker.asciidoc (#42455) --- docs/reference/setup/install/docker.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/reference/setup/install/docker.asciidoc b/docs/reference/setup/install/docker.asciidoc index 1fcc261d68e1f..9037a292168de 100644 --- a/docs/reference/setup/install/docker.asciidoc +++ b/docs/reference/setup/install/docker.asciidoc @@ -332,7 +332,7 @@ data through a bind-mount: As a last resort, you can also force the container to mutate the ownership of any bind-mounts used for the <> through the -environment variable `TAKE_FILE_OWNERSHIP`. Inn this case, they will be owned by +environment variable `TAKE_FILE_OWNERSHIP`. In this case, they will be owned by uid:gid `1000:0` providing read/write access to the {es} process as required. -- From c57b5750874da9050d2a773bd92169a91b8eb589 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Przemys=C5=82aw=20Witek?= Date: Mon, 27 May 2019 12:57:01 +0200 Subject: [PATCH 129/224] Remove unused mapStringsOrdered method (#42513) Remove unused mapStringsOrdered method --- .../common/xcontent/XContentParser.java | 4 +--- .../common/xcontent/XContentSubParser.java | 5 ----- .../support/AbstractXContentParser.java | 17 +---------------- .../common/xcontent/XContentParserTests.java | 2 +- .../support/xcontent/WatcherXContentParser.java | 5 ----- 5 files changed, 3 insertions(+), 30 deletions(-) diff --git a/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/XContentParser.java b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/XContentParser.java index 6d4da08bfaa59..93321048d86c4 100644 --- a/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/XContentParser.java +++ b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/XContentParser.java @@ -136,11 +136,9 @@ enum NumberType { Map mapStrings() throws IOException; - Map mapStringsOrdered() throws IOException; - /** * Returns an instance of {@link Map} holding parsed map. - * Serves as a replacement for the "map", "mapOrdered", "mapStrings" and "mapStringsOrdered" methods above. + * Serves as a replacement for the "map", "mapOrdered" and "mapStrings" methods above. * * @param mapFactory factory for creating new {@link Map} objects * @param mapValueParser parser for parsing a single map value diff --git a/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/XContentSubParser.java b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/XContentSubParser.java index 252bfea7ca9c0..9a8686001e2dc 100644 --- a/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/XContentSubParser.java +++ b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/XContentSubParser.java @@ -108,11 +108,6 @@ public Map mapStrings() throws IOException { return parser.mapStrings(); } - @Override - public Map mapStringsOrdered() throws IOException { - return parser.mapStringsOrdered(); - } - @Override public Map map( Supplier> mapFactory, CheckedFunction mapValueParser) throws IOException { diff --git a/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/support/AbstractXContentParser.java b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/support/AbstractXContentParser.java index 68e03e34a1a17..043293b0cc319 100644 --- a/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/support/AbstractXContentParser.java +++ b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/support/AbstractXContentParser.java @@ -276,11 +276,6 @@ public Map mapStrings() throws IOException { return readMapStrings(this); } - @Override - public Map mapStringsOrdered() throws IOException { - return readOrderedMapStrings(this); - } - @Override public Map map( Supplier> mapFactory, CheckedFunction mapValueParser) throws IOException { @@ -303,8 +298,6 @@ public List listOrderedMap() throws IOException { static final Supplier> SIMPLE_MAP_STRINGS_FACTORY = HashMap::new; - static final Supplier> ORDERED_MAP_STRINGS_FACTORY = LinkedHashMap::new; - static Map readMap(XContentParser parser) throws IOException { return readMap(parser, SIMPLE_MAP_FACTORY); } @@ -314,11 +307,7 @@ static Map readOrderedMap(XContentParser parser) throws IOExcept } static Map readMapStrings(XContentParser parser) throws IOException { - return readMapStrings(parser, SIMPLE_MAP_STRINGS_FACTORY); - } - - static Map readOrderedMapStrings(XContentParser parser) throws IOException { - return readMapStrings(parser, ORDERED_MAP_STRINGS_FACTORY); + return readGenericMap(parser, SIMPLE_MAP_STRINGS_FACTORY, XContentParser::text); } static List readList(XContentParser parser) throws IOException { @@ -333,10 +322,6 @@ static Map readMap(XContentParser parser, Supplier readValue(p, mapFactory)); } - static Map readMapStrings(XContentParser parser, Supplier> mapFactory) throws IOException { - return readGenericMap(parser, mapFactory, XContentParser::text); - } - static Map readGenericMap( XContentParser parser, Supplier> mapFactory, diff --git a/libs/x-content/src/test/java/org/elasticsearch/common/xcontent/XContentParserTests.java b/libs/x-content/src/test/java/org/elasticsearch/common/xcontent/XContentParserTests.java index c519880224ccb..31a00c4025ab2 100644 --- a/libs/x-content/src/test/java/org/elasticsearch/common/xcontent/XContentParserTests.java +++ b/libs/x-content/src/test/java/org/elasticsearch/common/xcontent/XContentParserTests.java @@ -189,7 +189,7 @@ private Map readMapStrings(String source) throws IOException { assertThat(parser.currentName(), equalTo("foo")); token = parser.nextToken(); assertThat(token, equalTo(XContentParser.Token.START_OBJECT)); - return randomBoolean() ? parser.mapStringsOrdered() : parser.mapStrings(); + return parser.mapStrings(); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/support/xcontent/WatcherXContentParser.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/support/xcontent/WatcherXContentParser.java index 1d155a5f0c02d..20b0086c1e4e2 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/support/xcontent/WatcherXContentParser.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/support/xcontent/WatcherXContentParser.java @@ -120,11 +120,6 @@ public Map mapStrings() throws IOException { return parser.mapStrings(); } - @Override - public Map mapStringsOrdered() throws IOException { - return parser.mapStringsOrdered(); - } - @Override public Map map( Supplier> mapFactory, CheckedFunction mapValueParser) throws IOException { From 074da02f441b0407b4da446ab877db6444ea1109 Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Mon, 27 May 2019 19:29:51 +0200 Subject: [PATCH 130/224] Dry up BlobStoreRepository#basePath Implementations (#42578) * This method is just a getter in every implementation => moved the field and concrete getter to the base class to simplify implementations --- .../repositories/url/URLRepository.java | 10 +----- .../repositories/azure/AzureRepository.java | 34 ++++++++----------- .../gcs/GoogleCloudStorageRepository.java | 25 ++++++-------- .../repositories/hdfs/HdfsRepository.java | 8 +---- .../repositories/s3/S3Repository.java | 25 ++++++-------- .../blobstore/BlobStoreRepository.java | 10 ++++-- .../repositories/fs/FsRepository.java | 10 +----- 7 files changed, 47 insertions(+), 75 deletions(-) diff --git a/modules/repository-url/src/main/java/org/elasticsearch/repositories/url/URLRepository.java b/modules/repository-url/src/main/java/org/elasticsearch/repositories/url/URLRepository.java index 0ea2a1b72c574..a27b091cfc037 100644 --- a/modules/repository-url/src/main/java/org/elasticsearch/repositories/url/URLRepository.java +++ b/modules/repository-url/src/main/java/org/elasticsearch/repositories/url/URLRepository.java @@ -75,8 +75,6 @@ public class URLRepository extends BlobStoreRepository { private final Environment environment; - private final BlobPath basePath; - private final URL url; /** @@ -84,7 +82,7 @@ public class URLRepository extends BlobStoreRepository { */ public URLRepository(RepositoryMetaData metadata, Environment environment, NamedXContentRegistry namedXContentRegistry, ThreadPool threadPool) { - super(metadata, environment.settings(), namedXContentRegistry, threadPool); + super(metadata, environment.settings(), namedXContentRegistry, threadPool, BlobPath.cleanPath()); if (URL_SETTING.exists(metadata.settings()) == false && REPOSITORIES_URL_SETTING.exists(environment.settings()) == false) { throw new RepositoryException(metadata.name(), "missing url"); @@ -92,7 +90,6 @@ public URLRepository(RepositoryMetaData metadata, Environment environment, this.environment = environment; supportedProtocols = SUPPORTED_PROTOCOLS_SETTING.get(environment.settings()); urlWhiteList = ALLOWED_URLS_SETTING.get(environment.settings()).toArray(new URIPattern[]{}); - basePath = BlobPath.cleanPath(); url = URL_SETTING.exists(metadata.settings()) ? URL_SETTING.get(metadata.settings()) : REPOSITORIES_URL_SETTING.get(environment.settings()); } @@ -115,11 +112,6 @@ protected BlobStore getBlobStore() { return super.getBlobStore(); } - @Override - protected BlobPath basePath() { - return basePath; - } - /** * Makes sure that the url is white listed or if it points to the local file system it matches one on of the root path in path.repo */ diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java index 7c3520918fc58..403ef10d3444d 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java @@ -79,17 +79,27 @@ public static final class Repository { public static final Setting READONLY_SETTING = Setting.boolSetting("readonly", false, Property.NodeScope); } - private final BlobPath basePath; private final ByteSizeValue chunkSize; private final AzureStorageService storageService; private final boolean readonly; public AzureRepository(RepositoryMetaData metadata, Environment environment, NamedXContentRegistry namedXContentRegistry, AzureStorageService storageService, ThreadPool threadPool) { - super(metadata, environment.settings(), namedXContentRegistry, threadPool); + super(metadata, environment.settings(), namedXContentRegistry, threadPool, buildBasePath(metadata)); this.chunkSize = Repository.CHUNK_SIZE_SETTING.get(metadata.settings()); this.storageService = storageService; + // If the user explicitly did not define a readonly value, we set it by ourselves depending on the location mode setting. + // For secondary_only setting, the repository should be read only + final LocationMode locationMode = Repository.LOCATION_MODE_SETTING.get(metadata.settings()); + if (Repository.READONLY_SETTING.exists(metadata.settings())) { + this.readonly = Repository.READONLY_SETTING.get(metadata.settings()); + } else { + this.readonly = locationMode == LocationMode.SECONDARY_ONLY; + } + } + + private static BlobPath buildBasePath(RepositoryMetaData metadata) { final String basePath = Strings.trimLeadingCharacter(Repository.BASE_PATH_SETTING.get(metadata.settings()), '/'); if (Strings.hasLength(basePath)) { // Remove starting / if any @@ -97,18 +107,9 @@ public AzureRepository(RepositoryMetaData metadata, Environment environment, Nam for(final String elem : basePath.split("/")) { path = path.add(elem); } - this.basePath = path; - } else { - this.basePath = BlobPath.cleanPath(); - } - - // If the user explicitly did not define a readonly value, we set it by ourselves depending on the location mode setting. - // For secondary_only setting, the repository should be read only - final LocationMode locationMode = Repository.LOCATION_MODE_SETTING.get(metadata.settings()); - if (Repository.READONLY_SETTING.exists(metadata.settings())) { - this.readonly = Repository.READONLY_SETTING.get(metadata.settings()); + return path; } else { - this.readonly = locationMode == LocationMode.SECONDARY_ONLY; + return BlobPath.cleanPath(); } } @@ -123,15 +124,10 @@ protected AzureBlobStore createBlobStore() { logger.debug(() -> new ParameterizedMessage( "using container [{}], chunk_size [{}], compress [{}], base_path [{}]", - blobStore, chunkSize, isCompress(), basePath)); + blobStore, chunkSize, isCompress(), basePath())); return blobStore; } - @Override - protected BlobPath basePath() { - return basePath; - } - @Override protected ByteSizeValue chunkSize() { return chunkSize; diff --git a/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageRepository.java b/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageRepository.java index 804fafd5e855e..6382a537c4682 100644 --- a/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageRepository.java +++ b/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageRepository.java @@ -57,7 +57,6 @@ class GoogleCloudStorageRepository extends BlobStoreRepository { static final Setting CLIENT_NAME = new Setting<>("client", "default", Function.identity()); private final GoogleCloudStorageService storageService; - private final BlobPath basePath; private final ByteSizeValue chunkSize; private final String bucket; private final String clientName; @@ -65,24 +64,27 @@ class GoogleCloudStorageRepository extends BlobStoreRepository { GoogleCloudStorageRepository(RepositoryMetaData metadata, Environment environment, NamedXContentRegistry namedXContentRegistry, GoogleCloudStorageService storageService, ThreadPool threadPool) { - super(metadata, environment.settings(), namedXContentRegistry, threadPool); + super(metadata, environment.settings(), namedXContentRegistry, threadPool, buildBasePath(metadata)); this.storageService = storageService; + this.chunkSize = getSetting(CHUNK_SIZE, metadata); + this.bucket = getSetting(BUCKET, metadata); + this.clientName = CLIENT_NAME.get(metadata.settings()); + logger.debug( + "using bucket [{}], base_path [{}], chunk_size [{}], compress [{}]", bucket, basePath(), chunkSize, isCompress()); + } + + private static BlobPath buildBasePath(RepositoryMetaData metadata) { String basePath = BASE_PATH.get(metadata.settings()); if (Strings.hasLength(basePath)) { BlobPath path = new BlobPath(); for (String elem : basePath.split("/")) { path = path.add(elem); } - this.basePath = path; + return path; } else { - this.basePath = BlobPath.cleanPath(); + return BlobPath.cleanPath(); } - - this.chunkSize = getSetting(CHUNK_SIZE, metadata); - this.bucket = getSetting(BUCKET, metadata); - this.clientName = CLIENT_NAME.get(metadata.settings()); - logger.debug("using bucket [{}], base_path [{}], chunk_size [{}], compress [{}]", bucket, basePath, chunkSize, isCompress()); } @Override @@ -90,11 +92,6 @@ protected GoogleCloudStorageBlobStore createBlobStore() { return new GoogleCloudStorageBlobStore(bucket, clientName, storageService); } - @Override - protected BlobPath basePath() { - return basePath; - } - @Override protected ByteSizeValue chunkSize() { return chunkSize; diff --git a/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsRepository.java b/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsRepository.java index b614753d83883..b51f843162a74 100644 --- a/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsRepository.java +++ b/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsRepository.java @@ -59,7 +59,6 @@ public final class HdfsRepository extends BlobStoreRepository { private final Environment environment; private final ByteSizeValue chunkSize; - private final BlobPath basePath = BlobPath.cleanPath(); private final URI uri; private final String pathSetting; @@ -69,7 +68,7 @@ public final class HdfsRepository extends BlobStoreRepository { public HdfsRepository(RepositoryMetaData metadata, Environment environment, NamedXContentRegistry namedXContentRegistry, ThreadPool threadPool) { - super(metadata, environment.settings(), namedXContentRegistry, threadPool); + super(metadata, environment.settings(), namedXContentRegistry, threadPool, BlobPath.cleanPath()); this.environment = environment; this.chunkSize = metadata.settings().getAsBytesSize("chunk_size", null); @@ -233,11 +232,6 @@ protected HdfsBlobStore createBlobStore() { return blobStore; } - @Override - protected BlobPath basePath() { - return basePath; - } - @Override protected ByteSizeValue chunkSize() { return chunkSize; diff --git a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java index a91e7bf663f92..c099d5a2e064c 100644 --- a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java +++ b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java @@ -150,8 +150,6 @@ class S3Repository extends BlobStoreRepository { private final ByteSizeValue chunkSize; - private final BlobPath basePath; - private final boolean serverSideEncryption; private final String storageClass; @@ -165,7 +163,7 @@ class S3Repository extends BlobStoreRepository { final Settings settings, final NamedXContentRegistry namedXContentRegistry, final S3Service service, final ThreadPool threadPool) { - super(metadata, settings, namedXContentRegistry, threadPool); + super(metadata, settings, namedXContentRegistry, threadPool, buildBasePath(metadata)); this.service = service; // Parse and validate the user's S3 Storage Class setting @@ -183,13 +181,6 @@ class S3Repository extends BlobStoreRepository { ") can't be lower than " + BUFFER_SIZE_SETTING.getKey() + " (" + bufferSize + ")."); } - final String basePath = BASE_PATH_SETTING.get(metadata.settings()); - if (Strings.hasLength(basePath)) { - this.basePath = new BlobPath().add(basePath); - } else { - this.basePath = BlobPath.cleanPath(); - } - this.serverSideEncryption = SERVER_SIDE_ENCRYPTION_SETTING.get(metadata.settings()); this.storageClass = STORAGE_CLASS_SETTING.get(metadata.settings()); @@ -211,6 +202,15 @@ class S3Repository extends BlobStoreRepository { storageClass); } + private static BlobPath buildBasePath(RepositoryMetaData metadata) { + final String basePath = BASE_PATH_SETTING.get(metadata.settings()); + if (Strings.hasLength(basePath)) { + return new BlobPath().add(basePath); + } else { + return BlobPath.cleanPath(); + } + } + @Override protected S3BlobStore createBlobStore() { return new S3BlobStore(service, bucket, serverSideEncryption, bufferSize, cannedACL, storageClass, metadata); @@ -228,11 +228,6 @@ protected BlobStore getBlobStore() { return super.getBlobStore(); } - @Override - protected BlobPath basePath() { - return basePath; - } - @Override protected ByteSizeValue chunkSize() { return chunkSize; diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java index 49b551b26b796..1cb50f0f1a0da 100644 --- a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java +++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java @@ -197,6 +197,8 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp private final SetOnce blobStore = new SetOnce<>(); + private final BlobPath basePath; + /** * Constructs new BlobStoreRepository * @param metadata The metadata for this repository including name and settings @@ -204,7 +206,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp * @param threadPool Threadpool to run long running repository manipulations on asynchronously */ protected BlobStoreRepository(RepositoryMetaData metadata, Settings settings, NamedXContentRegistry namedXContentRegistry, - ThreadPool threadPool) { + ThreadPool threadPool, BlobPath basePath) { this.settings = settings; this.metadata = metadata; this.threadPool = threadPool; @@ -212,6 +214,7 @@ protected BlobStoreRepository(RepositoryMetaData metadata, Settings settings, Na snapshotRateLimiter = getRateLimiter(metadata.settings(), "max_snapshot_bytes_per_sec", new ByteSizeValue(40, ByteSizeUnit.MB)); restoreRateLimiter = getRateLimiter(metadata.settings(), "max_restore_bytes_per_sec", new ByteSizeValue(40, ByteSizeUnit.MB)); readOnly = metadata.settings().getAsBoolean("readonly", false); + this.basePath = basePath; indexShardSnapshotFormat = new ChecksumBlobStoreFormat<>(SNAPSHOT_CODEC, SNAPSHOT_NAME_FORMAT, BlobStoreIndexShardSnapshot::fromXContent, namedXContentRegistry, compress); @@ -317,8 +320,11 @@ protected BlobStore blobStore() { /** * Returns base path of the repository + * Public for testing. */ - protected abstract BlobPath basePath(); + public BlobPath basePath() { + return basePath; + } /** * Returns true if metadata and snapshot files should be compressed diff --git a/server/src/main/java/org/elasticsearch/repositories/fs/FsRepository.java b/server/src/main/java/org/elasticsearch/repositories/fs/FsRepository.java index 8f495f2d4842a..f338e0ee4cb08 100644 --- a/server/src/main/java/org/elasticsearch/repositories/fs/FsRepository.java +++ b/server/src/main/java/org/elasticsearch/repositories/fs/FsRepository.java @@ -66,14 +66,12 @@ public class FsRepository extends BlobStoreRepository { private final ByteSizeValue chunkSize; - private final BlobPath basePath; - /** * Constructs a shared file system repository. */ public FsRepository(RepositoryMetaData metadata, Environment environment, NamedXContentRegistry namedXContentRegistry, ThreadPool threadPool) { - super(metadata, environment.settings(), namedXContentRegistry, threadPool); + super(metadata, environment.settings(), namedXContentRegistry, threadPool, BlobPath.cleanPath()); this.environment = environment; String location = REPOSITORIES_LOCATION_SETTING.get(metadata.settings()); if (location.isEmpty()) { @@ -101,7 +99,6 @@ public FsRepository(RepositoryMetaData metadata, Environment environment, NamedX } else { this.chunkSize = REPOSITORIES_CHUNK_SIZE_SETTING.get(environment.settings()); } - this.basePath = BlobPath.cleanPath(); } @Override @@ -115,9 +112,4 @@ protected BlobStore createBlobStore() throws Exception { protected ByteSizeValue chunkSize() { return chunkSize; } - - @Override - protected BlobPath basePath() { - return basePath; - } } From 349d2ce153d4be78bf0d8ae2472bfc88e18530c1 Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Mon, 27 May 2019 22:40:23 +0200 Subject: [PATCH 131/224] Add Infrastructure to Run 3rd Party Repository Tests (#42586) * Add Infrastructure to Run 3rd Party Repository Tests * Add infrastructure to run third party repository tests using our standard JUnit infrastructure * This is a prerequisite of #42189 --- plugins/repository-azure/build.gradle | 21 +++++ .../AzureStorageCleanupThirdPartyTests.java | 65 +++++++++++++ plugins/repository-gcs/build.gradle | 21 +++++ .../GoogleCloudStorageThirdPartyTests.java | 64 +++++++++++++ plugins/repository-s3/build.gradle | 43 ++++++++- .../s3/S3RepositoryThirdPartyTests.java | 73 +++++++++++++++ .../AbstractThirdPartyRepositoryTestCase.java | 91 +++++++++++++++++++ 7 files changed, 373 insertions(+), 5 deletions(-) create mode 100644 plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureStorageCleanupThirdPartyTests.java create mode 100644 plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageThirdPartyTests.java create mode 100644 plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryThirdPartyTests.java create mode 100644 test/framework/src/main/java/org/elasticsearch/repositories/AbstractThirdPartyRepositoryTestCase.java diff --git a/plugins/repository-azure/build.gradle b/plugins/repository-azure/build.gradle index a7c1af412d949..2669e4bf6092a 100644 --- a/plugins/repository-azure/build.gradle +++ b/plugins/repository-azure/build.gradle @@ -71,3 +71,24 @@ testClusters { keystore 'azure.client.integration_test.key', 'azure_key' } } + +String azureAccount = System.getenv("azure_storage_account") +String azureKey = System.getenv("azure_storage_key") +String azureContainer = System.getenv("azure_storage_container") +String azureBasePath = System.getenv("azure_storage_base_path") + +test { + exclude '**/AzureStorageCleanupThirdPartyTests.class' +} + +task thirdPartyTest(type: Test) { + include '**/AzureStorageCleanupThirdPartyTests.class' + systemProperty 'test.azure.account', azureAccount ? azureAccount : "" + systemProperty 'test.azure.key', azureKey ? azureKey : "" + systemProperty 'test.azure.container', azureContainer ? azureContainer : "" + systemProperty 'test.azure.base', azureBasePath ? azureBasePath : "" +} + +if (azureAccount || azureKey || azureContainer || azureBasePath) { + check.dependsOn(thirdPartyTest) +} diff --git a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureStorageCleanupThirdPartyTests.java b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureStorageCleanupThirdPartyTests.java new file mode 100644 index 0000000000000..596fdf73342eb --- /dev/null +++ b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureStorageCleanupThirdPartyTests.java @@ -0,0 +1,65 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.repositories.azure; + +import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.common.settings.MockSecureSettings; +import org.elasticsearch.common.settings.SecureSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.repositories.AbstractThirdPartyRepositoryTestCase; + +import java.util.Collection; + +import static org.hamcrest.Matchers.blankOrNullString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.not; + +public class AzureStorageCleanupThirdPartyTests extends AbstractThirdPartyRepositoryTestCase { + + @Override + protected Collection> getPlugins() { + return pluginList(AzureRepositoryPlugin.class); + } + + @Override + protected SecureSettings credentials() { + assertThat(System.getProperty("test.azure.account"), not(blankOrNullString())); + assertThat(System.getProperty("test.azure.key"), not(blankOrNullString())); + assertThat(System.getProperty("test.azure.container"), not(blankOrNullString())); + assertThat(System.getProperty("test.azure.base"), not(blankOrNullString())); + + MockSecureSettings secureSettings = new MockSecureSettings(); + secureSettings.setString("azure.client.default.account", System.getProperty("test.azure.account")); + secureSettings.setString("azure.client.default.key", System.getProperty("test.azure.key")); + return secureSettings; + } + + @Override + protected void createRepository(String repoName) { + AcknowledgedResponse putRepositoryResponse = client().admin().cluster().preparePutRepository(repoName) + .setType("azure") + .setSettings(Settings.builder() + .put("container", System.getProperty("test.azure.container")) + .put("base_path", System.getProperty("test.azure.base")) + ).get(); + assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true)); + } +} diff --git a/plugins/repository-gcs/build.gradle b/plugins/repository-gcs/build.gradle index e5af9081ca189..288ab3c99f17b 100644 --- a/plugins/repository-gcs/build.gradle +++ b/plugins/repository-gcs/build.gradle @@ -1,3 +1,5 @@ +import java.nio.file.Files + /* * Licensed to Elasticsearch under one or more contributor * license agreements. See the NOTICE file distributed with @@ -122,3 +124,22 @@ check { // also execute the QA tests when testing the plugin dependsOn 'qa:google-cloud-storage:check' } + +String gcsServiceAccount = System.getenv("google_storage_service_account") +String gcsBucket = System.getenv("google_storage_bucket") +String gcsBasePath = System.getenv("google_storage_base_path") + +test { + exclude '**/GoogleCloudStorageThirdPartyTests.class' +} + +task thirdPartyTest(type: Test) { + include '**/GoogleCloudStorageThirdPartyTests.class' + systemProperty 'test.google.account', gcsServiceAccount ? Base64.encoder.encodeToString(Files.readAllBytes(file(gcsServiceAccount).toPath())) : "" + systemProperty 'test.google.bucket', gcsBucket ? gcsBucket : "" + systemProperty 'test.google.base', gcsBasePath ? gcsBasePath : "/" +} + +if (gcsServiceAccount || gcsBucket || gcsBasePath) { + check.dependsOn(thirdPartyTest) +} \ No newline at end of file diff --git a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageThirdPartyTests.java b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageThirdPartyTests.java new file mode 100644 index 0000000000000..06eb63ddd22f0 --- /dev/null +++ b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageThirdPartyTests.java @@ -0,0 +1,64 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.repositories.gcs; + +import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.common.settings.MockSecureSettings; +import org.elasticsearch.common.settings.SecureSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.repositories.AbstractThirdPartyRepositoryTestCase; + +import java.util.Base64; +import java.util.Collection; + +import static org.hamcrest.Matchers.blankOrNullString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.not; + +public class GoogleCloudStorageThirdPartyTests extends AbstractThirdPartyRepositoryTestCase { + + @Override + protected Collection> getPlugins() { + return pluginList(GoogleCloudStoragePlugin.class); + } + + @Override + protected SecureSettings credentials() { + assertThat(System.getProperty("test.google.account"), not(blankOrNullString())); + assertThat(System.getProperty("test.google.bucket"), not(blankOrNullString())); + + MockSecureSettings secureSettings = new MockSecureSettings(); + secureSettings.setFile("gcs.client.default.credentials_file", + Base64.getDecoder().decode(System.getProperty("test.google.account"))); + return secureSettings; + } + + @Override + protected void createRepository(final String repoName) { + AcknowledgedResponse putRepositoryResponse = client().admin().cluster().preparePutRepository("test-repo") + .setType("gcs") + .setSettings(Settings.builder() + .put("bucket", System.getProperty("test.google.bucket")) + .put("base_path", System.getProperty("test.google.base", "/")) + ).get(); + assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true)); + } +} diff --git a/plugins/repository-s3/build.gradle b/plugins/repository-s3/build.gradle index 60a4e97cfa493..bf32b99f9dc38 100644 --- a/plugins/repository-s3/build.gradle +++ b/plugins/repository-s3/build.gradle @@ -75,6 +75,7 @@ test { // these are tested explicitly in separate test tasks exclude '**/*CredentialsTests.class' exclude '**/S3BlobStoreRepositoryTests.class' + exclude '**/S3RepositoryThirdPartyTests.class' } boolean useFixture = false @@ -134,6 +135,14 @@ if (!s3EC2Bucket && !s3EC2BasePath && !s3ECSBucket && !s3ECSBasePath) { throw new IllegalArgumentException("not all options specified to run EC2/ECS tests are present") } +task thirdPartyTest(type: Test) { + include '**/S3RepositoryThirdPartyTests.class' + systemProperty 'test.s3.account', s3PermanentAccessKey + systemProperty 'test.s3.key', s3PermanentSecretKey + systemProperty 'test.s3.bucket', s3PermanentBucket + systemProperty 'test.s3.base', s3PermanentBasePath +} + if (useFixture) { apply plugin: 'elasticsearch.test.fixtures' task writeDockerFile { @@ -151,6 +160,32 @@ if (useFixture) { dependsOn(writeDockerFile) } + def minioAddress = { + int minioPort = postProcessFixture.ext."test.fixtures.minio-fixture.tcp.9000" + assert minioPort > 0 + return 'http://127.0.0.1:' + minioPort + } + + File minioAddressFile = new File(project.buildDir, 'generated-resources/s3Fixture.address') + + // We can't lazy evaluate a system property for the Minio address passed to JUnit so we write it to a resource file + // and pass its name instead. + task writeMinioAddress { + dependsOn tasks.bundlePlugin, tasks.postProcessFixture + outputs.file(minioAddressFile) + doLast { + file(minioAddressFile).text = "${ -> minioAddress.call() }" + } + } + + thirdPartyTest { + dependsOn writeMinioAddress + inputs.file(minioAddressFile) + systemProperty 'test.s3.endpoint', minioAddressFile.name + } + + BuildPlugin.requireDocker(tasks.thirdPartyTest) + task integTestMinio(type: RestIntegTestTask) { description = "Runs REST tests using the Minio repository." dependsOn tasks.bundlePlugin, tasks.postProcessFixture @@ -169,11 +204,7 @@ if (useFixture) { testClusters.integTestMinio { keystore 's3.client.integration_test_permanent.access_key', s3PermanentAccessKey keystore 's3.client.integration_test_permanent.secret_key', s3PermanentSecretKey - setting 's3.client.integration_test_permanent.endpoint', { - int minioPort = postProcessFixture.ext."test.fixtures.minio-fixture.tcp.9000" - assert minioPort > 0 - return 'http://127.0.0.1:' + minioPort - } + setting 's3.client.integration_test_permanent.endpoint', minioAddress plugin file(tasks.bundlePlugin.archiveFile) } @@ -191,6 +222,8 @@ if (useFixture) { } } +check.dependsOn(thirdPartyTest) + File parentFixtures = new File(project.buildDir, "fixtures") File s3FixtureFile = new File(parentFixtures, 's3Fixture.properties') diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryThirdPartyTests.java b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryThirdPartyTests.java new file mode 100644 index 0000000000000..88e293575488f --- /dev/null +++ b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryThirdPartyTests.java @@ -0,0 +1,73 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.repositories.s3; + +import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.common.settings.MockSecureSettings; +import org.elasticsearch.common.settings.SecureSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.repositories.AbstractThirdPartyRepositoryTestCase; +import org.elasticsearch.test.StreamsUtils; + +import java.io.IOException; +import java.util.Collection; + +import static org.hamcrest.Matchers.blankOrNullString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.not; + +public class S3RepositoryThirdPartyTests extends AbstractThirdPartyRepositoryTestCase { + + @Override + protected Collection> getPlugins() { + return pluginList(S3RepositoryPlugin.class); + } + + @Override + protected SecureSettings credentials() { + assertThat(System.getProperty("test.s3.account"), not(blankOrNullString())); + assertThat(System.getProperty("test.s3.key"), not(blankOrNullString())); + assertThat(System.getProperty("test.s3.bucket"), not(blankOrNullString())); + + MockSecureSettings secureSettings = new MockSecureSettings(); + secureSettings.setString("s3.client.default.access_key", System.getProperty("test.s3.account")); + secureSettings.setString("s3.client.default.secret_key", System.getProperty("test.s3.key")); + return secureSettings; + } + + @Override + protected void createRepository(String repoName) { + Settings.Builder settings = Settings.builder() + .put("bucket", System.getProperty("test.s3.bucket")) + .put("base_path", System.getProperty("test.s3.base", "/")); + final String endpointPath = System.getProperty("test.s3.endpoint"); + if (endpointPath != null) { + try { + settings = settings.put("endpoint", StreamsUtils.copyToStringFromClasspath("/" + endpointPath)); + } catch (IOException e) { + throw new AssertionError(e); + } + } + AcknowledgedResponse putRepositoryResponse = client().admin().cluster().preparePutRepository("test-repo") + .setType("s3") + .setSettings(settings).get(); + assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true)); + } +} diff --git a/test/framework/src/main/java/org/elasticsearch/repositories/AbstractThirdPartyRepositoryTestCase.java b/test/framework/src/main/java/org/elasticsearch/repositories/AbstractThirdPartyRepositoryTestCase.java new file mode 100644 index 0000000000000..90c399a5af6c7 --- /dev/null +++ b/test/framework/src/main/java/org/elasticsearch/repositories/AbstractThirdPartyRepositoryTestCase.java @@ -0,0 +1,91 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.repositories; + +import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; +import org.elasticsearch.common.settings.SecureSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.snapshots.SnapshotState; +import org.elasticsearch.test.ESSingleNodeTestCase; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; + +public abstract class AbstractThirdPartyRepositoryTestCase extends ESSingleNodeTestCase { + + @Override + protected Settings nodeSettings() { + return Settings.builder() + .put(super.nodeSettings()) + .setSecureSettings(credentials()) + .build(); + } + + protected abstract SecureSettings credentials(); + + protected abstract void createRepository(String repoName); + + + public void testCreateSnapshot() { + createRepository("test-repo"); + + createIndex("test-idx-1"); + createIndex("test-idx-2"); + createIndex("test-idx-3"); + ensureGreen(); + + logger.info("--> indexing some data"); + for (int i = 0; i < 100; i++) { + client().prepareIndex("test-idx-1", "doc", Integer.toString(i)).setSource("foo", "bar" + i).get(); + client().prepareIndex("test-idx-2", "doc", Integer.toString(i)).setSource("foo", "bar" + i).get(); + client().prepareIndex("test-idx-3", "doc", Integer.toString(i)).setSource("foo", "bar" + i).get(); + } + client().admin().indices().prepareRefresh().get(); + + final String snapshotName = "test-snap-" + System.currentTimeMillis(); + + logger.info("--> snapshot"); + CreateSnapshotResponse createSnapshotResponse = client().admin() + .cluster() + .prepareCreateSnapshot("test-repo", snapshotName) + .setWaitForCompletion(true) + .setIndices("test-idx-*", "-test-idx-3") + .get(); + assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0)); + assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), + equalTo(createSnapshotResponse.getSnapshotInfo().totalShards())); + + assertThat(client().admin() + .cluster() + .prepareGetSnapshots("test-repo") + .setSnapshots(snapshotName) + .get() + .getSnapshots() + .get(0) + .state(), + equalTo(SnapshotState.SUCCESS)); + + assertTrue(client().admin() + .cluster() + .prepareDeleteSnapshot("test-repo", snapshotName) + .get() + .isAcknowledged()); + + } +} From 337cef14b31628dd1880e68183645ee8d87d664e Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Mon, 27 May 2019 15:57:59 -0400 Subject: [PATCH 132/224] Add test ensure we can execute update requests in mixed cluster Relates #42596 --- .../org/elasticsearch/upgrades/RecoveryIT.java | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RecoveryIT.java b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RecoveryIT.java index 32ddc77113bc8..49bd5bb3585b6 100644 --- a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RecoveryIT.java +++ b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RecoveryIT.java @@ -567,4 +567,22 @@ private void ensureGlobalCheckpointSynced(String index) throws Exception { }); }, 60, TimeUnit.SECONDS); } + + /** Ensure that we can always execute update requests regardless of the version of cluster */ + public void testUpdateDoc() throws Exception { + final String index = "test_update_doc"; + if (CLUSTER_TYPE == ClusterType.OLD) { + Settings.Builder settings = Settings.builder() + .put(IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1) + .put(IndexMetaData.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 2); + createIndex(index, settings.build()); + } + ensureGreen(index); + indexDocs(index, 0, 10); + for (int i = 0; i < 10; i++) { + Request update = new Request("POST", index + "/_update/" + i); + update.setJsonEntity("{\"doc\": {\"f\": " + randomNonNegativeLong() + "}}"); + client().performRequest(update); + } + } } From 643eb35a20771ebf3d1c79e5500e49e2ad658d82 Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Mon, 27 May 2019 17:30:21 -0400 Subject: [PATCH 133/224] Allocate to data-only nodes in ReopenWhileClosingIT (#42560) If all primary shards are allocated on the master node, then the verifying before close step will never interact with mock transport service. This change prefers to allocate shards on data-only nodes. Closes #39757 --- .../indices/state/ReopenWhileClosingIT.java | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/server/src/test/java/org/elasticsearch/indices/state/ReopenWhileClosingIT.java b/server/src/test/java/org/elasticsearch/indices/state/ReopenWhileClosingIT.java index 0f57f518e70cd..8cf3b76184ae4 100644 --- a/server/src/test/java/org/elasticsearch/indices/state/ReopenWhileClosingIT.java +++ b/server/src/test/java/org/elasticsearch/indices/state/ReopenWhileClosingIT.java @@ -28,6 +28,7 @@ import org.elasticsearch.common.Glob; import org.elasticsearch.common.Strings; import org.elasticsearch.common.lease.Releasable; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.RunOnce; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; @@ -50,7 +51,7 @@ import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.notNullValue; -@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, minNumDataNodes = 2) +@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST) public class ReopenWhileClosingIT extends ESIntegTestCase { @Override @@ -64,8 +65,9 @@ protected int minimumNumberOfShards() { } public void testReopenDuringClose() throws Exception { + List dataOnlyNodes = internalCluster().startDataOnlyNodes(randomIntBetween(2, 3)); final String indexName = "test"; - createIndexWithDocs(indexName); + createIndexWithDocs(indexName, dataOnlyNodes); ensureYellowAndNoInitializingShards(indexName); @@ -84,12 +86,12 @@ public void testReopenDuringClose() throws Exception { assertIndexIsOpened(indexName); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/39757") public void testReopenDuringCloseOnMultipleIndices() throws Exception { + List dataOnlyNodes = internalCluster().startDataOnlyNodes(randomIntBetween(2, 3)); final List indices = new ArrayList<>(); for (int i = 0; i < randomIntBetween(2, 10); i++) { indices.add("index-" + i); - createIndexWithDocs(indices.get(i)); + createIndexWithDocs(indices.get(i), dataOnlyNodes); } ensureYellowAndNoInitializingShards(indices.toArray(Strings.EMPTY_ARRAY)); @@ -117,8 +119,9 @@ public void testReopenDuringCloseOnMultipleIndices() throws Exception { }); } - private void createIndexWithDocs(final String indexName) { - createIndex(indexName); + private void createIndexWithDocs(final String indexName, final Collection dataOnlyNodes) { + createIndex(indexName, + Settings.builder().put(indexSettings()).put("index.routing.allocation.include._name", String.join(",", dataOnlyNodes)).build()); final int nbDocs = scaledRandomIntBetween(1, 100); for (int i = 0; i < nbDocs; i++) { index(indexName, "_doc", String.valueOf(i), "num", i); From 7cf9eb2476fce3166b34424f122014167b9a1845 Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Mon, 27 May 2019 21:44:36 -0400 Subject: [PATCH 134/224] Reset mock transport service in CcrRetentionLeaseIT (#42600) testRetentionLeaseIsAddedIfItDisappearsWhileFollowing does not reset the mock transport service after test. Surviving transport interceptors from that test can sneaky remove retention leases and make other tests fail. Closes #39331 Closes #39509 Closes #41428 Closes #41679 Closes #41737 Closes #41756 --- .../xpack/ccr/CcrRetentionLeaseIT.java | 87 ++++++++++--------- 1 file changed, 46 insertions(+), 41 deletions(-) diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CcrRetentionLeaseIT.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CcrRetentionLeaseIT.java index eb4f4be84233b..9b3eaa7de55bb 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CcrRetentionLeaseIT.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CcrRetentionLeaseIT.java @@ -6,7 +6,6 @@ package org.elasticsearch.xpack.ccr; -import com.carrotsearch.hppc.cursors.ObjectCursor; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; @@ -44,7 +43,6 @@ import org.elasticsearch.plugins.Plugin; import org.elasticsearch.snapshots.RestoreInfo; import org.elasticsearch.snapshots.RestoreService; -import org.elasticsearch.test.junit.annotations.TestLogging; import org.elasticsearch.test.transport.MockTransportService; import org.elasticsearch.transport.ConnectTransportException; import org.elasticsearch.transport.RemoteTransportException; @@ -88,7 +86,6 @@ import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.hasSize; -@TestLogging(value = "org.elasticsearch.xpack.ccr:trace,org.elasticsearch.indices.recovery:trace,org.elasticsearch.index.seqno:debug") public class CcrRetentionLeaseIT extends CcrIntegTestCase { public static final class RetentionLeaseRenewIntervalSettingPlugin extends Plugin { @@ -224,9 +221,9 @@ public void testRetentionLeaseIsRenewedDuringRecovery() throws Exception { // block the recovery from completing; this ensures the background sync is still running final ClusterStateResponse followerClusterState = followerClient().admin().cluster().prepareState().clear().setNodes(true).get(); - for (final ObjectCursor senderNode : followerClusterState.getState().nodes().getNodes().values()) { + for (final DiscoveryNode senderNode : followerClusterState.getState().nodes()) { final MockTransportService senderTransportService = - (MockTransportService) getFollowerCluster().getInstance(TransportService.class, senderNode.value.getName()); + (MockTransportService) getFollowerCluster().getInstance(TransportService.class, senderNode.getName()); senderTransportService.addSendBehavior( (connection, requestId, action, request, options) -> { if (ClearCcrRestoreSessionAction.NAME.equals(action) @@ -248,9 +245,9 @@ public void testRetentionLeaseIsRenewedDuringRecovery() throws Exception { assertRetentionLeaseRenewal(numberOfShards, numberOfReplicas, followerIndex, leaderIndex); latch.countDown(); } finally { - for (final ObjectCursor senderNode : followerClusterState.getState().nodes().getDataNodes().values()) { + for (final DiscoveryNode senderNode : followerClusterState.getState().nodes()) { final MockTransportService senderTransportService = - (MockTransportService) getFollowerCluster().getInstance(TransportService.class, senderNode.value.getName()); + (MockTransportService) getFollowerCluster().getInstance(TransportService.class, senderNode.getName()); senderTransportService.clearAllRules(); } } @@ -405,9 +402,9 @@ public void testUnfollowRemovesRetentionLeases() throws Exception { final ClusterStateResponse followerClusterState = followerClient().admin().cluster().prepareState().clear().setNodes(true).get(); try { - for (final ObjectCursor senderNode : followerClusterState.getState().nodes().getNodes().values()) { + for (final DiscoveryNode senderNode : followerClusterState.getState().nodes()) { final MockTransportService senderTransportService = - (MockTransportService) getFollowerCluster().getInstance(TransportService.class, senderNode.value.getName()); + (MockTransportService) getFollowerCluster().getInstance(TransportService.class, senderNode.getName()); senderTransportService.addSendBehavior( (connection, requestId, action, request, options) -> { if (RetentionLeaseActions.Remove.ACTION_NAME.equals(action) @@ -456,9 +453,9 @@ public void testUnfollowRemovesRetentionLeases() throws Exception { assertThat(Strings.toString(shardStats), shardStats.getRetentionLeaseStats().retentionLeases().leases(), empty()); } } finally { - for (final ObjectCursor senderNode : followerClusterState.getState().nodes().getDataNodes().values()) { + for (final DiscoveryNode senderNode : followerClusterState.getState().nodes()) { final MockTransportService senderTransportService = - (MockTransportService) getFollowerCluster().getInstance(TransportService.class, senderNode.value.getName()); + (MockTransportService) getFollowerCluster().getInstance(TransportService.class, senderNode.getName()); senderTransportService.clearAllRules(); } } @@ -488,9 +485,9 @@ public void testUnfollowFailsToRemoveRetentionLeases() throws Exception { final ClusterStateResponse followerClusterState = followerClient().admin().cluster().prepareState().clear().setNodes(true).get(); try { - for (final ObjectCursor senderNode : followerClusterState.getState().nodes().getNodes().values()) { + for (final DiscoveryNode senderNode : followerClusterState.getState().nodes()) { final MockTransportService senderTransportService = - (MockTransportService) getFollowerCluster().getInstance(TransportService.class, senderNode.value.getName()); + (MockTransportService) getFollowerCluster().getInstance(TransportService.class, senderNode.getName()); senderTransportService.addSendBehavior( (connection, requestId, action, request, options) -> { if (RetentionLeaseActions.Remove.ACTION_NAME.equals(action) @@ -526,9 +523,9 @@ public void testUnfollowFailsToRemoveRetentionLeases() throws Exception { getLeaderCluster().getClusterName(), new Index(leaderIndex, leaderUUID)))); } finally { - for (final ObjectCursor senderNode : followerClusterState.getState().nodes().getDataNodes().values()) { + for (final DiscoveryNode senderNode : followerClusterState.getState().nodes()) { final MockTransportService senderTransportService = - (MockTransportService) getFollowerCluster().getInstance(TransportService.class, senderNode.value.getName()); + (MockTransportService) getFollowerCluster().getInstance(TransportService.class, senderNode.getName()); senderTransportService.clearAllRules(); } } @@ -766,35 +763,36 @@ public void testRetentionLeaseIsAddedIfItDisappearsWhileFollowing() throws Excep final CountDownLatch latch = new CountDownLatch(1); final ClusterStateResponse followerClusterState = followerClient().admin().cluster().prepareState().clear().setNodes(true).get(); - for (final ObjectCursor senderNode : followerClusterState.getState().nodes().getNodes().values()) { - final MockTransportService senderTransportService = - (MockTransportService) getFollowerCluster().getInstance(TransportService.class, senderNode.value.getName()); - senderTransportService.addSendBehavior( + try { + for (final DiscoveryNode senderNode : followerClusterState.getState().nodes()) { + final MockTransportService senderTransportService = + (MockTransportService) getFollowerCluster().getInstance(TransportService.class, senderNode.getName()); + senderTransportService.addSendBehavior( (connection, requestId, action, request, options) -> { if (RetentionLeaseActions.Renew.ACTION_NAME.equals(action) - || TransportActionProxy.getProxyAction(RetentionLeaseActions.Renew.ACTION_NAME).equals(action)) { + || TransportActionProxy.getProxyAction(RetentionLeaseActions.Renew.ACTION_NAME).equals(action)) { senderTransportService.clearAllRules(); final RetentionLeaseActions.RenewRequest renewRequest = (RetentionLeaseActions.RenewRequest) request; final String primaryShardNodeId = - getLeaderCluster() - .clusterService() - .state() - .routingTable() - .index(leaderIndex) - .shard(renewRequest.getShardId().id()) - .primaryShard() - .currentNodeId(); + getLeaderCluster() + .clusterService() + .state() + .routingTable() + .index(leaderIndex) + .shard(renewRequest.getShardId().id()) + .primaryShard() + .currentNodeId(); final String primaryShardNodeName = - getLeaderCluster().clusterService().state().nodes().get(primaryShardNodeId).getName(); + getLeaderCluster().clusterService().state().nodes().get(primaryShardNodeId).getName(); final IndexShard primary = - getLeaderCluster() - .getInstance(IndicesService.class, primaryShardNodeName) - .getShardOrNull(renewRequest.getShardId()); + getLeaderCluster() + .getInstance(IndicesService.class, primaryShardNodeName) + .getShardOrNull(renewRequest.getShardId()); final CountDownLatch innerLatch = new CountDownLatch(1); // this forces the background renewal from following to face a retention lease not found exception primary.removeRetentionLease( - getRetentionLeaseId(followerIndex, leaderIndex), - ActionListener.wrap(r -> innerLatch.countDown(), e -> fail(e.toString()))); + getRetentionLeaseId(followerIndex, leaderIndex), + ActionListener.wrap(r -> innerLatch.countDown(), e -> fail(e.toString()))); try { innerLatch.await(); @@ -807,11 +805,18 @@ public void testRetentionLeaseIsAddedIfItDisappearsWhileFollowing() throws Excep } connection.sendRequest(requestId, action, request, options); }); - } + } - latch.await(); + latch.await(); - assertRetentionLeaseRenewal(numberOfShards, numberOfReplicas, followerIndex, leaderIndex); + assertRetentionLeaseRenewal(numberOfShards, numberOfReplicas, followerIndex, leaderIndex); + } finally { + for (final DiscoveryNode senderNode : followerClusterState.getState().nodes()) { + final MockTransportService senderTransportService = + (MockTransportService) getFollowerCluster().getInstance(TransportService.class, senderNode.getName()); + senderTransportService.clearAllRules(); + } + } } /** @@ -858,9 +863,9 @@ public void testPeriodicRenewalDoesNotAddRetentionLeaseAfterUnfollow() throws Ex final ClusterStateResponse followerClusterState = followerClient().admin().cluster().prepareState().clear().setNodes(true).get(); try { - for (final ObjectCursor senderNode : followerClusterState.getState().nodes().getNodes().values()) { + for (final DiscoveryNode senderNode : followerClusterState.getState().nodes()) { final MockTransportService senderTransportService = - (MockTransportService) getFollowerCluster().getInstance(TransportService.class, senderNode.value.getName()); + (MockTransportService) getFollowerCluster().getInstance(TransportService.class, senderNode.getName()); senderTransportService.addSendBehavior( (connection, requestId, action, request, options) -> { if (RetentionLeaseActions.Renew.ACTION_NAME.equals(action) @@ -914,9 +919,9 @@ public void onResponseReceived( assertThat(Strings.toString(shardStats), shardStats.getRetentionLeaseStats().retentionLeases().leases(), empty()); } } finally { - for (final ObjectCursor senderNode : followerClusterState.getState().nodes().getDataNodes().values()) { + for (final DiscoveryNode senderNode : followerClusterState.getState().nodes()) { final MockTransportService senderTransportService = - (MockTransportService) getFollowerCluster().getInstance(TransportService.class, senderNode.value.getName()); + (MockTransportService) getFollowerCluster().getInstance(TransportService.class, senderNode.getName()); senderTransportService.clearAllRules(); } } From 7209f9769092d6caee82822f1e23195e14a0ae46 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?G=C3=BCrkan=20Kaymak?= Date: Tue, 28 May 2019 10:37:04 +0300 Subject: [PATCH 135/224] Fixed ignoring name parameter for percolator queries (#42598) Closes #40405 --- .../percolator/PercolateQueryBuilder.java | 21 ++++++++++++--- .../PercolateQueryBuilderTests.java | 26 +++++++++++++++++++ 2 files changed, 43 insertions(+), 4 deletions(-) diff --git a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQueryBuilder.java b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQueryBuilder.java index 3021f5b31606e..151dd8b9e61ba 100644 --- a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQueryBuilder.java +++ b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQueryBuilder.java @@ -178,7 +178,7 @@ public PercolateQueryBuilder(String field, String documentType, List documentSupplier) { + protected PercolateQueryBuilder(String field, String documentType, Supplier documentSupplier) { if (field == null) { throw new IllegalArgumentException("[field] is a required argument"); } @@ -491,8 +491,12 @@ protected QueryBuilder doRewrite(QueryRewriteContext queryShardContext) { if (source == null) { return this; // not executed yet } else { - return new PercolateQueryBuilder(field, documentType, Collections.singletonList(source), - XContentHelper.xContentType(source)); + PercolateQueryBuilder rewritten = new PercolateQueryBuilder(field, documentType, + Collections.singletonList(source), XContentHelper.xContentType(source)); + if (name != null) { + rewritten.setName(name); + } + return rewritten; } } GetRequest getRequest; @@ -527,7 +531,12 @@ protected QueryBuilder doRewrite(QueryRewriteContext queryShardContext) { listener.onResponse(null); }, listener::onFailure)); }); - return new PercolateQueryBuilder(field, documentType, documentSupplier::get); + + PercolateQueryBuilder rewritten = new PercolateQueryBuilder(field, documentType, documentSupplier::get); + if (name != null) { + rewritten.setName(name); + } + return rewritten; } @Override @@ -626,6 +635,10 @@ XContentType getXContentType() { return documentXContentType; } + public String getQueryName() { + return name; + } + static IndexSearcher createMultiDocumentSearcher(Analyzer analyzer, Collection docs) { RAMDirectory ramDirectory = new RAMDirectory(); try (IndexWriter indexWriter = new IndexWriter(ramDirectory, new IndexWriterConfig(analyzer))) { diff --git a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolateQueryBuilderTests.java b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolateQueryBuilderTests.java index 6053a92b54a20..5b4dc61090042 100644 --- a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolateQueryBuilderTests.java +++ b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolateQueryBuilderTests.java @@ -54,6 +54,7 @@ import java.util.List; import java.util.Map; import java.util.Set; +import java.util.function.Supplier; import static org.elasticsearch.index.seqno.SequenceNumbers.UNASSIGNED_SEQ_NO; import static org.hamcrest.Matchers.equalTo; @@ -331,4 +332,29 @@ public void testFieldAlias() throws IOException { assertEquals(query.getVerifiedMatchesQuery(), aliasQuery.getVerifiedMatchesQuery()); } + public void testSettingNameWhileRewriting() { + String testName = "name1"; + QueryShardContext shardContext = createShardContext(); + PercolateQueryBuilder percolateQueryBuilder = doCreateTestQueryBuilder(true); + percolateQueryBuilder.setName(testName); + + QueryBuilder rewrittenQueryBuilder = percolateQueryBuilder.doRewrite(shardContext); + + assertEquals(testName, ((PercolateQueryBuilder) rewrittenQueryBuilder).getQueryName()); + assertNotEquals(rewrittenQueryBuilder, percolateQueryBuilder); + } + + public void testSettingNameWhileRewritingWhenDocumentSupplierAndSourceNotNull() { + Supplier supplier = () -> new BytesArray("{\"test\": \"test\"}"); + String testName = "name1"; + QueryShardContext shardContext = createShardContext(); + PercolateQueryBuilder percolateQueryBuilder = new PercolateQueryBuilder(queryField, null, supplier); + percolateQueryBuilder.setName(testName); + + QueryBuilder rewrittenQueryBuilder = percolateQueryBuilder.doRewrite(shardContext); + + assertEquals(testName, ((PercolateQueryBuilder) rewrittenQueryBuilder).getQueryName()); + assertNotEquals(rewrittenQueryBuilder, percolateQueryBuilder); + } + } From 578c05e771509586d426b0ad1ff8af5f1f3bf351 Mon Sep 17 00:00:00 2001 From: David Kyle Date: Tue, 28 May 2019 09:15:03 +0100 Subject: [PATCH 136/224] [Ml Data Frame] Return bad_request on preview when config is invalid (#42447) --- ...nsportPreviewDataFrameTransformAction.java | 24 +++++++---- .../test/data_frame/preview_transforms.yml | 43 +++++++++++++++++++ 2 files changed, 59 insertions(+), 8 deletions(-) diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportPreviewDataFrameTransformAction.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportPreviewDataFrameTransformAction.java index f4b93cc6ac412..dde9edb37e55c 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportPreviewDataFrameTransformAction.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportPreviewDataFrameTransformAction.java @@ -31,6 +31,7 @@ import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameIndexerTransformStats; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformConfig; import org.elasticsearch.xpack.core.dataframe.transforms.SourceConfig; +import org.elasticsearch.xpack.dataframe.transforms.pivot.AggregationResultUtils; import org.elasticsearch.xpack.dataframe.transforms.pivot.Pivot; import java.util.List; @@ -102,14 +103,21 @@ private void getPreview(Pivot pivot, SourceConfig source, ActionListener { - final CompositeAggregation agg = r.getAggregations().get(COMPOSITE_AGGREGATION_NAME); - DataFrameIndexerTransformStats stats = DataFrameIndexerTransformStats.withDefaultTransformId(); - // remove all internal fields - List> results = pivot.extractResults(agg, deducedMappings, stats) - .peek(record -> { - record.keySet().removeIf(k -> k.startsWith("_")); - }).collect(Collectors.toList()); - listener.onResponse(results); + + try { + final CompositeAggregation agg = r.getAggregations().get(COMPOSITE_AGGREGATION_NAME); + DataFrameIndexerTransformStats stats = DataFrameIndexerTransformStats.withDefaultTransformId(); + // remove all internal fields + List> results = pivot.extractResults(agg, deducedMappings, stats) + .peek(record -> { + record.keySet().removeIf(k -> k.startsWith("_")); + }).collect(Collectors.toList()); + + listener.onResponse(results); + } catch (AggregationResultUtils.AggregationExtractionException extractionException) { + listener.onFailure( + new ElasticsearchStatusException(extractionException.getMessage(), RestStatus.BAD_REQUEST)); + } }, listener::onFailure )); diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/preview_transforms.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/preview_transforms.yml index 5e58048b3bf0f..090243d1d966b 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/preview_transforms.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/preview_transforms.yml @@ -127,3 +127,46 @@ setup: "aggs": {"avg_response": {"avg": {"field": "responsetime"}}} } } + +--- +"Test preview returns bad request with invalid agg": + - skip: + reason: date histo interval is deprecated + features: "warnings" + + - do: + warnings: + - "[interval] on [date_histogram] is deprecated, use [fixed_interval] or [calendar_interval] in the future." + catch: bad_request + data_frame.preview_data_frame_transform: + body: > + { + "source": { "index": "airline-data" }, + "pivot": { + "group_by": { + "time": {"date_histogram": {"interval": "1h", "field": "time", "format": "yyyy-MM-DD HH"}}}, + "aggs": { + "avg_response": {"avg": {"field": "responsetime"}}, + "time.min": {"min": {"field": "time"}} + } + } + } + + - do: + warnings: + - "[interval] on [date_histogram] is deprecated, use [fixed_interval] or [calendar_interval] in the future." + catch: /mixed object types of nested and non-nested fields \[time.min\]/ + data_frame.preview_data_frame_transform: + body: > + { + "source": { "index": "airline-data" }, + "pivot": { + "group_by": { + "time": {"date_histogram": {"interval": "1h", "field": "time", "format": "yyyy-MM-DD HH"}}}, + "aggs": { + "avg_response": {"avg": {"field": "responsetime"}}, + "time.min": {"min": {"field": "time"}} + } + } + } + From 777be0908d7bb96d5231cea8639c0fff92f9c3a3 Mon Sep 17 00:00:00 2001 From: Daniel Mitterdorfer Date: Tue, 28 May 2019 10:20:02 +0200 Subject: [PATCH 137/224] Mute AsyncTwoPhaseIndexerTests#testStateMachine() (#42609) Relates #42084 --- .../xpack/core/indexing/AsyncTwoPhaseIndexerTests.java | 1 + 1 file changed, 1 insertion(+) diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexerTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexerTests.java index 4249d7c61d0ad..95b3de5eb333e 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexerTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexerTests.java @@ -225,6 +225,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws } } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/42084") public void testStateMachine() throws Exception { AtomicReference state = new AtomicReference<>(IndexerState.STOPPED); final ExecutorService executor = Executors.newFixedThreadPool(1); From 0de11779a7aadbf46b777fdcbc4bd541c3208ea7 Mon Sep 17 00:00:00 2001 From: David Kyle Date: Tue, 28 May 2019 09:58:35 +0100 Subject: [PATCH 138/224] [ML DataFrame] Use date histogram fixed_interval syntax and remove test skip --- .../test/data_frame/preview_transforms.yml | 12 ++---------- 1 file changed, 2 insertions(+), 10 deletions(-) diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/preview_transforms.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/preview_transforms.yml index 090243d1d966b..7b5c4e8cb5664 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/preview_transforms.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/preview_transforms.yml @@ -130,13 +130,7 @@ setup: --- "Test preview returns bad request with invalid agg": - - skip: - reason: date histo interval is deprecated - features: "warnings" - - do: - warnings: - - "[interval] on [date_histogram] is deprecated, use [fixed_interval] or [calendar_interval] in the future." catch: bad_request data_frame.preview_data_frame_transform: body: > @@ -144,7 +138,7 @@ setup: "source": { "index": "airline-data" }, "pivot": { "group_by": { - "time": {"date_histogram": {"interval": "1h", "field": "time", "format": "yyyy-MM-DD HH"}}}, + "time": {"date_histogram": {"fixed_interval": "1h", "field": "time", "format": "yyyy-MM-DD HH"}}}, "aggs": { "avg_response": {"avg": {"field": "responsetime"}}, "time.min": {"min": {"field": "time"}} @@ -153,8 +147,6 @@ setup: } - do: - warnings: - - "[interval] on [date_histogram] is deprecated, use [fixed_interval] or [calendar_interval] in the future." catch: /mixed object types of nested and non-nested fields \[time.min\]/ data_frame.preview_data_frame_transform: body: > @@ -162,7 +154,7 @@ setup: "source": { "index": "airline-data" }, "pivot": { "group_by": { - "time": {"date_histogram": {"interval": "1h", "field": "time", "format": "yyyy-MM-DD HH"}}}, + "time": {"date_histogram": {"fixed_interval": "1h", "field": "time", "format": "yyyy-MM-DD HH"}}}, "aggs": { "avg_response": {"avg": {"field": "responsetime"}}, "time.min": {"min": {"field": "time"}} From 5f651f4ab88493be7959c4042ba65d33def71c89 Mon Sep 17 00:00:00 2001 From: Daniel Mitterdorfer Date: Tue, 28 May 2019 12:20:20 +0200 Subject: [PATCH 139/224] Mute NodeTests (#42614) Relates #42577 --- server/src/test/java/org/elasticsearch/node/NodeTests.java | 1 + 1 file changed, 1 insertion(+) diff --git a/server/src/test/java/org/elasticsearch/node/NodeTests.java b/server/src/test/java/org/elasticsearch/node/NodeTests.java index 6f0419421b868..a5653eb88e176 100644 --- a/server/src/test/java/org/elasticsearch/node/NodeTests.java +++ b/server/src/test/java/org/elasticsearch/node/NodeTests.java @@ -50,6 +50,7 @@ import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +@LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/42577") @LuceneTestCase.SuppressFileSystems(value = "ExtrasFS") public class NodeTests extends ESTestCase { From e97bee606845c276b48e87f38fd8449c5f230ef4 Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Tue, 28 May 2019 12:38:19 +0200 Subject: [PATCH 140/224] Fix Incorrect Time Math in MockTransport (#42595) * Fix Incorrect Time Math in MockTransport * The timeunit here must be nanos for the current time (we even convert it accordingly in the logging) * Also, changed the log message when dumping stack traces a little to make it easier to grep for (otherwise it's the same as the message on unregister) --- .../org/elasticsearch/transport/nio/MockNioTransport.java | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/test/framework/src/main/java/org/elasticsearch/transport/nio/MockNioTransport.java b/test/framework/src/main/java/org/elasticsearch/transport/nio/MockNioTransport.java index 9470b7548adfb..86c7f77fa7bee 100644 --- a/test/framework/src/main/java/org/elasticsearch/transport/nio/MockNioTransport.java +++ b/test/framework/src/main/java/org/elasticsearch/transport/nio/MockNioTransport.java @@ -363,11 +363,11 @@ private void maybeLogElapsedTime(long startTime) { private void logLongRunningExecutions() { for (Map.Entry entry : registry.entrySet()) { - final long elapsedTime = threadPool.relativeTimeInMillis() - entry.getValue(); - if (elapsedTime > WARN_THRESHOLD) { + final long elapsedTimeInNanos = threadPool.relativeTimeInNanos() - entry.getValue(); + if (elapsedTimeInNanos > WARN_THRESHOLD) { final Thread thread = entry.getKey(); - logger.warn("Slow execution on network thread [{}] [{} milliseconds]: \n{}", thread.getName(), - TimeUnit.NANOSECONDS.toMillis(elapsedTime), + logger.warn("Potentially blocked execution on network thread [{}] [{} milliseconds]: \n{}", thread.getName(), + TimeUnit.NANOSECONDS.toMillis(elapsedTimeInNanos), Arrays.stream(thread.getStackTrace()).map(Object::toString).collect(Collectors.joining("\n"))); } } From 2c8440922f99e1ffc95487d2d3130168898d448b Mon Sep 17 00:00:00 2001 From: David Turner Date: Tue, 28 May 2019 12:25:51 +0100 Subject: [PATCH 141/224] Remove PRE_60_NODE_CHECKPOINT (#42527) This commit removes the obsolete `PRE_60_NODE_CHECKPOINT` constant for dealing with 5.x nodes' lack of sequence number support. Backported as #42531 --- .../index/seqno/ReplicationTracker.java | 37 +++++-------------- .../index/seqno/SequenceNumbers.java | 4 -- 2 files changed, 10 insertions(+), 31 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/index/seqno/ReplicationTracker.java b/server/src/main/java/org/elasticsearch/index/seqno/ReplicationTracker.java index cf0fe6a5d25e1..c272816ed3815 100644 --- a/server/src/main/java/org/elasticsearch/index/seqno/ReplicationTracker.java +++ b/server/src/main/java/org/elasticsearch/index/seqno/ReplicationTracker.java @@ -539,9 +539,7 @@ private boolean invariant() { "checkpoints map should always have an entry for the current shard"; // local checkpoints only set during primary mode - assert primaryMode || checkpoints.values().stream() - .allMatch(lcps -> lcps.localCheckpoint == SequenceNumbers.UNASSIGNED_SEQ_NO || - lcps.localCheckpoint == SequenceNumbers.PRE_60_NODE_CHECKPOINT); + assert primaryMode || checkpoints.values().stream().allMatch(lcps -> lcps.localCheckpoint == SequenceNumbers.UNASSIGNED_SEQ_NO); // global checkpoints for other shards only set during primary mode assert primaryMode @@ -550,9 +548,7 @@ private boolean invariant() { .stream() .filter(e -> e.getKey().equals(shardAllocationId) == false) .map(Map.Entry::getValue) - .allMatch(cps -> - (cps.globalCheckpoint == SequenceNumbers.UNASSIGNED_SEQ_NO - || cps.globalCheckpoint == SequenceNumbers.PRE_60_NODE_CHECKPOINT)); + .allMatch(cps -> cps.globalCheckpoint == SequenceNumbers.UNASSIGNED_SEQ_NO); // relocation handoff can only occur in primary mode assert !handoffInProgress || primaryMode; @@ -631,7 +627,7 @@ private static long inSyncCheckpointStates( .stream() .filter(cps -> cps.inSync) .mapToLong(function) - .filter(v -> v != SequenceNumbers.PRE_60_NODE_CHECKPOINT && v != SequenceNumbers.UNASSIGNED_SEQ_NO)); + .filter(v -> v != SequenceNumbers.UNASSIGNED_SEQ_NO)); return value.isPresent() ? value.getAsLong() : SequenceNumbers.UNASSIGNED_SEQ_NO; } @@ -916,13 +912,9 @@ public synchronized void markAllocationIdAsInSync(final String allocationId, fin } private boolean updateLocalCheckpoint(String allocationId, CheckpointState cps, long localCheckpoint) { - // a local checkpoint of PRE_60_NODE_CHECKPOINT cannot be overridden - assert cps.localCheckpoint != SequenceNumbers.PRE_60_NODE_CHECKPOINT || - localCheckpoint == SequenceNumbers.PRE_60_NODE_CHECKPOINT : - "pre-6.0 shard copy " + allocationId + " unexpected to send valid local checkpoint " + localCheckpoint; - // a local checkpoint for a shard copy should be a valid sequence number or the pre-6.0 sequence number indicator - assert localCheckpoint != SequenceNumbers.UNASSIGNED_SEQ_NO : - "invalid local checkpoint for shard copy [" + allocationId + "]"; + // a local checkpoint for a shard copy should be a valid sequence number + assert localCheckpoint >= SequenceNumbers.NO_OPS_PERFORMED : + "invalid local checkpoint [" + localCheckpoint + "] for shard copy [" + allocationId + "]"; if (localCheckpoint > cps.localCheckpoint) { logger.trace("updated local checkpoint of [{}] from [{}] to [{}]", allocationId, cps.localCheckpoint, localCheckpoint); cps.localCheckpoint = localCheckpoint; @@ -981,8 +973,6 @@ private static long computeGlobalCheckpoint(final Set pendingInSync, fin if (cps.localCheckpoint == SequenceNumbers.UNASSIGNED_SEQ_NO) { // unassigned in-sync replica return fallback; - } else if (cps.localCheckpoint == SequenceNumbers.PRE_60_NODE_CHECKPOINT) { - // 5.x replica, ignore for global checkpoint calculation } else { minLocalCheckpoint = Math.min(cps.localCheckpoint, minLocalCheckpoint); } @@ -1054,18 +1044,11 @@ public synchronized void completeRelocationHandoff() { handoffInProgress = false; relocated = true; // forget all checkpoint information except for global checkpoint of current shard - checkpoints.entrySet().stream().forEach(e -> { - final CheckpointState cps = e.getValue(); - if (cps.localCheckpoint != SequenceNumbers.UNASSIGNED_SEQ_NO && - cps.localCheckpoint != SequenceNumbers.PRE_60_NODE_CHECKPOINT) { - cps.localCheckpoint = SequenceNumbers.UNASSIGNED_SEQ_NO; - } - if (e.getKey().equals(shardAllocationId) == false) { + checkpoints.forEach((key, cps) -> { + cps.localCheckpoint = SequenceNumbers.UNASSIGNED_SEQ_NO; + if (key.equals(shardAllocationId) == false) { // don't throw global checkpoint information of current shard away - if (cps.globalCheckpoint != SequenceNumbers.UNASSIGNED_SEQ_NO && - cps.globalCheckpoint != SequenceNumbers.PRE_60_NODE_CHECKPOINT) { - cps.globalCheckpoint = SequenceNumbers.UNASSIGNED_SEQ_NO; - } + cps.globalCheckpoint = SequenceNumbers.UNASSIGNED_SEQ_NO; } }); assert invariant(); diff --git a/server/src/main/java/org/elasticsearch/index/seqno/SequenceNumbers.java b/server/src/main/java/org/elasticsearch/index/seqno/SequenceNumbers.java index 6336e83338f8c..87257a97076da 100644 --- a/server/src/main/java/org/elasticsearch/index/seqno/SequenceNumbers.java +++ b/server/src/main/java/org/elasticsearch/index/seqno/SequenceNumbers.java @@ -28,10 +28,6 @@ public class SequenceNumbers { public static final String LOCAL_CHECKPOINT_KEY = "local_checkpoint"; public static final String MAX_SEQ_NO = "max_seq_no"; - /** - * Represents a checkpoint coming from a pre-6.0 node - */ - public static final long PRE_60_NODE_CHECKPOINT = -3L; /** * Represents an unassigned sequence number (e.g., can be used on primary operations before they are executed). */ From 692245cc447d748b8233d588bb38cc261227d6f5 Mon Sep 17 00:00:00 2001 From: Yannick Welsch Date: Tue, 28 May 2019 13:35:01 +0200 Subject: [PATCH 142/224] Reset state recovery after successful recovery (#42576) The problem this commit addresses is that state recovery is not reset on a node that then becomes master with a cluster state that has a state not recovered flag in it. The situation that was observed in a failed test run of MinimumMasterNodesIT.testThreeNodesNoMasterBlock (see below) is that we have 3 master nodes (node_t0, node_t1, node_t2), two of them are shut down (node_t2 remains), when the first one comes back (renamed to node_t4) it becomes leader in term 2 and sends state (with state_not_recovered_block) to node_t2, which accepts. node_t2 becomes leader in term 3, and as it was previously leader in term1 and successfully completed state recovery, does never retry state recovery in term 3. Closes #39172 --- .../elasticsearch/cluster/ClusterState.java | 3 +- .../cluster/coordination/Coordinator.java | 1 - .../elasticsearch/gateway/GatewayService.java | 11 ++-- .../coordination/CoordinatorTests.java | 61 ++++++++++++++++++- 4 files changed, 67 insertions(+), 9 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/cluster/ClusterState.java b/server/src/main/java/org/elasticsearch/cluster/ClusterState.java index 6a5e2a324965f..6cde7d5b3bb10 100644 --- a/server/src/main/java/org/elasticsearch/cluster/ClusterState.java +++ b/server/src/main/java/org/elasticsearch/cluster/ClusterState.java @@ -297,7 +297,8 @@ public RoutingNodes getRoutingNodes() { public String toString() { StringBuilder sb = new StringBuilder(); final String TAB = " "; - sb.append("cluster uuid: ").append(metaData.clusterUUID()).append("\n"); + sb.append("cluster uuid: ").append(metaData.clusterUUID()) + .append(" [committed: ").append(metaData.clusterUUIDCommitted()).append("]").append("\n"); sb.append("version: ").append(version).append("\n"); sb.append("state uuid: ").append(stateUUID).append("\n"); sb.append("from_diff: ").append(wasReadFromDiff).append("\n"); diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java b/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java index 376dd640c56b2..6d86bb613be43 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java @@ -699,7 +699,6 @@ public void invariant() { assert followersChecker.getFastResponseState().term == getCurrentTerm() : followersChecker.getFastResponseState(); assert followersChecker.getFastResponseState().mode == getMode() : followersChecker.getFastResponseState(); assert (applierState.nodes().getMasterNodeId() == null) == applierState.blocks().hasGlobalBlockWithId(NO_MASTER_BLOCK_ID); - assert applierState.nodes().getMasterNodeId() == null || applierState.metaData().clusterUUIDCommitted(); assert preVoteCollector.getPreVoteResponse().equals(getPreVoteResponse()) : preVoteCollector + " vs " + getPreVoteResponse(); diff --git a/server/src/main/java/org/elasticsearch/gateway/GatewayService.java b/server/src/main/java/org/elasticsearch/gateway/GatewayService.java index b7b7d0759980e..3e9c25847f6a7 100644 --- a/server/src/main/java/org/elasticsearch/gateway/GatewayService.java +++ b/server/src/main/java/org/elasticsearch/gateway/GatewayService.java @@ -85,7 +85,7 @@ public class GatewayService extends AbstractLifecycleComponent implements Cluste private final Runnable recoveryRunnable; - private final AtomicBoolean recovered = new AtomicBoolean(); + private final AtomicBoolean recoveryInProgress = new AtomicBoolean(); private final AtomicBoolean scheduledRecovery = new AtomicBoolean(); @Inject @@ -211,7 +211,7 @@ public void onFailure(Exception e) { @Override protected void doRun() { - if (recovered.compareAndSet(false, true)) { + if (recoveryInProgress.compareAndSet(false, true)) { logger.info("recover_after_time [{}] elapsed. performing state recovery...", recoverAfterTime); recoveryRunnable.run(); } @@ -219,7 +219,7 @@ protected void doRun() { }, recoverAfterTime, ThreadPool.Names.GENERIC); } } else { - if (recovered.compareAndSet(false, true)) { + if (recoveryInProgress.compareAndSet(false, true)) { threadPool.generic().execute(new AbstractRunnable() { @Override public void onFailure(final Exception e) { @@ -237,7 +237,7 @@ protected void doRun() { } private void resetRecoveredFlags() { - recovered.set(false); + recoveryInProgress.set(false); scheduledRecovery.set(false); } @@ -256,6 +256,9 @@ public ClusterState execute(final ClusterState currentState) { @Override public void clusterStateProcessed(final String source, final ClusterState oldState, final ClusterState newState) { logger.info("recovered [{}] indices into cluster_state", newState.metaData().indices().size()); + // reset flag even though state recovery completed, to ensure that if we subsequently become leader again based on a + // not-recovered state, that we again do another state recovery. + resetRecoveredFlags(); } @Override diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorTests.java b/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorTests.java index 5daa863402b2a..4f1937efc9c74 100644 --- a/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorTests.java @@ -45,6 +45,7 @@ import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNode.Role; +import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.service.ClusterApplierService; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Nullable; @@ -69,6 +70,8 @@ import org.elasticsearch.discovery.DiscoveryModule; import org.elasticsearch.discovery.SeedHostsProvider.HostsResolver; import org.elasticsearch.env.NodeEnvironment; +import org.elasticsearch.gateway.ClusterStateUpdaters; +import org.elasticsearch.gateway.GatewayService; import org.elasticsearch.gateway.MetaStateService; import org.elasticsearch.gateway.MockGatewayMetaState; import org.elasticsearch.indices.cluster.FakeThreadPoolMasterService; @@ -130,6 +133,7 @@ import static org.elasticsearch.cluster.coordination.NoMasterBlockService.NO_MASTER_BLOCK_WRITES; import static org.elasticsearch.cluster.coordination.Reconfigurator.CLUSTER_AUTO_SHRINK_VOTING_CONFIGURATION; import static org.elasticsearch.discovery.PeerFinder.DISCOVERY_FIND_PEERS_INTERVAL_SETTING; +import static org.elasticsearch.gateway.GatewayService.STATE_NOT_RECOVERED_BLOCK; import static org.elasticsearch.node.Node.NODE_NAME_SETTING; import static org.elasticsearch.transport.TransportService.NOOP_TRANSPORT_INTERCEPTOR; import static org.hamcrest.Matchers.containsString; @@ -190,6 +194,45 @@ public void testRepeatableTests() throws Exception { assertEquals(result1, result2); } + /** + * This test was added to verify that state recovery is properly reset on a node after it has become master and successfully + * recovered a state (see {@link GatewayService}). The situation which triggers this with a decent likelihood is as follows: + * 3 master-eligible nodes (leader, follower1, follower2), the followers are shut down (leader remains), when followers come back + * one of them becomes leader and publishes first state (with STATE_NOT_RECOVERED_BLOCK) to old leader, which accepts it. + * Old leader is initiating an election at the same time, and wins election. It becomes leader again, but as it previously + * successfully completed state recovery, is never reset to a state where state recovery can be retried. + */ + public void testStateRecoveryResetAfterPreviousLeadership() { + final Cluster cluster = new Cluster(3); + cluster.runRandomly(); + cluster.stabilise(); + + final ClusterNode leader = cluster.getAnyLeader(); + final ClusterNode follower1 = cluster.getAnyNodeExcept(leader); + final ClusterNode follower2 = cluster.getAnyNodeExcept(leader, follower1); + + // restart follower1 and follower2 + for (ClusterNode clusterNode : Arrays.asList(follower1, follower2)) { + clusterNode.close(); + cluster.clusterNodes.forEach( + cn -> cluster.deterministicTaskQueue.scheduleNow(cn.onNode( + new Runnable() { + @Override + public void run() { + cn.transportService.disconnectFromNode(clusterNode.getLocalNode()); + } + + @Override + public String toString() { + return "disconnect from " + clusterNode.getLocalNode() + " after shutdown"; + } + }))); + cluster.clusterNodes.replaceAll(cn -> cn == clusterNode ? cn.restartedNode() : cn); + } + + cluster.stabilise(); + } + public void testCanUpdateClusterStateAfterStabilisation() { final Cluster cluster = new Cluster(randomIntBetween(1, 5)); cluster.runRandomly(); @@ -1524,6 +1567,10 @@ void stabilise(long stabilisationDurationMillis) { assertTrue(leaderId + " has been bootstrapped", leader.coordinator.isInitialConfigurationSet()); assertTrue(leaderId + " exists in its last-applied state", leader.getLastAppliedClusterState().getNodes().nodeExists(leaderId)); + assertThat(leaderId + " has no NO_MASTER_BLOCK", + leader.getLastAppliedClusterState().blocks().hasGlobalBlockWithId(NO_MASTER_BLOCK_ID), equalTo(false)); + assertThat(leaderId + " has no STATE_NOT_RECOVERED_BLOCK", + leader.getLastAppliedClusterState().blocks().hasGlobalBlock(STATE_NOT_RECOVERED_BLOCK), equalTo(false)); assertThat(leaderId + " has applied its state ", leader.getLastAppliedClusterState().getVersion(), isEqualToLeaderVersion); for (final ClusterNode clusterNode : clusterNodes) { @@ -1555,6 +1602,8 @@ void stabilise(long stabilisationDurationMillis) { equalTo(leader.getLocalNode())); assertThat(nodeId + " has no NO_MASTER_BLOCK", clusterNode.getLastAppliedClusterState().blocks().hasGlobalBlockWithId(NO_MASTER_BLOCK_ID), equalTo(false)); + assertThat(nodeId + " has no STATE_NOT_RECOVERED_BLOCK", + clusterNode.getLastAppliedClusterState().blocks().hasGlobalBlock(STATE_NOT_RECOVERED_BLOCK), equalTo(false)); } else { assertThat(nodeId + " is not following " + leaderId, clusterNode.coordinator.getMode(), is(CANDIDATE)); assertThat(nodeId + " has no master", clusterNode.getLastAppliedClusterState().nodes().getMasterNode(), nullValue()); @@ -1724,7 +1773,8 @@ class MockPersistedState implements PersistedState { } else { nodeEnvironment = null; delegate = new InMemoryPersistedState(0L, - clusterState(0L, 0L, localNode, VotingConfiguration.EMPTY_CONFIG, VotingConfiguration.EMPTY_CONFIG, 0L)); + ClusterStateUpdaters.addStateNotRecoveredBlock( + clusterState(0L, 0L, localNode, VotingConfiguration.EMPTY_CONFIG, VotingConfiguration.EMPTY_CONFIG, 0L))); } } catch (IOException e) { throw new UncheckedIOException("Unable to create MockPersistedState", e); @@ -1764,8 +1814,9 @@ class MockPersistedState implements PersistedState { clusterState.writeTo(outStream); StreamInput inStream = new NamedWriteableAwareStreamInput(outStream.bytes().streamInput(), new NamedWriteableRegistry(ClusterModule.getNamedWriteables())); + // adapt cluster state to new localNode instance and add blocks delegate = new InMemoryPersistedState(adaptCurrentTerm.apply(oldState.getCurrentTerm()), - ClusterState.readFrom(inStream, newLocalNode)); // adapts it to new localNode instance + ClusterStateUpdaters.addStateNotRecoveredBlock(ClusterState.readFrom(inStream, newLocalNode))); } } catch (IOException e) { throw new UncheckedIOException("Unable to create MockPersistedState", e); @@ -1869,15 +1920,19 @@ protected Optional getDisruptableMockTransport(Transpo transportService)); final Collection> onJoinValidators = Collections.singletonList((dn, cs) -> extraJoinValidators.forEach(validator -> validator.accept(dn, cs))); + final AllocationService allocationService = ESAllocationTestCase.createAllocationService(Settings.EMPTY); coordinator = new Coordinator("test_node", settings, clusterSettings, transportService, writableRegistry(), - ESAllocationTestCase.createAllocationService(Settings.EMPTY), masterService, this::getPersistedState, + allocationService, masterService, this::getPersistedState, Cluster.this::provideSeedHosts, clusterApplierService, onJoinValidators, Randomness.get()); masterService.setClusterStatePublisher(coordinator); + final GatewayService gatewayService = new GatewayService(settings, allocationService, clusterService, + deterministicTaskQueue.getThreadPool(this::onNode), null, coordinator); logger.trace("starting up [{}]", localNode); transportService.start(); transportService.acceptIncomingRequests(); coordinator.start(); + gatewayService.start(); clusterService.start(); coordinator.startInitialJoin(); } From b57cbb67e5490495761ec0929c347db92d6aa65c Mon Sep 17 00:00:00 2001 From: James Rodewig Date: Tue, 28 May 2019 08:47:18 -0400 Subject: [PATCH 143/224] [DOCS] Escape cross-ref link comma for Asciidoctor (#42402) --- docs/reference/rollup/rollup-api.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/reference/rollup/rollup-api.asciidoc b/docs/reference/rollup/rollup-api.asciidoc index 099686fb4329d..5981336d0a054 100644 --- a/docs/reference/rollup/rollup-api.asciidoc +++ b/docs/reference/rollup/rollup-api.asciidoc @@ -9,7 +9,7 @@ * <>, <>, * <>, <>, -* <> +* <> * <> [float] From 69ef51d1418915148a313f13a4dad652a7dbc922 Mon Sep 17 00:00:00 2001 From: James Rodewig Date: Tue, 28 May 2019 08:52:59 -0400 Subject: [PATCH 144/224] [DOCS] Fix API Quick Reference rollup attribute for Asciidoctor (#42403) --- docs/reference/rollup/api-quickref.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/reference/rollup/api-quickref.asciidoc b/docs/reference/rollup/api-quickref.asciidoc index 21eefefb4b12b..d1ea03b6284d7 100644 --- a/docs/reference/rollup/api-quickref.asciidoc +++ b/docs/reference/rollup/api-quickref.asciidoc @@ -5,7 +5,7 @@ experimental[] -Most {rollup} endpoints have the following base: +Most rollup endpoints have the following base: [source,js] ---- From f2cde97a3b3d865ac6efba01f5b32a9fc4e8fb40 Mon Sep 17 00:00:00 2001 From: Benjamin Trent Date: Tue, 28 May 2019 10:03:39 -0400 Subject: [PATCH 145/224] [ML] adding delayed_data_check_config to datafeed update docs (#42095) * [ML] adding delayed_data_check_config to datafeed update docs * [DOCS] Edits delayed data configuration details --- .../ml/apis/datafeedresource.asciidoc | 30 ++++++++++--------- docs/reference/ml/apis/put-datafeed.asciidoc | 9 +++--- .../ml/apis/update-datafeed.asciidoc | 9 +++++- 3 files changed, 29 insertions(+), 19 deletions(-) diff --git a/docs/reference/ml/apis/datafeedresource.asciidoc b/docs/reference/ml/apis/datafeedresource.asciidoc index 33fce3dbf7c9d..5c1e3e74a6ae8 100644 --- a/docs/reference/ml/apis/datafeedresource.asciidoc +++ b/docs/reference/ml/apis/datafeedresource.asciidoc @@ -61,12 +61,12 @@ A {dfeed} resource has the following properties: `delayed_data_check_config`:: (object) Specifies whether the data feed checks for missing data and - and the size of the window. For example: + the size of the window. For example: `{"enabled": true, "check_window": "1h"}` See <>. [[ml-datafeed-chunking-config]] -==== Chunking Configuration Objects +==== Chunking configuration objects {dfeeds-cap} might be required to search over long time periods, for several months or years. This search is split into time chunks in order to ensure the load @@ -88,31 +88,33 @@ A chunking configuration object has the following properties: For example: `3h`. [[ml-datafeed-delayed-data-check-config]] -==== Delayed Data Check Configuration Objects +==== Delayed data check configuration objects The {dfeed} can optionally search over indices that have already been read in -an effort to find if any data has since been added to the index. If missing data -is found, it is a good indication that the `query_delay` option is set too low and -the data is being indexed after the {dfeed} has passed that moment in time. See +an effort to determine whether any data has subsequently been added to the index. +If missing data is found, it is a good indication that the `query_delay` option +is set too low and the data is being indexed after the {dfeed} has passed that +moment in time. See {stack-ov}/ml-delayed-data-detection.html[Working with delayed data]. -This check only runs on real-time {dfeeds} +This check runs only on real-time {dfeeds}. The configuration object has the following properties: `enabled`:: - (boolean) Should the {dfeed} periodically check for data being indexed after reading. - Defaults to `true` + (boolean) Specifies whether the {dfeed} periodically checks for delayed data. + Defaults to `true`. `check_window`:: - (time units) The window of time before the latest finalized bucket that should be searched - for late data. Defaults to `null` which causes an appropriate `check_window` to be calculated - when the real-time {dfeed} runs. - The default `check_window` span calculation is the max between `2h` or `8 * bucket_span`. + (time units) The window of time that is searched for late data. This window of + time ends with the latest finalized bucket. It defaults to `null`, which + causes an appropriate `check_window` to be calculated when the real-time + {dfeed} runs. In particular, the default `check_window` span calculation is + based on the maximum of `2h` or `8 * bucket_span`. [float] [[ml-datafeed-counts]] -==== {dfeed-cap} Counts +==== {dfeed-cap} counts The get {dfeed} statistics API provides information about the operational progress of a {dfeed}. All of these properties are informational; you cannot diff --git a/docs/reference/ml/apis/put-datafeed.asciidoc b/docs/reference/ml/apis/put-datafeed.asciidoc index 52728dd093da5..2e0f6700191cd 100644 --- a/docs/reference/ml/apis/put-datafeed.asciidoc +++ b/docs/reference/ml/apis/put-datafeed.asciidoc @@ -45,6 +45,11 @@ IMPORTANT: You must use {kib} or this API to create a {dfeed}. Do not put a {df (object) Specifies how data searches are split into time chunks. See <>. +`delayed_data_check_config`:: + (object) Specifies whether the data feed checks for missing data and + the size of the window. See + <>. + `frequency`:: (time units) The interval at which scheduled queries are made while the {dfeed} runs in real time. The default value is either the bucket span for short @@ -82,10 +87,6 @@ IMPORTANT: You must use {kib} or this API to create a {dfeed}. Do not put a {df (unsigned integer) The `size` parameter that is used in {es} searches. The default value is `1000`. -`delayed_data_check_config`:: - (object) Specifies if and with how large a window should the data feed check - for missing data. See <>. - For more information about these properties, see <>. diff --git a/docs/reference/ml/apis/update-datafeed.asciidoc b/docs/reference/ml/apis/update-datafeed.asciidoc index a370c1acef9d7..63878913c7f1a 100644 --- a/docs/reference/ml/apis/update-datafeed.asciidoc +++ b/docs/reference/ml/apis/update-datafeed.asciidoc @@ -14,7 +14,10 @@ Updates certain properties of a {dfeed}. `POST _ml/datafeeds//_update` -//===== Description +===== Description + +NOTE: If you update the `delayed_data_check_config` property, you must stop and +start the {dfeed} for the change to be applied. ==== Path Parameters @@ -32,6 +35,10 @@ The following properties can be updated after the {dfeed} is created: `chunking_config`:: (object) Specifies how data searches are split into time chunks. See <>. + +`delayed_data_check_config`:: + (object) Specifies whether the data feed checks for missing data and + the size of the window. See <>. `frequency`:: (time units) The interval at which scheduled queries are made while the From 1ef00e368b4c25bacd59a2598c60b510044da163 Mon Sep 17 00:00:00 2001 From: David Turner Date: Tue, 28 May 2019 15:23:55 +0100 Subject: [PATCH 146/224] Avoid loading retention leases while writing them (#42620) Resolves #41430. --- .../org/elasticsearch/index/seqno/ReplicationTracker.java | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/server/src/main/java/org/elasticsearch/index/seqno/ReplicationTracker.java b/server/src/main/java/org/elasticsearch/index/seqno/ReplicationTracker.java index c272816ed3815..1a67eb55e0576 100644 --- a/server/src/main/java/org/elasticsearch/index/seqno/ReplicationTracker.java +++ b/server/src/main/java/org/elasticsearch/index/seqno/ReplicationTracker.java @@ -346,7 +346,10 @@ public synchronized void updateRetentionLeasesOnReplica(final RetentionLeases re * @throws IOException if an I/O exception occurs reading the retention leases */ public RetentionLeases loadRetentionLeases(final Path path) throws IOException { - final RetentionLeases retentionLeases = RetentionLeases.FORMAT.loadLatestState(logger, NamedXContentRegistry.EMPTY, path); + final RetentionLeases retentionLeases; + synchronized (retentionLeasePersistenceLock) { + retentionLeases = RetentionLeases.FORMAT.loadLatestState(logger, NamedXContentRegistry.EMPTY, path); + } // TODO after backporting we expect this never to happen in 8.x, so adjust this to throw an exception instead. assert Version.CURRENT.major <= 8 : "throw an exception instead of returning EMPTY on null"; From ae783879626ace085d30c3debd0a247de8535301 Mon Sep 17 00:00:00 2001 From: Vigya Sharma Date: Tue, 28 May 2019 20:12:46 +0530 Subject: [PATCH 147/224] Validate routing commands using updated routing state (#42066) When multiple commands are called in sequence, fetch shards from mutable, up-to-date routing nodes to ensure each command's changes are visible to subsequent commands. This addresses an issue uncovered during work on #41050. --- ...AllocateEmptyPrimaryAllocationCommand.java | 13 +++- .../AllocateReplicaAllocationCommand.java | 26 +++++-- ...AllocateStalePrimaryAllocationCommand.java | 13 +++- .../allocation/AllocationCommandsTests.java | 72 +++++++++++++++++++ 4 files changed, 112 insertions(+), 12 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocateEmptyPrimaryAllocationCommand.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocateEmptyPrimaryAllocationCommand.java index 4d037570dd266..2e3219e67c7ae 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocateEmptyPrimaryAllocationCommand.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocateEmptyPrimaryAllocationCommand.java @@ -110,13 +110,20 @@ public RerouteExplanation execute(RoutingAllocation allocation, boolean explain) return explainOrThrowMissingRoutingNode(allocation, explain, discoNode); } - final ShardRouting shardRouting; try { - shardRouting = allocation.routingTable().shardRoutingTable(index, shardId).primaryShard(); + allocation.routingTable().shardRoutingTable(index, shardId).primaryShard(); } catch (IndexNotFoundException | ShardNotFoundException e) { return explainOrThrowRejectedCommand(explain, allocation, e); } - if (shardRouting.unassigned() == false) { + + ShardRouting shardRouting = null; + for (ShardRouting shard : allocation.routingNodes().unassigned()) { + if (shard.getIndexName().equals(index) && shard.getId() == shardId && shard.primary()) { + shardRouting = shard; + break; + } + } + if (shardRouting == null) { return explainOrThrowRejectedCommand(explain, allocation, "primary [" + index + "][" + shardId + "] is already assigned"); } diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocateReplicaAllocationCommand.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocateReplicaAllocationCommand.java index 709681f2b2008..5e1bcd81bb5fa 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocateReplicaAllocationCommand.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocateReplicaAllocationCommand.java @@ -23,7 +23,6 @@ import org.elasticsearch.cluster.routing.RoutingNode; import org.elasticsearch.cluster.routing.RoutingNodes; import org.elasticsearch.cluster.routing.ShardRouting; -import org.elasticsearch.cluster.routing.ShardRoutingState; import org.elasticsearch.cluster.routing.allocation.RerouteExplanation; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; import org.elasticsearch.cluster.routing.allocation.decider.Decision; @@ -35,6 +34,7 @@ import org.elasticsearch.index.shard.ShardNotFoundException; import java.io.IOException; +import java.util.ArrayList; import java.util.List; /** @@ -101,20 +101,34 @@ public RerouteExplanation execute(RoutingAllocation allocation, boolean explain) return explainOrThrowMissingRoutingNode(allocation, explain, discoNode); } - final ShardRouting primaryShardRouting; try { - primaryShardRouting = allocation.routingTable().shardRoutingTable(index, shardId).primaryShard(); + allocation.routingTable().shardRoutingTable(index, shardId).primaryShard(); } catch (IndexNotFoundException | ShardNotFoundException e) { return explainOrThrowRejectedCommand(explain, allocation, e); } - if (primaryShardRouting.unassigned()) { + + ShardRouting primaryShardRouting = null; + for (RoutingNode node : allocation.routingNodes()) { + for (ShardRouting shard : node) { + if (shard.getIndexName().equals(index) && shard.getId() == shardId && shard.primary()) { + primaryShardRouting = shard; + break; + } + } + } + if (primaryShardRouting == null) { return explainOrThrowRejectedCommand(explain, allocation, "trying to allocate a replica shard [" + index + "][" + shardId + "], while corresponding primary shard is still unassigned"); } - List replicaShardRoutings = - allocation.routingTable().shardRoutingTable(index, shardId).replicaShardsWithState(ShardRoutingState.UNASSIGNED); + List replicaShardRoutings = new ArrayList<>(); + for (ShardRouting shard : allocation.routingNodes().unassigned()) { + if (shard.getIndexName().equals(index) && shard.getId() == shardId && shard.primary() == false) { + replicaShardRoutings.add(shard); + } + } + ShardRouting shardRouting; if (replicaShardRoutings.isEmpty()) { return explainOrThrowRejectedCommand(explain, allocation, diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocateStalePrimaryAllocationCommand.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocateStalePrimaryAllocationCommand.java index f4c9aba17d71e..7e645c2cfcb6f 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocateStalePrimaryAllocationCommand.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocateStalePrimaryAllocationCommand.java @@ -108,13 +108,20 @@ public RerouteExplanation execute(RoutingAllocation allocation, boolean explain) return explainOrThrowMissingRoutingNode(allocation, explain, discoNode); } - final ShardRouting shardRouting; try { - shardRouting = allocation.routingTable().shardRoutingTable(index, shardId).primaryShard(); + allocation.routingTable().shardRoutingTable(index, shardId).primaryShard(); } catch (IndexNotFoundException | ShardNotFoundException e) { return explainOrThrowRejectedCommand(explain, allocation, e); } - if (shardRouting.unassigned() == false) { + + ShardRouting shardRouting = null; + for (ShardRouting shard : allocation.routingNodes().unassigned()) { + if (shard.getIndexName().equals(index) && shard.getId() == shardId && shard.primary()) { + shardRouting = shard; + break; + } + } + if (shardRouting == null) { return explainOrThrowRejectedCommand(explain, allocation, "primary [" + index + "][" + shardId + "] is already assigned"); } diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationCommandsTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationCommandsTests.java index c966e3cac27dc..1405be54fd51e 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationCommandsTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationCommandsTests.java @@ -677,4 +677,76 @@ public void testMoveShardFromNonDataNode() { assertEquals("[move_allocation] can't move [test][0] from " + node2 + " to " + node1 + ": source [" + node2.getName() + "] is not a data node.", e.getMessage()); } + + public void testConflictingCommandsInSingleRequest() { + AllocationService allocation = createAllocationService(Settings.builder() + .put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), "none") + .put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), "none") + .build()); + + final String index1 = "test1"; + final String index2 = "test2"; + final String index3 = "test3"; + logger.info("--> building initial routing table"); + MetaData metaData = MetaData.builder() + .put(IndexMetaData.builder(index1).settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1) + .putInSyncAllocationIds(0, Collections.singleton("randomAllocID")) + .putInSyncAllocationIds(1, Collections.singleton("randomAllocID2"))) + .put(IndexMetaData.builder(index2).settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1) + .putInSyncAllocationIds(0, Collections.singleton("randomAllocID")) + .putInSyncAllocationIds(1, Collections.singleton("randomAllocID2"))) + .put(IndexMetaData.builder(index3).settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1) + .putInSyncAllocationIds(0, Collections.singleton("randomAllocID")) + .putInSyncAllocationIds(1, Collections.singleton("randomAllocID2"))) + .build(); + RoutingTable routingTable = RoutingTable.builder() + .addAsRecovery(metaData.index(index1)) + .addAsRecovery(metaData.index(index2)) + .addAsRecovery(metaData.index(index3)) + .build(); + ClusterState clusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) + .metaData(metaData).routingTable(routingTable).build(); + + final String node1 = "node1"; + final String node2 = "node2"; + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder() + .add(newNode(node1)) + .add(newNode(node2)) + ).build(); + final ClusterState finalClusterState = allocation.reroute(clusterState, "reroute"); + + logger.info("--> allocating same index primary in multiple commands should fail"); + assertThat(expectThrows(IllegalArgumentException.class, () -> { + allocation.reroute(finalClusterState, + new AllocationCommands( + new AllocateStalePrimaryAllocationCommand(index1, 0, node1, true), + new AllocateStalePrimaryAllocationCommand(index1, 0, node2, true) + ), false, false); + }).getMessage(), containsString("primary [" + index1 + "][0] is already assigned")); + + assertThat(expectThrows(IllegalArgumentException.class, () -> { + allocation.reroute(finalClusterState, + new AllocationCommands( + new AllocateEmptyPrimaryAllocationCommand(index2, 0, node1, true), + new AllocateEmptyPrimaryAllocationCommand(index2, 0, node2, true) + ), false, false); + }).getMessage(), containsString("primary [" + index2 + "][0] is already assigned")); + + + clusterState = allocation.reroute(clusterState, + new AllocationCommands(new AllocateEmptyPrimaryAllocationCommand(index3, 0, node1, true)), false, false).getClusterState(); + clusterState = allocation.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)); + + final ClusterState updatedClusterState = clusterState; + assertThat(updatedClusterState.getRoutingNodes().node(node1).shardsWithState(STARTED).size(), equalTo(1)); + + logger.info("--> subsequent replica allocation fails as all configured replicas have been allocated"); + assertThat(expectThrows(IllegalArgumentException.class, () -> { + allocation.reroute(updatedClusterState, + new AllocationCommands( + new AllocateReplicaAllocationCommand(index3, 0, node2), + new AllocateReplicaAllocationCommand(index3, 0, node2) + ), false, false); + }).getMessage(), containsString("all copies of [" + index3 + "][0] are already assigned. Use the move allocation command instead")); + } } From 4a08b3d1c94cc5821841b6682ccc9606c4095112 Mon Sep 17 00:00:00 2001 From: Tal Levy Date: Tue, 28 May 2019 07:51:04 -0700 Subject: [PATCH 148/224] remove 6.4.x version constants (#42127) relates refactoring initiative #41164. --- .../action/PainlessExecuteAction.java | 18 ++------ .../main/java/org/elasticsearch/Version.java | 16 ------- .../index/mapper/TextFieldMapper.java | 15 +++---- .../search/slice/SliceBuilder.java | 8 +--- .../indices/close/CloseIndexRequestTests.java | 8 ++-- .../coordination/JoinTaskExecutorTests.java | 25 +++++------ .../index/mapper/NestedObjectMapperTests.java | 42 +++++++++++++++++++ .../index/mapper/TextFieldMapperTests.java | 19 ++------- .../query/SpanMultiTermQueryBuilderTests.java | 24 ++++------- .../elasticsearch/license/LicenseUtils.java | 12 +----- .../core/ml/action/GetJobsStatsAction.java | 9 +--- .../core/ml/job/config/DetectionRule.java | 3 -- .../xpack/core/ml/job/config/Detector.java | 6 +-- .../xpack/core/ml/job/config/MlFilter.java | 13 ++---- .../ml/job/results/CategoryDefinition.java | 9 +--- .../ml/action/TransportOpenJobAction.java | 9 ---- .../action/TransportOpenJobActionTests.java | 30 ------------- .../action/PutJobStateMachineTests.java | 5 ++- 18 files changed, 90 insertions(+), 181 deletions(-) diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/action/PainlessExecuteAction.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/action/PainlessExecuteAction.java index cb407978da83e..7c8a368d38a58 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/action/PainlessExecuteAction.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/action/PainlessExecuteAction.java @@ -30,7 +30,6 @@ import org.apache.lucene.search.Scorer; import org.apache.lucene.search.Weight; import org.apache.lucene.store.RAMDirectory; -import org.elasticsearch.Version; import org.elasticsearch.action.Action; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionResponse; @@ -313,25 +312,16 @@ public ActionRequestValidationException validate() { public void readFrom(StreamInput in) throws IOException { super.readFrom(in); script = new Script(in); - if (in.getVersion().before(Version.V_6_4_0)) { - byte scriptContextId = in.readByte(); - assert scriptContextId == 0; - } else { - context = fromScriptContextName(in.readString()); - contextSetup = in.readOptionalWriteable(ContextSetup::new); - } + context = fromScriptContextName(in.readString()); + contextSetup = in.readOptionalWriteable(ContextSetup::new); } @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); script.writeTo(out); - if (out.getVersion().before(Version.V_6_4_0)) { - out.writeByte((byte) 0); - } else { - out.writeString(context.name); - out.writeOptionalWriteable(contextSetup); - } + out.writeString(context.name); + out.writeOptionalWriteable(contextSetup); } // For testing only: diff --git a/server/src/main/java/org/elasticsearch/Version.java b/server/src/main/java/org/elasticsearch/Version.java index c685d39c7562f..48d37957844e2 100644 --- a/server/src/main/java/org/elasticsearch/Version.java +++ b/server/src/main/java/org/elasticsearch/Version.java @@ -46,14 +46,6 @@ public class Version implements Comparable, ToXContentFragment { */ public static final int V_EMPTY_ID = 0; public static final Version V_EMPTY = new Version(V_EMPTY_ID, org.apache.lucene.util.Version.LATEST); - public static final int V_6_4_0_ID = 6040099; - public static final Version V_6_4_0 = new Version(V_6_4_0_ID, org.apache.lucene.util.Version.LUCENE_7_4_0); - public static final int V_6_4_1_ID = 6040199; - public static final Version V_6_4_1 = new Version(V_6_4_1_ID, org.apache.lucene.util.Version.LUCENE_7_4_0); - public static final int V_6_4_2_ID = 6040299; - public static final Version V_6_4_2 = new Version(V_6_4_2_ID, org.apache.lucene.util.Version.LUCENE_7_4_0); - public static final int V_6_4_3_ID = 6040399; - public static final Version V_6_4_3 = new Version(V_6_4_3_ID, org.apache.lucene.util.Version.LUCENE_7_4_0); public static final int V_6_5_0_ID = 6050099; public static final Version V_6_5_0 = new Version(V_6_5_0_ID, org.apache.lucene.util.Version.LUCENE_7_5_0); public static final int V_6_5_1_ID = 6050199; @@ -136,14 +128,6 @@ public static Version fromId(int id) { return V_6_5_1; case V_6_5_0_ID: return V_6_5_0; - case V_6_4_3_ID: - return V_6_4_3; - case V_6_4_2_ID: - return V_6_4_2; - case V_6_4_1_ID: - return V_6_4_1; - case V_6_4_0_ID: - return V_6_4_0; case V_EMPTY_ID: return V_EMPTY; default: diff --git a/server/src/main/java/org/elasticsearch/index/mapper/TextFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/TextFieldMapper.java index 8a3203ad8e7e0..6906ceb113b9c 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/TextFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/TextFieldMapper.java @@ -54,7 +54,6 @@ import org.apache.lucene.util.automaton.Automata; import org.apache.lucene.util.automaton.Automaton; import org.apache.lucene.util.automaton.Operations; -import org.elasticsearch.Version; import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.lucene.search.MultiPhrasePrefixQuery; import org.elasticsearch.common.settings.Settings; @@ -193,15 +192,11 @@ public TextFieldMapper build(BuilderContext context) { } // Copy the index options of the main field to allow phrase queries on // the prefix field. - if (context.indexCreatedVersion().onOrAfter(Version.V_6_4_0)) { - if (fieldType.indexOptions() == IndexOptions.DOCS_AND_FREQS) { - // frequencies are not needed because prefix queries always use a constant score - prefixFieldType.setIndexOptions(IndexOptions.DOCS); - } else { - prefixFieldType.setIndexOptions(fieldType.indexOptions()); - } - } else if (fieldType.indexOptions() == IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS) { - prefixFieldType.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS); + if (fieldType.indexOptions() == IndexOptions.DOCS_AND_FREQS) { + // frequencies are not needed because prefix queries always use a constant score + prefixFieldType.setIndexOptions(IndexOptions.DOCS); + } else { + prefixFieldType.setIndexOptions(fieldType.indexOptions()); } if (fieldType.storeTermVectorOffsets()) { prefixFieldType.setStoreTermVectorOffsets(true); diff --git a/server/src/main/java/org/elasticsearch/search/slice/SliceBuilder.java b/server/src/main/java/org/elasticsearch/search/slice/SliceBuilder.java index 3c86b21a0873d..01924e938dcd0 100644 --- a/server/src/main/java/org/elasticsearch/search/slice/SliceBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/slice/SliceBuilder.java @@ -217,19 +217,15 @@ public Query toFilter(ClusterService clusterService, ShardSearchRequest request, int shardId = request.shardId().id(); int numShards = context.getIndexSettings().getNumberOfShards(); - if (minNodeVersion.onOrAfter(Version.V_6_4_0) && - (request.preference() != null || request.indexRoutings().length > 0)) { + if (request.preference() != null || request.indexRoutings().length > 0) { GroupShardsIterator group = buildShardIterator(clusterService, request); assert group.size() <= numShards : "index routing shards: " + group.size() + " cannot be greater than total number of shards: " + numShards; if (group.size() < numShards) { - /** + /* * The routing of this request targets a subset of the shards of this index so we need to we retrieve * the original {@link GroupShardsIterator} and compute the request shard id and number of * shards from it. - * This behavior has been added in {@link Version#V_6_4_0} so if there is another node in the cluster - * with an older version we use the original shard id and number of shards in order to ensure that all - * slices use the same numbers. */ numShards = group.size(); int ord = 0; diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/close/CloseIndexRequestTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/close/CloseIndexRequestTests.java index 985b4304a32f4..df940012bf24d 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/close/CloseIndexRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/close/CloseIndexRequestTests.java @@ -28,8 +28,6 @@ import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.VersionUtils; -import static org.elasticsearch.test.VersionUtils.randomVersionBetween; - public class CloseIndexRequestTests extends ESTestCase { public void testSerialization() throws Exception { @@ -54,7 +52,8 @@ public void testBwcSerialization() throws Exception { { final CloseIndexRequest request = randomRequest(); try (BytesStreamOutput out = new BytesStreamOutput()) { - out.setVersion(randomVersionBetween(random(), Version.V_6_4_0, VersionUtils.getPreviousVersion(Version.V_7_2_0))); + out.setVersion(VersionUtils.randomVersionBetween(random(), VersionUtils.getFirstVersion(), + VersionUtils.getPreviousVersion(Version.V_7_2_0))); request.writeTo(out); try (StreamInput in = out.bytes().streamInput()) { @@ -77,7 +76,8 @@ public void testBwcSerialization() throws Exception { final CloseIndexRequest deserializedRequest = new CloseIndexRequest(); try (StreamInput in = out.bytes().streamInput()) { - in.setVersion(randomVersionBetween(random(), Version.V_6_4_0, VersionUtils.getPreviousVersion(Version.V_7_2_0))); + in.setVersion(VersionUtils.randomVersionBetween(random(), VersionUtils.getFirstVersion(), + VersionUtils.getPreviousVersion(Version.V_7_2_0))); deserializedRequest.readFrom(in); } assertEquals(sample.getParentTask(), deserializedRequest.getParentTask()); diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/JoinTaskExecutorTests.java b/server/src/test/java/org/elasticsearch/cluster/coordination/JoinTaskExecutorTests.java index e20559ca00561..f2bb3bd3cc03b 100644 --- a/server/src/test/java/org/elasticsearch/cluster/coordination/JoinTaskExecutorTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/JoinTaskExecutorTests.java @@ -31,7 +31,6 @@ import static org.elasticsearch.test.VersionUtils.getPreviousVersion; import static org.elasticsearch.test.VersionUtils.maxCompatibleVersion; import static org.elasticsearch.test.VersionUtils.randomCompatibleVersion; -import static org.elasticsearch.test.VersionUtils.randomVersion; import static org.elasticsearch.test.VersionUtils.randomVersionBetween; public class JoinTaskExecutorTests extends ESTestCase { @@ -69,27 +68,25 @@ public void testPreventJoinClusterWithUnsupportedIndices() { public void testPreventJoinClusterWithUnsupportedNodeVersions() { DiscoveryNodes.Builder builder = DiscoveryNodes.builder(); - final Version version = randomVersion(random()); + final Version version = randomCompatibleVersion(random(), Version.CURRENT); builder.add(new DiscoveryNode(UUIDs.base64UUID(), buildNewFakeTransportAddress(), version)); builder.add(new DiscoveryNode(UUIDs.base64UUID(), buildNewFakeTransportAddress(), randomCompatibleVersion(random(), version))); DiscoveryNodes nodes = builder.build(); final Version maxNodeVersion = nodes.getMaxNodeVersion(); final Version minNodeVersion = nodes.getMinNodeVersion(); - if (maxNodeVersion.onOrAfter(Version.V_7_0_0)) { - final Version tooLow = getPreviousVersion(maxNodeVersion.minimumCompatibilityVersion()); - expectThrows(IllegalStateException.class, () -> { - if (randomBoolean()) { - JoinTaskExecutor.ensureNodesCompatibility(tooLow, nodes); - } else { - JoinTaskExecutor.ensureNodesCompatibility(tooLow, minNodeVersion, maxNodeVersion); - } - }); - } - Version oldMajor = Version.V_6_4_0.minimumCompatibilityVersion(); - expectThrows(IllegalStateException.class, () -> JoinTaskExecutor.ensureMajorVersionBarrier(oldMajor, minNodeVersion)); + final Version tooLow = getPreviousVersion(maxNodeVersion.minimumCompatibilityVersion()); + expectThrows(IllegalStateException.class, () -> { + if (randomBoolean()) { + JoinTaskExecutor.ensureNodesCompatibility(tooLow, nodes); + } else { + JoinTaskExecutor.ensureNodesCompatibility(tooLow, minNodeVersion, maxNodeVersion); + } + }); + Version oldMajor = minNodeVersion.minimumCompatibilityVersion(); + expectThrows(IllegalStateException.class, () -> JoinTaskExecutor.ensureMajorVersionBarrier(oldMajor, minNodeVersion)); final Version minGoodVersion = maxNodeVersion.major == minNodeVersion.major ? // we have to stick with the same major diff --git a/server/src/test/java/org/elasticsearch/index/mapper/NestedObjectMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/NestedObjectMapperTests.java index edca517830833..5a2fe0233ef05 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/NestedObjectMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/NestedObjectMapperTests.java @@ -20,6 +20,7 @@ package org.elasticsearch.index.mapper; import org.apache.lucene.index.IndexableField; +import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.compress.CompressedXContent; @@ -32,6 +33,7 @@ import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESSingleNodeTestCase; import org.elasticsearch.test.InternalSettingsPlugin; +import org.elasticsearch.test.VersionUtils; import java.io.IOException; import java.io.UncheckedIOException; @@ -695,4 +697,44 @@ protected boolean forbidPrivateIndexSettings() { */ return false; } + + public void testReorderParent() throws IOException { + String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type").startObject("properties") + .startObject("nested1").field("type", "nested").endObject() + .endObject().endObject().endObject()); + + DocumentMapper docMapper = createIndex("test", + Settings.builder().put(IndexMetaData.SETTING_INDEX_VERSION_CREATED.getKey(), + VersionUtils.randomIndexCompatibleVersion(random())).build()) + .mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping)); + + assertThat(docMapper.hasNestedObjects(), equalTo(true)); + ObjectMapper nested1Mapper = docMapper.objectMappers().get("nested1"); + assertThat(nested1Mapper.nested().isNested(), equalTo(true)); + + ParsedDocument doc = docMapper.parse(new SourceToParse("test", "type", "1", + BytesReference.bytes(XContentFactory.jsonBuilder() + .startObject() + .field("field", "value") + .startArray("nested1") + .startObject() + .field("field1", "1") + .field("field2", "2") + .endObject() + .startObject() + .field("field1", "3") + .field("field2", "4") + .endObject() + .endArray() + .endObject()), + XContentType.JSON)); + + assertThat(doc.docs().size(), equalTo(3)); + assertThat(doc.docs().get(0).get(TypeFieldMapper.NAME), equalTo(nested1Mapper.nestedTypePathAsString())); + assertThat(doc.docs().get(0).get("nested1.field1"), equalTo("1")); + assertThat(doc.docs().get(0).get("nested1.field2"), equalTo("2")); + assertThat(doc.docs().get(1).get("nested1.field1"), equalTo("3")); + assertThat(doc.docs().get(1).get("nested1.field2"), equalTo("4")); + assertThat(doc.docs().get(2).get("field"), equalTo("value")); + } } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/TextFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/TextFieldMapperTests.java index 449d17a5b9bda..b2a8d40156f6b 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/TextFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/TextFieldMapperTests.java @@ -41,7 +41,6 @@ import org.apache.lucene.search.spans.SpanOrQuery; import org.apache.lucene.search.spans.SpanTermQuery; import org.apache.lucene.util.BytesRef; -import org.elasticsearch.Version; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; @@ -670,11 +669,7 @@ public void testIndexPrefixIndexTypes() throws IOException { FieldMapper prefix = (FieldMapper) mapper.mappers().getMapper("field._index_prefix"); FieldType ft = prefix.fieldType; - if (indexService.getIndexSettings().getIndexVersionCreated().onOrAfter(Version.V_6_4_0)) { - assertEquals(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, ft.indexOptions()); - } else { - assertEquals(IndexOptions.DOCS, ft.indexOptions()); - } + assertEquals(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, ft.indexOptions()); assertFalse(ft.storeTermVectors()); } @@ -691,11 +686,7 @@ public void testIndexPrefixIndexTypes() throws IOException { FieldMapper prefix = (FieldMapper) mapper.mappers().getMapper("field._index_prefix"); FieldType ft = prefix.fieldType; - if (indexService.getIndexSettings().getIndexVersionCreated().onOrAfter(Version.V_6_4_0)) { - assertEquals(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, ft.indexOptions()); - } else { - assertEquals(IndexOptions.DOCS, ft.indexOptions()); - } + assertEquals(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, ft.indexOptions()); assertTrue(ft.storeTermVectorOffsets()); } @@ -712,11 +703,7 @@ public void testIndexPrefixIndexTypes() throws IOException { FieldMapper prefix = (FieldMapper) mapper.mappers().getMapper("field._index_prefix"); FieldType ft = prefix.fieldType; - if (indexService.getIndexSettings().getIndexVersionCreated().onOrAfter(Version.V_6_4_0)) { - assertEquals(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, ft.indexOptions()); - } else { - assertEquals(IndexOptions.DOCS, ft.indexOptions()); - } + assertEquals(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, ft.indexOptions()); assertFalse(ft.storeTermVectorOffsets()); } } diff --git a/server/src/test/java/org/elasticsearch/index/query/SpanMultiTermQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/SpanMultiTermQueryBuilderTests.java index 4c59e25804a55..163c730294867 100644 --- a/server/src/test/java/org/elasticsearch/index/query/SpanMultiTermQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/SpanMultiTermQueryBuilderTests.java @@ -38,7 +38,6 @@ import org.apache.lucene.search.spans.SpanQuery; import org.apache.lucene.search.spans.SpanTermQuery; import org.apache.lucene.store.Directory; -import org.elasticsearch.Version; import org.elasticsearch.common.Strings; import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.io.stream.StreamOutput; @@ -193,22 +192,13 @@ public void testToQueryInnerTermQuery() throws IOException { final QueryShardContext context = createShardContext(); { Query query = new SpanMultiTermQueryBuilder(new PrefixQueryBuilder(fieldName, "foo")).toQuery(context); - if (context.getIndexSettings().getIndexVersionCreated().onOrAfter(Version.V_6_4_0)) { - assertThat(query, instanceOf(FieldMaskingSpanQuery.class)); - FieldMaskingSpanQuery fieldQuery = (FieldMaskingSpanQuery) query; - assertThat(fieldQuery.getMaskedQuery(), instanceOf(SpanTermQuery.class)); - assertThat(fieldQuery.getField(), equalTo("prefix_field")); - SpanTermQuery termQuery = (SpanTermQuery) fieldQuery.getMaskedQuery(); - assertThat(termQuery.getTerm().field(), equalTo("prefix_field._index_prefix")); - assertThat(termQuery.getTerm().text(), equalTo("foo")); - } else { - assertThat(query, instanceOf(SpanMultiTermQueryWrapper.class)); - SpanMultiTermQueryWrapper wrapper = (SpanMultiTermQueryWrapper) query; - assertThat(wrapper.getWrappedQuery(), instanceOf(PrefixQuery.class)); - PrefixQuery prefixQuery = (PrefixQuery) wrapper.getWrappedQuery(); - assertThat(prefixQuery.getField(), equalTo("prefix_field")); - assertThat(prefixQuery.getPrefix().text(), equalTo("foo")); - } + assertThat(query, instanceOf(FieldMaskingSpanQuery.class)); + FieldMaskingSpanQuery fieldQuery = (FieldMaskingSpanQuery) query; + assertThat(fieldQuery.getMaskedQuery(), instanceOf(SpanTermQuery.class)); + assertThat(fieldQuery.getField(), equalTo("prefix_field")); + SpanTermQuery termQuery = (SpanTermQuery) fieldQuery.getMaskedQuery(); + assertThat(termQuery.getTerm().field(), equalTo("prefix_field._index_prefix")); + assertThat(termQuery.getTerm().text(), equalTo("foo")); } { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicenseUtils.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicenseUtils.java index 4c8a558682b13..c39b37373ea13 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicenseUtils.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicenseUtils.java @@ -6,12 +6,9 @@ package org.elasticsearch.license; import org.elasticsearch.ElasticsearchSecurityException; -import org.elasticsearch.Version; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.rest.RestStatus; -import java.util.stream.StreamSupport; - public class LicenseUtils { public static final String EXPIRED_FEATURE_METADATA = "es.license.expired.feature"; @@ -58,13 +55,6 @@ public static boolean signatureNeedsUpdate(License license, DiscoveryNodes curre public static int compatibleLicenseVersion(DiscoveryNodes currentNodes) { assert License.VERSION_CRYPTO_ALGORITHMS == License.VERSION_CURRENT : "update this method when adding a new version"; - - if (StreamSupport.stream(currentNodes.spliterator(), false) - .allMatch(node -> node.getVersion().onOrAfter(Version.V_6_4_0))) { - // License.VERSION_CRYPTO_ALGORITHMS was introduced in 6.4.0 - return License.VERSION_CRYPTO_ALGORITHMS; - } else { - return License.VERSION_START_DATE; - } + return License.VERSION_CRYPTO_ALGORITHMS; } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetJobsStatsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetJobsStatsAction.java index 17de9dfc3522c..b71ca63e3218f 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetJobsStatsAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetJobsStatsAction.java @@ -6,7 +6,6 @@ package org.elasticsearch.xpack.core.ml.action; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.Version; import org.elasticsearch.action.Action; import org.elasticsearch.action.ActionRequestBuilder; import org.elasticsearch.action.ActionRequestValidationException; @@ -185,9 +184,7 @@ public JobStats(StreamInput in) throws IOException { node = in.readOptionalWriteable(DiscoveryNode::new); assignmentExplanation = in.readOptionalString(); openTime = in.readOptionalTimeValue(); - if (in.getVersion().onOrAfter(Version.V_6_4_0)) { - forecastStats = in.readOptionalWriteable(ForecastStats::new); - } + forecastStats = in.readOptionalWriteable(ForecastStats::new); } public String getJobId() { @@ -275,9 +272,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalWriteable(node); out.writeOptionalString(assignmentExplanation); out.writeOptionalTimeValue(openTime); - if (out.getVersion().onOrAfter(Version.V_6_4_0)) { - out.writeOptionalWriteable(forecastStats); - } + out.writeOptionalWriteable(forecastStats); } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/DetectionRule.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/DetectionRule.java index 25cd0cffe7b39..8ee63f6c11ea2 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/DetectionRule.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/DetectionRule.java @@ -5,7 +5,6 @@ */ package org.elasticsearch.xpack.core.ml.job.config; -import org.elasticsearch.Version; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -26,8 +25,6 @@ public class DetectionRule implements ToXContentObject, Writeable { - public static final Version VERSION_INTRODUCED = Version.V_6_4_0; - public static final ParseField DETECTION_RULE_FIELD = new ParseField("detection_rule"); public static final ParseField ACTIONS_FIELD = new ParseField("actions"); public static final ParseField SCOPE_FIELD = new ParseField("scope"); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/Detector.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/Detector.java index b27149ef412a5..4903a1383bcdd 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/Detector.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/Detector.java @@ -246,11 +246,7 @@ public void writeTo(StreamOutput out) throws IOException { } else { out.writeBoolean(false); } - if (out.getVersion().onOrAfter(DetectionRule.VERSION_INTRODUCED)) { - out.writeList(rules); - } else { - out.writeList(Collections.emptyList()); - } + out.writeList(rules); out.writeInt(detectorIndex); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/MlFilter.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/MlFilter.java index f2be3315b4dc7..4c60a4795e050 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/MlFilter.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/MlFilter.java @@ -5,7 +5,6 @@ */ package org.elasticsearch.xpack.core.ml.job.config; -import org.elasticsearch.Version; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.stream.StreamInput; @@ -76,20 +75,14 @@ private MlFilter(String id, String description, SortedSet items) { public MlFilter(StreamInput in) throws IOException { id = in.readString(); - if (in.getVersion().onOrAfter(Version.V_6_4_0)) { - description = in.readOptionalString(); - } else { - description = null; - } + description = in.readOptionalString(); items = new TreeSet<>(Arrays.asList(in.readStringArray())); } @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(id); - if (out.getVersion().onOrAfter(Version.V_6_4_0)) { - out.writeOptionalString(description); - } + out.writeOptionalString(description); out.writeStringArray(items.toArray(new String[items.size()])); } @@ -201,4 +194,4 @@ public MlFilter build() { return new MlFilter(id, description, items); } } -} \ No newline at end of file +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/CategoryDefinition.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/CategoryDefinition.java index 576bed5dcea2f..4b25b456c15b4 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/CategoryDefinition.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/CategoryDefinition.java @@ -5,7 +5,6 @@ */ package org.elasticsearch.xpack.core.ml.job.results; -import org.elasticsearch.Version; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -78,9 +77,7 @@ public CategoryDefinition(StreamInput in) throws IOException { regex = in.readString(); maxMatchingLength = in.readLong(); examples = new TreeSet<>(in.readStringList()); - if (in.getVersion().onOrAfter(Version.V_6_4_0)) { - grokPattern = in.readOptionalString(); - } + grokPattern = in.readOptionalString(); } @Override @@ -91,9 +88,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeString(regex); out.writeLong(maxMatchingLength); out.writeStringCollection(examples); - if (out.getVersion().onOrAfter(Version.V_6_4_0)) { - out.writeOptionalString(grokPattern); - } + out.writeOptionalString(grokPattern); } public String getJobId() { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportOpenJobAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportOpenJobAction.java index d2aea878b0f50..86d6fe3dfe59e 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportOpenJobAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportOpenJobAction.java @@ -45,7 +45,6 @@ import org.elasticsearch.xpack.core.ml.MlTasks; import org.elasticsearch.xpack.core.ml.action.FinalizeJobExecutionAction; import org.elasticsearch.xpack.core.ml.action.OpenJobAction; -import org.elasticsearch.xpack.core.ml.job.config.DetectionRule; import org.elasticsearch.xpack.core.ml.job.config.Job; import org.elasticsearch.xpack.core.ml.job.config.JobState; import org.elasticsearch.xpack.core.ml.job.config.JobTaskState; @@ -182,14 +181,6 @@ static PersistentTasksCustomMetaData.Assignment selectLeastLoadedMlNode(String j continue; } - if (jobHasRules(job) && node.getVersion().before(DetectionRule.VERSION_INTRODUCED)) { - String reason = "Not opening job [" + jobId + "] on node [" + nodeNameAndVersion(node) + "], because jobs using " + - "custom_rules require a node of version [" + DetectionRule.VERSION_INTRODUCED + "] or higher"; - logger.trace(reason); - reasons.add(reason); - continue; - } - long numberOfAssignedJobs = 0; int numberOfAllocatingJobs = 0; long assignedJobMemory = 0; diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportOpenJobActionTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportOpenJobActionTests.java index 5f1a4050d1f3e..a35b9d0968134 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportOpenJobActionTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportOpenJobActionTests.java @@ -402,36 +402,6 @@ public void testSelectLeastLoadedMlNode_noNodesMatchingModelSnapshotMinVersion() assertNull(result.getExecutorNode()); } - public void testSelectLeastLoadedMlNode_jobWithRulesButNoNodeMeetsRequiredVersion() { - Map nodeAttr = new HashMap<>(); - nodeAttr.put(MachineLearning.MAX_OPEN_JOBS_NODE_ATTR, "10"); - nodeAttr.put(MachineLearning.MACHINE_MEMORY_NODE_ATTR, "1000000000"); - Version version = Version.fromString("6.3.0"); - DiscoveryNodes nodes = DiscoveryNodes.builder() - .add(new DiscoveryNode("_node_name1", "_node_id1", new TransportAddress(InetAddress.getLoopbackAddress(), 9300), - nodeAttr, Collections.emptySet(), version)) - .add(new DiscoveryNode("_node_name2", "_node_id2", new TransportAddress(InetAddress.getLoopbackAddress(), 9301), - nodeAttr, Collections.emptySet(), version)) - .build(); - - PersistentTasksCustomMetaData.Builder tasksBuilder = PersistentTasksCustomMetaData.builder(); - addJobTask("job_with_rules", "_node_id1", null, tasksBuilder); - PersistentTasksCustomMetaData tasks = tasksBuilder.build(); - - ClusterState.Builder cs = ClusterState.builder(new ClusterName("_name")); - MetaData.Builder metaData = MetaData.builder(); - cs.nodes(nodes); - metaData.putCustom(PersistentTasksCustomMetaData.TYPE, tasks); - cs.metaData(metaData); - - Job job = jobWithRules("job_with_rules"); - Assignment result = TransportOpenJobAction.selectLeastLoadedMlNode("job_with_rules", job, cs.build(), 10, 2, 30, memoryTracker, - isMemoryTrackerRecentlyRefreshed, logger); - assertThat(result.getExplanation(), containsString( - "because jobs using custom_rules require a node of version [6.4.0] or higher")); - assertNull(result.getExecutorNode()); - } - public void testSelectLeastLoadedMlNode_jobWithRulesAndNodeMeetsRequiredVersion() { Map nodeAttr = new HashMap<>(); nodeAttr.put(MachineLearning.MAX_OPEN_JOBS_NODE_ATTR, "10"); diff --git a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/action/PutJobStateMachineTests.java b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/action/PutJobStateMachineTests.java index 3f49609953ea9..19f241440c438 100644 --- a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/action/PutJobStateMachineTests.java +++ b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/action/PutJobStateMachineTests.java @@ -25,6 +25,7 @@ import org.elasticsearch.persistent.PersistentTasksService; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.VersionUtils; import org.elasticsearch.xpack.core.rollup.ConfigTestHelpers; import org.elasticsearch.xpack.core.rollup.RollupField; import org.elasticsearch.xpack.core.rollup.action.PutRollupJobAction; @@ -303,7 +304,7 @@ public void testJobAlreadyInMapping() { doAnswer(invocation -> { GetMappingsResponse response = mock(GetMappingsResponse.class); Map m = new HashMap<>(2); - m.put(Rollup.ROLLUP_TEMPLATE_VERSION_FIELD, Version.V_6_4_0); + m.put(Rollup.ROLLUP_TEMPLATE_VERSION_FIELD, VersionUtils.randomIndexCompatibleVersion(random())); m.put(RollupField.ROLLUP_META, Collections.singletonMap(job.getConfig().getId(), job.getConfig())); MappingMetaData meta = new MappingMetaData(RollupField.TYPE_NAME, @@ -344,7 +345,7 @@ public void testAddJobToMapping() { doAnswer(invocation -> { GetMappingsResponse response = mock(GetMappingsResponse.class); Map m = new HashMap<>(2); - m.put(Rollup.ROLLUP_TEMPLATE_VERSION_FIELD, Version.V_6_4_0); + m.put(Rollup.ROLLUP_TEMPLATE_VERSION_FIELD, VersionUtils.randomIndexCompatibleVersion(random())); m.put(RollupField.ROLLUP_META, Collections.singletonMap(unrelatedJob.getId(), unrelatedJob)); MappingMetaData meta = new MappingMetaData(RollupField.TYPE_NAME, From 0db0e1330c55b6c5a4c529d6a4f6ecdca7dc4449 Mon Sep 17 00:00:00 2001 From: David Kyle Date: Tue, 28 May 2019 15:58:03 +0100 Subject: [PATCH 149/224] [ML Data Frame] Set DF task state when stopping (#42516) Set the state to stopped prior to persisting --- ...FrameTransformPersistentTasksExecutor.java | 13 +------------ .../transforms/DataFrameTransformTask.java | 19 +++++++------------ .../test/data_frame/transforms_start_stop.yml | 4 ++-- 3 files changed, 10 insertions(+), 26 deletions(-) diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformPersistentTasksExecutor.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformPersistentTasksExecutor.java index 9ed8da61d8feb..443d499dfefd1 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformPersistentTasksExecutor.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformPersistentTasksExecutor.java @@ -30,7 +30,6 @@ import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformConfig; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformState; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformStateAndStats; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformTaskState; import org.elasticsearch.xpack.core.indexing.IndexerState; import org.elasticsearch.xpack.core.scheduler.SchedulerEngine; import org.elasticsearch.xpack.dataframe.DataFrame; @@ -223,18 +222,8 @@ private void startTask(DataFrameTransformTask buildTask, DataFrameTransformTask.ClientDataFrameIndexerBuilder indexerBuilder, Long previousCheckpoint, ActionListener listener) { - // If we are stopped, and it is an initial run, this means we have never been started, - // attempt to start the task - buildTask.initializeIndexer(indexerBuilder); - // TODO isInitialRun is false after relocation?? - if (buildTask.getState().getTaskState().equals(DataFrameTransformTaskState.STOPPED) && buildTask.isInitialRun()) { - logger.info("Data frame transform [{}] created.", buildTask.getTransformId()); - buildTask.start(previousCheckpoint, listener); - } else { - logger.debug("No need to start task. Its current state is: {}", buildTask.getState().getIndexerState()); - listener.onResponse(new StartDataFrameTransformTaskAction.Response(true)); - } + buildTask.start(previousCheckpoint, listener); } @Override diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformTask.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformTask.java index 926f233c454d1..13deab6748c94 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformTask.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformTask.java @@ -174,13 +174,8 @@ public long getInProgressCheckpoint() { } } - public boolean isStopped() { - IndexerState currentState = getIndexer() == null ? initialIndexerState : getIndexer().getState(); - return currentState.equals(IndexerState.STOPPED); - } - - boolean isInitialRun() { - return getIndexer() != null && getIndexer().initialRun(); + public void setTaskStateStopped() { + taskState.set(DataFrameTransformTaskState.STOPPED); } /** @@ -235,11 +230,9 @@ public synchronized void start(Long startingCheckpoint, ActionListener public synchronized void stop() { if (getIndexer() == null) { - return; - } - // taskState is initialized as STOPPED and is updated in tandem with the indexerState - // Consequently, if it is STOPPED, we consider the whole task STOPPED. - if (taskState.get() == DataFrameTransformTaskState.STOPPED) { + // If there is no indexer the task has not been triggered + // but it still needs to be stopped and removed + shutdown(); return; } @@ -609,6 +602,8 @@ protected void onFinish(ActionListener listener) { protected void onStop() { auditor.info(transformConfig.getId(), "Indexer has stopped"); logger.info("Data frame transform [{}] indexer has stopped", transformConfig.getId()); + + transformTask.setTaskStateStopped(); transformsConfigManager.putOrUpdateTransformStats( new DataFrameTransformStateAndStats(transformId, transformTask.getState(), getStats(), DataFrameTransformCheckpointingInfo.EMPTY), // TODO should this be null diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_start_stop.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_start_stop.yml index a475c3ceadca6..4909761c5633b 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_start_stop.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_start_stop.yml @@ -114,8 +114,8 @@ teardown: transform_id: "airline-transform-start-stop" - match: { count: 1 } - match: { transforms.0.id: "airline-transform-start-stop" } -# - match: { transforms.0.state.indexer_state: "stopped" } -# - match: { transforms.0.state.task_state: "stopped" } + - match: { transforms.0.state.indexer_state: "stopped" } + - match: { transforms.0.state.task_state: "stopped" } - do: data_frame.start_data_frame_transform: From 905902c3257c0548a5ed268d5851d4af07a51540 Mon Sep 17 00:00:00 2001 From: Lisa Cawley Date: Tue, 28 May 2019 09:04:02 -0700 Subject: [PATCH 150/224] [DOCS] Reorg monitoring configuration for re-use (#42547) --- .../configuring-metricbeat.asciidoc | 121 ++++++++++-------- 1 file changed, 68 insertions(+), 53 deletions(-) diff --git a/docs/reference/monitoring/configuring-metricbeat.asciidoc b/docs/reference/monitoring/configuring-metricbeat.asciidoc index df578e88da614..e337c5bf7d345 100644 --- a/docs/reference/monitoring/configuring-metricbeat.asciidoc +++ b/docs/reference/monitoring/configuring-metricbeat.asciidoc @@ -17,6 +17,8 @@ image::monitoring/images/metricbeat.png[Example monitoring architecture] To learn about monitoring in general, see {stack-ov}/xpack-monitoring.html[Monitoring the {stack}]. +//NOTE: The tagged regions are re-used in the Stack Overview. + . Enable the collection of monitoring data. Set `xpack.monitoring.collection.enabled` to `true` on each node in the production cluster. By default, it is is disabled (`false`). @@ -71,13 +73,13 @@ PUT _cluster/settings Leave `xpack.monitoring.enabled` set to its default value (`true`). -- -. On each {es} node in the production cluster: - -.. {metricbeat-ref}/metricbeat-installation.html[Install {metricbeat}]. +. {metricbeat-ref}/metricbeat-installation.html[Install {metricbeat}] on each +{es} node in the production cluster. -.. Enable the {es} module in {metricbeat}. + +. Enable the {es} {xpack} module in {metricbeat} on each {es} node. + + -- +// tag::enable-es-module[] For example, to enable the default configuration in the `modules.d` directory, run the following command: @@ -89,39 +91,57 @@ metricbeat modules enable elasticsearch-xpack For more information, see {metricbeat-ref}/configuration-metricbeat.html[Specify which modules to run] and {metricbeat-ref}/metricbeat-module-elasticsearch.html[{es} module]. --- - -.. By default the module will collect {es} monitoring metrics from `http://localhost:9200`. -If the local {es} node has a different address, you must specify it via the `hosts` setting -in the `modules.d/elasticsearch-xpack.yml` file. -.. If Elastic {security-features} are enabled, you must also provide a user ID -and password so that {metricbeat} can collect metrics successfully. - -... Create a user on the production cluster that has the -{stack-ov}/built-in-roles.html[`remote_monitoring_collector` built-in role]. -Alternatively, use the {stack-ov}/built-in-users.html[`remote_monitoring_user` built-in user]. +// end::enable-es-module[] +-- -... Add the `username` and `password` settings to the {es} module configuration -file. +. Configure the {es} {xpack} module in {metricbeat} on each {es} node. + + -- -For example, add the following settings in the `modules.d/elasticsearch-xpack.yml` file: +// tag::configure-es-module[] +The `modules.d/elasticsearch-xpack.yml` file contains the following settings: [source,yaml] ---------------------------------- -- module: elasticsearch - ... - username: remote_monitoring_user - password: YOUR_PASSWORD + - module: elasticsearch + metricsets: + - ccr + - cluster_stats + - index + - index_recovery + - index_summary + - ml_job + - node_stats + - shard + period: 10s + hosts: ["http://localhost:9200"] + #username: "user" + #password: "secret" + xpack.enabled: true ---------------------------------- --- -.. If you configured {es} to use <>, -you must access it via HTTPS. For example, use a `hosts` setting like -`https://localhost:9200` in the `modules.d/elasticsearch-xpack.yml` file. +By default, the module collects {es} monitoring metrics from +`http://localhost:9200`. If that host and port number are not correct, you must +update the `hosts` setting. If you configured {es} to use encrypted +communications, you must access it via HTTPS. For example, use a `hosts` setting +like `https://localhost:9200`. +// end::configure-es-module[] -.. Identify where to send the monitoring data. + +// tag::remote-monitoring-user[] +If Elastic {security-features} are enabled, you must also provide a user ID +and password so that {metricbeat} can collect metrics successfully: + +.. Create a user on the production cluster that has the +{stack-ov}/built-in-roles.html[`remote_monitoring_collector` built-in role]. +Alternatively, use the +{stack-ov}/built-in-users.html[`remote_monitoring_user` built-in user]. + +.. Add the `username` and `password` settings to the {es} module configuration +file. +// end::remote-monitoring-user[] +-- + +. Identify where to send the monitoring data. + + -- TIP: In production environments, we strongly recommend using a separate cluster @@ -136,48 +156,43 @@ configuration file (`metricbeat.yml`): [source,yaml] ---------------------------------- output.elasticsearch: + # Array of hosts to connect to. hosts: ["http://es-mon-1:9200", "http://es-mon2:9200"] <1> + + # Optional protocol and basic auth credentials. + #protocol: "https" + #username: "elastic" + #password: "changeme" ---------------------------------- <1> In this example, the data is stored on a monitoring cluster with nodes `es-mon-1` and `es-mon-2`. +If you configured the monitoring cluster to use encrypted communications, you +must access it via HTTPS. For example, use a `hosts` setting like +`https://es-mon-1:9200`. + IMPORTANT: The {es} {monitor-features} use ingest pipelines, therefore the cluster that stores the monitoring data must have at least one <>. -For more information about these configuration options, see -{metricbeat-ref}/elasticsearch-output.html[Configure the {es} output]. --- - -.. If {es} {security-features} are enabled on the monitoring cluster, you -must provide a valid user ID and password so that {metricbeat} can send metrics -successfully. +If {es} {security-features} are enabled on the monitoring cluster, you must +provide a valid user ID and password so that {metricbeat} can send metrics +successfully: -... Create a user on the monitoring cluster that has the +.. Create a user on the monitoring cluster that has the {stack-ov}/built-in-roles.html[`remote_monitoring_agent` built-in role]. Alternatively, use the {stack-ov}/built-in-users.html[`remote_monitoring_user` built-in user]. -... Add the `username` and `password` settings to the {es} output information in -the {metricbeat} configuration file (`metricbeat.yml`): -+ --- -[source,yaml] ----------------------------------- -output.elasticsearch: - ... - username: remote_monitoring_user - password: YOUR_PASSWORD ----------------------------------- --- +.. Add the `username` and `password` settings to the {es} output information in +the {metricbeat} configuration file. -.. If you configured the monitoring cluster to use -<>, you must access it via -HTTPS. For example, use a `hosts` setting like `https://es-mon-1:9200` in the -`metricbeat.yml` file. +For more information about these configuration options, see +{metricbeat-ref}/elasticsearch-output.html[Configure the {es} output]. +-- -. <>. +. <> on each node. -. {metricbeat-ref}/metricbeat-starting.html[Start {metricbeat}]. +. {metricbeat-ref}/metricbeat-starting.html[Start {metricbeat}] on each node. . {kibana-ref}/monitoring-data.html[View the monitoring data in {kib}]. From 492efa7729fc826734807aefdd191a4b850f0f03 Mon Sep 17 00:00:00 2001 From: Przemyslaw Gomulka Date: Tue, 28 May 2019 18:37:17 +0200 Subject: [PATCH 151/224] Remove suppresions for "unchecked" for hamcrest varargs methods (#41528) In hamcrest 2.1 warnings for unchecked varargs were fixed by hamcrest using @SafeVarargs for those matchers where this warning occurred. This PR is aimed to remove these annotations when Matchers.contains ,Matchers.containsInAnyOrder or Matchers.hasItems was used --- .../java/org/elasticsearch/client/BulkProcessorIT.java | 2 -- .../client/BulkRequestWithGlobalParametersIT.java | 5 ----- .../elasticsearch/common/logging/JsonLoggerTests.java | 2 -- .../xpack/restart/FullClusterRestartIT.java | 1 - .../security/authc/ldap/SearchGroupsResolverTests.java | 1 - .../authc/ldap/ActiveDirectoryGroupsResolverTests.java | 1 - .../authc/ldap/ActiveDirectorySessionFactoryTests.java | 9 --------- .../authc/ldap/UserAttributeGroupsResolverTests.java | 3 --- 8 files changed, 24 deletions(-) diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/BulkProcessorIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/BulkProcessorIT.java index 762e927551b8b..2aa9457bcd897 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/BulkProcessorIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/BulkProcessorIT.java @@ -291,7 +291,6 @@ public void testBulkProcessorConcurrentRequestsReadOnlyIndex() throws Exception assertMultiGetResponse(highLevelClient().mget(multiGetRequest, RequestOptions.DEFAULT), testDocs); } - @SuppressWarnings("unchecked") public void testGlobalParametersAndSingleRequest() throws Exception { createIndexWithMultipleShards("test"); @@ -326,7 +325,6 @@ public void testGlobalParametersAndSingleRequest() throws Exception { assertThat(blogs, everyItem(hasProperty(fieldFromSource("fieldNameXYZ"), equalTo("valueXYZ")))); } - @SuppressWarnings("unchecked") public void testGlobalParametersAndBulkProcessor() throws Exception { createIndexWithMultipleShards("test"); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/BulkRequestWithGlobalParametersIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/BulkRequestWithGlobalParametersIT.java index 3020eb0329b5c..dc49e6f88a6e4 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/BulkRequestWithGlobalParametersIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/BulkRequestWithGlobalParametersIT.java @@ -44,7 +44,6 @@ public class BulkRequestWithGlobalParametersIT extends ESRestHighLevelClientTestCase { - @SuppressWarnings("unchecked") public void testGlobalPipelineOnBulkRequest() throws IOException { createFieldAddingPipleine("xyz", "fieldNameXYZ", "valueXYZ"); @@ -83,7 +82,6 @@ public void testPipelineOnRequestOverridesGlobalPipeline() throws IOException { assertThat(hits, everyItem(hasProperty(fieldFromSource("fieldXYZ"), nullValue()))); } - @SuppressWarnings("unchecked") public void testMixPipelineOnRequestAndGlobal() throws IOException { createFieldAddingPipleine("globalId", "fieldXYZ", "valueXYZ"); createFieldAddingPipleine("perIndexId", "someNewField", "someValue"); @@ -153,7 +151,6 @@ public void testGlobalType() throws IOException { assertThat(hits, everyItem(hasType("global_type"))); } - @SuppressWarnings("unchecked") public void testTypeGlobalAndPerRequest() throws IOException { BulkRequest request = new BulkRequest(null, "global_type"); request.add(new IndexRequest("index1", "local_type", "1") @@ -171,7 +168,6 @@ public void testTypeGlobalAndPerRequest() throws IOException { .and(hasType("global_type")))); } - @SuppressWarnings("unchecked") public void testGlobalRouting() throws IOException { createIndexWithMultipleShards("index"); BulkRequest request = new BulkRequest(null); @@ -189,7 +185,6 @@ public void testGlobalRouting() throws IOException { assertThat(hits, containsInAnyOrder(hasId("1"), hasId("2"))); } - @SuppressWarnings("unchecked") public void testMixLocalAndGlobalRouting() throws IOException { BulkRequest request = new BulkRequest(null); request.routing("globalRouting"); diff --git a/qa/logging-config/src/test/java/org/elasticsearch/common/logging/JsonLoggerTests.java b/qa/logging-config/src/test/java/org/elasticsearch/common/logging/JsonLoggerTests.java index bbb20737c4708..b62e1a78e82ca 100644 --- a/qa/logging-config/src/test/java/org/elasticsearch/common/logging/JsonLoggerTests.java +++ b/qa/logging-config/src/test/java/org/elasticsearch/common/logging/JsonLoggerTests.java @@ -67,7 +67,6 @@ public void tearDown() throws Exception { super.tearDown(); } - @SuppressWarnings("unchecked") public void testJsonLayout() throws IOException { final Logger testLogger = LogManager.getLogger("test"); @@ -90,7 +89,6 @@ public void testJsonLayout() throws IOException { } } - @SuppressWarnings("unchecked") public void testPrefixLoggerInJson() throws IOException { Logger shardIdLogger = Loggers.getLogger("shardIdLogger", ShardId.fromString("[indexName][123]")); shardIdLogger.info("This is an info message with a shardId"); diff --git a/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java b/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java index 3ac0b20f95d0f..aa0788bd2b426 100644 --- a/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java +++ b/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java @@ -277,7 +277,6 @@ private String loadWatch(String watch) throws IOException { return StreamsUtils.copyToStringFromClasspath("/org/elasticsearch/xpack/restart/" + watch); } - @SuppressWarnings("unchecked") private void assertOldTemplatesAreDeleted() throws IOException { Map templates = entityAsMap(client().performRequest(new Request("GET", "/_template"))); assertThat(templates.keySet(), not(hasItems(is("watches"), startsWith("watch-history"), is("triggered_watches")))); diff --git a/x-pack/qa/openldap-tests/src/test/java/org/elasticsearch/xpack/security/authc/ldap/SearchGroupsResolverTests.java b/x-pack/qa/openldap-tests/src/test/java/org/elasticsearch/xpack/security/authc/ldap/SearchGroupsResolverTests.java index 036cf8ad0db33..f24bcface06bb 100644 --- a/x-pack/qa/openldap-tests/src/test/java/org/elasticsearch/xpack/security/authc/ldap/SearchGroupsResolverTests.java +++ b/x-pack/qa/openldap-tests/src/test/java/org/elasticsearch/xpack/security/authc/ldap/SearchGroupsResolverTests.java @@ -23,7 +23,6 @@ import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.notNullValue; -@SuppressWarnings("unchecked") public class SearchGroupsResolverTests extends GroupsResolverTestCase { private static final String BRUCE_BANNER_DN = "uid=hulk,ou=people,dc=oldap,dc=test,dc=elasticsearch,dc=com"; diff --git a/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/ActiveDirectoryGroupsResolverTests.java b/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/ActiveDirectoryGroupsResolverTests.java index 1a4fd0242dbaa..7fbbd217ae90b 100644 --- a/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/ActiveDirectoryGroupsResolverTests.java +++ b/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/ActiveDirectoryGroupsResolverTests.java @@ -35,7 +35,6 @@ public void setReferralFollowing() { } @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/35738") - @SuppressWarnings("unchecked") public void testResolveSubTree() throws Exception { Settings settings = Settings.builder() .put("xpack.security.authc.realms.active_directory.ad.group_search.scope", LdapSearchScope.SUB_TREE) diff --git a/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/ActiveDirectorySessionFactoryTests.java b/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/ActiveDirectorySessionFactoryTests.java index 73e1df5dd08bd..3dc432b482bd6 100644 --- a/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/ActiveDirectorySessionFactoryTests.java +++ b/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/ActiveDirectorySessionFactoryTests.java @@ -66,7 +66,6 @@ public boolean enableWarningsCheck() { return false; } - @SuppressWarnings("unchecked") public void testAdAuth() throws Exception { RealmConfig config = configureRealm("ad-test", buildAdSettings(AD_LDAP_URL, AD_DOMAIN, false)); try (ActiveDirectorySessionFactory sessionFactory = getActiveDirectorySessionFactory(config, sslService, threadPool)) { @@ -101,7 +100,6 @@ private RealmConfig configureRealm(String name, Settings settings) { return new RealmConfig(identifier, mergedSettings, env, new ThreadContext(globalSettings)); } - @SuppressWarnings("unchecked") public void testNetbiosAuth() throws Exception { final String adUrl = randomFrom(AD_LDAP_URL, AD_LDAP_GC_URL); RealmConfig config = configureRealm("ad-test", buildAdSettings(adUrl, AD_DOMAIN, false)); @@ -139,7 +137,6 @@ public void testAdAuthAvengers() throws Exception { } } - @SuppressWarnings("unchecked") public void testAuthenticate() throws Exception { Settings settings = buildAdSettings(REALM_ID, AD_LDAP_URL, AD_DOMAIN, "CN=Users,DC=ad,DC=test,DC=elasticsearch,DC=com", LdapSearchScope.ONE_LEVEL, false); @@ -163,7 +160,6 @@ public void testAuthenticate() throws Exception { } } - @SuppressWarnings("unchecked") public void testAuthenticateBaseUserSearch() throws Exception { Settings settings = buildAdSettings(REALM_ID, AD_LDAP_URL, AD_DOMAIN, "CN=Bruce Banner, CN=Users,DC=ad,DC=test,DC=elasticsearch,DC=com", LdapSearchScope.BASE, false); @@ -208,7 +204,6 @@ public void testAuthenticateBaseGroupSearch() throws Exception { } } - @SuppressWarnings("unchecked") public void testAuthenticateWithUserPrincipalName() throws Exception { Settings settings = buildAdSettings(REALM_ID, AD_LDAP_URL, AD_DOMAIN, "CN=Users,DC=ad,DC=test,DC=elasticsearch,DC=com", LdapSearchScope.ONE_LEVEL, false); @@ -229,7 +224,6 @@ public void testAuthenticateWithUserPrincipalName() throws Exception { } } - @SuppressWarnings("unchecked") public void testAuthenticateWithSAMAccountName() throws Exception { Settings settings = buildAdSettings(REALM_ID, AD_LDAP_URL, AD_DOMAIN, "CN=Users,DC=ad,DC=test,DC=elasticsearch,DC=com", LdapSearchScope.ONE_LEVEL, false); @@ -251,7 +245,6 @@ public void testAuthenticateWithSAMAccountName() throws Exception { } } - @SuppressWarnings("unchecked") public void testCustomUserFilter() throws Exception { Settings settings = Settings.builder() .put(buildAdSettings(REALM_ID, AD_LDAP_URL, AD_DOMAIN, "CN=Users,DC=ad,DC=test,DC=elasticsearch,DC=com", @@ -275,7 +268,6 @@ public void testCustomUserFilter() throws Exception { } - @SuppressWarnings("unchecked") public void testStandardLdapConnection() throws Exception { String groupSearchBase = "DC=ad,DC=test,DC=elasticsearch,DC=com"; String userTemplate = "CN={0},CN=Users,DC=ad,DC=test,DC=elasticsearch,DC=com"; @@ -341,7 +333,6 @@ public void testHandlingLdapReferralErrors() throws Exception { } } - @SuppressWarnings("unchecked") public void testStandardLdapWithAttributeGroups() throws Exception { String userTemplate = "CN={0},CN=Users,DC=ad,DC=test,DC=elasticsearch,DC=com"; Settings settings = LdapTestCase.buildLdapSettings(new String[]{AD_LDAP_URL}, userTemplate, false); diff --git a/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/UserAttributeGroupsResolverTests.java b/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/UserAttributeGroupsResolverTests.java index 24f0ecace67b9..38adbbe019048 100644 --- a/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/UserAttributeGroupsResolverTests.java +++ b/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/UserAttributeGroupsResolverTests.java @@ -29,7 +29,6 @@ public class UserAttributeGroupsResolverTests extends GroupsResolverTestCase { public static final String BRUCE_BANNER_DN = "cn=Bruce Banner,CN=Users,DC=ad,DC=test,DC=elasticsearch,DC=com"; private static final RealmConfig.RealmIdentifier REALM_ID = new RealmConfig.RealmIdentifier("ldap", "realm1"); - @SuppressWarnings("unchecked") public void testResolve() throws Exception { //falling back on the 'memberOf' attribute UserAttributeGroupsResolver resolver = new UserAttributeGroupsResolver(config(REALM_ID, Settings.EMPTY)); @@ -42,7 +41,6 @@ public void testResolve() throws Exception { containsString("Philanthropists"))); } - @SuppressWarnings("unchecked") public void testResolveFromPreloadedAttributes() throws Exception { SearchRequest preSearch = new SearchRequest(BRUCE_BANNER_DN, SearchScope.BASE, LdapUtils.OBJECT_CLASS_PRESENCE_FILTER, "memberOf"); final Collection attributes = ldapConnection.searchForEntry(preSearch).getAttributes(); @@ -57,7 +55,6 @@ public void testResolveFromPreloadedAttributes() throws Exception { containsString("Philanthropists"))); } - @SuppressWarnings("unchecked") public void testResolveCustomGroupAttribute() throws Exception { Settings settings = Settings.builder() .put("user_group_attribute", "seeAlso") From f07b90f3c351fb352d2058229b50fbf94442c07d Mon Sep 17 00:00:00 2001 From: Julie Tibshirani Date: Tue, 28 May 2019 09:49:40 -0700 Subject: [PATCH 152/224] Remove support for chained multi-fields. (#42333) Follow-up to #41926, where we deprecated support for multi-fields within multi-fields. Addresses #41267. --- .../migration/migrate_8_0/mappings.asciidoc | 9 ++ .../index/mapper/TypeParsers.java | 18 ++-- .../mapper/ExternalFieldMapperTests.java | 89 ------------------- .../ExternalValuesMapperIntegrationIT.java | 10 +-- .../index/mapper/TypeParsersTests.java | 38 +++++--- 5 files changed, 52 insertions(+), 112 deletions(-) diff --git a/docs/reference/migration/migrate_8_0/mappings.asciidoc b/docs/reference/migration/migrate_8_0/mappings.asciidoc index 371e9fc44c415..16e75473885c6 100644 --- a/docs/reference/migration/migrate_8_0/mappings.asciidoc +++ b/docs/reference/migration/migrate_8_0/mappings.asciidoc @@ -14,3 +14,12 @@ The number of completion contexts within a single completion field has been limited to 10. + +[float] +==== Defining multi-fields within multi-fields + +Previously, it was possible to define a multi-field within a multi-field. +Defining chained multi-fields was deprecated in 7.3 and is now no longer +supported. To migrate the mappings, all instances of `fields` that occur within +a `fields` block should be removed, either by flattening the chained `fields` +blocks into a single level, or by switching to `copy_to` if appropriate. \ No newline at end of file diff --git a/server/src/main/java/org/elasticsearch/index/mapper/TypeParsers.java b/server/src/main/java/org/elasticsearch/index/mapper/TypeParsers.java index 9848a23cac11b..12c80361a855c 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/TypeParsers.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/TypeParsers.java @@ -22,6 +22,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.lucene.index.IndexOptions; import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.Version; import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.time.DateFormatter; import org.elasticsearch.common.xcontent.support.XContentMapValues; @@ -219,11 +220,18 @@ public static boolean parseMultiField(FieldMapper.Builder builder, String name, String propName, Object propNode) { if (propName.equals("fields")) { if (parserContext.isWithinMultiField()) { - deprecationLogger.deprecatedAndMaybeLog("multifield_within_multifield", "At least one multi-field, [" + name + "], was " + - "encountered that itself contains a multi-field. Defining multi-fields within a multi-field is deprecated and will " + - "no longer be supported in 8.0. To resolve the issue, all instances of [fields] that occur within a [fields] block " + - "should be removed from the mappings, either by flattening the chained [fields] blocks into a single level, or " + - "switching to [copy_to] if appropriate."); + // For indices created prior to 8.0, we only emit a deprecation warning and do not fail type parsing. This is to + // maintain the backwards-compatibility guarantee that we can always load indexes from the previous major version. + if (parserContext.indexVersionCreated().before(Version.V_8_0_0)) { + deprecationLogger.deprecatedAndMaybeLog("multifield_within_multifield", "At least one multi-field, [" + name + "], " + + "was encountered that itself contains a multi-field. Defining multi-fields within a multi-field is deprecated " + + "and is not supported for indices created in 8.0 and later. To migrate the mappings, all instances of [fields] " + + "that occur within a [fields] block should be removed from the mappings, either by flattening the chained " + + "[fields] blocks into a single level, or switching to [copy_to] if appropriate."); + } else { + throw new IllegalArgumentException("Encountered a multi-field [" + name + "] which itself contains a multi-field. " + + "Defining chained multi-fields is not supported."); + } } parserContext = parserContext.createMultiFieldContext(parserContext); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/ExternalFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/ExternalFieldMapperTests.java index e5d3040f7a3bc..5515603db3476 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/ExternalFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/ExternalFieldMapperTests.java @@ -20,7 +20,6 @@ package org.elasticsearch.index.mapper; import org.apache.lucene.index.IndexableField; -import org.apache.lucene.util.BytesRef; import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.Strings; @@ -132,12 +131,6 @@ public void testExternalValuesWithMultifield() throws Exception { .startObject("field") .field("type", "text") .field("store", true) - .startObject("fields") - .startObject("raw") - .field("type", "keyword") - .field("store", true) - .endObject() - .endObject() .endObject() .endObject() .endObject() @@ -164,87 +157,5 @@ public void testExternalValuesWithMultifield() throws Exception { IndexableField field = doc.rootDoc().getField("field.field"); assertThat(field, notNullValue()); assertThat(field.stringValue(), is("foo")); - - IndexableField raw = doc.rootDoc().getField("field.field.raw"); - - assertThat(raw, notNullValue()); - assertThat(raw.binaryValue(), is(new BytesRef("foo"))); - - assertWarnings("At least one multi-field, [field], was " + - "encountered that itself contains a multi-field. Defining multi-fields within a multi-field is deprecated and will " + - "no longer be supported in 8.0. To resolve the issue, all instances of [fields] that occur within a [fields] block " + - "should be removed from the mappings, either by flattening the chained [fields] blocks into a single level, or " + - "switching to [copy_to] if appropriate."); - } - - public void testExternalValuesWithMultifieldTwoLevels() throws Exception { - IndexService indexService = createIndex("test"); - Map mapperParsers = new HashMap<>(); - mapperParsers.put(ExternalMapperPlugin.EXTERNAL, new ExternalMapper.TypeParser(ExternalMapperPlugin.EXTERNAL, "foo")); - mapperParsers.put(ExternalMapperPlugin.EXTERNAL_BIS, new ExternalMapper.TypeParser(ExternalMapperPlugin.EXTERNAL, "bar")); - mapperParsers.put(TextFieldMapper.CONTENT_TYPE, new TextFieldMapper.TypeParser()); - MapperRegistry mapperRegistry = new MapperRegistry(mapperParsers, Collections.emptyMap(), MapperPlugin.NOOP_FIELD_FILTER); - - Supplier queryShardContext = () -> { - return indexService.newQueryShardContext(0, null, () -> { throw new UnsupportedOperationException(); }, null); - }; - DocumentMapperParser parser = new DocumentMapperParser(indexService.getIndexSettings(), indexService.mapperService(), - indexService.xContentRegistry(), indexService.similarityService(), mapperRegistry, queryShardContext); - - DocumentMapper documentMapper = parser.parse("type", new CompressedXContent( - Strings - .toString(XContentFactory.jsonBuilder().startObject().startObject("type").startObject("properties") - .startObject("field") - .field("type", ExternalMapperPlugin.EXTERNAL) - .startObject("fields") - .startObject("field") - .field("type", "text") - .startObject("fields") - .startObject("generated") - .field("type", ExternalMapperPlugin.EXTERNAL_BIS) - .endObject() - .startObject("raw") - .field("type", "text") - .endObject() - .endObject() - .endObject() - .startObject("raw") - .field("type", "text") - .endObject() - .endObject() - .endObject() - .endObject().endObject().endObject()))); - - ParsedDocument doc = documentMapper.parse(new SourceToParse("test", "type", "1", BytesReference - .bytes(XContentFactory.jsonBuilder() - .startObject() - .field("field", "1234") - .endObject()), - XContentType.JSON)); - - assertThat(doc.rootDoc().getField("field.bool"), notNullValue()); - assertThat(doc.rootDoc().getField("field.bool").stringValue(), is("T")); - - assertThat(doc.rootDoc().getField("field.point"), notNullValue()); - - assertThat(doc.rootDoc().getField("field.shape"), notNullValue()); - - assertThat(doc.rootDoc().getField("field.field"), notNullValue()); - assertThat(doc.rootDoc().getField("field.field").stringValue(), is("foo")); - - assertThat(doc.rootDoc().getField("field.field.generated.generated"), notNullValue()); - assertThat(doc.rootDoc().getField("field.field.generated.generated").stringValue(), is("bar")); - - assertThat(doc.rootDoc().getField("field.field.raw"), notNullValue()); - assertThat(doc.rootDoc().getField("field.field.raw").stringValue(), is("foo")); - - assertThat(doc.rootDoc().getField("field.raw"), notNullValue()); - assertThat(doc.rootDoc().getField("field.raw").stringValue(), is("foo")); - - assertWarnings("At least one multi-field, [field], was " + - "encountered that itself contains a multi-field. Defining multi-fields within a multi-field is deprecated and will " + - "no longer be supported in 8.0. To resolve the issue, all instances of [fields] that occur within a [fields] block " + - "should be removed from the mappings, either by flattening the chained [fields] blocks into a single level, or " + - "switching to [copy_to] if appropriate."); } } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/ExternalValuesMapperIntegrationIT.java b/server/src/test/java/org/elasticsearch/index/mapper/ExternalValuesMapperIntegrationIT.java index 6d47e4a784e06..7e7764d9514fe 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/ExternalValuesMapperIntegrationIT.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/ExternalValuesMapperIntegrationIT.java @@ -139,14 +139,8 @@ public void testExternalValuesWithMultifield() throws Exception { .field("type", ExternalMapperPlugin.EXTERNAL_UPPER) .startObject("fields") .startObject("g") - .field("type", "text") + .field("type", "keyword") .field("store", true) - .startObject("fields") - .startObject("raw") - .field("type", "keyword") - .field("store", true) - .endObject() - .endObject() .endObject() .endObject() .endObject() @@ -156,7 +150,7 @@ public void testExternalValuesWithMultifield() throws Exception { refresh(); SearchResponse response = client().prepareSearch("test-idx") - .setQuery(QueryBuilders.termQuery("f.g.raw", "FOO BAR")) + .setQuery(QueryBuilders.termQuery("f.g", "FOO BAR")) .execute().actionGet(); assertThat(response.getHits().getTotalHits().value, equalTo((long) 1)); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/TypeParsersTests.java b/server/src/test/java/org/elasticsearch/index/mapper/TypeParsersTests.java index 70f469b96370c..b0fbc3618ed2f 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/TypeParsersTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/TypeParsersTests.java @@ -39,6 +39,7 @@ import org.elasticsearch.index.analysis.NamedAnalyzer; import org.elasticsearch.index.analysis.TokenFilterFactory; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.VersionUtils; import java.io.IOException; import java.util.Collections; @@ -48,6 +49,7 @@ import static org.elasticsearch.index.analysis.AnalysisRegistry.DEFAULT_ANALYZER_NAME; import static org.elasticsearch.index.analysis.AnalysisRegistry.DEFAULT_SEARCH_ANALYZER_NAME; import static org.elasticsearch.index.analysis.AnalysisRegistry.DEFAULT_SEARCH_QUOTED_ANALYZER_NAME; +import static org.hamcrest.core.IsEqual.equalTo; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -179,19 +181,35 @@ public void testMultiFieldWithinMultiField() throws IOException { .endObject() .endObject(); + Mapper.TypeParser typeParser = new KeywordFieldMapper.TypeParser(); + + // For indices created prior to 8.0, we should only emit a warning and not fail parsing. Map fieldNode = XContentHelper.convertToMap( BytesReference.bytes(mapping), true, mapping.contentType()).v2(); - Mapper.TypeParser typeParser = new KeywordFieldMapper.TypeParser(); - Mapper.TypeParser.ParserContext parserContext = new Mapper.TypeParser.ParserContext("type", - null, null, type -> typeParser, Version.CURRENT, null); - - TypeParsers.parseField(builder, "some-field", fieldNode, parserContext); - assertWarnings("At least one multi-field, [sub-field], was " + - "encountered that itself contains a multi-field. Defining multi-fields within a multi-field is deprecated and will " + - "no longer be supported in 8.0. To resolve the issue, all instances of [fields] that occur within a [fields] block " + - "should be removed from the mappings, either by flattening the chained [fields] blocks into a single level, or " + - "switching to [copy_to] if appropriate."); + Version olderVersion = VersionUtils.randomPreviousCompatibleVersion(random(), Version.V_8_0_0); + Mapper.TypeParser.ParserContext olderContext = new Mapper.TypeParser.ParserContext("type", + null, null, type -> typeParser, olderVersion, null); + + TypeParsers.parseField(builder, "some-field", fieldNode, olderContext); + assertWarnings("At least one multi-field, [sub-field], " + + "was encountered that itself contains a multi-field. Defining multi-fields within a multi-field is deprecated " + + "and is not supported for indices created in 8.0 and later. To migrate the mappings, all instances of [fields] " + + "that occur within a [fields] block should be removed from the mappings, either by flattening the chained " + + "[fields] blocks into a single level, or switching to [copy_to] if appropriate."); + + // For indices created in 8.0 or later, we should throw an error. + Map fieldNodeCopy = XContentHelper.convertToMap( + BytesReference.bytes(mapping), true, mapping.contentType()).v2(); + + Version version = VersionUtils.randomVersionBetween(random(), Version.V_8_0_0, Version.CURRENT); + Mapper.TypeParser.ParserContext context = new Mapper.TypeParser.ParserContext("type", + null, null, type -> typeParser, version, null); + + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, + () -> TypeParsers.parseField(builder, "some-field", fieldNodeCopy, context)); + assertThat(e.getMessage(), equalTo("Encountered a multi-field [sub-field] which itself contains a " + + "multi-field. Defining chained multi-fields is not supported.")); } private Analyzer createAnalyzerWithMode(String name, AnalysisMode mode) { From 7d2809597047cfb0cbda441070b513ae30d00405 Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Tue, 28 May 2019 13:04:19 -0400 Subject: [PATCH 153/224] Lazily compute Java 8 home in reindex configuration (#42630) In the reindex from old tests we require Java 8. Today when configuring the reindex from old tests, we eagerly evalulate Java 8 home, which means that we require JAVA8_HOME to be set even if the reindex from old test tasks are not in the task graph. This is an onerous requirement if, for example, all that you want to do is build a distribution. This commit addresses this by making evaluation of Java 8 home lazy, so that it is only done and required if the reindex from old test tasks would be executed. --- modules/reindex/build.gradle | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/reindex/build.gradle b/modules/reindex/build.gradle index da184deedaa11..260c8dcc1df79 100644 --- a/modules/reindex/build.gradle +++ b/modules/reindex/build.gradle @@ -125,7 +125,7 @@ if (Os.isFamily(Os.FAMILY_WINDOWS)) { dependsOn unzip executable = new File(project.runtimeJavaHome, 'bin/java') env 'CLASSPATH', "${ -> project.configurations.oldesFixture.asPath }" - env 'JAVA_HOME', getJavaHome(it, 8) + env 'JAVA_HOME', "${ -> getJavaHome(it, 8)}" args 'oldes.OldElasticsearch', baseDir, unzip.temporaryDir, From 6e39433cd5377536bda3e09168f1872660188ac0 Mon Sep 17 00:00:00 2001 From: Yannick Welsch Date: Tue, 28 May 2019 19:09:09 +0200 Subject: [PATCH 154/224] Remove "nodes/0" folder prefix from data path (#42489) With the removal of node.max_local_storage_nodes, there is no need anymore to keep the data in subfolders indexed by a node ordinal. This commit makes it so that ES 8.0 will store data directly in $DATA_DIR instead of $DATA_DIR/nodes/$nodeOrdinal. Upon startup, Elasticsearch will check to see if there is data in the old location, and automatically move it to the new location. This automatic migration only works if $nodeOrdinal is 0, i.e., multiple node instances have not previously run on the same data path, which required for node.max_local_storage_nodes to explicitly be configured. --- docs/reference/commands/shard-tool.asciidoc | 10 +- .../migration/migrate_8_0/node.asciidoc | 22 +++ .../env/NodeEnvironmentEvilTests.java | 9 +- .../elasticsearch/env/NodeEnvironment.java | 170 +++++++++++++++--- .../RemoveCorruptedShardDataCommand.java | 5 +- .../RecoveryWithUnsupportedIndicesIT.java | 17 +- .../elasticsearch/env/NodeEnvironmentIT.java | 85 +++++++++ .../env/NodeEnvironmentTests.java | 12 +- .../index/shard/NewPathForShardTests.java | 2 +- .../RemoveCorruptedShardDataCommandTests.java | 5 +- 10 files changed, 285 insertions(+), 52 deletions(-) diff --git a/docs/reference/commands/shard-tool.asciidoc b/docs/reference/commands/shard-tool.asciidoc index 6fca1355a27be..c13c8d3db6a36 100644 --- a/docs/reference/commands/shard-tool.asciidoc +++ b/docs/reference/commands/shard-tool.asciidoc @@ -51,14 +51,14 @@ $ bin/elasticsearch-shard remove-corrupted-data --index twitter --shard-id 0 Please make a complete backup of your index before using this tool. -Opening Lucene index at /var/lib/elasticsearchdata/nodes/0/indices/P45vf_YQRhqjfwLMUvSqDw/0/index/ +Opening Lucene index at /var/lib/elasticsearchdata/indices/P45vf_YQRhqjfwLMUvSqDw/0/index/ - >> Lucene index is corrupted at /var/lib/elasticsearchdata/nodes/0/indices/P45vf_YQRhqjfwLMUvSqDw/0/index/ + >> Lucene index is corrupted at /var/lib/elasticsearchdata/indices/P45vf_YQRhqjfwLMUvSqDw/0/index/ -Opening translog at /var/lib/elasticsearchdata/nodes/0/indices/P45vf_YQRhqjfwLMUvSqDw/0/translog/ +Opening translog at /var/lib/elasticsearchdata/indices/P45vf_YQRhqjfwLMUvSqDw/0/translog/ - >> Translog is clean at /var/lib/elasticsearchdata/nodes/0/indices/P45vf_YQRhqjfwLMUvSqDw/0/translog/ + >> Translog is clean at /var/lib/elasticsearchdata/indices/P45vf_YQRhqjfwLMUvSqDw/0/translog/ Corrupted Lucene index segments found - 32 documents will be lost. @@ -93,7 +93,7 @@ POST /_cluster/reroute You must accept the possibility of data loss by changing parameter `accept_data_loss` to `true`. -Deleted corrupt marker corrupted_FzTSBSuxT7i3Tls_TgwEag from /var/lib/elasticsearchdata/nodes/0/indices/P45vf_YQRhqjfwLMUvSqDw/0/index/ +Deleted corrupt marker corrupted_FzTSBSuxT7i3Tls_TgwEag from /var/lib/elasticsearchdata/indices/P45vf_YQRhqjfwLMUvSqDw/0/index/ -------------------------------------------------- diff --git a/docs/reference/migration/migrate_8_0/node.asciidoc b/docs/reference/migration/migrate_8_0/node.asciidoc index a1dcd654807e1..b1187e88b5d90 100644 --- a/docs/reference/migration/migrate_8_0/node.asciidoc +++ b/docs/reference/migration/migrate_8_0/node.asciidoc @@ -14,3 +14,25 @@ The `node.max_local_storage_nodes` setting was deprecated in 7.x and has been removed in 8.0. Nodes should be run on separate data paths to ensure that each node is consistently assigned to the same data path. + +[float] +==== Change of data folder layout + +Each node's data is now stored directly in the data directory set by the +`path.data` setting, rather than in `${path.data}/nodes/0`, because the removal +of the `node.max_local_storage_nodes` setting means that nodes may no longer +share a data path. At startup, Elasticsearch will automatically migrate the data +path to the new layout. This automatic migration will not proceed if the data +path contains data for more than one node. You should move to a configuration in +which each node has its own data path before upgrading. + +If you try to upgrade a configuration in which there is data for more than one +node in a data path then the automatic migration will fail and Elasticsearch +will refuse to start. To resolve this you will need to perform the migration +manually. The data for the extra nodes are stored in folders named +`${path.data}/nodes/1`, `${path.data}/nodes/2` and so on, and you should move +each of these folders to an appropriate location and then configure the +corresponding node to use this location for its data path. If your nodes each +have more than one data path in their `path.data` settings then you should move +all the corresponding subfolders in parallel. Each node uses the same subfolder +(e.g. `nodes/2`) across all its data paths. \ No newline at end of file diff --git a/qa/evil-tests/src/test/java/org/elasticsearch/env/NodeEnvironmentEvilTests.java b/qa/evil-tests/src/test/java/org/elasticsearch/env/NodeEnvironmentEvilTests.java index 44d3c2a88a55b..49e30ac4b5ed3 100644 --- a/qa/evil-tests/src/test/java/org/elasticsearch/env/NodeEnvironmentEvilTests.java +++ b/qa/evil-tests/src/test/java/org/elasticsearch/env/NodeEnvironmentEvilTests.java @@ -51,10 +51,11 @@ public void testMissingWritePermission() throws IOException { Settings build = Settings.builder() .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toAbsolutePath().toString()) .putList(Environment.PATH_DATA_SETTING.getKey(), tempPaths).build(); - IOException exception = expectThrows(IOException.class, () -> { + IllegalStateException exception = expectThrows(IllegalStateException.class, () -> { new NodeEnvironment(build, TestEnvironment.newEnvironment(build)); }); - assertTrue(exception.getMessage(), exception.getMessage().startsWith(path.toString())); + assertTrue(exception.getCause().getCause().getMessage(), + exception.getCause().getCause().getMessage().startsWith(path.toString())); } } @@ -62,7 +63,7 @@ public void testMissingWritePermissionOnIndex() throws IOException { assumeTrue("posix filesystem", isPosix); final String[] tempPaths = tmpPaths(); Path path = PathUtils.get(randomFrom(tempPaths)); - Path fooIndex = path.resolve("nodes").resolve("0").resolve(NodeEnvironment.INDICES_FOLDER) + Path fooIndex = path.resolve(NodeEnvironment.INDICES_FOLDER) .resolve("foo"); Files.createDirectories(fooIndex); try (PosixPermissionsResetter attr = new PosixPermissionsResetter(fooIndex)) { @@ -82,7 +83,7 @@ public void testMissingWritePermissionOnShard() throws IOException { assumeTrue("posix filesystem", isPosix); final String[] tempPaths = tmpPaths(); Path path = PathUtils.get(randomFrom(tempPaths)); - Path fooIndex = path.resolve("nodes").resolve("0").resolve(NodeEnvironment.INDICES_FOLDER) + Path fooIndex = path.resolve(NodeEnvironment.INDICES_FOLDER) .resolve("foo"); Path fooShard = fooIndex.resolve("0"); Path fooShardIndex = fooShard.resolve("index"); diff --git a/server/src/main/java/org/elasticsearch/env/NodeEnvironment.java b/server/src/main/java/org/elasticsearch/env/NodeEnvironment.java index 4d19dd66732fc..75f39e70cfc7b 100644 --- a/server/src/main/java/org/elasticsearch/env/NodeEnvironment.java +++ b/server/src/main/java/org/elasticsearch/env/NodeEnvironment.java @@ -35,6 +35,7 @@ import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.CheckedFunction; +import org.elasticsearch.common.CheckedRunnable; import org.elasticsearch.common.Randomness; import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.common.UUIDs; @@ -45,6 +46,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.gateway.MetaDataStateFormat; @@ -81,6 +83,7 @@ import java.util.concurrent.Semaphore; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; +import java.util.function.Function; import java.util.function.Predicate; import java.util.stream.Collectors; import java.util.stream.Stream; @@ -90,9 +93,9 @@ */ public final class NodeEnvironment implements Closeable { public static class NodePath { - /* ${data.paths}/nodes/0 */ + /* ${data.paths} */ public final Path path; - /* ${data.paths}/nodes/0/indices */ + /* ${data.paths}/indices */ public final Path indicesPath; /** Cached FileStore from path */ public final FileStore fileStore; @@ -115,7 +118,7 @@ public NodePath(Path path) throws IOException { /** * Resolves the given shards directory against this NodePath - * ${data.paths}/nodes/{node.id}/indices/{index.uuid}/{shard.id} + * ${data.paths}/indices/{index.uuid}/{shard.id} */ public Path resolve(ShardId shardId) { return resolve(shardId.getIndex()).resolve(Integer.toString(shardId.id())); @@ -123,7 +126,7 @@ public Path resolve(ShardId shardId) { /** * Resolves index directory against this NodePath - * ${data.paths}/nodes/{node.id}/indices/{index.uuid} + * ${data.paths}/indices/{index.uuid} */ public Path resolve(Index index) { return resolve(index.getUUID()); @@ -170,7 +173,6 @@ public String toString() { public static final Setting ENABLE_LUCENE_SEGMENT_INFOS_TRACE_SETTING = Setting.boolSetting("node.enable_lucene_segment_infos_trace", false, Property.NodeScope); - public static final String NODES_FOLDER = "nodes"; public static final String INDICES_FOLDER = "indices"; public static final String NODE_LOCK_FILENAME = "node.lock"; @@ -179,20 +181,28 @@ public static class NodeLock implements Releasable { private final Lock[] locks; private final NodePath[] nodePaths; + + public NodeLock(final Logger logger, + final Environment environment, + final CheckedFunction pathFunction) throws IOException { + this(logger, environment, pathFunction, Function.identity()); + } + /** * Tries to acquire a node lock for a node id, throws {@code IOException} if it is unable to acquire it * @param pathFunction function to check node path before attempt of acquiring a node lock */ public NodeLock(final Logger logger, final Environment environment, - final CheckedFunction pathFunction) throws IOException { + final CheckedFunction pathFunction, + final Function subPathMapping) throws IOException { nodePaths = new NodePath[environment.dataFiles().length]; locks = new Lock[nodePaths.length]; try { final Path[] dataPaths = environment.dataFiles(); for (int dirIndex = 0; dirIndex < dataPaths.length; dirIndex++) { Path dataDir = dataPaths[dirIndex]; - Path dir = resolveNodePath(dataDir); + Path dir = subPathMapping.apply(dataDir); if (pathFunction.apply(dir) == false) { continue; } @@ -247,7 +257,7 @@ public NodeEnvironment(Settings settings, Environment environment) throws IOExce sharedDataPath = environment.sharedDataFile(); for (Path path : environment.dataFiles()) { - Files.createDirectories(resolveNodePath(path)); + Files.createDirectories(path); } final NodeLock nodeLock; @@ -264,7 +274,6 @@ public NodeEnvironment(Settings settings, Environment environment) throws IOExce this.locks = nodeLock.locks; this.nodePaths = nodeLock.nodePaths; - this.nodeMetaData = loadOrCreateNodeMetaData(settings, logger, nodePaths); logger.debug("using node location {}", Arrays.toString(nodePaths)); @@ -278,6 +287,10 @@ public NodeEnvironment(Settings settings, Environment environment) throws IOExce ensureAtomicMoveSupported(nodePaths); } + if (upgradeLegacyNodeFolders(logger, settings, environment, nodeLock)) { + assertCanWrite(); + } + if (DiscoveryNode.isDataNode(settings) == false) { if (DiscoveryNode.isMasterNode(settings) == false) { ensureNoIndexMetaData(nodePaths); @@ -286,6 +299,8 @@ public NodeEnvironment(Settings settings, Environment environment) throws IOExce ensureNoShardData(nodePaths); } + this.nodeMetaData = loadOrCreateNodeMetaData(settings, logger, nodePaths); + success = true; } finally { if (success == false) { @@ -295,13 +310,128 @@ public NodeEnvironment(Settings settings, Environment environment) throws IOExce } /** - * Resolve a specific nodes/{node.id} path for the specified path and node lock id. - * - * @param path the path - * @return the resolved path + * Upgrades all data paths that have been written to by an older ES version to the 8.0+ compatible folder layout, + * removing the "nodes/${lockId}" folder prefix */ - public static Path resolveNodePath(final Path path) { - return path.resolve(NODES_FOLDER).resolve("0"); + private static boolean upgradeLegacyNodeFolders(Logger logger, Settings settings, Environment environment, + NodeLock nodeLock) throws IOException { + boolean upgradeNeeded = false; + + // check if we can do an auto-upgrade + for (Path path : environment.dataFiles()) { + final Path nodesFolderPath = path.resolve("nodes"); + if (Files.isDirectory(nodesFolderPath)) { + final List nodeLockIds = new ArrayList<>(); + + try (DirectoryStream stream = Files.newDirectoryStream(nodesFolderPath)) { + for (Path nodeLockIdPath : stream) { + String fileName = nodeLockIdPath.getFileName().toString(); + if (Files.isDirectory(nodeLockIdPath) && fileName.chars().allMatch(Character::isDigit)) { + int nodeLockId = Integer.parseInt(fileName); + nodeLockIds.add(nodeLockId); + } else if (FileSystemUtils.isDesktopServicesStore(nodeLockIdPath) == false) { + throw new IllegalStateException("unexpected file/folder encountered during data folder upgrade: " + + nodeLockIdPath); + } + } + } + + if (nodeLockIds.isEmpty() == false) { + upgradeNeeded = true; + + if (nodeLockIds.equals(Arrays.asList(0)) == false) { + throw new IllegalStateException("data path " + nodesFolderPath + " cannot be upgraded automatically because it " + + "contains data from nodes with ordinals " + nodeLockIds + ", due to previous use of the now obsolete " + + "[node.max_local_storage_nodes] setting. Please check the breaking changes docs for the current version of " + + "Elasticsearch to find an upgrade path"); + } + } + } + } + + if (upgradeNeeded == false) { + logger.trace("data folder upgrade not required"); + return false; + } + + logger.info("upgrading legacy data folders: {}", Arrays.toString(environment.dataFiles())); + + // acquire locks on legacy path for duration of upgrade (to ensure there is no older ES version running on this path) + final NodeLock legacyNodeLock; + try { + legacyNodeLock = new NodeLock(logger, environment, dir -> true, path -> path.resolve("nodes").resolve("0")); + } catch (IOException e) { + final String message = String.format( + Locale.ROOT, + "failed to obtain legacy node locks, tried %s;" + + " maybe these locations are not writable or multiple nodes were started on the same data path?", + Arrays.toString(environment.dataFiles())); + throw new IllegalStateException(message, e); + } + + // move contents from legacy path to new path + assert nodeLock.getNodePaths().length == legacyNodeLock.getNodePaths().length; + try { + final List> upgradeActions = new ArrayList<>(); + for (int i = 0; i < legacyNodeLock.getNodePaths().length; i++) { + final NodePath legacyNodePath = legacyNodeLock.getNodePaths()[i]; + final NodePath nodePath = nodeLock.getNodePaths()[i]; + + // determine folders to move and check that there are no extra files/folders + final Set folderNames = new HashSet<>(); + + try (DirectoryStream stream = Files.newDirectoryStream(legacyNodePath.path)) { + for (Path subFolderPath : stream) { + final String fileName = subFolderPath.getFileName().toString(); + if (FileSystemUtils.isDesktopServicesStore(subFolderPath)) { + // ignore + } else if (FileSystemUtils.isAccessibleDirectory(subFolderPath, logger)) { + if (fileName.equals(INDICES_FOLDER) == false && // indices folder + fileName.equals(MetaDataStateFormat.STATE_DIR_NAME) == false) { // global metadata & node state folder + throw new IllegalStateException("unexpected folder encountered during data folder upgrade: " + + subFolderPath); + } + final Path targetSubFolderPath = nodePath.path.resolve(fileName); + if (Files.exists(targetSubFolderPath)) { + throw new IllegalStateException("target folder already exists during data folder upgrade: " + + targetSubFolderPath); + } + folderNames.add(fileName); + } else if (fileName.equals(NODE_LOCK_FILENAME) == false && + fileName.equals(TEMP_FILE_NAME) == false) { + throw new IllegalStateException("unexpected file/folder encountered during data folder upgrade: " + + subFolderPath); + } + } + } + + assert Sets.difference(folderNames, Sets.newHashSet(INDICES_FOLDER, MetaDataStateFormat.STATE_DIR_NAME)).isEmpty() : + "expected indices and/or state dir folder but was " + folderNames; + + upgradeActions.add(() -> { + for (String folderName : folderNames) { + final Path sourceSubFolderPath = legacyNodePath.path.resolve(folderName); + final Path targetSubFolderPath = nodePath.path.resolve(folderName); + Files.move(sourceSubFolderPath, targetSubFolderPath, StandardCopyOption.ATOMIC_MOVE); + logger.info("data folder upgrade: moved from [{}] to [{}]", sourceSubFolderPath, targetSubFolderPath); + } + IOUtils.fsync(nodePath.path, true); + }); + } + // now do the actual upgrade. start by upgrading the node metadata file before moving anything, since a downgrade in an + // intermediate state would be pretty disastrous + loadOrCreateNodeMetaData(settings, logger, legacyNodeLock.getNodePaths()); + for (CheckedRunnable upgradeAction : upgradeActions) { + upgradeAction.run(); + } + } finally { + legacyNodeLock.close(); + } + + // upgrade successfully completed, remove legacy nodes folders + IOUtils.rm(Stream.of(environment.dataFiles()).map(path -> path.resolve("nodes")).toArray(Path[]::new)); + + return true; } private void maybeLogPathDetails() throws IOException { @@ -801,14 +931,14 @@ public Path[] availableShardPaths(ShardId shardId) { } /** - * Returns all folder names in ${data.paths}/nodes/{node.id}/indices folder + * Returns all folder names in ${data.paths}/indices folder */ public Set availableIndexFolders() throws IOException { return availableIndexFolders(p -> false); } /** - * Returns folder names in ${data.paths}/nodes/{node.id}/indices folder that don't match the given predicate. + * Returns folder names in ${data.paths}/indices folder that don't match the given predicate. * @param excludeIndexPathIdsPredicate folder names to exclude */ public Set availableIndexFolders(Predicate excludeIndexPathIdsPredicate) throws IOException { @@ -825,7 +955,7 @@ public Set availableIndexFolders(Predicate excludeIndexPathIdsPr } /** - * Return all directory names in the nodes/{node.id}/indices directory for the given node path. + * Return all directory names in the indices directory for the given node path. * * @param nodePath the path * @return all directories that could be indices for the given node path. @@ -836,7 +966,7 @@ public Set availableIndexFoldersForPath(final NodePath nodePath) throws } /** - * Return directory names in the nodes/{node.id}/indices directory for the given node path that don't match the given predicate. + * Return directory names in the indices directory for the given node path that don't match the given predicate. * * @param nodePath the path * @param excludeIndexPathIdsPredicate folder names to exclude @@ -865,7 +995,7 @@ public Set availableIndexFoldersForPath(final NodePath nodePath, Predica } /** - * Resolves all existing paths to indexFolderName in ${data.paths}/nodes/{node.id}/indices + * Resolves all existing paths to indexFolderName in ${data.paths}/indices */ public Path[] resolveIndexFolder(String indexFolderName) { if (nodePaths == null || locks == null) { diff --git a/server/src/main/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommand.java b/server/src/main/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommand.java index 16db596515b4c..5fc3ba57980bf 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommand.java +++ b/server/src/main/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommand.java @@ -140,17 +140,14 @@ protected void findAndProcessShardPath(OptionSet options, Environment environmen IndexMetaData.FORMAT.loadLatestState(logger, namedXContentRegistry, shardParent); final String shardIdFileName = path.getFileName().toString(); - final String nodeIdFileName = shardParentParent.getParent().getFileName().toString(); if (Files.isDirectory(path) && shardIdFileName.chars().allMatch(Character::isDigit) // SHARD-ID path element check && NodeEnvironment.INDICES_FOLDER.equals(shardParentParent.getFileName().toString()) // `indices` check - && nodeIdFileName.chars().allMatch(Character::isDigit) // NODE-ID check - && NodeEnvironment.NODES_FOLDER.equals(shardParentParent.getParent().getParent().getFileName().toString()) // `nodes` check ) { shardId = Integer.parseInt(shardIdFileName); indexName = indexMetaData.getIndex().getName(); } else { throw new ElasticsearchException("Unable to resolve shard id. Wrong folder structure at [ " + path.toString() - + " ], expected .../nodes/[NODE-ID]/indices/[INDEX-UUID]/[SHARD-ID]"); + + " ], expected .../indices/[INDEX-UUID]/[SHARD-ID]"); } } else { // otherwise resolve shardPath based on the index name and shard id diff --git a/server/src/test/java/org/elasticsearch/bwcompat/RecoveryWithUnsupportedIndicesIT.java b/server/src/test/java/org/elasticsearch/bwcompat/RecoveryWithUnsupportedIndicesIT.java index 53efeb393e4b4..720439768fabc 100644 --- a/server/src/test/java/org/elasticsearch/bwcompat/RecoveryWithUnsupportedIndicesIT.java +++ b/server/src/test/java/org/elasticsearch/bwcompat/RecoveryWithUnsupportedIndicesIT.java @@ -18,6 +18,12 @@ */ package org.elasticsearch.bwcompat; +import org.apache.lucene.util.LuceneTestCase; +import org.apache.lucene.util.TestUtil; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.env.Environment; +import org.elasticsearch.test.ESIntegTestCase; + import java.io.IOException; import java.io.InputStream; import java.nio.file.DirectoryStream; @@ -26,13 +32,6 @@ import java.util.ArrayList; import java.util.List; -import org.apache.lucene.util.LuceneTestCase; -import org.apache.lucene.util.TestUtil; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.env.Environment; -import org.elasticsearch.env.NodeEnvironment; -import org.elasticsearch.test.ESIntegTestCase; - import static org.hamcrest.Matchers.containsString; @LuceneTestCase.SuppressCodecs("*") @@ -69,8 +68,8 @@ protected Settings prepareBackwardsDataDir(Path backwardsIndex) throws IOExcepti } throw new IllegalStateException(builder.toString()); } - Path src = list[0].resolve(NodeEnvironment.NODES_FOLDER); - Path dest = dataDir.resolve(NodeEnvironment.NODES_FOLDER); + Path src = list[0].resolve("nodes"); + Path dest = dataDir.resolve("nodes"); assertTrue(Files.exists(src)); Files.move(src, dest); assertFalse(Files.exists(src)); diff --git a/server/src/test/java/org/elasticsearch/env/NodeEnvironmentIT.java b/server/src/test/java/org/elasticsearch/env/NodeEnvironmentIT.java index 74de578426f2c..4d1848428e5a7 100644 --- a/server/src/test/java/org/elasticsearch/env/NodeEnvironmentIT.java +++ b/server/src/test/java/org/elasticsearch/env/NodeEnvironmentIT.java @@ -21,13 +21,24 @@ import org.elasticsearch.Version; import org.elasticsearch.common.CheckedConsumer; +import org.elasticsearch.common.io.PathUtils; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.node.Node; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.InternalTestCluster; +import java.io.IOException; +import java.io.UncheckedIOException; +import java.nio.file.DirectoryStream; +import java.nio.file.Files; import java.nio.file.Path; +import java.nio.file.StandardCopyOption; +import java.util.List; +import java.util.stream.Collectors; +import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.endsWith; @@ -123,4 +134,78 @@ public void testFailsToStartIfUpgradedTooFar() { assertThat(illegalStateException.getMessage(), allOf(startsWith("cannot upgrade a node from version ["), endsWith("] directly to version [" + Version.CURRENT + "]"))); } + + public void testUpgradeDataFolder() throws IOException, InterruptedException { + String node = internalCluster().startNode(); + prepareCreate("test").get(); + indexRandom(true, client().prepareIndex("test", "type1", "1").setSource("{}", XContentType.JSON)); + String nodeId = client().admin().cluster().prepareState().get().getState().nodes().getMasterNodeId(); + + final Settings dataPathSettings = internalCluster().dataPathSettings(node); + internalCluster().stopRandomDataNode(); + + // simulate older data path layout by moving data under "nodes/0" folder + final List dataPaths = Environment.PATH_DATA_SETTING.get(dataPathSettings) + .stream().map(PathUtils::get).collect(Collectors.toList()); + dataPaths.forEach(path -> { + final Path targetPath = path.resolve("nodes").resolve("0"); + try { + Files.createDirectories(targetPath); + + try (DirectoryStream stream = Files.newDirectoryStream(path)) { + for (Path subPath : stream) { + String fileName = subPath.getFileName().toString(); + Path targetSubPath = targetPath.resolve(fileName); + if (fileName.equals("nodes") == false) { + Files.move(subPath, targetSubPath, StandardCopyOption.ATOMIC_MOVE); + } + } + } + } catch (IOException e) { + throw new UncheckedIOException(e); + } + }); + + dataPaths.forEach(path -> assertTrue(Files.exists(path.resolve("nodes")))); + + // create extra file/folder, and check that upgrade fails + if (dataPaths.isEmpty() == false) { + final Path badFileInNodesDir = Files.createTempFile(randomFrom(dataPaths).resolve("nodes"), "bad", "file"); + IllegalStateException ise = expectThrows(IllegalStateException.class, () -> internalCluster().startNode(dataPathSettings)); + assertThat(ise.getMessage(), containsString("unexpected file/folder encountered during data folder upgrade")); + Files.delete(badFileInNodesDir); + + final Path badFolderInNodesDir = Files.createDirectories(randomFrom(dataPaths).resolve("nodes").resolve("bad-folder")); + ise = expectThrows(IllegalStateException.class, () -> internalCluster().startNode(dataPathSettings)); + assertThat(ise.getMessage(), containsString("unexpected file/folder encountered during data folder upgrade")); + Files.delete(badFolderInNodesDir); + + final Path badFile = Files.createTempFile(randomFrom(dataPaths).resolve("nodes").resolve("0"), "bad", "file"); + ise = expectThrows(IllegalStateException.class, () -> internalCluster().startNode(dataPathSettings)); + assertThat(ise.getMessage(), containsString("unexpected file/folder encountered during data folder upgrade")); + Files.delete(badFile); + + final Path badFolder = Files.createDirectories(randomFrom(dataPaths).resolve("nodes").resolve("0").resolve("bad-folder")); + ise = expectThrows(IllegalStateException.class, () -> internalCluster().startNode(dataPathSettings)); + assertThat(ise.getMessage(), containsString("unexpected folder encountered during data folder upgrade")); + Files.delete(badFolder); + + final Path conflictingFolder = randomFrom(dataPaths).resolve("indices"); + if (Files.exists(conflictingFolder) == false) { + Files.createDirectories(conflictingFolder); + ise = expectThrows(IllegalStateException.class, () -> internalCluster().startNode(dataPathSettings)); + assertThat(ise.getMessage(), containsString("target folder already exists during data folder upgrade")); + Files.delete(conflictingFolder); + } + } + + // check that upgrade works + dataPaths.forEach(path -> assertTrue(Files.exists(path.resolve("nodes")))); + internalCluster().startNode(dataPathSettings); + dataPaths.forEach(path -> assertFalse(Files.exists(path.resolve("nodes")))); + assertEquals(nodeId, client().admin().cluster().prepareState().get().getState().nodes().getMasterNodeId()); + assertTrue(client().admin().indices().prepareExists("test").get().isExists()); + ensureYellow("test"); + assertHitCount(client().prepareSearch().setQuery(matchAllQuery()).get(), 1L); + } } diff --git a/server/src/test/java/org/elasticsearch/env/NodeEnvironmentTests.java b/server/src/test/java/org/elasticsearch/env/NodeEnvironmentTests.java index f21b55b9aee8f..5bb1152bcbe45 100644 --- a/server/src/test/java/org/elasticsearch/env/NodeEnvironmentTests.java +++ b/server/src/test/java/org/elasticsearch/env/NodeEnvironmentTests.java @@ -373,10 +373,10 @@ public void testCustomDataPaths() throws Exception { assertThat("shard paths with a custom data_path should contain only regular paths", env.availableShardPaths(sid), - equalTo(stringsToPaths(dataPaths, "nodes/0/indices/" + index.getUUID() + "/0"))); + equalTo(stringsToPaths(dataPaths, "indices/" + index.getUUID() + "/0"))); assertThat("index paths uses the regular template", - env.indexPaths(index), equalTo(stringsToPaths(dataPaths, "nodes/0/indices/" + index.getUUID()))); + env.indexPaths(index), equalTo(stringsToPaths(dataPaths, "indices/" + index.getUUID()))); IndexSettings s3 = new IndexSettings(s2.getIndexMetaData(), Settings.builder().build()); @@ -385,10 +385,10 @@ public void testCustomDataPaths() throws Exception { assertThat("shard paths with a custom data_path should contain only regular paths", env.availableShardPaths(sid), - equalTo(stringsToPaths(dataPaths, "nodes/0/indices/" + index.getUUID() + "/0"))); + equalTo(stringsToPaths(dataPaths, "indices/" + index.getUUID() + "/0"))); assertThat("index paths uses the regular template", - env.indexPaths(index), equalTo(stringsToPaths(dataPaths, "nodes/0/indices/" + index.getUUID()))); + env.indexPaths(index), equalTo(stringsToPaths(dataPaths, "indices/" + index.getUUID()))); env.close(); } @@ -418,7 +418,7 @@ public void testExistingTempFiles() throws IOException { String[] paths = tmpPaths(); // simulate some previous left over temp files for (String path : randomSubsetOf(randomIntBetween(1, paths.length), paths)) { - final Path nodePath = NodeEnvironment.resolveNodePath(PathUtils.get(path)); + final Path nodePath = PathUtils.get(path); Files.createDirectories(nodePath); Files.createFile(nodePath.resolve(NodeEnvironment.TEMP_FILE_NAME)); if (randomBoolean()) { @@ -433,7 +433,7 @@ public void testExistingTempFiles() throws IOException { // check we clean up for (String path: paths) { - final Path nodePath = NodeEnvironment.resolveNodePath(PathUtils.get(path)); + final Path nodePath = PathUtils.get(path); final Path tempFile = nodePath.resolve(NodeEnvironment.TEMP_FILE_NAME); assertFalse(tempFile + " should have been cleaned", Files.exists(tempFile)); final Path srcTempFile = nodePath.resolve(NodeEnvironment.TEMP_FILE_NAME + ".src"); diff --git a/server/src/test/java/org/elasticsearch/index/shard/NewPathForShardTests.java b/server/src/test/java/org/elasticsearch/index/shard/NewPathForShardTests.java index 4e6e3036f4c40..73ae826d7211f 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/NewPathForShardTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/NewPathForShardTests.java @@ -90,7 +90,7 @@ static class MockUsableSpaceFileSystemProvider extends FilterFileSystemProvider @Override public FileStore getFileStore(Path path) throws IOException { - if (path.toString().contains(aPathPart)) { + if (path.toString().contains(aPathPart) || (path.toString() + path.getFileSystem().getSeparator()).contains(aPathPart)) { return aFileStore; } else { return bFileStore; diff --git a/server/src/test/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommandTests.java b/server/src/test/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommandTests.java index c7b1846356363..3291a250f5ccb 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommandTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommandTests.java @@ -94,8 +94,7 @@ public void setup() throws IOException { .putList(Environment.PATH_DATA_SETTING.getKey(), dataDir.toAbsolutePath().toString()).build()); // create same directory structure as prod does - final Path path = NodeEnvironment.resolveNodePath(dataDir); - Files.createDirectories(path); + Files.createDirectories(dataDir); settings = Settings.builder() .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) @@ -103,7 +102,7 @@ public void setup() throws IOException { .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) .build(); - final NodeEnvironment.NodePath nodePath = new NodeEnvironment.NodePath(path); + final NodeEnvironment.NodePath nodePath = new NodeEnvironment.NodePath(dataDir); shardPath = new ShardPath(false, nodePath.resolve(shardId), nodePath.resolve(shardId), shardId); final IndexMetaData.Builder metaData = IndexMetaData.builder(routing.getIndexName()) .settings(settings) From 79a3de4152fa63707713356098b3a7849b5231da Mon Sep 17 00:00:00 2001 From: James Rodewig Date: Tue, 28 May 2019 14:20:42 -0400 Subject: [PATCH 155/224] [DOCS] Set explicit anchors for Asciidoctor (#42521) --- docs/reference/mapping/types/nested.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/reference/mapping/types/nested.asciidoc b/docs/reference/mapping/types/nested.asciidoc index 5969dcfd6956b..fe150a69b4900 100644 --- a/docs/reference/mapping/types/nested.asciidoc +++ b/docs/reference/mapping/types/nested.asciidoc @@ -193,7 +193,7 @@ phase. Instead, highlighting needs to be performed via ============================================= - +[[limit-number-nested-fields]] ==== Limiting the number of `nested` fields Indexing a document with 100 nested fields actually indexes 101 documents as each nested From 8fae57b3816a1bce7a0365577a74bf1f20f543bf Mon Sep 17 00:00:00 2001 From: Jake Landis Date: Tue, 28 May 2019 13:27:13 -0500 Subject: [PATCH 156/224] unmute 'Test url escaping with url mustache function' and bump logging (#42400) --- x-pack/qa/smoke-test-watcher/build.gradle | 1 + .../rest-api-spec/test/mustache/50_webhook_url_escaping.yml | 4 +--- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/x-pack/qa/smoke-test-watcher/build.gradle b/x-pack/qa/smoke-test-watcher/build.gradle index 9194c46daed01..8de7448618ea1 100644 --- a/x-pack/qa/smoke-test-watcher/build.gradle +++ b/x-pack/qa/smoke-test-watcher/build.gradle @@ -12,6 +12,7 @@ integTestCluster { setting 'xpack.ml.enabled', 'false' setting 'xpack.license.self_generated.type', 'trial' setting 'logger.org.elasticsearch.xpack.watcher', 'DEBUG' + setting 'logger.org.elasticsearch.xpack.core.watcher', 'DEBUG' } integTestRunner { diff --git a/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/mustache/50_webhook_url_escaping.yml b/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/mustache/50_webhook_url_escaping.yml index e11809a79baa5..bb06aca4f95a4 100644 --- a/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/mustache/50_webhook_url_escaping.yml +++ b/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/mustache/50_webhook_url_escaping.yml @@ -1,8 +1,6 @@ --- "Test url escaping with url mustache function": - - skip: - version: "all" - reason: "AwaitsFix https://github.com/elastic/elasticsearch/issues/41172" + - do: cluster.health: wait_for_status: yellow From 792435f160ac5fe8dd1218d98a0cb4f0a530030b Mon Sep 17 00:00:00 2001 From: Hendrik Muhs Date: Tue, 28 May 2019 20:37:30 +0200 Subject: [PATCH 157/224] check position before and after latch (#42623) check position before and after latch #fixes 42084 --- .../xpack/core/indexing/AsyncTwoPhaseIndexerTests.java | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexerTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexerTests.java index 95b3de5eb333e..fc86a9554880f 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexerTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexerTests.java @@ -225,7 +225,6 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws } } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/42084") public void testStateMachine() throws Exception { AtomicReference state = new AtomicReference<>(IndexerState.STOPPED); final ExecutorService executor = Executors.newFixedThreadPool(1); @@ -236,10 +235,11 @@ public void testStateMachine() throws Exception { assertThat(indexer.getState(), equalTo(IndexerState.STARTED)); assertTrue(indexer.maybeTriggerAsyncJob(System.currentTimeMillis())); assertThat(indexer.getState(), equalTo(IndexerState.INDEXING)); + assertTrue(awaitBusy(() -> indexer.getPosition() == 2)); countDownLatch.countDown(); - - assertThat(indexer.getPosition(), equalTo(2)); assertTrue(awaitBusy(() -> isFinished.get())); + assertThat(indexer.getPosition(), equalTo(3)); + assertFalse(isStopped.get()); assertThat(indexer.getStep(), equalTo(6)); assertThat(indexer.getStats().getNumInvocations(), equalTo(1L)); From aad6cc42411ac475b94b00e9570f65660d68c6be Mon Sep 17 00:00:00 2001 From: James Rodewig Date: Tue, 28 May 2019 15:18:51 -0400 Subject: [PATCH 158/224] [DOCS] Fix X-Pack tag for Asciidoctor (#42443) --- docs/reference/ccr/apis/follow-request-body.asciidoc | 1 - 1 file changed, 1 deletion(-) diff --git a/docs/reference/ccr/apis/follow-request-body.asciidoc b/docs/reference/ccr/apis/follow-request-body.asciidoc index e7e6ae2e26a05..d8fb725f02b14 100644 --- a/docs/reference/ccr/apis/follow-request-body.asciidoc +++ b/docs/reference/ccr/apis/follow-request-body.asciidoc @@ -1,4 +1,3 @@ -[role="xpack"] [testenv="platinum"] `max_read_request_operation_count`:: (integer) the maximum number of operations to pull per read from the remote From 7df025ce8619909b1e3c73f46c5e076a5e5e0be2 Mon Sep 17 00:00:00 2001 From: Jim Ferenczi Date: Tue, 28 May 2019 22:11:34 +0200 Subject: [PATCH 159/224] fix javadoc of SearchRequestBuilder#setTrackTotalHits (#42219) --- .../elasticsearch/action/search/SearchRequestBuilder.java | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java index 96c93c974cabb..3e2d835a4b803 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java @@ -378,7 +378,9 @@ public SearchRequestBuilder setTrackScores(boolean trackScores) { } /** - * Indicates if the total hit count for the query should be tracked. Defaults to {@code true} + * Indicates if the total hit count for the query should be tracked. Requests will count total hit count accurately + * up to 10,000 by default, see {@link #setTrackTotalHitsUpTo(int)} to change this value or set to true/false to always/never + * count accurately. */ public SearchRequestBuilder setTrackTotalHits(boolean trackTotalHits) { sourceBuilder().trackTotalHits(trackTotalHits); @@ -386,7 +388,7 @@ public SearchRequestBuilder setTrackTotalHits(boolean trackTotalHits) { } /** - * Indicates if the total hit count for the query should be tracked. Defaults to {@code true} + * Indicates the total hit count that should be tracked accurately or null if the value is unset. Defaults to 10,000. */ public SearchRequestBuilder setTrackTotalHitsUpTo(int trackTotalHitsUpTo) { sourceBuilder().trackTotalHitsUpTo(trackTotalHitsUpTo); From 4da6453673f28b4899f92bcc9867f153332154c9 Mon Sep 17 00:00:00 2001 From: David Kyle Date: Tue, 28 May 2019 21:41:15 +0100 Subject: [PATCH 160/224] [ML Data Frame] Mute stop start test Relates to https://github.com/elastic/elasticsearch/issues/42650 --- .../rest-api-spec/test/data_frame/transforms_start_stop.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_start_stop.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_start_stop.yml index 4909761c5633b..58af6e0899dda 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_start_stop.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_start_stop.yml @@ -90,6 +90,9 @@ teardown: - match: { airline-data-by-airline-start-stop.mappings: {} } --- "Test start/stop/start transform": + - skip: + reason: "https://github.com/elastic/elasticsearch/issues/42650" + version: "all" - do: data_frame.start_data_frame_transform: transform_id: "airline-transform-start-stop" From 0f5f8880c414febf5e1bc180aaed1b0b65a7e3ae Mon Sep 17 00:00:00 2001 From: Adrien Grand Date: Tue, 28 May 2019 23:13:53 +0200 Subject: [PATCH 161/224] Add 7.1.2 version constant. (#42643) Relates to #42635 --- server/src/main/java/org/elasticsearch/Version.java | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/server/src/main/java/org/elasticsearch/Version.java b/server/src/main/java/org/elasticsearch/Version.java index 48d37957844e2..844b963e9aa83 100644 --- a/server/src/main/java/org/elasticsearch/Version.java +++ b/server/src/main/java/org/elasticsearch/Version.java @@ -74,6 +74,8 @@ public class Version implements Comparable, ToXContentFragment { public static final Version V_7_1_0 = new Version(V_7_1_0_ID, org.apache.lucene.util.Version.LUCENE_8_0_0); public static final int V_7_1_1_ID = 7010199; public static final Version V_7_1_1 = new Version(V_7_1_1_ID, org.apache.lucene.util.Version.LUCENE_8_0_0); + public static final int V_7_1_2_ID = 7010299; + public static final Version V_7_1_2 = new Version(V_7_1_2_ID, org.apache.lucene.util.Version.LUCENE_8_0_0); public static final int V_7_2_0_ID = 7020099; public static final Version V_7_2_0 = new Version(V_7_2_0_ID, org.apache.lucene.util.Version.LUCENE_8_0_0); public static final int V_7_3_0_ID = 7030099; @@ -100,6 +102,8 @@ public static Version fromId(int id) { return V_7_3_0; case V_7_2_0_ID: return V_7_2_0; + case V_7_1_2_ID: + return V_7_1_2; case V_7_1_1_ID: return V_7_1_1; case V_7_1_0_ID: From aaf0ab42cb314b87782f85a313156e0be807838c Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Tue, 28 May 2019 23:22:52 +0200 Subject: [PATCH 162/224] Adjust use of Deprecated Netty API (#42613) * With the recent upgrade to Netty 4.1.36 this method became deprecated and I made the advised change to fix the deprecation --- .../elasticsearch/http/netty4/Netty4HttpServerTransport.java | 4 ++-- .../org/elasticsearch/transport/netty4/Netty4Transport.java | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java index 356cfa0bbf99d..8b31e0bcb28a2 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java @@ -23,8 +23,8 @@ import io.netty.channel.Channel; import io.netty.channel.ChannelFuture; import io.netty.channel.ChannelHandler; -import io.netty.channel.ChannelHandlerAdapter; import io.netty.channel.ChannelHandlerContext; +import io.netty.channel.ChannelInboundHandlerAdapter; import io.netty.channel.ChannelInitializer; import io.netty.channel.ChannelOption; import io.netty.channel.FixedRecvByteBufAllocator; @@ -351,7 +351,7 @@ public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws E } @ChannelHandler.Sharable - private static class ServerChannelExceptionHandler extends ChannelHandlerAdapter { + private static class ServerChannelExceptionHandler extends ChannelInboundHandlerAdapter { private final Netty4HttpServerTransport transport; diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java index 34fb2173143c8..f2871ff34e8b7 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java @@ -25,8 +25,8 @@ import io.netty.channel.Channel; import io.netty.channel.ChannelFuture; import io.netty.channel.ChannelHandler; -import io.netty.channel.ChannelHandlerAdapter; import io.netty.channel.ChannelHandlerContext; +import io.netty.channel.ChannelInboundHandlerAdapter; import io.netty.channel.ChannelInitializer; import io.netty.channel.ChannelOption; import io.netty.channel.FixedRecvByteBufAllocator; @@ -315,7 +315,7 @@ private void addClosedExceptionLogger(Channel channel) { } @ChannelHandler.Sharable - private class ServerChannelExceptionHandler extends ChannelHandlerAdapter { + private class ServerChannelExceptionHandler extends ChannelInboundHandlerAdapter { @Override public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) { From a1e78585d18fe07cce52950cd39f9992da3c126b Mon Sep 17 00:00:00 2001 From: Julie Tibshirani Date: Tue, 28 May 2019 17:40:07 -0700 Subject: [PATCH 163/224] Fix a callout in the field alias docs. --- docs/reference/mapping/types/alias.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/reference/mapping/types/alias.asciidoc b/docs/reference/mapping/types/alias.asciidoc index a7bba54df56e4..c70d96a7e572d 100644 --- a/docs/reference/mapping/types/alias.asciidoc +++ b/docs/reference/mapping/types/alias.asciidoc @@ -16,7 +16,7 @@ PUT trips }, "route_length_miles": { "type": "alias", - "path": "distance" // <1> + "path": "distance" <1> }, "transit_mode": { "type": "keyword" From 813e57d2d8e66e51f423e77b178e05316335391e Mon Sep 17 00:00:00 2001 From: Mark Vieira Date: Tue, 28 May 2019 17:52:35 -0700 Subject: [PATCH 164/224] Add explicit build flag for experimenting with test execution cacheability (#42649) * Add build flag for ignoring random test seed as task input * Fix checkstyle violations --- .../elasticsearch/gradle/BuildPlugin.groovy | 31 +++++------ ...emPropertyCommandLineArgumentProvider.java | 30 +++++++++++ .../testfixtures/TestFixturesPlugin.java | 53 +++++++++++-------- 3 files changed, 72 insertions(+), 42 deletions(-) create mode 100644 buildSrc/src/main/java/org/elasticsearch/gradle/SystemPropertyCommandLineArgumentProvider.java diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy index b5c69a418cceb..92d11a8477436 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy @@ -817,7 +817,7 @@ class BuildPlugin implements Plugin { } test.jvmArgumentProviders.add(nonInputProperties) - test.extensions.getByType(ExtraPropertiesExtension).set('nonInputProperties', nonInputProperties) + test.extensions.add('nonInputProperties', nonInputProperties) test.executable = "${ext.get('runtimeJavaHome')}/bin/java" test.workingDir = project.file("${project.buildDir}/testrun/${test.name}") @@ -842,17 +842,25 @@ class BuildPlugin implements Plugin { // we use './temp' since this is per JVM and tests are forbidden from writing to CWD test.systemProperties 'gradle.dist.lib': new File(project.class.location.toURI()).parent, - 'gradle.worker.jar': "${project.gradle.getGradleUserHomeDir()}/caches/${project.gradle.gradleVersion}/workerMain/gradle-worker.jar", - 'gradle.user.home': project.gradle.getGradleUserHomeDir(), 'java.io.tmpdir': './temp', 'java.awt.headless': 'true', 'tests.gradle': 'true', 'tests.artifact': project.name, 'tests.task': test.path, 'tests.security.manager': 'true', - 'tests.seed': project.property('testSeed'), 'jna.nosys': 'true' + // ignore changing test seed when build is passed -Dignore.tests.seed for cacheability experimentation + if (System.getProperty('ignore.tests.seed') != null) { + nonInputProperties.systemProperty('tests.seed', project.property('testSeed')) + } else { + test.systemProperty('tests.seed', project.property('testSeed')) + } + + // don't track these as inputs since they contain absolute paths and break cache relocatability + nonInputProperties.systemProperty('gradle.worker.jar', "${project.gradle.getGradleUserHomeDir()}/caches/${project.gradle.gradleVersion}/workerMain/gradle-worker.jar") + nonInputProperties.systemProperty('gradle.user.home', project.gradle.getGradleUserHomeDir()) + nonInputProperties.systemProperty('compiler.java', "${-> (ext.get('compilerJavaVersion') as JavaVersion).getMajorVersion()}") // TODO: remove setting logging level via system property @@ -965,19 +973,4 @@ class BuildPlugin implements Plugin { }) } } - - private static class SystemPropertyCommandLineArgumentProvider implements CommandLineArgumentProvider { - private final Map systemProperties = [:] - - void systemProperty(String key, Object value) { - systemProperties.put(key, value) - } - - @Override - Iterable asArguments() { - return systemProperties.collect { key, value -> - "-D${key}=${value.toString()}".toString() - } - } - } } diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/SystemPropertyCommandLineArgumentProvider.java b/buildSrc/src/main/java/org/elasticsearch/gradle/SystemPropertyCommandLineArgumentProvider.java new file mode 100644 index 0000000000000..7e808724035df --- /dev/null +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/SystemPropertyCommandLineArgumentProvider.java @@ -0,0 +1,30 @@ +package org.elasticsearch.gradle; + +import org.gradle.api.tasks.Input; +import org.gradle.process.CommandLineArgumentProvider; + +import java.util.LinkedHashMap; +import java.util.Map; +import java.util.stream.Collectors; + +public class SystemPropertyCommandLineArgumentProvider implements CommandLineArgumentProvider { + private final Map systemProperties = new LinkedHashMap<>(); + + public void systemProperty(String key, Object value) { + systemProperties.put(key, value); + } + + @Override + public Iterable asArguments() { + return systemProperties.entrySet() + .stream() + .map(entry -> "-D" + entry.getKey() + "=" + entry.getValue()) + .collect(Collectors.toList()); + } + + // Track system property keys as an input so our build cache key will change if we add properties but values are still ignored + @Input + public Iterable getPropertyNames() { + return systemProperties.keySet(); + } +} diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/testfixtures/TestFixturesPlugin.java b/buildSrc/src/main/java/org/elasticsearch/gradle/testfixtures/TestFixturesPlugin.java index b930955236fb8..0313123655afd 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/testfixtures/TestFixturesPlugin.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/testfixtures/TestFixturesPlugin.java @@ -22,9 +22,11 @@ import com.avast.gradle.dockercompose.DockerComposePlugin; import com.avast.gradle.dockercompose.tasks.ComposeUp; import org.elasticsearch.gradle.OS; +import org.elasticsearch.gradle.SystemPropertyCommandLineArgumentProvider; import org.elasticsearch.gradle.precommit.JarHellTask; import org.elasticsearch.gradle.precommit.TestingConventionsTasks; import org.elasticsearch.gradle.precommit.ThirdPartyAuditTask; +import org.gradle.api.Action; import org.gradle.api.DefaultTask; import org.gradle.api.Plugin; import org.gradle.api.Project; @@ -122,7 +124,8 @@ public void apply(Project project) { configureServiceInfoForTask( task, fixtureProject, - task::systemProperty + (name, host) -> + task.getExtensions().getByType(SystemPropertyCommandLineArgumentProvider.class).systemProperty(name, host) ); task.dependsOn(fixtureProject.getTasks().getByName("postProcessFixture")); }) @@ -143,28 +146,32 @@ private void conditionTaskByType(TaskContainer tasks, TestFixtureExtension exten private void configureServiceInfoForTask(Task task, Project fixtureProject, BiConsumer consumer) { // Configure ports for the tests as system properties. // We only know these at execution time so we need to do it in doFirst - task.doFirst(theTask -> - fixtureProject.getExtensions().getByType(ComposeExtension.class).getServicesInfos() - .forEach((service, infos) -> { - infos.getTcpPorts() - .forEach((container, host) -> { - String name = "test.fixtures." + service + ".tcp." + container; - theTask.getLogger().info("port mapping property: {}={}", name, host); - consumer.accept( - name, - host - ); - }); - infos.getUdpPorts() - .forEach((container, host) -> { - String name = "test.fixtures." + service + ".udp." + container; - theTask.getLogger().info("port mapping property: {}={}", name, host); - consumer.accept( - name, - host - ); - }); - }) + task.doFirst(new Action() { + @Override + public void execute(Task theTask) { + fixtureProject.getExtensions().getByType(ComposeExtension.class).getServicesInfos() + .forEach((service, infos) -> { + infos.getTcpPorts() + .forEach((container, host) -> { + String name = "test.fixtures." + service + ".tcp." + container; + theTask.getLogger().info("port mapping property: {}={}", name, host); + consumer.accept( + name, + host + ); + }); + infos.getUdpPorts() + .forEach((container, host) -> { + String name = "test.fixtures." + service + ".udp." + container; + theTask.getLogger().info("port mapping property: {}={}", name, host); + consumer.accept( + name, + host + ); + }); + }); + } + } ); } From b960c2002df64dce956244782a4471e9c1152520 Mon Sep 17 00:00:00 2001 From: Henning Andersen <33268011+henningandersen@users.noreply.github.com> Date: Wed, 29 May 2019 08:14:49 +0200 Subject: [PATCH 165/224] Use correct global checkpoint sync interval (#42642) A disruption test case need to use a lower checkpoint sync interval since they verify sequence numbers after the test waiting max 10 seconds for it to stabilize. Closes #42637 --- .../elasticsearch/versioning/ConcurrentSeqNoVersioningIT.java | 1 + 1 file changed, 1 insertion(+) diff --git a/server/src/test/java/org/elasticsearch/versioning/ConcurrentSeqNoVersioningIT.java b/server/src/test/java/org/elasticsearch/versioning/ConcurrentSeqNoVersioningIT.java index a6b3865de247b..bf05a8cb19d7d 100644 --- a/server/src/test/java/org/elasticsearch/versioning/ConcurrentSeqNoVersioningIT.java +++ b/server/src/test/java/org/elasticsearch/versioning/ConcurrentSeqNoVersioningIT.java @@ -136,6 +136,7 @@ public void testSeqNoCASLinearizability() { assertAcked(prepareCreate("test") .setSettings(Settings.builder() + .put(indexSettings()) .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1 + randomInt(2)) .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, randomInt(3)) )); From 3f10cea87ac658b037a256bbf99b62e5dcae05b3 Mon Sep 17 00:00:00 2001 From: Colin Goodheart-Smithe Date: Wed, 29 May 2019 08:50:30 +0100 Subject: [PATCH 166/224] Removes types from SearchRequest and QueryShardContext (#42112) --- .../client/RequestConverters.java | 14 +-- .../client/RollupRequestConverters.java | 10 -- .../client/RequestConvertersTests.java | 27 +----- .../org/elasticsearch/client/RollupIT.java | 22 +---- .../common/HighlighterWithAnalyzersTests.java | 2 +- .../ExpressionFieldScriptTests.java | 2 +- .../ExpressionNumberSortScriptTests.java | 9 +- .../ExpressionTermsSetQueryTests.java | 9 +- .../expression/StoredExpressionTests.java | 2 +- .../MultiSearchTemplateRequestTests.java | 3 - .../script/mustache/SearchTemplateIT.java | 14 +-- .../mustache/simple-msearch-template.json | 4 +- .../painless/NeedsScoreTests.java | 2 +- .../TokenCountFieldMapperIntegrationIT.java | 3 +- .../join/query/HasChildQueryBuilderTests.java | 5 +- .../query/HasParentQueryBuilderTests.java | 4 - .../index/reindex/RestReindexAction.java | 5 - .../reindex/remote/RemoteRequestBuilders.java | 11 --- .../index/reindex/CancelTests.java | 4 +- .../reindex/DeleteByQueryBasicTests.java | 18 ++-- .../index/reindex/ReindexBasicTests.java | 2 +- .../index/reindex/RestReindexActionTests.java | 22 ----- .../reindex/UpdateByQueryBasicTests.java | 4 +- .../remote/RemoteRequestBuildersTests.java | 41 ++------ .../ICUCollationKeywordFieldMapperIT.java | 24 ++--- .../resources/rest-api-spec/api/count.json | 7 -- .../test/count/11_basic_with_types.yml | 66 ------------- .../test/msearch/12_basic_with_types.yml | 97 ------------------- .../query/TransportValidateQueryAction.java | 2 +- .../explain/TransportExplainAction.java | 12 +-- .../action/search/MultiSearchRequest.java | 9 -- .../action/search/SearchRequest.java | 54 +++-------- .../action/search/SearchRequestBuilder.java | 11 --- .../org/elasticsearch/index/IndexService.java | 7 +- .../elasticsearch/index/SearchSlowLog.java | 9 +- .../index/query/IdsQueryBuilder.java | 75 +++----------- .../index/query/QueryShardContext.java | 27 +----- .../reindex/AbstractBulkByScrollRequest.java | 3 - .../reindex/ClientScrollableHitSource.java | 5 +- .../index/reindex/DeleteByQueryRequest.java | 51 ---------- .../index/reindex/ReindexRequest.java | 14 --- .../index/reindex/UpdateByQueryRequest.java | 28 ------ .../rest/action/search/RestCountAction.java | 15 --- .../action/search/RestMultiSearchAction.java | 2 +- .../search/DefaultSearchContext.java | 19 ---- .../internal/ShardSearchLocalRequest.java | 31 +++--- .../search/internal/ShardSearchRequest.java | 2 - .../internal/ShardSearchTransportRequest.java | 5 - .../search/lookup/DocLookup.java | 13 +-- .../search/lookup/FieldsLookup.java | 8 +- .../search/lookup/LeafDocLookup.java | 11 +-- .../search/lookup/LeafFieldsLookup.java | 10 +- .../search/lookup/SearchLookup.java | 8 +- .../admin/cluster/node/tasks/TasksIT.java | 7 +- .../action/bulk/BulkProcessorRetryIT.java | 1 - .../action/search/ExpandSearchPhaseTests.java | 1 - .../search/MultiSearchRequestTests.java | 23 +---- .../action/search/SearchRequestTests.java | 7 -- .../index/SearchSlowLogTests.java | 5 - .../index/mapper/IdFieldTypeTests.java | 7 -- .../query/CommonTermsQueryParserTests.java | 2 +- .../index/query/IdsQueryBuilderTests.java | 75 +------------- .../index/query/RandomQueryBuilder.java | 3 +- .../reindex/DeleteByQueryRequestTests.java | 24 ----- .../elasticsearch/indexing/IndexActionIT.java | 2 +- .../indices/state/OpenCloseIndexIT.java | 2 +- .../action/search/RestCountActionTests.java | 62 ------------ .../search/DefaultSearchContextTests.java | 1 - .../aggregations/bucket/BooleanTermsIT.java | 6 +- .../aggregations/bucket/DateHistogramIT.java | 1 - .../bucket/DiversifiedSamplerIT.java | 4 +- .../aggregations/bucket/DoubleTermsIT.java | 3 +- .../aggregations/bucket/HistogramIT.java | 4 +- .../aggregations/bucket/LongTermsIT.java | 4 +- .../aggregations/bucket/MinDocCountIT.java | 13 +-- .../search/aggregations/bucket/NestedIT.java | 4 +- .../search/aggregations/bucket/SamplerIT.java | 6 +- .../aggregations/bucket/ShardSizeTermsIT.java | 32 +++--- .../bucket/ShardSizeTestCase.java | 4 +- .../SignificantTermsSignificanceScoreIT.java | 20 ++-- .../bucket/TermsDocCountErrorIT.java | 94 +++++++++--------- .../bucket/terms/StringTermsIT.java | 50 +++++----- .../aggregations/metrics/CardinalityIT.java | 34 +++---- .../aggregations/pipeline/SerialDiffIT.java | 4 +- .../support/ScriptValuesTests.java | 6 +- .../basic/SearchWithRandomIOExceptionsIT.java | 7 +- .../subphase/FetchSourceSubPhaseTests.java | 4 +- .../highlight/CustomHighlighterSearchIT.java | 9 +- .../highlight/HighlighterSearchIT.java | 4 +- .../search/fields/SearchFieldsIT.java | 6 +- .../search/geo/GeoDistanceIT.java | 1 - .../search/geo/GeoShapeQueryTests.java | 10 +- .../ShardSearchTransportRequestTests.java | 1 - .../search/lookup/LeafDocLookupTests.java | 9 -- .../search/lookup/LeafFieldsLookupTests.java | 4 +- .../search/morelikethis/MoreLikeThisIT.java | 22 ++--- .../search/nested/SimpleNestedIT.java | 8 +- .../search/profile/query/QueryProfilerIT.java | 1 - .../search/query/SearchQueryIT.java | 23 +---- .../search/query/SimpleQueryStringIT.java | 4 +- .../search/scroll/SearchScrollIT.java | 4 +- .../search/slice/SliceBuilderTests.java | 5 - .../sort/GeoDistanceSortBuilderTests.java | 1 - .../suggest/CompletionSuggestSearchIT.java | 3 +- .../SharedSignificantTermsTestMethods.java | 2 +- .../action/search/simple-msearch1.json | 2 +- .../action/search/simple-msearch2.json | 2 +- .../action/search/simple-msearch3.json | 4 +- .../action/search/simple-msearch4.json | 4 +- .../search/RandomSearchRequestGenerator.java | 3 - .../aggregations/AggregatorTestCase.java | 2 +- .../action/TransportGraphExploreAction.java | 4 +- .../BatchedDocumentsIteratorTests.java | 1 - .../ml/job/persistence/MockClientBuilder.java | 4 +- .../job/persistence/StateStreamerTests.java | 1 - .../action/TransportRollupSearchAction.java | 4 +- .../DocumentLevelSecurityTests.java | 12 +-- .../integration/FieldLevelSecurityTests.java | 32 +++--- .../SecurityCachePermissionTests.java | 5 +- .../integration/SecurityClearScrollTests.java | 6 +- .../search/WatcherSearchTemplateService.java | 3 - 121 files changed, 380 insertions(+), 1212 deletions(-) delete mode 100644 rest-api-spec/src/main/resources/rest-api-spec/test/count/11_basic_with_types.yml delete mode 100644 rest-api-spec/src/main/resources/rest-api-spec/test/msearch/12_basic_with_types.yml delete mode 100644 server/src/test/java/org/elasticsearch/rest/action/search/RestCountActionTests.java diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java index 023bd1fe63786..711bb68e7fb08 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java @@ -391,7 +391,7 @@ static Request update(UpdateRequest updateRequest) throws IOException { * searches. */ static Request search(SearchRequest searchRequest, String searchEndpoint) throws IOException { - Request request = new Request(HttpPost.METHOD_NAME, endpoint(searchRequest.indices(), searchRequest.types(), searchEndpoint)); + Request request = new Request(HttpPost.METHOD_NAME, endpoint(searchRequest.indices(), searchEndpoint)); Params params = new Params(request); addSearchRequestParams(params, searchRequest); @@ -455,7 +455,7 @@ static Request searchTemplate(SearchTemplateRequest searchTemplateRequest) throw request = new Request(HttpGet.METHOD_NAME, "_render/template"); } else { SearchRequest searchRequest = searchTemplateRequest.getRequest(); - String endpoint = endpoint(searchRequest.indices(), searchRequest.types(), "_search/template"); + String endpoint = endpoint(searchRequest.indices(), "_search/template"); request = new Request(HttpGet.METHOD_NAME, endpoint); Params params = new Params(request); @@ -551,8 +551,7 @@ private static Request prepareReindexRequest(ReindexRequest reindexRequest, bool } static Request updateByQuery(UpdateByQueryRequest updateByQueryRequest) throws IOException { - String endpoint = - endpoint(updateByQueryRequest.indices(), updateByQueryRequest.getDocTypes(), "_update_by_query"); + String endpoint = endpoint(updateByQueryRequest.indices(), "_update_by_query"); Request request = new Request(HttpPost.METHOD_NAME, endpoint); Params params = new Params(request) .withRouting(updateByQueryRequest.getRouting()) @@ -579,8 +578,7 @@ static Request updateByQuery(UpdateByQueryRequest updateByQueryRequest) throws I } static Request deleteByQuery(DeleteByQueryRequest deleteByQueryRequest) throws IOException { - String endpoint = - endpoint(deleteByQueryRequest.indices(), deleteByQueryRequest.getDocTypes(), "_delete_by_query"); + String endpoint = endpoint(deleteByQueryRequest.indices(), "_delete_by_query"); Request request = new Request(HttpPost.METHOD_NAME, endpoint); Params params = new Params(request) .withRouting(deleteByQueryRequest.getRouting()) @@ -710,10 +708,12 @@ static HttpEntity createEntity(ToXContent toXContent, XContentType xContentType, return new NByteArrayEntity(source.bytes, source.offset, source.length, createContentType(xContentType)); } + @Deprecated static String endpoint(String index, String type, String id) { return new EndpointBuilder().addPathPart(index, type, id).build(); } + @Deprecated static String endpoint(String index, String type, String id, String endpoint) { return new EndpointBuilder().addPathPart(index, type, id).addPathPartAsIs(endpoint).build(); } @@ -726,6 +726,7 @@ static String endpoint(String[] indices, String endpoint) { return new EndpointBuilder().addCommaSeparatedPathParts(indices).addPathPartAsIs(endpoint).build(); } + @Deprecated static String endpoint(String[] indices, String[] types, String endpoint) { return new EndpointBuilder().addCommaSeparatedPathParts(indices).addCommaSeparatedPathParts(types) .addPathPartAsIs(endpoint).build(); @@ -736,6 +737,7 @@ static String endpoint(String[] indices, String endpoint, String[] suffixes) { .addCommaSeparatedPathParts(suffixes).build(); } + @Deprecated static String endpoint(String[] indices, String endpoint, String type) { return new EndpointBuilder().addCommaSeparatedPathParts(indices).addPathPartAsIs(endpoint).addPathPart(type).build(); } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/RollupRequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/RollupRequestConverters.java index 4cd3be057a9f5..f43505c1c6537 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/RollupRequestConverters.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/RollupRequestConverters.java @@ -95,16 +95,6 @@ static Request deleteJob(final DeleteRollupJobRequest deleteRollupJobRequest) th } static Request search(final SearchRequest request) throws IOException { - if (request.types().length > 0) { - /* - * Ideally we'd check this with the standard validation framework - * but we don't have a special request for rollup search so that'd - * be difficult. - */ - ValidationException ve = new ValidationException(); - ve.addValidationError("types are not allowed in rollup search"); - throw ve; - } return RequestConverters.search(request, "_rollup_search"); } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java index 9c5137d54427a..8c5e6b779ff47 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java @@ -404,9 +404,6 @@ public void testReindex() throws IOException { ); reindexRequest.setRemoteInfo(remoteInfo); } - if (randomBoolean()) { - reindexRequest.setSourceDocTypes("doc", "tweet"); - } if (randomBoolean()) { reindexRequest.setSourceBatchSize(randomInt(100)); } @@ -457,9 +454,6 @@ public void testUpdateByQuery() throws IOException { UpdateByQueryRequest updateByQueryRequest = new UpdateByQueryRequest(); updateByQueryRequest.indices(randomIndicesNames(1, 5)); Map expectedParams = new HashMap<>(); - if (randomBoolean()) { - updateByQueryRequest.setDocTypes(generateRandomStringArray(5, 5, false, false)); - } if (randomBoolean()) { int batchSize = randomInt(100); updateByQueryRequest.setBatchSize(batchSize); @@ -505,8 +499,6 @@ public void testUpdateByQuery() throws IOException { Request request = RequestConverters.updateByQuery(updateByQueryRequest); StringJoiner joiner = new StringJoiner("/", "/", ""); joiner.add(String.join(",", updateByQueryRequest.indices())); - if (updateByQueryRequest.getDocTypes().length > 0) - joiner.add(String.join(",", updateByQueryRequest.getDocTypes())); joiner.add("_update_by_query"); assertEquals(joiner.toString(), request.getEndpoint()); assertEquals(HttpPost.METHOD_NAME, request.getMethod()); @@ -518,9 +510,6 @@ public void testDeleteByQuery() throws IOException { DeleteByQueryRequest deleteByQueryRequest = new DeleteByQueryRequest(); deleteByQueryRequest.indices(randomIndicesNames(1, 5)); Map expectedParams = new HashMap<>(); - if (randomBoolean()) { - deleteByQueryRequest.setDocTypes(generateRandomStringArray(5, 5, false, false)); - } if (randomBoolean()) { int batchSize = randomInt(100); deleteByQueryRequest.setBatchSize(batchSize); @@ -559,8 +548,6 @@ public void testDeleteByQuery() throws IOException { Request request = RequestConverters.deleteByQuery(deleteByQueryRequest); StringJoiner joiner = new StringJoiner("/", "/", ""); joiner.add(String.join(",", deleteByQueryRequest.indices())); - if (deleteByQueryRequest.getDocTypes().length > 0) - joiner.add(String.join(",", deleteByQueryRequest.getDocTypes())); joiner.add("_delete_by_query"); assertEquals(joiner.toString(), request.getEndpoint()); assertEquals(HttpPost.METHOD_NAME, request.getMethod()); @@ -1065,13 +1052,6 @@ public void testSearch() throws Exception { String[] indices = randomIndicesNames(0, 5); SearchRequest searchRequest = new SearchRequest(indices); - int numTypes = randomIntBetween(0, 5); - String[] types = new String[numTypes]; - for (int i = 0; i < numTypes; i++) { - types[i] = "type-" + randomAlphaOfLengthBetween(2, 5); - } - searchRequest.types(types); - Map expectedParams = new HashMap<>(); setRandomSearchParams(searchRequest, expectedParams); setRandomIndicesOptions(searchRequest::indicesOptions, searchRequest::indicesOptions, expectedParams); @@ -1128,10 +1108,6 @@ public void testSearch() throws Exception { if (Strings.hasLength(index)) { endpoint.add(index); } - String type = String.join(",", types); - if (Strings.hasLength(type)) { - endpoint.add(type); - } endpoint.add(searchEndpoint); assertEquals(HttpPost.METHOD_NAME, request.getMethod()); assertEquals(endpoint.toString(), request.getEndpoint()); @@ -1142,7 +1118,6 @@ public void testSearch() throws Exception { public void testSearchNullIndicesAndTypes() { expectThrows(NullPointerException.class, () -> new SearchRequest((String[]) null)); expectThrows(NullPointerException.class, () -> new SearchRequest().indices((String[]) null)); - expectThrows(NullPointerException.class, () -> new SearchRequest().types((String[]) null)); } public void testCountNotNullSource() throws IOException { @@ -1257,7 +1232,7 @@ public void testMultiSearch() throws IOException { requests.add(searchRequest); }; MultiSearchRequest.readMultiLineFormat(new BytesArray(EntityUtils.toByteArray(request.getEntity())), - REQUEST_BODY_CONTENT_TYPE.xContent(), consumer, null, multiSearchRequest.indicesOptions(), null, null, null, null, + REQUEST_BODY_CONTENT_TYPE.xContent(), consumer, null, multiSearchRequest.indicesOptions(), null, null, null, xContentRegistry(), true); assertEquals(requests, multiSearchRequest.requests()); } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/RollupIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/RollupIT.java index d92c6ad73a738..a7c8a85131355 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/RollupIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/RollupIT.java @@ -29,8 +29,8 @@ import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.support.WriteRequest; -import org.elasticsearch.client.core.IndexerState; import org.elasticsearch.client.core.AcknowledgedResponse; +import org.elasticsearch.client.core.IndexerState; import org.elasticsearch.client.rollup.DeleteRollupJobRequest; import org.elasticsearch.client.rollup.GetRollupCapsRequest; import org.elasticsearch.client.rollup.GetRollupCapsResponse; @@ -40,10 +40,10 @@ import org.elasticsearch.client.rollup.GetRollupJobResponse; import org.elasticsearch.client.rollup.GetRollupJobResponse.JobWrapper; import org.elasticsearch.client.rollup.PutRollupJobRequest; -import org.elasticsearch.client.rollup.StartRollupJobRequest; -import org.elasticsearch.client.rollup.StartRollupJobResponse; import org.elasticsearch.client.rollup.RollableIndexCaps; import org.elasticsearch.client.rollup.RollupJobCaps; +import org.elasticsearch.client.rollup.StartRollupJobRequest; +import org.elasticsearch.client.rollup.StartRollupJobResponse; import org.elasticsearch.client.rollup.StopRollupJobRequest; import org.elasticsearch.client.rollup.StopRollupJobResponse; import org.elasticsearch.client.rollup.job.config.DateHistogramGroupConfig; @@ -54,10 +54,10 @@ import org.elasticsearch.rest.RestStatus; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; -import org.elasticsearch.search.aggregations.metrics.NumericMetricsAggregation; import org.elasticsearch.search.aggregations.metrics.AvgAggregationBuilder; import org.elasticsearch.search.aggregations.metrics.MaxAggregationBuilder; import org.elasticsearch.search.aggregations.metrics.MinAggregationBuilder; +import org.elasticsearch.search.aggregations.metrics.NumericMetricsAggregation; import org.elasticsearch.search.aggregations.metrics.SumAggregationBuilder; import org.elasticsearch.search.aggregations.metrics.ValueCountAggregationBuilder; import org.elasticsearch.search.builder.SearchSourceBuilder; @@ -259,20 +259,6 @@ public void testSearch() throws Exception { assertThat(avg.value(), closeTo(sum / numDocs, 0.00000001)); } - public void testSearchWithType() throws Exception { - SearchRequest search = new SearchRequest(rollupIndex); - search.types("a", "b", "c"); - search.source(new SearchSourceBuilder() - .size(0) - .aggregation(new AvgAggregationBuilder("avg").field("value"))); - try { - highLevelClient().rollup().search(search, RequestOptions.DEFAULT); - fail("types are not allowed but didn't fail"); - } catch (ValidationException e) { - assertEquals("Validation Failed: 1: types are not allowed in rollup search;", e.getMessage()); - } - } - public void testGetMissingRollupJob() throws Exception { GetRollupJobRequest getRollupJobRequest = new GetRollupJobRequest("missing"); RollupClient rollupClient = highLevelClient().rollup(); diff --git a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/HighlighterWithAnalyzersTests.java b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/HighlighterWithAnalyzersTests.java index 8f58a074cf102..c4cc7589678ad 100644 --- a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/HighlighterWithAnalyzersTests.java +++ b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/HighlighterWithAnalyzersTests.java @@ -108,7 +108,7 @@ public void testNgramHighlightingWithBrokenPositions() throws IOException { client().prepareIndex("test", "test", "1") .setSource("name", "ARCOTEL Hotels Deutschland").get(); refresh(); - SearchResponse search = client().prepareSearch("test").setTypes("test") + SearchResponse search = client().prepareSearch("test") .setQuery(matchQuery("name.autocomplete", "deut tel").operator(Operator.OR)) .highlighter(new HighlightBuilder().field("name.autocomplete")).get(); assertHighlight(search, 0, "name.autocomplete", 0, diff --git a/modules/lang-expression/src/test/java/org/elasticsearch/script/expression/ExpressionFieldScriptTests.java b/modules/lang-expression/src/test/java/org/elasticsearch/script/expression/ExpressionFieldScriptTests.java index 205e638314fe3..abc1b8e882389 100644 --- a/modules/lang-expression/src/test/java/org/elasticsearch/script/expression/ExpressionFieldScriptTests.java +++ b/modules/lang-expression/src/test/java/org/elasticsearch/script/expression/ExpressionFieldScriptTests.java @@ -64,7 +64,7 @@ public void setUp() throws Exception { when(fieldData.load(anyObject())).thenReturn(atomicFieldData); service = new ExpressionScriptEngine(); - lookup = new SearchLookup(mapperService, ignored -> fieldData, null); + lookup = new SearchLookup(mapperService, ignored -> fieldData); } private FieldScript.LeafFactory compile(String expression) { diff --git a/modules/lang-expression/src/test/java/org/elasticsearch/script/expression/ExpressionNumberSortScriptTests.java b/modules/lang-expression/src/test/java/org/elasticsearch/script/expression/ExpressionNumberSortScriptTests.java index e6bd503bfabe1..94acf6b35ab98 100644 --- a/modules/lang-expression/src/test/java/org/elasticsearch/script/expression/ExpressionNumberSortScriptTests.java +++ b/modules/lang-expression/src/test/java/org/elasticsearch/script/expression/ExpressionNumberSortScriptTests.java @@ -19,9 +19,6 @@ package org.elasticsearch.script.expression; -import java.io.IOException; -import java.text.ParseException; -import java.util.Collections; import org.elasticsearch.index.fielddata.AtomicNumericFieldData; import org.elasticsearch.index.fielddata.IndexNumericFieldData; import org.elasticsearch.index.fielddata.SortedNumericDoubleValues; @@ -33,6 +30,10 @@ import org.elasticsearch.search.lookup.SearchLookup; import org.elasticsearch.test.ESTestCase; +import java.io.IOException; +import java.text.ParseException; +import java.util.Collections; + import static org.mockito.Matchers.anyInt; import static org.mockito.Matchers.anyObject; import static org.mockito.Mockito.mock; @@ -63,7 +64,7 @@ public void setUp() throws Exception { when(fieldData.load(anyObject())).thenReturn(atomicFieldData); service = new ExpressionScriptEngine(); - lookup = new SearchLookup(mapperService, ignored -> fieldData, null); + lookup = new SearchLookup(mapperService, ignored -> fieldData); } private NumberSortScript.LeafFactory compile(String expression) { diff --git a/modules/lang-expression/src/test/java/org/elasticsearch/script/expression/ExpressionTermsSetQueryTests.java b/modules/lang-expression/src/test/java/org/elasticsearch/script/expression/ExpressionTermsSetQueryTests.java index 137f8e058cd85..cc67501eba319 100644 --- a/modules/lang-expression/src/test/java/org/elasticsearch/script/expression/ExpressionTermsSetQueryTests.java +++ b/modules/lang-expression/src/test/java/org/elasticsearch/script/expression/ExpressionTermsSetQueryTests.java @@ -19,9 +19,6 @@ package org.elasticsearch.script.expression; -import java.io.IOException; -import java.text.ParseException; -import java.util.Collections; import org.elasticsearch.index.fielddata.AtomicNumericFieldData; import org.elasticsearch.index.fielddata.IndexNumericFieldData; import org.elasticsearch.index.fielddata.SortedNumericDoubleValues; @@ -33,6 +30,10 @@ import org.elasticsearch.search.lookup.SearchLookup; import org.elasticsearch.test.ESTestCase; +import java.io.IOException; +import java.text.ParseException; +import java.util.Collections; + import static org.mockito.Matchers.anyInt; import static org.mockito.Matchers.anyObject; import static org.mockito.Mockito.mock; @@ -63,7 +64,7 @@ public void setUp() throws Exception { when(fieldData.load(anyObject())).thenReturn(atomicFieldData); service = new ExpressionScriptEngine(); - lookup = new SearchLookup(mapperService, ignored -> fieldData, null); + lookup = new SearchLookup(mapperService, ignored -> fieldData); } private TermsSetQueryScript.LeafFactory compile(String expression) { diff --git a/modules/lang-expression/src/test/java/org/elasticsearch/script/expression/StoredExpressionTests.java b/modules/lang-expression/src/test/java/org/elasticsearch/script/expression/StoredExpressionTests.java index 7f7f30f271acf..29e13eebf5845 100644 --- a/modules/lang-expression/src/test/java/org/elasticsearch/script/expression/StoredExpressionTests.java +++ b/modules/lang-expression/src/test/java/org/elasticsearch/script/expression/StoredExpressionTests.java @@ -67,7 +67,7 @@ public void testAllOpsDisabledIndexedScripts() throws IOException { client().prepareSearch() .setSource(new SearchSourceBuilder().scriptField("test1", new Script(ScriptType.STORED, null, "script1", Collections.emptyMap()))) - .setIndices("test").setTypes("scriptTest").get(); + .setIndices("test").get(); fail("search script should have been rejected"); } catch(Exception e) { assertThat(e.toString(), containsString("cannot execute scripts using [field] context")); diff --git a/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/MultiSearchTemplateRequestTests.java b/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/MultiSearchTemplateRequestTests.java index 39400197a3871..3b5c7562472e4 100644 --- a/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/MultiSearchTemplateRequestTests.java +++ b/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/MultiSearchTemplateRequestTests.java @@ -56,13 +56,10 @@ public void testParseRequest() throws Exception { assertThat(request.requests().get(0).getRequest().preference(), nullValue()); assertThat(request.requests().get(1).getRequest().indices()[0], equalTo("test2")); assertThat(request.requests().get(1).getRequest().indices()[1], equalTo("test3")); - assertThat(request.requests().get(1).getRequest().types()[0], equalTo("type1")); assertThat(request.requests().get(1).getRequest().requestCache(), nullValue()); assertThat(request.requests().get(1).getRequest().preference(), equalTo("_local")); assertThat(request.requests().get(2).getRequest().indices()[0], equalTo("test4")); assertThat(request.requests().get(2).getRequest().indices()[1], equalTo("test1")); - assertThat(request.requests().get(2).getRequest().types()[0], equalTo("type2")); - assertThat(request.requests().get(2).getRequest().types()[1], equalTo("type1")); assertThat(request.requests().get(2).getRequest().routing(), equalTo("123")); assertNotNull(request.requests().get(0).getScript()); assertNotNull(request.requests().get(1).getScript()); diff --git a/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/SearchTemplateIT.java b/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/SearchTemplateIT.java index 0fbc3fa16afd2..3ff2bb649a79b 100644 --- a/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/SearchTemplateIT.java +++ b/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/SearchTemplateIT.java @@ -185,7 +185,7 @@ public void testIndexedTemplateClient() throws Exception { templateParams.put("fieldParam", "foo"); SearchTemplateResponse searchResponse = new SearchTemplateRequestBuilder(client()) - .setRequest(new SearchRequest("test").types("type")) + .setRequest(new SearchRequest("test")) .setScript("testTemplate").setScriptType(ScriptType.STORED).setScriptParams(templateParams) .get(); assertHitCount(searchResponse.getResponse(), 4); @@ -235,7 +235,7 @@ public void testIndexedTemplate() throws Exception { templateParams.put("fieldParam", "foo"); SearchTemplateResponse searchResponse = new SearchTemplateRequestBuilder(client()) - .setRequest(new SearchRequest().indices("test").types("type")) + .setRequest(new SearchRequest().indices("test")) .setScript("1a") .setScriptType(ScriptType.STORED) .setScriptParams(templateParams) @@ -243,7 +243,7 @@ public void testIndexedTemplate() throws Exception { assertHitCount(searchResponse.getResponse(), 4); expectThrows(ResourceNotFoundException.class, () -> new SearchTemplateRequestBuilder(client()) - .setRequest(new SearchRequest().indices("test").types("type")) + .setRequest(new SearchRequest().indices("test")) .setScript("1000") .setScriptType(ScriptType.STORED) .setScriptParams(templateParams) @@ -251,7 +251,7 @@ public void testIndexedTemplate() throws Exception { templateParams.put("fieldParam", "bar"); searchResponse = new SearchTemplateRequestBuilder(client()) - .setRequest(new SearchRequest("test").types("type")) + .setRequest(new SearchRequest("test")) .setScript("2").setScriptType(ScriptType.STORED).setScriptParams(templateParams) .get(); assertHitCount(searchResponse.getResponse(), 1); @@ -297,7 +297,7 @@ public void testIndexedTemplateOverwrite() throws Exception { templateParams.put("P_Keyword1", "dev"); IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> new SearchTemplateRequestBuilder(client()) - .setRequest(new SearchRequest("testindex").types("test")) + .setRequest(new SearchRequest("testindex")) .setScript("git01").setScriptType(ScriptType.STORED).setScriptParams(templateParams) .get()); assertThat(e.getMessage(), containsString("No negative slop allowed")); @@ -308,7 +308,7 @@ public void testIndexedTemplateOverwrite() throws Exception { ); SearchTemplateResponse searchResponse = new SearchTemplateRequestBuilder(client()) - .setRequest(new SearchRequest("testindex").types("test")) + .setRequest(new SearchRequest("testindex")) .setScript("git01").setScriptType(ScriptType.STORED).setScriptParams(templateParams) .get(); assertHitCount(searchResponse.getResponse(), 1); @@ -352,7 +352,7 @@ public void testIndexedTemplateWithArray() throws Exception { arrayTemplateParams.put("fieldParam", fieldParams); SearchTemplateResponse searchResponse = new SearchTemplateRequestBuilder(client()) - .setRequest(new SearchRequest("test").types("type")) + .setRequest(new SearchRequest("test")) .setScript("4").setScriptType(ScriptType.STORED).setScriptParams(arrayTemplateParams) .get(); assertHitCount(searchResponse.getResponse(), 5); diff --git a/modules/lang-mustache/src/test/resources/org/elasticsearch/script/mustache/simple-msearch-template.json b/modules/lang-mustache/src/test/resources/org/elasticsearch/script/mustache/simple-msearch-template.json index 11a0091492c4d..1809b4012fde1 100644 --- a/modules/lang-mustache/src/test/resources/org/elasticsearch/script/mustache/simple-msearch-template.json +++ b/modules/lang-mustache/src/test/resources/org/elasticsearch/script/mustache/simple-msearch-template.json @@ -1,6 +1,6 @@ {"index":["test0", "test1"], "request_cache": true} {"source": {"query" : {"match_{{template}}" :{}}}, "params": {"template": "all" } } -{"index" : "test2,test3", "type" : "type1", "preference": "_local"} +{"index" : "test2,test3", "preference": "_local"} {"source": {"query" : {"match_{{template}}" :{}}}, "params": {"template": "all" } } -{"index" : ["test4", "test1"], "type" : [ "type2", "type1" ], "routing": "123"} +{"index" : ["test4", "test1"], "routing": "123"} {"source": {"query" : {"match_{{template}}" :{}}}, "params": {"template": "all" } } diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/NeedsScoreTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/NeedsScoreTests.java index eeb636d6697c6..92816426eaf47 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/NeedsScoreTests.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/NeedsScoreTests.java @@ -47,7 +47,7 @@ public void testNeedsScores() { PainlessScriptEngine service = new PainlessScriptEngine(Settings.EMPTY, contexts); QueryShardContext shardContext = index.newQueryShardContext(0, null, () -> 0, null); - SearchLookup lookup = new SearchLookup(index.mapperService(), shardContext::getForField, null); + SearchLookup lookup = new SearchLookup(index.mapperService(), shardContext::getForField); NumberSortScript.Factory factory = service.compile(null, "1.2", NumberSortScript.CONTEXT, Collections.emptyMap()); NumberSortScript.LeafFactory ss = factory.newFactory(Collections.emptyMap(), lookup); diff --git a/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/TokenCountFieldMapperIntegrationIT.java b/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/TokenCountFieldMapperIntegrationIT.java index b5348ac91465c..d05be9ba2d7f9 100644 --- a/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/TokenCountFieldMapperIntegrationIT.java +++ b/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/TokenCountFieldMapperIntegrationIT.java @@ -21,6 +21,7 @@ import com.carrotsearch.randomizedtesting.annotations.Name; import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.DocWriteResponse; import org.elasticsearch.action.bulk.BulkResponse; @@ -180,7 +181,7 @@ private SearchRequestBuilder searchByNumericRange(int low, int high) { } private SearchRequestBuilder prepareSearch() { - SearchRequestBuilder request = client().prepareSearch("test").setTypes("test"); + SearchRequestBuilder request = client().prepareSearch("test"); request.addStoredField("foo.token_count"); request.addStoredField("foo.token_count_without_position_increments"); if (loadCountedFields) { diff --git a/modules/parent-join/src/test/java/org/elasticsearch/join/query/HasChildQueryBuilderTests.java b/modules/parent-join/src/test/java/org/elasticsearch/join/query/HasChildQueryBuilderTests.java index 8a50e3a734977..925f85eb684f0 100644 --- a/modules/parent-join/src/test/java/org/elasticsearch/join/query/HasChildQueryBuilderTests.java +++ b/modules/parent-join/src/test/java/org/elasticsearch/join/query/HasChildQueryBuilderTests.java @@ -20,6 +20,7 @@ package org.elasticsearch.join.query; import com.carrotsearch.randomizedtesting.generators.RandomPicks; + import org.apache.lucene.index.Term; import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BooleanQuery; @@ -280,13 +281,9 @@ public void testFromJson() throws IOException { } public void testToQueryInnerQueryType() throws IOException { - String[] searchTypes = new String[]{TYPE}; QueryShardContext shardContext = createShardContext(); - shardContext.setTypes(searchTypes); HasChildQueryBuilder hasChildQueryBuilder = hasChildQuery(CHILD_DOC, new IdsQueryBuilder().addIds("id"), ScoreMode.None); Query query = hasChildQueryBuilder.toQuery(shardContext); - //verify that the context types are still the same as the ones we previously set - assertThat(shardContext.getTypes(), equalTo(searchTypes)); assertLateParsingQuery(query, CHILD_DOC, "id"); } diff --git a/modules/parent-join/src/test/java/org/elasticsearch/join/query/HasParentQueryBuilderTests.java b/modules/parent-join/src/test/java/org/elasticsearch/join/query/HasParentQueryBuilderTests.java index ea77ad80799ba..a634690dcac85 100644 --- a/modules/parent-join/src/test/java/org/elasticsearch/join/query/HasParentQueryBuilderTests.java +++ b/modules/parent-join/src/test/java/org/elasticsearch/join/query/HasParentQueryBuilderTests.java @@ -192,14 +192,10 @@ public void testIllegalValues() throws IOException { } public void testToQueryInnerQueryType() throws IOException { - String[] searchTypes = new String[]{TYPE}; QueryShardContext shardContext = createShardContext(); - shardContext.setTypes(searchTypes); HasParentQueryBuilder hasParentQueryBuilder = new HasParentQueryBuilder(PARENT_DOC, new IdsQueryBuilder().addIds("id"), false); Query query = hasParentQueryBuilder.toQuery(shardContext); - //verify that the context types are still the same as the ones we previously set - assertThat(shardContext.getTypes(), equalTo(searchTypes)); HasChildQueryBuilderTests.assertLateParsingQuery(query, PARENT_DOC, "id"); } diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RestReindexAction.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RestReindexAction.java index ae9ca0be7ca65..e90c5165ab940 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RestReindexAction.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RestReindexAction.java @@ -69,11 +69,6 @@ public class RestReindexAction extends AbstractBaseReindexRestHandler= 0) { throw new IllegalArgumentException(name + " containing [,] not supported but got [" + indexOrType + "]"); diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/CancelTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/CancelTests.java index 6d6ae01f0626c..283ead0c91872 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/CancelTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/CancelTests.java @@ -216,7 +216,7 @@ public void testReindexCancel() throws Exception { assertThat(response, matcher().created(modified).reasonCancelled(equalTo("by user request"))); refresh("dest"); - assertHitCount(client().prepareSearch("dest").setTypes(TYPE).setSize(0).get(), modified); + assertHitCount(client().prepareSearch("dest").setSize(0).get(), modified); }, equalTo("reindex from [" + INDEX + "] to [dest][" + TYPE + "]")); } @@ -251,7 +251,7 @@ public void testReindexCancelWithWorkers() throws Exception { (response, total, modified) -> { assertThat(response, matcher().created(modified).reasonCancelled(equalTo("by user request")).slices(hasSize(5))); refresh("dest"); - assertHitCount(client().prepareSearch("dest").setTypes(TYPE).setSize(0).get(), modified); + assertHitCount(client().prepareSearch("dest").setSize(0).get(), modified); }, equalTo("reindex from [" + INDEX + "] to [dest][" + TYPE + "]")); } diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/DeleteByQueryBasicTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/DeleteByQueryBasicTests.java index 5bef735be5e6e..7d115ecef4ea9 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/DeleteByQueryBasicTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/DeleteByQueryBasicTests.java @@ -63,25 +63,25 @@ public void testBasics() throws Exception { client().prepareIndex("test", "test", "7").setSource("foo", "f") ); - assertHitCount(client().prepareSearch("test").setTypes("test").setSize(0).get(), 7); + assertHitCount(client().prepareSearch("test").setSize(0).get(), 7); // Deletes two docs that matches "foo:a" assertThat(deleteByQuery().source("test").filter(termQuery("foo", "a")).refresh(true).get(), matcher().deleted(2)); - assertHitCount(client().prepareSearch("test").setTypes("test").setSize(0).get(), 5); + assertHitCount(client().prepareSearch("test").setSize(0).get(), 5); // Deletes the two first docs with limit by size DeleteByQueryRequestBuilder request = deleteByQuery().source("test").filter(QueryBuilders.matchAllQuery()).size(2).refresh(true); request.source().addSort("foo.keyword", SortOrder.ASC); assertThat(request.get(), matcher().deleted(2)); - assertHitCount(client().prepareSearch("test").setTypes("test").setSize(0).get(), 3); + assertHitCount(client().prepareSearch("test").setSize(0).get(), 3); // Deletes but match no docs assertThat(deleteByQuery().source("test").filter(termQuery("foo", "no_match")).refresh(true).get(), matcher().deleted(0)); - assertHitCount(client().prepareSearch("test").setTypes("test").setSize(0).get(), 3); + assertHitCount(client().prepareSearch("test").setSize(0).get(), 3); // Deletes all remaining docs assertThat(deleteByQuery().source("test").filter(QueryBuilders.matchAllQuery()).refresh(true).get(), matcher().deleted(3)); - assertHitCount(client().prepareSearch("test").setTypes("test").setSize(0).get(), 0); + assertHitCount(client().prepareSearch("test").setSize(0).get(), 0); } public void testDeleteByQueryWithOneIndex() throws Exception { @@ -236,7 +236,7 @@ public void testSlices() throws Exception { client().prepareIndex("test", "test", "6").setSource("foo", "e"), client().prepareIndex("test", "test", "7").setSource("foo", "f") ); - assertHitCount(client().prepareSearch("test").setTypes("test").setSize(0).get(), 7); + assertHitCount(client().prepareSearch("test").setSize(0).get(), 7); int slices = randomSlices(); int expectedSlices = expectedSliceStatuses(slices, "test"); @@ -251,7 +251,7 @@ public void testSlices() throws Exception { matcher() .deleted(2) .slices(hasSize(expectedSlices))); - assertHitCount(client().prepareSearch("test").setTypes("test").setSize(0).get(), 5); + assertHitCount(client().prepareSearch("test").setSize(0).get(), 5); // Delete remaining docs assertThat( @@ -263,7 +263,7 @@ public void testSlices() throws Exception { matcher() .deleted(5) .slices(hasSize(expectedSlices))); - assertHitCount(client().prepareSearch("test").setTypes("test").setSize(0).get(), 0); + assertHitCount(client().prepareSearch("test").setSize(0).get(), 0); } public void testMultipleSources() throws Exception { @@ -301,7 +301,7 @@ public void testMultipleSources() throws Exception { .slices(hasSize(expectedSlices))); for (String index : docs.keySet()) { - assertHitCount(client().prepareSearch(index).setTypes("test").setSize(0).get(), 0); + assertHitCount(client().prepareSearch(index).setSize(0).get(), 0); } } diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexBasicTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexBasicTests.java index 43764bf25fcbf..c085327b08639 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexBasicTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexBasicTests.java @@ -109,7 +109,7 @@ public void testCopyManyWithSlices() throws Exception { // Use a small batch size so we have to use more than one batch copy.source().setSize(5); assertThat(copy.get(), matcher().created(max).batches(greaterThanOrEqualTo(max / 5)).slices(hasSize(expectedSlices))); - assertHitCount(client().prepareSearch("dest").setTypes("type").setSize(0).get(), max); + assertHitCount(client().prepareSearch("dest").setSize(0).get(), max); // Copy some of the docs int half = max / 2; diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RestReindexActionTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RestReindexActionTests.java index f0aca38545b4c..3245845610c00 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RestReindexActionTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RestReindexActionTests.java @@ -32,7 +32,6 @@ import org.junit.Before; import java.io.IOException; -import java.util.Arrays; import java.util.HashMap; import java.util.Map; @@ -216,27 +215,6 @@ private RemoteInfo buildRemoteInfoHostTestCase(String hostInRest) throws IOExcep return RestReindexAction.buildRemoteInfo(source); } - /** - * test deprecation is logged if one or more types are used in source search request inside reindex - */ - public void testTypeInSource() throws IOException { - FakeRestRequest.Builder requestBuilder = new FakeRestRequest.Builder(xContentRegistry()) - .withMethod(Method.POST) - .withPath("/_reindex"); - XContentBuilder b = JsonXContent.contentBuilder().startObject(); - { - b.startObject("source"); - { - b.field("type", randomFrom(Arrays.asList("\"t1\"", "[\"t1\", \"t2\"]", "\"_doc\""))); - } - b.endObject(); - } - b.endObject(); - requestBuilder.withContent(new BytesArray(BytesReference.bytes(b).toBytesRef()), XContentType.JSON); - dispatchRequest(requestBuilder.build()); - assertWarnings(RestReindexAction.TYPES_DEPRECATION_MESSAGE); - } - /** * test deprecation is logged if a type is used in the destination index request inside reindex */ diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/UpdateByQueryBasicTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/UpdateByQueryBasicTests.java index 91a92005c2cdc..c40114c734609 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/UpdateByQueryBasicTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/UpdateByQueryBasicTests.java @@ -41,7 +41,7 @@ public void testBasics() throws Exception { client().prepareIndex("test", "test", "2").setSource("foo", "a"), client().prepareIndex("test", "test", "3").setSource("foo", "b"), client().prepareIndex("test", "test", "4").setSource("foo", "c")); - assertHitCount(client().prepareSearch("test").setTypes("test").setSize(0).get(), 4); + assertHitCount(client().prepareSearch("test").setSize(0).get(), 4); assertEquals(1, client().prepareGet("test", "test", "1").get().getVersion()); assertEquals(1, client().prepareGet("test", "test", "4").get().getVersion()); @@ -79,7 +79,7 @@ public void testSlices() throws Exception { client().prepareIndex("test", "test", "2").setSource("foo", "a"), client().prepareIndex("test", "test", "3").setSource("foo", "b"), client().prepareIndex("test", "test", "4").setSource("foo", "c")); - assertHitCount(client().prepareSearch("test").setTypes("test").setSize(0).get(), 4); + assertHitCount(client().prepareSearch("test").setSize(0).get(), 4); assertEquals(1, client().prepareGet("test", "test", "1").get().getVersion()); assertEquals(1, client().prepareGet("test", "test", "4").get().getVersion()); diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/remote/RemoteRequestBuildersTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/remote/RemoteRequestBuildersTests.java index bf6856754044d..28b8d32688397 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/remote/RemoteRequestBuildersTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/remote/RemoteRequestBuildersTests.java @@ -63,50 +63,27 @@ public void testIntialSearchPath() { SearchRequest searchRequest = new SearchRequest().source(new SearchSourceBuilder()); assertEquals("/_search", initialSearch(searchRequest, query, remoteVersion).getEndpoint()); searchRequest.indices("a"); - searchRequest.types("b"); - assertEquals("/a/b/_search", initialSearch(searchRequest, query, remoteVersion).getEndpoint()); + assertEquals("/a/_search", initialSearch(searchRequest, query, remoteVersion).getEndpoint()); searchRequest.indices("a", "b"); - searchRequest.types("c", "d"); - assertEquals("/a,b/c,d/_search", initialSearch(searchRequest, query, remoteVersion).getEndpoint()); + assertEquals("/a,b/_search", initialSearch(searchRequest, query, remoteVersion).getEndpoint()); searchRequest.indices("cat,"); - assertEquals("/cat%2C/c,d/_search", initialSearch(searchRequest, query, remoteVersion).getEndpoint()); + assertEquals("/cat%2C/_search", initialSearch(searchRequest, query, remoteVersion).getEndpoint()); searchRequest.indices("cat/"); - assertEquals("/cat%2F/c,d/_search", initialSearch(searchRequest, query, remoteVersion).getEndpoint()); + assertEquals("/cat%2F/_search", initialSearch(searchRequest, query, remoteVersion).getEndpoint()); searchRequest.indices("cat/", "dog"); - assertEquals("/cat%2F,dog/c,d/_search", initialSearch(searchRequest, query, remoteVersion).getEndpoint()); + assertEquals("/cat%2F,dog/_search", initialSearch(searchRequest, query, remoteVersion).getEndpoint()); // test a specific date math + all characters that need escaping. searchRequest.indices("", "<>/{}|+:,"); - assertEquals("/%3Ccat%7Bnow%2Fd%7D%3E,%3C%3E%2F%7B%7D%7C%2B%3A%2C/c,d/_search", + assertEquals("/%3Ccat%7Bnow%2Fd%7D%3E,%3C%3E%2F%7B%7D%7C%2B%3A%2C/_search", initialSearch(searchRequest, query, remoteVersion).getEndpoint()); // re-escape already escaped (no special handling). searchRequest.indices("%2f", "%3a"); - assertEquals("/%252f,%253a/c,d/_search", initialSearch(searchRequest, query, remoteVersion).getEndpoint()); + assertEquals("/%252f,%253a/_search", initialSearch(searchRequest, query, remoteVersion).getEndpoint()); searchRequest.indices("%2fcat,"); - assertEquals("/%252fcat%2C/c,d/_search", initialSearch(searchRequest, query, remoteVersion).getEndpoint()); + assertEquals("/%252fcat%2C/_search", initialSearch(searchRequest, query, remoteVersion).getEndpoint()); searchRequest.indices("%3ccat/"); - assertEquals("/%253ccat%2F/c,d/_search", initialSearch(searchRequest, query, remoteVersion).getEndpoint()); - - searchRequest.indices("ok"); - searchRequest.types("cat,"); - expectBadStartRequest(searchRequest, "Type", ",", "cat,"); - searchRequest.types("cat,", "dog"); - expectBadStartRequest(searchRequest, "Type", ",", "cat,"); - searchRequest.types("dog", "cat,"); - expectBadStartRequest(searchRequest, "Type", ",", "cat,"); - searchRequest.types("cat/"); - expectBadStartRequest(searchRequest, "Type", "/", "cat/"); - searchRequest.types("cat/", "dog"); - expectBadStartRequest(searchRequest, "Type", "/", "cat/"); - searchRequest.types("dog", "cat/"); - expectBadStartRequest(searchRequest, "Type", "/", "cat/"); - } - - private void expectBadStartRequest(SearchRequest searchRequest, String type, String bad, String failed) { - Version remoteVersion = Version.fromId(between(0, Version.CURRENT.id)); - BytesReference query = new BytesArray("{}"); - IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> initialSearch(searchRequest, query, remoteVersion)); - assertEquals(type + " containing [" + bad + "] not supported but got [" + failed + "]", e.getMessage()); + assertEquals("/%253ccat%2F/_search", initialSearch(searchRequest, query, remoteVersion).getEndpoint()); } public void testInitialSearchParamsSort() { diff --git a/plugins/analysis-icu/src/test/java/org/elasticsearch/index/mapper/ICUCollationKeywordFieldMapperIT.java b/plugins/analysis-icu/src/test/java/org/elasticsearch/index/mapper/ICUCollationKeywordFieldMapperIT.java index 3bcefe0cf5680..8a55d3424c1ff 100644 --- a/plugins/analysis-icu/src/test/java/org/elasticsearch/index/mapper/ICUCollationKeywordFieldMapperIT.java +++ b/plugins/analysis-icu/src/test/java/org/elasticsearch/index/mapper/ICUCollationKeywordFieldMapperIT.java @@ -18,15 +18,10 @@ */ package org.elasticsearch.index.mapper; -import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertOrderedSearchHits; - import com.ibm.icu.text.Collator; import com.ibm.icu.text.RuleBasedCollator; import com.ibm.icu.util.ULocale; + import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -43,6 +38,12 @@ import java.util.Collection; import java.util.Collections; +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertOrderedSearchHits; + public class ICUCollationKeywordFieldMapperIT extends ESIntegTestCase { @Override @@ -82,7 +83,6 @@ public void testBasicUsage() throws Exception { // searching for either of the terms should return both results since they collate to the same value SearchRequest request = new SearchRequest() .indices(index) - .types(type) .source(new SearchSourceBuilder() .fetchSource(false) .query(QueryBuilders.termQuery("collate", randomBoolean() ? equivalent[0] : equivalent[1])) @@ -122,7 +122,6 @@ public void testMultipleValues() throws Exception { // using sort mode = max, values B and C will be used for the sort SearchRequest request = new SearchRequest() .indices(index) - .types(type) .source(new SearchSourceBuilder() .fetchSource(false) .query(QueryBuilders.termQuery("collate", "a")) @@ -139,7 +138,6 @@ public void testMultipleValues() throws Exception { // same thing, using different sort mode that will use a for both docs request = new SearchRequest() .indices(index) - .types(type) .source(new SearchSourceBuilder() .fetchSource(false) .query(QueryBuilders.termQuery("collate", "a")) @@ -183,7 +181,6 @@ public void testNormalization() throws Exception { // searching for either of the terms should return both results since they collate to the same value SearchRequest request = new SearchRequest() .indices(index) - .types(type) .source(new SearchSourceBuilder() .fetchSource(false) .query(QueryBuilders.termQuery("collate", randomBoolean() ? equivalent[0] : equivalent[1])) @@ -225,7 +222,6 @@ public void testSecondaryStrength() throws Exception { SearchRequest request = new SearchRequest() .indices(index) - .types(type) .source(new SearchSourceBuilder() .fetchSource(false) .query(QueryBuilders.termQuery("collate", randomBoolean() ? equivalent[0] : equivalent[1])) @@ -268,7 +264,6 @@ public void testIgnorePunctuation() throws Exception { SearchRequest request = new SearchRequest() .indices(index) - .types(type) .source(new SearchSourceBuilder() .fetchSource(false) .query(QueryBuilders.termQuery("collate", randomBoolean() ? equivalent[0] : equivalent[1])) @@ -312,7 +307,6 @@ public void testIgnoreWhitespace() throws Exception { SearchRequest request = new SearchRequest() .indices(index) - .types(type) .source(new SearchSourceBuilder() .fetchSource(false) .sort("collate", SortOrder.ASC) @@ -352,7 +346,6 @@ public void testNumerics() throws Exception { SearchRequest request = new SearchRequest() .indices(index) - .types(type) .source(new SearchSourceBuilder() .fetchSource(false) .sort("collate", SortOrder.ASC) @@ -394,7 +387,6 @@ public void testIgnoreAccentsButNotCase() throws Exception { SearchRequest request = new SearchRequest() .indices(index) - .types(type) .source(new SearchSourceBuilder() .fetchSource(false) .sort("collate", SortOrder.ASC) @@ -435,7 +427,6 @@ public void testUpperCaseFirst() throws Exception { SearchRequest request = new SearchRequest() .indices(index) - .types(type) .source(new SearchSourceBuilder() .fetchSource(false) .sort("collate", SortOrder.ASC) @@ -487,7 +478,6 @@ public void testCustomRules() throws Exception { SearchRequest request = new SearchRequest() .indices(index) - .types(type) .source(new SearchSourceBuilder() .fetchSource(false) .query(QueryBuilders.termQuery("collate", randomBoolean() ? equivalent[0] : equivalent[1])) diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/count.json b/rest-api-spec/src/main/resources/rest-api-spec/api/count.json index b933091b9a416..86486c39e1728 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/count.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/count.json @@ -5,13 +5,6 @@ "url": { "path": "/_count", "paths": ["/_count", "/{index}/_count"], - "deprecated_paths" : [ - { - "version" : "7.0.0", - "path" : "/{index}/{type}/_count", - "description" : "Specifying types in urls has been deprecated" - } - ], "parts": { "index": { "type" : "list", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/count/11_basic_with_types.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/count/11_basic_with_types.yml deleted file mode 100644 index 48cfc610b435e..0000000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/count/11_basic_with_types.yml +++ /dev/null @@ -1,66 +0,0 @@ -setup: - - do: - indices.create: - index: test - - do: - index: - index: test - type: test - id: 1 - body: { foo: bar } - - - do: - indices.refresh: - index: [test] - ---- -"count with body": - - do: - count: - index: test - type: test - body: - query: - match: - foo: bar - - - match: {count : 1} - - - do: - count: - index: test - body: - query: - match: - foo: test - - - match: {count : 0} - ---- -"count with empty body": -# empty body should default to match_all query - - do: - count: - index: test - type: test - body: { } - - - match: {count : 1} - - - do: - count: - index: test - type: test - - - match: {count : 1} - ---- -"count body without query element": - - do: - catch: bad_request - count: - index: test - type: test - body: - match: - foo: bar diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/msearch/12_basic_with_types.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/msearch/12_basic_with_types.yml deleted file mode 100644 index 64e88de404ab7..0000000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/msearch/12_basic_with_types.yml +++ /dev/null @@ -1,97 +0,0 @@ ---- -setup: - - - do: - index: - index: index_1 - type: test - id: 1 - body: { foo: bar } - - - do: - index: - index: index_1 - type: test - id: 2 - body: { foo: baz } - - - do: - index: - index: index_1 - type: test - id: 3 - body: { foo: foo } - - - do: - index: - index: index_2 - type: test - id: 1 - body: { foo: foo } - - - do: - indices.refresh: {} - ---- -"Basic multi-search": - - - do: - msearch: - rest_total_hits_as_int: true - body: - - index: index_* - - query: - match: {foo: foo} - - index: index_2 - - query: - match_all: {} - - index: index_1 - - query: - match: {foo: foo} - - index: index_3 - - query: - match_all: {} - - type: test - - query: - match_all: {} - - - match: { responses.0.hits.total: 2 } - - match: { responses.1.hits.total: 1 } - - match: { responses.2.hits.total: 1 } - - match: { responses.3.error.root_cause.0.type: index_not_found_exception } - - match: { responses.3.error.root_cause.0.reason: "/no.such.index/" } - - match: { responses.3.error.root_cause.0.index: index_3 } - - match: { responses.4.hits.total: 4 } - ---- -"Least impact smoke test": -# only passing these parameters to make sure they are consumed - - do: - msearch: - rest_total_hits_as_int: true - max_concurrent_shard_requests: 1 - max_concurrent_searches: 1 - body: - - index: index_* - - query: - match: {foo: foo} - - index: index_2 - - query: - match_all: {} - - index: index_1 - - query: - match: {foo: foo} - - index: index_3 - - query: - match_all: {} - - type: test - - query: - match_all: {} - - - match: { responses.0.hits.total: 2 } - - match: { responses.1.hits.total: 1 } - - match: { responses.2.hits.total: 1 } - - match: { responses.3.error.root_cause.0.type: index_not_found_exception } - - match: { responses.3.error.root_cause.0.reason: "/no.such.index/" } - - match: { responses.3.error.root_cause.0.index: index_3 } - - match: { responses.4.hits.total: 4 } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/TransportValidateQueryAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/TransportValidateQueryAction.java index 6e10d3d42187f..8f85e91d29ee2 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/TransportValidateQueryAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/TransportValidateQueryAction.java @@ -193,7 +193,7 @@ protected ShardValidateQueryResponse shardOperation(ShardValidateQueryRequest re boolean valid; String explanation = null; String error = null; - ShardSearchLocalRequest shardSearchLocalRequest = new ShardSearchLocalRequest(request.shardId(), request.types(), + ShardSearchLocalRequest shardSearchLocalRequest = new ShardSearchLocalRequest(request.shardId(), request.nowInMillis(), request.filteringAliases()); SearchContext searchContext = searchService.createSearchContext(shardSearchLocalRequest, SearchService.NO_TIMEOUT); try { diff --git a/server/src/main/java/org/elasticsearch/action/explain/TransportExplainAction.java b/server/src/main/java/org/elasticsearch/action/explain/TransportExplainAction.java index c29da21fe4afe..e6a2ef5be9d04 100644 --- a/server/src/main/java/org/elasticsearch/action/explain/TransportExplainAction.java +++ b/server/src/main/java/org/elasticsearch/action/explain/TransportExplainAction.java @@ -30,7 +30,6 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.routing.ShardIterator; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.Strings; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.lease.Releasables; @@ -38,7 +37,6 @@ import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.get.GetResult; import org.elasticsearch.index.mapper.IdFieldMapper; -import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.Uid; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.ShardId; @@ -110,14 +108,8 @@ protected void asyncShardOperation(ExplainRequest request, ShardId shardId, @Override protected ExplainResponse shardOperation(ExplainRequest request, ShardId shardId) throws IOException { - String[] types; - if (MapperService.SINGLE_MAPPING_NAME.equals(request.type())) { // typeless explain call - types = Strings.EMPTY_ARRAY; - } else { - types = new String[] { request.type() }; - } - ShardSearchLocalRequest shardSearchLocalRequest = new ShardSearchLocalRequest(shardId, - types, request.nowInMillis, request.filteringAlias()); + ShardSearchLocalRequest shardSearchLocalRequest = new ShardSearchLocalRequest(shardId, request.nowInMillis, + request.filteringAlias()); SearchContext context = searchService.createSearchContext(shardSearchLocalRequest, SearchService.NO_TIMEOUT); Engine.GetResult result = null; try { diff --git a/server/src/main/java/org/elasticsearch/action/search/MultiSearchRequest.java b/server/src/main/java/org/elasticsearch/action/search/MultiSearchRequest.java index 528be0369166e..25500efd5ed5b 100644 --- a/server/src/main/java/org/elasticsearch/action/search/MultiSearchRequest.java +++ b/server/src/main/java/org/elasticsearch/action/search/MultiSearchRequest.java @@ -170,7 +170,6 @@ public static void readMultiLineFormat(BytesReference data, CheckedBiConsumer consumer, String[] indices, IndicesOptions indicesOptions, - String[] types, String routing, String searchType, Boolean ccsMinimizeRoundtrips, @@ -190,9 +189,6 @@ public static void readMultiLineFormat(BytesReference data, if (indicesOptions != null) { searchRequest.indicesOptions(indicesOptions); } - if (types != null && types.length > 0) { - searchRequest.types(types); - } if (routing != null) { searchRequest.routing(routing); } @@ -219,8 +215,6 @@ public static void readMultiLineFormat(BytesReference data, throw new IllegalArgumentException("explicit index in multi search is not allowed"); } searchRequest.indices(nodeStringArrayValue(value)); - } else if ("type".equals(entry.getKey()) || "types".equals(entry.getKey())) { - searchRequest.types(nodeStringArrayValue(value)); } else if ("search_type".equals(entry.getKey()) || "searchType".equals(entry.getKey())) { searchRequest.searchType(nodeStringValue(value, null)); } else if ("ccs_minimize_roundtrips".equals(entry.getKey()) || "ccsMinimizeRoundtrips".equals(entry.getKey())) { @@ -320,9 +314,6 @@ public static void writeSearchRequestParams(SearchRequest request, XContentBuild xContentBuilder.field("ignore_unavailable", request.indicesOptions().ignoreUnavailable()); xContentBuilder.field("allow_no_indices", request.indicesOptions().allowNoIndices()); } - if (request.types() != null) { - xContentBuilder.field("types", request.types()); - } if (request.searchType() != null) { xContentBuilder.field("search_type", request.searchType().name().toLowerCase(Locale.ROOT)); } diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java b/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java index 53dafc153fc4b..602853e10b292 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java @@ -19,6 +19,7 @@ package org.elasticsearch.action.search; +import org.elasticsearch.Version; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.IndicesRequest; @@ -91,8 +92,6 @@ public final class SearchRequest extends ActionRequest implements IndicesRequest private int preFilterShardSize = DEFAULT_PRE_FILTER_SHARD_SIZE; - private String[] types = Strings.EMPTY_ARRAY; - private boolean ccsMinimizeRoundtrips = true; public static final IndicesOptions DEFAULT_INDICES_OPTIONS = IndicesOptions.strictExpandOpenAndForbidClosedIgnoreThrottled(); @@ -172,7 +171,6 @@ private SearchRequest(SearchRequest searchRequest, String[] indices, String loca this.scroll = searchRequest.scroll; this.searchType = searchRequest.searchType; this.source = searchRequest.source; - this.types = searchRequest.types; this.localClusterAlias = localClusterAlias; this.absoluteStartMillis = absoluteStartMillis; this.finalReduce = finalReduce; @@ -192,7 +190,14 @@ public SearchRequest(StreamInput in) throws IOException { preference = in.readOptionalString(); scroll = in.readOptionalWriteable(Scroll::new); source = in.readOptionalWriteable(SearchSourceBuilder::new); - types = in.readStringArray(); + if (in.getVersion().before(Version.V_8_0_0)) { + // types no longer relevant so ignore + String[] types = in.readStringArray(); + if (types.length > 0) { + throw new IllegalStateException( + "types are no longer supported in search requests but found [" + Arrays.toString(types) + "]"); + } + } indicesOptions = IndicesOptions.readIndicesOptions(in); requestCache = in.readOptionalBoolean(); batchedReduceSize = in.readVInt(); @@ -219,7 +224,10 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalString(preference); out.writeOptionalWriteable(scroll); out.writeOptionalWriteable(source); - out.writeStringArray(types); + if (out.getVersion().before(Version.V_8_0_0)) { + // types not supported so send an empty array to previous versions + out.writeStringArray(Strings.EMPTY_ARRAY); + } indicesOptions.writeIndicesOptions(out); out.writeOptionalBoolean(requestCache); out.writeVInt(batchedReduceSize); @@ -342,35 +350,6 @@ public void setCcsMinimizeRoundtrips(boolean ccsMinimizeRoundtrips) { this.ccsMinimizeRoundtrips = ccsMinimizeRoundtrips; } - /** - * The document types to execute the search against. Defaults to be executed against - * all types. - * - * @deprecated Types are in the process of being removed. Instead of using a type, prefer to - * filter on a field on the document. - */ - @Deprecated - public String[] types() { - return types; - } - - /** - * The document types to execute the search against. Defaults to be executed against - * all types. - * - * @deprecated Types are in the process of being removed. Instead of using a type, prefer to - * filter on a field on the document. - */ - @Deprecated - public SearchRequest types(String... types) { - Objects.requireNonNull(types, "types must not be null"); - for (String type : types) { - Objects.requireNonNull(type, "type must not be null"); - } - this.types = types; - return this; - } - /** * A comma separated list of routing values to control the shards the search will be executed on. */ @@ -590,9 +569,6 @@ public String getDescription() { sb.append("indices["); Strings.arrayToDelimitedString(indices, ",", sb); sb.append("], "); - sb.append("types["); - Strings.arrayToDelimitedString(types, ",", sb); - sb.append("], "); sb.append("search_type[").append(searchType).append("], "); if (source != null) { @@ -626,7 +602,6 @@ public boolean equals(Object o) { Objects.equals(source, that.source) && Objects.equals(requestCache, that.requestCache) && Objects.equals(scroll, that.scroll) && - Arrays.equals(types, that.types) && Objects.equals(batchedReduceSize, that.batchedReduceSize) && Objects.equals(maxConcurrentShardRequests, that.maxConcurrentShardRequests) && Objects.equals(preFilterShardSize, that.preFilterShardSize) && @@ -640,7 +615,7 @@ public boolean equals(Object o) { @Override public int hashCode() { return Objects.hash(searchType, Arrays.hashCode(indices), routing, preference, source, requestCache, - scroll, Arrays.hashCode(types), indicesOptions, batchedReduceSize, maxConcurrentShardRequests, preFilterShardSize, + scroll, indicesOptions, batchedReduceSize, maxConcurrentShardRequests, preFilterShardSize, allowPartialSearchResults, localClusterAlias, absoluteStartMillis, ccsMinimizeRoundtrips); } @@ -650,7 +625,6 @@ public String toString() { "searchType=" + searchType + ", indices=" + Arrays.toString(indices) + ", indicesOptions=" + indicesOptions + - ", types=" + Arrays.toString(types) + ", routing='" + routing + '\'' + ", preference='" + preference + '\'' + ", requestCache=" + requestCache + diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java index 3e2d835a4b803..ceaee96f5c131 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java @@ -58,17 +58,6 @@ public SearchRequestBuilder setIndices(String... indices) { return this; } - /** - * The document types to execute the search against. Defaults to be executed against - * all types. - * @deprecated Types are going away, prefer filtering on a field. - */ - @Deprecated - public SearchRequestBuilder setTypes(String... types) { - request.types(types); - return this; - } - /** * The search type to execute, defaults to {@link org.elasticsearch.action.search.SearchType#DEFAULT}. */ diff --git a/server/src/main/java/org/elasticsearch/index/IndexService.java b/server/src/main/java/org/elasticsearch/index/IndexService.java index 2d86a2b436d9d..a7751bc765552 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexService.java +++ b/server/src/main/java/org/elasticsearch/index/IndexService.java @@ -519,11 +519,10 @@ public IndexSettings getIndexSettings() { } /** - * Creates a new QueryShardContext. The context has not types set yet, if types are required set them via - * {@link QueryShardContext#setTypes(String...)}. + * Creates a new QueryShardContext. * - * Passing a {@code null} {@link IndexReader} will return a valid context, however it won't be able to make - * {@link IndexReader}-specific optimizations, such as rewriting containing range queries. + * Passing a {@code null} {@link IndexReader} will return a valid context, however it won't be able to make {@link IndexReader}-specific + * optimizations, such as rewriting containing range queries. */ public QueryShardContext newQueryShardContext(int shardId, IndexReader indexReader, LongSupplier nowInMillis, String clusterAlias) { return new QueryShardContext( diff --git a/server/src/main/java/org/elasticsearch/index/SearchSlowLog.java b/server/src/main/java/org/elasticsearch/index/SearchSlowLog.java index abd67a47049d3..a2193a1bdf73d 100644 --- a/server/src/main/java/org/elasticsearch/index/SearchSlowLog.java +++ b/server/src/main/java/org/elasticsearch/index/SearchSlowLog.java @@ -19,8 +19,8 @@ package org.elasticsearch.index; -import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.common.Strings; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Setting; @@ -170,13 +170,6 @@ public String toString() { sb.append("-1"); } sb.append("], "); - if (context.getQueryShardContext().getTypes() == null) { - sb.append("types[], "); - } else { - sb.append("types["); - Strings.arrayToDelimitedString(context.getQueryShardContext().getTypes(), ",", sb); - sb.append("], "); - } if (context.groupStats() == null) { sb.append("stats[], "); } else { diff --git a/server/src/main/java/org/elasticsearch/index/query/IdsQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/IdsQueryBuilder.java index 358a2fccff108..e09d71938add4 100644 --- a/server/src/main/java/org/elasticsearch/index/query/IdsQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/IdsQueryBuilder.java @@ -22,7 +22,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.lucene.search.MatchNoDocsQuery; import org.apache.lucene.search.Query; -import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.Version; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.Strings; @@ -33,14 +33,12 @@ import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.IdFieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; -import java.util.Collection; import java.util.Collections; import java.util.HashSet; import java.util.Objects; @@ -62,8 +60,6 @@ public class IdsQueryBuilder extends AbstractQueryBuilder { private final Set ids = new HashSet<>(); - private String[] types = Strings.EMPTY_ARRAY; - /** * Creates a new IdsQueryBuilder with no types specified upfront */ @@ -76,38 +72,23 @@ public IdsQueryBuilder() { */ public IdsQueryBuilder(StreamInput in) throws IOException { super(in); - types = in.readStringArray(); + if (in.getVersion().before(Version.V_8_0_0)) { + // types no longer relevant so ignore + String[] types = in.readStringArray(); + if (types.length > 0) { + throw new IllegalStateException("types are no longer supported in ids query but found [" + Arrays.toString(types) + "]"); + } + } Collections.addAll(ids, in.readStringArray()); } @Override protected void doWriteTo(StreamOutput out) throws IOException { - out.writeStringArray(types); - out.writeStringArray(ids.toArray(new String[ids.size()])); - } - - /** - * Add types to query - * - * @deprecated Types are in the process of being removed, prefer to filter on a field instead. - */ - @Deprecated - public IdsQueryBuilder types(String... types) { - if (types == null) { - throw new IllegalArgumentException("[" + NAME + "] types cannot be null"); + if (out.getVersion().before(Version.V_8_0_0)) { + // types not supported so send an empty array to previous versions + out.writeStringArray(Strings.EMPTY_ARRAY); } - this.types = types; - return this; - } - - /** - * Returns the types used in this query - * - * @deprecated Types are in the process of being removed, prefer to filter on a field instead. - */ - @Deprecated - public String[] types() { - return this.types; + out.writeStringArray(ids.toArray(new String[ids.size()])); } /** @@ -131,9 +112,6 @@ public Set ids() { @Override protected void doXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(NAME); - if (types.length > 0) { - builder.array(TYPE_FIELD.getPreferredName(), types); - } builder.startArray(VALUES_FIELD.getPreferredName()); for (String value : ids) { builder.value(value); @@ -147,18 +125,13 @@ protected void doXContent(XContentBuilder builder, Params params) throws IOExcep () -> new IdsQueryBuilder()); static { - PARSER.declareStringArray(fromList(String.class, IdsQueryBuilder::types), IdsQueryBuilder.TYPE_FIELD); PARSER.declareStringArray(fromList(String.class, IdsQueryBuilder::addIds), IdsQueryBuilder.VALUES_FIELD); declareStandardFields(PARSER); } public static IdsQueryBuilder fromXContent(XContentParser parser) { try { - IdsQueryBuilder builder = PARSER.apply(parser, null); - if (builder.types().length > 0) { - deprecationLogger.deprecatedAndMaybeLog("ids_query_with_types", TYPES_DEPRECATION_MESSAGE); - } - return builder; + return PARSER.apply(parser, null); } catch (IllegalArgumentException e) { throw new ParsingException(parser.getTokenLocation(), e.getMessage(), e); } @@ -179,33 +152,17 @@ protected Query doToQuery(QueryShardContext context) throws IOException { if (this.ids.isEmpty()) { return Queries.newMatchNoDocsQuery("Missing ids in \"" + this.getName() + "\" query."); } else { - final DocumentMapper mapper = context.getMapperService().documentMapper(); - Collection typesForQuery; - if (types.length == 0) { - typesForQuery = context.queryTypes(); - } else if (types.length == 1 && MetaData.ALL.equals(types[0])) { - typesForQuery = Collections.singleton(mapper.type()); - } else { - typesForQuery = new HashSet<>(Arrays.asList(types)); - } - - if (typesForQuery.contains(mapper.type())) { - return idField.termsQuery(new ArrayList<>(ids), context); - } else { - return new MatchNoDocsQuery("Type mismatch"); - } - + return idField.termsQuery(new ArrayList<>(ids), context); } } @Override protected int doHashCode() { - return Objects.hash(ids, Arrays.hashCode(types)); + return Objects.hash(ids); } @Override protected boolean doEquals(IdsQueryBuilder other) { - return Objects.equals(ids, other.ids) && - Arrays.equals(types, other.types); + return Objects.equals(ids, other.ids); } } diff --git a/server/src/main/java/org/elasticsearch/index/query/QueryShardContext.java b/server/src/main/java/org/elasticsearch/index/query/QueryShardContext.java index 0c33ee7102346..d65fcc7fc6d35 100644 --- a/server/src/main/java/org/elasticsearch/index/query/QueryShardContext.java +++ b/server/src/main/java/org/elasticsearch/index/query/QueryShardContext.java @@ -31,7 +31,6 @@ import org.elasticsearch.client.Client; import org.elasticsearch.common.CheckedFunction; import org.elasticsearch.common.ParsingException; -import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.lucene.search.Queries; @@ -57,9 +56,7 @@ import org.elasticsearch.transport.RemoteClusterAware; import java.io.IOException; -import java.util.Arrays; import java.util.Collection; -import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -84,19 +81,10 @@ public class QueryShardContext extends QueryRewriteContext { private final BiFunction> indexFieldDataService; private final int shardId; private final IndexReader reader; - private String[] types = Strings.EMPTY_ARRAY; private boolean cacheable = true; private final SetOnce frozen = new SetOnce<>(); private final Index fullyQualifiedIndex; - public void setTypes(String... types) { - this.types = types; - } - - public String[] getTypes() { - return types; - } - private final Map namedQueries = new HashMap<>(); private boolean allowUnmappedFields; private boolean mapUnmappedFieldAsString; @@ -116,7 +104,6 @@ public QueryShardContext(QueryShardContext source) { this(source.shardId, source.indexSettings, source.bitsetFilterCache, source.indexFieldDataService, source.mapperService, source.similarityService, source.scriptService, source.getXContentRegistry(), source.getWriteableRegistry(), source.client, source.reader, source.nowInMillis, source.fullyQualifiedIndex); - this.types = source.getTypes(); } private QueryShardContext(int shardId, IndexSettings indexSettings, BitsetFilterCache bitsetFilterCache, @@ -256,24 +243,12 @@ MappedFieldType failIfFieldMappingNotFound(String name, MappedFieldType fieldMap } } - /** - * Returns the narrowed down explicit types, or, if not set, all types. - */ - public Collection queryTypes() { - String[] types = getTypes(); - if (types == null || types.length == 0 || (types.length == 1 && types[0].equals("_all"))) { - DocumentMapper mapper = getMapperService().documentMapper(); - return mapper == null ? Collections.emptyList() : Collections.singleton(mapper.type()); - } - return Arrays.asList(types); - } - private SearchLookup lookup = null; public SearchLookup lookup() { if (lookup == null) { lookup = new SearchLookup(getMapperService(), - mappedFieldType -> indexFieldDataService.apply(mappedFieldType, fullyQualifiedIndex.getName()), types); + mappedFieldType -> indexFieldDataService.apply(mappedFieldType, fullyQualifiedIndex.getName())); } return lookup; } diff --git a/server/src/main/java/org/elasticsearch/index/reindex/AbstractBulkByScrollRequest.java b/server/src/main/java/org/elasticsearch/index/reindex/AbstractBulkByScrollRequest.java index add8eb6b668ae..67385640beb7b 100644 --- a/server/src/main/java/org/elasticsearch/index/reindex/AbstractBulkByScrollRequest.java +++ b/server/src/main/java/org/elasticsearch/index/reindex/AbstractBulkByScrollRequest.java @@ -460,9 +460,6 @@ protected void searchToString(StringBuilder b) { } else { b.append("[all indices]"); } - if (searchRequest.types() != null && searchRequest.types().length != 0) { - b.append(Arrays.toString(searchRequest.types())); - } } @Override diff --git a/server/src/main/java/org/elasticsearch/index/reindex/ClientScrollableHitSource.java b/server/src/main/java/org/elasticsearch/index/reindex/ClientScrollableHitSource.java index 454983ba7942a..bbc12fb2c2aea 100644 --- a/server/src/main/java/org/elasticsearch/index/reindex/ClientScrollableHitSource.java +++ b/server/src/main/java/org/elasticsearch/index/reindex/ClientScrollableHitSource.java @@ -70,9 +70,8 @@ public ClientScrollableHitSource(Logger logger, BackoffPolicy backoffPolicy, Thr @Override public void doStart(Consumer onResponse) { if (logger.isDebugEnabled()) { - logger.debug("executing initial scroll against {}{}", - isEmpty(firstSearchRequest.indices()) ? "all indices" : firstSearchRequest.indices(), - isEmpty(firstSearchRequest.types()) ? "" : firstSearchRequest.types()); + logger.debug("executing initial scroll against {}", + isEmpty(firstSearchRequest.indices()) ? "all indices" : firstSearchRequest.indices()); } searchWithRetry(listener -> client.search(firstSearchRequest, listener), r -> consume(r, onResponse)); } diff --git a/server/src/main/java/org/elasticsearch/index/reindex/DeleteByQueryRequest.java b/server/src/main/java/org/elasticsearch/index/reindex/DeleteByQueryRequest.java index 227ddd489779c..c1e2f011a99de 100644 --- a/server/src/main/java/org/elasticsearch/index/reindex/DeleteByQueryRequest.java +++ b/server/src/main/java/org/elasticsearch/index/reindex/DeleteByQueryRequest.java @@ -87,19 +87,6 @@ public DeleteByQueryRequest setQuery(QueryBuilder query) { return this; } - /** - * Set the document types for the delete - * @deprecated Types are in the process of being removed. Instead of - * using a type, prefer to filter on a field of the document. - */ - @Deprecated - public DeleteByQueryRequest setDocTypes(String... types) { - if (types != null) { - getSearchRequest().types(types); - } - return this; - } - /** * Set routing limiting the process to the shards that match that routing value */ @@ -140,21 +127,6 @@ public String getRouting() { return getSearchRequest().routing(); } - /** - * Gets the document types on which this request would be executed. Returns an empty array if all - * types are to be processed. - * @deprecated Types are in the process of being removed. Instead of - * using a type, prefer to filter on a field of the document. - */ - @Deprecated - public String[] getDocTypes() { - if (getSearchRequest().types() != null) { - return getSearchRequest().types(); - } else { - return new String[0]; - } - } - @Override protected DeleteByQueryRequest self() { return this; @@ -208,29 +180,6 @@ public IndicesOptions indicesOptions() { return getSearchRequest().indicesOptions(); } - /** - * Gets the document types on which this request would be executed. - * @deprecated Types are in the process of being removed. Instead of - * using a type, prefer to filter on a field of the document. - */ - @Deprecated - public String[] types() { - assert getSearchRequest() != null; - return getSearchRequest().types(); - } - - /** - * Set the document types for the delete - * @deprecated Types are in the process of being removed. Instead of - * using a type, prefer to filter on a field of the document. - */ - @Deprecated - public DeleteByQueryRequest types(String... types) { - assert getSearchRequest() != null; - getSearchRequest().types(types); - return this; - } - @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); diff --git a/server/src/main/java/org/elasticsearch/index/reindex/ReindexRequest.java b/server/src/main/java/org/elasticsearch/index/reindex/ReindexRequest.java index de171e88fbca1..79445a6a8740d 100644 --- a/server/src/main/java/org/elasticsearch/index/reindex/ReindexRequest.java +++ b/server/src/main/java/org/elasticsearch/index/reindex/ReindexRequest.java @@ -138,16 +138,6 @@ public ReindexRequest setSourceIndices(String... sourceIndices) { return this; } - /** - * Set the document types which need to be copied from the source indices - */ - public ReindexRequest setSourceDocTypes(String... docTypes) { - if (docTypes != null) { - this.getSearchRequest().types(docTypes); - } - return this; - } - /** * Sets the scroll size for setting how many documents are to be processed in one batch during reindex */ @@ -295,10 +285,6 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.rawField("query", remoteInfo.getQuery().streamInput(), builder.contentType()); } builder.array("index", getSearchRequest().indices()); - String[] types = getSearchRequest().types(); - if (types.length > 0) { - builder.array("type", types); - } getSearchRequest().source().innerToXContent(builder, params); builder.endObject(); } diff --git a/server/src/main/java/org/elasticsearch/index/reindex/UpdateByQueryRequest.java b/server/src/main/java/org/elasticsearch/index/reindex/UpdateByQueryRequest.java index 03922ccc79bd4..138af524bf9c9 100644 --- a/server/src/main/java/org/elasticsearch/index/reindex/UpdateByQueryRequest.java +++ b/server/src/main/java/org/elasticsearch/index/reindex/UpdateByQueryRequest.java @@ -83,19 +83,6 @@ public UpdateByQueryRequest setQuery(QueryBuilder query) { return this; } - /** - * Set the document types for the update - * @deprecated Types are in the process of being removed. Instead of - * using a type, prefer to filter on a field of the document. - */ - @Deprecated - public UpdateByQueryRequest setDocTypes(String... types) { - if (types != null) { - getSearchRequest().types(types); - } - return this; - } - /** * Set routing limiting the process to the shards that match that routing value */ @@ -136,21 +123,6 @@ public String getRouting() { return getSearchRequest().routing(); } - /** - * Gets the document types on which this request would be executed. Returns an empty array if all - * types are to be processed. - * @deprecated Types are in the process of being removed. Instead of - * using a type, prefer to filter on a field of the document. - */ - @Deprecated - public String[] getDocTypes() { - if (getSearchRequest().types() != null) { - return getSearchRequest().types(); - } else { - return new String[0]; - } - } - /** * Ingest pipeline to set on index requests made by this action. */ diff --git a/server/src/main/java/org/elasticsearch/rest/action/search/RestCountAction.java b/server/src/main/java/org/elasticsearch/rest/action/search/RestCountAction.java index ecdd34ca07c88..df744978ab198 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/search/RestCountAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/search/RestCountAction.java @@ -19,13 +19,11 @@ package org.elasticsearch.rest.action.search; -import org.apache.logging.log4j.LogManager; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.index.query.QueryBuilder; @@ -46,10 +44,6 @@ import static org.elasticsearch.search.internal.SearchContext.DEFAULT_TERMINATE_AFTER; public class RestCountAction extends BaseRestHandler { - private static final DeprecationLogger deprecationLogger = new DeprecationLogger( - LogManager.getLogger(RestCountAction.class)); - static final String TYPES_DEPRECATION_MESSAGE = "[types removal]" + - " Specifying types in count requests is deprecated."; public RestCountAction(Settings settings, RestController controller) { super(settings); @@ -57,10 +51,6 @@ public RestCountAction(Settings settings, RestController controller) { controller.registerHandler(GET, "/_count", this); controller.registerHandler(POST, "/{index}/_count", this); controller.registerHandler(GET, "/{index}/_count", this); - - // Deprecated typed endpoints. - controller.registerHandler(POST, "/{index}/{type}/_count", this); - controller.registerHandler(GET, "/{index}/{type}/_count", this); } @Override @@ -90,11 +80,6 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC searchSourceBuilder.minScore(minScore); } - if (request.hasParam("type")) { - deprecationLogger.deprecatedAndMaybeLog("count_with_types", TYPES_DEPRECATION_MESSAGE); - countRequest.types(Strings.splitStringByCommaToArray(request.param("type"))); - } - countRequest.preference(request.param("preference")); final int terminateAfter = request.paramAsInt("terminate_after", DEFAULT_TERMINATE_AFTER); diff --git a/server/src/main/java/org/elasticsearch/rest/action/search/RestMultiSearchAction.java b/server/src/main/java/org/elasticsearch/rest/action/search/RestMultiSearchAction.java index d9beba089857f..1aef4aa5254a4 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/search/RestMultiSearchAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/search/RestMultiSearchAction.java @@ -134,7 +134,7 @@ public static void parseMultiLineRequest(RestRequest request, IndicesOptions ind final Tuple sourceTuple = request.contentOrSourceParam(); final XContent xContent = sourceTuple.v1().xContent(); final BytesReference data = sourceTuple.v2(); - MultiSearchRequest.readMultiLineFormat(data, xContent, consumer, indices, indicesOptions, Strings.EMPTY_ARRAY, routing, + MultiSearchRequest.readMultiLineFormat(data, xContent, consumer, indices, indicesOptions, routing, searchType, ccsMinimizeRoundtrips, request.getXContentRegistry(), allowExplicitIndex); } diff --git a/server/src/main/java/org/elasticsearch/search/DefaultSearchContext.java b/server/src/main/java/org/elasticsearch/search/DefaultSearchContext.java index f0eaa0d51dadf..1c6c2cbfa0c33 100644 --- a/server/src/main/java/org/elasticsearch/search/DefaultSearchContext.java +++ b/server/src/main/java/org/elasticsearch/search/DefaultSearchContext.java @@ -43,7 +43,6 @@ import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.ObjectMapper; -import org.elasticsearch.index.mapper.TypeFieldMapper; import org.elasticsearch.index.query.AbstractQueryBuilder; import org.elasticsearch.index.query.ParsedQuery; import org.elasticsearch.index.query.QueryBuilder; @@ -76,7 +75,6 @@ import java.io.IOException; import java.io.UncheckedIOException; import java.util.ArrayList; -import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.List; @@ -181,7 +179,6 @@ final class DefaultSearchContext extends SearchContext { this.minNodeVersion = minNodeVersion; queryShardContext = indexService.newQueryShardContext(request.shardId().id(), searcher.getIndexReader(), request::nowInMillis, shardTarget.getClusterAlias()); - queryShardContext.setTypes(request.types()); queryBoost = request.indexBoost(); } @@ -269,13 +266,8 @@ public void preProcess(boolean rewrite) { @Override public Query buildFilteredQuery(Query query) { List filters = new ArrayList<>(); - Query typeFilter = createTypeFilter(queryShardContext.getTypes()); - if (typeFilter != null) { - filters.add(typeFilter); - } if (mapperService().hasNested() - && typeFilter == null // when a _type filter is set, it will automatically exclude nested docs && new NestedHelper(mapperService()).mightMatchNestedDocs(query) && (aliasFilter == null || new NestedHelper(mapperService()).mightMatchNestedDocs(aliasFilter))) { filters.add(Queries.newNonNestedFilter()); @@ -301,17 +293,6 @@ && new NestedHelper(mapperService()).mightMatchNestedDocs(query) } } - private Query createTypeFilter(String[] types) { - if (types != null && types.length >= 1) { - MappedFieldType ft = mapperService().fullName(TypeFieldMapper.NAME); - if (ft != null) { - // ft might be null if no documents have been indexed yet - return ft.termsQuery(Arrays.asList(types), queryShardContext); - } - } - return null; - } - @Override public long id() { return this.id; diff --git a/server/src/main/java/org/elasticsearch/search/internal/ShardSearchLocalRequest.java b/server/src/main/java/org/elasticsearch/search/internal/ShardSearchLocalRequest.java index 12eef5dcf29de..70c2aa6e5ac6d 100644 --- a/server/src/main/java/org/elasticsearch/search/internal/ShardSearchLocalRequest.java +++ b/server/src/main/java/org/elasticsearch/search/internal/ShardSearchLocalRequest.java @@ -19,6 +19,7 @@ package org.elasticsearch.search.internal; +import org.elasticsearch.Version; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchType; import org.elasticsearch.common.Nullable; @@ -35,6 +36,7 @@ import org.elasticsearch.search.builder.SearchSourceBuilder; import java.io.IOException; +import java.util.Arrays; /** * Shard level search request that gets created and consumed on the local node. @@ -61,7 +63,6 @@ public class ShardSearchLocalRequest implements ShardSearchRequest { private final int numberOfShards; private final SearchType searchType; private final Scroll scroll; - private final String[] types; private final float indexBoost; private final Boolean requestCache; private final long nowInMillis; @@ -74,7 +75,7 @@ public class ShardSearchLocalRequest implements ShardSearchRequest { public ShardSearchLocalRequest(SearchRequest searchRequest, ShardId shardId, int numberOfShards, AliasFilter aliasFilter, float indexBoost, long nowInMillis, @Nullable String clusterAlias, String[] indexRoutings) { - this(shardId, numberOfShards, searchRequest.searchType(), searchRequest.source(), searchRequest.types(), + this(shardId, numberOfShards, searchRequest.searchType(), searchRequest.source(), searchRequest.requestCache(), aliasFilter, indexBoost, searchRequest.allowPartialSearchResults(), indexRoutings, searchRequest.preference(), searchRequest.scroll(), nowInMillis, clusterAlias); // If allowPartialSearchResults is unset (ie null), the cluster-level default should have been substituted @@ -82,11 +83,11 @@ public ShardSearchLocalRequest(SearchRequest searchRequest, ShardId shardId, int assert searchRequest.allowPartialSearchResults() != null; } - public ShardSearchLocalRequest(ShardId shardId, String[] types, long nowInMillis, AliasFilter aliasFilter) { - this(shardId, -1, null, null, types, null, aliasFilter, 1.0f, false, Strings.EMPTY_ARRAY, null, null, nowInMillis, null); + public ShardSearchLocalRequest(ShardId shardId, long nowInMillis, AliasFilter aliasFilter) { + this(shardId, -1, null, null, null, aliasFilter, 1.0f, false, Strings.EMPTY_ARRAY, null, null, nowInMillis, null); } - private ShardSearchLocalRequest(ShardId shardId, int numberOfShards, SearchType searchType, SearchSourceBuilder source, String[] types, + private ShardSearchLocalRequest(ShardId shardId, int numberOfShards, SearchType searchType, SearchSourceBuilder source, Boolean requestCache, AliasFilter aliasFilter, float indexBoost, boolean allowPartialSearchResults, String[] indexRoutings, String preference, Scroll scroll, long nowInMillis, @Nullable String clusterAlias) { @@ -94,7 +95,6 @@ private ShardSearchLocalRequest(ShardId shardId, int numberOfShards, SearchType this.numberOfShards = numberOfShards; this.searchType = searchType; this.source = source; - this.types = types; this.requestCache = requestCache; this.aliasFilter = aliasFilter; this.indexBoost = indexBoost; @@ -112,7 +112,14 @@ private ShardSearchLocalRequest(ShardId shardId, int numberOfShards, SearchType numberOfShards = in.readVInt(); scroll = in.readOptionalWriteable(Scroll::new); source = in.readOptionalWriteable(SearchSourceBuilder::new); - types = in.readStringArray(); + if (in.getVersion().before(Version.V_8_0_0)) { + // types no longer relevant so ignore + String[] types = in.readStringArray(); + if (types.length > 0) { + throw new IllegalStateException( + "types are no longer supported in search requests but found [" + Arrays.toString(types) + "]"); + } + } aliasFilter = new AliasFilter(in); indexBoost = in.readFloat(); nowInMillis = in.readVLong(); @@ -131,7 +138,10 @@ protected final void innerWriteTo(StreamOutput out, boolean asKey) throws IOExce } out.writeOptionalWriteable(scroll); out.writeOptionalWriteable(source); - out.writeStringArray(types); + if (out.getVersion().before(Version.V_8_0_0)) { + // types not supported so send an empty array to previous versions + out.writeStringArray(Strings.EMPTY_ARRAY); + } aliasFilter.writeTo(out); out.writeFloat(indexBoost); if (asKey == false) { @@ -151,11 +161,6 @@ public ShardId shardId() { return shardId; } - @Override - public String[] types() { - return types; - } - @Override public SearchSourceBuilder source() { return source; diff --git a/server/src/main/java/org/elasticsearch/search/internal/ShardSearchRequest.java b/server/src/main/java/org/elasticsearch/search/internal/ShardSearchRequest.java index b88bda9009043..0e9d5de9788f3 100644 --- a/server/src/main/java/org/elasticsearch/search/internal/ShardSearchRequest.java +++ b/server/src/main/java/org/elasticsearch/search/internal/ShardSearchRequest.java @@ -49,8 +49,6 @@ public interface ShardSearchRequest { ShardId shardId(); - String[] types(); - SearchSourceBuilder source(); AliasFilter getAliasFilter(); diff --git a/server/src/main/java/org/elasticsearch/search/internal/ShardSearchTransportRequest.java b/server/src/main/java/org/elasticsearch/search/internal/ShardSearchTransportRequest.java index 9aae2df27779f..07557d9459973 100644 --- a/server/src/main/java/org/elasticsearch/search/internal/ShardSearchTransportRequest.java +++ b/server/src/main/java/org/elasticsearch/search/internal/ShardSearchTransportRequest.java @@ -93,11 +93,6 @@ public ShardId shardId() { return shardSearchLocalRequest.shardId(); } - @Override - public String[] types() { - return shardSearchLocalRequest.types(); - } - @Override public SearchSourceBuilder source() { return shardSearchLocalRequest.source(); diff --git a/server/src/main/java/org/elasticsearch/search/lookup/DocLookup.java b/server/src/main/java/org/elasticsearch/search/lookup/DocLookup.java index 9cfe121fe7ebd..0022cdfdc9d99 100644 --- a/server/src/main/java/org/elasticsearch/search/lookup/DocLookup.java +++ b/server/src/main/java/org/elasticsearch/search/lookup/DocLookup.java @@ -19,7 +19,6 @@ package org.elasticsearch.search.lookup; import org.apache.lucene.index.LeafReaderContext; -import org.elasticsearch.common.Nullable; import org.elasticsearch.index.fielddata.IndexFieldData; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.MapperService; @@ -31,13 +30,9 @@ public class DocLookup { private final MapperService mapperService; private final Function> fieldDataLookup; - @Nullable - private final String[] types; - - DocLookup(MapperService mapperService, Function> fieldDataLookup, @Nullable String[] types) { + DocLookup(MapperService mapperService, Function> fieldDataLookup) { this.mapperService = mapperService; this.fieldDataLookup = fieldDataLookup; - this.types = types; } public MapperService mapperService() { @@ -49,10 +44,6 @@ public IndexFieldData getForField(MappedFieldType fieldType) { } public LeafDocLookup getLeafDocLookup(LeafReaderContext context) { - return new LeafDocLookup(mapperService, fieldDataLookup, types, context); - } - - public String[] getTypes() { - return types; + return new LeafDocLookup(mapperService, fieldDataLookup, context); } } diff --git a/server/src/main/java/org/elasticsearch/search/lookup/FieldsLookup.java b/server/src/main/java/org/elasticsearch/search/lookup/FieldsLookup.java index feefb1fcb30e4..c089501e38596 100644 --- a/server/src/main/java/org/elasticsearch/search/lookup/FieldsLookup.java +++ b/server/src/main/java/org/elasticsearch/search/lookup/FieldsLookup.java @@ -19,22 +19,18 @@ package org.elasticsearch.search.lookup; import org.apache.lucene.index.LeafReaderContext; -import org.elasticsearch.common.Nullable; import org.elasticsearch.index.mapper.MapperService; public class FieldsLookup { private final MapperService mapperService; - @Nullable - private final String[] types; - FieldsLookup(MapperService mapperService, @Nullable String[] types) { + FieldsLookup(MapperService mapperService) { this.mapperService = mapperService; - this.types = types; } public LeafFieldsLookup getLeafFieldsLookup(LeafReaderContext context) { - return new LeafFieldsLookup(mapperService, types, context.reader()); + return new LeafFieldsLookup(mapperService, context.reader()); } } diff --git a/server/src/main/java/org/elasticsearch/search/lookup/LeafDocLookup.java b/server/src/main/java/org/elasticsearch/search/lookup/LeafDocLookup.java index 04522834579e4..4fd6c78e47513 100644 --- a/server/src/main/java/org/elasticsearch/search/lookup/LeafDocLookup.java +++ b/server/src/main/java/org/elasticsearch/search/lookup/LeafDocLookup.java @@ -21,7 +21,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.lucene.index.LeafReaderContext; import org.elasticsearch.ExceptionsHelper; -import org.elasticsearch.common.Nullable; import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.index.fielddata.IndexFieldData; import org.elasticsearch.index.fielddata.ScriptDocValues; @@ -31,7 +30,6 @@ import java.io.IOException; import java.security.AccessController; import java.security.PrivilegedAction; -import java.util.Arrays; import java.util.Collection; import java.util.HashMap; import java.util.Map; @@ -51,18 +49,14 @@ public class LeafDocLookup implements Map> { private final MapperService mapperService; private final Function> fieldDataLookup; - @Nullable - private final String[] types; - private final LeafReaderContext reader; private int docId = -1; - LeafDocLookup(MapperService mapperService, Function> fieldDataLookup, @Nullable String[] types, + LeafDocLookup(MapperService mapperService, Function> fieldDataLookup, LeafReaderContext reader) { this.mapperService = mapperService; this.fieldDataLookup = fieldDataLookup; - this.types = types; this.reader = reader; } @@ -90,8 +84,7 @@ public ScriptDocValues get(Object key) { if (scriptValues == null) { final MappedFieldType fieldType = mapperService.fullName(fieldName); if (fieldType == null) { - throw new IllegalArgumentException("No field found for [" + fieldName + "] in mapping with types " + - Arrays.toString(types)); + throw new IllegalArgumentException("No field found for [" + fieldName + "] in mapping"); } // load fielddata on behalf of the script: otherwise it would need additional permissions // to deal with pagedbytes/ramusagestimator/etc diff --git a/server/src/main/java/org/elasticsearch/search/lookup/LeafFieldsLookup.java b/server/src/main/java/org/elasticsearch/search/lookup/LeafFieldsLookup.java index d98a8585ecf6a..f614ce400ef36 100644 --- a/server/src/main/java/org/elasticsearch/search/lookup/LeafFieldsLookup.java +++ b/server/src/main/java/org/elasticsearch/search/lookup/LeafFieldsLookup.java @@ -20,7 +20,6 @@ import org.apache.lucene.index.LeafReader; import org.elasticsearch.ElasticsearchParseException; -import org.elasticsearch.common.Nullable; import org.elasticsearch.index.fieldvisitor.SingleFieldsVisitor; import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.MappedFieldType; @@ -29,7 +28,6 @@ import java.io.IOException; import java.util.ArrayList; -import java.util.Arrays; import java.util.Collection; import java.util.HashMap; import java.util.List; @@ -42,18 +40,14 @@ public class LeafFieldsLookup implements Map { private final MapperService mapperService; - @Nullable - private final String[] types; - private final LeafReader reader; private int docId = -1; private final Map cachedFieldData = new HashMap<>(); - LeafFieldsLookup(MapperService mapperService, @Nullable String[] types, LeafReader reader) { + LeafFieldsLookup(MapperService mapperService, LeafReader reader) { this.mapperService = mapperService; - this.types = types; this.reader = reader; } @@ -136,7 +130,7 @@ private FieldLookup loadFieldData(String name) { if (data == null) { MappedFieldType fieldType = mapperService.fullName(name); if (fieldType == null) { - throw new IllegalArgumentException("No field found for [" + name + "] in mapping with types " + Arrays.toString(types)); + throw new IllegalArgumentException("No field found for [" + name + "] in mapping"); } data = new FieldLookup(fieldType); cachedFieldData.put(name, data); diff --git a/server/src/main/java/org/elasticsearch/search/lookup/SearchLookup.java b/server/src/main/java/org/elasticsearch/search/lookup/SearchLookup.java index 8f4b5143dc6cd..04aef7d2e8f63 100644 --- a/server/src/main/java/org/elasticsearch/search/lookup/SearchLookup.java +++ b/server/src/main/java/org/elasticsearch/search/lookup/SearchLookup.java @@ -20,7 +20,6 @@ package org.elasticsearch.search.lookup; import org.apache.lucene.index.LeafReaderContext; -import org.elasticsearch.common.Nullable; import org.elasticsearch.index.fielddata.IndexFieldData; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.MapperService; @@ -35,11 +34,10 @@ public class SearchLookup { final FieldsLookup fieldsLookup; - public SearchLookup(MapperService mapperService, Function> fieldDataLookup, - @Nullable String[] types) { - docMap = new DocLookup(mapperService, fieldDataLookup, types); + public SearchLookup(MapperService mapperService, Function> fieldDataLookup) { + docMap = new DocLookup(mapperService, fieldDataLookup); sourceLookup = new SourceLookup(); - fieldsLookup = new FieldsLookup(mapperService, types); + fieldsLookup = new FieldsLookup(mapperService); } public LeafSearchLookup getLeafSearchLookup(LeafReaderContext context) { diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TasksIT.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TasksIT.java index fc758788e6197..40d5b028b4ba8 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TasksIT.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TasksIT.java @@ -354,12 +354,12 @@ public void testSearchTaskDescriptions() { headers.put("Foo-Header", "bar"); headers.put("Custom-Task-Header", "my_value"); assertSearchResponse( - client().filterWithHeader(headers).prepareSearch("test").setTypes("doc").setQuery(QueryBuilders.matchAllQuery()).get()); + client().filterWithHeader(headers).prepareSearch("test").setQuery(QueryBuilders.matchAllQuery()).get()); // the search operation should produce one main task List mainTask = findEvents(SearchAction.NAME, Tuple::v1); assertEquals(1, mainTask.size()); - assertThat(mainTask.get(0).getDescription(), startsWith("indices[test], types[doc], search_type[")); + assertThat(mainTask.get(0).getDescription(), startsWith("indices[test], search_type[")); assertThat(mainTask.get(0).getDescription(), containsString("\"query\":{\"match_all\"")); assertTaskHeaders(mainTask.get(0)); @@ -747,13 +747,12 @@ public void testTaskStoringSuccesfulResult() throws Exception { assertNoFailures(client().admin().indices().prepareRefresh(TaskResultsService.TASK_INDEX).get()); SearchResponse searchResponse = client().prepareSearch(TaskResultsService.TASK_INDEX) - .setTypes(TaskResultsService.TASK_TYPE) .setSource(SearchSourceBuilder.searchSource().query(QueryBuilders.termQuery("task.action", taskInfo.getAction()))) .get(); assertEquals(1L, searchResponse.getHits().getTotalHits().value); - searchResponse = client().prepareSearch(TaskResultsService.TASK_INDEX).setTypes(TaskResultsService.TASK_TYPE) + searchResponse = client().prepareSearch(TaskResultsService.TASK_INDEX) .setSource(SearchSourceBuilder.searchSource().query(QueryBuilders.termQuery("task.node", taskInfo.getTaskId().getNodeId()))) .get(); diff --git a/server/src/test/java/org/elasticsearch/action/bulk/BulkProcessorRetryIT.java b/server/src/test/java/org/elasticsearch/action/bulk/BulkProcessorRetryIT.java index e7285ff6f97ed..41e3e8d933fe2 100644 --- a/server/src/test/java/org/elasticsearch/action/bulk/BulkProcessorRetryIT.java +++ b/server/src/test/java/org/elasticsearch/action/bulk/BulkProcessorRetryIT.java @@ -146,7 +146,6 @@ public void afterBulk(long executionId, BulkRequest request, Throwable failure) SearchResponse results = client() .prepareSearch(INDEX_NAME) - .setTypes(TYPE_NAME) .setQuery(QueryBuilders.matchAllQuery()) .setSize(0) .get(); diff --git a/server/src/test/java/org/elasticsearch/action/search/ExpandSearchPhaseTests.java b/server/src/test/java/org/elasticsearch/action/search/ExpandSearchPhaseTests.java index d9de69a1c6c62..67e3769d445dc 100644 --- a/server/src/test/java/org/elasticsearch/action/search/ExpandSearchPhaseTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/ExpandSearchPhaseTests.java @@ -87,7 +87,6 @@ void sendExecuteMultiSearch(MultiSearchRequest request, SearchTask task, ActionL assertThat(groupBuilder.must(), Matchers.contains(QueryBuilders.termQuery("foo", "bar"))); } assertArrayEquals(mockSearchPhaseContext.getRequest().indices(), searchRequest.indices()); - assertArrayEquals(mockSearchPhaseContext.getRequest().types(), searchRequest.types()); List mSearchResponses = new ArrayList<>(numInnerHits); diff --git a/server/src/test/java/org/elasticsearch/action/search/MultiSearchRequestTests.java b/server/src/test/java/org/elasticsearch/action/search/MultiSearchRequestTests.java index afe957e2bf3ee..35f60546bb023 100644 --- a/server/src/test/java/org/elasticsearch/action/search/MultiSearchRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/MultiSearchRequestTests.java @@ -61,14 +61,10 @@ public void testSimpleAdd() throws Exception { equalTo("test")); assertThat(request.requests().get(0).indicesOptions(), equalTo(IndicesOptions.fromOptions(true, true, true, true, SearchRequest.DEFAULT_INDICES_OPTIONS))); - assertThat(request.requests().get(0).types().length, - equalTo(0)); assertThat(request.requests().get(1).indices()[0], equalTo("test")); assertThat(request.requests().get(1).indicesOptions(), equalTo(IndicesOptions.fromOptions(false, true, true, true, SearchRequest.DEFAULT_INDICES_OPTIONS))); - assertThat(request.requests().get(1).types()[0], - equalTo("type1")); assertThat(request.requests().get(2).indices()[0], equalTo("test")); assertThat(request.requests().get(2).indicesOptions(), @@ -83,12 +79,9 @@ public void testSimpleAdd() throws Exception { equalTo(IndicesOptions.fromOptions(true, false, false, true, SearchRequest.DEFAULT_INDICES_OPTIONS))); assertThat(request.requests().get(5).indices(), is(Strings.EMPTY_ARRAY)); - assertThat(request.requests().get(5).types().length, equalTo(0)); assertThat(request.requests().get(6).indices(), is(Strings.EMPTY_ARRAY)); - assertThat(request.requests().get(6).types().length, equalTo(0)); assertThat(request.requests().get(6).searchType(), equalTo(SearchType.DFS_QUERY_THEN_FETCH)); assertThat(request.requests().get(7).indices(), is(Strings.EMPTY_ARRAY)); - assertThat(request.requests().get(7).types().length, equalTo(0)); } public void testFailWithUnknownKey() { @@ -111,7 +104,6 @@ public void testSimpleAddWithCarriageReturn() throws Exception { assertThat(request.requests().get(0).indices()[0], equalTo("test")); assertThat(request.requests().get(0).indicesOptions(), equalTo(IndicesOptions.fromOptions(true, true, true, true, SearchRequest.DEFAULT_INDICES_OPTIONS))); - assertThat(request.requests().get(0).types().length, equalTo(0)); } public void testDefaultIndicesOptions() throws IOException { @@ -126,23 +118,17 @@ public void testDefaultIndicesOptions() throws IOException { assertThat(request.requests().get(0).indices()[0], equalTo("test")); assertThat(request.requests().get(0).indicesOptions(), equalTo(IndicesOptions.fromOptions(true, true, true, true, SearchRequest.DEFAULT_INDICES_OPTIONS))); - assertThat(request.requests().get(0).types().length, equalTo(0)); } public void testSimpleAdd2() throws Exception { MultiSearchRequest request = parseMultiSearchRequest("/org/elasticsearch/action/search/simple-msearch2.json"); assertThat(request.requests().size(), equalTo(5)); assertThat(request.requests().get(0).indices()[0], equalTo("test")); - assertThat(request.requests().get(0).types().length, equalTo(0)); assertThat(request.requests().get(1).indices()[0], equalTo("test")); - assertThat(request.requests().get(1).types()[0], equalTo("type1")); assertThat(request.requests().get(2).indices(), is(Strings.EMPTY_ARRAY)); - assertThat(request.requests().get(2).types().length, equalTo(0)); assertThat(request.requests().get(3).indices(), is(Strings.EMPTY_ARRAY)); - assertThat(request.requests().get(3).types().length, equalTo(0)); assertThat(request.requests().get(3).searchType(), equalTo(SearchType.DFS_QUERY_THEN_FETCH)); assertThat(request.requests().get(4).indices(), is(Strings.EMPTY_ARRAY)); - assertThat(request.requests().get(4).types().length, equalTo(0)); } public void testSimpleAdd3() throws Exception { @@ -152,13 +138,9 @@ public void testSimpleAdd3() throws Exception { assertThat(request.requests().get(0).indices()[1], equalTo("test1")); assertThat(request.requests().get(1).indices()[0], equalTo("test2")); assertThat(request.requests().get(1).indices()[1], equalTo("test3")); - assertThat(request.requests().get(1).types()[0], equalTo("type1")); assertThat(request.requests().get(2).indices()[0], equalTo("test4")); assertThat(request.requests().get(2).indices()[1], equalTo("test1")); - assertThat(request.requests().get(2).types()[0], equalTo("type2")); - assertThat(request.requests().get(2).types()[1], equalTo("type1")); assertThat(request.requests().get(3).indices(), is(Strings.EMPTY_ARRAY)); - assertThat(request.requests().get(3).types().length, equalTo(0)); assertThat(request.requests().get(3).searchType(), equalTo(SearchType.DFS_QUERY_THEN_FETCH)); } @@ -171,13 +153,10 @@ public void testSimpleAdd4() throws Exception { assertThat(request.requests().get(0).preference(), nullValue()); assertThat(request.requests().get(1).indices()[0], equalTo("test2")); assertThat(request.requests().get(1).indices()[1], equalTo("test3")); - assertThat(request.requests().get(1).types()[0], equalTo("type1")); assertThat(request.requests().get(1).requestCache(), nullValue()); assertThat(request.requests().get(1).preference(), equalTo("_local")); assertThat(request.requests().get(2).indices()[0], equalTo("test4")); assertThat(request.requests().get(2).indices()[1], equalTo("test1")); - assertThat(request.requests().get(2).types()[0], equalTo("type2")); - assertThat(request.requests().get(2).types()[1], equalTo("type1")); assertThat(request.requests().get(2).routing(), equalTo("123")); } @@ -272,7 +251,7 @@ public void testMultiLineSerialization() throws IOException { parsedRequest.add(r); }; MultiSearchRequest.readMultiLineFormat(new BytesArray(originalBytes), xContentType.xContent(), - consumer, null, null, null, null, null, null, xContentRegistry(), true); + consumer, null, null, null, null, null, xContentRegistry(), true); assertEquals(originalRequest, parsedRequest); } } diff --git a/server/src/test/java/org/elasticsearch/action/search/SearchRequestTests.java b/server/src/test/java/org/elasticsearch/action/search/SearchRequestTests.java index 06231db26d67e..7ba20d24efd8f 100644 --- a/server/src/test/java/org/elasticsearch/action/search/SearchRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/SearchRequestTests.java @@ -88,7 +88,6 @@ public void testIllegalArguments() { SearchRequest searchRequest = new SearchRequest(); assertNotNull(searchRequest.indices()); assertNotNull(searchRequest.indicesOptions()); - assertNotNull(searchRequest.types()); assertNotNull(searchRequest.searchType()); NullPointerException e = expectThrows(NullPointerException.class, () -> searchRequest.indices((String[]) null)); @@ -99,11 +98,6 @@ public void testIllegalArguments() { e = expectThrows(NullPointerException.class, () -> searchRequest.indicesOptions(null)); assertEquals("indicesOptions must not be null", e.getMessage()); - e = expectThrows(NullPointerException.class, () -> searchRequest.types((String[]) null)); - assertEquals("types must not be null", e.getMessage()); - e = expectThrows(NullPointerException.class, () -> searchRequest.types((String) null)); - assertEquals("type must not be null", e.getMessage()); - e = expectThrows(NullPointerException.class, () -> searchRequest.searchType((SearchType)null)); assertEquals("searchType must not be null", e.getMessage()); @@ -187,7 +181,6 @@ private SearchRequest mutate(SearchRequest searchRequest) { mutators.add(() -> mutation.indices(ArrayUtils.concat(searchRequest.indices(), new String[] { randomAlphaOfLength(10) }))); mutators.add(() -> mutation.indicesOptions(randomValueOtherThan(searchRequest.indicesOptions(), () -> IndicesOptions.fromOptions(randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean())))); - mutators.add(() -> mutation.types(ArrayUtils.concat(searchRequest.types(), new String[] { randomAlphaOfLength(10) }))); mutators.add(() -> mutation.preference(randomValueOtherThan(searchRequest.preference(), () -> randomAlphaOfLengthBetween(3, 10)))); mutators.add(() -> mutation.routing(randomValueOtherThan(searchRequest.routing(), () -> randomAlphaOfLengthBetween(3, 10)))); mutators.add(() -> mutation.requestCache((randomValueOtherThan(searchRequest.requestCache(), ESTestCase::randomBoolean)))); diff --git a/server/src/test/java/org/elasticsearch/index/SearchSlowLogTests.java b/server/src/test/java/org/elasticsearch/index/SearchSlowLogTests.java index c054b2008dba6..2f682f437a2cb 100644 --- a/server/src/test/java/org/elasticsearch/index/SearchSlowLogTests.java +++ b/server/src/test/java/org/elasticsearch/index/SearchSlowLogTests.java @@ -63,11 +63,6 @@ public ShardId shardId() { return new ShardId(indexService.index(), 0); } - @Override - public String[] types() { - return new String[0]; - } - @Override public SearchSourceBuilder source() { return searchSourceBuilder; diff --git a/server/src/test/java/org/elasticsearch/index/mapper/IdFieldTypeTests.java b/server/src/test/java/org/elasticsearch/index/mapper/IdFieldTypeTests.java index f53610d23aaad..28d712e9854ab 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/IdFieldTypeTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/IdFieldTypeTests.java @@ -28,9 +28,6 @@ import org.elasticsearch.index.query.QueryShardContext; import org.mockito.Mockito; -import java.util.Collection; -import java.util.Collections; - public class IdFieldTypeTests extends FieldTypeTestCase { @Override protected MappedFieldType createDefaultFieldType() { @@ -58,8 +55,6 @@ public void testTermsQuery() throws Exception { Mockito.when(context.indexVersionCreated()).thenReturn(indexSettings.getAsVersion(IndexMetaData.SETTING_VERSION_CREATED, null)); MapperService mapperService = Mockito.mock(MapperService.class); - Collection types = Collections.emptySet(); - Mockito.when(context.queryTypes()).thenReturn(types); Mockito.when(context.getMapperService()).thenReturn(mapperService); MappedFieldType ft = IdFieldMapper.defaultFieldType(mockSettings); @@ -67,8 +62,6 @@ public void testTermsQuery() throws Exception { Query query = ft.termQuery("id", context); assertEquals(new TermInSetQuery("_id", Uid.encodeId("id")), query); - types = Collections.singleton("type"); - Mockito.when(context.queryTypes()).thenReturn(types); query = ft.termQuery("id", context); assertEquals(new TermInSetQuery("_id", Uid.encodeId("id")), query); } diff --git a/server/src/test/java/org/elasticsearch/index/query/CommonTermsQueryParserTests.java b/server/src/test/java/org/elasticsearch/index/query/CommonTermsQueryParserTests.java index f4e737ea4b024..2be41f9002015 100644 --- a/server/src/test/java/org/elasticsearch/index/query/CommonTermsQueryParserTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/CommonTermsQueryParserTests.java @@ -42,7 +42,7 @@ public void testWhenParsedQueryIsNullNoNullPointerExceptionIsThrown() throws IOE // the named query parses to null; we are testing this does not cause a NullPointerException SearchResponse response = - client().prepareSearch(index).setTypes(type).setQuery(commonTermsQueryBuilder).execute().actionGet(); + client().prepareSearch(index).setQuery(commonTermsQueryBuilder).execute().actionGet(); assertNotNull(response); assertEquals(response.getHits().getHits().length, 0); diff --git a/server/src/test/java/org/elasticsearch/index/query/IdsQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/IdsQueryBuilderTests.java index 2aed8202dd698..1083248b850d1 100644 --- a/server/src/test/java/org/elasticsearch/index/query/IdsQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/IdsQueryBuilderTests.java @@ -23,15 +23,12 @@ import org.apache.lucene.search.MatchNoDocsQuery; import org.apache.lucene.search.Query; import org.apache.lucene.search.TermInSetQuery; -import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.index.mapper.IdFieldMapper; import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.test.AbstractQueryTestCase; import java.io.IOException; -import java.util.Arrays; import static org.hamcrest.CoreMatchers.instanceOf; import static org.hamcrest.Matchers.contains; @@ -41,44 +38,19 @@ public class IdsQueryBuilderTests extends AbstractQueryTestCase @Override protected IdsQueryBuilder doCreateTestQueryBuilder() { - final String type; - if (randomBoolean()) { - if (frequently()) { - type = "_doc"; - } else { - type = randomAlphaOfLengthBetween(1, 10); - } - } else if (randomBoolean()) { - type = MetaData.ALL; - } else { - type = null; - } int numberOfIds = randomIntBetween(0, 10); String[] ids = new String[numberOfIds]; for (int i = 0; i < numberOfIds; i++) { ids[i] = randomAlphaOfLengthBetween(1, 10); } - IdsQueryBuilder query; - if (type != null && randomBoolean()) { - query = new IdsQueryBuilder().types(type); - query.addIds(ids); - } else { - query = new IdsQueryBuilder(); - query.addIds(ids); - } + IdsQueryBuilder query = new IdsQueryBuilder(); + query.addIds(ids); return query; } @Override protected void doAssertLuceneQuery(IdsQueryBuilder queryBuilder, Query query, SearchContext context) throws IOException { - boolean allTypes = queryBuilder.types().length == 0 || - queryBuilder.types().length == 1 && "_all".equals(queryBuilder.types()[0]); - if (queryBuilder.ids().size() == 0 - // no types - || context.getQueryShardContext().fieldMapper(IdFieldMapper.NAME) == null - // there are types, but disjoint from the query - || (allTypes == false && - Arrays.asList(queryBuilder.types()).indexOf(context.mapperService().documentMapper().type()) == -1)) { + if (queryBuilder.ids().size() == 0) { assertThat(query, instanceOf(MatchNoDocsQuery.class)); } else { assertThat(query, instanceOf(TermInSetQuery.class)); @@ -86,11 +58,8 @@ protected void doAssertLuceneQuery(IdsQueryBuilder queryBuilder, Query query, Se } public void testIllegalArguments() { - IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> new IdsQueryBuilder().types((String[]) null)); - assertEquals("[ids] types cannot be null", e.getMessage()); - IdsQueryBuilder idsQueryBuilder = new IdsQueryBuilder(); - e = expectThrows(IllegalArgumentException.class, () -> idsQueryBuilder.addIds((String[]) null)); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> idsQueryBuilder.addIds((String[]) null)); assertEquals("[ids] ids cannot be null", e.getMessage()); } @@ -105,7 +74,6 @@ public void testFromJson() throws IOException { String json = "{\n" + " \"ids\" : {\n" + - " \"type\" : [ \"my_type\" ],\n" + " \"values\" : [ \"1\", \"100\", \"4\" ],\n" + " \"boost\" : 1.0\n" + " }\n" + @@ -113,56 +81,23 @@ public void testFromJson() throws IOException { IdsQueryBuilder parsed = (IdsQueryBuilder) parseQuery(json); checkGeneratedJson(json, parsed); assertThat(parsed.ids(), contains("1","100","4")); - assertEquals(json, "my_type", parsed.types()[0]); // check that type that is not an array and also ids that are numbers are parsed json = "{\n" + " \"ids\" : {\n" + - " \"type\" : \"my_type\",\n" + " \"values\" : [ 1, 100, 4 ],\n" + " \"boost\" : 1.0\n" + " }\n" + "}"; parsed = (IdsQueryBuilder) parseQuery(json); assertThat(parsed.ids(), contains("1","100","4")); - assertEquals(json, "my_type", parsed.types()[0]); - - // check with empty type array - json = - "{\n" + - " \"ids\" : {\n" + - " \"type\" : [ ],\n" + - " \"values\" : [ \"1\", \"100\", \"4\" ],\n" + - " \"boost\" : 1.0\n" + - " }\n" + - "}"; - parsed = (IdsQueryBuilder) parseQuery(json); - assertThat(parsed.ids(), contains("1","100","4")); - assertEquals(json, 0, parsed.types().length); - - // check without type - json = - "{\n" + - " \"ids\" : {\n" + - " \"values\" : [ \"1\", \"100\", \"4\" ],\n" + - " \"boost\" : 1.0\n" + - " }\n" + - "}"; - parsed = (IdsQueryBuilder) parseQuery(json); - assertThat(parsed.ids(), contains("1","100","4")); - assertEquals(json, 0, parsed.types().length); } @Override protected QueryBuilder parseQuery(XContentParser parser) throws IOException { QueryBuilder query = super.parseQuery(parser); assertThat(query, instanceOf(IdsQueryBuilder.class)); - - IdsQueryBuilder idsQuery = (IdsQueryBuilder) query; - if (idsQuery.types().length > 0) { - assertWarnings(IdsQueryBuilder.TYPES_DEPRECATION_MESSAGE); - } - return query; + return (IdsQueryBuilder) query; } } diff --git a/server/src/test/java/org/elasticsearch/index/query/RandomQueryBuilder.java b/server/src/test/java/org/elasticsearch/index/query/RandomQueryBuilder.java index 04d2d2c347bbf..1609b6a511d22 100644 --- a/server/src/test/java/org/elasticsearch/index/query/RandomQueryBuilder.java +++ b/server/src/test/java/org/elasticsearch/index/query/RandomQueryBuilder.java @@ -21,7 +21,6 @@ import com.carrotsearch.randomizedtesting.generators.RandomNumbers; import com.carrotsearch.randomizedtesting.generators.RandomStrings; -import org.elasticsearch.common.Strings; import java.util.Random; @@ -50,7 +49,7 @@ public static QueryBuilder createQuery(Random r) { case 2: // We make sure this query has no types to avoid deprecation warnings in the // tests that use this method. - return new IdsQueryBuilderTests().createTestQueryBuilder().types(Strings.EMPTY_ARRAY); + return new IdsQueryBuilderTests().createTestQueryBuilder(); case 3: return createMultiTermQuery(r); default: diff --git a/server/src/test/java/org/elasticsearch/index/reindex/DeleteByQueryRequestTests.java b/server/src/test/java/org/elasticsearch/index/reindex/DeleteByQueryRequestTests.java index 76c6dc03b5a49..661d9b748dbda 100644 --- a/server/src/test/java/org/elasticsearch/index/reindex/DeleteByQueryRequestTests.java +++ b/server/src/test/java/org/elasticsearch/index/reindex/DeleteByQueryRequestTests.java @@ -79,30 +79,6 @@ protected void extraForSliceAssertions(DeleteByQueryRequest original, DeleteByQu // No extra assertions needed } - public void testTypesGetter() { - int numTypes = between(1, 50); - String[] types = new String[numTypes]; - for (int i = 0; i < numTypes; i++) { - types[i] = randomSimpleString(random(), 1, 30); - } - SearchRequest searchRequest = new SearchRequest(); - searchRequest.types(types); - DeleteByQueryRequest request = new DeleteByQueryRequest(searchRequest); - assertArrayEquals(request.types(), types); - } - - public void testTypesSetter() { - int numTypes = between(1, 50); - String[] types = new String[numTypes]; - for (int i = 0; i < numTypes; i++) { - types[i] = randomSimpleString(random(), 1, 30); - } - SearchRequest searchRequest = new SearchRequest(); - DeleteByQueryRequest request = new DeleteByQueryRequest(searchRequest); - request.types(types); - assertArrayEquals(request.types(), types); - } - public void testValidateGivenNoQuery() { SearchRequest searchRequest = new SearchRequest(); DeleteByQueryRequest deleteByQueryRequest = new DeleteByQueryRequest(searchRequest); diff --git a/server/src/test/java/org/elasticsearch/indexing/IndexActionIT.java b/server/src/test/java/org/elasticsearch/indexing/IndexActionIT.java index 36488addb3737..90814ed934475 100644 --- a/server/src/test/java/org/elasticsearch/indexing/IndexActionIT.java +++ b/server/src/test/java/org/elasticsearch/indexing/IndexActionIT.java @@ -84,7 +84,7 @@ public void testAutoGenerateIdNoDuplicates() throws Exception { } try { logger.debug("running search with a specific type"); - SearchResponse response = client().prepareSearch("test").setTypes("type").get(); + SearchResponse response = client().prepareSearch("test").get(); if (response.getHits().getTotalHits().value != numOfDocs) { final String message = "Count is " + response.getHits().getTotalHits().value + " but " + numOfDocs + " was expected. " + diff --git a/server/src/test/java/org/elasticsearch/indices/state/OpenCloseIndexIT.java b/server/src/test/java/org/elasticsearch/indices/state/OpenCloseIndexIT.java index e9e9108f5e8f1..310789621e152 100644 --- a/server/src/test/java/org/elasticsearch/indices/state/OpenCloseIndexIT.java +++ b/server/src/test/java/org/elasticsearch/indices/state/OpenCloseIndexIT.java @@ -287,7 +287,7 @@ public void testOpenCloseWithDocs() throws IOException, ExecutionException, Inte // check the index still contains the records that we indexed client().admin().indices().prepareOpen("test").execute().get(); ensureGreen(); - SearchResponse searchResponse = client().prepareSearch().setTypes("type").setQuery(QueryBuilders.matchQuery("test", "init")).get(); + SearchResponse searchResponse = client().prepareSearch().setQuery(QueryBuilders.matchQuery("test", "init")).get(); assertNoFailures(searchResponse); assertHitCount(searchResponse, docs); } diff --git a/server/src/test/java/org/elasticsearch/rest/action/search/RestCountActionTests.java b/server/src/test/java/org/elasticsearch/rest/action/search/RestCountActionTests.java deleted file mode 100644 index c6af3d12e2913..0000000000000 --- a/server/src/test/java/org/elasticsearch/rest/action/search/RestCountActionTests.java +++ /dev/null @@ -1,62 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.rest.action.search; - -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.rest.RestRequest; -import org.elasticsearch.rest.RestRequest.Method; -import org.elasticsearch.test.rest.RestActionTestCase; -import org.elasticsearch.test.rest.FakeRestRequest; -import org.junit.Before; - -import java.util.HashMap; -import java.util.Map; - -public class RestCountActionTests extends RestActionTestCase { - - @Before - public void setUpAction() { - new RestCountAction(Settings.EMPTY, controller()); - } - - public void testTypeInPath() { - RestRequest request = new FakeRestRequest.Builder(xContentRegistry()) - .withMethod(Method.POST) - .withPath("/some_index/some_type/_count") - .build(); - - dispatchRequest(request); - assertWarnings(RestCountAction.TYPES_DEPRECATION_MESSAGE); - } - - public void testTypeParameter() { - Map params = new HashMap<>(); - params.put("type", "some_type"); - - RestRequest request = new FakeRestRequest.Builder(xContentRegistry()) - .withMethod(Method.GET) - .withPath("/some_index/_count") - .withParams(params) - .build(); - - dispatchRequest(request); - assertWarnings(RestCountAction.TYPES_DEPRECATION_MESSAGE); - } -} diff --git a/server/src/test/java/org/elasticsearch/search/DefaultSearchContextTests.java b/server/src/test/java/org/elasticsearch/search/DefaultSearchContextTests.java index 189929171a5d1..eda60f5cb2ba8 100644 --- a/server/src/test/java/org/elasticsearch/search/DefaultSearchContextTests.java +++ b/server/src/test/java/org/elasticsearch/search/DefaultSearchContextTests.java @@ -72,7 +72,6 @@ public void testPreProcess() throws Exception { when(shardSearchRequest.searchType()).thenReturn(SearchType.DEFAULT); ShardId shardId = new ShardId("index", UUID.randomUUID().toString(), 1); when(shardSearchRequest.shardId()).thenReturn(shardId); - when(shardSearchRequest.types()).thenReturn(new String[]{}); IndexShard indexShard = mock(IndexShard.class); QueryCachingPolicy queryCachingPolicy = mock(QueryCachingPolicy.class); diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/BooleanTermsIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/BooleanTermsIT.java index 8cbdcc41a9840..7272c55194d79 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/BooleanTermsIT.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/BooleanTermsIT.java @@ -83,7 +83,7 @@ public void setupSuiteScopeCluster() throws Exception { } public void testSingleValueField() throws Exception { - SearchResponse response = client().prepareSearch("idx").setTypes("type") + SearchResponse response = client().prepareSearch("idx") .addAggregation(terms("terms") .field(SINGLE_VALUED_FIELD_NAME) .collectMode(randomFrom(SubAggCollectionMode.values()))) @@ -117,7 +117,7 @@ public void testSingleValueField() throws Exception { } public void testMultiValueField() throws Exception { - SearchResponse response = client().prepareSearch("idx").setTypes("type") + SearchResponse response = client().prepareSearch("idx") .addAggregation(terms("terms") .field(MULTI_VALUED_FIELD_NAME) .collectMode(randomFrom(SubAggCollectionMode.values()))) @@ -151,7 +151,7 @@ public void testMultiValueField() throws Exception { } public void testUnmapped() throws Exception { - SearchResponse response = client().prepareSearch("idx_unmapped").setTypes("type") + SearchResponse response = client().prepareSearch("idx_unmapped") .addAggregation(terms("terms") .field(SINGLE_VALUED_FIELD_NAME) .size(between(1, 5)) diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/DateHistogramIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/DateHistogramIT.java index ad2939347edb1..fcf8d9a763b49 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/DateHistogramIT.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/DateHistogramIT.java @@ -1549,7 +1549,6 @@ private void assertMultiSortResponse(int[] expectedDays, BucketOrder... order) { ZonedDateTime[] expectedKeys = Arrays.stream(expectedDays).mapToObj(d -> date(1, d)).toArray(ZonedDateTime[]::new); SearchResponse response = client() .prepareSearch("sort_idx") - .setTypes("type") .addAggregation( dateHistogram("histo").field("date").dateHistogramInterval(DateHistogramInterval.DAY).order(BucketOrder.compound(order)) .subAggregation(avg("avg_l").field("l")).subAggregation(sum("sum_d").field("d"))).get(); diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/DiversifiedSamplerIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/DiversifiedSamplerIT.java index 4ae523a168219..c65a1d327a93e 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/DiversifiedSamplerIT.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/DiversifiedSamplerIT.java @@ -23,6 +23,7 @@ import org.elasticsearch.action.search.SearchType; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.query.TermQueryBuilder; +import org.elasticsearch.search.aggregations.BucketOrder; import org.elasticsearch.search.aggregations.bucket.sampler.DiversifiedAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.sampler.Sampler; import org.elasticsearch.search.aggregations.bucket.sampler.SamplerAggregator; @@ -30,7 +31,6 @@ import org.elasticsearch.search.aggregations.bucket.terms.Terms.Bucket; import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder; import org.elasticsearch.search.aggregations.metrics.Max; -import org.elasticsearch.search.aggregations.BucketOrder; import org.elasticsearch.test.ESIntegTestCase; import java.util.Collection; @@ -104,7 +104,7 @@ public void testIssue10719() throws Exception { // Tests that we can refer to nested elements under a sample in a path // statement boolean asc = randomBoolean(); - SearchResponse response = client().prepareSearch("test").setTypes("book").setSearchType(SearchType.QUERY_THEN_FETCH) + SearchResponse response = client().prepareSearch("test").setSearchType(SearchType.QUERY_THEN_FETCH) .addAggregation(terms("genres") .field("genre") .order(BucketOrder.aggregation("sample>max_price.value", asc)) diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/DoubleTermsIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/DoubleTermsIT.java index 23842a3f9df55..ad55c2c6866e4 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/DoubleTermsIT.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/DoubleTermsIT.java @@ -35,9 +35,9 @@ import org.elasticsearch.search.aggregations.bucket.terms.Terms; import org.elasticsearch.search.aggregations.bucket.terms.Terms.Bucket; import org.elasticsearch.search.aggregations.metrics.Avg; +import org.elasticsearch.search.aggregations.metrics.ExtendedStats; import org.elasticsearch.search.aggregations.metrics.Max; import org.elasticsearch.search.aggregations.metrics.Stats; -import org.elasticsearch.search.aggregations.metrics.ExtendedStats; import org.elasticsearch.search.aggregations.metrics.Sum; import org.elasticsearch.test.ESIntegTestCase; @@ -885,7 +885,6 @@ public void testSingleValuedFieldOrderedBySingleValueSubAggregationAscAsCompound private void assertMultiSortResponse(double[] expectedKeys, BucketOrder... order) { SearchResponse response = client() .prepareSearch("sort_idx") - .setTypes("multi_sort_type") .addAggregation( terms("terms").field(SINGLE_VALUED_FIELD_NAME).collectMode(randomFrom(SubAggCollectionMode.values())) .order(BucketOrder.compound(order)).subAggregation(avg("avg_l").field("l")) diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/HistogramIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/HistogramIT.java index bee32d571b69f..94a89067285ba 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/HistogramIT.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/HistogramIT.java @@ -19,6 +19,7 @@ package org.elasticsearch.search.aggregations.bucket; import com.carrotsearch.hppc.LongHashSet; + import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchPhaseExecutionException; @@ -30,6 +31,7 @@ import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptType; import org.elasticsearch.search.aggregations.AggregationExecutionException; +import org.elasticsearch.search.aggregations.BucketOrder; import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.bucket.filter.Filter; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; @@ -38,7 +40,6 @@ import org.elasticsearch.search.aggregations.metrics.Max; import org.elasticsearch.search.aggregations.metrics.Stats; import org.elasticsearch.search.aggregations.metrics.Sum; -import org.elasticsearch.search.aggregations.BucketOrder; import org.elasticsearch.test.ESIntegTestCase; import org.hamcrest.Matchers; @@ -1178,7 +1179,6 @@ public void testSingleValuedFieldOrderedBySingleValueSubAggregationAscAsCompound private void assertMultiSortResponse(long[] expectedKeys, BucketOrder... order) { SearchResponse response = client() .prepareSearch("sort_idx") - .setTypes("type") .addAggregation( histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(1).order(BucketOrder.compound(order)) .subAggregation(avg("avg_l").field("l")).subAggregation(sum("sum_d").field("d"))).get(); diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/LongTermsIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/LongTermsIT.java index 6b704a6711ad9..bacb67605bc53 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/LongTermsIT.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/LongTermsIT.java @@ -35,9 +35,9 @@ import org.elasticsearch.search.aggregations.bucket.terms.Terms; import org.elasticsearch.search.aggregations.bucket.terms.Terms.Bucket; import org.elasticsearch.search.aggregations.metrics.Avg; +import org.elasticsearch.search.aggregations.metrics.ExtendedStats; import org.elasticsearch.search.aggregations.metrics.Max; import org.elasticsearch.search.aggregations.metrics.Stats; -import org.elasticsearch.search.aggregations.metrics.ExtendedStats; import org.elasticsearch.search.aggregations.metrics.Sum; import org.elasticsearch.test.ESIntegTestCase; @@ -858,7 +858,7 @@ public void testSingleValuedFieldOrderedBySingleValueSubAggregationAscAsCompound } private void assertMultiSortResponse(long[] expectedKeys, BucketOrder... order) { - SearchResponse response = client().prepareSearch("sort_idx").setTypes("multi_sort_type") + SearchResponse response = client().prepareSearch("sort_idx") .addAggregation(terms("terms") .field(SINGLE_VALUED_FIELD_NAME) .collectMode(randomFrom(SubAggCollectionMode.values())) diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/MinDocCountIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/MinDocCountIT.java index b09277aca6c6d..aa762c3a94392 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/MinDocCountIT.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/MinDocCountIT.java @@ -22,6 +22,7 @@ import com.carrotsearch.hppc.LongHashSet; import com.carrotsearch.hppc.LongSet; import com.carrotsearch.randomizedtesting.generators.RandomStrings; + import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; @@ -308,7 +309,7 @@ private void testMinDocCountOnTerms(String field, Script script, BucketOrder ord private void testMinDocCountOnTerms(String field, Script script, BucketOrder order, String include, boolean retry) throws Exception { // all terms - final SearchResponse allTermsResponse = client().prepareSearch("idx").setTypes("type") + final SearchResponse allTermsResponse = client().prepareSearch("idx") .setSize(0) .setQuery(QUERY) .addAggregation(script.apply(terms("terms"), field) @@ -325,7 +326,7 @@ private void testMinDocCountOnTerms(String field, Script script, BucketOrder ord for (long minDocCount = 0; minDocCount < 20; ++minDocCount) { final int size = randomIntBetween(1, cardinality + 2); - final SearchRequest request = client().prepareSearch("idx").setTypes("type") + final SearchRequest request = client().prepareSearch("idx") .setSize(0) .setQuery(QUERY) .addAggregation(script.apply(terms("terms"), field) @@ -376,7 +377,7 @@ public void testDateHistogramKeyDesc() throws Exception { private void testMinDocCountOnHistogram(BucketOrder order) throws Exception { final int interval = randomIntBetween(1, 3); - final SearchResponse allResponse = client().prepareSearch("idx").setTypes("type") + final SearchResponse allResponse = client().prepareSearch("idx") .setSize(0) .setQuery(QUERY) .addAggregation(histogram("histo").field("d").interval(interval).order(order).minDocCount(0)) @@ -385,7 +386,7 @@ private void testMinDocCountOnHistogram(BucketOrder order) throws Exception { final Histogram allHisto = allResponse.getAggregations().get("histo"); for (long minDocCount = 0; minDocCount < 50; ++minDocCount) { - final SearchResponse response = client().prepareSearch("idx").setTypes("type") + final SearchResponse response = client().prepareSearch("idx") .setSize(0) .setQuery(QUERY) .addAggregation(histogram("histo").field("d").interval(interval).order(order).minDocCount(minDocCount)) @@ -395,7 +396,7 @@ private void testMinDocCountOnHistogram(BucketOrder order) throws Exception { } private void testMinDocCountOnDateHistogram(BucketOrder order) throws Exception { - final SearchResponse allResponse = client().prepareSearch("idx").setTypes("type") + final SearchResponse allResponse = client().prepareSearch("idx") .setSize(0) .setQuery(QUERY) .addAggregation( @@ -409,7 +410,7 @@ private void testMinDocCountOnDateHistogram(BucketOrder order) throws Exception final Histogram allHisto = allResponse.getAggregations().get("histo"); for (long minDocCount = 0; minDocCount < 50; ++minDocCount) { - final SearchResponse response = client().prepareSearch("idx").setTypes("type") + final SearchResponse response = client().prepareSearch("idx") .setSize(0) .setQuery(QUERY) .addAggregation( diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/NestedIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/NestedIT.java index 14fa6a9f565ef..fc76cf9f1d39c 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/NestedIT.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/NestedIT.java @@ -414,7 +414,7 @@ public void testParentFilterResolvedCorrectly() throws Exception { "{\"tid\" : 22, \"name\": \"DataChannels\"}], \"identifier\": \"29101\"}]}", XContentType.JSON)); indexRandom(true, indexRequests); - SearchResponse response = client().prepareSearch("idx2").setTypes("provider") + SearchResponse response = client().prepareSearch("idx2") .addAggregation( terms("startDate").field("dates.month.start").subAggregation( terms("endDate").field("dates.month.end").subAggregation( @@ -499,7 +499,7 @@ public void testNestedSameDocIdProcessedMultipleTime() throws Exception { .endObject()).get(); refresh(); - SearchResponse response = client().prepareSearch("idx4").setTypes("product") + SearchResponse response = client().prepareSearch("idx4") .addAggregation(terms("category").field("categories").subAggregation( nested("property", "property").subAggregation( terms("property_id").field("property.id") diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/SamplerIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/SamplerIT.java index 42192bbebf209..852f929ffe53a 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/SamplerIT.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/SamplerIT.java @@ -23,13 +23,13 @@ import org.elasticsearch.action.search.SearchType; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.query.TermQueryBuilder; +import org.elasticsearch.search.aggregations.BucketOrder; import org.elasticsearch.search.aggregations.bucket.sampler.Sampler; -import org.elasticsearch.search.aggregations.bucket.sampler.SamplerAggregator; import org.elasticsearch.search.aggregations.bucket.sampler.SamplerAggregationBuilder; +import org.elasticsearch.search.aggregations.bucket.sampler.SamplerAggregator; import org.elasticsearch.search.aggregations.bucket.terms.Terms; import org.elasticsearch.search.aggregations.bucket.terms.Terms.Bucket; import org.elasticsearch.search.aggregations.metrics.Max; -import org.elasticsearch.search.aggregations.BucketOrder; import org.elasticsearch.test.ESIntegTestCase; import java.util.List; @@ -102,7 +102,7 @@ public void testIssue10719() throws Exception { // Tests that we can refer to nested elements under a sample in a path // statement boolean asc = randomBoolean(); - SearchResponse response = client().prepareSearch("test").setTypes("book").setSearchType(SearchType.QUERY_THEN_FETCH) + SearchResponse response = client().prepareSearch("test").setSearchType(SearchType.QUERY_THEN_FETCH) .addAggregation(terms("genres") .field("genre") .order(BucketOrder.aggregation("sample>max_price.value", asc)) diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/ShardSizeTermsIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/ShardSizeTermsIT.java index 2953324c58cfa..0b16714788eef 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/ShardSizeTermsIT.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/ShardSizeTermsIT.java @@ -20,8 +20,8 @@ import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.search.aggregations.Aggregator.SubAggCollectionMode; -import org.elasticsearch.search.aggregations.bucket.terms.Terms; import org.elasticsearch.search.aggregations.BucketOrder; +import org.elasticsearch.search.aggregations.bucket.terms.Terms; import java.util.HashMap; import java.util.List; @@ -37,7 +37,7 @@ public void testNoShardSizeString() throws Exception { indexData(); - SearchResponse response = client().prepareSearch("idx").setTypes("type") + SearchResponse response = client().prepareSearch("idx") .setQuery(matchAllQuery()) .addAggregation(terms("keys").field("key").size(3) .collectMode(randomFrom(SubAggCollectionMode.values())).order(BucketOrder.count(false))) @@ -60,7 +60,7 @@ public void testShardSizeEqualsSizeString() throws Exception { indexData(); - SearchResponse response = client().prepareSearch("idx").setTypes("type") + SearchResponse response = client().prepareSearch("idx") .setQuery(matchAllQuery()) .addAggregation(terms("keys").field("key").size(3).shardSize(3) .collectMode(randomFrom(SubAggCollectionMode.values())).order(BucketOrder.count(false))) @@ -84,7 +84,7 @@ public void testWithShardSizeString() throws Exception { indexData(); - SearchResponse response = client().prepareSearch("idx").setTypes("type") + SearchResponse response = client().prepareSearch("idx") .setQuery(matchAllQuery()) .addAggregation(terms("keys").field("key").size(3) .collectMode(randomFrom(SubAggCollectionMode.values())).shardSize(5).order(BucketOrder.count(false))) @@ -108,7 +108,7 @@ public void testWithShardSizeStringSingleShard() throws Exception { indexData(); - SearchResponse response = client().prepareSearch("idx").setTypes("type").setRouting(routing1) + SearchResponse response = client().prepareSearch("idx").setRouting(routing1) .setQuery(matchAllQuery()) .addAggregation(terms("keys").field("key").size(3) .collectMode(randomFrom(SubAggCollectionMode.values())).shardSize(5).order(BucketOrder.count(false))) @@ -131,7 +131,7 @@ public void testNoShardSizeTermOrderString() throws Exception { indexData(); - SearchResponse response = client().prepareSearch("idx").setTypes("type") + SearchResponse response = client().prepareSearch("idx") .setQuery(matchAllQuery()) .addAggregation(terms("keys").field("key").size(3) .collectMode(randomFrom(SubAggCollectionMode.values())).order(BucketOrder.key(true))) @@ -154,7 +154,7 @@ public void testNoShardSizeLong() throws Exception { indexData(); - SearchResponse response = client().prepareSearch("idx").setTypes("type") + SearchResponse response = client().prepareSearch("idx") .setQuery(matchAllQuery()) .addAggregation(terms("keys").field("key").size(3) .collectMode(randomFrom(SubAggCollectionMode.values())).order(BucketOrder.count(false))) @@ -177,7 +177,7 @@ public void testShardSizeEqualsSizeLong() throws Exception { indexData(); - SearchResponse response = client().prepareSearch("idx").setTypes("type") + SearchResponse response = client().prepareSearch("idx") .setQuery(matchAllQuery()) .addAggregation(terms("keys").field("key").size(3).shardSize(3) .collectMode(randomFrom(SubAggCollectionMode.values())).order(BucketOrder.count(false))) @@ -200,7 +200,7 @@ public void testWithShardSizeLong() throws Exception { indexData(); - SearchResponse response = client().prepareSearch("idx").setTypes("type") + SearchResponse response = client().prepareSearch("idx") .setQuery(matchAllQuery()) .addAggregation(terms("keys").field("key").size(3) .collectMode(randomFrom(SubAggCollectionMode.values())).shardSize(5).order(BucketOrder.count(false))) @@ -224,7 +224,7 @@ public void testWithShardSizeLongSingleShard() throws Exception { indexData(); - SearchResponse response = client().prepareSearch("idx").setTypes("type").setRouting(routing1) + SearchResponse response = client().prepareSearch("idx").setRouting(routing1) .setQuery(matchAllQuery()) .addAggregation(terms("keys").field("key").size(3) .collectMode(randomFrom(SubAggCollectionMode.values())).shardSize(5).order(BucketOrder.count(false))) @@ -247,7 +247,7 @@ public void testNoShardSizeTermOrderLong() throws Exception { indexData(); - SearchResponse response = client().prepareSearch("idx").setTypes("type") + SearchResponse response = client().prepareSearch("idx") .setQuery(matchAllQuery()) .addAggregation(terms("keys").field("key").size(3) .collectMode(randomFrom(SubAggCollectionMode.values())).order(BucketOrder.key(true))) @@ -270,7 +270,7 @@ public void testNoShardSizeDouble() throws Exception { indexData(); - SearchResponse response = client().prepareSearch("idx").setTypes("type") + SearchResponse response = client().prepareSearch("idx") .setQuery(matchAllQuery()) .addAggregation(terms("keys").field("key").size(3) .collectMode(randomFrom(SubAggCollectionMode.values())).order(BucketOrder.count(false))) @@ -293,7 +293,7 @@ public void testShardSizeEqualsSizeDouble() throws Exception { indexData(); - SearchResponse response = client().prepareSearch("idx").setTypes("type") + SearchResponse response = client().prepareSearch("idx") .setQuery(matchAllQuery()) .addAggregation(terms("keys").field("key").size(3).shardSize(3) .collectMode(randomFrom(SubAggCollectionMode.values())).order(BucketOrder.count(false))) @@ -316,7 +316,7 @@ public void testWithShardSizeDouble() throws Exception { indexData(); - SearchResponse response = client().prepareSearch("idx").setTypes("type") + SearchResponse response = client().prepareSearch("idx") .setQuery(matchAllQuery()) .addAggregation(terms("keys").field("key").size(3) .collectMode(randomFrom(SubAggCollectionMode.values())).shardSize(5).order(BucketOrder.count(false))) @@ -339,7 +339,7 @@ public void testWithShardSizeDoubleSingleShard() throws Exception { indexData(); - SearchResponse response = client().prepareSearch("idx").setTypes("type").setRouting(routing1) + SearchResponse response = client().prepareSearch("idx").setRouting(routing1) .setQuery(matchAllQuery()) .addAggregation(terms("keys").field("key").size(3) .collectMode(randomFrom(SubAggCollectionMode.values())).shardSize(5).order(BucketOrder.count(false))) @@ -362,7 +362,7 @@ public void testNoShardSizeTermOrderDouble() throws Exception { indexData(); - SearchResponse response = client().prepareSearch("idx").setTypes("type") + SearchResponse response = client().prepareSearch("idx") .setQuery(matchAllQuery()) .addAggregation(terms("keys").field("key").size(3) .collectMode(randomFrom(SubAggCollectionMode.values())).order(BucketOrder.key(true))) diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/ShardSizeTestCase.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/ShardSizeTestCase.java index 4e4bfce5ecbb2..6112163bb1faa 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/ShardSizeTestCase.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/ShardSizeTestCase.java @@ -90,11 +90,11 @@ protected void indexData() throws Exception { indexRandom(true, docs); - SearchResponse resp = client().prepareSearch("idx").setTypes("type").setRouting(routing1).setQuery(matchAllQuery()).get(); + SearchResponse resp = client().prepareSearch("idx").setRouting(routing1).setQuery(matchAllQuery()).get(); assertSearchResponse(resp); long totalOnOne = resp.getHits().getTotalHits().value; assertThat(totalOnOne, is(15L)); - resp = client().prepareSearch("idx").setTypes("type").setRouting(routing2).setQuery(matchAllQuery()).get(); + resp = client().prepareSearch("idx").setRouting(routing2).setQuery(matchAllQuery()).get(); assertSearchResponse(resp); long totalOnTwo = resp.getHits().getTotalHits().value; assertThat(totalOnTwo, is(12L)); diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/SignificantTermsSignificanceScoreIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/SignificantTermsSignificanceScoreIT.java index 81ea71621ae3e..dd7ecc8c1f1ed 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/SignificantTermsSignificanceScoreIT.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/SignificantTermsSignificanceScoreIT.java @@ -106,7 +106,7 @@ public void testPlugin() throws Exception { if ("text".equals(type) && randomBoolean()) { // Use significant_text on text fields but occasionally run with alternative of // significant_terms on legacy fieldData=true too. - request = client().prepareSearch(INDEX_NAME).setTypes(DOC_TYPE) + request = client().prepareSearch(INDEX_NAME) .addAggregation( terms("class") .field(CLASS_FIELD) @@ -117,7 +117,7 @@ public void testPlugin() throws Exception { ); }else { - request = client().prepareSearch(INDEX_NAME).setTypes(DOC_TYPE) + request = client().prepareSearch(INDEX_NAME) .addAggregation( terms("class") .field(CLASS_FIELD) @@ -271,11 +271,11 @@ public void testXContentResponse() throws Exception { if ("text".equals(type) && randomBoolean() ) { // Use significant_text on text fields but occasionally run with alternative of // significant_terms on legacy fieldData=true too. - request = client().prepareSearch(INDEX_NAME).setTypes(DOC_TYPE) + request = client().prepareSearch(INDEX_NAME) .addAggregation(terms("class").field(CLASS_FIELD) .subAggregation(significantText("sig_terms", TEXT_FIELD))); } else { - request = client().prepareSearch(INDEX_NAME).setTypes(DOC_TYPE) + request = client().prepareSearch(INDEX_NAME) .addAggregation(terms("class").field(CLASS_FIELD) .subAggregation(significantTerms("sig_terms").field(TEXT_FIELD))); } @@ -370,7 +370,7 @@ public void testDeletesIssue7951() throws Exception { SearchRequestBuilder request; if (randomBoolean() ) { - request = client().prepareSearch(INDEX_NAME).setTypes(DOC_TYPE) + request = client().prepareSearch(INDEX_NAME) .addAggregation( terms("class") .field(CLASS_FIELD) @@ -380,7 +380,7 @@ public void testDeletesIssue7951() throws Exception { .minDocCount(1))); }else { - request = client().prepareSearch(INDEX_NAME).setTypes(DOC_TYPE) + request = client().prepareSearch(INDEX_NAME) .addAggregation( terms("class") .field(CLASS_FIELD) @@ -413,7 +413,7 @@ public void testBackgroundVsSeparateSet(SignificanceHeuristic significanceHeuris final boolean useSigText = randomBoolean() && type.equals("text"); SearchRequestBuilder request1; if (useSigText) { - request1 = client().prepareSearch(INDEX_NAME).setTypes(DOC_TYPE) + request1 = client().prepareSearch(INDEX_NAME) .addAggregation(terms("class") .field(CLASS_FIELD) .subAggregation( @@ -423,7 +423,7 @@ public void testBackgroundVsSeparateSet(SignificanceHeuristic significanceHeuris significanceHeuristicExpectingSuperset))); }else { - request1 = client().prepareSearch(INDEX_NAME).setTypes(DOC_TYPE) + request1 = client().prepareSearch(INDEX_NAME) .addAggregation(terms("class") .field(CLASS_FIELD) .subAggregation( @@ -439,7 +439,7 @@ public void testBackgroundVsSeparateSet(SignificanceHeuristic significanceHeuris SearchRequestBuilder request2; if (useSigText) { - request2 = client().prepareSearch(INDEX_NAME).setTypes(DOC_TYPE) + request2 = client().prepareSearch(INDEX_NAME) .addAggregation(filter("0", QueryBuilders.termQuery(CLASS_FIELD, "0")) .subAggregation(significantText("sig_terms", TEXT_FIELD) .minDocCount(1) @@ -452,7 +452,7 @@ public void testBackgroundVsSeparateSet(SignificanceHeuristic significanceHeuris .significanceHeuristic(significanceHeuristicExpectingSeparateSets))); }else { - request2 = client().prepareSearch(INDEX_NAME).setTypes(DOC_TYPE) + request2 = client().prepareSearch(INDEX_NAME) .addAggregation(filter("0", QueryBuilders.termQuery(CLASS_FIELD, "0")) .subAggregation(significantTerms("sig_terms") .field(TEXT_FIELD) diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/TermsDocCountErrorIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/TermsDocCountErrorIT.java index 04fe0d5751887..f3e6eb544ced8 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/TermsDocCountErrorIT.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/TermsDocCountErrorIT.java @@ -25,10 +25,10 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.search.aggregations.Aggregator.SubAggCollectionMode; +import org.elasticsearch.search.aggregations.BucketOrder; import org.elasticsearch.search.aggregations.bucket.terms.Terms; import org.elasticsearch.search.aggregations.bucket.terms.Terms.Bucket; import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregatorFactory.ExecutionMode; -import org.elasticsearch.search.aggregations.BucketOrder; import org.elasticsearch.test.ESIntegTestCase; import java.util.ArrayList; @@ -263,7 +263,7 @@ private void assertUnboundedDocCountError(int size, SearchResponse accurateRespo public void testStringValueField() throws Exception { int size = randomIntBetween(1, 20); int shardSize = randomIntBetween(size, size * 2); - SearchResponse accurateResponse = client().prepareSearch("idx").setTypes("type") + SearchResponse accurateResponse = client().prepareSearch("idx") .addAggregation(terms("terms") .executionHint(randomExecutionHint()) .field(STRING_FIELD_NAME) @@ -274,7 +274,7 @@ public void testStringValueField() throws Exception { assertSearchResponse(accurateResponse); - SearchResponse testResponse = client().prepareSearch("idx").setTypes("type") + SearchResponse testResponse = client().prepareSearch("idx") .addAggregation(terms("terms") .executionHint(randomExecutionHint()) .field(STRING_FIELD_NAME) @@ -292,7 +292,7 @@ public void testStringValueField() throws Exception { public void testStringValueFieldSingleShard() throws Exception { int size = randomIntBetween(1, 20); int shardSize = randomIntBetween(size, size * 2); - SearchResponse accurateResponse = client().prepareSearch("idx_single_shard").setTypes("type") + SearchResponse accurateResponse = client().prepareSearch("idx_single_shard") .addAggregation(terms("terms") .executionHint(randomExecutionHint()) .field(STRING_FIELD_NAME) @@ -303,7 +303,7 @@ public void testStringValueFieldSingleShard() throws Exception { assertSearchResponse(accurateResponse); - SearchResponse testResponse = client().prepareSearch("idx_single_shard").setTypes("type") + SearchResponse testResponse = client().prepareSearch("idx_single_shard") .addAggregation(terms("terms") .executionHint(randomExecutionHint()) .field(STRING_FIELD_NAME) @@ -322,7 +322,7 @@ public void testStringValueFieldWithRouting() throws Exception { int size = randomIntBetween(1, 20); int shardSize = randomIntBetween(size, size * 2); - SearchResponse testResponse = client().prepareSearch("idx_with_routing").setTypes("type") + SearchResponse testResponse = client().prepareSearch("idx_with_routing") .setRouting(String.valueOf(between(1, numRoutingValues))) .addAggregation(terms("terms") .executionHint(randomExecutionHint()) @@ -341,7 +341,7 @@ public void testStringValueFieldWithRouting() throws Exception { public void testStringValueFieldDocCountAsc() throws Exception { int size = randomIntBetween(1, 20); int shardSize = randomIntBetween(size, size * 2); - SearchResponse accurateResponse = client().prepareSearch("idx_single_shard").setTypes("type") + SearchResponse accurateResponse = client().prepareSearch("idx_single_shard") .addAggregation(terms("terms") .executionHint(randomExecutionHint()) .field(STRING_FIELD_NAME) @@ -353,7 +353,7 @@ public void testStringValueFieldDocCountAsc() throws Exception { assertSearchResponse(accurateResponse); - SearchResponse testResponse = client().prepareSearch("idx_single_shard").setTypes("type") + SearchResponse testResponse = client().prepareSearch("idx_single_shard") .addAggregation(terms("terms") .executionHint(randomExecutionHint()) .field(STRING_FIELD_NAME) @@ -372,7 +372,7 @@ public void testStringValueFieldDocCountAsc() throws Exception { public void testStringValueFieldTermSortAsc() throws Exception { int size = randomIntBetween(1, 20); int shardSize = randomIntBetween(size, size * 2); - SearchResponse accurateResponse = client().prepareSearch("idx_single_shard").setTypes("type") + SearchResponse accurateResponse = client().prepareSearch("idx_single_shard") .addAggregation(terms("terms") .executionHint(randomExecutionHint()) .field(STRING_FIELD_NAME) @@ -384,7 +384,7 @@ public void testStringValueFieldTermSortAsc() throws Exception { assertSearchResponse(accurateResponse); - SearchResponse testResponse = client().prepareSearch("idx_single_shard").setTypes("type") + SearchResponse testResponse = client().prepareSearch("idx_single_shard") .addAggregation(terms("terms") .executionHint(randomExecutionHint()) .field(STRING_FIELD_NAME) @@ -403,7 +403,7 @@ public void testStringValueFieldTermSortAsc() throws Exception { public void testStringValueFieldTermSortDesc() throws Exception { int size = randomIntBetween(1, 20); int shardSize = randomIntBetween(size, size * 2); - SearchResponse accurateResponse = client().prepareSearch("idx_single_shard").setTypes("type") + SearchResponse accurateResponse = client().prepareSearch("idx_single_shard") .addAggregation(terms("terms") .executionHint(randomExecutionHint()) .field(STRING_FIELD_NAME) @@ -415,7 +415,7 @@ public void testStringValueFieldTermSortDesc() throws Exception { assertSearchResponse(accurateResponse); - SearchResponse testResponse = client().prepareSearch("idx_single_shard").setTypes("type") + SearchResponse testResponse = client().prepareSearch("idx_single_shard") .addAggregation(terms("terms") .executionHint(randomExecutionHint()) .field(STRING_FIELD_NAME) @@ -434,7 +434,7 @@ public void testStringValueFieldTermSortDesc() throws Exception { public void testStringValueFieldSubAggAsc() throws Exception { int size = randomIntBetween(1, 20); int shardSize = randomIntBetween(size, size * 2); - SearchResponse accurateResponse = client().prepareSearch("idx_single_shard").setTypes("type") + SearchResponse accurateResponse = client().prepareSearch("idx_single_shard") .addAggregation(terms("terms") .executionHint(randomExecutionHint()) .field(STRING_FIELD_NAME) @@ -447,7 +447,7 @@ public void testStringValueFieldSubAggAsc() throws Exception { assertSearchResponse(accurateResponse); - SearchResponse testResponse = client().prepareSearch("idx_single_shard").setTypes("type") + SearchResponse testResponse = client().prepareSearch("idx_single_shard") .addAggregation(terms("terms") .executionHint(randomExecutionHint()) .field(STRING_FIELD_NAME) @@ -467,7 +467,7 @@ public void testStringValueFieldSubAggAsc() throws Exception { public void testStringValueFieldSubAggDesc() throws Exception { int size = randomIntBetween(1, 20); int shardSize = randomIntBetween(size, size * 2); - SearchResponse accurateResponse = client().prepareSearch("idx_single_shard").setTypes("type") + SearchResponse accurateResponse = client().prepareSearch("idx_single_shard") .addAggregation(terms("terms") .executionHint(randomExecutionHint()) .field(STRING_FIELD_NAME) @@ -480,7 +480,7 @@ public void testStringValueFieldSubAggDesc() throws Exception { assertSearchResponse(accurateResponse); - SearchResponse testResponse = client().prepareSearch("idx_single_shard").setTypes("type") + SearchResponse testResponse = client().prepareSearch("idx_single_shard") .addAggregation(terms("terms") .executionHint(randomExecutionHint()) .field(STRING_FIELD_NAME) @@ -500,7 +500,7 @@ public void testStringValueFieldSubAggDesc() throws Exception { public void testLongValueField() throws Exception { int size = randomIntBetween(1, 20); int shardSize = randomIntBetween(size, size * 2); - SearchResponse accurateResponse = client().prepareSearch("idx").setTypes("type") + SearchResponse accurateResponse = client().prepareSearch("idx") .addAggregation(terms("terms") .executionHint(randomExecutionHint()) .field(LONG_FIELD_NAME) @@ -511,7 +511,7 @@ public void testLongValueField() throws Exception { assertSearchResponse(accurateResponse); - SearchResponse testResponse = client().prepareSearch("idx").setTypes("type") + SearchResponse testResponse = client().prepareSearch("idx") .addAggregation(terms("terms") .executionHint(randomExecutionHint()) .field(LONG_FIELD_NAME) @@ -529,7 +529,7 @@ public void testLongValueField() throws Exception { public void testLongValueFieldSingleShard() throws Exception { int size = randomIntBetween(1, 20); int shardSize = randomIntBetween(size, size * 2); - SearchResponse accurateResponse = client().prepareSearch("idx_single_shard").setTypes("type") + SearchResponse accurateResponse = client().prepareSearch("idx_single_shard") .addAggregation(terms("terms") .executionHint(randomExecutionHint()) .field(LONG_FIELD_NAME) @@ -540,7 +540,7 @@ public void testLongValueFieldSingleShard() throws Exception { assertSearchResponse(accurateResponse); - SearchResponse testResponse = client().prepareSearch("idx_single_shard").setTypes("type") + SearchResponse testResponse = client().prepareSearch("idx_single_shard") .addAggregation(terms("terms") .executionHint(randomExecutionHint()) .field(LONG_FIELD_NAME) @@ -559,7 +559,7 @@ public void testLongValueFieldWithRouting() throws Exception { int size = randomIntBetween(1, 20); int shardSize = randomIntBetween(size, size * 2); - SearchResponse testResponse = client().prepareSearch("idx_with_routing").setTypes("type") + SearchResponse testResponse = client().prepareSearch("idx_with_routing") .setRouting(String.valueOf(between(1, numRoutingValues))) .addAggregation(terms("terms") .executionHint(randomExecutionHint()) @@ -578,7 +578,7 @@ public void testLongValueFieldWithRouting() throws Exception { public void testLongValueFieldDocCountAsc() throws Exception { int size = randomIntBetween(1, 20); int shardSize = randomIntBetween(size, size * 2); - SearchResponse accurateResponse = client().prepareSearch("idx_single_shard").setTypes("type") + SearchResponse accurateResponse = client().prepareSearch("idx_single_shard") .addAggregation(terms("terms") .executionHint(randomExecutionHint()) .field(LONG_FIELD_NAME) @@ -590,7 +590,7 @@ public void testLongValueFieldDocCountAsc() throws Exception { assertSearchResponse(accurateResponse); - SearchResponse testResponse = client().prepareSearch("idx_single_shard").setTypes("type") + SearchResponse testResponse = client().prepareSearch("idx_single_shard") .addAggregation(terms("terms") .executionHint(randomExecutionHint()) .field(LONG_FIELD_NAME) @@ -609,7 +609,7 @@ public void testLongValueFieldDocCountAsc() throws Exception { public void testLongValueFieldTermSortAsc() throws Exception { int size = randomIntBetween(1, 20); int shardSize = randomIntBetween(size, size * 2); - SearchResponse accurateResponse = client().prepareSearch("idx_single_shard").setTypes("type") + SearchResponse accurateResponse = client().prepareSearch("idx_single_shard") .addAggregation(terms("terms") .executionHint(randomExecutionHint()) .field(LONG_FIELD_NAME) @@ -621,7 +621,7 @@ public void testLongValueFieldTermSortAsc() throws Exception { assertSearchResponse(accurateResponse); - SearchResponse testResponse = client().prepareSearch("idx_single_shard").setTypes("type") + SearchResponse testResponse = client().prepareSearch("idx_single_shard") .addAggregation(terms("terms") .executionHint(randomExecutionHint()) .field(LONG_FIELD_NAME) @@ -640,7 +640,7 @@ public void testLongValueFieldTermSortAsc() throws Exception { public void testLongValueFieldTermSortDesc() throws Exception { int size = randomIntBetween(1, 20); int shardSize = randomIntBetween(size, size * 2); - SearchResponse accurateResponse = client().prepareSearch("idx_single_shard").setTypes("type") + SearchResponse accurateResponse = client().prepareSearch("idx_single_shard") .addAggregation(terms("terms") .executionHint(randomExecutionHint()) .field(LONG_FIELD_NAME) @@ -652,7 +652,7 @@ public void testLongValueFieldTermSortDesc() throws Exception { assertSearchResponse(accurateResponse); - SearchResponse testResponse = client().prepareSearch("idx_single_shard").setTypes("type") + SearchResponse testResponse = client().prepareSearch("idx_single_shard") .addAggregation(terms("terms") .executionHint(randomExecutionHint()) .field(LONG_FIELD_NAME) @@ -671,7 +671,7 @@ public void testLongValueFieldTermSortDesc() throws Exception { public void testLongValueFieldSubAggAsc() throws Exception { int size = randomIntBetween(1, 20); int shardSize = randomIntBetween(size, size * 2); - SearchResponse accurateResponse = client().prepareSearch("idx_single_shard").setTypes("type") + SearchResponse accurateResponse = client().prepareSearch("idx_single_shard") .addAggregation(terms("terms") .executionHint(randomExecutionHint()) .field(LONG_FIELD_NAME) @@ -684,7 +684,7 @@ public void testLongValueFieldSubAggAsc() throws Exception { assertSearchResponse(accurateResponse); - SearchResponse testResponse = client().prepareSearch("idx_single_shard").setTypes("type") + SearchResponse testResponse = client().prepareSearch("idx_single_shard") .addAggregation(terms("terms") .executionHint(randomExecutionHint()) .field(LONG_FIELD_NAME) @@ -704,7 +704,7 @@ public void testLongValueFieldSubAggAsc() throws Exception { public void testLongValueFieldSubAggDesc() throws Exception { int size = randomIntBetween(1, 20); int shardSize = randomIntBetween(size, size * 2); - SearchResponse accurateResponse = client().prepareSearch("idx_single_shard").setTypes("type") + SearchResponse accurateResponse = client().prepareSearch("idx_single_shard") .addAggregation(terms("terms") .executionHint(randomExecutionHint()) .field(LONG_FIELD_NAME) @@ -717,7 +717,7 @@ public void testLongValueFieldSubAggDesc() throws Exception { assertSearchResponse(accurateResponse); - SearchResponse testResponse = client().prepareSearch("idx_single_shard").setTypes("type") + SearchResponse testResponse = client().prepareSearch("idx_single_shard") .addAggregation(terms("terms") .executionHint(randomExecutionHint()) .field(LONG_FIELD_NAME) @@ -737,7 +737,7 @@ public void testLongValueFieldSubAggDesc() throws Exception { public void testDoubleValueField() throws Exception { int size = randomIntBetween(1, 20); int shardSize = randomIntBetween(size, size * 2); - SearchResponse accurateResponse = client().prepareSearch("idx").setTypes("type") + SearchResponse accurateResponse = client().prepareSearch("idx") .addAggregation(terms("terms") .executionHint(randomExecutionHint()) .field(DOUBLE_FIELD_NAME) @@ -748,7 +748,7 @@ public void testDoubleValueField() throws Exception { assertSearchResponse(accurateResponse); - SearchResponse testResponse = client().prepareSearch("idx").setTypes("type") + SearchResponse testResponse = client().prepareSearch("idx") .addAggregation(terms("terms") .executionHint(randomExecutionHint()) .field(DOUBLE_FIELD_NAME) @@ -766,7 +766,7 @@ public void testDoubleValueField() throws Exception { public void testDoubleValueFieldSingleShard() throws Exception { int size = randomIntBetween(1, 20); int shardSize = randomIntBetween(size, size * 2); - SearchResponse accurateResponse = client().prepareSearch("idx_single_shard").setTypes("type") + SearchResponse accurateResponse = client().prepareSearch("idx_single_shard") .addAggregation(terms("terms") .executionHint(randomExecutionHint()) .field(DOUBLE_FIELD_NAME) @@ -777,7 +777,7 @@ public void testDoubleValueFieldSingleShard() throws Exception { assertSearchResponse(accurateResponse); - SearchResponse testResponse = client().prepareSearch("idx_single_shard").setTypes("type") + SearchResponse testResponse = client().prepareSearch("idx_single_shard") .addAggregation(terms("terms") .executionHint(randomExecutionHint()) .field(DOUBLE_FIELD_NAME) @@ -796,7 +796,7 @@ public void testDoubleValueFieldWithRouting() throws Exception { int size = randomIntBetween(1, 20); int shardSize = randomIntBetween(size, size * 2); - SearchResponse testResponse = client().prepareSearch("idx_with_routing").setTypes("type") + SearchResponse testResponse = client().prepareSearch("idx_with_routing") .setRouting(String.valueOf(between(1, numRoutingValues))) .addAggregation(terms("terms") .executionHint(randomExecutionHint()) @@ -815,7 +815,7 @@ public void testDoubleValueFieldWithRouting() throws Exception { public void testDoubleValueFieldDocCountAsc() throws Exception { int size = randomIntBetween(1, 20); int shardSize = randomIntBetween(size, size * 2); - SearchResponse accurateResponse = client().prepareSearch("idx_single_shard").setTypes("type") + SearchResponse accurateResponse = client().prepareSearch("idx_single_shard") .addAggregation(terms("terms") .executionHint(randomExecutionHint()) .field(DOUBLE_FIELD_NAME) @@ -827,7 +827,7 @@ public void testDoubleValueFieldDocCountAsc() throws Exception { assertSearchResponse(accurateResponse); - SearchResponse testResponse = client().prepareSearch("idx_single_shard").setTypes("type") + SearchResponse testResponse = client().prepareSearch("idx_single_shard") .addAggregation(terms("terms") .executionHint(randomExecutionHint()) .field(DOUBLE_FIELD_NAME) @@ -846,7 +846,7 @@ public void testDoubleValueFieldDocCountAsc() throws Exception { public void testDoubleValueFieldTermSortAsc() throws Exception { int size = randomIntBetween(1, 20); int shardSize = randomIntBetween(size, size * 2); - SearchResponse accurateResponse = client().prepareSearch("idx_single_shard").setTypes("type") + SearchResponse accurateResponse = client().prepareSearch("idx_single_shard") .addAggregation(terms("terms") .executionHint(randomExecutionHint()) .field(DOUBLE_FIELD_NAME) @@ -858,7 +858,7 @@ public void testDoubleValueFieldTermSortAsc() throws Exception { assertSearchResponse(accurateResponse); - SearchResponse testResponse = client().prepareSearch("idx_single_shard").setTypes("type") + SearchResponse testResponse = client().prepareSearch("idx_single_shard") .addAggregation(terms("terms") .executionHint(randomExecutionHint()) .field(DOUBLE_FIELD_NAME) @@ -877,7 +877,7 @@ public void testDoubleValueFieldTermSortAsc() throws Exception { public void testDoubleValueFieldTermSortDesc() throws Exception { int size = randomIntBetween(1, 20); int shardSize = randomIntBetween(size, size * 2); - SearchResponse accurateResponse = client().prepareSearch("idx_single_shard").setTypes("type") + SearchResponse accurateResponse = client().prepareSearch("idx_single_shard") .addAggregation(terms("terms") .executionHint(randomExecutionHint()) .field(DOUBLE_FIELD_NAME) @@ -889,7 +889,7 @@ public void testDoubleValueFieldTermSortDesc() throws Exception { assertSearchResponse(accurateResponse); - SearchResponse testResponse = client().prepareSearch("idx_single_shard").setTypes("type") + SearchResponse testResponse = client().prepareSearch("idx_single_shard") .addAggregation(terms("terms") .executionHint(randomExecutionHint()) .field(DOUBLE_FIELD_NAME) @@ -908,7 +908,7 @@ public void testDoubleValueFieldTermSortDesc() throws Exception { public void testDoubleValueFieldSubAggAsc() throws Exception { int size = randomIntBetween(1, 20); int shardSize = randomIntBetween(size, size * 2); - SearchResponse accurateResponse = client().prepareSearch("idx_single_shard").setTypes("type") + SearchResponse accurateResponse = client().prepareSearch("idx_single_shard") .addAggregation(terms("terms") .executionHint(randomExecutionHint()) .field(DOUBLE_FIELD_NAME) @@ -921,7 +921,7 @@ public void testDoubleValueFieldSubAggAsc() throws Exception { assertSearchResponse(accurateResponse); - SearchResponse testResponse = client().prepareSearch("idx_single_shard").setTypes("type") + SearchResponse testResponse = client().prepareSearch("idx_single_shard") .addAggregation(terms("terms") .executionHint(randomExecutionHint()) .field(DOUBLE_FIELD_NAME) @@ -941,7 +941,7 @@ public void testDoubleValueFieldSubAggAsc() throws Exception { public void testDoubleValueFieldSubAggDesc() throws Exception { int size = randomIntBetween(1, 20); int shardSize = randomIntBetween(size, size * 2); - SearchResponse accurateResponse = client().prepareSearch("idx_single_shard").setTypes("type") + SearchResponse accurateResponse = client().prepareSearch("idx_single_shard") .addAggregation(terms("terms") .executionHint(randomExecutionHint()) .field(DOUBLE_FIELD_NAME) @@ -954,7 +954,7 @@ public void testDoubleValueFieldSubAggDesc() throws Exception { assertSearchResponse(accurateResponse); - SearchResponse testResponse = client().prepareSearch("idx_single_shard").setTypes("type") + SearchResponse testResponse = client().prepareSearch("idx_single_shard") .addAggregation(terms("terms") .executionHint(randomExecutionHint()) .field(DOUBLE_FIELD_NAME) @@ -977,7 +977,7 @@ public void testDoubleValueFieldSubAggDesc() throws Exception { * 3 one-shard indices. */ public void testFixedDocs() throws Exception { - SearchResponse response = client().prepareSearch("idx_fixed_docs_0", "idx_fixed_docs_1", "idx_fixed_docs_2").setTypes("type") + SearchResponse response = client().prepareSearch("idx_fixed_docs_0", "idx_fixed_docs_1", "idx_fixed_docs_2") .addAggregation(terms("terms") .executionHint(randomExecutionHint()) .field(STRING_FIELD_NAME) diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/terms/StringTermsIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/terms/StringTermsIT.java index 11eed6f90e739..ee17b70f737c7 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/terms/StringTermsIT.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/terms/StringTermsIT.java @@ -262,7 +262,7 @@ public void testMultiValueFieldWithPartitionedFiltering() throws Exception { private void runTestFieldWithPartitionedFiltering(String field) throws Exception { // Find total number of unique terms - SearchResponse allResponse = client().prepareSearch("idx").setTypes("type") + SearchResponse allResponse = client().prepareSearch("idx") .addAggregation(terms("terms").field(field).size(10000).collectMode(randomFrom(SubAggCollectionMode.values()))) .get(); assertSearchResponse(allResponse); @@ -275,7 +275,7 @@ private void runTestFieldWithPartitionedFiltering(String field) throws Exception final int numPartitions = randomIntBetween(2, 4); Set foundTerms = new HashSet<>(); for (int partition = 0; partition < numPartitions; partition++) { - SearchResponse response = client().prepareSearch("idx").setTypes("type").addAggregation(terms("terms").field(field) + SearchResponse response = client().prepareSearch("idx").addAggregation(terms("terms").field(field) .includeExclude(new IncludeExclude(partition, numPartitions)).collectMode(randomFrom(SubAggCollectionMode.values()))) .get(); assertSearchResponse(response); @@ -292,7 +292,7 @@ private void runTestFieldWithPartitionedFiltering(String field) throws Exception public void testSingleValuedFieldWithValueScript() throws Exception { SearchResponse response = client() .prepareSearch("idx") - .setTypes("type") + .addAggregation( terms("terms") .executionHint(randomExecutionHint()) @@ -319,7 +319,7 @@ public void testSingleValuedFieldWithValueScript() throws Exception { public void testMultiValuedFieldWithValueScriptNotUnique() throws Exception { SearchResponse response = client() .prepareSearch("idx") - .setTypes("type") + .addAggregation( terms("terms") .executionHint(randomExecutionHint()) @@ -345,7 +345,7 @@ public void testMultiValuedFieldWithValueScriptNotUnique() throws Exception { public void testMultiValuedScript() throws Exception { SearchResponse response = client() .prepareSearch("idx") - .setTypes("type") + .addAggregation( terms("terms") .executionHint(randomExecutionHint()) @@ -376,7 +376,7 @@ public void testMultiValuedScript() throws Exception { public void testMultiValuedFieldWithValueScript() throws Exception { SearchResponse response = client() .prepareSearch("idx") - .setTypes("type") + .addAggregation( terms("terms") .executionHint(randomExecutionHint()) @@ -422,7 +422,7 @@ public void testScriptSingleValue() throws Exception { SearchResponse response = client() .prepareSearch("idx") - .setTypes("type") + .addAggregation( terms("terms") .collectMode(randomFrom(SubAggCollectionMode.values())) @@ -451,7 +451,7 @@ public void testScriptSingleValueExplicitSingleValue() throws Exception { SearchResponse response = client() .prepareSearch("idx") - .setTypes("type") + .addAggregation( terms("terms") .collectMode(randomFrom(SubAggCollectionMode.values())) @@ -477,7 +477,7 @@ public void testScriptSingleValueExplicitSingleValue() throws Exception { public void testScriptMultiValued() throws Exception { SearchResponse response = client() .prepareSearch("idx") - .setTypes("type") + .addAggregation( terms("terms") .collectMode(randomFrom(SubAggCollectionMode.values())) @@ -508,7 +508,7 @@ public void testScriptMultiValued() throws Exception { public void testPartiallyUnmapped() throws Exception { SearchResponse response = client() .prepareSearch("idx", "idx_unmapped") - .setTypes("type") + .addAggregation( terms("terms").executionHint(randomExecutionHint()).field(SINGLE_VALUED_FIELD_NAME) .collectMode(randomFrom(SubAggCollectionMode.values()))).get(); @@ -532,7 +532,7 @@ public void testStringTermsNestedIntoPerBucketAggregator() throws Exception { // no execution hint so that the logic that decides whether or not to use ordinals is executed SearchResponse response = client() .prepareSearch("idx") - .setTypes("type") + .addAggregation( filter("filter", termQuery(MULTI_VALUED_FIELD_NAME, "val3")).subAggregation( terms("terms").field(MULTI_VALUED_FIELD_NAME).collectMode(randomFrom(SubAggCollectionMode.values())))) @@ -560,7 +560,7 @@ public void testSingleValuedFieldOrderedByIllegalAgg() throws Exception { try { client() .prepareSearch("idx") - .setTypes("type") + .addAggregation( terms("terms").executionHint(randomExecutionHint()).field(SINGLE_VALUED_FIELD_NAME) .collectMode(randomFrom(SubAggCollectionMode.values())) @@ -588,7 +588,7 @@ public void testSingleValuedFieldOrderedBySingleBucketSubAggregationAsc() throws boolean asc = randomBoolean(); SearchResponse response = client() .prepareSearch("idx") - .setTypes("type") + .addAggregation( terms("tags").executionHint(randomExecutionHint()).field("tag") .collectMode(randomFrom(SubAggCollectionMode.values())).order(BucketOrder.aggregation("filter", asc)) @@ -624,7 +624,7 @@ public void testSingleValuedFieldOrderedBySubAggregationAscMultiHierarchyLevels( boolean asc = randomBoolean(); SearchResponse response = client() .prepareSearch("idx") - .setTypes("type") + .addAggregation( terms("tags") .executionHint(randomExecutionHint()) @@ -687,7 +687,7 @@ public void testSingleValuedFieldOrderedBySubAggregationAscMultiHierarchyLevelsS boolean asc = randomBoolean(); SearchResponse response = client() .prepareSearch("idx") - .setTypes("type") + .addAggregation( terms("tags") .executionHint(randomExecutionHint()) @@ -750,7 +750,7 @@ public void testSingleValuedFieldOrderedBySubAggregationAscMultiHierarchyLevelsS boolean asc = randomBoolean(); SearchResponse response = client() .prepareSearch("idx") - .setTypes("type") + .addAggregation( terms("tags") .executionHint(randomExecutionHint()) @@ -807,7 +807,7 @@ public void testSingleValuedFieldOrderedByMissingSubAggregation() throws Excepti for (String index : Arrays.asList("idx", "idx_unmapped")) { try { client().prepareSearch(index) - .setTypes("type") + .addAggregation( terms("terms").executionHint(randomExecutionHint()).field(SINGLE_VALUED_FIELD_NAME) .collectMode(randomFrom(SubAggCollectionMode.values())) @@ -825,7 +825,7 @@ public void testSingleValuedFieldOrderedByNonMetricsOrMultiBucketSubAggregation( for (String index : Arrays.asList("idx", "idx_unmapped")) { try { client().prepareSearch(index) - .setTypes("type") + .addAggregation( terms("terms").executionHint(randomExecutionHint()).field(SINGLE_VALUED_FIELD_NAME) .collectMode(randomFrom(SubAggCollectionMode.values())) @@ -847,7 +847,7 @@ public void testSingleValuedFieldOrderedByMultiValuedSubAggregationWithUnknownMe try { SearchResponse response = client() .prepareSearch(index) - .setTypes("type") + .addAggregation( terms("terms").executionHint(randomExecutionHint()).field(SINGLE_VALUED_FIELD_NAME) .collectMode(randomFrom(SubAggCollectionMode.values())) @@ -866,7 +866,7 @@ public void testSingleValuedFieldOrderedByMultiValuedSubAggregationWithoutMetric for (String index : Arrays.asList("idx", "idx_unmapped")) { try { client().prepareSearch(index) - .setTypes("type") + .addAggregation( terms("terms").executionHint(randomExecutionHint()).field(SINGLE_VALUED_FIELD_NAME) .collectMode(randomFrom(SubAggCollectionMode.values())) @@ -886,7 +886,7 @@ public void testSingleValuedFieldOrderedByMultiValueSubAggregationAsc() throws E boolean asc = true; SearchResponse response = client() .prepareSearch("idx") - .setTypes("type") + .addAggregation( terms("terms").executionHint(randomExecutionHint()).field(SINGLE_VALUED_FIELD_NAME) .collectMode(randomFrom(SubAggCollectionMode.values())).order(BucketOrder.aggregation("stats.avg", asc)) @@ -916,7 +916,7 @@ public void testSingleValuedFieldOrderedByMultiValueSubAggregationDesc() throws boolean asc = false; SearchResponse response = client() .prepareSearch("idx") - .setTypes("type") + .addAggregation( terms("terms").executionHint(randomExecutionHint()).field(SINGLE_VALUED_FIELD_NAME) .collectMode(randomFrom(SubAggCollectionMode.values())).order(BucketOrder.aggregation("stats.avg", asc)) @@ -947,7 +947,7 @@ public void testSingleValuedFieldOrderedByMultiValueExtendedStatsAsc() throws Ex boolean asc = true; SearchResponse response = client() .prepareSearch("idx") - .setTypes("type") + .addAggregation( terms("terms").executionHint(randomExecutionHint()).field(SINGLE_VALUED_FIELD_NAME) .collectMode(randomFrom(SubAggCollectionMode.values())) @@ -979,7 +979,7 @@ public void testSingleValuedFieldOrderedByStatsAggAscWithTermsSubAgg() throws Ex boolean asc = true; SearchResponse response = client() .prepareSearch("idx") - .setTypes("type") + .addAggregation( terms("terms").executionHint(randomExecutionHint()).field(SINGLE_VALUED_FIELD_NAME) .collectMode(randomFrom(SubAggCollectionMode.values())) @@ -1089,7 +1089,7 @@ private void assertMultiSortResponse(String[] expectedKeys, BucketOrder... order public void testIndexMetaField() throws Exception { SearchResponse response = client() .prepareSearch("idx", "empty_bucket_idx") - .setTypes("type") + .addAggregation( terms("terms").collectMode(randomFrom(SubAggCollectionMode.values())).executionHint(randomExecutionHint()) .field(IndexFieldMapper.NAME)).get(); diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/CardinalityIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/CardinalityIT.java index 759adddd9e890..174b40a154490 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/CardinalityIT.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/CardinalityIT.java @@ -172,7 +172,7 @@ private static String multiNumericField(boolean hash) { } public void testUnmapped() throws Exception { - SearchResponse response = client().prepareSearch("idx_unmapped").setTypes("type") + SearchResponse response = client().prepareSearch("idx_unmapped") .addAggregation(cardinality("cardinality").precisionThreshold(precisionThreshold).field("str_value")) .get(); @@ -185,7 +185,7 @@ public void testUnmapped() throws Exception { } public void testPartiallyUnmapped() throws Exception { - SearchResponse response = client().prepareSearch("idx", "idx_unmapped").setTypes("type") + SearchResponse response = client().prepareSearch("idx", "idx_unmapped") .addAggregation(cardinality("cardinality").precisionThreshold(precisionThreshold).field("str_value")) .get(); @@ -198,7 +198,7 @@ public void testPartiallyUnmapped() throws Exception { } public void testSingleValuedString() throws Exception { - SearchResponse response = client().prepareSearch("idx").setTypes("type") + SearchResponse response = client().prepareSearch("idx") .addAggregation(cardinality("cardinality").precisionThreshold(precisionThreshold).field("str_value")) .get(); @@ -211,7 +211,7 @@ public void testSingleValuedString() throws Exception { } public void testSingleValuedNumeric() throws Exception { - SearchResponse response = client().prepareSearch("idx").setTypes("type") + SearchResponse response = client().prepareSearch("idx") .addAggregation(cardinality("cardinality").precisionThreshold(precisionThreshold).field(singleNumericField())) .get(); @@ -250,7 +250,7 @@ public void testSingleValuedNumericGetProperty() throws Exception { } public void testSingleValuedNumericHashed() throws Exception { - SearchResponse response = client().prepareSearch("idx").setTypes("type") + SearchResponse response = client().prepareSearch("idx") .addAggregation(cardinality("cardinality").precisionThreshold(precisionThreshold).field(singleNumericField())) .get(); @@ -263,7 +263,7 @@ public void testSingleValuedNumericHashed() throws Exception { } public void testMultiValuedString() throws Exception { - SearchResponse response = client().prepareSearch("idx").setTypes("type") + SearchResponse response = client().prepareSearch("idx") .addAggregation(cardinality("cardinality").precisionThreshold(precisionThreshold).field("str_values")) .get(); @@ -276,7 +276,7 @@ public void testMultiValuedString() throws Exception { } public void testMultiValuedNumeric() throws Exception { - SearchResponse response = client().prepareSearch("idx").setTypes("type") + SearchResponse response = client().prepareSearch("idx") .addAggregation(cardinality("cardinality").precisionThreshold(precisionThreshold).field(multiNumericField(false))) .get(); @@ -289,7 +289,7 @@ public void testMultiValuedNumeric() throws Exception { } public void testMultiValuedNumericHashed() throws Exception { - SearchResponse response = client().prepareSearch("idx").setTypes("type") + SearchResponse response = client().prepareSearch("idx") .addAggregation(cardinality("cardinality").precisionThreshold(precisionThreshold).field(multiNumericField(true))) .get(); @@ -302,7 +302,7 @@ public void testMultiValuedNumericHashed() throws Exception { } public void testSingleValuedStringScript() throws Exception { - SearchResponse response = client().prepareSearch("idx").setTypes("type") + SearchResponse response = client().prepareSearch("idx") .addAggregation( cardinality("cardinality") .precisionThreshold(precisionThreshold) @@ -318,7 +318,7 @@ public void testSingleValuedStringScript() throws Exception { } public void testMultiValuedStringScript() throws Exception { - SearchResponse response = client().prepareSearch("idx").setTypes("type") + SearchResponse response = client().prepareSearch("idx") .addAggregation( cardinality("cardinality") .precisionThreshold(precisionThreshold) @@ -335,7 +335,7 @@ public void testMultiValuedStringScript() throws Exception { public void testSingleValuedNumericScript() throws Exception { Script script = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc[' + singleNumericField() + '].value", emptyMap()); - SearchResponse response = client().prepareSearch("idx").setTypes("type") + SearchResponse response = client().prepareSearch("idx") .addAggregation(cardinality("cardinality").precisionThreshold(precisionThreshold).script(script)) .get(); @@ -350,7 +350,7 @@ public void testSingleValuedNumericScript() throws Exception { public void testMultiValuedNumericScript() throws Exception { Script script = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc[' + multiNumericField(false) + ']", Collections.emptyMap()); - SearchResponse response = client().prepareSearch("idx").setTypes("type") + SearchResponse response = client().prepareSearch("idx") .addAggregation(cardinality("cardinality").precisionThreshold(precisionThreshold).script(script)) .get(); @@ -363,7 +363,7 @@ public void testMultiValuedNumericScript() throws Exception { } public void testSingleValuedStringValueScript() throws Exception { - SearchResponse response = client().prepareSearch("idx").setTypes("type") + SearchResponse response = client().prepareSearch("idx") .addAggregation( cardinality("cardinality") .precisionThreshold(precisionThreshold) @@ -380,7 +380,7 @@ public void testSingleValuedStringValueScript() throws Exception { } public void testMultiValuedStringValueScript() throws Exception { - SearchResponse response = client().prepareSearch("idx").setTypes("type") + SearchResponse response = client().prepareSearch("idx") .addAggregation( cardinality("cardinality") .precisionThreshold(precisionThreshold) @@ -397,7 +397,7 @@ public void testMultiValuedStringValueScript() throws Exception { } public void testSingleValuedNumericValueScript() throws Exception { - SearchResponse response = client().prepareSearch("idx").setTypes("type") + SearchResponse response = client().prepareSearch("idx") .addAggregation( cardinality("cardinality") .precisionThreshold(precisionThreshold) @@ -414,7 +414,7 @@ public void testSingleValuedNumericValueScript() throws Exception { } public void testMultiValuedNumericValueScript() throws Exception { - SearchResponse response = client().prepareSearch("idx").setTypes("type") + SearchResponse response = client().prepareSearch("idx") .addAggregation( cardinality("cardinality") .precisionThreshold(precisionThreshold) @@ -431,7 +431,7 @@ public void testMultiValuedNumericValueScript() throws Exception { } public void testAsSubAgg() throws Exception { - SearchResponse response = client().prepareSearch("idx").setTypes("type") + SearchResponse response = client().prepareSearch("idx") .addAggregation(terms("terms").field("str_value") .collectMode(randomFrom(SubAggCollectionMode.values())) .subAggregation(cardinality("cardinality").precisionThreshold(precisionThreshold).field("str_values"))) diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/SerialDiffIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/SerialDiffIT.java index 9d5c790628fc6..cf771a9727003 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/SerialDiffIT.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/SerialDiffIT.java @@ -228,7 +228,7 @@ private void setupExpected(MetricTarget target) { public void testBasicDiff() { SearchResponse response = client() - .prepareSearch("idx").setTypes("type") + .prepareSearch("idx") .addAggregation( histogram("histo").field(INTERVAL_FIELD).interval(interval) .extendedBounds(0L, (long) (interval * (numBuckets - 1))) @@ -275,7 +275,7 @@ public void testBasicDiff() { public void testInvalidLagSize() { try { client() - .prepareSearch("idx").setTypes("type") + .prepareSearch("idx") .addAggregation( histogram("histo").field(INTERVAL_FIELD).interval(interval) .extendedBounds(0L, (long) (interval * (numBuckets - 1))) diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/support/ScriptValuesTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/support/ScriptValuesTests.java index 2ac97a4408dcb..8d15ae2c01673 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/support/ScriptValuesTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/support/ScriptValuesTests.java @@ -20,11 +20,10 @@ package org.elasticsearch.search.aggregations.support; import com.carrotsearch.randomizedtesting.generators.RandomStrings; -import java.util.Collections; + import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.Scorable; import org.apache.lucene.util.BytesRef; -import org.elasticsearch.common.Strings; import org.elasticsearch.script.AggregationScript; import org.elasticsearch.search.aggregations.support.values.ScriptBytesValues; import org.elasticsearch.search.aggregations.support.values.ScriptDoubleValues; @@ -35,6 +34,7 @@ import java.io.IOException; import java.util.Arrays; +import java.util.Collections; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -47,7 +47,7 @@ private static class FakeAggregationScript extends AggregationScript { int index; FakeAggregationScript(Object[][] values) { - super(Collections.emptyMap(), new SearchLookup(null, null, Strings.EMPTY_ARRAY) { + super(Collections.emptyMap(), new SearchLookup(null, null) { @Override public LeafSearchLookup getLeafSearchLookup(LeafReaderContext context) { diff --git a/server/src/test/java/org/elasticsearch/search/basic/SearchWithRandomIOExceptionsIT.java b/server/src/test/java/org/elasticsearch/search/basic/SearchWithRandomIOExceptionsIT.java index 0a4d3201f5cc3..9aa1626b8646c 100644 --- a/server/src/test/java/org/elasticsearch/search/basic/SearchWithRandomIOExceptionsIT.java +++ b/server/src/test/java/org/elasticsearch/search/basic/SearchWithRandomIOExceptionsIT.java @@ -39,6 +39,7 @@ import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.store.MockFSDirectoryFactory; import org.elasticsearch.test.store.MockFSIndexStore; + import java.io.IOException; import java.util.Arrays; import java.util.Collection; @@ -169,7 +170,7 @@ public void testRandomDirectoryIOExceptions() throws IOException, InterruptedExc int docToQuery = between(0, numDocs - 1); int expectedResults = added[docToQuery] ? 1 : 0; logger.info("Searching for [test:{}]", English.intToEnglish(docToQuery)); - SearchResponse searchResponse = client().prepareSearch().setTypes("type") + SearchResponse searchResponse = client().prepareSearch() .setQuery(QueryBuilders.matchQuery("test", English.intToEnglish(docToQuery))) .setSize(expectedResults).get(); logger.info("Successful shards: [{}] numShards: [{}]", searchResponse.getSuccessfulShards(), numShards.numPrimaries); @@ -177,7 +178,7 @@ public void testRandomDirectoryIOExceptions() throws IOException, InterruptedExc assertResultsAndLogOnFailure(expectedResults, searchResponse); } // check match all - searchResponse = client().prepareSearch().setTypes("type").setQuery(QueryBuilders.matchAllQuery()) + searchResponse = client().prepareSearch().setQuery(QueryBuilders.matchAllQuery()) .setSize(numCreated + numInitialDocs).addSort("_uid", SortOrder.ASC).get(); logger.info("Match all Successful shards: [{}] numShards: [{}]", searchResponse.getSuccessfulShards(), numShards.numPrimaries); @@ -202,7 +203,7 @@ public void testRandomDirectoryIOExceptions() throws IOException, InterruptedExc .put(MockFSDirectoryFactory.RANDOM_IO_EXCEPTION_RATE_ON_OPEN_SETTING.getKey(), 0)); client().admin().indices().prepareOpen("test").execute().get(); ensureGreen(); - SearchResponse searchResponse = client().prepareSearch().setTypes("type") + SearchResponse searchResponse = client().prepareSearch() .setQuery(QueryBuilders.matchQuery("test", "init")).get(); assertNoFailures(searchResponse); assertHitCount(searchResponse, numInitialDocs); diff --git a/server/src/test/java/org/elasticsearch/search/fetch/subphase/FetchSourceSubPhaseTests.java b/server/src/test/java/org/elasticsearch/search/fetch/subphase/FetchSourceSubPhaseTests.java index 7790e8d6576ca..4b62e77393865 100644 --- a/server/src/test/java/org/elasticsearch/search/fetch/subphase/FetchSourceSubPhaseTests.java +++ b/server/src/test/java/org/elasticsearch/search/fetch/subphase/FetchSourceSubPhaseTests.java @@ -25,8 +25,8 @@ import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.search.fetch.FetchSubPhase; import org.elasticsearch.search.SearchHit; +import org.elasticsearch.search.fetch.FetchSubPhase; import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.search.lookup.SearchLookup; import org.elasticsearch.test.ESTestCase; @@ -173,7 +173,7 @@ public FetchSourceContext fetchSourceContext() { @Override public SearchLookup lookup() { - SearchLookup lookup = new SearchLookup(this.mapperService(), this::getForField, null); + SearchLookup lookup = new SearchLookup(this.mapperService(), this::getForField); lookup.source().setSource(source); return lookup; } diff --git a/server/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/CustomHighlighterSearchIT.java b/server/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/CustomHighlighterSearchIT.java index b7eac260cd852..8faff9e1050ec 100644 --- a/server/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/CustomHighlighterSearchIT.java +++ b/server/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/CustomHighlighterSearchIT.java @@ -21,7 +21,6 @@ import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.search.fetch.subphase.highlight.HighlightBuilder; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESIntegTestCase.ClusterScope; import org.elasticsearch.test.ESIntegTestCase.Scope; @@ -57,7 +56,7 @@ protected void setup() throws Exception{ } public void testThatCustomHighlightersAreSupported() throws IOException { - SearchResponse searchResponse = client().prepareSearch("test").setTypes("test") + SearchResponse searchResponse = client().prepareSearch("test") .setQuery(QueryBuilders.matchAllQuery()) .highlighter(new HighlightBuilder().field("name").highlighterType("test-custom")) .get(); @@ -71,7 +70,7 @@ public void testThatCustomHighlighterCanBeConfiguredPerField() throws Exception options.put("myFieldOption", "someValue"); highlightConfig.options(options); - SearchResponse searchResponse = client().prepareSearch("test").setTypes("test") + SearchResponse searchResponse = client().prepareSearch("test") .setQuery(QueryBuilders.matchAllQuery()) .highlighter(new HighlightBuilder().field(highlightConfig)) .get(); @@ -84,7 +83,7 @@ public void testThatCustomHighlighterCanBeConfiguredGlobally() throws Exception Map options = new HashMap<>(); options.put("myGlobalOption", "someValue"); - SearchResponse searchResponse = client().prepareSearch("test").setTypes("test").setQuery(QueryBuilders.matchAllQuery()) + SearchResponse searchResponse = client().prepareSearch("test").setQuery(QueryBuilders.matchAllQuery()) .highlighter(new HighlightBuilder().field("name").highlighterType("test-custom").options(options)) .get(); @@ -93,7 +92,7 @@ public void testThatCustomHighlighterCanBeConfiguredGlobally() throws Exception } public void testThatCustomHighlighterReceivesFieldsInOrder() throws Exception { - SearchResponse searchResponse = client().prepareSearch("test").setTypes("test") + SearchResponse searchResponse = client().prepareSearch("test") .setQuery(QueryBuilders.boolQuery().must(QueryBuilders.matchAllQuery()).should(QueryBuilders .termQuery("name", "arbitrary"))) .highlighter( diff --git a/server/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlighterSearchIT.java b/server/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlighterSearchIT.java index 3c21085fc905d..f5e601fd97abd 100644 --- a/server/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlighterSearchIT.java +++ b/server/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlighterSearchIT.java @@ -2536,7 +2536,7 @@ public void testDoesNotHighlightTypeName() throws Exception { indexRandom(true, client().prepareIndex("test", "typename").setSource("foo", "test typename")); for (String highlighter : ALL_TYPES) { - SearchResponse response = client().prepareSearch("test").setTypes("typename").setQuery(matchQuery("foo", "test")) + SearchResponse response = client().prepareSearch("test").setQuery(matchQuery("foo", "test")) .highlighter(new HighlightBuilder().field("foo").highlighterType(highlighter).requireFieldMatch(false)).get(); assertHighlight(response, 0, "foo", 0, 1, equalTo("test typename")); } @@ -2555,7 +2555,7 @@ public void testDoesNotHighlightAliasFilters() throws Exception { indexRandom(true, client().prepareIndex("test", "typename").setSource("foo", "test japanese")); for (String highlighter : ALL_TYPES) { - SearchResponse response = client().prepareSearch("filtered_alias").setTypes("typename").setQuery(matchQuery("foo", "test")) + SearchResponse response = client().prepareSearch("filtered_alias").setQuery(matchQuery("foo", "test")) .highlighter(new HighlightBuilder().field("foo").highlighterType(highlighter).requireFieldMatch(false)).get(); assertHighlight(response, 0, "foo", 0, 1, equalTo("test japanese")); } diff --git a/server/src/test/java/org/elasticsearch/search/fields/SearchFieldsIT.java b/server/src/test/java/org/elasticsearch/search/fields/SearchFieldsIT.java index 356fafbbb4de2..b401c28f5b622 100644 --- a/server/src/test/java/org/elasticsearch/search/fields/SearchFieldsIT.java +++ b/server/src/test/java/org/elasticsearch/search/fields/SearchFieldsIT.java @@ -654,7 +654,6 @@ public void testSearchFieldsMetaData() throws Exception { .get(); SearchResponse searchResponse = client().prepareSearch("my-index") - .setTypes("my-type1") .addStoredField("field1").addStoredField("_routing") .get(); @@ -670,7 +669,7 @@ public void testSearchFieldsNonLeafField() throws Exception { .setRefreshPolicy(IMMEDIATE) .get(); - assertFailures(client().prepareSearch("my-index").setTypes("my-type1").addStoredField("field1"), + assertFailures(client().prepareSearch("my-index").addStoredField("field1"), RestStatus.BAD_REQUEST, containsString("field [field1] isn't a leaf field")); } @@ -748,7 +747,8 @@ public void testSingleValueFieldDatatField() throws ExecutionException, Interrup .addMapping("type", "test_field", "type=keyword").get()); indexRandom(true, client().prepareIndex("test", "type", "1").setSource("test_field", "foobar")); refresh(); - SearchResponse searchResponse = client().prepareSearch("test").setTypes("type").setSource( + SearchResponse searchResponse = client().prepareSearch("test") + .setSource( new SearchSourceBuilder().query(QueryBuilders.matchAllQuery()).docValueField("test_field")).get(); assertHitCount(searchResponse, 1); Map fields = searchResponse.getHits().getHits()[0].getFields(); diff --git a/server/src/test/java/org/elasticsearch/search/geo/GeoDistanceIT.java b/server/src/test/java/org/elasticsearch/search/geo/GeoDistanceIT.java index a8ef8c10116c4..f6803488d9a59 100644 --- a/server/src/test/java/org/elasticsearch/search/geo/GeoDistanceIT.java +++ b/server/src/test/java/org/elasticsearch/search/geo/GeoDistanceIT.java @@ -185,7 +185,6 @@ public void testGeoDistanceAggregation() throws IOException { String name = "TestPosition"; search.setQuery(QueryBuilders.matchAllQuery()) - .setTypes("type1") .addAggregation(AggregationBuilders.geoDistance(name, new GeoPoint(tgt_lat, tgt_lon)) .field("location") .unit(DistanceUnit.MILES) diff --git a/server/src/test/java/org/elasticsearch/search/geo/GeoShapeQueryTests.java b/server/src/test/java/org/elasticsearch/search/geo/GeoShapeQueryTests.java index ef6bea10d749d..f9f2d8525fb4c 100644 --- a/server/src/test/java/org/elasticsearch/search/geo/GeoShapeQueryTests.java +++ b/server/src/test/java/org/elasticsearch/search/geo/GeoShapeQueryTests.java @@ -123,7 +123,7 @@ public void testIndexPointsFilterRectangle() throws Exception { EnvelopeBuilder shape = new EnvelopeBuilder(new Coordinate(-45, 45), new Coordinate(45, -45)); - SearchResponse searchResponse = client().prepareSearch("test").setTypes("type1") + SearchResponse searchResponse = client().prepareSearch("test") .setQuery(geoIntersectionQuery("location", shape)) .get(); @@ -132,7 +132,7 @@ public void testIndexPointsFilterRectangle() throws Exception { assertThat(searchResponse.getHits().getHits().length, equalTo(1)); assertThat(searchResponse.getHits().getAt(0).getId(), equalTo("1")); - searchResponse = client().prepareSearch("test").setTypes("type1") + searchResponse = client().prepareSearch("test") .setQuery(geoShapeQuery("location", shape)) .get(); @@ -168,7 +168,7 @@ public void testEdgeCases() throws Exception { // This search would fail if both geoshape indexing and geoshape filtering // used the bottom-level optimization in SpatialPrefixTree#recursiveGetNodes. - SearchResponse searchResponse = client().prepareSearch("test").setTypes("type1") + SearchResponse searchResponse = client().prepareSearch("test") .setQuery(geoIntersectionQuery("location", query)) .get(); @@ -626,7 +626,7 @@ public void testPointsOnly() throws Exception { } // test that point was inserted - SearchResponse response = client().prepareSearch("geo_points_only").setTypes("type1") + SearchResponse response = client().prepareSearch("geo_points_only") .setQuery(geoIntersectionQuery("location", shape)) .get(); @@ -660,7 +660,7 @@ public void testPointsOnlyExplicit() throws Exception { .setRefreshPolicy(IMMEDIATE).get(); // test that point was inserted - SearchResponse response = client().prepareSearch("geo_points_only").setTypes("type1") + SearchResponse response = client().prepareSearch("geo_points_only") .setQuery(matchAllQuery()) .get(); diff --git a/server/src/test/java/org/elasticsearch/search/internal/ShardSearchTransportRequestTests.java b/server/src/test/java/org/elasticsearch/search/internal/ShardSearchTransportRequestTests.java index fa08a12485776..f4211ed32efe0 100644 --- a/server/src/test/java/org/elasticsearch/search/internal/ShardSearchTransportRequestTests.java +++ b/server/src/test/java/org/elasticsearch/search/internal/ShardSearchTransportRequestTests.java @@ -61,7 +61,6 @@ public void testSerialization() throws Exception { assertEquals(deserializedRequest.scroll(), shardSearchTransportRequest.scroll()); assertEquals(deserializedRequest.getAliasFilter(), shardSearchTransportRequest.getAliasFilter()); assertArrayEquals(deserializedRequest.indices(), shardSearchTransportRequest.indices()); - assertArrayEquals(deserializedRequest.types(), shardSearchTransportRequest.types()); assertEquals(deserializedRequest.indicesOptions(), shardSearchTransportRequest.indicesOptions()); assertEquals(deserializedRequest.nowInMillis(), shardSearchTransportRequest.nowInMillis()); assertEquals(deserializedRequest.source(), shardSearchTransportRequest.source()); diff --git a/server/src/test/java/org/elasticsearch/search/lookup/LeafDocLookupTests.java b/server/src/test/java/org/elasticsearch/search/lookup/LeafDocLookupTests.java index fca61bf2564b9..1f4253825667a 100644 --- a/server/src/test/java/org/elasticsearch/search/lookup/LeafDocLookupTests.java +++ b/server/src/test/java/org/elasticsearch/search/lookup/LeafDocLookupTests.java @@ -26,7 +26,6 @@ import org.elasticsearch.test.ESTestCase; import org.junit.Before; -import static org.elasticsearch.search.lookup.LeafDocLookup.TYPES_DEPRECATION_MESSAGE; import static org.mockito.AdditionalAnswers.returnsFirstArg; import static org.mockito.Matchers.anyObject; import static org.mockito.Mockito.doReturn; @@ -46,7 +45,6 @@ public void setUp() throws Exception { when(fieldType.valueForDisplay(anyObject())).then(returnsFirstArg()); MapperService mapperService = mock(MapperService.class); - when(mapperService.fullName("_type")).thenReturn(fieldType); when(mapperService.fullName("field")).thenReturn(fieldType); when(mapperService.fullName("alias")).thenReturn(fieldType); @@ -61,7 +59,6 @@ public void setUp() throws Exception { docLookup = new LeafDocLookup(mapperService, ignored -> fieldData, - new String[] { "type" }, null); } @@ -74,10 +71,4 @@ public void testLookupWithFieldAlias() { ScriptDocValues fetchedDocValues = docLookup.get("alias"); assertEquals(docValues, fetchedDocValues); } - - public void testTypesDeprecation() { - ScriptDocValues fetchedDocValues = docLookup.get("_type"); - assertEquals(docValues, fetchedDocValues); - assertWarnings(TYPES_DEPRECATION_MESSAGE); - } } diff --git a/server/src/test/java/org/elasticsearch/search/lookup/LeafFieldsLookupTests.java b/server/src/test/java/org/elasticsearch/search/lookup/LeafFieldsLookupTests.java index 72bd6d1fe2c87..6683adb9185f2 100644 --- a/server/src/test/java/org/elasticsearch/search/lookup/LeafFieldsLookupTests.java +++ b/server/src/test/java/org/elasticsearch/search/lookup/LeafFieldsLookupTests.java @@ -66,9 +66,7 @@ public void setUp() throws Exception { return null; }).when(leafReader).document(anyInt(), any(StoredFieldVisitor.class)); - fieldsLookup = new LeafFieldsLookup(mapperService, - new String[] { "type" }, - leafReader); + fieldsLookup = new LeafFieldsLookup(mapperService, leafReader); } public void testBasicLookup() { diff --git a/server/src/test/java/org/elasticsearch/search/morelikethis/MoreLikeThisIT.java b/server/src/test/java/org/elasticsearch/search/morelikethis/MoreLikeThisIT.java index 4492353f6f15b..dabbecf481bfb 100644 --- a/server/src/test/java/org/elasticsearch/search/morelikethis/MoreLikeThisIT.java +++ b/server/src/test/java/org/elasticsearch/search/morelikethis/MoreLikeThisIT.java @@ -478,7 +478,7 @@ public void testSimpleMoreLikeThisIds() throws Exception { Item[] items = new Item[] { new Item(null, null, "1")}; MoreLikeThisQueryBuilder queryBuilder = QueryBuilders.moreLikeThisQuery(new String[] {"text"}, null, items).include(true) .minTermFreq(1).minDocFreq(1); - SearchResponse mltResponse = client().prepareSearch().setTypes("type1").setQuery(queryBuilder).get(); + SearchResponse mltResponse = client().prepareSearch().setQuery(queryBuilder).get(); assertHitCount(mltResponse, 3L); } @@ -507,7 +507,7 @@ public void testMoreLikeThisMultiValueFields() throws Exception { MoreLikeThisQueryBuilder mltQuery = moreLikeThisQuery(new String[] {"text"}, null, new Item[] {new Item(null, null, "0")}) .minTermFreq(1).minDocFreq(1) .maxQueryTerms(max_query_terms).minimumShouldMatch("0%"); - SearchResponse response = client().prepareSearch("test").setTypes("type1") + SearchResponse response = client().prepareSearch("test") .setQuery(mltQuery).get(); assertSearchResponse(response); assertHitCount(response, max_query_terms); @@ -540,7 +540,7 @@ public void testMinimumShouldMatch() throws ExecutionException, InterruptedExcep .minDocFreq(1) .minimumShouldMatch(minimumShouldMatch); logger.info("Testing with minimum_should_match = {}", minimumShouldMatch); - SearchResponse response = client().prepareSearch("test").setTypes("type1") + SearchResponse response = client().prepareSearch("test") .setQuery(mltQuery).get(); assertSearchResponse(response); if (minimumShouldMatch.equals("0%")) { @@ -572,7 +572,7 @@ public void testMoreLikeThisArtificialDocs() throws Exception { .minDocFreq(0) .maxQueryTerms(100) .minimumShouldMatch("100%"); // strict all terms must match! - SearchResponse response = client().prepareSearch("test").setTypes("type1") + SearchResponse response = client().prepareSearch("test") .setQuery(mltQuery).get(); assertSearchResponse(response); assertHitCount(response, 1); @@ -601,7 +601,7 @@ public void testMoreLikeThisMalformedArtificialDocs() throws Exception { .minTermFreq(0) .minDocFreq(0) .minimumShouldMatch("0%"); - SearchResponse response = client().prepareSearch("test").setTypes("type1") + SearchResponse response = client().prepareSearch("test") .setQuery(mltQuery).get(); assertSearchResponse(response); assertHitCount(response, 0); @@ -612,7 +612,7 @@ public void testMoreLikeThisMalformedArtificialDocs() throws Exception { .minTermFreq(0) .minDocFreq(0) .minimumShouldMatch("0%"); - response = client().prepareSearch("test").setTypes("type1") + response = client().prepareSearch("test") .setQuery(mltQuery).get(); assertSearchResponse(response); assertHitCount(response, 0); @@ -627,7 +627,7 @@ public void testMoreLikeThisMalformedArtificialDocs() throws Exception { .minTermFreq(0) .minDocFreq(0) .minimumShouldMatch("100%"); // strict all terms must match but date is ignored - response = client().prepareSearch("test").setTypes("type1") + response = client().prepareSearch("test") .setQuery(mltQuery).get(); assertSearchResponse(response); assertHitCount(response, 1); @@ -658,7 +658,7 @@ public void testMoreLikeThisUnlike() throws ExecutionException, InterruptedExcep .minDocFreq(0) .maxQueryTerms(100) .minimumShouldMatch("0%"); - SearchResponse response = client().prepareSearch("test").setTypes("type1") + SearchResponse response = client().prepareSearch("test") .setQuery(mltQuery).get(); assertSearchResponse(response); assertHitCount(response, numFields); @@ -675,7 +675,7 @@ public void testMoreLikeThisUnlike() throws ExecutionException, InterruptedExcep .include(true) .minimumShouldMatch("0%"); - response = client().prepareSearch("test").setTypes("type1").setQuery(mltQuery).get(); + response = client().prepareSearch("test").setQuery(mltQuery).get(); assertSearchResponse(response); assertHitCount(response, numFields - (i + 1)); } @@ -702,7 +702,7 @@ public void testSelectFields() throws IOException, ExecutionException, Interrupt .minDocFreq(0) .include(true) .minimumShouldMatch("1%"); - SearchResponse response = client().prepareSearch("test").setTypes("type1") + SearchResponse response = client().prepareSearch("test") .setQuery(mltQuery).get(); assertSearchResponse(response); assertHitCount(response, 2); @@ -712,7 +712,7 @@ public void testSelectFields() throws IOException, ExecutionException, Interrupt .minDocFreq(0) .include(true) .minimumShouldMatch("1%"); - response = client().prepareSearch("test").setTypes("type1") + response = client().prepareSearch("test") .setQuery(mltQuery).get(); assertSearchResponse(response); assertHitCount(response, 1); diff --git a/server/src/test/java/org/elasticsearch/search/nested/SimpleNestedIT.java b/server/src/test/java/org/elasticsearch/search/nested/SimpleNestedIT.java index 5feb341fd6943..1937bb2a847fd 100644 --- a/server/src/test/java/org/elasticsearch/search/nested/SimpleNestedIT.java +++ b/server/src/test/java/org/elasticsearch/search/nested/SimpleNestedIT.java @@ -383,7 +383,7 @@ public void testSimpleNestedSorting() throws Exception { refresh(); SearchResponse searchResponse = client().prepareSearch("test") - .setTypes("type1") + .setQuery(QueryBuilders.matchAllQuery()) .addSort(SortBuilders.fieldSort("nested1.field1").order(SortOrder.ASC).setNestedPath("nested1")) .get(); @@ -397,7 +397,7 @@ public void testSimpleNestedSorting() throws Exception { assertThat(searchResponse.getHits().getHits()[2].getSortValues()[0].toString(), equalTo("4")); searchResponse = client().prepareSearch("test") - .setTypes("type1") + .setQuery(QueryBuilders.matchAllQuery()) .addSort(SortBuilders.fieldSort("nested1.field1").order(SortOrder.DESC).setNestedPath("nested1")) .get(); @@ -474,7 +474,7 @@ public void testSimpleNestedSortingWithNestedFilterMissing() throws Exception { .endObject()).get(); refresh(); - SearchRequestBuilder searchRequestBuilder = client().prepareSearch("test").setTypes("type1") + SearchRequestBuilder searchRequestBuilder = client().prepareSearch("test") .setQuery(QueryBuilders.matchAllQuery()) .addSort(SortBuilders.fieldSort("nested1.field1").setNestedPath("nested1") .setNestedFilter(termQuery("nested1.field2", true)).missing(10).order(SortOrder.ASC)); @@ -493,7 +493,7 @@ public void testSimpleNestedSortingWithNestedFilterMissing() throws Exception { assertThat(searchResponse.getHits().getHits()[2].getId(), equalTo("3")); assertThat(searchResponse.getHits().getHits()[2].getSortValues()[0].toString(), equalTo("10")); - searchRequestBuilder = client().prepareSearch("test").setTypes("type1").setQuery(QueryBuilders.matchAllQuery()) + searchRequestBuilder = client().prepareSearch("test").setQuery(QueryBuilders.matchAllQuery()) .addSort(SortBuilders.fieldSort("nested1.field1").setNestedPath("nested1") .setNestedFilter(termQuery("nested1.field2", true)).missing(10).order(SortOrder.DESC)); diff --git a/server/src/test/java/org/elasticsearch/search/profile/query/QueryProfilerIT.java b/server/src/test/java/org/elasticsearch/search/profile/query/QueryProfilerIT.java index 040e16b6e957f..6b851729332e0 100644 --- a/server/src/test/java/org/elasticsearch/search/profile/query/QueryProfilerIT.java +++ b/server/src/test/java/org/elasticsearch/search/profile/query/QueryProfilerIT.java @@ -571,7 +571,6 @@ public void testPhrase() throws Exception { SearchResponse resp = client().prepareSearch() .setQuery(q) .setIndices("test") - .setTypes("type1") .setProfile(true) .setSearchType(SearchType.QUERY_THEN_FETCH) .get(); diff --git a/server/src/test/java/org/elasticsearch/search/query/SearchQueryIT.java b/server/src/test/java/org/elasticsearch/search/query/SearchQueryIT.java index 7e233b863076a..2a7eb10313c51 100644 --- a/server/src/test/java/org/elasticsearch/search/query/SearchQueryIT.java +++ b/server/src/test/java/org/elasticsearch/search/query/SearchQueryIT.java @@ -479,17 +479,6 @@ public void testDateRangeInQueryStringWithTimeZone_10477() { assertHitCount(searchResponse, 0L); } - public void testTypeFilter() throws Exception { - assertAcked(prepareCreate("test")); - indexRandom(true, client().prepareIndex("test", "type1", "1").setSource("field1", "value1"), - client().prepareIndex("test", "type1", "2").setSource("field1", "value1")); - - assertHitCount(client().prepareSearch().setTypes("type1").setQuery(matchAllQuery()).get(), 2L); - assertHitCount(client().prepareSearch().setTypes("type2").setQuery(matchAllQuery()).get(), 0L); - - assertHitCount(client().prepareSearch().setTypes("type1", "type2").setQuery(matchAllQuery()).get(), 2L); - } - public void testIdsQueryTestsIdIndexed() throws Exception { assertAcked(client().admin().indices().prepareCreate("test")); @@ -501,16 +490,6 @@ public void testIdsQueryTestsIdIndexed() throws Exception { assertHitCount(searchResponse, 2L); assertSearchHits(searchResponse, "1", "3"); - // no type - searchResponse = client().prepareSearch().setQuery(constantScoreQuery(idsQuery().addIds("1", "3"))).get(); - assertHitCount(searchResponse, 2L); - assertSearchHits(searchResponse, "1", "3"); - - searchResponse = client().prepareSearch().setQuery(idsQuery().addIds("1", "3")).get(); - assertHitCount(searchResponse, 2L); - assertSearchHits(searchResponse, "1", "3"); - - // no type searchResponse = client().prepareSearch().setQuery(idsQuery().addIds("1", "3")).get(); assertHitCount(searchResponse, 2L); assertSearchHits(searchResponse, "1", "3"); @@ -519,7 +498,7 @@ public void testIdsQueryTestsIdIndexed() throws Exception { assertHitCount(searchResponse, 0L); // repeat..., with terms - searchResponse = client().prepareSearch().setTypes("type1").setQuery(constantScoreQuery(termsQuery("_id", "1", "3"))).get(); + searchResponse = client().prepareSearch().setQuery(constantScoreQuery(termsQuery("_id", "1", "3"))).get(); assertHitCount(searchResponse, 2L); assertSearchHits(searchResponse, "1", "3"); } diff --git a/server/src/test/java/org/elasticsearch/search/query/SimpleQueryStringIT.java b/server/src/test/java/org/elasticsearch/search/query/SimpleQueryStringIT.java index 7f8ab4aa51567..13f03472923d2 100644 --- a/server/src/test/java/org/elasticsearch/search/query/SimpleQueryStringIT.java +++ b/server/src/test/java/org/elasticsearch/search/query/SimpleQueryStringIT.java @@ -233,7 +233,7 @@ public void testNestedFieldSimpleQueryString() throws IOException { assertHitCount(searchResponse, 1L); assertSearchHits(searchResponse, "1"); - searchResponse = client().prepareSearch().setTypes("type1").setQuery( + searchResponse = client().prepareSearch().setQuery( simpleQueryStringQuery("foo bar baz").field("body")).get(); assertHitCount(searchResponse, 1L); assertSearchHits(searchResponse, "1"); @@ -243,7 +243,7 @@ public void testNestedFieldSimpleQueryString() throws IOException { assertHitCount(searchResponse, 1L); assertSearchHits(searchResponse, "1"); - searchResponse = client().prepareSearch().setTypes("type1").setQuery( + searchResponse = client().prepareSearch().setQuery( simpleQueryStringQuery("foo bar baz").field("body.sub")).get(); assertHitCount(searchResponse, 1L); assertSearchHits(searchResponse, "1"); diff --git a/server/src/test/java/org/elasticsearch/search/scroll/SearchScrollIT.java b/server/src/test/java/org/elasticsearch/search/scroll/SearchScrollIT.java index 7ac48e03be688..64b621e0974a4 100644 --- a/server/src/test/java/org/elasticsearch/search/scroll/SearchScrollIT.java +++ b/server/src/test/java/org/elasticsearch/search/scroll/SearchScrollIT.java @@ -496,7 +496,7 @@ public void testStringSortMissingAscTerminates() throws Exception { refresh(); SearchResponse response = client().prepareSearch("test") - .setTypes("test") + .addSort(new FieldSortBuilder("no_field").order(SortOrder.ASC).missing("_last")) .setScroll("1m") .get(); @@ -509,7 +509,7 @@ public void testStringSortMissingAscTerminates() throws Exception { assertNoSearchHits(response); response = client().prepareSearch("test") - .setTypes("test") + .addSort(new FieldSortBuilder("no_field").order(SortOrder.ASC).missing("_first")) .setScroll("1m") .get(); diff --git a/server/src/test/java/org/elasticsearch/search/slice/SliceBuilderTests.java b/server/src/test/java/org/elasticsearch/search/slice/SliceBuilderTests.java index 6128f8d39fcf6..8802b35a694d6 100644 --- a/server/src/test/java/org/elasticsearch/search/slice/SliceBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/search/slice/SliceBuilderTests.java @@ -112,11 +112,6 @@ public ShardId shardId() { return new ShardId(new Index(indices[0], indices[0]), shardId); } - @Override - public String[] types() { - return new String[0]; - } - @Override public SearchSourceBuilder source() { return null; diff --git a/server/src/test/java/org/elasticsearch/search/sort/GeoDistanceSortBuilderTests.java b/server/src/test/java/org/elasticsearch/search/sort/GeoDistanceSortBuilderTests.java index ebe1118cc6f69..c90e6ef0dde39 100644 --- a/server/src/test/java/org/elasticsearch/search/sort/GeoDistanceSortBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/search/sort/GeoDistanceSortBuilderTests.java @@ -251,7 +251,6 @@ public void testGeoDistanceSortCanBeParsedFromGeoHash() throws IOException { " \"nested\" : {\n" + " \"filter\" : {\n" + " \"ids\" : {\n" + - " \"type\" : [ ],\n" + " \"values\" : [ ],\n" + " \"boost\" : 5.711116\n" + " }\n" + diff --git a/server/src/test/java/org/elasticsearch/search/suggest/CompletionSuggestSearchIT.java b/server/src/test/java/org/elasticsearch/search/suggest/CompletionSuggestSearchIT.java index c59b342b7c0d8..f12ce3e3b4fa2 100644 --- a/server/src/test/java/org/elasticsearch/search/suggest/CompletionSuggestSearchIT.java +++ b/server/src/test/java/org/elasticsearch/search/suggest/CompletionSuggestSearchIT.java @@ -19,6 +19,7 @@ package org.elasticsearch.search.suggest; import com.carrotsearch.randomizedtesting.generators.RandomStrings; + import org.apache.lucene.analysis.TokenStreamToAutomaton; import org.apache.lucene.search.suggest.document.ContextSuggestField; import org.apache.lucene.util.LuceneTestCase.SuppressCodecs; @@ -813,7 +814,7 @@ public void testThatSortingOnCompletionFieldReturnsUsefulException() throws Exce refresh(); SearchPhaseExecutionException e = expectThrows(SearchPhaseExecutionException.class, - () -> client().prepareSearch(INDEX).setTypes(TYPE).addSort(new FieldSortBuilder(FIELD)).get()); + () -> client().prepareSearch(INDEX).addSort(new FieldSortBuilder(FIELD)).get()); assertThat(e.status().getStatus(), is(400)); assertThat(e.toString(), containsString("Fielddata is not supported on field [" + FIELD + "] of type [completion]")); } diff --git a/server/src/test/java/org/elasticsearch/test/search/aggregations/bucket/SharedSignificantTermsTestMethods.java b/server/src/test/java/org/elasticsearch/test/search/aggregations/bucket/SharedSignificantTermsTestMethods.java index 07c2dac0e7cf5..f3e4e00e81976 100644 --- a/server/src/test/java/org/elasticsearch/test/search/aggregations/bucket/SharedSignificantTermsTestMethods.java +++ b/server/src/test/java/org/elasticsearch/test/search/aggregations/bucket/SharedSignificantTermsTestMethods.java @@ -58,7 +58,7 @@ public static void aggregateAndCheckFromSeveralShards(ESIntegTestCase testCase) } private static void checkSignificantTermsAggregationCorrect(ESIntegTestCase testCase) { - SearchResponse response = client().prepareSearch(INDEX_NAME).setTypes(DOC_TYPE).addAggregation( + SearchResponse response = client().prepareSearch(INDEX_NAME).addAggregation( terms("class").field(CLASS_FIELD).subAggregation(significantTerms("sig_terms").field(TEXT_FIELD))) .execute().actionGet(); assertSearchResponse(response); diff --git a/server/src/test/resources/org/elasticsearch/action/search/simple-msearch1.json b/server/src/test/resources/org/elasticsearch/action/search/simple-msearch1.json index eefec530e1f71..6db9e4b996045 100644 --- a/server/src/test/resources/org/elasticsearch/action/search/simple-msearch1.json +++ b/server/src/test/resources/org/elasticsearch/action/search/simple-msearch1.json @@ -1,6 +1,6 @@ {"index":"test", "ignore_unavailable" : true, "expand_wildcards" : "open,closed"}} {"query" : {"match_all" :{}}} -{"index" : "test", "type" : "type1", "expand_wildcards" : ["open", "closed"]} +{"index" : "test", "expand_wildcards" : ["open", "closed"]} {"query" : {"match_all" :{}}} {"index":"test", "ignore_unavailable" : false, "expand_wildcards" : ["open"]}} {"query" : {"match_all" :{}}} diff --git a/server/src/test/resources/org/elasticsearch/action/search/simple-msearch2.json b/server/src/test/resources/org/elasticsearch/action/search/simple-msearch2.json index 79330d80f7267..ef82fee039638 100644 --- a/server/src/test/resources/org/elasticsearch/action/search/simple-msearch2.json +++ b/server/src/test/resources/org/elasticsearch/action/search/simple-msearch2.json @@ -1,6 +1,6 @@ {"index":"test"} {"query" : {"match_all" : {}}} -{"index" : "test", "type" : "type1"} +{"index" : "test"} {"query" : {"match_all" : {}}} {} {"query" : {"match_all" : {}}} diff --git a/server/src/test/resources/org/elasticsearch/action/search/simple-msearch3.json b/server/src/test/resources/org/elasticsearch/action/search/simple-msearch3.json index a6b52fd3bf93e..f7ff9a2b3f991 100644 --- a/server/src/test/resources/org/elasticsearch/action/search/simple-msearch3.json +++ b/server/src/test/resources/org/elasticsearch/action/search/simple-msearch3.json @@ -1,8 +1,8 @@ {"index":["test0", "test1"]} {"query" : {"match_all" : {}}} -{"index" : "test2,test3", "type" : "type1"} +{"index" : "test2,test3"} {"query" : {"match_all" : {}}} -{"index" : ["test4", "test1"], "type" : [ "type2", "type1" ]} +{"index" : ["test4", "test1"]} {"query" : {"match_all" : {}}} {"search_type" : "dfs_query_then_fetch"} {"query" : {"match_all" : {}}} diff --git a/server/src/test/resources/org/elasticsearch/action/search/simple-msearch4.json b/server/src/test/resources/org/elasticsearch/action/search/simple-msearch4.json index 844d8bea1f8ee..4dd2cfde569dd 100644 --- a/server/src/test/resources/org/elasticsearch/action/search/simple-msearch4.json +++ b/server/src/test/resources/org/elasticsearch/action/search/simple-msearch4.json @@ -1,6 +1,6 @@ {"index":["test0", "test1"], "request_cache": true} {"query" : {"match_all" : {}}} -{"index" : "test2,test3", "type" : "type1", "preference": "_local"} +{"index" : "test2,test3", "preference": "_local"} {"query" : {"match_all" : {}}} -{"index" : ["test4", "test1"], "type" : [ "type2", "type1" ], "routing": "123"} +{"index" : ["test4", "test1"], "routing": "123"} {"query" : {"match_all" : {}}} diff --git a/test/framework/src/main/java/org/elasticsearch/search/RandomSearchRequestGenerator.java b/test/framework/src/main/java/org/elasticsearch/search/RandomSearchRequestGenerator.java index df554ea42de28..6860bee1d0af7 100644 --- a/test/framework/src/main/java/org/elasticsearch/search/RandomSearchRequestGenerator.java +++ b/test/framework/src/main/java/org/elasticsearch/search/RandomSearchRequestGenerator.java @@ -95,9 +95,6 @@ public static SearchRequest randomSearchRequest(Supplier ra if (randomBoolean()) { searchRequest.indicesOptions(IndicesOptions.fromOptions(randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean())); } - if (randomBoolean()) { - searchRequest.types(generateRandomStringArray(10, 10, false, false)); - } if (randomBoolean()) { searchRequest.preference(randomAlphaOfLengthBetween(3, 10)); } diff --git a/test/framework/src/main/java/org/elasticsearch/search/aggregations/AggregatorTestCase.java b/test/framework/src/main/java/org/elasticsearch/search/aggregations/AggregatorTestCase.java index 597c2a5ac849b..0ee965403bc7f 100644 --- a/test/framework/src/main/java/org/elasticsearch/search/aggregations/AggregatorTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/search/aggregations/AggregatorTestCase.java @@ -153,7 +153,7 @@ protected AggregatorFactory createAggregatorFactory(Query query, when(searchContext.getForField(Mockito.any(MappedFieldType.class))) .thenAnswer(invocationOnMock -> ifds.getForField((MappedFieldType) invocationOnMock.getArguments()[0])); - SearchLookup searchLookup = new SearchLookup(mapperService, ifds::getForField, new String[]{TYPE_NAME}); + SearchLookup searchLookup = new SearchLookup(mapperService, ifds::getForField); when(searchContext.lookup()).thenReturn(searchLookup); QueryShardContext queryShardContext = queryShardContextMock(mapperService); diff --git a/x-pack/plugin/graph/src/main/java/org/elasticsearch/xpack/graph/action/TransportGraphExploreAction.java b/x-pack/plugin/graph/src/main/java/org/elasticsearch/xpack/graph/action/TransportGraphExploreAction.java index d05c075d26b65..e6c7dd59efd9e 100644 --- a/x-pack/plugin/graph/src/main/java/org/elasticsearch/xpack/graph/action/TransportGraphExploreAction.java +++ b/x-pack/plugin/graph/src/main/java/org/elasticsearch/xpack/graph/action/TransportGraphExploreAction.java @@ -181,7 +181,7 @@ synchronized void expand() { currentHopNumber++; Hop currentHop = request.getHop(currentHopNumber); - final SearchRequest searchRequest = new SearchRequest(request.indices()).types(request.types()).indicesOptions( + final SearchRequest searchRequest = new SearchRequest(request.indices()).indicesOptions( request.indicesOptions()); if (request.routing() != null) { searchRequest.routing(request.routing()); @@ -568,7 +568,7 @@ private void addBigOrClause(Map> lastHopFindings, BoolQueryB public synchronized void start() { try { - final SearchRequest searchRequest = new SearchRequest(request.indices()).types(request.types()).indicesOptions( + final SearchRequest searchRequest = new SearchRequest(request.indices()).indicesOptions( request.indicesOptions()); if (request.routing() != null) { searchRequest.routing(request.routing()); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/BatchedDocumentsIteratorTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/BatchedDocumentsIteratorTests.java index 0024eb5f8c648..2ff21243979be 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/BatchedDocumentsIteratorTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/BatchedDocumentsIteratorTests.java @@ -138,7 +138,6 @@ private void assertSearchRequest() { SearchRequest searchRequest = searchRequests.get(0); assertThat(searchRequest.indices(), equalTo(new String[] {INDEX_NAME})); assertThat(searchRequest.scroll().keepAlive(), equalTo(TimeValue.timeValueMinutes(5))); - assertThat(searchRequest.types().length, equalTo(0)); assertThat(searchRequest.source().query(), equalTo(QueryBuilders.matchAllQuery())); assertThat(searchRequest.source().trackTotalHitsUpTo(), is(SearchContext.TRACK_TOTAL_HITS_ACCURATE)); } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/MockClientBuilder.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/MockClientBuilder.java index 999c36c7b4f86..4c43baa35d568 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/MockClientBuilder.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/MockClientBuilder.java @@ -206,7 +206,6 @@ public MockClientBuilder createIndexRequest(ArgumentCaptor r @SuppressWarnings("unchecked") public MockClientBuilder prepareSearchExecuteListener(String index, SearchResponse response) { SearchRequestBuilder builder = mock(SearchRequestBuilder.class); - when(builder.setTypes(anyString())).thenReturn(builder); when(builder.addSort(any(SortBuilder.class))).thenReturn(builder); when(builder.setFetchSource(anyBoolean())).thenReturn(builder); when(builder.setScroll(anyString())).thenReturn(builder); @@ -250,10 +249,9 @@ public Void answer(InvocationOnMock invocationOnMock) throws Throwable { return this; } - public MockClientBuilder prepareSearch(String index, String type, int from, int size, SearchResponse response, + public MockClientBuilder prepareSearch(String index, int from, int size, SearchResponse response, ArgumentCaptor filter) { SearchRequestBuilder builder = mock(SearchRequestBuilder.class); - when(builder.setTypes(eq(type))).thenReturn(builder); when(builder.addSort(any(SortBuilder.class))).thenReturn(builder); when(builder.setQuery(filter.capture())).thenReturn(builder); when(builder.setPostFilter(filter.capture())).thenReturn(builder); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/StateStreamerTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/StateStreamerTests.java index 1629a8bcdbad5..9337095974eba 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/StateStreamerTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/StateStreamerTests.java @@ -106,7 +106,6 @@ private static SearchResponse createSearchResponse(List> sou private static SearchRequestBuilder prepareSearchBuilder(SearchResponse response, QueryBuilder queryBuilder) { SearchRequestBuilder builder = mock(SearchRequestBuilder.class); - when(builder.setTypes(any())).thenReturn(builder); when(builder.addSort(any(SortBuilder.class))).thenReturn(builder); when(builder.setQuery(queryBuilder)).thenReturn(builder); when(builder.setPostFilter(any())).thenReturn(builder); diff --git a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/TransportRollupSearchAction.java b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/TransportRollupSearchAction.java index 2a1308353d6ad..9542f2129bcbf 100644 --- a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/TransportRollupSearchAction.java +++ b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/TransportRollupSearchAction.java @@ -161,7 +161,7 @@ static MultiSearchRequest createMSearchRequest(SearchRequest request, NamedWrite // Note: we can't apply any query rewriting or filtering on the query because there // are no validated caps, so we have no idea what job is intended here. The only thing // this affects is doc count, since hits and aggs will both be empty it doesn't really matter. - msearch.add(new SearchRequest(context.getRollupIndices(), request.source()).types(request.types())); + msearch.add(new SearchRequest(context.getRollupIndices(), request.source())); return msearch; } @@ -208,7 +208,7 @@ static MultiSearchRequest createMSearchRequest(SearchRequest request, NamedWrite new long[]{Rollup.ROLLUP_VERSION_V1, Rollup.ROLLUP_VERSION_V2}))); // And add a new msearch per JobID - msearch.add(new SearchRequest(context.getRollupIndices(), copiedSource).types(request.types())); + msearch.add(new SearchRequest(context.getRollupIndices(), copiedSource)); } return msearch; diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/DocumentLevelSecurityTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/DocumentLevelSecurityTests.java index dc8f93a0cdcf7..d23ca8e11428e 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/DocumentLevelSecurityTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/DocumentLevelSecurityTests.java @@ -345,8 +345,8 @@ public void testMSearch() throws Exception { MultiSearchResponse response = client() .filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user1", USERS_PASSWD))) .prepareMultiSearch() - .add(client().prepareSearch("test1").setTypes("type1").setQuery(QueryBuilders.matchAllQuery())) - .add(client().prepareSearch("test2").setTypes("type1").setQuery(QueryBuilders.matchAllQuery())) + .add(client().prepareSearch("test1").setQuery(QueryBuilders.matchAllQuery())) + .add(client().prepareSearch("test2").setQuery(QueryBuilders.matchAllQuery())) .get(); assertFalse(response.getResponses()[0].isFailure()); assertThat(response.getResponses()[0].getResponse().getHits().getTotalHits().value, is(1L)); @@ -363,8 +363,8 @@ public void testMSearch() throws Exception { response = client() .filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user2", USERS_PASSWD))) .prepareMultiSearch() - .add(client().prepareSearch("test1").setTypes("type1").setQuery(QueryBuilders.matchAllQuery())) - .add(client().prepareSearch("test2").setTypes("type1").setQuery(QueryBuilders.matchAllQuery())) + .add(client().prepareSearch("test1").setQuery(QueryBuilders.matchAllQuery())) + .add(client().prepareSearch("test2").setQuery(QueryBuilders.matchAllQuery())) .get(); assertFalse(response.getResponses()[0].isFailure()); assertThat(response.getResponses()[0].getResponse().getHits().getTotalHits().value, is(1L)); @@ -381,9 +381,9 @@ public void testMSearch() throws Exception { response = client() .filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user3", USERS_PASSWD))) .prepareMultiSearch() - .add(client().prepareSearch("test1").setTypes("type1").addSort(SortBuilders.fieldSort("id").sortMode(SortMode.MIN)) + .add(client().prepareSearch("test1").addSort(SortBuilders.fieldSort("id").sortMode(SortMode.MIN)) .setQuery(QueryBuilders.matchAllQuery())) - .add(client().prepareSearch("test2").setTypes("type1").addSort(SortBuilders.fieldSort("id").sortMode(SortMode.MIN)) + .add(client().prepareSearch("test2").addSort(SortBuilders.fieldSort("id").sortMode(SortMode.MIN)) .setQuery(QueryBuilders.matchAllQuery())) .get(); assertFalse(response.getResponses()[0].isFailure()); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/FieldLevelSecurityTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/FieldLevelSecurityTests.java index c5ff8242b6bec..c10dc51226ec2 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/FieldLevelSecurityTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/FieldLevelSecurityTests.java @@ -539,8 +539,8 @@ public void testMSearchApi() throws Exception { MultiSearchResponse response = client() .filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user1", USERS_PASSWD))) .prepareMultiSearch() - .add(client().prepareSearch("test1").setTypes("type1").setQuery(QueryBuilders.matchAllQuery())) - .add(client().prepareSearch("test2").setTypes("type1").setQuery(QueryBuilders.matchAllQuery())) + .add(client().prepareSearch("test1").setQuery(QueryBuilders.matchAllQuery())) + .add(client().prepareSearch("test2").setQuery(QueryBuilders.matchAllQuery())) .get(); assertFalse(response.getResponses()[0].isFailure()); assertThat(response.getResponses()[0].getResponse().getHits().getTotalHits().value, is(1L)); @@ -554,8 +554,8 @@ public void testMSearchApi() throws Exception { response = client() .filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user2", USERS_PASSWD))) .prepareMultiSearch() - .add(client().prepareSearch("test1").setTypes("type1").setQuery(QueryBuilders.matchAllQuery())) - .add(client().prepareSearch("test2").setTypes("type1").setQuery(QueryBuilders.matchAllQuery())) + .add(client().prepareSearch("test1").setQuery(QueryBuilders.matchAllQuery())) + .add(client().prepareSearch("test2").setQuery(QueryBuilders.matchAllQuery())) .get(); assertFalse(response.getResponses()[0].isFailure()); assertThat(response.getResponses()[0].getResponse().getHits().getTotalHits().value, is(1L)); @@ -569,8 +569,8 @@ public void testMSearchApi() throws Exception { response = client() .filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user3", USERS_PASSWD))) .prepareMultiSearch() - .add(client().prepareSearch("test1").setTypes("type1").setQuery(QueryBuilders.matchAllQuery())) - .add(client().prepareSearch("test2").setTypes("type1").setQuery(QueryBuilders.matchAllQuery())) + .add(client().prepareSearch("test1").setQuery(QueryBuilders.matchAllQuery())) + .add(client().prepareSearch("test2").setQuery(QueryBuilders.matchAllQuery())) .get(); assertFalse(response.getResponses()[0].isFailure()); assertThat(response.getResponses()[0].getResponse().getHits().getTotalHits().value, is(1L)); @@ -586,8 +586,8 @@ public void testMSearchApi() throws Exception { response = client() .filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user4", USERS_PASSWD))) .prepareMultiSearch() - .add(client().prepareSearch("test1").setTypes("type1").setQuery(QueryBuilders.matchAllQuery())) - .add(client().prepareSearch("test2").setTypes("type1").setQuery(QueryBuilders.matchAllQuery())) + .add(client().prepareSearch("test1").setQuery(QueryBuilders.matchAllQuery())) + .add(client().prepareSearch("test2").setQuery(QueryBuilders.matchAllQuery())) .get(); assertFalse(response.getResponses()[0].isFailure()); assertThat(response.getResponses()[0].getResponse().getHits().getTotalHits().value, is(1L)); @@ -599,8 +599,8 @@ public void testMSearchApi() throws Exception { response = client() .filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user5", USERS_PASSWD))) .prepareMultiSearch() - .add(client().prepareSearch("test1").setTypes("type1").setQuery(QueryBuilders.matchAllQuery())) - .add(client().prepareSearch("test2").setTypes("type1").setQuery(QueryBuilders.matchAllQuery())) + .add(client().prepareSearch("test1").setQuery(QueryBuilders.matchAllQuery())) + .add(client().prepareSearch("test2").setQuery(QueryBuilders.matchAllQuery())) .get(); assertFalse(response.getResponses()[0].isFailure()); assertThat(response.getResponses()[0].getResponse().getHits().getTotalHits().value, is(1L)); @@ -618,8 +618,8 @@ public void testMSearchApi() throws Exception { response = client() .filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user6", USERS_PASSWD))) .prepareMultiSearch() - .add(client().prepareSearch("test1").setTypes("type1").setQuery(QueryBuilders.matchAllQuery())) - .add(client().prepareSearch("test2").setTypes("type1").setQuery(QueryBuilders.matchAllQuery())) + .add(client().prepareSearch("test1").setQuery(QueryBuilders.matchAllQuery())) + .add(client().prepareSearch("test2").setQuery(QueryBuilders.matchAllQuery())) .get(); assertFalse(response.getResponses()[0].isFailure()); assertThat(response.getResponses()[0].getResponse().getHits().getTotalHits().value, is(1L)); @@ -637,8 +637,8 @@ public void testMSearchApi() throws Exception { response = client() .filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user7", USERS_PASSWD))) .prepareMultiSearch() - .add(client().prepareSearch("test1").setTypes("type1").setQuery(QueryBuilders.matchAllQuery())) - .add(client().prepareSearch("test2").setTypes("type1").setQuery(QueryBuilders.matchAllQuery())) + .add(client().prepareSearch("test1").setQuery(QueryBuilders.matchAllQuery())) + .add(client().prepareSearch("test2").setQuery(QueryBuilders.matchAllQuery())) .get(); assertFalse(response.getResponses()[0].isFailure()); assertThat(response.getResponses()[0].getResponse().getHits().getTotalHits().value, is(1L)); @@ -656,8 +656,8 @@ public void testMSearchApi() throws Exception { response = client() .filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user8", USERS_PASSWD))) .prepareMultiSearch() - .add(client().prepareSearch("test1").setTypes("type1").setQuery(QueryBuilders.matchAllQuery())) - .add(client().prepareSearch("test2").setTypes("type1").setQuery(QueryBuilders.matchAllQuery())) + .add(client().prepareSearch("test1").setQuery(QueryBuilders.matchAllQuery())) + .add(client().prepareSearch("test2").setQuery(QueryBuilders.matchAllQuery())) .get(); assertFalse(response.getResponses()[0].isFailure()); assertThat(response.getResponses()[0].getResponse().getHits().getTotalHits().value, is(1L)); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/SecurityCachePermissionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/SecurityCachePermissionTests.java index 8a570aac28663..c5169f9344c52 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/SecurityCachePermissionTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/SecurityCachePermissionTests.java @@ -52,7 +52,7 @@ public void loadData() { } public void testThatTermsFilterQueryDoesntLeakData() { - SearchResponse response = client().prepareSearch("data").setTypes("a").setQuery(QueryBuilders.constantScoreQuery( + SearchResponse response = client().prepareSearch("data").setQuery(QueryBuilders.constantScoreQuery( QueryBuilders.termsLookupQuery("token", new TermsLookup("tokens", "tokens", "1", "tokens")))) .execute().actionGet(); assertThat(response.isTimedOut(), is(false)); @@ -62,7 +62,8 @@ public void testThatTermsFilterQueryDoesntLeakData() { try { response = client().filterWithHeader(singletonMap("Authorization", basicAuthHeaderValue(READ_ONE_IDX_USER, SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING))) - .prepareSearch("data").setTypes("a").setQuery(QueryBuilders.constantScoreQuery( + .prepareSearch("data") + .setQuery(QueryBuilders.constantScoreQuery( QueryBuilders.termsLookupQuery("token", new TermsLookup("tokens", "tokens", "1", "tokens")))) .execute().actionGet(); fail("search phase exception should have been thrown! response was:\n" + response.toString()); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/SecurityClearScrollTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/SecurityClearScrollTests.java index 4c189e3e7f3da..2bc7a43937d36 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/SecurityClearScrollTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/SecurityClearScrollTests.java @@ -14,8 +14,8 @@ import org.elasticsearch.action.search.SearchPhaseExecutionException; import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.xcontent.XContentType; -import org.elasticsearch.xpack.core.security.SecurityField; import org.elasticsearch.test.SecurityIntegTestCase; +import org.elasticsearch.xpack.core.security.SecurityField; import org.junit.After; import org.junit.Before; @@ -25,9 +25,9 @@ import java.util.Map; import static org.elasticsearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertThrows; import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.BASIC_AUTH_HEADER; import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertThrows; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.is; @@ -75,7 +75,7 @@ public void indexRandomDocuments() { MultiSearchRequestBuilder multiSearchRequestBuilder = client().prepareMultiSearch(); int count = randomIntBetween(5, 15); for (int i = 0; i < count; i++) { - multiSearchRequestBuilder.add(client().prepareSearch("index").setTypes("type").setScroll("10m").setSize(1)); + multiSearchRequestBuilder.add(client().prepareSearch("index").setScroll("10m").setSize(1)); } MultiSearchResponse multiSearchResponse = multiSearchRequestBuilder.get(); scrollIds = getScrollIds(multiSearchResponse); diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/support/search/WatcherSearchTemplateService.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/support/search/WatcherSearchTemplateService.java index d86a63948c7ac..f3ae021eb560f 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/support/search/WatcherSearchTemplateService.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/support/search/WatcherSearchTemplateService.java @@ -57,9 +57,6 @@ public String renderTemplate(Script source, WatchExecutionContext ctx, Payload p public SearchRequest toSearchRequest(WatcherSearchTemplateRequest request) throws IOException { SearchRequest searchRequest = new SearchRequest(request.getIndices()); - if (request.getTypes() != null) { - searchRequest.types(request.getTypes()); - } searchRequest.searchType(request.getSearchType()); searchRequest.indicesOptions(request.getIndicesOptions()); SearchSourceBuilder sourceBuilder = SearchSourceBuilder.searchSource(); From 5b5d00198280586a929773bfa7edc9e9e3f2ab1c Mon Sep 17 00:00:00 2001 From: Hendrik Muhs Date: Wed, 29 May 2019 11:13:37 +0200 Subject: [PATCH 167/224] [ML-DataFrame] rewrite start and stop to answer with acknowledged (#42589) rewrite start and stop to answer with acknowledged fixes #42450 --- .../StartDataFrameTransformResponse.java | 12 ++++------ .../StopDataFrameTransformResponse.java | 13 ++++------ .../client/DataFrameTransformIT.java | 6 ++--- .../DataFrameTransformDocumentationIT.java | 4 ++-- .../data-frames/apis/start-transform.asciidoc | 2 +- .../data-frames/apis/stop-transform.asciidoc | 2 +- .../action/StartDataFrameTransformAction.java | 20 ++++++++-------- .../action/StopDataFrameTransformAction.java | 24 +++++++++---------- .../integration/DataFrameTransformIT.java | 2 +- .../integration/DataFrameRestTestCase.java | 4 ++-- ...TransportStopDataFrameTransformAction.java | 4 ++-- .../test/data_frame/transforms_start_stop.yml | 24 +++++++++---------- 12 files changed, 56 insertions(+), 61 deletions(-) diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/StartDataFrameTransformResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/StartDataFrameTransformResponse.java index f11ecd096c16e..9b358ffdfa151 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/StartDataFrameTransformResponse.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/StartDataFrameTransformResponse.java @@ -30,21 +30,19 @@ public class StartDataFrameTransformResponse extends AcknowledgedTasksResponse { - private static final String STARTED = "started"; + private static final String ACKNOWLEDGED = "acknowledged"; private static final ConstructingObjectParser PARSER = - AcknowledgedTasksResponse.generateParser("start_data_frame_transform_response", StartDataFrameTransformResponse::new, STARTED); + AcknowledgedTasksResponse.generateParser("start_data_frame_transform_response", StartDataFrameTransformResponse::new, + ACKNOWLEDGED); public static StartDataFrameTransformResponse fromXContent(final XContentParser parser) throws IOException { return PARSER.parse(parser, null); } - public StartDataFrameTransformResponse(boolean started, @Nullable List taskFailures, + public StartDataFrameTransformResponse(boolean acknowledged, @Nullable List taskFailures, @Nullable List nodeFailures) { - super(started, taskFailures, nodeFailures); + super(acknowledged, taskFailures, nodeFailures); } - public boolean isStarted() { - return isAcknowledged(); - } } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/StopDataFrameTransformResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/StopDataFrameTransformResponse.java index 3224dfb4703e5..6d32474f70c1a 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/StopDataFrameTransformResponse.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/StopDataFrameTransformResponse.java @@ -30,21 +30,18 @@ public class StopDataFrameTransformResponse extends AcknowledgedTasksResponse { - private static final String STOPPED = "stopped"; + private static final String ACKNOWLEDGED = "acknowledged"; - private static final ConstructingObjectParser PARSER = - AcknowledgedTasksResponse.generateParser("stop_data_frame_transform_response", StopDataFrameTransformResponse::new, STOPPED); + private static final ConstructingObjectParser PARSER = AcknowledgedTasksResponse + .generateParser("stop_data_frame_transform_response", StopDataFrameTransformResponse::new, ACKNOWLEDGED); public static StopDataFrameTransformResponse fromXContent(final XContentParser parser) throws IOException { return PARSER.parse(parser, null); } - public StopDataFrameTransformResponse(boolean stopped, @Nullable List taskFailures, + public StopDataFrameTransformResponse(boolean acknowledged, @Nullable List taskFailures, @Nullable List nodeFailures) { - super(stopped, taskFailures, nodeFailures); + super(acknowledged, taskFailures, nodeFailures); } - public boolean isStopped() { - return isAcknowledged(); - } } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/DataFrameTransformIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/DataFrameTransformIT.java index 40cd6f454cdab..31027e7c0f171 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/DataFrameTransformIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/DataFrameTransformIT.java @@ -258,7 +258,7 @@ public void testStartStop() throws IOException { StartDataFrameTransformRequest startRequest = new StartDataFrameTransformRequest(id); StartDataFrameTransformResponse startResponse = execute(startRequest, client::startDataFrameTransform, client::startDataFrameTransformAsync); - assertTrue(startResponse.isStarted()); + assertTrue(startResponse.isAcknowledged()); assertThat(startResponse.getNodeFailures(), empty()); assertThat(startResponse.getTaskFailures(), empty()); @@ -271,7 +271,7 @@ public void testStartStop() throws IOException { StopDataFrameTransformRequest stopRequest = new StopDataFrameTransformRequest(id, Boolean.TRUE, null); StopDataFrameTransformResponse stopResponse = execute(stopRequest, client::stopDataFrameTransform, client::stopDataFrameTransformAsync); - assertTrue(stopResponse.isStopped()); + assertTrue(stopResponse.isAcknowledged()); assertThat(stopResponse.getNodeFailures(), empty()); assertThat(stopResponse.getTaskFailures(), empty()); } @@ -358,7 +358,7 @@ public void testGetStats() throws Exception { StartDataFrameTransformResponse startTransformResponse = execute(new StartDataFrameTransformRequest(id), client::startDataFrameTransform, client::startDataFrameTransformAsync); - assertThat(startTransformResponse.isStarted(), is(true)); + assertThat(startTransformResponse.isAcknowledged(), is(true)); assertBusy(() -> { GetDataFrameTransformStatsResponse response = execute(new GetDataFrameTransformStatsRequest(id), client::getDataFrameTransformStats, client::getDataFrameTransformStatsAsync); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/DataFrameTransformDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/DataFrameTransformDocumentationIT.java index 6f7832cbf3cff..fca5881969bcb 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/DataFrameTransformDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/DataFrameTransformDocumentationIT.java @@ -244,7 +244,7 @@ public void testStartStop() throws IOException, InterruptedException { request, RequestOptions.DEFAULT); // end::start-data-frame-transform-execute - assertTrue(response.isStarted()); + assertTrue(response.isAcknowledged()); } { // tag::stop-data-frame-transform-request @@ -263,7 +263,7 @@ public void testStartStop() throws IOException, InterruptedException { request, RequestOptions.DEFAULT); // end::stop-data-frame-transform-execute - assertTrue(response.isStopped()); + assertTrue(response.isAcknowledged()); } { // tag::start-data-frame-transform-execute-listener diff --git a/docs/reference/data-frames/apis/start-transform.asciidoc b/docs/reference/data-frames/apis/start-transform.asciidoc index d200ef34f587d..3ab90036f8631 100644 --- a/docs/reference/data-frames/apis/start-transform.asciidoc +++ b/docs/reference/data-frames/apis/start-transform.asciidoc @@ -41,7 +41,7 @@ When the {dataframe-transform} starts, you receive the following results: [source,js] ---- { - "started" : true + "acknowledged" : true } ---- // TESTRESPONSE diff --git a/docs/reference/data-frames/apis/stop-transform.asciidoc b/docs/reference/data-frames/apis/stop-transform.asciidoc index 4e89579b52ec1..9a39d184d8ceb 100644 --- a/docs/reference/data-frames/apis/stop-transform.asciidoc +++ b/docs/reference/data-frames/apis/stop-transform.asciidoc @@ -61,7 +61,7 @@ When the {dataframe-transform} stops, you receive the following results: [source,js] ---- { - "stopped" : true + "acknowledged" : true } ---- // TESTRESPONSE diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/StartDataFrameTransformAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/StartDataFrameTransformAction.java index 6216b4489db92..e2128a0f7180b 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/StartDataFrameTransformAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/StartDataFrameTransformAction.java @@ -96,33 +96,33 @@ public boolean equals(Object obj) { } public static class Response extends BaseTasksResponse implements ToXContentObject { - private final boolean started; + private final boolean acknowledged; public Response(StreamInput in) throws IOException { super(in); - started = in.readBoolean(); + acknowledged = in.readBoolean(); } - public Response(boolean started) { + public Response(boolean acknowledged) { super(Collections.emptyList(), Collections.emptyList()); - this.started = started; + this.acknowledged = acknowledged; } - public boolean isStarted() { - return started; + public boolean isAcknowledged() { + return acknowledged; } @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - out.writeBoolean(started); + out.writeBoolean(acknowledged); } @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); toXContentCommon(builder, params); - builder.field("started", started); + builder.field("acknowledged", acknowledged); builder.endObject(); return builder; } @@ -137,12 +137,12 @@ public boolean equals(Object obj) { return false; } Response response = (Response) obj; - return started == response.started; + return acknowledged == response.acknowledged; } @Override public int hashCode() { - return Objects.hash(started); + return Objects.hash(acknowledged); } } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/StopDataFrameTransformAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/StopDataFrameTransformAction.java index 99699c3a48cb5..e170e5e475fd2 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/StopDataFrameTransformAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/StopDataFrameTransformAction.java @@ -158,40 +158,40 @@ public boolean match(Task task) { public static class Response extends BaseTasksResponse implements Writeable, ToXContentObject { - private final boolean stopped; + private final boolean acknowledged; public Response(StreamInput in) throws IOException { super(in); - stopped = in.readBoolean(); + acknowledged = in.readBoolean(); } - public Response(boolean stopped) { + public Response(boolean acknowledged) { super(Collections.emptyList(), Collections.emptyList()); - this.stopped = stopped; + this.acknowledged = acknowledged; } public Response(List taskFailures, List nodeFailures, - boolean stopped) { + boolean acknowledged) { super(taskFailures, nodeFailures); - this.stopped = stopped; + this.acknowledged = acknowledged; } - public boolean isStopped() { - return stopped; + public boolean isAcknowledged() { + return acknowledged; } @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - out.writeBoolean(stopped); + out.writeBoolean(acknowledged); } @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); toXContentCommon(builder, params); - builder.field("stopped", stopped); + builder.field("acknowledged", acknowledged); builder.endObject(); return builder; } @@ -203,12 +203,12 @@ public boolean equals(Object o) { if (o == null || getClass() != o.getClass()) return false; Response response = (Response) o; - return stopped == response.stopped; + return acknowledged == response.acknowledged; } @Override public int hashCode() { - return Objects.hash(stopped); + return Objects.hash(acknowledged); } } } diff --git a/x-pack/plugin/data-frame/qa/multi-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTransformIT.java b/x-pack/plugin/data-frame/qa/multi-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTransformIT.java index 486ea5e5d7403..1ec425c641693 100644 --- a/x-pack/plugin/data-frame/qa/multi-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTransformIT.java +++ b/x-pack/plugin/data-frame/qa/multi-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTransformIT.java @@ -50,7 +50,7 @@ public void testDataFrameTransformCrud() throws Exception { REVIEWS_INDEX_NAME); assertTrue(putDataFrameTransform(config, RequestOptions.DEFAULT).isAcknowledged()); - assertTrue(startDataFrameTransform(config.getId(), RequestOptions.DEFAULT).isStarted()); + assertTrue(startDataFrameTransform(config.getId(), RequestOptions.DEFAULT).isAcknowledged()); waitUntilCheckpoint(config.getId(), 1L); diff --git a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameRestTestCase.java b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameRestTestCase.java index 23bff163031ce..1bfa79d0f5b96 100644 --- a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameRestTestCase.java +++ b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameRestTestCase.java @@ -191,7 +191,7 @@ protected void startDataframeTransform(String transformId, boolean force, String startTransformRequest.setOptions(expectWarnings(warnings)); } Map startTransformResponse = entityAsMap(client().performRequest(startTransformRequest)); - assertThat(startTransformResponse.get("started"), equalTo(Boolean.TRUE)); + assertThat(startTransformResponse.get("acknowledged"), equalTo(Boolean.TRUE)); } protected void stopDataFrameTransform(String transformId, boolean force) throws Exception { @@ -200,7 +200,7 @@ protected void stopDataFrameTransform(String transformId, boolean force) throws stopTransformRequest.addParameter(DataFrameField.FORCE.getPreferredName(), Boolean.toString(force)); stopTransformRequest.addParameter(DataFrameField.WAIT_FOR_COMPLETION.getPreferredName(), Boolean.toString(true)); Map stopTransformResponse = entityAsMap(client().performRequest(stopTransformRequest)); - assertThat(stopTransformResponse.get("stopped"), equalTo(Boolean.TRUE)); + assertThat(stopTransformResponse.get("acknowledged"), equalTo(Boolean.TRUE)); } protected void startAndWaitForTransform(String transformId, String dataFrameIndex) throws Exception { diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportStopDataFrameTransformAction.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportStopDataFrameTransformAction.java index 26f5259c69dc8..a013b65416d7e 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportStopDataFrameTransformAction.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportStopDataFrameTransformAction.java @@ -124,8 +124,8 @@ protected StopDataFrameTransformAction.Response newResponse(StopDataFrameTransfo } // if tasks is empty allMatch is 'vacuously satisfied' - boolean allStopped = tasks.stream().allMatch(StopDataFrameTransformAction.Response::isStopped); - return new StopDataFrameTransformAction.Response(allStopped); + boolean allAcknowledged = tasks.stream().allMatch(StopDataFrameTransformAction.Response::isAcknowledged); + return new StopDataFrameTransformAction.Response(allAcknowledged); } private ActionListener diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_start_stop.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_start_stop.yml index 58af6e0899dda..31f80033e7bdb 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_start_stop.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_start_stop.yml @@ -42,7 +42,7 @@ teardown: - do: data_frame.start_data_frame_transform: transform_id: "airline-transform-start-stop" - - match: { started: true } + - match: { acknowledged: true } --- "Test start missing transform": @@ -56,7 +56,7 @@ teardown: - do: data_frame.start_data_frame_transform: transform_id: "airline-transform-start-stop" - - match: { started: true } + - match: { acknowledged: true } - do: catch: /Unable to start data frame transform \[airline-transform-start-stop\] as it is in state \[STARTED\]/ @@ -68,7 +68,7 @@ teardown: - do: data_frame.start_data_frame_transform: transform_id: "airline-transform-start-stop" - - match: { started: true } + - match: { acknowledged: true } - do: indices.get_mapping: index: airline-data-by-airline-start-stop @@ -83,7 +83,7 @@ teardown: - do: data_frame.start_data_frame_transform: transform_id: "airline-transform-start-stop" - - match: { started: true } + - match: { acknowledged: true } - do: indices.get_mapping: index: airline-data-by-airline-start-stop @@ -96,7 +96,7 @@ teardown: - do: data_frame.start_data_frame_transform: transform_id: "airline-transform-start-stop" - - match: { started: true } + - match: { acknowledged: true } - do: data_frame.get_data_frame_transform_stats: @@ -110,7 +110,7 @@ teardown: data_frame.stop_data_frame_transform: transform_id: "airline-transform-start-stop" wait_for_completion: true - - match: { stopped: true } + - match: { acknowledged: true } - do: data_frame.get_data_frame_transform_stats: @@ -123,7 +123,7 @@ teardown: - do: data_frame.start_data_frame_transform: transform_id: "airline-transform-start-stop" - - match: { started: true } + - match: { acknowledged: true } - do: data_frame.get_data_frame_transform_stats: @@ -145,7 +145,7 @@ teardown: - do: data_frame.stop_data_frame_transform: transform_id: "airline-transform-start-stop" - - match: { stopped: true } + - match: { acknowledged: true } --- "Test start/stop only starts/stops specified transform": @@ -164,7 +164,7 @@ teardown: - do: data_frame.start_data_frame_transform: transform_id: "airline-transform-start-stop" - - match: { started: true } + - match: { acknowledged: true } - do: data_frame.get_data_frame_transform_stats: @@ -185,12 +185,12 @@ teardown: - do: data_frame.start_data_frame_transform: transform_id: "airline-transform-start-later" - - match: { started: true } + - match: { acknowledged: true } - do: data_frame.stop_data_frame_transform: transform_id: "airline-transform-start-stop" - - match: { stopped: true } + - match: { acknowledged: true } - do: data_frame.get_data_frame_transform_stats: @@ -204,7 +204,7 @@ teardown: data_frame.stop_data_frame_transform: transform_id: "airline-transform-start-later" wait_for_completion: true - - match: { stopped: true } + - match: { acknowledged: true } - do: data_frame.delete_data_frame_transform: From c26122014f772b7f2c50d6029d680c09e56e2e88 Mon Sep 17 00:00:00 2001 From: kevin fuksman Date: Wed, 29 May 2019 08:44:24 -0300 Subject: [PATCH 168/224] Added param ignore_throttled=false when indicesOptions.ignoreThrottled() is false (#42393) and fixed test RequestConvertersTests and added ignore_throttled on all request --- .../org/elasticsearch/client/RequestConverters.java | 1 + .../elasticsearch/client/RequestConvertersTests.java | 12 ++++++++---- 2 files changed, 9 insertions(+), 4 deletions(-) diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java index 711bb68e7fb08..9bd9a0852d33e 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java @@ -941,6 +941,7 @@ Params withIndicesOptions(IndicesOptions indicesOptions) { expandWildcards = joiner.toString(); } putParam("expand_wildcards", expandWildcards); + putParam("ignore_throttled", Boolean.toString(indicesOptions.ignoreThrottled())); } return this; } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java index 8c5e6b779ff47..46d929e27d988 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java @@ -1536,7 +1536,7 @@ public void testFieldCaps() { endpoint.add("_field_caps"); assertEquals(endpoint.toString(), request.getEndpoint()); - assertEquals(4, request.getParameters().size()); + assertEquals(5, request.getParameters().size()); // Note that we don't check the field param value explicitly, as field names are // passed through @@ -1570,7 +1570,7 @@ public void testRankEval() throws Exception { } endpoint.add(RestRankEvalAction.ENDPOINT); assertEquals(endpoint.toString(), request.getEndpoint()); - assertEquals(3, request.getParameters().size()); + assertEquals(4, request.getParameters().size()); assertEquals(expectedParams, request.getParameters()); assertToXContentBody(spec, request.getEntity()); } @@ -1897,7 +1897,8 @@ static void setRandomIndicesOptions(Consumer setter, Supplier expectedParams) { if (randomBoolean()) { - setter.accept(IndicesOptions.fromOptions(randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean())); + setter.accept(IndicesOptions.fromOptions(randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean(), + true, false, false, randomBoolean())); } expectedParams.put("ignore_unavailable", Boolean.toString(getter.get().ignoreUnavailable())); expectedParams.put("allow_no_indices", Boolean.toString(getter.get().allowNoIndices())); @@ -1910,11 +1911,13 @@ static void setRandomIndicesOptions(Consumer setter, Supplier expectedParams) { if (randomBoolean()) { - indicesOptions = IndicesOptions.fromOptions(randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean()); + indicesOptions = IndicesOptions.fromOptions(randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean(), + true, false, false, randomBoolean()); } expectedParams.put("ignore_unavailable", Boolean.toString(indicesOptions.ignoreUnavailable())); expectedParams.put("allow_no_indices", Boolean.toString(indicesOptions.allowNoIndices())); @@ -1927,6 +1930,7 @@ static IndicesOptions setRandomIndicesOptions(IndicesOptions indicesOptions, Map } else { expectedParams.put("expand_wildcards", "none"); } + expectedParams.put("ignore_throttled", Boolean.toString(indicesOptions.ignoreThrottled())); return indicesOptions; } From 665b6563d795e8e851dc01d35aba1a3b0485d7d1 Mon Sep 17 00:00:00 2001 From: James Rodewig Date: Wed, 29 May 2019 08:24:25 -0400 Subject: [PATCH 169/224] [DOCS] Set explicit anchors for TLS/SSL settings (#42524) --- .../settings/monitoring-settings.asciidoc | 1 + .../settings/notification-settings.asciidoc | 1 + .../settings/security-settings.asciidoc | 2 ++ docs/reference/settings/ssl-settings.asciidoc | 19 ++++++++++++++++++- 4 files changed, 22 insertions(+), 1 deletion(-) diff --git a/docs/reference/settings/monitoring-settings.asciidoc b/docs/reference/settings/monitoring-settings.asciidoc index c48b7d8764d2c..18379577e6a3e 100644 --- a/docs/reference/settings/monitoring-settings.asciidoc +++ b/docs/reference/settings/monitoring-settings.asciidoc @@ -283,5 +283,6 @@ For example: `["elasticsearch_version_mismatch","xpack_license_expiration"]`. :component: {monitoring} :verifies: :server!: +:ssl-context: monitoring include::ssl-settings.asciidoc[] diff --git a/docs/reference/settings/notification-settings.asciidoc b/docs/reference/settings/notification-settings.asciidoc index 77f755b09e285..ac7160bd20aac 100644 --- a/docs/reference/settings/notification-settings.asciidoc +++ b/docs/reference/settings/notification-settings.asciidoc @@ -85,6 +85,7 @@ corresponding endpoints are whitelisted as well. :component: {watcher} :verifies: :server!: +:ssl-context: watcher include::ssl-settings.asciidoc[] diff --git a/docs/reference/settings/security-settings.asciidoc b/docs/reference/settings/security-settings.asciidoc index 3cfe1d1e58769..00c1941f65256 100644 --- a/docs/reference/settings/security-settings.asciidoc +++ b/docs/reference/settings/security-settings.asciidoc @@ -1566,6 +1566,7 @@ a PKCS#12 container includes trusted certificate ("anchor") entries look for :client-auth-default: none :verifies!: :server: +:ssl-context: security-http include::ssl-settings.asciidoc[] @@ -1575,6 +1576,7 @@ include::ssl-settings.asciidoc[] :client-auth-default!: :verifies: :server: +:ssl-context: security-transport include::ssl-settings.asciidoc[] diff --git a/docs/reference/settings/ssl-settings.asciidoc b/docs/reference/settings/ssl-settings.asciidoc index f392d0c2fb816..6d8ffd90b6a37 100644 --- a/docs/reference/settings/ssl-settings.asciidoc +++ b/docs/reference/settings/ssl-settings.asciidoc @@ -1,4 +1,3 @@ - ==== {component} TLS/SSL Settings You can configure the following TLS/SSL settings. If the settings are not configured, the {ref}/security-settings.html#ssl-tls-settings[Default TLS/SSL Settings] @@ -39,7 +38,13 @@ endif::verifies[] Supported cipher suites can be found in Oracle's http://docs.oracle.com/javase/8/docs/technotes/guides/security/SunProviders.html[ Java Cryptography Architecture documentation]. Defaults to ``. +ifdef::asciidoctor[] +[#{ssl-context}-tls-ssl-key-trusted-certificate-settings] ===== {component} TLS/SSL Key and Trusted Certificate Settings +endif::[] +ifndef::asciidoctor[] +===== anchor:{ssl-context}-tls-ssl-key-trusted-certificate-settings[] {component} TLS/SSL Key and Trusted Certificate Settings +endif::[] The following settings are used to specify a private key, certificate, and the trusted certificates that should be used when communicating over an SSL/TLS connection. @@ -105,7 +110,13 @@ Password to the truststore. +{ssl-prefix}.ssl.truststore.secure_password+ (<>):: Password to the truststore. +ifdef::asciidoctor[] +[#{ssl-context}-pkcs12-files] ===== PKCS#12 Files +endif::[] +ifndef::asciidoctor[] +===== anchor:{ssl-context}-pkcs12-files[] PKCS#12 Files +endif::[] {es} can be configured to use PKCS#12 container files (`.p12` or `.pfx` files) that contain the private key, certificate and certificates that should be trusted. @@ -143,7 +154,13 @@ Password to the PKCS#12 file. +{ssl-prefix}.ssl.truststore.secure_password+ (<>):: Password to the PKCS#12 file. +ifdef::asciidoctor[] +[#{ssl-context}-pkcs11-tokens] ===== PKCS#11 Tokens +endif::[] +ifndef::asciidoctor[] +===== anchor:{ssl-context}-pkcs11-tokens[] PKCS#11 Tokens +endif::[] {es} can be configured to use a PKCS#11 token that contains the private key, certificate and certificates that should be trusted. From 5e0a162ed665cfb6eb528ac0d68b3182e7205ff2 Mon Sep 17 00:00:00 2001 From: Alpar Torok Date: Wed, 29 May 2019 16:16:04 +0300 Subject: [PATCH 170/224] Testclusters: convert ccr tests (#42313) --- buildSrc/build.gradle | 2 +- .../gradle/test/RestIntegTestTask.groovy | 6 +- .../testclusters/ElasticsearchCluster.java | 15 ++++ .../testclusters/ElasticsearchNode.java | 81 ++++++++++++++++--- .../TestClusterConfiguration.java | 6 ++ .../testclusters/TestClustersPlugin.java | 43 ++++++++-- x-pack/plugin/ccr/qa/build.gradle | 1 + .../downgrade-to-basic-license/build.gradle | 58 +++++++------ .../xpack/ccr/FollowIndexIT.java | 2 +- .../plugin/ccr/qa/multi-cluster/build.gradle | 75 ++++++++--------- .../ccr/qa/non-compliant-license/build.gradle | 42 +++++----- x-pack/plugin/ccr/qa/rest/build.gradle | 20 ++--- x-pack/plugin/ccr/qa/restart/build.gradle | 69 ++++++++-------- x-pack/plugin/ccr/qa/security/build.gradle | 76 ++++++----------- 14 files changed, 282 insertions(+), 214 deletions(-) diff --git a/buildSrc/build.gradle b/buildSrc/build.gradle index d3a16f55277d7..a974e866465be 100644 --- a/buildSrc/build.gradle +++ b/buildSrc/build.gradle @@ -177,7 +177,7 @@ if (project != rootProject) { if (isLuceneSnapshot) { systemProperty 'test.lucene-snapshot-revision', isLuceneSnapshot[0][1] } - maxParallelForks System.getProperty('tests.jvms', project.rootProject.ext.defaultParallel.toString()) as Integer + maxParallelForks System.getProperty('tI', project.rootProject.ext.defaultParallel.toString()) as Integer } check.dependsOn(integTest) diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestIntegTestTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestIntegTestTask.groovy index 40cefdcc25fb9..9564bde038b90 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestIntegTestTask.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestIntegTestTask.groovy @@ -79,7 +79,10 @@ class RestIntegTestTask extends DefaultTask { // disable the build cache for rest test tasks // there are a number of inputs we aren't properly tracking here so we'll just not cache these for now - runner.outputs.doNotCacheIf('Caching is disabled for REST integration tests') { true } + runner.getOutputs().doNotCacheIf( + "Caching is disabled for REST integration tests", + { false } + ); // override/add more for rest tests runner.maxParallelForks = 1 @@ -285,4 +288,5 @@ class RestIntegTestTask extends DefaultTask { } return copyRestSpec } + } diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchCluster.java b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchCluster.java index e245fb0ead95a..efa8e878d56b0 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchCluster.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchCluster.java @@ -186,6 +186,16 @@ public void environment(String key, Supplier valueSupplier) { nodes.all(each -> each.environment(key, valueSupplier)); } + @Override + public void jvmArgs(String... values) { + nodes.all(each -> each.jvmArgs(values)); + } + + @Override + public void jvmArgs(Supplier valueSupplier) { + nodes.all(each -> each.jvmArgs(valueSupplier)); + } + @Override public void freeze() { nodes.forEach(ElasticsearchNode::freeze); @@ -216,6 +226,11 @@ public void start() { } } + @Override + public void restart() { + nodes.forEach(ElasticsearchNode::restart); + } + @Override public void extraConfigFile(String destination, File from) { nodes.all(node -> node.extraConfigFile(destination, from)); diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java index bba94f6c7d173..310308be93ab4 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java @@ -38,6 +38,7 @@ import java.nio.file.StandardCopyOption; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; @@ -86,6 +87,7 @@ public class ElasticsearchNode implements TestClusterConfiguration { private final Map keystoreFiles = new LinkedHashMap<>(); private final Map> systemProperties = new LinkedHashMap<>(); private final Map> environment = new LinkedHashMap<>(); + private final List>> jvmArgs = new ArrayList<>(); private final Map extraConfigFiles = new HashMap<>(); final LinkedHashMap defaultConfig = new LinkedHashMap<>(); private final List> credentials = new ArrayList<>(); @@ -105,6 +107,7 @@ public class ElasticsearchNode implements TestClusterConfiguration { private File javaHome; private volatile Process esProcess; private Function nameCustomization = Function.identity(); + private boolean isWorkingDirConfigured = false; ElasticsearchNode(String path, String name, GradleServicesAdapter services, File artifactsExtractDir, File workingDirBase) { this.path = path; @@ -220,6 +223,19 @@ public void environment(String key, Supplier valueSupplier) { addSupplier("Environment variable", environment, key, valueSupplier); } + + public void jvmArgs(String... values) { + for (String value : values) { + requireNonNull(value, "jvm argument was null when configuring test cluster `" + this + "`"); + } + jvmArgs.add(() -> Arrays.asList(values)); + } + + public void jvmArgs(Supplier valueSupplier) { + requireNonNull(valueSupplier, "jvm argument supplier was null when configuring test cluster `" + this + "`"); + jvmArgs.add(() -> Arrays.asList(valueSupplier.get())); + } + private void addSupplier(String name, Map> collector, String key, Supplier valueSupplier) { requireNonNull(key, name + " key was null when configuring test cluster `" + this + "`"); requireNonNull(valueSupplier, name + " value supplier was null when configuring test cluster `" + this + "`"); @@ -231,10 +247,13 @@ private void addSupplier(String name, Map> collec addSupplier(name, collector, key, () -> actualValue); } - private void checkSuppliers(String name, Map> collector) { - collector.forEach((key, value) -> { - requireNonNull(value.get().toString(), name + " supplied value was null when configuring test cluster `" + this + "`"); - }); + private void checkSuppliers(String name, Collection> collector) { + collector.forEach(suplier -> + requireNonNull( + suplier.get().toString(), + name + " supplied value was null when configuring test cluster `" + this + "`" + ) + ); } public Path getConfigDir() { @@ -289,7 +308,11 @@ public synchronized void start() { } try { - createWorkingDir(distroArtifact); + if (isWorkingDirConfigured == false) { + // Only configure working dir once so we don't loose data on restarts + isWorkingDirConfigured = true; + createWorkingDir(distroArtifact); + } } catch (IOException e) { throw new UncheckedIOException("Failed to create working directory for " + this, e); } @@ -303,7 +326,7 @@ public synchronized void start() { if (keystoreSettings.isEmpty() == false || keystoreFiles.isEmpty() == false) { runElaticsearchBinScript("elasticsearch-keystore", "create"); - checkSuppliers("Keystore", keystoreSettings); + checkSuppliers("Keystore", keystoreSettings.values()); keystoreSettings.forEach((key, value) -> runElaticsearchBinScriptWithInput(value.get().toString(), "elasticsearch-keystore", "add", "-x", key) ); @@ -337,6 +360,20 @@ public synchronized void start() { startElasticsearchProcess(); } + @Override + public void restart() { + LOGGER.info("Restarting {}", this); + stop(false); + try { + Files.delete(httpPortsFile); + Files.delete(transportPortFile); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + + start(); + } + private boolean isSettingMissingOrTrue(String name) { return Boolean.valueOf(settings.getOrDefault(name, () -> "false").get().toString()); } @@ -349,7 +386,7 @@ private void copyExtraConfigFiles() { } Path dst = configFile.getParent().resolve(destination); try { - Files.createDirectories(dst); + Files.createDirectories(dst.getParent()); Files.copy(from.toPath(), dst, StandardCopyOption.REPLACE_EXISTING); LOGGER.info("Added extra config file {} for {}", destination, this); } catch (IOException e) { @@ -453,12 +490,30 @@ private Map getESEnvironment() { defaultEnv.put("ES_PATH_CONF", configFile.getParent().toString()); String systemPropertiesString = ""; if (systemProperties.isEmpty() == false) { - checkSuppliers("Java System property", systemProperties); + checkSuppliers("Java System property", systemProperties.values()); systemPropertiesString = " " + systemProperties.entrySet().stream() .map(entry -> "-D" + entry.getKey() + "=" + entry.getValue().get()) .collect(Collectors.joining(" ")); } - defaultEnv.put("ES_JAVA_OPTS", "-Xms512m -Xmx512m -ea -esa" + systemPropertiesString); + String jvmArgsString = ""; + if (jvmArgs.isEmpty() == false) { + jvmArgsString = " " + jvmArgs.stream() + .map(Supplier::get) + .peek(charSequences -> requireNonNull(charSequences, "Jvm argument supplier returned null while configuring " + this)) + .flatMap(Collection::stream) + .peek(argument -> { + requireNonNull(argument, "Jvm argument supplier returned null while configuring " + this); + if (argument.toString().startsWith("-D")) { + throw new TestClustersException("Invalid jvm argument `" + argument + + "` configure as systemProperty instead for " + this + ); + } + }) + .collect(Collectors.joining(" ")); + } + defaultEnv.put("ES_JAVA_OPTS", "-Xms512m -Xmx512m -ea -esa" + + systemPropertiesString + jvmArgsString + ); defaultEnv.put("ES_TMPDIR", tmpDir.toString()); // Windows requires this as it defaults to `c:\windows` despite ES_TMPDIR defaultEnv.put("TMP", tmpDir.toString()); @@ -471,7 +526,7 @@ private Map getESEnvironment() { ); } - checkSuppliers("Environment variable", environment); + checkSuppliers("Environment variable", environment.values()); environment.forEach((key, value) -> defaultEnv.put(key, value.get().toString())); return defaultEnv; } @@ -520,6 +575,10 @@ public List getAllTransportPortURI() { return getTransportPortInternal(); } + public File getServerLog() { + return confPathLogs.resolve(safeName(getName()).replaceAll("-[0-9]+$", "") + "_server.json").toFile(); + } + @Override public synchronized void stop(boolean tailLogs) { if (esProcess == null && tailLogs) { @@ -693,7 +752,7 @@ private void createConfiguration() { // Don't wait for state, just start up quickly. This will also allow new and old nodes in the BWC case to become the master defaultConfig.put("discovery.initial_state_timeout", "0s"); - checkSuppliers("Settings", settings); + checkSuppliers("Settings", settings.values()); Map userConfig = settings.entrySet().stream() .collect(Collectors.toMap(entry -> entry.getKey(), entry -> entry.getValue().get().toString())); HashSet overriden = new HashSet<>(defaultConfig.keySet()); diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClusterConfiguration.java b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClusterConfiguration.java index 1ccbeabd4b88a..253a58c6b96a7 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClusterConfiguration.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClusterConfiguration.java @@ -66,12 +66,18 @@ public interface TestClusterConfiguration { void environment(String key, Supplier valueSupplier); + void jvmArgs(String... values); + + void jvmArgs(Supplier valueSupplier); + void freeze(); void setJavaHome(File javaHome); void start(); + void restart(); + void extraConfigFile(String destination, File from); void user(Map userSpec); diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersPlugin.java b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersPlugin.java index daca1f5ebb191..500807854014f 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersPlugin.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersPlugin.java @@ -62,11 +62,13 @@ public class TestClustersPlugin implements Plugin { private static final TimeUnit EXECUTOR_SHUTDOWN_TIMEOUT_UNIT = TimeUnit.MINUTES; private static final Logger logger = Logging.getLogger(TestClustersPlugin.class); + private static final String TESTCLUSTERS_INSPECT_FAILURE = "testclusters.inspect.failure"; private final Map> usedClusters = new HashMap<>(); private final Map claimsInventory = new HashMap<>(); private final Set runningClusters =new HashSet<>(); private final Thread shutdownHook = new Thread(this::shutDownAllClusters); + private final Boolean allowClusterToSurvive = Boolean.valueOf(System.getProperty(TESTCLUSTERS_INSPECT_FAILURE, "false")); private ExecutorService executorService = Executors.newSingleThreadExecutor(); public static String getHelperConfigurationName(String version) { @@ -195,7 +197,7 @@ private void configureStartClustersHook(Project project) { public void beforeActions(Task task) { // we only start the cluster before the actions, so we'll not start it if the task is up-to-date usedClusters.getOrDefault(task, Collections.emptyList()).stream() - .filter(each -> runningClusters.contains(each) == false) + .filter(cluster -> runningClusters.contains(cluster) == false) .forEach(elasticsearchCluster -> { elasticsearchCluster.start(); runningClusters.add(elasticsearchCluster); @@ -221,18 +223,18 @@ public void afterExecute(Task task, TaskState state) { if (state.getFailure() != null) { // If the task fails, and other tasks use this cluster, the other task will likely never be // executed at all, so we will never get to un-claim and terminate it. - clustersUsedByTask.forEach(each -> each.stop(true)); + clustersUsedByTask.forEach(cluster -> stopCluster(cluster, true)); } else { clustersUsedByTask.forEach( - each -> claimsInventory.put(each, claimsInventory.getOrDefault(each, 0) - 1) + cluster -> claimsInventory.put(cluster, claimsInventory.getOrDefault(cluster, 0) - 1) ); claimsInventory.entrySet().stream() .filter(entry -> entry.getValue() == 0) .filter(entry -> runningClusters.contains(entry.getKey())) .map(Map.Entry::getKey) - .forEach(each -> { - each.stop(false); - runningClusters.remove(each); + .forEach(cluster -> { + stopCluster(cluster, false); + runningClusters.remove(cluster); }); } } @@ -242,6 +244,28 @@ public void beforeExecute(Task task) {} ); } + private void stopCluster(ElasticsearchCluster cluster, boolean taskFailed) { + if (allowClusterToSurvive) { + logger.info("Not stopping clusters, disabled by property"); + if (taskFailed) { + // task failed or this is the last one to stop + for (int i=1 ; ; i += i) { + logger.lifecycle( + "No more test clusters left to run, going to sleep because {} was set," + + " interrupt (^C) to stop clusters.", TESTCLUSTERS_INSPECT_FAILURE + ); + try { + Thread.sleep(1000 * i); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + return; + } + } + } + } + cluster.stop(taskFailed); + } + /** * Boilerplate to get testClusters container extension * @@ -428,13 +452,16 @@ private void shutdownExecutorService() { private void shutDownAllClusters() { synchronized (runningClusters) { + if (runningClusters.isEmpty()) { + return; + } Iterator iterator = runningClusters.iterator(); while (iterator.hasNext()) { + ElasticsearchCluster next = iterator.next(); iterator.remove(); - iterator.next().stop(true); + next.stop(false); } } } - } diff --git a/x-pack/plugin/ccr/qa/build.gradle b/x-pack/plugin/ccr/qa/build.gradle index d3e95d997c3fb..6a39489515632 100644 --- a/x-pack/plugin/ccr/qa/build.gradle +++ b/x-pack/plugin/ccr/qa/build.gradle @@ -14,4 +14,5 @@ subprojects { include 'rest-api-spec/api/**' } } + } diff --git a/x-pack/plugin/ccr/qa/downgrade-to-basic-license/build.gradle b/x-pack/plugin/ccr/qa/downgrade-to-basic-license/build.gradle index cbba093c5526b..17de70794017e 100644 --- a/x-pack/plugin/ccr/qa/downgrade-to-basic-license/build.gradle +++ b/x-pack/plugin/ccr/qa/downgrade-to-basic-license/build.gradle @@ -1,5 +1,6 @@ import org.elasticsearch.gradle.test.RestIntegTestTask +apply plugin: 'elasticsearch.testclusters' apply plugin: 'elasticsearch.standalone-test' dependencies { @@ -11,56 +12,51 @@ dependencies { testCompile project(':x-pack:plugin:ccr:qa') } -task leaderClusterTest(type: RestIntegTestTask) { +task "leader-cluster"(type: RestIntegTestTask) { mustRunAfter(precommit) + runner { + systemProperty 'tests.target_cluster', 'leader' + } } - -leaderClusterTestCluster { - numNodes = 1 - clusterName = 'leader-cluster' +testClusters."leader-cluster" { + distribution = "DEFAULT" setting 'xpack.license.self_generated.type', 'trial' } -leaderClusterTestRunner { - systemProperty 'tests.target_cluster', 'leader' -} - task writeJavaPolicy { + ext.policyFile = file("${buildDir}/tmp/java.policy") doLast { - final File tmp = file("${buildDir}/tmp") - if (tmp.exists() == false && tmp.mkdirs() == false) { + if (policyFile.parentFile.exists() == false && policyFile.parentFile.mkdirs() == false) { throw new GradleException("failed to create temporary directory [${tmp}]") } - final File javaPolicy = file("${tmp}/java.policy") - javaPolicy.write( + policyFile.write( [ "grant {", - " permission java.io.FilePermission \"${-> followClusterTest.getNodes().get(0).homeDir}/logs/${-> followClusterTest.getNodes().get(0).clusterName}_server.json\", \"read\";", + " permission java.io.FilePermission \"${-> testClusters."follow-cluster".getFirstNode().getServerLog()}\", \"read\";", "};" - ].join("\n")) + ].join("\n") + ) } } -task followClusterTest(type: RestIntegTestTask) {} -followClusterTest.dependsOn writeJavaPolicy +task "follow-cluster"(type: RestIntegTestTask) { + dependsOn writeJavaPolicy, "leader-cluster" + useCluster testClusters."leader-cluster" + runner { + systemProperty 'java.security.policy', "file://${writeJavaPolicy.policyFile}" + systemProperty 'tests.target_cluster', 'follow' + nonInputProperties.systemProperty 'tests.leader_host', "${-> testClusters."leader-cluster".getAllHttpSocketURI().get(0)}" + nonInputProperties.systemProperty 'log', "${-> testClusters."follow-cluster".getFirstNode().getServerLog()}" + } +} -followClusterTestCluster { - dependsOn leaderClusterTestRunner - numNodes = 1 - clusterName = 'follow-cluster' +testClusters."follow-cluster" { + distribution = "DEFAULT" setting 'xpack.monitoring.collection.enabled', 'true' setting 'xpack.license.self_generated.type', 'trial' - setting 'cluster.remote.leader_cluster.seeds', "\"${-> leaderClusterTest.nodes.get(0).transportUri()}\"" + setting 'cluster.remote.leader_cluster.seeds', { "\"${testClusters."leader-cluster".getAllTransportPortURI().join(",")}\"" } } -followClusterTestRunner { - systemProperty 'java.security.policy', "file://${buildDir}/tmp/java.policy" - systemProperty 'tests.target_cluster', 'follow' - nonInputProperties.systemProperty 'tests.leader_host', "${-> leaderClusterTest.nodes.get(0).httpUri()}" - nonInputProperties.systemProperty 'log', "${-> followClusterTest.getNodes().get(0).homeDir}/logs/" + - "${-> followClusterTest.getNodes().get(0).clusterName}_server.json" - finalizedBy 'leaderClusterTestCluster#stop' -} -check.dependsOn followClusterTest +check.dependsOn "follow-cluster" test.enabled = false // no unit tests for multi-cluster-search, only the rest integration test diff --git a/x-pack/plugin/ccr/qa/downgrade-to-basic-license/src/test/java/org/elasticsearch/xpack/ccr/FollowIndexIT.java b/x-pack/plugin/ccr/qa/downgrade-to-basic-license/src/test/java/org/elasticsearch/xpack/ccr/FollowIndexIT.java index 8fb305ba06ee6..4e46de95adb03 100644 --- a/x-pack/plugin/ccr/qa/downgrade-to-basic-license/src/test/java/org/elasticsearch/xpack/ccr/FollowIndexIT.java +++ b/x-pack/plugin/ccr/qa/downgrade-to-basic-license/src/test/java/org/elasticsearch/xpack/ccr/FollowIndexIT.java @@ -104,7 +104,7 @@ private Matcher autoFollowCoordinatorWarn() { protected Boolean featureValueOf(JsonLogLine actual) { return actual.level().equals("WARN") && actual.component().equals("o.e.x.c.a.AutoFollowCoordinator") && - actual.nodeName().equals("node-0") && + actual.nodeName().startsWith("follow-cluster-0") && actual.message().contains("failure occurred while fetching cluster state for auto follow pattern [test_pattern]") && actual.stacktrace().contains("org.elasticsearch.ElasticsearchStatusException: can not fetch remote cluster state " + "as the remote cluster [leader_cluster] is not licensed for [ccr]; the license mode [BASIC]" + diff --git a/x-pack/plugin/ccr/qa/multi-cluster/build.gradle b/x-pack/plugin/ccr/qa/multi-cluster/build.gradle index ba4d7ea2064a1..ae0d4247d7038 100644 --- a/x-pack/plugin/ccr/qa/multi-cluster/build.gradle +++ b/x-pack/plugin/ccr/qa/multi-cluster/build.gradle @@ -1,5 +1,6 @@ import org.elasticsearch.gradle.test.RestIntegTestTask +apply plugin: 'elasticsearch.testclusters' apply plugin: 'elasticsearch.standalone-test' dependencies { @@ -11,57 +12,57 @@ dependencies { testCompile project(':x-pack:plugin:ccr:qa') } -task leaderClusterTest(type: RestIntegTestTask) { +task "leader-cluster"(type: RestIntegTestTask) { mustRunAfter(precommit) + runner { + systemProperty 'tests.target_cluster', 'leader' + } } -leaderClusterTestCluster { - numNodes = 1 - clusterName = 'leader-cluster' +testClusters."leader-cluster" { + distribution = "DEFAULT" setting 'xpack.license.self_generated.type', 'trial' - setting 'node.name', 'leader' -} - -leaderClusterTestRunner { - systemProperty 'tests.target_cluster', 'leader' } -task middleClusterTest(type: RestIntegTestTask) {} -middleClusterTestCluster { - dependsOn leaderClusterTestRunner - numNodes = 1 - clusterName = 'middle-cluster' +task "middle-cluster"(type: RestIntegTestTask) { + dependsOn "leader-cluster" + useCluster testClusters."leader-cluster" + runner { + systemProperty 'tests.target_cluster', 'middle' + nonInputProperties.systemProperty 'tests.leader_host', + "${-> testClusters."leader-cluster".getAllHttpSocketURI().get(0)}" + } +} +testClusters."middle-cluster" { + distribution = "DEFAULT" setting 'xpack.license.self_generated.type', 'trial' - setting 'cluster.remote.leader_cluster.seeds', "\"${-> leaderClusterTest.nodes.get(0).transportUri()}\"" - setting 'node.name', 'middle' + setting 'cluster.remote.leader_cluster.seeds', + { "\"${testClusters."leader-cluster".getAllTransportPortURI().join(",")}\"" } } -middleClusterTestRunner { - systemProperty 'tests.target_cluster', 'middle' - nonInputProperties.systemProperty 'tests.leader_host', "${-> leaderClusterTest.nodes.get(0).httpUri()}" +task 'follow-cluster'(type: RestIntegTestTask) { + dependsOn "leader-cluster", "middle-cluster" + useCluster testClusters."leader-cluster" + useCluster testClusters."middle-cluster" + runner { + systemProperty 'tests.target_cluster', 'follow' + nonInputProperties.systemProperty 'tests.leader_host', + "${-> testClusters."leader-cluster".getAllHttpSocketURI().get(0)}" + nonInputProperties.systemProperty 'tests.middle_host', + "${-> testClusters."middle-cluster".getAllHttpSocketURI().get(0)}" + } } -task followClusterTest(type: RestIntegTestTask) {} - -followClusterTestCluster { - dependsOn middleClusterTestRunner - numNodes = 1 - clusterName = 'follow-cluster' +testClusters."follow-cluster" { + distribution = "DEFAULT" setting 'xpack.monitoring.collection.enabled', 'true' setting 'xpack.license.self_generated.type', 'trial' - setting 'cluster.remote.leader_cluster.seeds', "\"${-> leaderClusterTest.nodes.get(0).transportUri()}\"" - setting 'cluster.remote.middle_cluster.seeds', "\"${-> middleClusterTest.nodes.get(0).transportUri()}\"" - setting 'node.name', 'follow' -} - -followClusterTestRunner { - systemProperty 'tests.target_cluster', 'follow' - nonInputProperties.systemProperty 'tests.leader_host', "${-> leaderClusterTest.nodes.get(0).httpUri()}" - nonInputProperties.systemProperty 'tests.middle_host', "${-> middleClusterTest.nodes.get(0).httpUri()}" - finalizedBy 'leaderClusterTestCluster#stop' - finalizedBy 'middleClusterTestCluster#stop' + setting 'cluster.remote.leader_cluster.seeds', + { "\"${testClusters."leader-cluster".getAllTransportPortURI().join(",")}\""} + setting 'cluster.remote.middle_cluster.seeds', + { "\"${testClusters."middle-cluster".getAllTransportPortURI().join(",")}\""} } -check.dependsOn followClusterTest +check.dependsOn "follow-cluster" test.enabled = false // no unit tests for multi-cluster-search, only the rest integration test diff --git a/x-pack/plugin/ccr/qa/non-compliant-license/build.gradle b/x-pack/plugin/ccr/qa/non-compliant-license/build.gradle index 2ef358b6d735d..fa0c02eee3206 100644 --- a/x-pack/plugin/ccr/qa/non-compliant-license/build.gradle +++ b/x-pack/plugin/ccr/qa/non-compliant-license/build.gradle @@ -1,5 +1,6 @@ import org.elasticsearch.gradle.test.RestIntegTestTask +apply plugin: 'elasticsearch.testclusters' apply plugin: 'elasticsearch.standalone-test' dependencies { @@ -11,34 +12,31 @@ dependencies { testCompile project(':x-pack:plugin:ccr:qa:') } -task leaderClusterTest(type: RestIntegTestTask) { +task 'leader-cluster'(type: RestIntegTestTask) { mustRunAfter(precommit) + runner { + systemProperty 'tests.target_cluster', 'leader' + } } - -leaderClusterTestCluster { - numNodes = 1 - clusterName = 'leader-cluster' +testClusters.'leader-cluster' { + distribution = "DEFAULT" } -leaderClusterTestRunner { - systemProperty 'tests.target_cluster', 'leader' +task 'follow-cluster'(type: RestIntegTestTask) { + dependsOn 'leader-cluster' + useCluster testClusters.'leader-cluster' + runner { + systemProperty 'tests.target_cluster', 'follow' + nonInputProperties.systemProperty 'tests.leader_host', + { "${testClusters.'follow-cluster'.getAllHttpSocketURI().get(0)}" } + } } - -task followClusterTest(type: RestIntegTestTask) {} - -followClusterTestCluster { - dependsOn leaderClusterTestRunner - numNodes = 1 - clusterName = 'follow-cluster' +testClusters.'follow-cluster' { + distribution = "DEFAULT" setting 'xpack.license.self_generated.type', 'trial' - setting 'cluster.remote.leader_cluster.seeds', "\"${-> leaderClusterTest.nodes.get(0).transportUri()}\"" -} - -followClusterTestRunner { - systemProperty 'tests.target_cluster', 'follow' - nonInputProperties.systemProperty 'tests.leader_host', "${-> leaderClusterTest.nodes.get(0).httpUri()}" - finalizedBy 'leaderClusterTestCluster#stop' + setting 'cluster.remote.leader_cluster.seeds', + { "\"${testClusters.'leader-cluster'.getAllTransportPortURI().join(",")}\"" } } -check.dependsOn followClusterTest +check.dependsOn "follow-cluster" test.enabled = false diff --git a/x-pack/plugin/ccr/qa/rest/build.gradle b/x-pack/plugin/ccr/qa/rest/build.gradle index ba0c05e09791a..1e4ca1abcbce0 100644 --- a/x-pack/plugin/ccr/qa/rest/build.gradle +++ b/x-pack/plugin/ccr/qa/rest/build.gradle @@ -1,5 +1,6 @@ import org.elasticsearch.gradle.test.RestIntegTestTask +apply plugin: 'elasticsearch.testclusters' apply plugin: 'elasticsearch.standalone-test' dependencies { @@ -14,29 +15,18 @@ task restTest(type: RestIntegTestTask) { mustRunAfter(precommit) } -restTestCluster { - distribution 'default' +testClusters.restTest { + distribution = 'default' // Disable assertions in FollowingEngineAssertions, otherwise an AssertionError is thrown before // indexing a document directly in a follower index. In a rest test we like to test the exception // that is thrown in production when indexing a document directly in a follower index. - environment 'ES_JAVA_OPTS', '-da:org.elasticsearch.xpack.ccr.index.engine.FollowingEngineAssertions' + jvmArgs '-da:org.elasticsearch.xpack.ccr.index.engine.FollowingEngineAssertions' setting 'xpack.ml.enabled', 'false' setting 'xpack.monitoring.enabled', 'false' setting 'xpack.security.enabled', 'true' setting 'xpack.license.self_generated.type', 'trial' // TODO: reduce the need for superuser here - setupCommand 'setup-ccr-user', - 'bin/elasticsearch-users', 'useradd', 'ccr-user', '-p', 'ccr-user-password', '-r', 'superuser' - waitCondition = { node, ant -> - File tmpFile = new File(node.cwd, 'wait.success') - ant.get(src: "http://${node.httpUri()}/_cluster/health?wait_for_nodes=>=${numNodes}&wait_for_status=yellow", - dest: tmpFile.toString(), - username: 'ccr-user', - password: 'ccr-user-password', - ignoreerrors: true, - retries: 10) - return tmpFile.exists() - } + user username:'ccr-user', password: 'ccr-user-password', role: 'superuser' } check.dependsOn restTest diff --git a/x-pack/plugin/ccr/qa/restart/build.gradle b/x-pack/plugin/ccr/qa/restart/build.gradle index cace98d97b015..d29cb136e57b2 100644 --- a/x-pack/plugin/ccr/qa/restart/build.gradle +++ b/x-pack/plugin/ccr/qa/restart/build.gradle @@ -1,60 +1,55 @@ import org.elasticsearch.gradle.test.RestIntegTestTask +apply plugin: 'elasticsearch.testclusters' apply plugin: 'elasticsearch.standalone-test' dependencies { testCompile project(':x-pack:plugin:ccr:qa') } -task leaderClusterTest(type: RestIntegTestTask) { +task 'leader-cluster'(type: RestIntegTestTask) { mustRunAfter(precommit) + runner { + systemProperty 'tests.target_cluster', 'leader' + } } - -leaderClusterTestCluster { - numNodes = 1 - clusterName = 'leader-cluster' +testClusters.'leader-cluster' { + distribution = "DEFAULT" setting 'xpack.license.self_generated.type', 'trial' - setting 'node.name', 'leader' } -leaderClusterTestRunner { - systemProperty 'tests.target_cluster', 'leader' +task 'follow-cluster'(type: RestIntegTestTask) { + dependsOn 'leader-cluster' + useCluster testClusters.'leader-cluster' + runner { + systemProperty 'tests.target_cluster', 'follow' + nonInputProperties.systemProperty 'tests.leader_host', + "${-> testClusters.'leader-cluster'.getAllHttpSocketURI().get(0)}" + } } - -task followClusterTest(type: RestIntegTestTask) {} - -followClusterTestCluster { - dependsOn leaderClusterTestRunner - numNodes = 1 - clusterName = 'follow-cluster' +testClusters.'follow-cluster' { + distribution = "DEFAULT" setting 'xpack.monitoring.collection.enabled', 'true' setting 'xpack.license.self_generated.type', 'trial' - setting 'cluster.remote.leader_cluster.seeds', "\"${-> leaderClusterTest.nodes.get(0).transportUri()}\"" - setting 'node.name', 'follow' -} - -followClusterTestRunner { - systemProperty 'tests.target_cluster', 'follow' - nonInputProperties.systemProperty 'tests.leader_host', "${-> leaderClusterTest.nodes.get(0).httpUri()}" + setting 'cluster.remote.leader_cluster.seeds', + { "\"${testClusters.'leader-cluster'.getAllTransportPortURI().get(0)}\"" } + nameCustomization = { 'follow' } } -task followClusterRestartTest(type: RestIntegTestTask) {} - -followClusterRestartTestCluster { - dependsOn followClusterTestRunner, 'followClusterTestCluster#stop' - numNodes = 1 - clusterName = 'follow-cluster' - dataDir = { nodeNumber -> followClusterTest.nodes[0].dataDir } - setting 'xpack.monitoring.collection.enabled', 'true' - setting 'xpack.license.self_generated.type', 'trial' - setting 'cluster.remote.leader_cluster.seeds', "\"${-> leaderClusterTest.nodes.get(0).transportUri()}\"" - setting 'node.name', 'follow' -} +task followClusterRestartTest(type: Test) { + dependsOn tasks.'follow-cluster' + useCluster testClusters.'leader-cluster' + useCluster testClusters.'follow-cluster' -followClusterRestartTestRunner { + maxParallelForks = 1 + systemProperty 'tests.rest.load_packaged', 'false' systemProperty 'tests.target_cluster', 'follow-restart' - nonInputProperties.systemProperty 'tests.leader_host', "${-> leaderClusterTest.nodes.get(0).httpUri()}" - finalizedBy 'leaderClusterTestCluster#stop' + doFirst { + testClusters.'follow-cluster'.restart() + nonInputProperties.systemProperty 'tests.leader_host', "${-> testClusters.'leader-cluster'.getAllHttpSocketURI().get(0)}" + nonInputProperties.systemProperty 'tests.rest.cluster', "${-> testClusters.'follow-cluster'.getAllHttpSocketURI().join(",")}" + } + outputs.doNotCacheIf "Caching of REST tests not implemented yet", { false } } check.dependsOn followClusterRestartTest diff --git a/x-pack/plugin/ccr/qa/security/build.gradle b/x-pack/plugin/ccr/qa/security/build.gradle index 872e99051018c..a24dc8cd99fdc 100644 --- a/x-pack/plugin/ccr/qa/security/build.gradle +++ b/x-pack/plugin/ccr/qa/security/build.gradle @@ -1,5 +1,6 @@ import org.elasticsearch.gradle.test.RestIntegTestTask +apply plugin: 'elasticsearch.testclusters' apply plugin: 'elasticsearch.standalone-test' dependencies { @@ -11,69 +12,44 @@ dependencies { testCompile project(':x-pack:plugin:ccr:qa') } -task leaderClusterTest(type: RestIntegTestTask) { +task 'leader-cluster'(type: RestIntegTestTask) { mustRunAfter(precommit) + runner { + systemProperty 'tests.target_cluster', 'leader' + } } -leaderClusterTestCluster { - numNodes = 1 - clusterName = 'leader-cluster' +testClusters.'leader-cluster' { + distribution = 'Default' setting 'xpack.license.self_generated.type', 'trial' setting 'xpack.security.enabled', 'true' setting 'xpack.monitoring.enabled', 'false' - extraConfigFile 'roles.yml', 'leader-roles.yml' - setupCommand 'setupTestAdmin', - 'bin/elasticsearch-users', 'useradd', "test_admin", '-p', 'x-pack-test-password', '-r', "superuser" - setupCommand 'setupCcrUser', - 'bin/elasticsearch-users', 'useradd', "test_ccr", '-p', 'x-pack-test-password', '-r', "ccruser" - waitCondition = { node, ant -> - File tmpFile = new File(node.cwd, 'wait.success') - ant.get(src: "http://${node.httpUri()}/_cluster/health?wait_for_nodes=>=${numNodes}&wait_for_status=yellow", - dest: tmpFile.toString(), - username: 'test_admin', - password: 'x-pack-test-password', - ignoreerrors: true, - retries: 10) - return tmpFile.exists() - } + extraConfigFile 'roles.yml', file('leader-roles.yml') + user username: "test_admin", role: "superuser" + user username: "test_ccr", role: "ccruser" } -leaderClusterTestRunner { - systemProperty 'tests.target_cluster', 'leader' +task 'follow-cluster'(type: RestIntegTestTask) { + dependsOn 'leader-cluster' + useCluster testClusters.'leader-cluster' + runner { + systemProperty 'tests.target_cluster', 'follow' + nonInputProperties.systemProperty 'tests.leader_host', "${-> testClusters.'leader-cluster'.getAllHttpSocketURI().get(0)}" + } } -task followClusterTest(type: RestIntegTestTask) {} - -followClusterTestCluster { - dependsOn leaderClusterTestRunner - numNodes = 1 - clusterName = 'follow-cluster' - setting 'cluster.remote.leader_cluster.seeds', "\"${-> leaderClusterTest.nodes.get(0).transportUri()}\"" +testClusters.'follow-cluster' { + distribution = 'Default' + setting 'cluster.remote.leader_cluster.seeds', { + "\"${testClusters.'leader-cluster'.getAllTransportPortURI().join(",")}\"" + } setting 'xpack.license.self_generated.type', 'trial' setting 'xpack.security.enabled', 'true' setting 'xpack.monitoring.collection.enabled', 'true' - extraConfigFile 'roles.yml', 'follower-roles.yml' - setupCommand 'setupTestAdmin', - 'bin/elasticsearch-users', 'useradd', "test_admin", '-p', 'x-pack-test-password', '-r', "superuser" - setupCommand 'setupCcrUser', - 'bin/elasticsearch-users', 'useradd', "test_ccr", '-p', 'x-pack-test-password', '-r', "ccruser" - waitCondition = { node, ant -> - File tmpFile = new File(node.cwd, 'wait.success') - ant.get(src: "http://${node.httpUri()}/_cluster/health?wait_for_nodes=>=${numNodes}&wait_for_status=yellow", - dest: tmpFile.toString(), - username: 'test_admin', - password: 'x-pack-test-password', - ignoreerrors: true, - retries: 10) - return tmpFile.exists() - } -} - -followClusterTestRunner { - systemProperty 'tests.target_cluster', 'follow' - nonInputProperties.systemProperty 'tests.leader_host', "${-> leaderClusterTest.nodes.get(0).httpUri()}" - finalizedBy 'leaderClusterTestCluster#stop' + extraConfigFile 'roles.yml', file('follower-roles.yml') + user username: "test_admin", role: "superuser" + user username: "test_ccr", role: "ccruser" } -check.dependsOn followClusterTest +check.dependsOn 'follow-cluster' test.enabled = false // no unit tests for multi-cluster-search, only the rest integration test From 590c7d1b30196d729ddc570566b4293b5ce6c0c2 Mon Sep 17 00:00:00 2001 From: Jake Landis Date: Wed, 29 May 2019 08:35:15 -0500 Subject: [PATCH 171/224] un-mute ActivateWatchTests, bump up logging, and remove explicit sleeps (#42396) --- .../action/activate/ActivateWatchTests.java | 20 ++++++++----------- 1 file changed, 8 insertions(+), 12 deletions(-) diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/transport/action/activate/ActivateWatchTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/transport/action/activate/ActivateWatchTests.java index f89d0eee7c941..8bdf7d3d70d43 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/transport/action/activate/ActivateWatchTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/transport/action/activate/ActivateWatchTests.java @@ -40,7 +40,8 @@ import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.notNullValue; -@TestLogging("org.elasticsearch.xpack.watcher:DEBUG,org.elasticsearch.xpack.watcher.WatcherIndexingListener:TRACE") +@TestLogging("org.elasticsearch.xpack.watcher:DEBUG,org.elasticsearch.xpack.core.watcher:DEBUG," + + "org.elasticsearch.xpack.watcher.WatcherIndexingListener:TRACE") public class ActivateWatchTests extends AbstractWatcherIntegrationTestCase { @Override @@ -48,7 +49,6 @@ protected boolean timeWarped() { return false; } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/30699") public void testDeactivateAndActivate() throws Exception { PutWatchResponse putWatchResponse = watcherClient().preparePutWatch() .setId("_id") @@ -88,13 +88,9 @@ public void testDeactivateAndActivate() throws Exception { refresh(); long count1 = docCount(".watcher-history*", matchAllQuery()); - logger.info("Sleeping for 5 seconds, watch history count [{}]", count1); - Thread.sleep(5000); - refresh(); - long count2 = docCount(".watcher-history*", matchAllQuery()); - - assertThat(count2, is(count1)); + //ensure no new watch history + awaitBusy(() -> count1 != docCount(".watcher-history*", matchAllQuery()), 5, TimeUnit.SECONDS); // lets activate it again logger.info("Activating watch again"); @@ -107,11 +103,11 @@ public void testDeactivateAndActivate() throws Exception { assertThat(getWatchResponse, notNullValue()); assertThat(getWatchResponse.getStatus().state().isActive(), is(true)); - logger.info("Sleeping for another five seconds, ensuring that watch is executed"); - Thread.sleep(5000); refresh(); - long count3 = docCount(".watcher-history*", matchAllQuery()); - assertThat(count3, greaterThan(count1)); + assertBusy(() -> { + long count2 = docCount(".watcher-history*", matchAllQuery()); + assertThat(count2, greaterThan(count1)); + }); } public void testLoadWatchWithoutAState() throws Exception { From 697c793dcbabf1df0351d75a3705047ac4435dca Mon Sep 17 00:00:00 2001 From: Jake Landis Date: Wed, 29 May 2019 08:39:06 -0500 Subject: [PATCH 172/224] un-mute Watcher rolling upgrade tests and bump up logging (#42377) --- x-pack/qa/rolling-upgrade/build.gradle | 2 ++ .../rest-api-spec/test/mixed_cluster/60_watcher.yml | 8 -------- .../rest-api-spec/test/old_cluster/60_watcher.yml | 7 ------- .../rest-api-spec/test/upgraded_cluster/60_watcher.yml | 8 -------- 4 files changed, 2 insertions(+), 23 deletions(-) diff --git a/x-pack/qa/rolling-upgrade/build.gradle b/x-pack/qa/rolling-upgrade/build.gradle index d75ecbd7a55ed..8be945e701898 100644 --- a/x-pack/qa/rolling-upgrade/build.gradle +++ b/x-pack/qa/rolling-upgrade/build.gradle @@ -123,6 +123,8 @@ for (Version version : bwcVersions.wireCompatible) { setting 'xpack.security.authc.token.timeout', '60m' setting 'logger.org.elasticsearch.xpack.security.authc.TokenService', 'trace' setting 'xpack.security.audit.enabled', 'true' + setting 'logger.org.elasticsearch.xpack.watcher', 'debug' + setting 'logger.org.elasticsearch.xpack.core.watcher', 'debug' rootProject.globalInfo.ready { if (project.inFipsJvm) { setting 'xpack.security.transport.ssl.key', 'testnode.pem' diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/60_watcher.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/60_watcher.yml index 2a1dd4397dc56..1ce3f35049ecd 100644 --- a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/60_watcher.yml +++ b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/60_watcher.yml @@ -1,10 +1,6 @@ --- "CRUD watch APIs": - - skip: - reason: https://github.com/elastic/elasticsearch/issues/33185 - version: "6.7.0 - " - # no need to put watch, exists already - do: watcher.get_watch: @@ -74,10 +70,6 @@ --- "Test watcher stats output": - - skip: - reason: https://github.com/elastic/elasticsearch/issues/33185 - version: "6.7.0 - " - - do: watcher.stats: {} - match: { "manually_stopped": false } diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/60_watcher.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/60_watcher.yml index aafb7ddf239bb..6e51e6b0dc717 100644 --- a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/60_watcher.yml +++ b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/60_watcher.yml @@ -1,8 +1,5 @@ --- "CRUD watch APIs": - - skip: - reason: https://github.com/elastic/elasticsearch/issues/33185 - version: "6.7.0 - " - do: watcher.put_watch: @@ -93,10 +90,6 @@ --- "Test watcher stats output": - - skip: - reason: https://github.com/elastic/elasticsearch/issues/33185 - version: "6.7.0 - " - - do: watcher.stats: {} - match: { "manually_stopped": false } diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/60_watcher.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/60_watcher.yml index dacb437d4b449..e9574215dc44e 100644 --- a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/60_watcher.yml +++ b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/60_watcher.yml @@ -1,10 +1,6 @@ --- "CRUD watch APIs": - - skip: - reason: https://github.com/elastic/elasticsearch/issues/33185 - version: "6.7.0 - " - # no need to put watch, exists already - do: watcher.get_watch: @@ -73,10 +69,6 @@ --- "Test watcher stats output": - - skip: - reason: https://github.com/elastic/elasticsearch/issues/33185 - version: "6.7.0 - " - - do: watcher.stats: {} - match: { "manually_stopped": false } From 93f7a1ac5946aa3990799b2465a640be4377ac4e Mon Sep 17 00:00:00 2001 From: Colin Goodheart-Smithe Date: Wed, 29 May 2019 15:18:48 +0100 Subject: [PATCH 173/224] Fixes watcher test to remove typed api call --- .../rest-api-spec/test/mustache/50_webhook_url_escaping.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/mustache/50_webhook_url_escaping.yml b/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/mustache/50_webhook_url_escaping.yml index bb06aca4f95a4..ac1578a64bab7 100644 --- a/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/mustache/50_webhook_url_escaping.yml +++ b/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/mustache/50_webhook_url_escaping.yml @@ -80,7 +80,6 @@ - do: count: index: - type: log - match: {count : 2} From 0538d810feba811f31835cc07cd7ad4c5b6ed874 Mon Sep 17 00:00:00 2001 From: James Baiera Date: Wed, 29 May 2019 11:51:27 -0400 Subject: [PATCH 174/224] Muting WatcherRestIT webhook url escaping test See #41172 --- x-pack/qa/smoke-test-watcher/build.gradle | 1 - .../rest-api-spec/test/mustache/50_webhook_url_escaping.yml | 4 +++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/x-pack/qa/smoke-test-watcher/build.gradle b/x-pack/qa/smoke-test-watcher/build.gradle index 8de7448618ea1..9194c46daed01 100644 --- a/x-pack/qa/smoke-test-watcher/build.gradle +++ b/x-pack/qa/smoke-test-watcher/build.gradle @@ -12,7 +12,6 @@ integTestCluster { setting 'xpack.ml.enabled', 'false' setting 'xpack.license.self_generated.type', 'trial' setting 'logger.org.elasticsearch.xpack.watcher', 'DEBUG' - setting 'logger.org.elasticsearch.xpack.core.watcher', 'DEBUG' } integTestRunner { diff --git a/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/mustache/50_webhook_url_escaping.yml b/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/mustache/50_webhook_url_escaping.yml index ac1578a64bab7..bff41eceab3f7 100644 --- a/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/mustache/50_webhook_url_escaping.yml +++ b/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/mustache/50_webhook_url_escaping.yml @@ -1,6 +1,8 @@ --- "Test url escaping with url mustache function": - + - skip: + version: "all" + reason: "AwaitsFix https://github.com/elastic/elasticsearch/issues/41172" - do: cluster.health: wait_for_status: yellow From e78dd4013c94b15066b95f775d3a8e3b58c8bc48 Mon Sep 17 00:00:00 2001 From: lcawl Date: Wed, 29 May 2019 11:00:47 -0700 Subject: [PATCH 175/224] [DOCS] Adds more monitoring tagged regions --- .../configuring-metricbeat.asciidoc | 25 +++++++++++-------- 1 file changed, 15 insertions(+), 10 deletions(-) diff --git a/docs/reference/monitoring/configuring-metricbeat.asciidoc b/docs/reference/monitoring/configuring-metricbeat.asciidoc index e337c5bf7d345..265eba5d480ab 100644 --- a/docs/reference/monitoring/configuring-metricbeat.asciidoc +++ b/docs/reference/monitoring/configuring-metricbeat.asciidoc @@ -19,11 +19,13 @@ To learn about monitoring in general, see //NOTE: The tagged regions are re-used in the Stack Overview. -. Enable the collection of monitoring data. Set -`xpack.monitoring.collection.enabled` to `true` on each node in the production -cluster. By default, it is is disabled (`false`). -+ +. Enable the collection of monitoring data. + ++ -- +// tag::enable-collection[] +Set `xpack.monitoring.collection.enabled` to `true` on each node in the +production cluster. By default, it is is disabled (`false`). + NOTE: You can specify this setting in either the `elasticsearch.yml` on each node or across the cluster as a dynamic cluster setting. If {es} {security-features} are enabled, you must have `monitor` cluster privileges to @@ -43,15 +45,17 @@ PUT _cluster/settings } ---------------------------------- // CONSOLE - +// end::enable-collection[] For more information, see <> and <>. -- -. Disable the default collection of {es} monitoring metrics. Set -`xpack.monitoring.elasticsearch.collection.enabled` to `false` on each node in -the production cluster. -+ +. Disable the default collection of {es} monitoring metrics. + ++ -- +// tag::disable-default-collection[] +Set `xpack.monitoring.elasticsearch.collection.enabled` to `false` on each node +in the production cluster. + NOTE: You can specify this setting in either the `elasticsearch.yml` on each node or across the cluster as a dynamic cluster setting. If {es} {security-features} are enabled, you must have `monitor` cluster privileges to @@ -70,7 +74,8 @@ PUT _cluster/settings ---------------------------------- // CONSOLE -Leave `xpack.monitoring.enabled` set to its default value (`true`). +Leave `xpack.monitoring.enabled` set to its default value (`true`). +// end::disable-default-collection[] -- . {metricbeat-ref}/metricbeat-installation.html[Install {metricbeat}] on each From e0041930a1004c408edaded1e94b2840bd6a9d40 Mon Sep 17 00:00:00 2001 From: Mayya Sharipova Date: Wed, 29 May 2019 16:09:36 -0400 Subject: [PATCH 176/224] Add warning scores are floats (#42667) --- .../query-dsl/query_filter_context.asciidoc | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-) diff --git a/docs/reference/query-dsl/query_filter_context.asciidoc b/docs/reference/query-dsl/query_filter_context.asciidoc index 79f8c4bd960b0..6e7177ed19c18 100644 --- a/docs/reference/query-dsl/query_filter_context.asciidoc +++ b/docs/reference/query-dsl/query_filter_context.asciidoc @@ -52,12 +52,12 @@ GET /_search "query": { <1> "bool": { <2> "must": [ - { "match": { "title": "Search" }}, <2> - { "match": { "content": "Elasticsearch" }} <2> + { "match": { "title": "Search" }}, + { "match": { "content": "Elasticsearch" }} ], "filter": [ <3> - { "term": { "status": "published" }}, <4> - { "range": { "publish_date": { "gte": "2015-01-01" }}} <4> + { "term": { "status": "published" }}, + { "range": { "publish_date": { "gte": "2015-01-01" }}} ] } } @@ -68,11 +68,16 @@ GET /_search <2> The `bool` and two `match` clauses are used in query context, which means that they are used to score how well each document matches. -<3> The `filter` parameter indicates filter context. -<4> The `term` and `range` clauses are used in filter context. - They will filter out documents which do not match, but they will +<3> The `filter` parameter indicates filter context. Its `term` and + `range` clauses are used in filter context. They will filter out + documents which do not match, but they will not affect the score for matching documents. +WARNING: Scores calculated for queries in query context are represented +as single precision floating point numbers; they have only +24 bits for significand's precision. Score calculations that exceed the +significand's precision will be converted to floats with loss of precision. + TIP: Use query clauses in query context for conditions which should affect the score of matching documents (i.e. how well does the document match), and use all other query clauses in filter context. From 37835cacde91fdcaf6864741fdad4ede108595fa Mon Sep 17 00:00:00 2001 From: Jack Conradson Date: Wed, 29 May 2019 13:26:12 -0700 Subject: [PATCH 177/224] Allow aggregations using expressions to use _score (#42652) _score was removed from use in aggregations using expressions unintentionally when script contexts were added. This allows _score to once again be used. --- .../ExpressionAggregationScript.java | 27 +++++++++++++------ .../expression/ExpressionScriptEngine.java | 9 +++++-- .../expression/MoreExpressionTests.java | 13 +++++++-- .../test/lang_expression/20_search.yml | 20 ++++++++++++-- 4 files changed, 55 insertions(+), 14 deletions(-) diff --git a/modules/lang-expression/src/main/java/org/elasticsearch/script/expression/ExpressionAggregationScript.java b/modules/lang-expression/src/main/java/org/elasticsearch/script/expression/ExpressionAggregationScript.java index 199f52c40319d..ba7b1b68c41e5 100644 --- a/modules/lang-expression/src/main/java/org/elasticsearch/script/expression/ExpressionAggregationScript.java +++ b/modules/lang-expression/src/main/java/org/elasticsearch/script/expression/ExpressionAggregationScript.java @@ -38,20 +38,37 @@ class ExpressionAggregationScript implements AggregationScript.LeafFactory { final Expression exprScript; final SimpleBindings bindings; final DoubleValuesSource source; + final boolean needsScore; final ReplaceableConstDoubleValueSource specialValue; // _value - ExpressionAggregationScript(Expression e, SimpleBindings b, ReplaceableConstDoubleValueSource v) { + ExpressionAggregationScript(Expression e, SimpleBindings b, boolean n, ReplaceableConstDoubleValueSource v) { exprScript = e; bindings = b; source = exprScript.getDoubleValuesSource(bindings); + needsScore = n; specialValue = v; } + @Override + public boolean needs_score() { + return needsScore; + } + @Override public AggregationScript newInstance(final LeafReaderContext leaf) throws IOException { return new AggregationScript() { // Fake the scorer until setScorer is called. - DoubleValues values = source.getValues(leaf, null); + DoubleValues values = source.getValues(leaf, new DoubleValues() { + @Override + public double doubleValue() throws IOException { + return get_score().doubleValue(); + } + + @Override + public boolean advanceExact(int doc) throws IOException { + return true; + } + }); @Override public Object execute() { @@ -84,10 +101,4 @@ public void setNextAggregationValue(Object value) { } }; } - - @Override - public boolean needs_score() { - return false; - } - } diff --git a/modules/lang-expression/src/main/java/org/elasticsearch/script/expression/ExpressionScriptEngine.java b/modules/lang-expression/src/main/java/org/elasticsearch/script/expression/ExpressionScriptEngine.java index 1ad0fff49b78a..abd8738b0e4c1 100644 --- a/modules/lang-expression/src/main/java/org/elasticsearch/script/expression/ExpressionScriptEngine.java +++ b/modules/lang-expression/src/main/java/org/elasticsearch/script/expression/ExpressionScriptEngine.java @@ -221,10 +221,14 @@ private AggregationScript.LeafFactory newAggregationScript(Expression expr, Sear // NOTE: if we need to do anything complicated with bindings in the future, we can just extend Bindings, // instead of complicating SimpleBindings (which should stay simple) SimpleBindings bindings = new SimpleBindings(); + boolean needsScores = false; ReplaceableConstDoubleValueSource specialValue = null; for (String variable : expr.variables) { try { - if (variable.equals("_value")) { + if (variable.equals("_score")) { + bindings.add(new SortField("_score", SortField.Type.SCORE)); + needsScores = true; + } else if (variable.equals("_value")) { specialValue = new ReplaceableConstDoubleValueSource(); bindings.add("_value", specialValue); // noop: _value is special for aggregations, and is handled in ExpressionScriptBindings @@ -237,6 +241,7 @@ private AggregationScript.LeafFactory newAggregationScript(Expression expr, Sear // delegate valuesource creation based on field's type // there are three types of "fields" to expressions, and each one has a different "api" of variables and methods. final ValueSource valueSource = getDocValueSource(variable, lookup); + needsScores |= valueSource.getSortField(false).needsScores(); bindings.add(variable, valueSource.asDoubleValuesSource()); } } catch (Exception e) { @@ -244,7 +249,7 @@ private AggregationScript.LeafFactory newAggregationScript(Expression expr, Sear throw convertToScriptException("link error", expr.sourceText, variable, e); } } - return new ExpressionAggregationScript(expr, bindings, specialValue); + return new ExpressionAggregationScript(expr, bindings, needsScores, specialValue); } private FieldScript.LeafFactory newFieldScript(Expression expr, SearchLookup lookup, @Nullable Map vars) { diff --git a/modules/lang-expression/src/test/java/org/elasticsearch/script/expression/MoreExpressionTests.java b/modules/lang-expression/src/test/java/org/elasticsearch/script/expression/MoreExpressionTests.java index 4f8fa5f463edd..6e1c0efb7ded9 100644 --- a/modules/lang-expression/src/test/java/org/elasticsearch/script/expression/MoreExpressionTests.java +++ b/modules/lang-expression/src/test/java/org/elasticsearch/script/expression/MoreExpressionTests.java @@ -28,8 +28,8 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.index.query.QueryBuilders; -import org.elasticsearch.index.query.functionscore.ScoreFunctionBuilder; import org.elasticsearch.index.query.functionscore.ScoreFunctionBuilders; +import org.elasticsearch.index.query.functionscore.ScriptScoreFunctionBuilder; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptType; @@ -120,7 +120,7 @@ public void testScore() throws Exception { client().prepareIndex("test", "doc", "1").setSource("text", "hello goodbye"), client().prepareIndex("test", "doc", "2").setSource("text", "hello hello hello goodbye"), client().prepareIndex("test", "doc", "3").setSource("text", "hello hello goodebye")); - ScoreFunctionBuilder score = ScoreFunctionBuilders.scriptFunction( + ScriptScoreFunctionBuilder score = ScoreFunctionBuilders.scriptFunction( new Script(ScriptType.INLINE, "expression", "1 / _score", Collections.emptyMap())); SearchRequestBuilder req = client().prepareSearch().setIndices("test"); req.setQuery(QueryBuilders.functionScoreQuery(QueryBuilders.termQuery("text", "hello"), score).boostMode(CombineFunction.REPLACE)); @@ -132,6 +132,15 @@ public void testScore() throws Exception { assertEquals("1", hits.getAt(0).getId()); assertEquals("3", hits.getAt(1).getId()); assertEquals("2", hits.getAt(2).getId()); + + req = client().prepareSearch().setIndices("test"); + req.setQuery(QueryBuilders.functionScoreQuery(QueryBuilders.termQuery("text", "hello"), score).boostMode(CombineFunction.REPLACE)); + score = ScoreFunctionBuilders.scriptFunction( + new Script(ScriptType.INLINE, "expression", "1 / _score", Collections.emptyMap())); + req.addAggregation(AggregationBuilders.max("max_score").script((score).getScript())); + req.setSearchType(SearchType.DFS_QUERY_THEN_FETCH); // make sure DF is consistent + rsp = req.get(); + assertSearchResponse(rsp); } public void testDateMethods() throws Exception { diff --git a/modules/lang-expression/src/test/resources/rest-api-spec/test/lang_expression/20_search.yml b/modules/lang-expression/src/test/resources/rest-api-spec/test/lang_expression/20_search.yml index fd8a2ebf1183b..33bad01a1232a 100644 --- a/modules/lang-expression/src/test/resources/rest-api-spec/test/lang_expression/20_search.yml +++ b/modules/lang-expression/src/test/resources/rest-api-spec/test/lang_expression/20_search.yml @@ -25,9 +25,25 @@ setup: rest_total_hits_as_int: true body: script_fields: - my_field : + my_field: script: lang: expression source: 'doc["age"].value + 19' - - match: { hits.hits.0.fields.my_field.0: 42.0 } + - match: { hits.hits.0.fields.my_field.0: 42.0 } + +--- +"Expressions aggregation score test": + + - do: + search: + rest_total_hits_as_int: true + body: + aggs: + max_score: + max: + script: + lang: expression + source: '_score' + + - match: { aggregations.max_score.value: 1.0 } From 35b6239a8d44f0bb5b15437c593a38ead8373ed4 Mon Sep 17 00:00:00 2001 From: Ojas gulati Date: Thu, 30 May 2019 02:29:41 +0530 Subject: [PATCH 178/224] Refactor HLRC RequestConverters parameters to be more explicit (#42128) The existing `RequestConverters.Params` is confusing, because it wraps an underlying request object and mutations of the `Params` object actually mutate the `Request` that was used in the construction of the `Params`. This leads to a situation where we create a `RequestConverter.Params` object, mutate it, and then it appears nothing happens to it - it appears to be unused. What happens behind the scenes is that the Request object is mutated when methods on `Params` are invoked. This results in unclear, confusing code where mutating one object changes another with no obvious connection. This commit refactors `RequestConverters.Params` to be a simple helper class to produce a `Map` which must be passed explicitly to a Request object. This makes it apparent that the `Params` are actually used, and that they have an effect on the `request` object explicit and easier to understand. Co-authored-by: Ojas Gulati --- .../client/CcrRequestConverters.java | 3 +- .../client/ClusterRequestConverters.java | 12 +- .../client/DataFrameRequestConverters.java | 6 +- .../IndexLifecycleRequestConverters.java | 27 ++-- .../client/IndicesRequestConverters.java | 123 ++++++++++-------- .../client/IngestRequestConverters.java | 14 +- .../client/LicenseRequestConverters.java | 15 ++- .../client/MLRequestConverters.java | 37 +++--- .../client/RequestConverters.java | 84 +++++++----- .../client/RollupRequestConverters.java | 3 +- .../client/SecurityRequestConverters.java | 38 ++++-- .../client/SnapshotRequestConverters.java | 29 +++-- .../client/TasksRequestConverters.java | 9 +- .../client/WatcherRequestConverters.java | 10 +- .../client/XPackRequestConverters.java | 3 +- .../org/elasticsearch/client/Request.java | 4 + 16 files changed, 259 insertions(+), 158 deletions(-) diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/CcrRequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/CcrRequestConverters.java index a3f5d7e79fda7..8272e5d73bb4f 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/CcrRequestConverters.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/CcrRequestConverters.java @@ -48,9 +48,10 @@ static Request putFollow(PutFollowRequest putFollowRequest) throws IOException { .addPathPartAsIs("_ccr", "follow") .build(); Request request = new Request(HttpPut.METHOD_NAME, endpoint); - RequestConverters.Params parameters = new RequestConverters.Params(request); + RequestConverters.Params parameters = new RequestConverters.Params(); parameters.withWaitForActiveShards(putFollowRequest.waitForActiveShards()); request.setEntity(createEntity(putFollowRequest, REQUEST_BODY_CONTENT_TYPE)); + request.addParameters(parameters.asMap()); return request; } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ClusterRequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ClusterRequestConverters.java index 4da8d128b98d1..a246402b505cc 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/ClusterRequestConverters.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ClusterRequestConverters.java @@ -36,22 +36,21 @@ private ClusterRequestConverters() {} static Request clusterPutSettings(ClusterUpdateSettingsRequest clusterUpdateSettingsRequest) throws IOException { Request request = new Request(HttpPut.METHOD_NAME, "/_cluster/settings"); - RequestConverters.Params parameters = new RequestConverters.Params(request); + RequestConverters.Params parameters = new RequestConverters.Params(); parameters.withTimeout(clusterUpdateSettingsRequest.timeout()); parameters.withMasterTimeout(clusterUpdateSettingsRequest.masterNodeTimeout()); - + request.addParameters(parameters.asMap()); request.setEntity(RequestConverters.createEntity(clusterUpdateSettingsRequest, RequestConverters.REQUEST_BODY_CONTENT_TYPE)); return request; } static Request clusterGetSettings(ClusterGetSettingsRequest clusterGetSettingsRequest) throws IOException { Request request = new Request(HttpGet.METHOD_NAME, "/_cluster/settings"); - - RequestConverters.Params parameters = new RequestConverters.Params(request); + RequestConverters.Params parameters = new RequestConverters.Params(); parameters.withLocal(clusterGetSettingsRequest.local()); parameters.withIncludeDefaults(clusterGetSettingsRequest.includeDefaults()); parameters.withMasterTimeout(clusterGetSettingsRequest.masterNodeTimeout()); - + request.addParameters(parameters.asMap()); return request; } @@ -63,7 +62,7 @@ static Request clusterHealth(ClusterHealthRequest healthRequest) { .build(); Request request = new Request(HttpGet.METHOD_NAME, endpoint); - new RequestConverters.Params(request) + RequestConverters.Params params = new RequestConverters.Params() .withWaitForStatus(healthRequest.waitForStatus()) .withWaitForNoRelocatingShards(healthRequest.waitForNoRelocatingShards()) .withWaitForNoInitializingShards(healthRequest.waitForNoInitializingShards()) @@ -74,6 +73,7 @@ static Request clusterHealth(ClusterHealthRequest healthRequest) { .withMasterTimeout(healthRequest.masterNodeTimeout()) .withLocal(healthRequest.local()) .withLevel(healthRequest.level()); + request.addParameters(params.asMap()); return request; } } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/DataFrameRequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/DataFrameRequestConverters.java index 0e887994da7bf..00d2651a1aeb8 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/DataFrameRequestConverters.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/DataFrameRequestConverters.java @@ -82,10 +82,11 @@ static Request startDataFrameTransform(StartDataFrameTransformRequest startReque .addPathPartAsIs("_start") .build(); Request request = new Request(HttpPost.METHOD_NAME, endpoint); - RequestConverters.Params params = new RequestConverters.Params(request); + RequestConverters.Params params = new RequestConverters.Params(); if (startRequest.getTimeout() != null) { params.withTimeout(startRequest.getTimeout()); } + request.addParameters(params.asMap()); return request; } @@ -96,13 +97,14 @@ static Request stopDataFrameTransform(StopDataFrameTransformRequest stopRequest) .addPathPartAsIs("_stop") .build(); Request request = new Request(HttpPost.METHOD_NAME, endpoint); - RequestConverters.Params params = new RequestConverters.Params(request); + RequestConverters.Params params = new RequestConverters.Params(); if (stopRequest.getWaitForCompletion() != null) { params.withWaitForCompletion(stopRequest.getWaitForCompletion()); } if (stopRequest.getTimeout() != null) { params.withTimeout(stopRequest.getTimeout()); } + request.addParameters(params.asMap()); return request; } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/IndexLifecycleRequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/IndexLifecycleRequestConverters.java index ff6d8a90cdb2d..f39f2b36cebc0 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/IndexLifecycleRequestConverters.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/IndexLifecycleRequestConverters.java @@ -44,9 +44,10 @@ static Request getLifecyclePolicy(GetLifecyclePolicyRequest getLifecyclePolicyRe String endpoint = new RequestConverters.EndpointBuilder().addPathPartAsIs("_ilm/policy") .addCommaSeparatedPathParts(getLifecyclePolicyRequest.getPolicyNames()).build(); Request request = new Request(HttpGet.METHOD_NAME, endpoint); - RequestConverters.Params params = new RequestConverters.Params(request); + RequestConverters.Params params = new RequestConverters.Params(); params.withMasterTimeout(getLifecyclePolicyRequest.masterNodeTimeout()); params.withTimeout(getLifecyclePolicyRequest.timeout()); + request.addParameters(params.asMap()); return request; } @@ -56,9 +57,10 @@ static Request putLifecyclePolicy(PutLifecyclePolicyRequest putLifecycleRequest) .addPathPartAsIs(putLifecycleRequest.getName()) .build(); Request request = new Request(HttpPut.METHOD_NAME, endpoint); - RequestConverters.Params params = new RequestConverters.Params(request); + RequestConverters.Params params = new RequestConverters.Params(); params.withMasterTimeout(putLifecycleRequest.masterNodeTimeout()); params.withTimeout(putLifecycleRequest.timeout()); + request.addParameters(params.asMap()); request.setEntity(RequestConverters.createEntity(putLifecycleRequest, RequestConverters.REQUEST_BODY_CONTENT_TYPE)); return request; } @@ -69,9 +71,10 @@ static Request deleteLifecyclePolicy(DeleteLifecyclePolicyRequest deleteLifecycl .addPathPartAsIs("_ilm/policy") .addPathPartAsIs(deleteLifecyclePolicyRequest.getLifecyclePolicy()) .build()); - RequestConverters.Params params = new RequestConverters.Params(request); + RequestConverters.Params params = new RequestConverters.Params(); params.withMasterTimeout(deleteLifecyclePolicyRequest.masterNodeTimeout()); params.withTimeout(deleteLifecyclePolicyRequest.timeout()); + request.addParameters(params.asMap()); return request; } @@ -83,9 +86,10 @@ static Request removeIndexLifecyclePolicy(RemoveIndexLifecyclePolicyRequest remo .addCommaSeparatedPathParts(indices) .addPathPartAsIs("_ilm", "remove") .build()); - RequestConverters.Params params = new RequestConverters.Params(request); + RequestConverters.Params params = new RequestConverters.Params(); params.withIndicesOptions(removePolicyRequest.indicesOptions()); params.withMasterTimeout(removePolicyRequest.masterNodeTimeout()); + request.addParameters(params.asMap()); return request; } @@ -95,9 +99,10 @@ static Request startILM(StartILMRequest startILMRequest) { .addPathPartAsIs("_ilm") .addPathPartAsIs("start") .build()); - RequestConverters.Params params = new RequestConverters.Params(request); + RequestConverters.Params params = new RequestConverters.Params(); params.withMasterTimeout(startILMRequest.masterNodeTimeout()); params.withTimeout(startILMRequest.timeout()); + request.addParameters(params.asMap()); return request; } @@ -107,9 +112,10 @@ static Request stopILM(StopILMRequest stopILMRequest) { .addPathPartAsIs("_ilm") .addPathPartAsIs("stop") .build()); - RequestConverters.Params params = new RequestConverters.Params(request); + RequestConverters.Params params = new RequestConverters.Params(); params.withMasterTimeout(stopILMRequest.masterNodeTimeout()); params.withTimeout(stopILMRequest.timeout()); + request.addParameters(params.asMap()); return request; } @@ -119,9 +125,10 @@ static Request lifecycleManagementStatus(LifecycleManagementStatusRequest lifecy .addPathPartAsIs("_ilm") .addPathPartAsIs("status") .build()); - RequestConverters.Params params = new RequestConverters.Params(request); + RequestConverters.Params params = new RequestConverters.Params(); params.withMasterTimeout(lifecycleManagementStatusRequest.masterNodeTimeout()); params.withTimeout(lifecycleManagementStatusRequest.timeout()); + request.addParameters(params.asMap()); return request; } @@ -132,9 +139,10 @@ static Request explainLifecycle(ExplainLifecycleRequest explainLifecycleRequest) .addPathPartAsIs("_ilm") .addPathPartAsIs("explain") .build()); - RequestConverters.Params params = new RequestConverters.Params(request); + RequestConverters.Params params = new RequestConverters.Params(); params.withIndicesOptions(explainLifecycleRequest.indicesOptions()); params.withMasterTimeout(explainLifecycleRequest.masterNodeTimeout()); + request.addParameters(params.asMap()); return request; } @@ -145,9 +153,10 @@ static Request retryLifecycle(RetryLifecyclePolicyRequest retryLifecyclePolicyRe .addPathPartAsIs("_ilm") .addPathPartAsIs("retry") .build()); - RequestConverters.Params params = new RequestConverters.Params(request); + RequestConverters.Params params = new RequestConverters.Params(); params.withMasterTimeout(retryLifecyclePolicyRequest.masterNodeTimeout()); params.withTimeout(retryLifecyclePolicyRequest.timeout()); + request.addParameters(params.asMap()); return request; } } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesRequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesRequestConverters.java index cc5adffd33483..5cbab84313886 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesRequestConverters.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesRequestConverters.java @@ -67,10 +67,11 @@ static Request deleteIndex(DeleteIndexRequest deleteIndexRequest) { String endpoint = RequestConverters.endpoint(deleteIndexRequest.indices()); Request request = new Request(HttpDelete.METHOD_NAME, endpoint); - RequestConverters.Params parameters = new RequestConverters.Params(request); + RequestConverters.Params parameters = new RequestConverters.Params(); parameters.withTimeout(deleteIndexRequest.timeout()); parameters.withMasterTimeout(deleteIndexRequest.masterNodeTimeout()); parameters.withIndicesOptions(deleteIndexRequest.indicesOptions()); + request.addParameters(parameters.asMap()); return request; } @@ -78,11 +79,12 @@ static Request openIndex(OpenIndexRequest openIndexRequest) { String endpoint = RequestConverters.endpoint(openIndexRequest.indices(), "_open"); Request request = new Request(HttpPost.METHOD_NAME, endpoint); - RequestConverters.Params parameters = new RequestConverters.Params(request); + RequestConverters.Params parameters = new RequestConverters.Params(); parameters.withTimeout(openIndexRequest.timeout()); parameters.withMasterTimeout(openIndexRequest.masterNodeTimeout()); parameters.withWaitForActiveShards(openIndexRequest.waitForActiveShards()); parameters.withIndicesOptions(openIndexRequest.indicesOptions()); + request.addParameters(parameters.asMap()); return request; } @@ -90,10 +92,11 @@ static Request closeIndex(CloseIndexRequest closeIndexRequest) { String endpoint = RequestConverters.endpoint(closeIndexRequest.indices(), "_close"); Request request = new Request(HttpPost.METHOD_NAME, endpoint); - RequestConverters.Params parameters = new RequestConverters.Params(request); + RequestConverters.Params parameters = new RequestConverters.Params(); parameters.withTimeout(closeIndexRequest.timeout()); parameters.withMasterTimeout(closeIndexRequest.masterNodeTimeout()); parameters.withIndicesOptions(closeIndexRequest.indicesOptions()); + request.addParameters(parameters.asMap()); return request; } @@ -102,11 +105,11 @@ static Request createIndex(CreateIndexRequest createIndexRequest) throws IOExcep .addPathPart(createIndexRequest.index()).build(); Request request = new Request(HttpPut.METHOD_NAME, endpoint); - RequestConverters.Params parameters = new RequestConverters.Params(request); + RequestConverters.Params parameters = new RequestConverters.Params(); parameters.withTimeout(createIndexRequest.timeout()); parameters.withMasterTimeout(createIndexRequest.masterNodeTimeout()); parameters.withWaitForActiveShards(createIndexRequest.waitForActiveShards()); - + request.addParameters(parameters.asMap()); request.setEntity(RequestConverters.createEntity(createIndexRequest, RequestConverters.REQUEST_BODY_CONTENT_TYPE)); return request; } @@ -116,12 +119,12 @@ static Request createIndex(org.elasticsearch.action.admin.indices.create.CreateI String endpoint = RequestConverters.endpoint(createIndexRequest.indices()); Request request = new Request(HttpPut.METHOD_NAME, endpoint); - RequestConverters.Params parameters = new RequestConverters.Params(request); + RequestConverters.Params parameters = new RequestConverters.Params(); parameters.withTimeout(createIndexRequest.timeout()); parameters.withMasterTimeout(createIndexRequest.masterNodeTimeout()); parameters.withWaitForActiveShards(createIndexRequest.waitForActiveShards()); parameters.putParam(INCLUDE_TYPE_NAME_PARAMETER, "true"); - + request.addParameters(parameters.asMap()); request.setEntity(RequestConverters.createEntity(createIndexRequest, RequestConverters.REQUEST_BODY_CONTENT_TYPE)); return request; } @@ -129,10 +132,10 @@ static Request createIndex(org.elasticsearch.action.admin.indices.create.CreateI static Request updateAliases(IndicesAliasesRequest indicesAliasesRequest) throws IOException { Request request = new Request(HttpPost.METHOD_NAME, "/_aliases"); - RequestConverters.Params parameters = new RequestConverters.Params(request); + RequestConverters.Params parameters = new RequestConverters.Params(); parameters.withTimeout(indicesAliasesRequest.timeout()); parameters.withMasterTimeout(indicesAliasesRequest.masterNodeTimeout()); - + request.addParameters(parameters.asMap()); request.setEntity(RequestConverters.createEntity(indicesAliasesRequest, RequestConverters.REQUEST_BODY_CONTENT_TYPE)); return request; } @@ -141,9 +144,10 @@ static Request updateAliases(IndicesAliasesRequest indicesAliasesRequest) throws static Request putMapping(PutMappingRequest putMappingRequest) throws IOException { Request request = new Request(HttpPut.METHOD_NAME, RequestConverters.endpoint(putMappingRequest.indices(), "_mapping")); - RequestConverters.Params parameters = new RequestConverters.Params(request); + RequestConverters.Params parameters = new RequestConverters.Params(); parameters.withTimeout(putMappingRequest.timeout()); parameters.withMasterTimeout(putMappingRequest.masterNodeTimeout()); + request.addParameters(parameters.asMap()); request.setEntity(RequestConverters.createEntity(putMappingRequest, RequestConverters.REQUEST_BODY_CONTENT_TYPE)); return request; } @@ -162,11 +166,11 @@ static Request putMapping(org.elasticsearch.action.admin.indices.mapping.put.Put Request request = new Request(HttpPut.METHOD_NAME, RequestConverters.endpoint(putMappingRequest.indices(), "_mapping", putMappingRequest.type())); - RequestConverters.Params parameters = new RequestConverters.Params(request); + RequestConverters.Params parameters = new RequestConverters.Params(); parameters.withTimeout(putMappingRequest.timeout()); parameters.withMasterTimeout(putMappingRequest.masterNodeTimeout()); parameters.putParam(INCLUDE_TYPE_NAME_PARAMETER, "true"); - + request.addParameters(parameters.asMap()); request.setEntity(RequestConverters.createEntity(putMappingRequest, RequestConverters.REQUEST_BODY_CONTENT_TYPE)); return request; } @@ -176,11 +180,11 @@ static Request getMappings(GetMappingsRequest getMappingsRequest) { Request request = new Request(HttpGet.METHOD_NAME, RequestConverters.endpoint(indices, "_mapping")); - RequestConverters.Params parameters = new RequestConverters.Params(request); + RequestConverters.Params parameters = new RequestConverters.Params(); parameters.withMasterTimeout(getMappingsRequest.masterNodeTimeout()); parameters.withIndicesOptions(getMappingsRequest.indicesOptions()); parameters.withLocal(getMappingsRequest.local()); - + request.addParameters(parameters.asMap()); return request; } @@ -191,12 +195,12 @@ static Request getMappings(org.elasticsearch.action.admin.indices.mapping.get.Ge Request request = new Request(HttpGet.METHOD_NAME, RequestConverters.endpoint(indices, "_mapping", types)); - RequestConverters.Params parameters = new RequestConverters.Params(request); + RequestConverters.Params parameters = new RequestConverters.Params(); parameters.withMasterTimeout(getMappingsRequest.masterNodeTimeout()); parameters.withIndicesOptions(getMappingsRequest.indicesOptions()); parameters.withLocal(getMappingsRequest.local()); parameters.putParam(INCLUDE_TYPE_NAME_PARAMETER, "true"); - + request.addParameters(parameters.asMap()); return request; } @@ -213,11 +217,11 @@ static Request getFieldMapping(GetFieldMappingsRequest getFieldMappingsRequest) Request request = new Request(HttpGet.METHOD_NAME, endpoint); - RequestConverters.Params parameters = new RequestConverters.Params(request); + RequestConverters.Params parameters = new RequestConverters.Params(); parameters.withIndicesOptions(getFieldMappingsRequest.indicesOptions()); parameters.withIncludeDefaults(getFieldMappingsRequest.includeDefaults()); parameters.withLocal(getFieldMappingsRequest.local()); - + request.addParameters(parameters.asMap()); return request; } @@ -234,12 +238,12 @@ static Request getFieldMapping(org.elasticsearch.action.admin.indices.mapping.ge Request request = new Request(HttpGet.METHOD_NAME, endpoint); - RequestConverters.Params parameters = new RequestConverters.Params(request); + RequestConverters.Params parameters = new RequestConverters.Params(); parameters.withIndicesOptions(getFieldMappingsRequest.indicesOptions()); parameters.withIncludeDefaults(getFieldMappingsRequest.includeDefaults()); parameters.withLocal(getFieldMappingsRequest.local()); parameters.putParam(INCLUDE_TYPE_NAME_PARAMETER, "true"); - + request.addParameters(parameters.asMap()); return request; } @@ -247,8 +251,9 @@ static Request refresh(RefreshRequest refreshRequest) { String[] indices = refreshRequest.indices() == null ? Strings.EMPTY_ARRAY : refreshRequest.indices(); Request request = new Request(HttpPost.METHOD_NAME, RequestConverters.endpoint(indices, "_refresh")); - RequestConverters.Params parameters = new RequestConverters.Params(request); + RequestConverters.Params parameters = new RequestConverters.Params(); parameters.withIndicesOptions(refreshRequest.indicesOptions()); + request.addParameters(parameters.asMap()); return request; } @@ -256,18 +261,20 @@ static Request flush(FlushRequest flushRequest) { String[] indices = flushRequest.indices() == null ? Strings.EMPTY_ARRAY : flushRequest.indices(); Request request = new Request(HttpPost.METHOD_NAME, RequestConverters.endpoint(indices, "_flush")); - RequestConverters.Params parameters = new RequestConverters.Params(request); + RequestConverters.Params parameters = new RequestConverters.Params(); parameters.withIndicesOptions(flushRequest.indicesOptions()); parameters.putParam("wait_if_ongoing", Boolean.toString(flushRequest.waitIfOngoing())); parameters.putParam("force", Boolean.toString(flushRequest.force())); + request.addParameters(parameters.asMap()); return request; } static Request flushSynced(SyncedFlushRequest syncedFlushRequest) { String[] indices = syncedFlushRequest.indices() == null ? Strings.EMPTY_ARRAY : syncedFlushRequest.indices(); Request request = new Request(HttpPost.METHOD_NAME, RequestConverters.endpoint(indices, "_flush/synced")); - RequestConverters.Params parameters = new RequestConverters.Params(request); + RequestConverters.Params parameters = new RequestConverters.Params(); parameters.withIndicesOptions(syncedFlushRequest.indicesOptions()); + request.addParameters(parameters.asMap()); return request; } @@ -275,11 +282,12 @@ static Request forceMerge(ForceMergeRequest forceMergeRequest) { String[] indices = forceMergeRequest.indices() == null ? Strings.EMPTY_ARRAY : forceMergeRequest.indices(); Request request = new Request(HttpPost.METHOD_NAME, RequestConverters.endpoint(indices, "_forcemerge")); - RequestConverters.Params parameters = new RequestConverters.Params(request); + RequestConverters.Params parameters = new RequestConverters.Params(); parameters.withIndicesOptions(forceMergeRequest.indicesOptions()); parameters.putParam("max_num_segments", Integer.toString(forceMergeRequest.maxNumSegments())); parameters.putParam("only_expunge_deletes", Boolean.toString(forceMergeRequest.onlyExpungeDeletes())); parameters.putParam("flush", Boolean.toString(forceMergeRequest.flush())); + request.addParameters(parameters.asMap()); return request; } @@ -287,12 +295,13 @@ static Request clearCache(ClearIndicesCacheRequest clearIndicesCacheRequest) { String[] indices = clearIndicesCacheRequest.indices() == null ? Strings.EMPTY_ARRAY :clearIndicesCacheRequest.indices(); Request request = new Request(HttpPost.METHOD_NAME, RequestConverters.endpoint(indices, "_cache/clear")); - RequestConverters.Params parameters = new RequestConverters.Params(request); + RequestConverters.Params parameters = new RequestConverters.Params(); parameters.withIndicesOptions(clearIndicesCacheRequest.indicesOptions()); parameters.putParam("query", Boolean.toString(clearIndicesCacheRequest.queryCache())); parameters.putParam("fielddata", Boolean.toString(clearIndicesCacheRequest.fieldDataCache())); parameters.putParam("request", Boolean.toString(clearIndicesCacheRequest.requestCache())); parameters.putParam("fields", String.join(",", clearIndicesCacheRequest.fields())); + request.addParameters(parameters.asMap()); return request; } @@ -306,9 +315,10 @@ static Request existsAlias(GetAliasesRequest getAliasesRequest) { Request request = new Request(HttpHead.METHOD_NAME, RequestConverters.endpoint(indices, "_alias", aliases)); - RequestConverters.Params params = new RequestConverters.Params(request); + RequestConverters.Params params = new RequestConverters.Params(); params.withIndicesOptions(getAliasesRequest.indicesOptions()); params.withLocal(getAliasesRequest.local()); + request.addParameters(params.asMap()); return request; } @@ -332,11 +342,11 @@ private static Request resize(ResizeRequest resizeRequest) throws IOException { .addPathPart(resizeRequest.getTargetIndexRequest().index()).build(); Request request = new Request(HttpPut.METHOD_NAME, endpoint); - RequestConverters.Params params = new RequestConverters.Params(request); + RequestConverters.Params params = new RequestConverters.Params(); params.withTimeout(resizeRequest.timeout()); params.withMasterTimeout(resizeRequest.masterNodeTimeout()); params.withWaitForActiveShards(resizeRequest.getTargetIndexRequest().waitForActiveShards()); - + request.addParameters(params.asMap()); request.setEntity(RequestConverters.createEntity(resizeRequest, RequestConverters.REQUEST_BODY_CONTENT_TYPE)); return request; } @@ -346,14 +356,14 @@ static Request rollover(RolloverRequest rolloverRequest) throws IOException { .addPathPart(rolloverRequest.getNewIndexName()).build(); Request request = new Request(HttpPost.METHOD_NAME, endpoint); - RequestConverters.Params params = new RequestConverters.Params(request); + RequestConverters.Params params = new RequestConverters.Params(); params.withTimeout(rolloverRequest.timeout()); params.withMasterTimeout(rolloverRequest.masterNodeTimeout()); params.withWaitForActiveShards(rolloverRequest.getCreateIndexRequest().waitForActiveShards()); if (rolloverRequest.isDryRun()) { params.putParam("dry_run", Boolean.TRUE.toString()); } - + request.addParameters(params.asMap()); request.setEntity(RequestConverters.createEntity(rolloverRequest, RequestConverters.REQUEST_BODY_CONTENT_TYPE)); return request; } @@ -364,7 +374,7 @@ static Request rollover(org.elasticsearch.action.admin.indices.rollover.Rollover .addPathPart(rolloverRequest.getNewIndexName()).build(); Request request = new Request(HttpPost.METHOD_NAME, endpoint); - RequestConverters.Params params = new RequestConverters.Params(request); + RequestConverters.Params params = new RequestConverters.Params(); params.withTimeout(rolloverRequest.timeout()); params.withMasterTimeout(rolloverRequest.masterNodeTimeout()); params.withWaitForActiveShards(rolloverRequest.getCreateIndexRequest().waitForActiveShards()); @@ -373,7 +383,7 @@ static Request rollover(org.elasticsearch.action.admin.indices.rollover.Rollover } params.putParam(INCLUDE_TYPE_NAME_PARAMETER, "true"); request.setEntity(RequestConverters.createEntity(rolloverRequest, RequestConverters.REQUEST_BODY_CONTENT_TYPE)); - + request.addParameters(params.asMap()); return request; } @@ -384,12 +394,12 @@ static Request getSettings(GetSettingsRequest getSettingsRequest) { String endpoint = RequestConverters.endpoint(indices, "_settings", names); Request request = new Request(HttpGet.METHOD_NAME, endpoint); - RequestConverters.Params params = new RequestConverters.Params(request); + RequestConverters.Params params = new RequestConverters.Params(); params.withIndicesOptions(getSettingsRequest.indicesOptions()); params.withLocal(getSettingsRequest.local()); params.withIncludeDefaults(getSettingsRequest.includeDefaults()); params.withMasterTimeout(getSettingsRequest.masterNodeTimeout()); - + request.addParameters(params.asMap()); return request; } @@ -404,14 +414,14 @@ static Request getIndex(org.elasticsearch.action.admin.indices.get.GetIndexReque String endpoint = RequestConverters.endpoint(indices); Request request = new Request(HttpGet.METHOD_NAME, endpoint); - RequestConverters.Params params = new RequestConverters.Params(request); + RequestConverters.Params params = new RequestConverters.Params(); params.withIndicesOptions(getIndexRequest.indicesOptions()); params.withLocal(getIndexRequest.local()); params.withIncludeDefaults(getIndexRequest.includeDefaults()); params.withHuman(getIndexRequest.humanReadable()); params.withMasterTimeout(getIndexRequest.masterNodeTimeout()); params.putParam(INCLUDE_TYPE_NAME_PARAMETER, "true"); - + request.addParameters(params.asMap()); return request; } @@ -421,13 +431,13 @@ static Request getIndex(GetIndexRequest getIndexRequest) { String endpoint = RequestConverters.endpoint(indices); Request request = new Request(HttpGet.METHOD_NAME, endpoint); - RequestConverters.Params params = new RequestConverters.Params(request); + RequestConverters.Params params = new RequestConverters.Params(); params.withIndicesOptions(getIndexRequest.indicesOptions()); params.withLocal(getIndexRequest.local()); params.withIncludeDefaults(getIndexRequest.includeDefaults()); params.withHuman(getIndexRequest.humanReadable()); params.withMasterTimeout(getIndexRequest.masterNodeTimeout()); - + request.addParameters(params.asMap()); return request; } @@ -444,12 +454,13 @@ static Request indicesExist(org.elasticsearch.action.admin.indices.get.GetIndexR String endpoint = RequestConverters.endpoint(getIndexRequest.indices(), ""); Request request = new Request(HttpHead.METHOD_NAME, endpoint); - RequestConverters.Params params = new RequestConverters.Params(request); + RequestConverters.Params params = new RequestConverters.Params(); params.withLocal(getIndexRequest.local()); params.withHuman(getIndexRequest.humanReadable()); params.withIndicesOptions(getIndexRequest.indicesOptions()); params.withIncludeDefaults(getIndexRequest.includeDefaults()); params.putParam(INCLUDE_TYPE_NAME_PARAMETER, "true"); + request.addParameters(params.asMap()); return request; } @@ -461,11 +472,12 @@ static Request indicesExist(GetIndexRequest getIndexRequest) { String endpoint = RequestConverters.endpoint(getIndexRequest.indices(), ""); Request request = new Request(HttpHead.METHOD_NAME, endpoint); - RequestConverters.Params params = new RequestConverters.Params(request); + RequestConverters.Params params = new RequestConverters.Params(); params.withLocal(getIndexRequest.local()); params.withHuman(getIndexRequest.humanReadable()); params.withIndicesOptions(getIndexRequest.indicesOptions()); params.withIncludeDefaults(getIndexRequest.includeDefaults()); + request.addParameters(params.asMap()); return request; } @@ -473,12 +485,12 @@ static Request indexPutSettings(UpdateSettingsRequest updateSettingsRequest) thr String[] indices = updateSettingsRequest.indices() == null ? Strings.EMPTY_ARRAY : updateSettingsRequest.indices(); Request request = new Request(HttpPut.METHOD_NAME, RequestConverters.endpoint(indices, "_settings")); - RequestConverters.Params parameters = new RequestConverters.Params(request); + RequestConverters.Params parameters = new RequestConverters.Params(); parameters.withTimeout(updateSettingsRequest.timeout()); parameters.withMasterTimeout(updateSettingsRequest.masterNodeTimeout()); parameters.withIndicesOptions(updateSettingsRequest.indicesOptions()); parameters.withPreserveExisting(updateSettingsRequest.isPreserveExisting()); - + request.addParameters(parameters.asMap()); request.setEntity(RequestConverters.createEntity(updateSettingsRequest, RequestConverters.REQUEST_BODY_CONTENT_TYPE)); return request; } @@ -493,7 +505,7 @@ static Request putTemplate(org.elasticsearch.action.admin.indices.template.put.P String endpoint = new RequestConverters.EndpointBuilder().addPathPartAsIs("_template") .addPathPart(putIndexTemplateRequest.name()).build(); Request request = new Request(HttpPut.METHOD_NAME, endpoint); - RequestConverters.Params params = new RequestConverters.Params(request); + RequestConverters.Params params = new RequestConverters.Params(); params.withMasterTimeout(putIndexTemplateRequest.masterNodeTimeout()); if (putIndexTemplateRequest.create()) { params.putParam("create", Boolean.TRUE.toString()); @@ -502,6 +514,7 @@ static Request putTemplate(org.elasticsearch.action.admin.indices.template.put.P params.putParam("cause", putIndexTemplateRequest.cause()); } params.putParam(INCLUDE_TYPE_NAME_PARAMETER, "true"); + request.addParameters(params.asMap()); request.setEntity(RequestConverters.createEntity(putIndexTemplateRequest, RequestConverters.REQUEST_BODY_CONTENT_TYPE)); return request; } @@ -510,7 +523,7 @@ static Request putTemplate(PutIndexTemplateRequest putIndexTemplateRequest) thro String endpoint = new RequestConverters.EndpointBuilder().addPathPartAsIs("_template") .addPathPart(putIndexTemplateRequest.name()).build(); Request request = new Request(HttpPut.METHOD_NAME, endpoint); - RequestConverters.Params params = new RequestConverters.Params(request); + RequestConverters.Params params = new RequestConverters.Params(); params.withMasterTimeout(putIndexTemplateRequest.masterNodeTimeout()); if (putIndexTemplateRequest.create()) { params.putParam("create", Boolean.TRUE.toString()); @@ -518,6 +531,7 @@ static Request putTemplate(PutIndexTemplateRequest putIndexTemplateRequest) thro if (Strings.hasText(putIndexTemplateRequest.cause())) { params.putParam("cause", putIndexTemplateRequest.cause()); } + request.addParameters(params.asMap()); request.setEntity(RequestConverters.createEntity(putIndexTemplateRequest, RequestConverters.REQUEST_BODY_CONTENT_TYPE)); return request; } @@ -527,11 +541,12 @@ static Request validateQuery(ValidateQueryRequest validateQueryRequest) throws I String[] types = validateQueryRequest.types() == null || indices.length <= 0 ? Strings.EMPTY_ARRAY : validateQueryRequest.types(); String endpoint = RequestConverters.endpoint(indices, types, "_validate/query"); Request request = new Request(HttpGet.METHOD_NAME, endpoint); - RequestConverters.Params params = new RequestConverters.Params(request); + RequestConverters.Params params = new RequestConverters.Params(); params.withIndicesOptions(validateQueryRequest.indicesOptions()); params.putParam("explain", Boolean.toString(validateQueryRequest.explain())); params.putParam("all_shards", Boolean.toString(validateQueryRequest.allShards())); params.putParam("rewrite", Boolean.toString(validateQueryRequest.rewrite())); + request.addParameters(params.asMap()); request.setEntity(RequestConverters.createEntity(validateQueryRequest, RequestConverters.REQUEST_BODY_CONTENT_TYPE)); return request; } @@ -541,9 +556,10 @@ static Request getAlias(GetAliasesRequest getAliasesRequest) { String[] aliases = getAliasesRequest.aliases() == null ? Strings.EMPTY_ARRAY : getAliasesRequest.aliases(); String endpoint = RequestConverters.endpoint(indices, "_alias", aliases); Request request = new Request(HttpGet.METHOD_NAME, endpoint); - RequestConverters.Params params = new RequestConverters.Params(request); + RequestConverters.Params params = new RequestConverters.Params(); params.withIndicesOptions(getAliasesRequest.indicesOptions()); params.withLocal(getAliasesRequest.local()); + request.addParameters(params.asMap()); return request; } @@ -562,12 +578,13 @@ private static Request getTemplates(GetIndexTemplatesRequest getIndexTemplatesRe .addCommaSeparatedPathParts(getIndexTemplatesRequest.names()) .build(); final Request request = new Request(HttpGet.METHOD_NAME, endpoint); - final RequestConverters.Params params = new RequestConverters.Params(request); + final RequestConverters.Params params = new RequestConverters.Params(); params.withLocal(getIndexTemplatesRequest.isLocal()); params.withMasterTimeout(getIndexTemplatesRequest.getMasterNodeTimeout()); if (includeTypeName) { params.putParam(INCLUDE_TYPE_NAME_PARAMETER, "true"); } + request.addParameters(params.asMap()); return request; } @@ -577,9 +594,10 @@ static Request templatesExist(IndexTemplatesExistRequest indexTemplatesExistRequ .addCommaSeparatedPathParts(indexTemplatesExistRequest.names()) .build(); final Request request = new Request(HttpHead.METHOD_NAME, endpoint); - final RequestConverters.Params params = new RequestConverters.Params(request); + final RequestConverters.Params params = new RequestConverters.Params(); params.withLocal(indexTemplatesExistRequest.isLocal()); params.withMasterTimeout(indexTemplatesExistRequest.getMasterNodeTimeout()); + request.addParameters(params.asMap()); return request; } @@ -598,22 +616,24 @@ static Request analyze(AnalyzeRequest request) throws IOException { static Request freezeIndex(FreezeIndexRequest freezeIndexRequest) { String endpoint = RequestConverters.endpoint(freezeIndexRequest.getIndices(), "_freeze"); Request request = new Request(HttpPost.METHOD_NAME, endpoint); - RequestConverters.Params parameters = new RequestConverters.Params(request); + RequestConverters.Params parameters = new RequestConverters.Params(); parameters.withTimeout(freezeIndexRequest.timeout()); parameters.withMasterTimeout(freezeIndexRequest.masterNodeTimeout()); parameters.withIndicesOptions(freezeIndexRequest.indicesOptions()); parameters.withWaitForActiveShards(freezeIndexRequest.getWaitForActiveShards()); + request.addParameters(parameters.asMap()); return request; } static Request unfreezeIndex(UnfreezeIndexRequest unfreezeIndexRequest) { String endpoint = RequestConverters.endpoint(unfreezeIndexRequest.getIndices(), "_unfreeze"); Request request = new Request(HttpPost.METHOD_NAME, endpoint); - RequestConverters.Params parameters = new RequestConverters.Params(request); + RequestConverters.Params parameters = new RequestConverters.Params(); parameters.withTimeout(unfreezeIndexRequest.timeout()); parameters.withMasterTimeout(unfreezeIndexRequest.masterNodeTimeout()); parameters.withIndicesOptions(unfreezeIndexRequest.indicesOptions()); parameters.withWaitForActiveShards(unfreezeIndexRequest.getWaitForActiveShards()); + request.addParameters(parameters.asMap()); return request; } @@ -621,8 +641,9 @@ static Request deleteTemplate(DeleteIndexTemplateRequest deleteIndexTemplateRequ String name = deleteIndexTemplateRequest.name(); String endpoint = new RequestConverters.EndpointBuilder().addPathPartAsIs("_template").addPathPart(name).build(); Request request = new Request(HttpDelete.METHOD_NAME, endpoint); - RequestConverters.Params params = new RequestConverters.Params(request); + RequestConverters.Params params = new RequestConverters.Params(); params.withMasterTimeout(deleteIndexTemplateRequest.masterNodeTimeout()); + request.addParameters(params.asMap()); return request; } } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/IngestRequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/IngestRequestConverters.java index 06b4c0fd62ac0..c2ffc4ee5da4d 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/IngestRequestConverters.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/IngestRequestConverters.java @@ -41,8 +41,9 @@ static Request getPipeline(GetPipelineRequest getPipelineRequest) { .build(); Request request = new Request(HttpGet.METHOD_NAME, endpoint); - RequestConverters.Params parameters = new RequestConverters.Params(request); + RequestConverters.Params parameters = new RequestConverters.Params(); parameters.withMasterTimeout(getPipelineRequest.masterNodeTimeout()); + request.addParameters(parameters.asMap()); return request; } @@ -53,10 +54,10 @@ static Request putPipeline(PutPipelineRequest putPipelineRequest) throws IOExcep .build(); Request request = new Request(HttpPut.METHOD_NAME, endpoint); - RequestConverters.Params parameters = new RequestConverters.Params(request); + RequestConverters.Params parameters = new RequestConverters.Params(); parameters.withTimeout(putPipelineRequest.timeout()); parameters.withMasterTimeout(putPipelineRequest.masterNodeTimeout()); - + request.addParameters(parameters.asMap()); request.setEntity(RequestConverters.createEntity(putPipelineRequest, RequestConverters.REQUEST_BODY_CONTENT_TYPE)); return request; } @@ -68,10 +69,10 @@ static Request deletePipeline(DeletePipelineRequest deletePipelineRequest) { .build(); Request request = new Request(HttpDelete.METHOD_NAME, endpoint); - RequestConverters.Params parameters = new RequestConverters.Params(request); + RequestConverters.Params parameters = new RequestConverters.Params(); parameters.withTimeout(deletePipelineRequest.timeout()); parameters.withMasterTimeout(deletePipelineRequest.masterNodeTimeout()); - + request.addParameters(parameters.asMap()); return request; } @@ -83,8 +84,9 @@ static Request simulatePipeline(SimulatePipelineRequest simulatePipelineRequest) builder.addPathPartAsIs("_simulate"); String endpoint = builder.build(); Request request = new Request(HttpPost.METHOD_NAME, endpoint); - RequestConverters.Params params = new RequestConverters.Params(request); + RequestConverters.Params params = new RequestConverters.Params(); params.putParam("verbose", Boolean.toString(simulatePipelineRequest.isVerbose())); + request.addParameters(params.asMap()); request.setEntity(RequestConverters.createEntity(simulatePipelineRequest, RequestConverters.REQUEST_BODY_CONTENT_TYPE)); return request; } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/LicenseRequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/LicenseRequestConverters.java index 73ecce0f0467c..ec5ae8e3a6073 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/LicenseRequestConverters.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/LicenseRequestConverters.java @@ -36,12 +36,13 @@ private LicenseRequestConverters() {} static Request putLicense(PutLicenseRequest putLicenseRequest) { String endpoint = new RequestConverters.EndpointBuilder().addPathPartAsIs("_license").build(); Request request = new Request(HttpPut.METHOD_NAME, endpoint); - RequestConverters.Params parameters = new RequestConverters.Params(request); + RequestConverters.Params parameters = new RequestConverters.Params(); parameters.withTimeout(putLicenseRequest.timeout()); parameters.withMasterTimeout(putLicenseRequest.masterNodeTimeout()); if (putLicenseRequest.isAcknowledge()) { parameters.putParam("acknowledge", "true"); } + request.addParameters(parameters.asMap()); request.setJsonEntity(putLicenseRequest.getLicenseDefinition()); return request; } @@ -49,17 +50,19 @@ static Request putLicense(PutLicenseRequest putLicenseRequest) { static Request getLicense(GetLicenseRequest getLicenseRequest) { String endpoint = new RequestConverters.EndpointBuilder().addPathPartAsIs("_license").build(); Request request = new Request(HttpGet.METHOD_NAME, endpoint); - RequestConverters.Params parameters = new RequestConverters.Params(request); + RequestConverters.Params parameters = new RequestConverters.Params(); parameters.withLocal(getLicenseRequest.isLocal()); + request.addParameters(parameters.asMap()); return request; } static Request deleteLicense(DeleteLicenseRequest deleteLicenseRequest) { String endpoint = new RequestConverters.EndpointBuilder().addPathPartAsIs("_license").build(); Request request = new Request(HttpDelete.METHOD_NAME, endpoint); - RequestConverters.Params parameters = new RequestConverters.Params(request); + RequestConverters.Params parameters = new RequestConverters.Params(); parameters.withTimeout(deleteLicenseRequest.timeout()); parameters.withMasterTimeout(deleteLicenseRequest.masterNodeTimeout()); + request.addParameters(parameters.asMap()); return request; } @@ -67,11 +70,12 @@ static Request startTrial(StartTrialRequest startTrialRequest) { final String endpoint = new RequestConverters.EndpointBuilder().addPathPartAsIs("_license", "start_trial").build(); final Request request = new Request(HttpPost.METHOD_NAME, endpoint); - RequestConverters.Params parameters = new RequestConverters.Params(request); + RequestConverters.Params parameters = new RequestConverters.Params(); parameters.putParam("acknowledge", Boolean.toString(startTrialRequest.isAcknowledge())); if (startTrialRequest.getLicenseType() != null) { parameters.putParam("type", startTrialRequest.getLicenseType()); } + request.addParameters(parameters.asMap()); return request; } @@ -80,12 +84,13 @@ static Request startBasic(StartBasicRequest startBasicRequest) { .addPathPartAsIs("_license", "start_basic") .build(); Request request = new Request(HttpPost.METHOD_NAME, endpoint); - RequestConverters.Params parameters = new RequestConverters.Params(request); + RequestConverters.Params parameters = new RequestConverters.Params(); parameters.withTimeout(startBasicRequest.timeout()); parameters.withMasterTimeout(startBasicRequest.masterNodeTimeout()); if (startBasicRequest.isAcknowledge()) { parameters.putParam("acknowledge", "true"); } + request.addParameters(parameters.asMap()); return request; } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/MLRequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/MLRequestConverters.java index 99884ee49c868..c11e577ef3639 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/MLRequestConverters.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/MLRequestConverters.java @@ -105,11 +105,11 @@ static Request getJob(GetJobRequest getJobRequest) { .build(); Request request = new Request(HttpGet.METHOD_NAME, endpoint); - RequestConverters.Params params = new RequestConverters.Params(request); + RequestConverters.Params params = new RequestConverters.Params(); if (getJobRequest.getAllowNoJobs() != null) { params.putParam("allow_no_jobs", Boolean.toString(getJobRequest.getAllowNoJobs())); } - + request.addParameters(params.asMap()); return request; } @@ -122,10 +122,11 @@ static Request getJobStats(GetJobStatsRequest getJobStatsRequest) { .build(); Request request = new Request(HttpGet.METHOD_NAME, endpoint); - RequestConverters.Params params = new RequestConverters.Params(request); + RequestConverters.Params params = new RequestConverters.Params(); if (getJobStatsRequest.getAllowNoJobs() != null) { params.putParam("allow_no_jobs", Boolean.toString(getJobStatsRequest.getAllowNoJobs())); } + request.addParameters(params.asMap()); return request; } @@ -171,14 +172,14 @@ static Request deleteJob(DeleteJobRequest deleteJobRequest) { .build(); Request request = new Request(HttpDelete.METHOD_NAME, endpoint); - RequestConverters.Params params = new RequestConverters.Params(request); + RequestConverters.Params params = new RequestConverters.Params(); if (deleteJobRequest.getForce() != null) { params.putParam("force", Boolean.toString(deleteJobRequest.getForce())); } if (deleteJobRequest.getWaitForCompletion() != null) { params.putParam("wait_for_completion", Boolean.toString(deleteJobRequest.getWaitForCompletion())); } - + request.addParameters(params.asMap()); return request; } @@ -249,12 +250,12 @@ static Request getDatafeed(GetDatafeedRequest getDatafeedRequest) { .build(); Request request = new Request(HttpGet.METHOD_NAME, endpoint); - RequestConverters.Params params = new RequestConverters.Params(request); + RequestConverters.Params params = new RequestConverters.Params(); if (getDatafeedRequest.getAllowNoDatafeeds() != null) { params.putParam(GetDatafeedRequest.ALLOW_NO_DATAFEEDS.getPreferredName(), Boolean.toString(getDatafeedRequest.getAllowNoDatafeeds())); } - + request.addParameters(params.asMap()); return request; } @@ -265,10 +266,11 @@ static Request deleteDatafeed(DeleteDatafeedRequest deleteDatafeedRequest) { .addPathPart(deleteDatafeedRequest.getDatafeedId()) .build(); Request request = new Request(HttpDelete.METHOD_NAME, endpoint); - RequestConverters.Params params = new RequestConverters.Params(request); + RequestConverters.Params params = new RequestConverters.Params(); if (deleteDatafeedRequest.getForce() != null) { params.putParam("force", Boolean.toString(deleteDatafeedRequest.getForce())); } + request.addParameters(params.asMap()); return request; } @@ -305,10 +307,11 @@ static Request getDatafeedStats(GetDatafeedStatsRequest getDatafeedStatsRequest) .build(); Request request = new Request(HttpGet.METHOD_NAME, endpoint); - RequestConverters.Params params = new RequestConverters.Params(request); + RequestConverters.Params params = new RequestConverters.Params(); if (getDatafeedStatsRequest.getAllowNoDatafeeds() != null) { params.putParam("allow_no_datafeeds", Boolean.toString(getDatafeedStatsRequest.getAllowNoDatafeeds())); } + request.addParameters(params.asMap()); return request; } @@ -331,13 +334,14 @@ static Request deleteForecast(DeleteForecastRequest deleteForecastRequest) { .addPathPart(Strings.collectionToCommaDelimitedString(deleteForecastRequest.getForecastIds())) .build(); Request request = new Request(HttpDelete.METHOD_NAME, endpoint); - RequestConverters.Params params = new RequestConverters.Params(request); + RequestConverters.Params params = new RequestConverters.Params(); if (deleteForecastRequest.getAllowNoForecasts() != null) { params.putParam("allow_no_forecasts", Boolean.toString(deleteForecastRequest.getAllowNoForecasts())); } if (deleteForecastRequest.timeout() != null) { params.putParam("timeout", deleteForecastRequest.timeout().getStringRep()); } + request.addParameters(params.asMap()); return request; } @@ -453,7 +457,7 @@ static Request postData(PostDataRequest postDataRequest) { .build(); Request request = new Request(HttpPost.METHOD_NAME, endpoint); - RequestConverters.Params params = new RequestConverters.Params(request); + RequestConverters.Params params = new RequestConverters.Params(); if (postDataRequest.getResetStart() != null) { params.putParam(PostDataRequest.RESET_START.getPreferredName(), postDataRequest.getResetStart()); } @@ -461,6 +465,7 @@ static Request postData(PostDataRequest postDataRequest) { params.putParam(PostDataRequest.RESET_END.getPreferredName(), postDataRequest.getResetEnd()); } BytesReference content = postDataRequest.getContent(); + request.addParameters(params.asMap()); if (content != null) { BytesRef source = postDataRequest.getContent().toBytesRef(); HttpEntity byteEntity = new NByteArrayEntity(source.bytes, @@ -594,13 +599,14 @@ static Request getFilter(GetFiltersRequest getFiltersRequest) { .addPathPart(getFiltersRequest.getFilterId()) .build(); Request request = new Request(HttpGet.METHOD_NAME, endpoint); - RequestConverters.Params params = new RequestConverters.Params(request); + RequestConverters.Params params = new RequestConverters.Params(); if (getFiltersRequest.getSize() != null) { params.putParam(PageParams.SIZE.getPreferredName(), getFiltersRequest.getSize().toString()); } if (getFiltersRequest.getFrom() != null) { params.putParam(PageParams.FROM.getPreferredName(), getFiltersRequest.getFrom().toString()); } + request.addParameters(params.asMap()); return request; } @@ -628,11 +634,12 @@ static Request deleteFilter(DeleteFilterRequest deleteFilterRequest) { static Request setUpgradeMode(SetUpgradeModeRequest setUpgradeModeRequest) { String endpoint = new EndpointBuilder().addPathPartAsIs("_ml", "set_upgrade_mode").build(); Request request = new Request(HttpPost.METHOD_NAME, endpoint); - RequestConverters.Params params = new RequestConverters.Params(request); + RequestConverters.Params params = new RequestConverters.Params(); params.putParam(SetUpgradeModeRequest.ENABLED.getPreferredName(), Boolean.toString(setUpgradeModeRequest.isEnabled())); if (setUpgradeModeRequest.getTimeout() != null) { params.putParam(SetUpgradeModeRequest.TIMEOUT.getPreferredName(), setUpgradeModeRequest.getTimeout().toString()); } + request.addParameters(params.asMap()); return request; } @@ -650,7 +657,7 @@ static Request findFileStructure(FindFileStructureRequest findFileStructureReque .build(); Request request = new Request(HttpPost.METHOD_NAME, endpoint); - RequestConverters.Params params = new RequestConverters.Params(request); + RequestConverters.Params params = new RequestConverters.Params(); if (findFileStructureRequest.getLinesToSample() != null) { params.putParam(FindFileStructureRequest.LINES_TO_SAMPLE.getPreferredName(), findFileStructureRequest.getLinesToSample().toString()); @@ -695,7 +702,7 @@ static Request findFileStructure(FindFileStructureRequest findFileStructureReque if (findFileStructureRequest.getExplain() != null) { params.putParam(FindFileStructureRequest.EXPLAIN.getPreferredName(), findFileStructureRequest.getExplain().toString()); } - + request.addParameters(params.asMap()); BytesReference sample = findFileStructureRequest.getSample(); BytesRef source = sample.toBytesRef(); HttpEntity byteEntity = new NByteArrayEntity(source.bytes, source.offset, source.length, createContentType(XContentType.JSON)); diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java index 9bd9a0852d33e..9aec180a1a748 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java @@ -88,8 +88,10 @@ import java.net.URI; import java.net.URISyntaxException; import java.nio.charset.Charset; +import java.util.HashMap; import java.util.List; import java.util.Locale; +import java.util.Map; import java.util.StringJoiner; final class RequestConverters { @@ -103,7 +105,7 @@ static Request delete(DeleteRequest deleteRequest) { String endpoint = endpoint(deleteRequest.index(), deleteRequest.type(), deleteRequest.id()); Request request = new Request(HttpDelete.METHOD_NAME, endpoint); - Params parameters = new Params(request); + Params parameters = new Params(); parameters.withRouting(deleteRequest.routing()); parameters.withTimeout(deleteRequest.timeout()); parameters.withVersion(deleteRequest.version()); @@ -112,6 +114,7 @@ static Request delete(DeleteRequest deleteRequest) { parameters.withIfPrimaryTerm(deleteRequest.ifPrimaryTerm()); parameters.withRefreshPolicy(deleteRequest.getRefreshPolicy()); parameters.withWaitForActiveShards(deleteRequest.waitForActiveShards()); + request.addParameters(parameters.asMap()); return request; } @@ -122,7 +125,7 @@ static Request info() { static Request bulk(BulkRequest bulkRequest) throws IOException { Request request = new Request(HttpPost.METHOD_NAME, "/_bulk"); - Params parameters = new Params(request); + Params parameters = new Params(); parameters.withTimeout(bulkRequest.timeout()); parameters.withRefreshPolicy(bulkRequest.getRefreshPolicy()); parameters.withPipeline(bulkRequest.pipeline()); @@ -249,6 +252,7 @@ static Request bulk(BulkRequest bulkRequest) throws IOException { content.write(separator); } } + request.addParameters(parameters.asMap()); request.setEntity(new NByteArrayEntity(content.toByteArray(), 0, content.size(), requestContentType)); return request; } @@ -264,7 +268,7 @@ static Request get(GetRequest getRequest) { private static Request getStyleRequest(String method, GetRequest getRequest) { Request request = new Request(method, endpoint(getRequest.index(), getRequest.type(), getRequest.id())); - Params parameters = new Params(request); + Params parameters = new Params(); parameters.withPreference(getRequest.preference()); parameters.withRouting(getRequest.routing()); parameters.withRefresh(getRequest.refresh()); @@ -273,7 +277,7 @@ private static Request getStyleRequest(String method, GetRequest getRequest) { parameters.withVersion(getRequest.version()); parameters.withVersionType(getRequest.versionType()); parameters.withFetchSourceContext(getRequest.fetchSourceContext()); - + request.addParameters(parameters.asMap()); return request; } @@ -286,23 +290,24 @@ static Request sourceExists(GetRequest getRequest) { endpoint = endpoint(getRequest.index(), optionalType, getRequest.id(), "_source"); } Request request = new Request(HttpHead.METHOD_NAME, endpoint); - Params parameters = new Params(request); + Params parameters = new Params(); parameters.withPreference(getRequest.preference()); parameters.withRouting(getRequest.routing()); parameters.withRefresh(getRequest.refresh()); parameters.withRealtime(getRequest.realtime()); // Version params are not currently supported by the source exists API so are not passed + request.addParameters(parameters.asMap()); return request; } static Request multiGet(MultiGetRequest multiGetRequest) throws IOException { Request request = new Request(HttpPost.METHOD_NAME, "/_mget"); - Params parameters = new Params(request); + Params parameters = new Params(); parameters.withPreference(multiGetRequest.preference()); parameters.withRealtime(multiGetRequest.realtime()); parameters.withRefresh(multiGetRequest.refresh()); - + request.addParameters(parameters.asMap()); request.setEntity(createEntity(multiGetRequest, REQUEST_BODY_CONTENT_TYPE)); return request; } @@ -321,7 +326,7 @@ static Request index(IndexRequest indexRequest) { Request request = new Request(method, endpoint); - Params parameters = new Params(request); + Params parameters = new Params(); parameters.withRouting(indexRequest.routing()); parameters.withTimeout(indexRequest.timeout()); parameters.withVersion(indexRequest.version()); @@ -334,6 +339,7 @@ static Request index(IndexRequest indexRequest) { BytesRef source = indexRequest.source().toBytesRef(); ContentType contentType = createContentType(indexRequest.getContentType()); + request.addParameters(parameters.asMap()); request.setEntity(new NByteArrayEntity(source.bytes, source.offset, source.length, contentType)); return request; } @@ -348,7 +354,7 @@ static Request update(UpdateRequest updateRequest) throws IOException { : endpoint(updateRequest.index(), updateRequest.type(), updateRequest.id(), "_update"); Request request = new Request(HttpPost.METHOD_NAME, endpoint); - Params parameters = new Params(request); + Params parameters = new Params(); parameters.withRouting(updateRequest.routing()); parameters.withTimeout(updateRequest.timeout()); parameters.withRefreshPolicy(updateRequest.getRefreshPolicy()); @@ -379,6 +385,7 @@ static Request update(UpdateRequest updateRequest) throws IOException { if (xContentType == null) { xContentType = Requests.INDEX_CONTENT_TYPE; } + request.addParameters(parameters.asMap()); request.setEntity(createEntity(updateRequest, xContentType)); return request; } @@ -393,12 +400,13 @@ static Request update(UpdateRequest updateRequest) throws IOException { static Request search(SearchRequest searchRequest, String searchEndpoint) throws IOException { Request request = new Request(HttpPost.METHOD_NAME, endpoint(searchRequest.indices(), searchEndpoint)); - Params params = new Params(request); + Params params = new Params(); addSearchRequestParams(params, searchRequest); if (searchRequest.source() != null) { request.setEntity(createEntity(searchRequest.source(), REQUEST_BODY_CONTENT_TYPE)); } + request.addParameters(params.asMap()); return request; } @@ -436,7 +444,7 @@ static Request clearScroll(ClearScrollRequest clearScrollRequest) throws IOExcep static Request multiSearch(MultiSearchRequest multiSearchRequest) throws IOException { Request request = new Request(HttpPost.METHOD_NAME, "/_msearch"); - Params params = new Params(request); + Params params = new Params(); params.putParam(RestSearchAction.TYPED_KEYS_PARAM, "true"); if (multiSearchRequest.maxConcurrentSearchRequests() != MultiSearchRequest.MAX_CONCURRENT_SEARCH_REQUESTS_DEFAULT) { params.putParam("max_concurrent_searches", Integer.toString(multiSearchRequest.maxConcurrentSearchRequests())); @@ -444,6 +452,7 @@ static Request multiSearch(MultiSearchRequest multiSearchRequest) throws IOExcep XContent xContent = REQUEST_BODY_CONTENT_TYPE.xContent(); byte[] source = MultiSearchRequest.writeMultiLineFormat(multiSearchRequest, xContent); + request.addParameters(params.asMap()); request.setEntity(new NByteArrayEntity(source, createContentType(xContent.type()))); return request; } @@ -458,8 +467,9 @@ static Request searchTemplate(SearchTemplateRequest searchTemplateRequest) throw String endpoint = endpoint(searchRequest.indices(), "_search/template"); request = new Request(HttpGet.METHOD_NAME, endpoint); - Params params = new Params(request); + Params params = new Params(); addSearchRequestParams(params, searchRequest); + request.addParameters(params.asMap()); } request.setEntity(createEntity(searchTemplateRequest, REQUEST_BODY_CONTENT_TYPE)); @@ -469,7 +479,7 @@ static Request searchTemplate(SearchTemplateRequest searchTemplateRequest) throw static Request multiSearchTemplate(MultiSearchTemplateRequest multiSearchTemplateRequest) throws IOException { Request request = new Request(HttpPost.METHOD_NAME, "/_msearch/template"); - Params params = new Params(request); + Params params = new Params(); params.putParam(RestSearchAction.TYPED_KEYS_PARAM, "true"); if (multiSearchTemplateRequest.maxConcurrentSearchRequests() != MultiSearchRequest.MAX_CONCURRENT_SEARCH_REQUESTS_DEFAULT) { params.putParam("max_concurrent_searches", Integer.toString(multiSearchTemplateRequest.maxConcurrentSearchRequests())); @@ -483,10 +493,11 @@ static Request multiSearchTemplate(MultiSearchTemplateRequest multiSearchTemplat static Request count(CountRequest countRequest) throws IOException { Request request = new Request(HttpPost.METHOD_NAME, endpoint(countRequest.indices(), countRequest.types(), "_count")); - Params params = new Params(request); + Params params = new Params(); params.withRouting(countRequest.routing()); params.withPreference(countRequest.preference()); params.withIndicesOptions(countRequest.indicesOptions()); + request.addParameters(params.asMap()); request.setEntity(createEntity(countRequest.source(), REQUEST_BODY_CONTENT_TYPE)); return request; } @@ -497,11 +508,12 @@ static Request explain(ExplainRequest explainRequest) throws IOException { : endpoint(explainRequest.index(), explainRequest.type(), explainRequest.id(), "_explain"); Request request = new Request(HttpGet.METHOD_NAME, endpoint); - Params params = new Params(request); + Params params = new Params(); params.withStoredFields(explainRequest.storedFields()); params.withFetchSourceContext(explainRequest.fetchSourceContext()); params.withRouting(explainRequest.routing()); params.withPreference(explainRequest.preference()); + request.addParameters(params.asMap()); request.setEntity(createEntity(explainRequest, REQUEST_BODY_CONTENT_TYPE)); return request; } @@ -509,18 +521,19 @@ static Request explain(ExplainRequest explainRequest) throws IOException { static Request fieldCaps(FieldCapabilitiesRequest fieldCapabilitiesRequest) { Request request = new Request(HttpGet.METHOD_NAME, endpoint(fieldCapabilitiesRequest.indices(), "_field_caps")); - Params params = new Params(request); + Params params = new Params(); params.withFields(fieldCapabilitiesRequest.fields()); params.withIndicesOptions(fieldCapabilitiesRequest.indicesOptions()); + request.addParameters(params.asMap()); return request; } static Request rankEval(RankEvalRequest rankEvalRequest) throws IOException { Request request = new Request(HttpGet.METHOD_NAME, endpoint(rankEvalRequest.indices(), Strings.EMPTY_ARRAY, "_rank_eval")); - Params params = new Params(request); + Params params = new Params(); params.withIndicesOptions(rankEvalRequest.indicesOptions()); - + request.addParameters(params.asMap()); request.setEntity(createEntity(rankEvalRequest.getRankEvalSpec(), REQUEST_BODY_CONTENT_TYPE)); return request; } @@ -536,7 +549,7 @@ static Request submitReindex(ReindexRequest reindexRequest) throws IOException { private static Request prepareReindexRequest(ReindexRequest reindexRequest, boolean waitForCompletion) throws IOException { String endpoint = new EndpointBuilder().addPathPart("_reindex").build(); Request request = new Request(HttpPost.METHOD_NAME, endpoint); - Params params = new Params(request) + Params params = new Params() .withWaitForCompletion(waitForCompletion) .withRefresh(reindexRequest.isRefresh()) .withTimeout(reindexRequest.getTimeout()) @@ -546,6 +559,7 @@ private static Request prepareReindexRequest(ReindexRequest reindexRequest, bool if (reindexRequest.getScrollTime() != null) { params.putParam("scroll", reindexRequest.getScrollTime()); } + request.addParameters(params.asMap()); request.setEntity(createEntity(reindexRequest, REQUEST_BODY_CONTENT_TYPE)); return request; } @@ -553,7 +567,7 @@ private static Request prepareReindexRequest(ReindexRequest reindexRequest, bool static Request updateByQuery(UpdateByQueryRequest updateByQueryRequest) throws IOException { String endpoint = endpoint(updateByQueryRequest.indices(), "_update_by_query"); Request request = new Request(HttpPost.METHOD_NAME, endpoint); - Params params = new Params(request) + Params params = new Params() .withRouting(updateByQueryRequest.getRouting()) .withPipeline(updateByQueryRequest.getPipeline()) .withRefresh(updateByQueryRequest.isRefresh()) @@ -573,6 +587,7 @@ static Request updateByQuery(UpdateByQueryRequest updateByQueryRequest) throws I if (updateByQueryRequest.getSize() > 0) { params.putParam("size", Integer.toString(updateByQueryRequest.getSize())); } + request.addParameters(params.asMap()); request.setEntity(createEntity(updateByQueryRequest, REQUEST_BODY_CONTENT_TYPE)); return request; } @@ -580,7 +595,7 @@ static Request updateByQuery(UpdateByQueryRequest updateByQueryRequest) throws I static Request deleteByQuery(DeleteByQueryRequest deleteByQueryRequest) throws IOException { String endpoint = endpoint(deleteByQueryRequest.indices(), "_delete_by_query"); Request request = new Request(HttpPost.METHOD_NAME, endpoint); - Params params = new Params(request) + Params params = new Params() .withRouting(deleteByQueryRequest.getRouting()) .withRefresh(deleteByQueryRequest.isRefresh()) .withTimeout(deleteByQueryRequest.getTimeout()) @@ -599,6 +614,7 @@ static Request deleteByQuery(DeleteByQueryRequest deleteByQueryRequest) throws I if (deleteByQueryRequest.getSize() > 0) { params.putParam("size", Integer.toString(deleteByQueryRequest.getSize())); } + request.addParameters(params.asMap()); request.setEntity(createEntity(deleteByQueryRequest, REQUEST_BODY_CONTENT_TYPE)); return request; } @@ -619,22 +635,24 @@ private static Request rethrottle(RethrottleRequest rethrottleRequest, String fi String endpoint = new EndpointBuilder().addPathPart(firstPathPart).addPathPart(rethrottleRequest.getTaskId().toString()) .addPathPart("_rethrottle").build(); Request request = new Request(HttpPost.METHOD_NAME, endpoint); - Params params = new Params(request) + Params params = new Params() .withRequestsPerSecond(rethrottleRequest.getRequestsPerSecond()); // we set "group_by" to "none" because this is the response format we can parse back params.putParam("group_by", "none"); + request.addParameters(params.asMap()); return request; } static Request putScript(PutStoredScriptRequest putStoredScriptRequest) throws IOException { String endpoint = new EndpointBuilder().addPathPartAsIs("_scripts").addPathPart(putStoredScriptRequest.id()).build(); Request request = new Request(HttpPost.METHOD_NAME, endpoint); - Params params = new Params(request); + Params params = new Params(); params.withTimeout(putStoredScriptRequest.timeout()); params.withMasterTimeout(putStoredScriptRequest.masterNodeTimeout()); if (Strings.hasText(putStoredScriptRequest.context())) { params.putParam("context", putStoredScriptRequest.context()); } + request.addParameters(params.asMap()); request.setEntity(createEntity(putStoredScriptRequest, REQUEST_BODY_CONTENT_TYPE)); return request; } @@ -665,11 +683,12 @@ static Request termVectors(TermVectorsRequest tvrequest) throws IOException { } Request request = new Request(HttpGet.METHOD_NAME, endpoint); - Params params = new Params(request); + Params params = new Params(); params.withRouting(tvrequest.getRouting()); params.withPreference(tvrequest.getPreference()); params.withFields(tvrequest.getFields()); params.withRealtime(tvrequest.getRealtime()); + request.addParameters(params.asMap()); request.setEntity(createEntity(tvrequest, REQUEST_BODY_CONTENT_TYPE)); return request; } @@ -684,17 +703,19 @@ static Request mtermVectors(MultiTermVectorsRequest mtvrequest) throws IOExcepti static Request getScript(GetStoredScriptRequest getStoredScriptRequest) { String endpoint = new EndpointBuilder().addPathPartAsIs("_scripts").addPathPart(getStoredScriptRequest.id()).build(); Request request = new Request(HttpGet.METHOD_NAME, endpoint); - Params params = new Params(request); + Params params = new Params(); params.withMasterTimeout(getStoredScriptRequest.masterNodeTimeout()); + request.addParameters(params.asMap()); return request; } static Request deleteScript(DeleteStoredScriptRequest deleteStoredScriptRequest) { String endpoint = new EndpointBuilder().addPathPartAsIs("_scripts").addPathPart(deleteStoredScriptRequest.id()).build(); Request request = new Request(HttpDelete.METHOD_NAME, endpoint); - Params params = new Params(request); + Params params = new Params(); params.withTimeout(deleteStoredScriptRequest.timeout()); params.withMasterTimeout(deleteStoredScriptRequest.masterNodeTimeout()); + request.addParameters(params.asMap()); return request; } @@ -758,15 +779,14 @@ public static ContentType createContentType(final XContentType xContentType) { * a {@link Request} and adds the parameters to it directly. */ static class Params { - private final Request request; + private final Map parameters = new HashMap<>(); - Params(Request request) { - this.request = request; + Params() { } Params putParam(String name, String value) { if (Strings.hasLength(value)) { - request.addParameter(name, value); + parameters.put(name,value); } return this; } @@ -778,6 +798,10 @@ Params putParam(String key, TimeValue value) { return this; } + Map asMap(){ + return parameters; + } + Params withDocAsUpsert(boolean docAsUpsert) { if (docAsUpsert) { return putParam("doc_as_upsert", Boolean.TRUE.toString()); diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/RollupRequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/RollupRequestConverters.java index f43505c1c6537..a9f6ff6d5f9fb 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/RollupRequestConverters.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/RollupRequestConverters.java @@ -68,11 +68,12 @@ static Request stopJob(final StopRollupJobRequest stopRollupJobRequest) throws I .build(); Request request = new Request(HttpPost.METHOD_NAME, endpoint); - RequestConverters.Params parameters = new RequestConverters.Params(request); + RequestConverters.Params parameters = new RequestConverters.Params(); parameters.withTimeout(stopRollupJobRequest.timeout()); if (stopRollupJobRequest.waitForCompletion() != null) { parameters.withWaitForCompletion(stopRollupJobRequest.waitForCompletion()); } + request.addParameters(parameters.asMap()); return request; } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/SecurityRequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/SecurityRequestConverters.java index f686167e211bb..18ecc2cea281a 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/SecurityRequestConverters.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/SecurityRequestConverters.java @@ -66,8 +66,9 @@ static Request changePassword(ChangePasswordRequest changePasswordRequest) throw .build(); Request request = new Request(HttpPost.METHOD_NAME, endpoint); request.setEntity(createEntity(changePasswordRequest, REQUEST_BODY_CONTENT_TYPE)); - RequestConverters.Params params = new RequestConverters.Params(request); + RequestConverters.Params params = new RequestConverters.Params(); params.withRefreshPolicy(changePasswordRequest.getRefreshPolicy()); + request.addParameters(params.asMap()); return request; } @@ -87,8 +88,9 @@ static Request putUser(PutUserRequest putUserRequest) throws IOException { .build(); Request request = new Request(HttpPut.METHOD_NAME, endpoint); request.setEntity(createEntity(putUserRequest, REQUEST_BODY_CONTENT_TYPE)); - RequestConverters.Params params = new RequestConverters.Params(request); + RequestConverters.Params params = new RequestConverters.Params(); params.withRefreshPolicy(putUserRequest.getRefreshPolicy()); + request.addParameters(params.asMap()); return request; } @@ -98,8 +100,9 @@ static Request deleteUser(DeleteUserRequest deleteUserRequest) { .addPathPart(deleteUserRequest.getName()) .build(); Request request = new Request(HttpDelete.METHOD_NAME, endpoint); - RequestConverters.Params params = new RequestConverters.Params(request); + RequestConverters.Params params = new RequestConverters.Params(); params.withRefreshPolicy(deleteUserRequest.getRefreshPolicy()); + request.addParameters(params.asMap()); return request; } @@ -110,8 +113,9 @@ static Request putRoleMapping(final PutRoleMappingRequest putRoleMappingRequest) .build(); final Request request = new Request(HttpPut.METHOD_NAME, endpoint); request.setEntity(createEntity(putRoleMappingRequest, REQUEST_BODY_CONTENT_TYPE)); - final RequestConverters.Params params = new RequestConverters.Params(request); + final RequestConverters.Params params = new RequestConverters.Params(); params.withRefreshPolicy(putRoleMappingRequest.getRefreshPolicy()); + request.addParameters(params.asMap()); return request; } @@ -139,8 +143,9 @@ private static Request setUserEnabled(SetUserEnabledRequest setUserEnabledReques .addPathPart(setUserEnabledRequest.isEnabled() ? "_enable" : "_disable") .build(); Request request = new Request(HttpPut.METHOD_NAME, endpoint); - RequestConverters.Params params = new RequestConverters.Params(request); + RequestConverters.Params params = new RequestConverters.Params(); params.withRefreshPolicy(setUserEnabledRequest.getRefreshPolicy()); + request.addParameters(params.asMap()); return request; } @@ -161,8 +166,9 @@ static Request clearRealmCache(ClearRealmCacheRequest clearRealmCacheRequest) { final String endpoint = builder.addPathPartAsIs("_clear_cache").build(); Request request = new Request(HttpPost.METHOD_NAME, endpoint); if (clearRealmCacheRequest.getUsernames().isEmpty() == false) { - RequestConverters.Params params = new RequestConverters.Params(request); + RequestConverters.Params params = new RequestConverters.Params(); params.putParam("usernames", Strings.collectionToCommaDelimitedString(clearRealmCacheRequest.getUsernames())); + request.addParameters(params.asMap()); } return request; } @@ -182,8 +188,9 @@ static Request deleteRoleMapping(DeleteRoleMappingRequest deleteRoleMappingReque .addPathPart(deleteRoleMappingRequest.getName()) .build(); final Request request = new Request(HttpDelete.METHOD_NAME, endpoint); - final RequestConverters.Params params = new RequestConverters.Params(request); + final RequestConverters.Params params = new RequestConverters.Params(); params.withRefreshPolicy(deleteRoleMappingRequest.getRefreshPolicy()); + request.addParameters(params.asMap()); return request; } @@ -193,8 +200,9 @@ static Request deleteRole(DeleteRoleRequest deleteRoleRequest) { .addPathPart(deleteRoleRequest.getName()) .build(); Request request = new Request(HttpDelete.METHOD_NAME, endpoint); - RequestConverters.Params params = new RequestConverters.Params(request); + RequestConverters.Params params = new RequestConverters.Params(); params.withRefreshPolicy(deleteRoleRequest.getRefreshPolicy()); + request.addParameters(params.asMap()); return request; } @@ -231,8 +239,9 @@ static Request getPrivileges(GetPrivilegesRequest getPrivilegesRequest) { static Request putPrivileges(final PutPrivilegesRequest putPrivilegesRequest) throws IOException { Request request = new Request(HttpPut.METHOD_NAME, "/_security/privilege"); request.setEntity(createEntity(putPrivilegesRequest, REQUEST_BODY_CONTENT_TYPE)); - RequestConverters.Params params = new RequestConverters.Params(request); + RequestConverters.Params params = new RequestConverters.Params(); params.withRefreshPolicy(putPrivilegesRequest.getRefreshPolicy()); + request.addParameters(params.asMap()); return request; } @@ -243,8 +252,9 @@ static Request deletePrivileges(DeletePrivilegesRequest deletePrivilegeRequest) .addCommaSeparatedPathParts(deletePrivilegeRequest.getPrivileges()) .build(); Request request = new Request(HttpDelete.METHOD_NAME, endpoint); - RequestConverters.Params params = new RequestConverters.Params(request); + RequestConverters.Params params = new RequestConverters.Params(); params.withRefreshPolicy(deletePrivilegeRequest.getRefreshPolicy()); + request.addParameters(params.asMap()); return request; } @@ -255,16 +265,18 @@ static Request putRole(final PutRoleRequest putRoleRequest) throws IOException { .build(); final Request request = new Request(HttpPut.METHOD_NAME, endpoint); request.setEntity(createEntity(putRoleRequest, REQUEST_BODY_CONTENT_TYPE)); - final RequestConverters.Params params = new RequestConverters.Params(request); + final RequestConverters.Params params = new RequestConverters.Params(); params.withRefreshPolicy(putRoleRequest.getRefreshPolicy()); + request.addParameters(params.asMap()); return request; } static Request createApiKey(final CreateApiKeyRequest createApiKeyRequest) throws IOException { final Request request = new Request(HttpPost.METHOD_NAME, "/_security/api_key"); request.setEntity(createEntity(createApiKeyRequest, REQUEST_BODY_CONTENT_TYPE)); - final RequestConverters.Params params = new RequestConverters.Params(request); + final RequestConverters.Params params = new RequestConverters.Params(); params.withRefreshPolicy(createApiKeyRequest.getRefreshPolicy()); + request.addParameters(params.asMap()); return request; } @@ -282,13 +294,13 @@ static Request getApiKey(final GetApiKeyRequest getApiKeyRequest) throws IOExcep if (Strings.hasText(getApiKeyRequest.getRealmName())) { request.addParameter("realm_name", getApiKeyRequest.getRealmName()); } + return request; } static Request invalidateApiKey(final InvalidateApiKeyRequest invalidateApiKeyRequest) throws IOException { final Request request = new Request(HttpDelete.METHOD_NAME, "/_security/api_key"); request.setEntity(createEntity(invalidateApiKeyRequest, REQUEST_BODY_CONTENT_TYPE)); - final RequestConverters.Params params = new RequestConverters.Params(request); return request; } } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/SnapshotRequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/SnapshotRequestConverters.java index 93fb10bd56136..0dedd59c3e6a2 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/SnapshotRequestConverters.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/SnapshotRequestConverters.java @@ -46,9 +46,10 @@ static Request getRepositories(GetRepositoriesRequest getRepositoriesRequest) { .build(); Request request = new Request(HttpGet.METHOD_NAME, endpoint); - RequestConverters.Params parameters = new RequestConverters.Params(request); + RequestConverters.Params parameters = new RequestConverters.Params(); parameters.withMasterTimeout(getRepositoriesRequest.masterNodeTimeout()); parameters.withLocal(getRepositoriesRequest.local()); + request.addParameters(parameters.asMap()); return request; } @@ -56,11 +57,11 @@ static Request createRepository(PutRepositoryRequest putRepositoryRequest) throw String endpoint = new RequestConverters.EndpointBuilder().addPathPart("_snapshot").addPathPart(putRepositoryRequest.name()).build(); Request request = new Request(HttpPut.METHOD_NAME, endpoint); - RequestConverters.Params parameters = new RequestConverters.Params(request); + RequestConverters.Params parameters = new RequestConverters.Params(); parameters.withMasterTimeout(putRepositoryRequest.masterNodeTimeout()); parameters.withTimeout(putRepositoryRequest.timeout()); parameters.withVerify(putRepositoryRequest.verify()); - + request.addParameters(parameters.asMap()); request.setEntity(RequestConverters.createEntity(putRepositoryRequest, RequestConverters.REQUEST_BODY_CONTENT_TYPE)); return request; } @@ -70,9 +71,10 @@ static Request deleteRepository(DeleteRepositoryRequest deleteRepositoryRequest) .build(); Request request = new Request(HttpDelete.METHOD_NAME, endpoint); - RequestConverters.Params parameters = new RequestConverters.Params(request); + RequestConverters.Params parameters = new RequestConverters.Params(); parameters.withMasterTimeout(deleteRepositoryRequest.masterNodeTimeout()); parameters.withTimeout(deleteRepositoryRequest.timeout()); + request.addParameters(parameters.asMap()); return request; } @@ -83,9 +85,10 @@ static Request verifyRepository(VerifyRepositoryRequest verifyRepositoryRequest) .build(); Request request = new Request(HttpPost.METHOD_NAME, endpoint); - RequestConverters.Params parameters = new RequestConverters.Params(request); + RequestConverters.Params parameters = new RequestConverters.Params(); parameters.withMasterTimeout(verifyRepositoryRequest.masterNodeTimeout()); parameters.withTimeout(verifyRepositoryRequest.timeout()); + request.addParameters(parameters.asMap()); return request; } @@ -95,9 +98,10 @@ static Request createSnapshot(CreateSnapshotRequest createSnapshotRequest) throw .addPathPart(createSnapshotRequest.snapshot()) .build(); Request request = new Request(HttpPut.METHOD_NAME, endpoint); - RequestConverters.Params params = new RequestConverters.Params(request); + RequestConverters.Params params = new RequestConverters.Params(); params.withMasterTimeout(createSnapshotRequest.masterNodeTimeout()); params.withWaitForCompletion(createSnapshotRequest.waitForCompletion()); + request.addParameters(params.asMap()); request.setEntity(RequestConverters.createEntity(createSnapshotRequest, RequestConverters.REQUEST_BODY_CONTENT_TYPE)); return request; } @@ -114,11 +118,11 @@ static Request getSnapshots(GetSnapshotsRequest getSnapshotsRequest) { Request request = new Request(HttpGet.METHOD_NAME, endpoint); - RequestConverters.Params parameters = new RequestConverters.Params(request); + RequestConverters.Params parameters = new RequestConverters.Params(); parameters.withMasterTimeout(getSnapshotsRequest.masterNodeTimeout()); parameters.putParam("ignore_unavailable", Boolean.toString(getSnapshotsRequest.ignoreUnavailable())); parameters.putParam("verbose", Boolean.toString(getSnapshotsRequest.verbose())); - + request.addParameters(parameters.asMap()); return request; } @@ -130,9 +134,10 @@ static Request snapshotsStatus(SnapshotsStatusRequest snapshotsStatusRequest) { .build(); Request request = new Request(HttpGet.METHOD_NAME, endpoint); - RequestConverters.Params parameters = new RequestConverters.Params(request); + RequestConverters.Params parameters = new RequestConverters.Params(); parameters.withMasterTimeout(snapshotsStatusRequest.masterNodeTimeout()); parameters.withIgnoreUnavailable(snapshotsStatusRequest.ignoreUnavailable()); + request.addParameters(parameters.asMap()); return request; } @@ -143,9 +148,10 @@ static Request restoreSnapshot(RestoreSnapshotRequest restoreSnapshotRequest) th .addPathPartAsIs("_restore") .build(); Request request = new Request(HttpPost.METHOD_NAME, endpoint); - RequestConverters.Params parameters = new RequestConverters.Params(request); + RequestConverters.Params parameters = new RequestConverters.Params(); parameters.withMasterTimeout(restoreSnapshotRequest.masterNodeTimeout()); parameters.withWaitForCompletion(restoreSnapshotRequest.waitForCompletion()); + request.addParameters(parameters.asMap()); request.setEntity(RequestConverters.createEntity(restoreSnapshotRequest, RequestConverters.REQUEST_BODY_CONTENT_TYPE)); return request; } @@ -157,8 +163,9 @@ static Request deleteSnapshot(DeleteSnapshotRequest deleteSnapshotRequest) { .build(); Request request = new Request(HttpDelete.METHOD_NAME, endpoint); - RequestConverters.Params parameters = new RequestConverters.Params(request); + RequestConverters.Params parameters = new RequestConverters.Params(); parameters.withMasterTimeout(deleteSnapshotRequest.masterNodeTimeout()); + request.addParameters(parameters.asMap()); return request; } } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/TasksRequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/TasksRequestConverters.java index f0e9cf4e025f6..f30efabc823e3 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/TasksRequestConverters.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/TasksRequestConverters.java @@ -32,12 +32,13 @@ private TasksRequestConverters() {} static Request cancelTasks(CancelTasksRequest cancelTasksRequest) { Request request = new Request(HttpPost.METHOD_NAME, "/_tasks/_cancel"); - RequestConverters.Params params = new RequestConverters.Params(request); + RequestConverters.Params params = new RequestConverters.Params(); params.withTimeout(cancelTasksRequest.getTimeout()) .withTaskId(cancelTasksRequest.getTaskId()) .withNodes(cancelTasksRequest.getNodes()) .withParentTaskId(cancelTasksRequest.getParentTaskId()) .withActions(cancelTasksRequest.getActions()); + request.addParameters(params.asMap()); return request; } @@ -46,7 +47,7 @@ static Request listTasks(ListTasksRequest listTaskRequest) { throw new IllegalArgumentException("TaskId cannot be used for list tasks request"); } Request request = new Request(HttpGet.METHOD_NAME, "/_tasks"); - RequestConverters.Params params = new RequestConverters.Params(request); + RequestConverters.Params params = new RequestConverters.Params(); params.withTimeout(listTaskRequest.getTimeout()) .withDetailed(listTaskRequest.getDetailed()) .withWaitForCompletion(listTaskRequest.getWaitForCompletion()) @@ -54,6 +55,7 @@ static Request listTasks(ListTasksRequest listTaskRequest) { .withNodes(listTaskRequest.getNodes()) .withActions(listTaskRequest.getActions()) .putParam("group_by", "none"); + request.addParameters(params.asMap()); return request; } @@ -62,9 +64,10 @@ static Request getTask(GetTaskRequest getTaskRequest) { .addPathPartAsIs(getTaskRequest.getNodeId() + ":" + Long.toString(getTaskRequest.getTaskId())) .build(); Request request = new Request(HttpGet.METHOD_NAME, endpoint); - RequestConverters.Params params = new RequestConverters.Params(request); + RequestConverters.Params params = new RequestConverters.Params(); params.withTimeout(getTaskRequest.getTimeout()) .withWaitForCompletion(getTaskRequest.getWaitForCompletion()); + request.addParameters(params.asMap()); return request; } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/WatcherRequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/WatcherRequestConverters.java index 9718607d8b80e..84559fb182306 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/WatcherRequestConverters.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/WatcherRequestConverters.java @@ -69,12 +69,13 @@ static Request putWatch(PutWatchRequest putWatchRequest) { .build(); Request request = new Request(HttpPut.METHOD_NAME, endpoint); - RequestConverters.Params params = new RequestConverters.Params(request) + RequestConverters.Params params = new RequestConverters.Params() .withIfSeqNo(putWatchRequest.ifSeqNo()) .withIfPrimaryTerm(putWatchRequest.ifPrimaryTerm()); if (putWatchRequest.isActive() == false) { params.putParam("active", "false"); } + request.addParameters(params.asMap()); ContentType contentType = RequestConverters.createContentType(putWatchRequest.xContentType()); BytesReference source = putWatchRequest.getSource(); request.setEntity(new NByteArrayEntity(source.toBytesRef().bytes, 0, source.length(), contentType)); @@ -118,7 +119,7 @@ static Request executeWatch(ExecuteWatchRequest executeWatchRequest) throws IOEx .addPathPartAsIs("_execute").build(); Request request = new Request(HttpPost.METHOD_NAME, endpoint); - RequestConverters.Params params = new RequestConverters.Params(request); + RequestConverters.Params params = new RequestConverters.Params(); if (executeWatchRequest.isDebug()) { params.putParam("debug", "true"); } @@ -128,7 +129,7 @@ static Request executeWatch(ExecuteWatchRequest executeWatchRequest) throws IOEx if (executeWatchRequest.recordExecution()) { params.putParam("record_execution", "true"); } - + request.addParameters(params.asMap()); request.setEntity(RequestConverters.createEntity(executeWatchRequest, XContentType.JSON)); return request; } @@ -158,7 +159,7 @@ static Request watcherStats(WatcherStatsRequest watcherStatsRequest) { RequestConverters.EndpointBuilder builder = new RequestConverters.EndpointBuilder().addPathPartAsIs("_watcher", "stats"); String endpoint = builder.build(); Request request = new Request(HttpGet.METHOD_NAME, endpoint); - RequestConverters.Params parameters = new RequestConverters.Params(request); + RequestConverters.Params parameters = new RequestConverters.Params(); StringBuilder metric = new StringBuilder(); if (watcherStatsRequest.includeCurrentWatches()) { metric.append("current_watches"); @@ -172,6 +173,7 @@ static Request watcherStats(WatcherStatsRequest watcherStatsRequest) { if (metric.length() > 0) { parameters.putParam("metric", metric.toString()); } + request.addParameters(parameters.asMap()); return request; } } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/XPackRequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/XPackRequestConverters.java index 9e0c1527403d5..e4742e8c1cbe2 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/XPackRequestConverters.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/XPackRequestConverters.java @@ -46,8 +46,9 @@ static Request info(XPackInfoRequest infoRequest) { static Request usage(XPackUsageRequest usageRequest) { Request request = new Request(HttpGet.METHOD_NAME, "/_xpack/usage"); - RequestConverters.Params parameters = new RequestConverters.Params(request); + RequestConverters.Params parameters = new RequestConverters.Params(); parameters.withMasterTimeout(usageRequest.masterNodeTimeout()); + request.addParameters(parameters.asMap()); return request; } } diff --git a/client/rest/src/main/java/org/elasticsearch/client/Request.java b/client/rest/src/main/java/org/elasticsearch/client/Request.java index 2e4733201b12c..0bf9202486427 100644 --- a/client/rest/src/main/java/org/elasticsearch/client/Request.java +++ b/client/rest/src/main/java/org/elasticsearch/client/Request.java @@ -81,6 +81,10 @@ public void addParameter(String name, String value) { } } + public void addParameters(Map paramSource){ + paramSource.forEach(this::addParameter); + } + /** * Query string parameters. The returned map is an unmodifiable view of the * map in the request so calls to {@link #addParameter(String, String)} From d2d6167ff1cb203b3415ff45351d3960f0050b10 Mon Sep 17 00:00:00 2001 From: Tim Brooks Date: Wed, 29 May 2019 15:27:30 -0600 Subject: [PATCH 179/224] Propogate version in reindex from remote search (#42412) This is related to #31908. In order to use the external version in a reindex from remote request, the search request must be configured to request the version (as it is not returned by default). This commit modifies the search request to request the version. Additionally, it modifies our current reindex from remote tests to randomly use the external version_type. --- modules/reindex/build.gradle | 2 +- .../reindex/remote/RemoteRequestBuilders.java | 13 +++--- .../index/reindex/ManyDocumentsIT.java | 39 ++++++++++++----- .../remote/ReindexFromOldRemoteIT.java | 43 +++++++++++++------ .../remote/RemoteRequestBuildersTests.java | 25 ++++++----- 5 files changed, 81 insertions(+), 41 deletions(-) diff --git a/modules/reindex/build.gradle b/modules/reindex/build.gradle index 260c8dcc1df79..957d502f6fcea 100644 --- a/modules/reindex/build.gradle +++ b/modules/reindex/build.gradle @@ -77,7 +77,7 @@ forbiddenPatterns { exclude '**/*.p12' } -// Support for testing reindex-from-remote against old Elaticsearch versions +// Support for testing reindex-from-remote against old Elasticsearch versions configurations { oldesFixture es2 diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/remote/RemoteRequestBuilders.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/remote/RemoteRequestBuilders.java index e29617f046133..40c4ba757d176 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/remote/RemoteRequestBuilders.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/remote/RemoteRequestBuilders.java @@ -74,14 +74,13 @@ static Request initialSearch(SearchRequest searchRequest, BytesReference query, request.addParameter("scroll", keepAlive.getStringRep()); } request.addParameter("size", Integer.toString(searchRequest.source().size())); - if (searchRequest.source().version() == null || searchRequest.source().version() == true) { - /* - * Passing `null` here just add the `version` request parameter - * without any value. This way of requesting the version works - * for all supported versions of Elasticsearch. - */ - request.addParameter("version", null); + + if (searchRequest.source().version() == null || searchRequest.source().version() == false) { + request.addParameter("version", Boolean.FALSE.toString()); + } else { + request.addParameter("version", Boolean.TRUE.toString()); } + if (searchRequest.source().sorts() != null) { boolean useScan = false; // Detect if we should use search_type=scan rather than a sort diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ManyDocumentsIT.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ManyDocumentsIT.java index b86f28452cc96..fd8cf49cc39ad 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ManyDocumentsIT.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ManyDocumentsIT.java @@ -73,18 +73,35 @@ public void testReindexFromRemote() throws IOException { Map http = (Map) nodeInfo.get("http"); String remote = "http://"+ http.get("publish_address"); Request request = new Request("POST", "/_reindex"); - request.setJsonEntity( + if (randomBoolean()) { + request.setJsonEntity( "{\n" + - " \"source\":{\n" + - " \"index\":\"test\",\n" + - " \"remote\":{\n" + - " \"host\":\"" + remote + "\"\n" + - " }\n" + - " }\n," + - " \"dest\":{\n" + - " \"index\":\"des\"\n" + - " }\n" + - "}"); + " \"source\":{\n" + + " \"index\":\"test\",\n" + + " \"remote\":{\n" + + " \"host\":\"" + remote + "\"\n" + + " }\n" + + " }\n," + + " \"dest\":{\n" + + " \"index\":\"des\"\n" + + " }\n" + + "}"); + } else { + // Test with external version_type + request.setJsonEntity( + "{\n" + + " \"source\":{\n" + + " \"index\":\"test\",\n" + + " \"remote\":{\n" + + " \"host\":\"" + remote + "\"\n" + + " }\n" + + " }\n," + + " \"dest\":{\n" + + " \"index\":\"des\",\n" + + " \"version_type\": \"external\"\n" + + " }\n" + + "}"); + } Map response = entityAsMap(client().performRequest(request)); assertThat(response, hasEntry("total", count)); assertThat(response, hasEntry("created", count)); diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/remote/ReindexFromOldRemoteIT.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/remote/ReindexFromOldRemoteIT.java index 9feed83595ff1..27d975c4114f9 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/remote/ReindexFromOldRemoteIT.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/remote/ReindexFromOldRemoteIT.java @@ -56,19 +56,38 @@ private void oldEsTestCase(String portPropertyName, String requestsPerSecond) th } Request reindex = new Request("POST", "/_reindex"); - reindex.setJsonEntity( + if (randomBoolean()) { + // Reindex using the external version_type + reindex.setJsonEntity( "{\n" - + " \"source\":{\n" - + " \"index\": \"test\",\n" - + " \"size\": 1,\n" - + " \"remote\": {\n" - + " \"host\": \"http://127.0.0.1:" + oldEsPort + "\"\n" - + " }\n" - + " },\n" - + " \"dest\": {\n" - + " \"index\": \"test\"\n" - + " }\n" - + "}"); + + " \"source\":{\n" + + " \"index\": \"test\",\n" + + " \"size\": 1,\n" + + " \"remote\": {\n" + + " \"host\": \"http://127.0.0.1:" + oldEsPort + "\"\n" + + " }\n" + + " },\n" + + " \"dest\": {\n" + + " \"index\": \"test\",\n" + + " \"version_type\": \"external\"\n" + + " }\n" + + "}"); + } else { + // Reindex using the default internal version_type + reindex.setJsonEntity( + "{\n" + + " \"source\":{\n" + + " \"index\": \"test\",\n" + + " \"size\": 1,\n" + + " \"remote\": {\n" + + " \"host\": \"http://127.0.0.1:" + oldEsPort + "\"\n" + + " }\n" + + " },\n" + + " \"dest\": {\n" + + " \"index\": \"test\"\n" + + " }\n" + + "}"); + } reindex.addParameter("refresh", "true"); reindex.addParameter("pretty", "true"); if (requestsPerSecond != null) { diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/remote/RemoteRequestBuildersTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/remote/RemoteRequestBuildersTests.java index 28b8d32688397..0005fa921b33b 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/remote/RemoteRequestBuildersTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/remote/RemoteRequestBuildersTests.java @@ -113,7 +113,7 @@ public void testInitialSearchParamsFields() { // Test request without any fields Version remoteVersion = Version.fromId(between(2000099, Version.CURRENT.id)); assertThat(initialSearch(searchRequest, query, remoteVersion).getParameters(), - not(either(hasKey("stored_fields")).or(hasKey("fields")))); + not(either(hasKey("stored_fields")).or(hasKey("fields")))); // Test stored_fields for versions that support it searchRequest = new SearchRequest().source(new SearchSourceBuilder()); @@ -134,14 +134,14 @@ public void testInitialSearchParamsFields() { searchRequest.source().storedField("_source").storedField("_id"); remoteVersion = Version.fromId(between(0, 2000099 - 1)); assertThat(initialSearch(searchRequest, query, remoteVersion).getParameters(), - hasEntry("fields", "_source,_id,_parent,_routing,_ttl")); + hasEntry("fields", "_source,_id,_parent,_routing,_ttl")); // But only versions before 1.0 force _source to be in the list searchRequest = new SearchRequest().source(new SearchSourceBuilder()); searchRequest.source().storedField("_id"); remoteVersion = Version.fromId(between(1000099, 2000099 - 1)); assertThat(initialSearch(searchRequest, query, remoteVersion).getParameters(), - hasEntry("fields", "_id,_parent,_routing,_ttl")); + hasEntry("fields", "_id,_parent,_routing,_ttl")); } public void testInitialSearchParamsMisc() { @@ -161,7 +161,7 @@ public void testInitialSearchParamsMisc() { fetchVersion = randomBoolean(); searchRequest.source().version(fetchVersion); } - + Map params = initialSearch(searchRequest, query, remoteVersion).getParameters(); if (scroll == null) { @@ -170,7 +170,12 @@ public void testInitialSearchParamsMisc() { assertScroll(remoteVersion, params, scroll); } assertThat(params, hasEntry("size", Integer.toString(size))); - assertThat(params, fetchVersion == null || fetchVersion == true ? hasEntry("version", null) : not(hasEntry("version", null))); + if (fetchVersion != null) { + assertThat(params, fetchVersion ? hasEntry("version", Boolean.TRUE.toString()) : + hasEntry("version", Boolean.FALSE.toString())); + } else { + assertThat(params, hasEntry("version", Boolean.FALSE.toString())); + } } private void assertScroll(Version remoteVersion, Map params, TimeValue requested) { @@ -197,22 +202,22 @@ public void testInitialSearchEntity() throws IOException { assertEquals(ContentType.APPLICATION_JSON.toString(), entity.getContentType().getValue()); if (remoteVersion.onOrAfter(Version.fromId(1000099))) { assertEquals("{\"query\":" + query + ",\"_source\":true}", - Streams.copyToString(new InputStreamReader(entity.getContent(), StandardCharsets.UTF_8))); + Streams.copyToString(new InputStreamReader(entity.getContent(), StandardCharsets.UTF_8))); } else { assertEquals("{\"query\":" + query + "}", - Streams.copyToString(new InputStreamReader(entity.getContent(), StandardCharsets.UTF_8))); + Streams.copyToString(new InputStreamReader(entity.getContent(), StandardCharsets.UTF_8))); } // Source filtering is included if set up - searchRequest.source().fetchSource(new String[] {"in1", "in2"}, new String[] {"out"}); + searchRequest.source().fetchSource(new String[]{"in1", "in2"}, new String[]{"out"}); entity = initialSearch(searchRequest, new BytesArray(query), remoteVersion).getEntity(); assertEquals(ContentType.APPLICATION_JSON.toString(), entity.getContentType().getValue()); assertEquals("{\"query\":" + query + ",\"_source\":{\"includes\":[\"in1\",\"in2\"],\"excludes\":[\"out\"]}}", - Streams.copyToString(new InputStreamReader(entity.getContent(), StandardCharsets.UTF_8))); + Streams.copyToString(new InputStreamReader(entity.getContent(), StandardCharsets.UTF_8))); // Invalid XContent fails RuntimeException e = expectThrows(RuntimeException.class, - () -> initialSearch(searchRequest, new BytesArray("{}, \"trailing\": {}"), remoteVersion)); + () -> initialSearch(searchRequest, new BytesArray("{}, \"trailing\": {}"), remoteVersion)); assertThat(e.getCause().getMessage(), containsString("Unexpected character (',' (code 44))")); e = expectThrows(RuntimeException.class, () -> initialSearch(searchRequest, new BytesArray("{"), remoteVersion)); assertThat(e.getCause().getMessage(), containsString("Unexpected end-of-input")); From 26a508bb17b53cf8c06a796aeefa27815de18301 Mon Sep 17 00:00:00 2001 From: Mark Vieira Date: Wed, 29 May 2019 15:34:52 -0700 Subject: [PATCH 180/224] Fix inverted condition so we never cache rest integ tests --- .../org/elasticsearch/gradle/test/RestIntegTestTask.groovy | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestIntegTestTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestIntegTestTask.groovy index 9564bde038b90..ee62e915c5fb5 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestIntegTestTask.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestIntegTestTask.groovy @@ -26,6 +26,7 @@ import org.gradle.api.Task import org.gradle.api.execution.TaskExecutionAdapter import org.gradle.api.logging.Logger import org.gradle.api.logging.Logging +import org.gradle.api.specs.Specs import org.gradle.api.tasks.Copy import org.gradle.api.tasks.Input import org.gradle.api.tasks.TaskState @@ -79,10 +80,7 @@ class RestIntegTestTask extends DefaultTask { // disable the build cache for rest test tasks // there are a number of inputs we aren't properly tracking here so we'll just not cache these for now - runner.getOutputs().doNotCacheIf( - "Caching is disabled for REST integration tests", - { false } - ); + runner.getOutputs().doNotCacheIf("Caching is disabled for REST integration tests", Specs.SATISFIES_ALL) // override/add more for rest tests runner.maxParallelForks = 1 From 45873b15a66d8a2d2dccd97bfef8a854cdc7f1cc Mon Sep 17 00:00:00 2001 From: Mark Vieira Date: Wed, 29 May 2019 15:35:26 -0700 Subject: [PATCH 181/224] Remove unused import --- .../org/elasticsearch/gradle/test/RestIntegTestTask.groovy | 1 - 1 file changed, 1 deletion(-) diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestIntegTestTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestIntegTestTask.groovy index ee62e915c5fb5..43e17eb73b96c 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestIntegTestTask.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestIntegTestTask.groovy @@ -33,7 +33,6 @@ import org.gradle.api.tasks.TaskState import org.gradle.api.tasks.options.Option import org.gradle.api.tasks.testing.Test import org.gradle.plugins.ide.idea.IdeaPlugin -import org.gradle.process.CommandLineArgumentProvider import java.nio.charset.StandardCharsets import java.nio.file.Files From 28ad74f8892530f500e1283a9c5852b666774f0f Mon Sep 17 00:00:00 2001 From: Igor Motov Date: Wed, 29 May 2019 20:05:12 -0400 Subject: [PATCH 182/224] Geo: Refactor libs/geo parsers (#42549) Refactors the WKT and GeoJSON parsers from an utility class into an instantiatable objects. This is a preliminary step in preparation for moving out coordinate validators from Geometry constructors. This should allow us to make validators plugable. --- .../geo/utils/WellKnownText.java | 10 ++-- .../geo/geometry/BaseGeometryTestCase.java | 5 +- .../geo/geometry/CircleTests.java | 13 +++--- .../geo/geometry/GeometryCollectionTests.java | 9 ++-- .../elasticsearch/geo/geometry/LineTests.java | 13 +++--- .../geo/geometry/LinearRingTests.java | 2 +- .../geo/geometry/MultiLineTests.java | 9 ++-- .../geo/geometry/MultiPointTests.java | 17 +++---- .../geo/geometry/MultiPolygonTests.java | 9 ++-- .../geo/geometry/PointTests.java | 13 +++--- .../geo/geometry/PolygonTests.java | 13 +++--- .../geo/geometry/RectangleTests.java | 9 ++-- .../org/elasticsearch/common/geo/GeoJson.java | 30 +++++------- .../common/geo/GeometryParser.java | 12 +++-- .../common/geo/BaseGeoParsingTestCase.java | 2 +- .../common/geo/GeoJsonParserTests.java | 46 +++++++++---------- .../common/geo/GeoJsonSerializationTests.java | 3 +- .../common/geo/GeometryParserTests.java | 16 +++---- .../xpack/sql/jdbc/TypeConverter.java | 4 +- .../xpack/sql/qa/jdbc/JdbcAssert.java | 4 +- .../function/scalar/geo/GeoShape.java | 12 +++-- 21 files changed, 136 insertions(+), 115 deletions(-) diff --git a/libs/geo/src/main/java/org/elasticsearch/geo/utils/WellKnownText.java b/libs/geo/src/main/java/org/elasticsearch/geo/utils/WellKnownText.java index e1af54e3383e0..c489c26e8bca4 100644 --- a/libs/geo/src/main/java/org/elasticsearch/geo/utils/WellKnownText.java +++ b/libs/geo/src/main/java/org/elasticsearch/geo/utils/WellKnownText.java @@ -56,13 +56,17 @@ public class WellKnownText { private static final String EOF = "END-OF-STREAM"; private static final String EOL = "END-OF-LINE"; - public static String toWKT(Geometry geometry) { + public WellKnownText() { + + } + + public String toWKT(Geometry geometry) { StringBuilder builder = new StringBuilder(); toWKT(geometry, builder); return builder.toString(); } - public static void toWKT(Geometry geometry, StringBuilder sb) { + public void toWKT(Geometry geometry, StringBuilder sb) { sb.append(getWKTName(geometry)); sb.append(SPACE); if (geometry.isEmpty()) { @@ -216,7 +220,7 @@ public Void visit(Rectangle rectangle) { } } - public static Geometry fromWKT(String wkt) throws IOException, ParseException { + public Geometry fromWKT(String wkt) throws IOException, ParseException { StringReader reader = new StringReader(wkt); try { // setup the tokenizer; configured to read words w/o numbers diff --git a/libs/geo/src/test/java/org/elasticsearch/geo/geometry/BaseGeometryTestCase.java b/libs/geo/src/test/java/org/elasticsearch/geo/geometry/BaseGeometryTestCase.java index cc7dcd340c734..6d965bb9fd47e 100644 --- a/libs/geo/src/test/java/org/elasticsearch/geo/geometry/BaseGeometryTestCase.java +++ b/libs/geo/src/test/java/org/elasticsearch/geo/geometry/BaseGeometryTestCase.java @@ -53,9 +53,10 @@ protected Writeable.Reader instanceReader() { @SuppressWarnings("unchecked") @Override protected T copyInstance(T instance, Version version) throws IOException { - String text = WellKnownText.toWKT(instance); + WellKnownText wkt = new WellKnownText(); + String text = wkt.toWKT(instance); try { - return (T) WellKnownText.fromWKT(text); + return (T) wkt.fromWKT(text); } catch (ParseException e) { throw new ElasticsearchException(e); } diff --git a/libs/geo/src/test/java/org/elasticsearch/geo/geometry/CircleTests.java b/libs/geo/src/test/java/org/elasticsearch/geo/geometry/CircleTests.java index a38a29af24b54..e451c9b0b28b8 100644 --- a/libs/geo/src/test/java/org/elasticsearch/geo/geometry/CircleTests.java +++ b/libs/geo/src/test/java/org/elasticsearch/geo/geometry/CircleTests.java @@ -36,14 +36,15 @@ protected Circle createTestInstance(boolean hasAlt) { } public void testBasicSerialization() throws IOException, ParseException { - assertEquals("circle (20.0 10.0 15.0)", WellKnownText.toWKT(new Circle(10, 20, 15))); - assertEquals(new Circle(10, 20, 15), WellKnownText.fromWKT("circle (20.0 10.0 15.0)")); + WellKnownText wkt = new WellKnownText(); + assertEquals("circle (20.0 10.0 15.0)", wkt.toWKT(new Circle(10, 20, 15))); + assertEquals(new Circle(10, 20, 15), wkt.fromWKT("circle (20.0 10.0 15.0)")); - assertEquals("circle (20.0 10.0 15.0 25.0)", WellKnownText.toWKT(new Circle(10, 20, 25, 15))); - assertEquals(new Circle(10, 20, 25, 15), WellKnownText.fromWKT("circle (20.0 10.0 15.0 25.0)")); + assertEquals("circle (20.0 10.0 15.0 25.0)", wkt.toWKT(new Circle(10, 20, 25, 15))); + assertEquals(new Circle(10, 20, 25, 15), wkt.fromWKT("circle (20.0 10.0 15.0 25.0)")); - assertEquals("circle EMPTY", WellKnownText.toWKT(Circle.EMPTY)); - assertEquals(Circle.EMPTY, WellKnownText.fromWKT("circle EMPTY)")); + assertEquals("circle EMPTY", wkt.toWKT(Circle.EMPTY)); + assertEquals(Circle.EMPTY, wkt.fromWKT("circle EMPTY)")); } public void testInitValidation() { diff --git a/libs/geo/src/test/java/org/elasticsearch/geo/geometry/GeometryCollectionTests.java b/libs/geo/src/test/java/org/elasticsearch/geo/geometry/GeometryCollectionTests.java index 8b6415270635e..3bf437ef5ae64 100644 --- a/libs/geo/src/test/java/org/elasticsearch/geo/geometry/GeometryCollectionTests.java +++ b/libs/geo/src/test/java/org/elasticsearch/geo/geometry/GeometryCollectionTests.java @@ -35,14 +35,15 @@ protected GeometryCollection createTestInstance(boolean hasAlt) { public void testBasicSerialization() throws IOException, ParseException { + WellKnownText wkt = new WellKnownText(); assertEquals("geometrycollection (point (20.0 10.0),point EMPTY)", - WellKnownText.toWKT(new GeometryCollection(Arrays.asList(new Point(10, 20), Point.EMPTY)))); + wkt.toWKT(new GeometryCollection(Arrays.asList(new Point(10, 20), Point.EMPTY)))); assertEquals(new GeometryCollection(Arrays.asList(new Point(10, 20), Point.EMPTY)), - WellKnownText.fromWKT("geometrycollection (point (20.0 10.0),point EMPTY)")); + wkt.fromWKT("geometrycollection (point (20.0 10.0),point EMPTY)")); - assertEquals("geometrycollection EMPTY", WellKnownText.toWKT(GeometryCollection.EMPTY)); - assertEquals(GeometryCollection.EMPTY, WellKnownText.fromWKT("geometrycollection EMPTY)")); + assertEquals("geometrycollection EMPTY", wkt.toWKT(GeometryCollection.EMPTY)); + assertEquals(GeometryCollection.EMPTY, wkt.fromWKT("geometrycollection EMPTY)")); } @SuppressWarnings("ConstantConditions") diff --git a/libs/geo/src/test/java/org/elasticsearch/geo/geometry/LineTests.java b/libs/geo/src/test/java/org/elasticsearch/geo/geometry/LineTests.java index 7156039f9085f..48e6cb8ea11c9 100644 --- a/libs/geo/src/test/java/org/elasticsearch/geo/geometry/LineTests.java +++ b/libs/geo/src/test/java/org/elasticsearch/geo/geometry/LineTests.java @@ -31,16 +31,17 @@ protected Line createTestInstance(boolean hasAlt) { } public void testBasicSerialization() throws IOException, ParseException { - assertEquals("linestring (3.0 1.0, 4.0 2.0)", WellKnownText.toWKT(new Line(new double[]{1, 2}, new double[]{3, 4}))); - assertEquals(new Line(new double[]{1, 2}, new double[]{3, 4}), WellKnownText.fromWKT("linestring (3 1, 4 2)")); + WellKnownText wkt = new WellKnownText(); + assertEquals("linestring (3.0 1.0, 4.0 2.0)", wkt.toWKT(new Line(new double[]{1, 2}, new double[]{3, 4}))); + assertEquals(new Line(new double[]{1, 2}, new double[]{3, 4}), wkt.fromWKT("linestring (3 1, 4 2)")); - assertEquals("linestring (3.0 1.0 5.0, 4.0 2.0 6.0)", WellKnownText.toWKT(new Line(new double[]{1, 2}, new double[]{3, 4}, + assertEquals("linestring (3.0 1.0 5.0, 4.0 2.0 6.0)", wkt.toWKT(new Line(new double[]{1, 2}, new double[]{3, 4}, new double[]{5, 6}))); assertEquals(new Line(new double[]{1, 2}, new double[]{3, 4}, new double[]{6, 5}), - WellKnownText.fromWKT("linestring (3 1 6, 4 2 5)")); + wkt.fromWKT("linestring (3 1 6, 4 2 5)")); - assertEquals("linestring EMPTY", WellKnownText.toWKT(Line.EMPTY)); - assertEquals(Line.EMPTY, WellKnownText.fromWKT("linestring EMPTY)")); + assertEquals("linestring EMPTY", wkt.toWKT(Line.EMPTY)); + assertEquals(Line.EMPTY, wkt.fromWKT("linestring EMPTY)")); } public void testInitValidation() { diff --git a/libs/geo/src/test/java/org/elasticsearch/geo/geometry/LinearRingTests.java b/libs/geo/src/test/java/org/elasticsearch/geo/geometry/LinearRingTests.java index d5b708f558cac..57c99f3e587f9 100644 --- a/libs/geo/src/test/java/org/elasticsearch/geo/geometry/LinearRingTests.java +++ b/libs/geo/src/test/java/org/elasticsearch/geo/geometry/LinearRingTests.java @@ -26,7 +26,7 @@ public class LinearRingTests extends ESTestCase { public void testBasicSerialization() { UnsupportedOperationException ex = expectThrows(UnsupportedOperationException.class, - () -> WellKnownText.toWKT(new LinearRing(new double[]{1, 2, 3, 1}, new double[]{3, 4, 5, 3}))); + () -> new WellKnownText().toWKT(new LinearRing(new double[]{1, 2, 3, 1}, new double[]{3, 4, 5, 3}))); assertEquals("line ring cannot be serialized using WKT", ex.getMessage()); } diff --git a/libs/geo/src/test/java/org/elasticsearch/geo/geometry/MultiLineTests.java b/libs/geo/src/test/java/org/elasticsearch/geo/geometry/MultiLineTests.java index 3fcb84d93b7d2..9e913ede35130 100644 --- a/libs/geo/src/test/java/org/elasticsearch/geo/geometry/MultiLineTests.java +++ b/libs/geo/src/test/java/org/elasticsearch/geo/geometry/MultiLineTests.java @@ -40,12 +40,13 @@ protected MultiLine createTestInstance(boolean hasAlt) { } public void testBasicSerialization() throws IOException, ParseException { - assertEquals("multilinestring ((3.0 1.0, 4.0 2.0))", WellKnownText.toWKT( + WellKnownText wkt = new WellKnownText(); + assertEquals("multilinestring ((3.0 1.0, 4.0 2.0))", wkt.toWKT( new MultiLine(Collections.singletonList(new Line(new double[]{1, 2}, new double[]{3, 4}))))); assertEquals(new MultiLine(Collections.singletonList(new Line(new double[]{1, 2}, new double[]{3, 4}))), - WellKnownText.fromWKT("multilinestring ((3 1, 4 2))")); + wkt.fromWKT("multilinestring ((3 1, 4 2))")); - assertEquals("multilinestring EMPTY", WellKnownText.toWKT(MultiLine.EMPTY)); - assertEquals(MultiLine.EMPTY, WellKnownText.fromWKT("multilinestring EMPTY)")); + assertEquals("multilinestring EMPTY", wkt.toWKT(MultiLine.EMPTY)); + assertEquals(MultiLine.EMPTY, wkt.fromWKT("multilinestring EMPTY)")); } } diff --git a/libs/geo/src/test/java/org/elasticsearch/geo/geometry/MultiPointTests.java b/libs/geo/src/test/java/org/elasticsearch/geo/geometry/MultiPointTests.java index ecdcc0815a82b..cd51a00d9486d 100644 --- a/libs/geo/src/test/java/org/elasticsearch/geo/geometry/MultiPointTests.java +++ b/libs/geo/src/test/java/org/elasticsearch/geo/geometry/MultiPointTests.java @@ -41,22 +41,23 @@ protected MultiPoint createTestInstance(boolean hasAlt) { } public void testBasicSerialization() throws IOException, ParseException { - assertEquals("multipoint (2.0 1.0)", WellKnownText.toWKT( + WellKnownText wkt = new WellKnownText(); + assertEquals("multipoint (2.0 1.0)", wkt.toWKT( new MultiPoint(Collections.singletonList(new Point(1, 2))))); assertEquals(new MultiPoint(Collections.singletonList(new Point(1 ,2))), - WellKnownText.fromWKT("multipoint (2 1)")); + wkt.fromWKT("multipoint (2 1)")); assertEquals("multipoint (2.0 1.0, 3.0 4.0)", - WellKnownText.toWKT(new MultiPoint(Arrays.asList(new Point(1, 2), new Point(4, 3))))); + wkt.toWKT(new MultiPoint(Arrays.asList(new Point(1, 2), new Point(4, 3))))); assertEquals(new MultiPoint(Arrays.asList(new Point(1, 2), new Point(4, 3))), - WellKnownText.fromWKT("multipoint (2 1, 3 4)")); + wkt.fromWKT("multipoint (2 1, 3 4)")); assertEquals("multipoint (2.0 1.0 10.0, 3.0 4.0 20.0)", - WellKnownText.toWKT(new MultiPoint(Arrays.asList(new Point(1, 2, 10), new Point(4, 3, 20))))); + wkt.toWKT(new MultiPoint(Arrays.asList(new Point(1, 2, 10), new Point(4, 3, 20))))); assertEquals(new MultiPoint(Arrays.asList(new Point(1, 2, 10), new Point(4, 3, 20))), - WellKnownText.fromWKT("multipoint (2 1 10, 3 4 20)")); + wkt.fromWKT("multipoint (2 1 10, 3 4 20)")); - assertEquals("multipoint EMPTY", WellKnownText.toWKT(MultiPoint.EMPTY)); - assertEquals(MultiPoint.EMPTY, WellKnownText.fromWKT("multipoint EMPTY)")); + assertEquals("multipoint EMPTY", wkt.toWKT(MultiPoint.EMPTY)); + assertEquals(MultiPoint.EMPTY, wkt.fromWKT("multipoint EMPTY)")); } } diff --git a/libs/geo/src/test/java/org/elasticsearch/geo/geometry/MultiPolygonTests.java b/libs/geo/src/test/java/org/elasticsearch/geo/geometry/MultiPolygonTests.java index 1b82cfd3871a5..8b344f0422936 100644 --- a/libs/geo/src/test/java/org/elasticsearch/geo/geometry/MultiPolygonTests.java +++ b/libs/geo/src/test/java/org/elasticsearch/geo/geometry/MultiPolygonTests.java @@ -40,14 +40,15 @@ protected MultiPolygon createTestInstance(boolean hasAlt) { } public void testBasicSerialization() throws IOException, ParseException { + WellKnownText wkt = new WellKnownText(); assertEquals("multipolygon (((3.0 1.0, 4.0 2.0, 5.0 3.0, 3.0 1.0)))", - WellKnownText.toWKT(new MultiPolygon(Collections.singletonList( + wkt.toWKT(new MultiPolygon(Collections.singletonList( new Polygon(new LinearRing(new double[]{1, 2, 3, 1}, new double[]{3, 4, 5, 3})))))); assertEquals(new MultiPolygon(Collections.singletonList( new Polygon(new LinearRing(new double[]{1, 2, 3, 1}, new double[]{3, 4, 5, 3})))), - WellKnownText.fromWKT("multipolygon (((3.0 1.0, 4.0 2.0, 5.0 3.0, 3.0 1.0)))")); + wkt.fromWKT("multipolygon (((3.0 1.0, 4.0 2.0, 5.0 3.0, 3.0 1.0)))")); - assertEquals("multipolygon EMPTY", WellKnownText.toWKT(MultiPolygon.EMPTY)); - assertEquals(MultiPolygon.EMPTY, WellKnownText.fromWKT("multipolygon EMPTY)")); + assertEquals("multipolygon EMPTY", wkt.toWKT(MultiPolygon.EMPTY)); + assertEquals(MultiPolygon.EMPTY, wkt.fromWKT("multipolygon EMPTY)")); } } diff --git a/libs/geo/src/test/java/org/elasticsearch/geo/geometry/PointTests.java b/libs/geo/src/test/java/org/elasticsearch/geo/geometry/PointTests.java index d480f44e30a44..5bb776603da15 100644 --- a/libs/geo/src/test/java/org/elasticsearch/geo/geometry/PointTests.java +++ b/libs/geo/src/test/java/org/elasticsearch/geo/geometry/PointTests.java @@ -31,14 +31,15 @@ protected Point createTestInstance(boolean hasAlt) { } public void testBasicSerialization() throws IOException, ParseException { - assertEquals("point (20.0 10.0)", WellKnownText.toWKT(new Point(10, 20))); - assertEquals(new Point(10, 20), WellKnownText.fromWKT("point (20.0 10.0)")); + WellKnownText wkt = new WellKnownText(); + assertEquals("point (20.0 10.0)", wkt.toWKT(new Point(10, 20))); + assertEquals(new Point(10, 20), wkt.fromWKT("point (20.0 10.0)")); - assertEquals("point (20.0 10.0 100.0)", WellKnownText.toWKT(new Point(10, 20, 100))); - assertEquals(new Point(10, 20, 100), WellKnownText.fromWKT("point (20.0 10.0 100.0)")); + assertEquals("point (20.0 10.0 100.0)", wkt.toWKT(new Point(10, 20, 100))); + assertEquals(new Point(10, 20, 100), wkt.fromWKT("point (20.0 10.0 100.0)")); - assertEquals("point EMPTY", WellKnownText.toWKT(Point.EMPTY)); - assertEquals(Point.EMPTY, WellKnownText.fromWKT("point EMPTY)")); + assertEquals("point EMPTY", wkt.toWKT(Point.EMPTY)); + assertEquals(Point.EMPTY, wkt.fromWKT("point EMPTY)")); } public void testInitValidation() { diff --git a/libs/geo/src/test/java/org/elasticsearch/geo/geometry/PolygonTests.java b/libs/geo/src/test/java/org/elasticsearch/geo/geometry/PolygonTests.java index fd3032435891c..ec80dee7940c4 100644 --- a/libs/geo/src/test/java/org/elasticsearch/geo/geometry/PolygonTests.java +++ b/libs/geo/src/test/java/org/elasticsearch/geo/geometry/PolygonTests.java @@ -32,18 +32,19 @@ protected Polygon createTestInstance(boolean hasAlt) { } public void testBasicSerialization() throws IOException, ParseException { + WellKnownText wkt = new WellKnownText(); assertEquals("polygon ((3.0 1.0, 4.0 2.0, 5.0 3.0, 3.0 1.0))", - WellKnownText.toWKT(new Polygon(new LinearRing(new double[]{1, 2, 3, 1}, new double[]{3, 4, 5, 3})))); + wkt.toWKT(new Polygon(new LinearRing(new double[]{1, 2, 3, 1}, new double[]{3, 4, 5, 3})))); assertEquals(new Polygon(new LinearRing(new double[]{1, 2, 3, 1}, new double[]{3, 4, 5, 3})), - WellKnownText.fromWKT("polygon ((3 1, 4 2, 5 3, 3 1))")); + wkt.fromWKT("polygon ((3 1, 4 2, 5 3, 3 1))")); assertEquals("polygon ((3.0 1.0 5.0, 4.0 2.0 4.0, 5.0 3.0 3.0, 3.0 1.0 5.0))", - WellKnownText.toWKT(new Polygon(new LinearRing(new double[]{1, 2, 3, 1}, new double[]{3, 4, 5, 3}, new double[]{5, 4, 3, 5})))); + wkt.toWKT(new Polygon(new LinearRing(new double[]{1, 2, 3, 1}, new double[]{3, 4, 5, 3}, new double[]{5, 4, 3, 5})))); assertEquals(new Polygon(new LinearRing(new double[]{1, 2, 3, 1}, new double[]{3, 4, 5, 3}, new double[]{5, 4, 3, 5})), - WellKnownText.fromWKT("polygon ((3 1 5, 4 2 4, 5 3 3, 3 1 5))")); + wkt.fromWKT("polygon ((3 1 5, 4 2 4, 5 3 3, 3 1 5))")); - assertEquals("polygon EMPTY", WellKnownText.toWKT(Polygon.EMPTY)); - assertEquals(Polygon.EMPTY, WellKnownText.fromWKT("polygon EMPTY)")); + assertEquals("polygon EMPTY", wkt.toWKT(Polygon.EMPTY)); + assertEquals(Polygon.EMPTY, wkt.fromWKT("polygon EMPTY)")); } public void testInitValidation() { diff --git a/libs/geo/src/test/java/org/elasticsearch/geo/geometry/RectangleTests.java b/libs/geo/src/test/java/org/elasticsearch/geo/geometry/RectangleTests.java index 14cb777f94bb9..75ea3dd809b0c 100644 --- a/libs/geo/src/test/java/org/elasticsearch/geo/geometry/RectangleTests.java +++ b/libs/geo/src/test/java/org/elasticsearch/geo/geometry/RectangleTests.java @@ -32,11 +32,12 @@ protected Rectangle createTestInstance(boolean hasAlt) { } public void testBasicSerialization() throws IOException, ParseException { - assertEquals("bbox (10.0, 20.0, 40.0, 30.0)", WellKnownText.toWKT(new Rectangle(30, 40, 10, 20))); - assertEquals(new Rectangle(30, 40, 10, 20), WellKnownText.fromWKT("bbox (10.0, 20.0, 40.0, 30.0)")); + WellKnownText wkt = new WellKnownText(); + assertEquals("bbox (10.0, 20.0, 40.0, 30.0)", wkt.toWKT(new Rectangle(30, 40, 10, 20))); + assertEquals(new Rectangle(30, 40, 10, 20), wkt.fromWKT("bbox (10.0, 20.0, 40.0, 30.0)")); - assertEquals("bbox EMPTY", WellKnownText.toWKT(Rectangle.EMPTY)); - assertEquals(Rectangle.EMPTY, WellKnownText.fromWKT("bbox EMPTY)")); + assertEquals("bbox EMPTY", wkt.toWKT(Rectangle.EMPTY)); + assertEquals(Rectangle.EMPTY, wkt.fromWKT("bbox EMPTY)")); } public void testInitValidation() { diff --git a/server/src/main/java/org/elasticsearch/common/geo/GeoJson.java b/server/src/main/java/org/elasticsearch/common/geo/GeoJson.java index 3489eca8b58e4..4fcb20d60b9df 100644 --- a/server/src/main/java/org/elasticsearch/common/geo/GeoJson.java +++ b/server/src/main/java/org/elasticsearch/common/geo/GeoJson.java @@ -64,14 +64,20 @@ public final class GeoJson { private static final ParseField FIELD_ORIENTATION = new ParseField("orientation"); private static final ParseField FIELD_RADIUS = new ParseField("radius"); - private GeoJson() { - + private final boolean rightOrientation; + private final boolean coerce; + private final boolean ignoreZValue; + + public GeoJson(boolean rightOrientation, boolean coerce, boolean ignoreZValue) { + this.rightOrientation = rightOrientation; + this.coerce = coerce; + this.ignoreZValue = ignoreZValue; } - public static Geometry fromXContent(XContentParser parser, boolean rightOrientation, boolean coerce, boolean ignoreZValue) + public Geometry fromXContent(XContentParser parser) throws IOException { try (XContentSubParser subParser = new XContentSubParser(parser)) { - return PARSER.apply(subParser, new ParserContext(rightOrientation, coerce, ignoreZValue)); + return PARSER.apply(subParser, this); } } @@ -197,26 +203,14 @@ private XContentBuilder coordinatesToXContent(Polygon polygon) throws IOExceptio return builder.endObject(); } - private static class ParserContext { - public final boolean defaultOrientation; - public final boolean coerce; - public final boolean ignoreZValue; - - ParserContext(boolean defaultOrientation, boolean coerce, boolean ignoreZValue) { - this.defaultOrientation = defaultOrientation; - this.coerce = coerce; - this.ignoreZValue = ignoreZValue; - } - } - - private static ConstructingObjectParser PARSER = + private static ConstructingObjectParser PARSER = new ConstructingObjectParser<>("geojson", true, (a, c) -> { String type = (String) a[0]; CoordinateNode coordinates = (CoordinateNode) a[1]; @SuppressWarnings("unchecked") List geometries = (List) a[2]; Boolean orientation = orientationFromString((String) a[3]); DistanceUnit.Distance radius = (DistanceUnit.Distance) a[4]; - return createGeometry(type, geometries, coordinates, orientation, c.defaultOrientation, c.coerce, radius); + return createGeometry(type, geometries, coordinates, orientation, c.rightOrientation, c.coerce, radius); }); static { diff --git a/server/src/main/java/org/elasticsearch/common/geo/GeometryParser.java b/server/src/main/java/org/elasticsearch/common/geo/GeometryParser.java index 8e1db18ccdd97..6308deb084ea1 100644 --- a/server/src/main/java/org/elasticsearch/common/geo/GeometryParser.java +++ b/server/src/main/java/org/elasticsearch/common/geo/GeometryParser.java @@ -32,22 +32,26 @@ */ public final class GeometryParser { - private GeometryParser() { + private final GeoJson geoJsonParser; + private final WellKnownText wellKnownTextParser; + public GeometryParser(boolean rightOrientation, boolean coerce, boolean ignoreZValue) { + geoJsonParser = new GeoJson(rightOrientation, coerce, ignoreZValue); + wellKnownTextParser = new WellKnownText(); } /** * Parses supplied XContent into Geometry */ - public static Geometry parse(XContentParser parser, boolean orientation, boolean coerce, boolean ignoreZValue) throws IOException, + public Geometry parse(XContentParser parser) throws IOException, ParseException { if (parser.currentToken() == XContentParser.Token.VALUE_NULL) { return null; } else if (parser.currentToken() == XContentParser.Token.START_OBJECT) { - return GeoJson.fromXContent(parser, orientation, coerce, ignoreZValue); + return geoJsonParser.fromXContent(parser); } else if (parser.currentToken() == XContentParser.Token.VALUE_STRING) { // TODO: Add support for ignoreZValue and coerce to WKT - return WellKnownText.fromWKT(parser.text()); + return wellKnownTextParser.fromWKT(parser.text()); } throw new ElasticsearchParseException("shape must be an object consisting of type and coordinates"); } diff --git a/server/src/test/java/org/elasticsearch/common/geo/BaseGeoParsingTestCase.java b/server/src/test/java/org/elasticsearch/common/geo/BaseGeoParsingTestCase.java index 7f6c56855ec70..d19f7934c6567 100644 --- a/server/src/test/java/org/elasticsearch/common/geo/BaseGeoParsingTestCase.java +++ b/server/src/test/java/org/elasticsearch/common/geo/BaseGeoParsingTestCase.java @@ -70,7 +70,7 @@ protected void assertGeometryEquals(Object expected, XContentBuilder geoJson, bo protected void assertGeometryEquals(org.elasticsearch.geo.geometry.Geometry expected, XContentBuilder geoJson) throws IOException { try (XContentParser parser = createParser(geoJson)) { parser.nextToken(); - assertEquals(expected, GeoJson.fromXContent(parser, true, false, false)); + assertEquals(expected, new GeoJson(true, false, false).fromXContent(parser)); } } diff --git a/server/src/test/java/org/elasticsearch/common/geo/GeoJsonParserTests.java b/server/src/test/java/org/elasticsearch/common/geo/GeoJsonParserTests.java index 4e2c2e50e2cf1..e095c7e381a82 100644 --- a/server/src/test/java/org/elasticsearch/common/geo/GeoJsonParserTests.java +++ b/server/src/test/java/org/elasticsearch/common/geo/GeoJsonParserTests.java @@ -72,7 +72,7 @@ public void testParseLineString() throws IOException { Line expected = new Line(new double[] {0.0, 1.0}, new double[] { 100.0, 101.0}); try (XContentParser parser = createParser(lineGeoJson)) { parser.nextToken(); - assertEquals(expected, GeoJson.fromXContent(parser, false, false, true)); + assertEquals(expected, new GeoJson(false, false, true).fromXContent(parser)); } } @@ -124,7 +124,7 @@ public void testParseMultiDimensionShapes() throws IOException { try (XContentParser parser = createParser(pointGeoJson)) { parser.nextToken(); - expectThrows(XContentParseException.class, () -> GeoJson.fromXContent(parser, false, false, false)); + expectThrows(XContentParseException.class, () -> new GeoJson(false, false, false).fromXContent(parser)); assertNull(parser.nextToken()); } @@ -140,7 +140,7 @@ public void testParseMultiDimensionShapes() throws IOException { try (XContentParser parser = createParser(lineGeoJson)) { parser.nextToken(); - expectThrows(XContentParseException.class, () -> GeoJson.fromXContent(parser, false, false, false)); + expectThrows(XContentParseException.class, () -> new GeoJson(false, false, false).fromXContent(parser)); assertNull(parser.nextToken()); } } @@ -178,7 +178,7 @@ public void testParseEnvelope() throws IOException { .endObject(); try (XContentParser parser = createParser(multilinesGeoJson)) { parser.nextToken(); - expectThrows(XContentParseException.class, () -> GeoJson.fromXContent(parser, false, false, false)); + expectThrows(XContentParseException.class, () -> new GeoJson(false, false, false).fromXContent(parser)); assertNull(parser.nextToken()); } @@ -189,7 +189,7 @@ public void testParseEnvelope() throws IOException { .endObject(); try (XContentParser parser = createParser(multilinesGeoJson)) { parser.nextToken(); - expectThrows(XContentParseException.class, () -> GeoJson.fromXContent(parser, false, false, false)); + expectThrows(XContentParseException.class, () -> new GeoJson(false, false, false).fromXContent(parser)); assertNull(parser.nextToken()); } } @@ -239,7 +239,7 @@ public void testParse3DPolygon() throws IOException { )); try (XContentParser parser = createParser(polygonGeoJson)) { parser.nextToken(); - assertEquals(expected, GeoJson.fromXContent(parser, true, false, true)); + assertEquals(expected, new GeoJson(true, false, true).fromXContent(parser)); } } @@ -259,7 +259,7 @@ public void testInvalidDimensionalPolygon() throws IOException { .endObject(); try (XContentParser parser = createParser(polygonGeoJson)) { parser.nextToken(); - expectThrows(XContentParseException.class, () -> GeoJson.fromXContent(parser, true, false, true)); + expectThrows(XContentParseException.class, () -> new GeoJson(true, false, true).fromXContent(parser)); assertNull(parser.nextToken()); } } @@ -275,7 +275,7 @@ public void testParseInvalidPoint() throws IOException { .endObject(); try (XContentParser parser = createParser(invalidPoint1)) { parser.nextToken(); - expectThrows(XContentParseException.class, () -> GeoJson.fromXContent(parser, true, false, false)); + expectThrows(XContentParseException.class, () -> new GeoJson(true, false, false).fromXContent(parser)); assertNull(parser.nextToken()); } @@ -288,7 +288,7 @@ public void testParseInvalidPoint() throws IOException { .endObject(); try (XContentParser parser = createParser(invalidPoint2)) { parser.nextToken(); - expectThrows(XContentParseException.class, () -> GeoJson.fromXContent(parser, true, false, false)); + expectThrows(XContentParseException.class, () -> new GeoJson(true, false, false).fromXContent(parser)); assertNull(parser.nextToken()); } } @@ -302,7 +302,7 @@ public void testParseInvalidMultipoint() throws IOException { .endObject(); try (XContentParser parser = createParser(invalidMultipoint1)) { parser.nextToken(); - expectThrows(XContentParseException.class, () -> GeoJson.fromXContent(parser, true, false, false)); + expectThrows(XContentParseException.class, () -> new GeoJson(true, false, false).fromXContent(parser)); assertNull(parser.nextToken()); } @@ -315,7 +315,7 @@ public void testParseInvalidMultipoint() throws IOException { .endObject(); try (XContentParser parser = createParser(invalidMultipoint2)) { parser.nextToken(); - expectThrows(XContentParseException.class, () -> GeoJson.fromXContent(parser, true, false, false)); + expectThrows(XContentParseException.class, () -> new GeoJson(true, false, false).fromXContent(parser)); assertNull(parser.nextToken()); } @@ -329,7 +329,7 @@ public void testParseInvalidMultipoint() throws IOException { .endObject(); try (XContentParser parser = createParser(invalidMultipoint3)) { parser.nextToken(); - expectThrows(XContentParseException.class, () -> GeoJson.fromXContent(parser, true, false, false)); + expectThrows(XContentParseException.class, () -> new GeoJson(true, false, false).fromXContent(parser)); assertNull(parser.nextToken()); } } @@ -370,7 +370,7 @@ public void testParseInvalidDimensionalMultiPolygon() throws IOException { try (XContentParser parser = createParser(JsonXContent.jsonXContent, multiPolygonGeoJson)) { parser.nextToken(); - expectThrows(XContentParseException.class, () -> GeoJson.fromXContent(parser, true, false, false)); + expectThrows(XContentParseException.class, () -> new GeoJson(true, false, false).fromXContent(parser)); assertNull(parser.nextToken()); } } @@ -391,7 +391,7 @@ public void testParseInvalidPolygon() throws IOException { .endObject()); try (XContentParser parser = createParser(JsonXContent.jsonXContent, invalidPoly)) { parser.nextToken(); - expectThrows(XContentParseException.class, () -> GeoJson.fromXContent(parser, true, false, false)); + expectThrows(XContentParseException.class, () -> new GeoJson(true, false, false).fromXContent(parser)); assertNull(parser.nextToken()); } @@ -406,7 +406,7 @@ public void testParseInvalidPolygon() throws IOException { try (XContentParser parser = createParser(JsonXContent.jsonXContent, invalidPoly)) { parser.nextToken(); - expectThrows(XContentParseException.class, () -> GeoJson.fromXContent(parser, true, false, false)); + expectThrows(XContentParseException.class, () -> new GeoJson(true, false, false).fromXContent(parser)); assertNull(parser.nextToken()); } @@ -421,7 +421,7 @@ public void testParseInvalidPolygon() throws IOException { try (XContentParser parser = createParser(JsonXContent.jsonXContent, invalidPoly)) { parser.nextToken(); - expectThrows(XContentParseException.class, () -> GeoJson.fromXContent(parser, true, false, false)); + expectThrows(XContentParseException.class, () -> new GeoJson(true, false, false).fromXContent(parser)); assertNull(parser.nextToken()); } @@ -436,7 +436,7 @@ public void testParseInvalidPolygon() throws IOException { try (XContentParser parser = createParser(JsonXContent.jsonXContent, invalidPoly)) { parser.nextToken(); - expectThrows(XContentParseException.class, () -> GeoJson.fromXContent(parser, true, false, false)); + expectThrows(XContentParseException.class, () -> new GeoJson(true, false, false).fromXContent(parser)); assertNull(parser.nextToken()); } @@ -449,7 +449,7 @@ public void testParseInvalidPolygon() throws IOException { try (XContentParser parser = createParser(JsonXContent.jsonXContent, invalidPoly)) { parser.nextToken(); - expectThrows(XContentParseException.class, () -> GeoJson.fromXContent(parser, true, false, false)); + expectThrows(XContentParseException.class, () -> new GeoJson(true, false, false).fromXContent(parser)); assertNull(parser.nextToken()); } @@ -460,7 +460,7 @@ public void testParseInvalidPolygon() throws IOException { try (XContentParser parser = createParser(JsonXContent.jsonXContent, invalidPoly)) { parser.nextToken(); - expectThrows(XContentParseException.class, () -> GeoJson.fromXContent(parser, true, false, false)); + expectThrows(XContentParseException.class, () -> new GeoJson(true, false, false).fromXContent(parser)); assertNull(parser.nextToken()); } @@ -473,7 +473,7 @@ public void testParseInvalidPolygon() throws IOException { try (XContentParser parser = createParser(JsonXContent.jsonXContent, invalidPoly)) { parser.nextToken(); - expectThrows(XContentParseException.class, () -> GeoJson.fromXContent(parser, true, false, false)); + expectThrows(XContentParseException.class, () -> new GeoJson(true, false, false).fromXContent(parser)); assertNull(parser.nextToken()); } } @@ -710,7 +710,7 @@ public void testParseInvalidShapes() throws IOException { try (XContentParser parser = createParser(tooLittlePointGeoJson)) { parser.nextToken(); - expectThrows(XContentParseException.class, () -> GeoJson.fromXContent(parser, true, false, false)); + expectThrows(XContentParseException.class, () -> new GeoJson(true, false, false).fromXContent(parser)); assertNull(parser.nextToken()); } @@ -723,7 +723,7 @@ public void testParseInvalidShapes() throws IOException { try (XContentParser parser = createParser(emptyPointGeoJson)) { parser.nextToken(); - expectThrows(XContentParseException.class, () -> GeoJson.fromXContent(parser, true, false, false)); + expectThrows(XContentParseException.class, () -> new GeoJson(true, false, false).fromXContent(parser)); assertNull(parser.nextToken()); } } @@ -749,7 +749,7 @@ public void testParseInvalidGeometryCollectionShapes() throws IOException { parser.nextToken(); // foo parser.nextToken(); // start object parser.nextToken(); // start object - expectThrows(XContentParseException.class, () -> GeoJson.fromXContent(parser, true, false, false)); + expectThrows(XContentParseException.class, () -> new GeoJson(true, false, false).fromXContent(parser)); assertEquals(XContentParser.Token.END_OBJECT, parser.nextToken()); // end of the document assertNull(parser.nextToken()); // no more elements afterwards } diff --git a/server/src/test/java/org/elasticsearch/common/geo/GeoJsonSerializationTests.java b/server/src/test/java/org/elasticsearch/common/geo/GeoJsonSerializationTests.java index ab6e3242654f5..b0ee969119c13 100644 --- a/server/src/test/java/org/elasticsearch/common/geo/GeoJsonSerializationTests.java +++ b/server/src/test/java/org/elasticsearch/common/geo/GeoJsonSerializationTests.java @@ -49,6 +49,7 @@ public class GeoJsonSerializationTests extends ESTestCase { private static class GeometryWrapper implements ToXContentObject { private Geometry geometry; + private static GeoJson PARSER = new GeoJson(true, false, true); GeometryWrapper(Geometry geometry) { this.geometry = geometry; @@ -61,7 +62,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws public static GeometryWrapper fromXContent(XContentParser parser) throws IOException { parser.nextToken(); - return new GeometryWrapper(GeoJson.fromXContent(parser, true, false, true)); + return new GeometryWrapper(PARSER.fromXContent(parser)); } @Override diff --git a/server/src/test/java/org/elasticsearch/common/geo/GeometryParserTests.java b/server/src/test/java/org/elasticsearch/common/geo/GeometryParserTests.java index 24ba7780cefd6..0d2b182741fc8 100644 --- a/server/src/test/java/org/elasticsearch/common/geo/GeometryParserTests.java +++ b/server/src/test/java/org/elasticsearch/common/geo/GeometryParserTests.java @@ -44,7 +44,7 @@ public void testGeoJsonParsing() throws Exception { try (XContentParser parser = createParser(pointGeoJson)) { parser.nextToken(); - assertEquals(new Point(0, 100), GeometryParser.parse(parser, true, randomBoolean(), randomBoolean())); + assertEquals(new Point(0, 100), new GeometryParser(true, randomBoolean(), randomBoolean()).parse(parser)); } XContentBuilder pointGeoJsonWithZ = XContentFactory.jsonBuilder() @@ -55,13 +55,13 @@ public void testGeoJsonParsing() throws Exception { try (XContentParser parser = createParser(pointGeoJsonWithZ)) { parser.nextToken(); - assertEquals(new Point(0, 100, 10.0), GeometryParser.parse(parser, true, randomBoolean(), true)); + assertEquals(new Point(0, 100, 10.0), new GeometryParser(true, randomBoolean(), true).parse(parser)); } try (XContentParser parser = createParser(pointGeoJsonWithZ)) { parser.nextToken(); - expectThrows(XContentParseException.class, () -> GeometryParser.parse(parser, true, randomBoolean(), false)); + expectThrows(XContentParseException.class, () -> new GeometryParser(true, randomBoolean(), false).parse(parser)); } XContentBuilder polygonGeoJson = XContentFactory.jsonBuilder() @@ -81,13 +81,13 @@ public void testGeoJsonParsing() throws Exception { try (XContentParser parser = createParser(polygonGeoJson)) { parser.nextToken(); // Coerce should automatically close the polygon - assertEquals(p, GeometryParser.parse(parser, true, true, randomBoolean())); + assertEquals(p, new GeometryParser(true, true, randomBoolean()).parse(parser)); } try (XContentParser parser = createParser(polygonGeoJson)) { parser.nextToken(); // No coerce - the polygon parsing should fail - expectThrows(XContentParseException.class, () -> GeometryParser.parse(parser, true, false, randomBoolean())); + expectThrows(XContentParseException.class, () -> new GeometryParser(true, false, randomBoolean()).parse(parser)); } } @@ -101,7 +101,7 @@ public void testWKTParsing() throws Exception { parser.nextToken(); // Start object parser.nextToken(); // Field Name parser.nextToken(); // Field Value - assertEquals(new Point(0, 100), GeometryParser.parse(parser, true, randomBoolean(), randomBoolean())); + assertEquals(new Point(0, 100), new GeometryParser(true, randomBoolean(), randomBoolean()).parse(parser)); } } @@ -115,7 +115,7 @@ public void testNullParsing() throws Exception { parser.nextToken(); // Start object parser.nextToken(); // Field Name parser.nextToken(); // Field Value - assertNull(GeometryParser.parse(parser, true, randomBoolean(), randomBoolean())); + assertNull(new GeometryParser(true, randomBoolean(), randomBoolean()).parse(parser)); } } @@ -130,7 +130,7 @@ public void testUnsupportedValueParsing() throws Exception { parser.nextToken(); // Field Name parser.nextToken(); // Field Value ElasticsearchParseException ex = expectThrows(ElasticsearchParseException.class, - () -> GeometryParser.parse(parser, true, randomBoolean(), randomBoolean())); + () -> new GeometryParser(true, randomBoolean(), randomBoolean()).parse(parser)); assertEquals("shape must be an object consisting of type and coordinates", ex.getMessage()); } } diff --git a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/TypeConverter.java b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/TypeConverter.java index 7e21f2206b1e9..339051577923a 100644 --- a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/TypeConverter.java +++ b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/TypeConverter.java @@ -54,6 +54,8 @@ */ final class TypeConverter { + private static WellKnownText WKT = new WellKnownText(); + private TypeConverter() {} /** @@ -246,7 +248,7 @@ static Object convert(Object v, EsType columnType, String typeString) throws SQL case GEO_POINT: case GEO_SHAPE: try { - return WellKnownText.fromWKT(v.toString()); + return WKT.fromWKT(v.toString()); } catch (IOException | ParseException ex) { throw new SQLException("Cannot parse geo_shape", ex); } diff --git a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/JdbcAssert.java b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/JdbcAssert.java index 76894fc5a53d5..dbf4e4b4d8e14 100644 --- a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/JdbcAssert.java +++ b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/JdbcAssert.java @@ -51,6 +51,8 @@ public class JdbcAssert { private static final IntObjectHashMap SQL_TO_TYPE = new IntObjectHashMap<>(); + private static final WellKnownText WKT = new WellKnownText(); + static { for (EsType type : EsType.values()) { SQL_TO_TYPE.putIfAbsent(type.getVendorTypeNumber().intValue(), type); @@ -270,7 +272,7 @@ else if (type == Types.DOUBLE) { if (actualObject instanceof Geometry) { // We need to convert the expected object to libs/geo Geometry for comparision try { - expectedObject = WellKnownText.fromWKT(expectedObject.toString()); + expectedObject = WKT.fromWKT(expectedObject.toString()); } catch (IOException | ParseException ex) { fail(ex.getMessage()); } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/GeoShape.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/GeoShape.java index 74b5c9646b853..da948cb740306 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/GeoShape.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/GeoShape.java @@ -49,6 +49,10 @@ public class GeoShape implements ToXContentFragment, NamedWriteable { private final Geometry shape; + private static final GeometryParser GEOMETRY_PARSER = new GeometryParser(true, true, true); + + private static final WellKnownText WKT_PARSER = new WellKnownText(); + public GeoShape(double lon, double lat) { shape = new Point(lat, lon); } @@ -72,17 +76,17 @@ public GeoShape(StreamInput in) throws IOException { @Override public void writeTo(StreamOutput out) throws IOException { - out.writeString(WellKnownText.toWKT(shape)); + out.writeString(WKT_PARSER.toWKT(shape)); } @Override public String toString() { - return WellKnownText.toWKT(shape); + return WKT_PARSER.toWKT(shape); } @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - return builder.value(WellKnownText.toWKT(shape)); + return builder.value(WKT_PARSER.toWKT(shape)); } public Geometry toGeometry() { @@ -216,7 +220,7 @@ private static Geometry parse(Object value) throws IOException, ParseException { parser.nextToken(); // start object parser.nextToken(); // field name parser.nextToken(); // field value - return GeometryParser.parse(parser, true, true, true); + return GEOMETRY_PARSER.parse(parser); } } } From f32f7d87c7f63f0fdd2874e800ebd4f63a7fc368 Mon Sep 17 00:00:00 2001 From: Tim Vernum Date: Thu, 30 May 2019 14:21:50 +1000 Subject: [PATCH 183/224] Detect when security index is closed (#42191) If the security index is closed, it should be treated as unavailable for security purposes. Prior to 8.0 (or in a mixed cluster) a closed security index has no routing data, which would cause a NPE in the cluster change handler, and the index state would not be updated correctly. This commit fixese that problem --- .../support/SecurityIndexManager.java | 77 ++++++++++++------- .../authc/AuthenticationServiceTests.java | 6 +- .../authc/esnative/NativeRealmTests.java | 6 +- .../mapper/NativeRoleMappingStoreTests.java | 18 +++-- .../authz/store/CompositeRolesStoreTests.java | 17 ++-- .../support/SecurityIndexManagerTests.java | 56 +++++++++++--- 6 files changed, 119 insertions(+), 61 deletions(-) diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/SecurityIndexManager.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/SecurityIndexManager.java index b62cb44ac028c..769465c3080a7 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/SecurityIndexManager.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/SecurityIndexManager.java @@ -45,8 +45,10 @@ import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.gateway.GatewayService; +import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.mapper.MapperService; +import org.elasticsearch.indices.IndexClosedException; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.xpack.core.security.index.RestrictedIndicesNames; import org.elasticsearch.xpack.core.template.TemplateUtils; @@ -168,9 +170,11 @@ public ElasticsearchException getUnavailableReason() { throw new IllegalStateException("caller must make sure to use a frozen state and check indexAvailable"); } - if (localState.indexExists()) { + if (localState.indexState == IndexMetaData.State.CLOSE) { + return new IndexClosedException(new Index(localState.concreteIndexName, ClusterState.UNKNOWN_UUID)); + } else if (localState.indexExists()) { return new UnavailableShardsException(null, - "at least one primary shard for the index [" + localState.concreteIndexName + "] is unavailable"); + "at least one primary shard for the index [" + localState.concreteIndexName + "] is unavailable"); } else { return new IndexNotFoundException(localState.concreteIndexName); } @@ -201,11 +205,24 @@ public void clusterChanged(ClusterChangedEvent event) { final boolean indexAvailable = checkIndexAvailable(event.state()); final boolean mappingIsUpToDate = indexMetaData == null || checkIndexMappingUpToDate(event.state()); final Version mappingVersion = oldestIndexMappingVersion(event.state()); - final ClusterHealthStatus indexStatus = indexMetaData == null ? null : - new ClusterIndexHealth(indexMetaData, event.state().getRoutingTable().index(indexMetaData.getIndex())).getStatus(); final String concreteIndexName = indexMetaData == null ? internalIndexName : indexMetaData.getIndex().getName(); + final ClusterHealthStatus indexHealth; + final IndexMetaData.State indexState; + if (indexMetaData == null) { + // Index does not exist + indexState = null; + indexHealth = null; + } else if (indexMetaData.getState() == IndexMetaData.State.CLOSE) { + indexState = IndexMetaData.State.CLOSE; + indexHealth = null; + logger.warn("Index [{}] is closed. This is likely to prevent security from functioning correctly", concreteIndexName); + } else { + indexState = IndexMetaData.State.OPEN; + final IndexRoutingTable routingTable = event.state().getRoutingTable().index(indexMetaData.getIndex()); + indexHealth = new ClusterIndexHealth(indexMetaData, routingTable).getStatus(); + } final State newState = new State(creationTime, isIndexUpToDate, indexAvailable, mappingIsUpToDate, mappingVersion, - concreteIndexName, indexStatus); + concreteIndexName, indexHealth, indexState); this.indexState = newState; if (newState.equals(previousState) == false) { @@ -216,23 +233,21 @@ public void clusterChanged(ClusterChangedEvent event) { } private boolean checkIndexAvailable(ClusterState state) { - final IndexRoutingTable routingTable = getIndexRoutingTable(state); - if (routingTable != null && routingTable.allPrimaryShardsActive()) { - return true; - } - logger.debug("Index [{}] is not yet active", aliasName); - return false; - } - - /** - * Returns the routing-table for this index, or null if the index does not exist. - */ - private IndexRoutingTable getIndexRoutingTable(ClusterState clusterState) { - IndexMetaData metaData = resolveConcreteIndex(aliasName, clusterState.metaData()); + IndexMetaData metaData = resolveConcreteIndex(aliasName, state.metaData()); if (metaData == null) { - return null; + logger.debug("Index [{}] is not available - no metadata", aliasName); + return false; + } + if (metaData.getState() == IndexMetaData.State.CLOSE) { + logger.warn("Index [{}] is closed", aliasName); + return false; + } + final IndexRoutingTable routingTable = state.routingTable().index(metaData.getIndex()); + if (routingTable == null || routingTable.allPrimaryShardsActive() == false) { + logger.debug("Index [{}] is not yet active", aliasName); + return false; } else { - return clusterState.routingTable().index(metaData.getIndex()); + return true; } } @@ -397,15 +412,15 @@ public void onFailure(Exception e) { * Return true if the state moves from an unhealthy ("RED") index state to a healthy ("non-RED") state. */ public static boolean isMoveFromRedToNonRed(State previousState, State currentState) { - return (previousState.indexStatus == null || previousState.indexStatus == ClusterHealthStatus.RED) - && currentState.indexStatus != null && currentState.indexStatus != ClusterHealthStatus.RED; + return (previousState.indexHealth == null || previousState.indexHealth == ClusterHealthStatus.RED) + && currentState.indexHealth != null && currentState.indexHealth != ClusterHealthStatus.RED; } /** * Return true if the state moves from the index existing to the index not existing. */ public static boolean isIndexDeleted(State previousState, State currentState) { - return previousState.indexStatus != null && currentState.indexStatus == null; + return previousState.indexHealth != null && currentState.indexHealth == null; } private static byte[] readTemplateAsBytes(String templateName) { @@ -435,24 +450,27 @@ private static Tuple parseMappingAndSettingsFromTemplateBytes( * State of the security index. */ public static class State { - public static final State UNRECOVERED_STATE = new State(null, false, false, false, null, null, null); + public static final State UNRECOVERED_STATE = new State(null, false, false, false, null, null, null, null); public final Instant creationTime; public final boolean isIndexUpToDate; public final boolean indexAvailable; public final boolean mappingUpToDate; public final Version mappingVersion; public final String concreteIndexName; - public final ClusterHealthStatus indexStatus; + public final ClusterHealthStatus indexHealth; + public final IndexMetaData.State indexState; public State(Instant creationTime, boolean isIndexUpToDate, boolean indexAvailable, - boolean mappingUpToDate, Version mappingVersion, String concreteIndexName, ClusterHealthStatus indexStatus) { + boolean mappingUpToDate, Version mappingVersion, String concreteIndexName, ClusterHealthStatus indexHealth, + IndexMetaData.State indexState) { this.creationTime = creationTime; this.isIndexUpToDate = isIndexUpToDate; this.indexAvailable = indexAvailable; this.mappingUpToDate = mappingUpToDate; this.mappingVersion = mappingVersion; this.concreteIndexName = concreteIndexName; - this.indexStatus = indexStatus; + this.indexHealth = indexHealth; + this.indexState = indexState; } @Override @@ -466,7 +484,8 @@ public boolean equals(Object o) { mappingUpToDate == state.mappingUpToDate && Objects.equals(mappingVersion, state.mappingVersion) && Objects.equals(concreteIndexName, state.concreteIndexName) && - indexStatus == state.indexStatus; + indexHealth == state.indexHealth && + indexState == state.indexState; } public boolean indexExists() { @@ -476,7 +495,7 @@ public boolean indexExists() { @Override public int hashCode() { return Objects.hash(creationTime, isIndexUpToDate, indexAvailable, mappingUpToDate, mappingVersion, concreteIndexName, - indexStatus); + indexHealth); } } } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/AuthenticationServiceTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/AuthenticationServiceTests.java index d8a7d9447946b..cb13032508909 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/AuthenticationServiceTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/AuthenticationServiceTests.java @@ -25,6 +25,7 @@ import org.elasticsearch.action.update.UpdateRequestBuilder; import org.elasticsearch.client.Client; import org.elasticsearch.cluster.health.ClusterHealthStatus; +import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.common.UUIDs; @@ -364,7 +365,7 @@ public void testCacheClearOnSecurityIndexChange() { // green to yellow or yellow to green previousState = dummyState(randomFrom(ClusterHealthStatus.GREEN, ClusterHealthStatus.YELLOW)); - currentState = dummyState(previousState.indexStatus == ClusterHealthStatus.GREEN ? + currentState = dummyState(previousState.indexHealth == ClusterHealthStatus.GREEN ? ClusterHealthStatus.YELLOW : ClusterHealthStatus.GREEN); service.onSecurityIndexStateChange(previousState, currentState); assertEquals(expectedInvalidation, service.getNumInvalidation()); @@ -1402,6 +1403,7 @@ private void setCompletedToTrue(AtomicBoolean completed) { } private SecurityIndexManager.State dummyState(ClusterHealthStatus indexStatus) { - return new SecurityIndexManager.State(Instant.now(), true, true, true, null, concreteSecurityIndexName, indexStatus); + return new SecurityIndexManager.State( + Instant.now(), true, true, true, null, concreteSecurityIndexName, indexStatus, IndexMetaData.State.OPEN); } } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/NativeRealmTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/NativeRealmTests.java index 28625f20627e1..a8682c4e21d84 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/NativeRealmTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/NativeRealmTests.java @@ -6,6 +6,7 @@ package org.elasticsearch.xpack.security.authc.esnative; import org.elasticsearch.cluster.health.ClusterHealthStatus; +import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.env.TestEnvironment; @@ -27,7 +28,8 @@ public class NativeRealmTests extends ESTestCase { RestrictedIndicesNames.INTERNAL_SECURITY_MAIN_INDEX_6, RestrictedIndicesNames.INTERNAL_SECURITY_MAIN_INDEX_7); private SecurityIndexManager.State dummyState(ClusterHealthStatus indexStatus) { - return new SecurityIndexManager.State(Instant.now(), true, true, true, null, concreteSecurityIndexName, indexStatus); + return new SecurityIndexManager.State( + Instant.now(), true, true, true, null, concreteSecurityIndexName, indexStatus, IndexMetaData.State.OPEN); } public void testCacheClearOnIndexHealthChange() { @@ -72,7 +74,7 @@ void clearCache() { // green to yellow or yellow to green previousState = dummyState(randomFrom(ClusterHealthStatus.GREEN, ClusterHealthStatus.YELLOW)); - currentState = dummyState(previousState.indexStatus == ClusterHealthStatus.GREEN ? + currentState = dummyState(previousState.indexHealth == ClusterHealthStatus.GREEN ? ClusterHealthStatus.YELLOW : ClusterHealthStatus.GREEN); nativeRealm.onSecurityIndexStateChange(previousState, currentState); assertEquals(expectedInvalidation, numInvalidation.get()); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStoreTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStoreTests.java index 3cca6cc4fd380..4dd1f13cf030f 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStoreTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStoreTests.java @@ -10,6 +10,7 @@ import org.elasticsearch.client.Client; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.health.ClusterHealthStatus; +import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; @@ -138,7 +139,12 @@ private String randomiseDn(String dn) { } private SecurityIndexManager.State dummyState(ClusterHealthStatus indexStatus) { - return new SecurityIndexManager.State(Instant.now(), true, true, true, null, concreteSecurityIndexName, indexStatus); + return indexState(true, indexStatus); + } + + private SecurityIndexManager.State indexState(boolean isUpToDate, ClusterHealthStatus healthStatus) { + return new SecurityIndexManager.State( + Instant.now(), isUpToDate, true, true, null, concreteSecurityIndexName, healthStatus, IndexMetaData.State.OPEN); } public void testCacheClearOnIndexHealthChange() { @@ -172,7 +178,7 @@ public void testCacheClearOnIndexHealthChange() { // green to yellow or yellow to green previousState = dummyState(randomFrom(ClusterHealthStatus.GREEN, ClusterHealthStatus.YELLOW)); - currentState = dummyState(previousState.indexStatus == ClusterHealthStatus.GREEN ? + currentState = dummyState(previousState.indexHealth == ClusterHealthStatus.GREEN ? ClusterHealthStatus.YELLOW : ClusterHealthStatus.GREEN); store.onSecurityIndexStateChange(previousState, currentState); assertEquals(expectedInvalidation, numInvalidation.get()); @@ -182,14 +188,10 @@ public void testCacheClearOnIndexOutOfDateChange() { final AtomicInteger numInvalidation = new AtomicInteger(0); final NativeRoleMappingStore store = buildRoleMappingStoreForInvalidationTesting(numInvalidation, true); - store.onSecurityIndexStateChange( - new SecurityIndexManager.State(Instant.now(), false, true, true, null, concreteSecurityIndexName, null), - new SecurityIndexManager.State(Instant.now(), true, true, true, null, concreteSecurityIndexName, null)); + store.onSecurityIndexStateChange(indexState(false, null), indexState(true, null)); assertEquals(1, numInvalidation.get()); - store.onSecurityIndexStateChange( - new SecurityIndexManager.State(Instant.now(), true, true, true, null, concreteSecurityIndexName, null), - new SecurityIndexManager.State(Instant.now(), false, true, true, null, concreteSecurityIndexName, null)); + store.onSecurityIndexStateChange(indexState(true, null), indexState(false, null)); assertEquals(2, numInvalidation.get()); } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStoreTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStoreTests.java index a39545f3a9b3a..b4e0a6a22cf81 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStoreTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStoreTests.java @@ -763,7 +763,12 @@ Settings.EMPTY, fileRolesStore, nativeRolesStore, reservedRolesStore, mock(Nativ } private SecurityIndexManager.State dummyState(ClusterHealthStatus indexStatus) { - return new SecurityIndexManager.State(Instant.now(), true, true, true, null, concreteSecurityIndexName, indexStatus); + return dummyIndexState(true, indexStatus); + } + + public SecurityIndexManager.State dummyIndexState(boolean isIndexUpToDate, ClusterHealthStatus healthStatus) { + return new SecurityIndexManager.State( + Instant.now(), isIndexUpToDate, true, true, null, concreteSecurityIndexName, healthStatus, IndexMetaData.State.OPEN); } public void testCacheClearOnIndexHealthChange() { @@ -812,7 +817,7 @@ public void invalidateAll() { // green to yellow or yellow to green previousState = dummyState(randomFrom(ClusterHealthStatus.GREEN, ClusterHealthStatus.YELLOW)); - currentState = dummyState(previousState.indexStatus == ClusterHealthStatus.GREEN ? + currentState = dummyState(previousState.indexHealth == ClusterHealthStatus.GREEN ? ClusterHealthStatus.YELLOW : ClusterHealthStatus.GREEN); compositeRolesStore.onSecurityIndexStateChange(previousState, currentState); assertEquals(expectedInvalidation, numInvalidation.get()); @@ -837,14 +842,10 @@ public void invalidateAll() { } }; - compositeRolesStore.onSecurityIndexStateChange( - new SecurityIndexManager.State(Instant.now(), false, true, true, null, concreteSecurityIndexName, null), - new SecurityIndexManager.State(Instant.now(), true, true, true, null, concreteSecurityIndexName, null)); + compositeRolesStore.onSecurityIndexStateChange(dummyIndexState(false, null), dummyIndexState(true, null)); assertEquals(1, numInvalidation.get()); - compositeRolesStore.onSecurityIndexStateChange( - new SecurityIndexManager.State(Instant.now(), true, true, true, null, concreteSecurityIndexName, null), - new SecurityIndexManager.State(Instant.now(), false, true, true, null, concreteSecurityIndexName, null)); + compositeRolesStore.onSecurityIndexStateChange(dummyIndexState(true, null), dummyIndexState(false, null)); assertEquals(2, numInvalidation.get()); } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/support/SecurityIndexManagerTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/support/SecurityIndexManagerTests.java index 157e0ffb82013..0d55027d9cce4 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/support/SecurityIndexManagerTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/support/SecurityIndexManagerTests.java @@ -155,8 +155,8 @@ public void testIndexHealthChangeListeners() throws Exception { manager.clusterChanged(event(clusterStateBuilder)); assertTrue(listenerCalled.get()); - assertNull(previousState.get().indexStatus); - assertEquals(ClusterHealthStatus.GREEN, currentState.get().indexStatus); + assertNull(previousState.get().indexHealth); + assertEquals(ClusterHealthStatus.GREEN, currentState.get().indexHealth); // reset and call with no change to the index listenerCalled.set(false); @@ -191,8 +191,8 @@ public void testIndexHealthChangeListeners() throws Exception { event = new ClusterChangedEvent("different index health", clusterStateBuilder.build(), previousClusterState); manager.clusterChanged(event); assertTrue(listenerCalled.get()); - assertEquals(ClusterHealthStatus.GREEN, previousState.get().indexStatus); - assertEquals(ClusterHealthStatus.RED, currentState.get().indexStatus); + assertEquals(ClusterHealthStatus.GREEN, previousState.get().indexHealth); + assertEquals(ClusterHealthStatus.RED, currentState.get().indexHealth); // swap prev and current listenerCalled.set(false); @@ -201,8 +201,8 @@ public void testIndexHealthChangeListeners() throws Exception { event = new ClusterChangedEvent("different index health swapped", previousClusterState, clusterStateBuilder.build()); manager.clusterChanged(event); assertTrue(listenerCalled.get()); - assertEquals(ClusterHealthStatus.RED, previousState.get().indexStatus); - assertEquals(ClusterHealthStatus.GREEN, currentState.get().indexStatus); + assertEquals(ClusterHealthStatus.RED, previousState.get().indexHealth); + assertEquals(ClusterHealthStatus.GREEN, currentState.get().indexHealth); } public void testWriteBeforeStateNotRecovered() throws Exception { @@ -247,7 +247,7 @@ public void testWriteBeforeStateNotRecovered() throws Exception { assertThat(prepareRunnableCalled.get(), is(true)); } - public void testListeneredNotCalledBeforeStateNotRecovered() throws Exception { + public void testListenerNotCalledBeforeStateNotRecovered() throws Exception { final AtomicBoolean listenerCalled = new AtomicBoolean(false); manager.addIndexStateListener((prev, current) -> { listenerCalled.set(true); @@ -307,6 +307,31 @@ public void testIndexOutOfDateListeners() throws Exception { assertTrue(manager.isIndexUpToDate()); } + public void testProcessClosedIndexState() throws Exception { + // Index initially exists + final ClusterState.Builder indexAvailable = createClusterState(RestrictedIndicesNames.INTERNAL_SECURITY_MAIN_INDEX_7, + RestrictedIndicesNames.SECURITY_MAIN_ALIAS, TEMPLATE_NAME, IndexMetaData.State.OPEN); + markShardsAvailable(indexAvailable); + + manager.clusterChanged(event(indexAvailable)); + assertThat(manager.indexExists(), is(true)); + assertThat(manager.isAvailable(), is(true)); + + // Now close it + final ClusterState.Builder indexClosed = createClusterState(RestrictedIndicesNames.INTERNAL_SECURITY_MAIN_INDEX_7, + RestrictedIndicesNames.SECURITY_MAIN_ALIAS, TEMPLATE_NAME, IndexMetaData.State.CLOSE); + if (randomBoolean()) { + // In old/mixed cluster versions closed indices have no routing table + indexClosed.routingTable(RoutingTable.EMPTY_ROUTING_TABLE); + } else { + markShardsAvailable(indexClosed); + } + + manager.clusterChanged(event(indexClosed)); + assertThat(manager.indexExists(), is(true)); + assertThat(manager.isAvailable(), is(false)); + } + private void assertInitialState() { assertThat(manager.indexExists(), Matchers.equalTo(false)); assertThat(manager.isAvailable(), Matchers.equalTo(false)); @@ -322,18 +347,23 @@ private void assertIndexUpToDateButNotAvailable() { } public static ClusterState.Builder createClusterState(String indexName, String aliasName, String templateName) throws IOException { - return createClusterState(indexName, aliasName, templateName, templateName, SecurityIndexManager.INTERNAL_MAIN_INDEX_FORMAT); + return createClusterState(indexName, aliasName, templateName, IndexMetaData.State.OPEN); + } + + public static ClusterState.Builder createClusterState(String indexName, String aliasName, String templateName, + IndexMetaData.State state) throws IOException { + return createClusterState(indexName, aliasName, templateName, templateName, SecurityIndexManager.INTERNAL_MAIN_INDEX_FORMAT, state); } public static ClusterState.Builder createClusterState(String indexName, String aliasName, String templateName, int format) throws IOException { - return createClusterState(indexName, aliasName, templateName, templateName, format); + return createClusterState(indexName, aliasName, templateName, templateName, format, IndexMetaData.State.OPEN); } private static ClusterState.Builder createClusterState(String indexName, String aliasName, String templateName, String buildMappingFrom, - int format) throws IOException { + int format, IndexMetaData.State state) throws IOException { IndexTemplateMetaData.Builder templateBuilder = getIndexTemplateMetaData(templateName); - IndexMetaData.Builder indexMeta = getIndexMetadata(indexName, aliasName, buildMappingFrom, format); + IndexMetaData.Builder indexMeta = getIndexMetadata(indexName, aliasName, buildMappingFrom, format, state); MetaData.Builder metaDataBuilder = new MetaData.Builder(); metaDataBuilder.put(templateBuilder); @@ -354,7 +384,8 @@ private static ClusterState state() { .build(); } - private static IndexMetaData.Builder getIndexMetadata(String indexName, String aliasName, String templateName, int format) + private static IndexMetaData.Builder getIndexMetadata(String indexName, String aliasName, String templateName, int format, + IndexMetaData.State state) throws IOException { IndexMetaData.Builder indexMetaData = IndexMetaData.builder(indexName); indexMetaData.settings(Settings.builder() @@ -364,6 +395,7 @@ private static IndexMetaData.Builder getIndexMetadata(String indexName, String a .put(IndexMetaData.INDEX_FORMAT_SETTING.getKey(), format) .build()); indexMetaData.putAlias(AliasMetaData.builder(aliasName).build()); + indexMetaData.state(state); final Map mappings = getTemplateMappings(templateName); for (Map.Entry entry : mappings.entrySet()) { indexMetaData.putMapping(entry.getKey(), entry.getValue()); From f70cd1c6dc919691cc9f8c90d0275ecc70a9e3ae Mon Sep 17 00:00:00 2001 From: Ioannis Kakavas Date: Thu, 30 May 2019 07:43:00 +0300 Subject: [PATCH 184/224] Fix testTokenExpiry flaky test (#42585) Test was using ClockMock#rewind passing the amount of nanoseconds in order to "strip" nanos from the time value. This was intentional as the expiration time of the UserToken doesn't have nanosecond precision. However, ClockMock#rewind doesn't support nanos either, so when it's called with a TimeValue, it rewinds the clock by the TimeValue's millis instead. This was causing the clock to go enough millis before token expiration time and the test was passing. Once every few hundred times though, the TimeValue by which we attempted to rewind the clock only had nanos and no millis, so rewind moved the clock back just a few millis, but still after expiration time. This change moves the clock explicitly to the same instant as expiration, using clock.setTime and disregarding nanos. --- .../xpack/security/authc/TokenServiceTests.java | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/TokenServiceTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/TokenServiceTests.java index 49796333098ff..4bc1efe772949 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/TokenServiceTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/TokenServiceTests.java @@ -530,9 +530,8 @@ public void testTokenExpiry() throws Exception { } try (ThreadContext.StoredContext ignore = requestContext.newStoredContext(true)) { - // move to expiry - clock.fastForwardSeconds(Math.toIntExact(defaultExpiration.getSeconds()) - fastForwardAmount); - clock.rewind(TimeValue.timeValueNanos(clock.instant().getNano())); // trim off nanoseconds since don't store them in the index + // move to expiry, stripping nanoseconds, as we don't store them in the security-tokens index + clock.setTime(userToken.getExpirationTime().truncatedTo(ChronoUnit.MILLIS).atZone(clock.getZone())); PlainActionFuture future = new PlainActionFuture<>(); tokenService.getAndValidateToken(requestContext, future); assertAuthentication(authentication, future.get().getAuthentication()); From a21ff8672040f6a1a0f67274e74f104a9082f2d6 Mon Sep 17 00:00:00 2001 From: Ioannis Kakavas Date: Thu, 30 May 2019 09:21:54 +0300 Subject: [PATCH 185/224] Revert "un-mute Watcher rolling upgrade tests and bump up logging (#42377)" This reverts commit 697c793dcbabf1df0351d75a3705047ac4435dca. --- x-pack/qa/rolling-upgrade/build.gradle | 2 -- .../rest-api-spec/test/mixed_cluster/60_watcher.yml | 8 ++++++++ .../rest-api-spec/test/old_cluster/60_watcher.yml | 7 +++++++ .../rest-api-spec/test/upgraded_cluster/60_watcher.yml | 8 ++++++++ 4 files changed, 23 insertions(+), 2 deletions(-) diff --git a/x-pack/qa/rolling-upgrade/build.gradle b/x-pack/qa/rolling-upgrade/build.gradle index 8be945e701898..d75ecbd7a55ed 100644 --- a/x-pack/qa/rolling-upgrade/build.gradle +++ b/x-pack/qa/rolling-upgrade/build.gradle @@ -123,8 +123,6 @@ for (Version version : bwcVersions.wireCompatible) { setting 'xpack.security.authc.token.timeout', '60m' setting 'logger.org.elasticsearch.xpack.security.authc.TokenService', 'trace' setting 'xpack.security.audit.enabled', 'true' - setting 'logger.org.elasticsearch.xpack.watcher', 'debug' - setting 'logger.org.elasticsearch.xpack.core.watcher', 'debug' rootProject.globalInfo.ready { if (project.inFipsJvm) { setting 'xpack.security.transport.ssl.key', 'testnode.pem' diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/60_watcher.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/60_watcher.yml index 1ce3f35049ecd..2a1dd4397dc56 100644 --- a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/60_watcher.yml +++ b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/60_watcher.yml @@ -1,6 +1,10 @@ --- "CRUD watch APIs": + - skip: + reason: https://github.com/elastic/elasticsearch/issues/33185 + version: "6.7.0 - " + # no need to put watch, exists already - do: watcher.get_watch: @@ -70,6 +74,10 @@ --- "Test watcher stats output": + - skip: + reason: https://github.com/elastic/elasticsearch/issues/33185 + version: "6.7.0 - " + - do: watcher.stats: {} - match: { "manually_stopped": false } diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/60_watcher.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/60_watcher.yml index 6e51e6b0dc717..aafb7ddf239bb 100644 --- a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/60_watcher.yml +++ b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/60_watcher.yml @@ -1,5 +1,8 @@ --- "CRUD watch APIs": + - skip: + reason: https://github.com/elastic/elasticsearch/issues/33185 + version: "6.7.0 - " - do: watcher.put_watch: @@ -90,6 +93,10 @@ --- "Test watcher stats output": + - skip: + reason: https://github.com/elastic/elasticsearch/issues/33185 + version: "6.7.0 - " + - do: watcher.stats: {} - match: { "manually_stopped": false } diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/60_watcher.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/60_watcher.yml index e9574215dc44e..dacb437d4b449 100644 --- a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/60_watcher.yml +++ b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/60_watcher.yml @@ -1,6 +1,10 @@ --- "CRUD watch APIs": + - skip: + reason: https://github.com/elastic/elasticsearch/issues/33185 + version: "6.7.0 - " + # no need to put watch, exists already - do: watcher.get_watch: @@ -69,6 +73,10 @@ --- "Test watcher stats output": + - skip: + reason: https://github.com/elastic/elasticsearch/issues/33185 + version: "6.7.0 - " + - do: watcher.stats: {} - match: { "manually_stopped": false } From 0ee8fed6cfec5801d0e08c0c91b840f13c637d63 Mon Sep 17 00:00:00 2001 From: David Turner Date: Thu, 30 May 2019 08:02:13 +0100 Subject: [PATCH 186/224] Log leader and handshake failures by default (#42342) Today the `LeaderChecker` and `HandshakingTransportAddressConnector` do not log anything above `DEBUG` level. However there are some situations where it is appropriate for them to log at a higher level: - if the low-level handshake succeeds but the high-level one fails then this indicates a config error that the user should resolve, and the exception will help them to do so. - if leader checks fail repeatedly then we restart discovery, and the exception will help to determine what went wrong. Resolves #42153 --- .../cluster/coordination/Coordinator.java | 22 ++++------ .../cluster/coordination/LeaderChecker.java | 40 +++++++++++++------ .../HandshakingTransportAddressConnector.java | 8 ++++ .../coordination/LeaderCheckerTests.java | 15 +++++-- 4 files changed, 55 insertions(+), 30 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java b/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java index 6d86bb613be43..585987b31d771 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java @@ -164,7 +164,7 @@ public Coordinator(String nodeName, Settings settings, ClusterSettings clusterSe new HandshakingTransportAddressConnector(settings, transportService), configuredHostsResolver); this.publicationHandler = new PublicationTransportHandler(transportService, namedWriteableRegistry, this::handlePublishRequest, this::handleApplyCommit); - this.leaderChecker = new LeaderChecker(settings, transportService, getOnLeaderFailure()); + this.leaderChecker = new LeaderChecker(settings, transportService, this::onLeaderFailure); this.followersChecker = new FollowersChecker(settings, transportService, this::onFollowerCheckRequest, this::removeNode); this.nodeRemovalExecutor = new NodeRemovalClusterStateTaskExecutor(allocationService, logger); this.clusterApplier = clusterApplier; @@ -183,20 +183,14 @@ private ClusterFormationState getClusterFormationState() { StreamSupport.stream(peerFinder.getFoundPeers().spliterator(), false).collect(Collectors.toList()), getCurrentTerm()); } - private Runnable getOnLeaderFailure() { - return new Runnable() { - @Override - public void run() { - synchronized (mutex) { - becomeCandidate("onLeaderFailure"); - } - } - - @Override - public String toString() { - return "notification of leader failure"; + private void onLeaderFailure(Exception e) { + synchronized (mutex) { + if (mode != Mode.CANDIDATE) { + assert lastKnownLeader.isPresent(); + logger.info(new ParameterizedMessage("master node [{}] failed, restarting discovery", lastKnownLeader.get()), e); } - }; + becomeCandidate("onLeaderFailure"); + } } private void removeNode(DiscoveryNode discoveryNode, String reason) { diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/LeaderChecker.java b/server/src/main/java/org/elasticsearch/cluster/coordination/LeaderChecker.java index 5bc5ea866ee83..703c08bf260c7 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/LeaderChecker.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/LeaderChecker.java @@ -22,6 +22,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; +import org.elasticsearch.ElasticsearchException; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.common.Nullable; @@ -33,6 +34,7 @@ import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.threadpool.ThreadPool.Names; import org.elasticsearch.transport.ConnectTransportException; +import org.elasticsearch.transport.NodeDisconnectedException; import org.elasticsearch.transport.TransportConnectionListener; import org.elasticsearch.transport.TransportException; import org.elasticsearch.transport.TransportRequest; @@ -48,6 +50,7 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.atomic.AtomicReference; +import java.util.function.Consumer; /** * The LeaderChecker is responsible for allowing followers to check that the currently elected leader is still connected and healthy. We are @@ -75,20 +78,17 @@ public class LeaderChecker { public static final Setting LEADER_CHECK_RETRY_COUNT_SETTING = Setting.intSetting("cluster.fault_detection.leader_check.retry_count", 3, 1, Setting.Property.NodeScope); - private final Settings settings; - private final TimeValue leaderCheckInterval; private final TimeValue leaderCheckTimeout; private final int leaderCheckRetryCount; private final TransportService transportService; - private final Runnable onLeaderFailure; + private final Consumer onLeaderFailure; private AtomicReference currentChecker = new AtomicReference<>(); private volatile DiscoveryNodes discoveryNodes; - public LeaderChecker(final Settings settings, final TransportService transportService, final Runnable onLeaderFailure) { - this.settings = settings; + public LeaderChecker(final Settings settings, final TransportService transportService, final Consumer onLeaderFailure) { leaderCheckInterval = LEADER_CHECK_INTERVAL_SETTING.get(settings); leaderCheckTimeout = LEADER_CHECK_TIMEOUT_SETTING.get(settings); leaderCheckRetryCount = LEADER_CHECK_RETRY_COUNT_SETTING.get(settings); @@ -234,16 +234,19 @@ public void handleException(TransportException exp) { } if (exp instanceof ConnectTransportException || exp.getCause() instanceof ConnectTransportException) { - logger.debug(new ParameterizedMessage("leader [{}] disconnected, failing immediately", leader), exp); - leaderFailed(); + logger.debug(new ParameterizedMessage( + "leader [{}] disconnected during check", leader), exp); + leaderFailed(new ConnectTransportException(leader, "disconnected during check", exp)); return; } long failureCount = failureCountSinceLastSuccess.incrementAndGet(); if (failureCount >= leaderCheckRetryCount) { - logger.debug(new ParameterizedMessage("{} consecutive failures (limit [{}] is {}) so leader [{}] has failed", - failureCount, LEADER_CHECK_RETRY_COUNT_SETTING.getKey(), leaderCheckRetryCount, leader), exp); - leaderFailed(); + logger.debug(new ParameterizedMessage( + "leader [{}] has failed {} consecutive checks (limit [{}] is {}); last failure was:", + leader, failureCount, LEADER_CHECK_RETRY_COUNT_SETTING.getKey(), leaderCheckRetryCount), exp); + leaderFailed(new ElasticsearchException( + "node [" + leader + "] failed [" + failureCount + "] consecutive checks", exp)); return; } @@ -259,9 +262,19 @@ public String executor() { }); } - void leaderFailed() { + void leaderFailed(Exception e) { if (isClosed.compareAndSet(false, true)) { - transportService.getThreadPool().generic().execute(onLeaderFailure); + transportService.getThreadPool().generic().execute(new Runnable() { + @Override + public void run() { + onLeaderFailure.accept(e); + } + + @Override + public String toString() { + return "notification of leader failure: " + e.getMessage(); + } + }); } else { logger.trace("already closed, not failing leader"); } @@ -269,7 +282,8 @@ void leaderFailed() { void handleDisconnectedNode(DiscoveryNode discoveryNode) { if (discoveryNode.equals(leader)) { - leaderFailed(); + logger.debug("leader [{}] disconnected", leader); + leaderFailed(new NodeDisconnectedException(discoveryNode, "disconnected")); } } diff --git a/server/src/main/java/org/elasticsearch/discovery/HandshakingTransportAddressConnector.java b/server/src/main/java/org/elasticsearch/discovery/HandshakingTransportAddressConnector.java index 7f2512f97f87b..4e90ae02e12ac 100644 --- a/server/src/main/java/org/elasticsearch/discovery/HandshakingTransportAddressConnector.java +++ b/server/src/main/java/org/elasticsearch/discovery/HandshakingTransportAddressConnector.java @@ -21,6 +21,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.cluster.node.DiscoveryNode; @@ -89,6 +90,13 @@ protected void doRun() throws Exception { remoteNode = transportService.handshake(connection, probeHandshakeTimeout.millis()); // success means (amongst other things) that the cluster names match logger.trace("[{}] handshake successful: {}", this, remoteNode); + } catch (Exception e) { + // we opened a connection and successfully performed a low-level handshake, so we were definitely talking to an + // Elasticsearch node, but the high-level handshake failed indicating some kind of mismatched configurations + // (e.g. cluster name) that the user should address + logger.warn(new ParameterizedMessage("handshake failed for [{}]", this), e); + listener.onFailure(e); + return; } finally { IOUtils.closeWhileHandlingException(connection); } diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/LeaderCheckerTests.java b/server/src/test/java/org/elasticsearch/cluster/coordination/LeaderCheckerTests.java index a806cb84a6818..ce25d24bce6ba 100644 --- a/server/src/test/java/org/elasticsearch/cluster/coordination/LeaderCheckerTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/LeaderCheckerTests.java @@ -52,9 +52,12 @@ import static org.elasticsearch.node.Node.NODE_NAME_SETTING; import static org.elasticsearch.transport.TransportService.HANDSHAKE_ACTION_NAME; import static org.elasticsearch.transport.TransportService.NOOP_TRANSPORT_INTERCEPTOR; +import static org.hamcrest.Matchers.anyOf; +import static org.hamcrest.Matchers.endsWith; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.lessThanOrEqualTo; +import static org.hamcrest.Matchers.matchesRegex; import static org.hamcrest.Matchers.nullValue; public class LeaderCheckerTests extends ESTestCase { @@ -146,7 +149,10 @@ public String toString() { final AtomicBoolean leaderFailed = new AtomicBoolean(); final LeaderChecker leaderChecker = new LeaderChecker(settings, transportService, - () -> assertTrue(leaderFailed.compareAndSet(false, true))); + e -> { + assertThat(e.getMessage(), matchesRegex("node \\[.*\\] failed \\[[1-9][0-9]*\\] consecutive checks")); + assertTrue(leaderFailed.compareAndSet(false, true)); + }); logger.info("--> creating first checker"); leaderChecker.updateLeader(leader1); @@ -247,7 +253,10 @@ public String toString() { final AtomicBoolean leaderFailed = new AtomicBoolean(); final LeaderChecker leaderChecker = new LeaderChecker(settings, transportService, - () -> assertTrue(leaderFailed.compareAndSet(false, true))); + e -> { + assertThat(e.getMessage(), anyOf(endsWith("disconnected"), endsWith("disconnected during check"))); + assertTrue(leaderFailed.compareAndSet(false, true)); + }); leaderChecker.updateLeader(leader); { @@ -316,7 +325,7 @@ public void testLeaderBehaviour() { transportService.start(); transportService.acceptIncomingRequests(); - final LeaderChecker leaderChecker = new LeaderChecker(settings, transportService, () -> fail("shouldn't be checking anything")); + final LeaderChecker leaderChecker = new LeaderChecker(settings, transportService, e -> fail("shouldn't be checking anything")); final DiscoveryNodes discoveryNodes = DiscoveryNodes.builder().add(localNode).localNodeId(localNode.getId()).masterNodeId(localNode.getId()).build(); From 04b74497314eeec076753a33b3b6cc11549646e8 Mon Sep 17 00:00:00 2001 From: Marios Trivyzas Date: Thu, 30 May 2019 09:31:11 +0200 Subject: [PATCH 187/224] Deprecate CommonTermsQuery and cutoff_frequency (#42619) * Deprecate CommonTermsQuery and cutoff_frequency Since the max_score optimization landed in Elasticsearch 7, the CommonTermsQuery is redundant and slower. Moreover the cutoff_frequency parameter for MatchQuery and MultiMatchQuery is redundant. Relates to #27096 --- .../query-dsl/common-terms-query.asciidoc | 7 ++++ docs/reference/query-dsl/match-query.asciidoc | 3 ++ .../search.query/50_queries_with_synonyms.yml | 27 +++++++++++++ .../lucene/queries/BlendedTermQuery.java | 5 +++ .../queries/ExtendedCommonTermsQuery.java | 4 ++ .../index/query/CommonTermsQueryBuilder.java | 11 +++++ .../index/query/MatchQueryBuilder.java | 15 ++++++- .../index/query/MultiMatchQueryBuilder.java | 15 ++++++- .../index/query/QueryBuilders.java | 3 ++ .../index/search/MatchQuery.java | 4 ++ .../elasticsearch/search/SearchModule.java | 7 +++- .../query/CommonTermsQueryBuilderTests.java | 40 +++++++++++++++++++ .../query/CommonTermsQueryParserTests.java | 4 +- .../index/query/MatchQueryBuilderTests.java | 6 +-- .../query/MultiMatchQueryBuilderTests.java | 3 -- .../search/SearchModuleTests.java | 6 +-- .../profile/query/RandomQueryGenerator.java | 33 +-------------- .../test/AbstractQueryTestCase.java | 3 +- 18 files changed, 145 insertions(+), 51 deletions(-) diff --git a/docs/reference/query-dsl/common-terms-query.asciidoc b/docs/reference/query-dsl/common-terms-query.asciidoc index 87288778246a6..f2d784eb0c4c9 100644 --- a/docs/reference/query-dsl/common-terms-query.asciidoc +++ b/docs/reference/query-dsl/common-terms-query.asciidoc @@ -1,6 +1,8 @@ [[query-dsl-common-terms-query]] === Common Terms Query +deprecated[7.3.0,"Use <> instead, which skips blocks of documents efficiently, without any configuration, provided that the total number of hits is not tracked."] + The `common` terms query is a modern alternative to stopwords which improves the precision and recall of search results (by taking stopwords into account), without sacrificing performance. @@ -83,6 +85,7 @@ GET /_search } -------------------------------------------------- // CONSOLE +// TEST[warning:Deprecated field [common] used, replaced by [[match] query which can efficiently skip blocks of documents if the total number of hits is not tracked]] The number of terms which should match can be controlled with the <> @@ -108,6 +111,7 @@ GET /_search } -------------------------------------------------- // CONSOLE +// TEST[warning:Deprecated field [common] used, replaced by [[match] query which can efficiently skip blocks of documents if the total number of hits is not tracked]] which is roughly equivalent to: @@ -154,6 +158,7 @@ GET /_search } -------------------------------------------------- // CONSOLE +// TEST[warning:Deprecated field [common] used, replaced by [[match] query which can efficiently skip blocks of documents if the total number of hits is not tracked]] which is roughly equivalent to: @@ -209,6 +214,7 @@ GET /_search } -------------------------------------------------- // CONSOLE +// TEST[warning:Deprecated field [common] used, replaced by [[match] query which can efficiently skip blocks of documents if the total number of hits is not tracked]] which is roughly equivalent to: @@ -270,6 +276,7 @@ GET /_search } -------------------------------------------------- // CONSOLE +// TEST[warning:Deprecated field [common] used, replaced by [[match] query which can efficiently skip blocks of documents if the total number of hits is not tracked]] which is roughly equivalent to: diff --git a/docs/reference/query-dsl/match-query.asciidoc b/docs/reference/query-dsl/match-query.asciidoc index 89a0a942b79ce..14fc155cfccae 100644 --- a/docs/reference/query-dsl/match-query.asciidoc +++ b/docs/reference/query-dsl/match-query.asciidoc @@ -122,6 +122,8 @@ GET /_search [[query-dsl-match-query-cutoff]] ===== Cutoff frequency +deprecated[7.3.0,"This option can be omitted as the <> can skip block of documents efficiently, without any configuration, provided that the total number of hits is not tracked."] + The match query supports a `cutoff_frequency` that allows specifying an absolute or relative document frequency where high frequency terms are moved into an optional subquery and are only scored @@ -158,6 +160,7 @@ GET /_search } -------------------------------------------------- // CONSOLE +// TEST[warning:Deprecated field [cutoff_frequency] used, replaced by [you can omit this option, the [match] query can skip block of documents efficiently if the total number of hits is not tracked]] IMPORTANT: The `cutoff_frequency` option operates on a per-shard-level. This means that when trying it out on test indexes with low document numbers you diff --git a/modules/analysis-common/src/test/resources/rest-api-spec/test/search.query/50_queries_with_synonyms.yml b/modules/analysis-common/src/test/resources/rest-api-spec/test/search.query/50_queries_with_synonyms.yml index 784ffd9dd123a..ce9cc74955729 100644 --- a/modules/analysis-common/src/test/resources/rest-api-spec/test/search.query/50_queries_with_synonyms.yml +++ b/modules/analysis-common/src/test/resources/rest-api-spec/test/search.query/50_queries_with_synonyms.yml @@ -1,5 +1,8 @@ --- "Test common terms query with stacked tokens": + - skip: + features: "warnings" + - do: indices.create: index: test @@ -47,6 +50,8 @@ refresh: true - do: + warnings: + - 'Deprecated field [common] used, replaced by [[match] query which can efficiently skip blocks of documents if the total number of hits is not tracked]' search: rest_total_hits_as_int: true body: @@ -62,6 +67,8 @@ - match: { hits.hits.2._id: "3" } - do: + warnings: + - 'Deprecated field [common] used, replaced by [[match] query which can efficiently skip blocks of documents if the total number of hits is not tracked]' search: rest_total_hits_as_int: true body: @@ -76,6 +83,8 @@ - match: { hits.hits.1._id: "2" } - do: + warnings: + - 'Deprecated field [common] used, replaced by [[match] query which can efficiently skip blocks of documents if the total number of hits is not tracked]' search: rest_total_hits_as_int: true body: @@ -90,6 +99,8 @@ - match: { hits.hits.2._id: "3" } - do: + warnings: + - 'Deprecated field [common] used, replaced by [[match] query which can efficiently skip blocks of documents if the total number of hits is not tracked]' search: rest_total_hits_as_int: true body: @@ -103,6 +114,8 @@ - match: { hits.hits.0._id: "2" } - do: + warnings: + - 'Deprecated field [common] used, replaced by [[match] query which can efficiently skip blocks of documents if the total number of hits is not tracked]' search: rest_total_hits_as_int: true body: @@ -118,6 +131,8 @@ - match: { hits.hits.1._id: "1" } - do: + warnings: + - 'Deprecated field [common] used, replaced by [[match] query which can efficiently skip blocks of documents if the total number of hits is not tracked]' search: rest_total_hits_as_int: true body: @@ -132,6 +147,8 @@ - match: { hits.hits.0._id: "2" } - do: + warnings: + - 'Deprecated field [common] used, replaced by [[match] query which can efficiently skip blocks of documents if the total number of hits is not tracked]' search: rest_total_hits_as_int: true body: @@ -144,6 +161,8 @@ - match: { hits.hits.0._id: "2" } - do: + warnings: + - 'Deprecated field [common] used, replaced by [[match] query which can efficiently skip blocks of documents if the total number of hits is not tracked]' search: rest_total_hits_as_int: true body: @@ -158,6 +177,8 @@ - match: { hits.hits.2._id: "3" } - do: + warnings: + - 'Deprecated field [cutoff_frequency] used, replaced by [you can omit this option, the [match] query can skip block of documents efficiently if the total number of hits is not tracked]' search: rest_total_hits_as_int: true body: @@ -172,6 +193,8 @@ - match: { hits.hits.1._id: "2" } - do: + warnings: + - 'Deprecated field [cutoff_frequency] used, replaced by [you can omit this option, the [match] query can skip block of documents efficiently if the total number of hits is not tracked]' search: rest_total_hits_as_int: true body: @@ -187,6 +210,8 @@ - match: { hits.hits.2._id: "3" } - do: + warnings: + - 'Deprecated field [cutoff_frequency] used, replaced by [you can omit this option, the [match] query can skip block of documents efficiently if the total number of hits is not tracked]' search: rest_total_hits_as_int: true body: @@ -201,6 +226,8 @@ - match: { hits.hits.1._id: "2" } - do: + warnings: + - 'Deprecated field [cutoff_frequency] used, replaced by [you can omit this option, the [multi_match] query can skip block of documents efficiently if the total number of hits is not tracked]' search: rest_total_hits_as_int: true body: diff --git a/server/src/main/java/org/apache/lucene/queries/BlendedTermQuery.java b/server/src/main/java/org/apache/lucene/queries/BlendedTermQuery.java index c696d476bbb43..f823f3a142690 100644 --- a/server/src/main/java/org/apache/lucene/queries/BlendedTermQuery.java +++ b/server/src/main/java/org/apache/lucene/queries/BlendedTermQuery.java @@ -278,6 +278,11 @@ public int hashCode() { return Objects.hash(classHash(), Arrays.hashCode(equalsTerms())); } + /** + * @deprecated Since max_score optimization landed in 7.0, normal MultiMatchQuery + * will achieve the same result without any configuration. + */ + @Deprecated public static BlendedTermQuery commonTermsBlendedQuery(Term[] terms, final float[] boosts, final float maxTermFrequency) { return new BlendedTermQuery(terms, boosts) { @Override diff --git a/server/src/main/java/org/apache/lucene/queries/ExtendedCommonTermsQuery.java b/server/src/main/java/org/apache/lucene/queries/ExtendedCommonTermsQuery.java index 249b7fa83b50b..2d70ed8b90a05 100644 --- a/server/src/main/java/org/apache/lucene/queries/ExtendedCommonTermsQuery.java +++ b/server/src/main/java/org/apache/lucene/queries/ExtendedCommonTermsQuery.java @@ -26,7 +26,11 @@ * Extended version of {@link CommonTermsQuery} that allows to pass in a * {@code minimumNumberShouldMatch} specification that uses the actual num of high frequent terms * to calculate the minimum matching terms. + * + * @deprecated Since max_optimization optimization landed in 7.0, normal MatchQuery + * will achieve the same result without any configuration. */ +@Deprecated public class ExtendedCommonTermsQuery extends CommonTermsQuery { public ExtendedCommonTermsQuery(Occur highFreqOccur, Occur lowFreqOccur, float maxTermFrequency) { diff --git a/server/src/main/java/org/elasticsearch/index/query/CommonTermsQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/CommonTermsQueryBuilder.java index d646dc4bb4b07..5b2853ac359c2 100644 --- a/server/src/main/java/org/elasticsearch/index/query/CommonTermsQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/CommonTermsQueryBuilder.java @@ -47,9 +47,16 @@ * and high-frequency terms are added to an optional boolean clause. The * optional clause is only executed if the required "low-frequency' clause * matches. + * + * @deprecated Since max_optimization optimization landed in 7.0, normal MatchQuery + * will achieve the same result without any configuration. */ +@Deprecated public class CommonTermsQueryBuilder extends AbstractQueryBuilder { + public static final String COMMON_TERMS_QUERY_DEPRECATION_MSG = "[match] query which can efficiently " + + "skip blocks of documents if the total number of hits is not tracked"; + public static final String NAME = "common"; public static final float DEFAULT_CUTOFF_FREQ = 0.01f; @@ -85,7 +92,9 @@ public class CommonTermsQueryBuilder extends AbstractQueryBuilder { + + private static final String CUTOFF_FREQUENCY_DEPRECATION_MSG = "you can omit this option, " + + "the [match] query can skip block of documents efficiently if the total number of hits is not tracked"; + public static final ParseField ZERO_TERMS_QUERY_FIELD = new ParseField("zero_terms_query"); - public static final ParseField CUTOFF_FREQUENCY_FIELD = new ParseField("cutoff_frequency"); + /** + * @deprecated Since max_optimization optimization landed in 7.0, normal MatchQuery + * will achieve the same result without any configuration. + */ + @Deprecated + public static final ParseField CUTOFF_FREQUENCY_FIELD = + new ParseField("cutoff_frequency").withAllDeprecated(CUTOFF_FREQUENCY_DEPRECATION_MSG); public static final ParseField LENIENT_FIELD = new ParseField("lenient"); public static final ParseField FUZZY_TRANSPOSITIONS_FIELD = new ParseField("fuzzy_transpositions"); public static final ParseField FUZZY_REWRITE_FIELD = new ParseField("fuzzy_rewrite"); @@ -235,7 +245,10 @@ public int maxExpansions() { * Set a cutoff value in [0..1] (or absolute number >=1) representing the * maximum threshold of a terms document frequency to be considered a low * frequency term. + * + * @deprecated see {@link MatchQueryBuilder#CUTOFF_FREQUENCY_FIELD} for more details */ + @Deprecated public MatchQueryBuilder cutoffFrequency(float cutoff) { this.cutoffFrequency = cutoff; return this; diff --git a/server/src/main/java/org/elasticsearch/index/query/MultiMatchQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/MultiMatchQueryBuilder.java index 7827c032ea0d7..fb400a9d3fc75 100644 --- a/server/src/main/java/org/elasticsearch/index/query/MultiMatchQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/MultiMatchQueryBuilder.java @@ -50,6 +50,10 @@ * Same as {@link MatchQueryBuilder} but supports multiple fields. */ public class MultiMatchQueryBuilder extends AbstractQueryBuilder { + + private static final String CUTOFF_FREQUENCY_DEPRECATION_MSG = "you can omit this option, " + + "the [multi_match] query can skip block of documents efficiently if the total number of hits is not tracked"; + public static final String NAME = "multi_match"; public static final MultiMatchQueryBuilder.Type DEFAULT_TYPE = MultiMatchQueryBuilder.Type.BEST_FIELDS; @@ -63,7 +67,8 @@ public class MultiMatchQueryBuilder extends AbstractQueryBuilder plugins) { registerQuery(new QuerySpec<>(MoreLikeThisQueryBuilder.NAME, MoreLikeThisQueryBuilder::new, MoreLikeThisQueryBuilder::fromXContent)); registerQuery(new QuerySpec<>(WrapperQueryBuilder.NAME, WrapperQueryBuilder::new, WrapperQueryBuilder::fromXContent)); - registerQuery(new QuerySpec<>(CommonTermsQueryBuilder.NAME, CommonTermsQueryBuilder::new, CommonTermsQueryBuilder::fromXContent)); + registerQuery(new QuerySpec<>(new ParseField(CommonTermsQueryBuilder.NAME).withAllDeprecated(COMMON_TERMS_QUERY_DEPRECATION_MSG), + CommonTermsQueryBuilder::new, CommonTermsQueryBuilder::fromXContent)); registerQuery( new QuerySpec<>(SpanMultiTermQueryBuilder.NAME, SpanMultiTermQueryBuilder::new, SpanMultiTermQueryBuilder::fromXContent)); registerQuery(new QuerySpec<>(FunctionScoreQueryBuilder.NAME, FunctionScoreQueryBuilder::new, diff --git a/server/src/test/java/org/elasticsearch/index/query/CommonTermsQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/CommonTermsQueryBuilderTests.java index 5e443ec41ede9..d02b60c52d531 100644 --- a/server/src/test/java/org/elasticsearch/index/query/CommonTermsQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/CommonTermsQueryBuilderTests.java @@ -111,6 +111,30 @@ protected void doAssertLuceneQuery(CommonTermsQueryBuilder queryBuilder, Query q assertThat(extendedCommonTermsQuery.getLowFreqMinimumNumberShouldMatchSpec(), equalTo(queryBuilder.lowFreqMinimumShouldMatch())); } + @Override + public void testUnknownField() throws IOException { + super.testUnknownField(); + assertDeprecationWarning(); + } + + @Override + public void testUnknownObjectException() throws IOException { + super.testUnknownObjectException(); + assertDeprecationWarning(); + } + + @Override + public void testFromXContent() throws IOException { + super.testFromXContent(); + assertDeprecationWarning(); + } + + @Override + public void testValidOutput() throws IOException { + super.testValidOutput(); + assertDeprecationWarning(); + } + public void testIllegalArguments() { IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> new CommonTermsQueryBuilder(null, "text")); assertEquals("field name is null or empty", e.getMessage()); @@ -146,6 +170,8 @@ public void testFromJson() throws IOException { assertEquals(query, Operator.OR, queryBuilder.lowFreqOperator()); assertEquals(query, Operator.AND, queryBuilder.highFreqOperator()); assertEquals(query, "nelly the elephant not as a cartoon", queryBuilder.value()); + + assertDeprecationWarning(); } public void testCommonTermsQuery1() throws IOException { @@ -155,6 +181,8 @@ public void testCommonTermsQuery1() throws IOException { ExtendedCommonTermsQuery ectQuery = (ExtendedCommonTermsQuery) parsedQuery; assertThat(ectQuery.getHighFreqMinimumNumberShouldMatchSpec(), nullValue()); assertThat(ectQuery.getLowFreqMinimumNumberShouldMatchSpec(), equalTo("2")); + + assertDeprecationWarning(); } public void testCommonTermsQuery2() throws IOException { @@ -164,6 +192,8 @@ public void testCommonTermsQuery2() throws IOException { ExtendedCommonTermsQuery ectQuery = (ExtendedCommonTermsQuery) parsedQuery; assertThat(ectQuery.getHighFreqMinimumNumberShouldMatchSpec(), equalTo("50%")); assertThat(ectQuery.getLowFreqMinimumNumberShouldMatchSpec(), equalTo("5<20%")); + + assertDeprecationWarning(); } public void testCommonTermsQuery3() throws IOException { @@ -173,12 +203,16 @@ public void testCommonTermsQuery3() throws IOException { ExtendedCommonTermsQuery ectQuery = (ExtendedCommonTermsQuery) parsedQuery; assertThat(ectQuery.getHighFreqMinimumNumberShouldMatchSpec(), nullValue()); assertThat(ectQuery.getLowFreqMinimumNumberShouldMatchSpec(), equalTo("2")); + + assertDeprecationWarning(); } // see #11730 public void testCommonTermsQuery4() throws IOException { Query parsedQuery = parseQuery(commonTermsQuery("field", "text")).toQuery(createShardContext()); assertThat(parsedQuery, instanceOf(ExtendedCommonTermsQuery.class)); + + assertDeprecationWarning(); } public void testParseFailsWithMultipleFields() throws IOException { @@ -204,5 +238,11 @@ public void testParseFailsWithMultipleFields() throws IOException { "}"; e = expectThrows(ParsingException.class, () -> parseQuery(shortJson)); assertEquals("[common] query doesn't support multiple fields, found [message1] and [message2]", e.getMessage()); + + assertDeprecationWarning(); + } + + private void assertDeprecationWarning() { + assertWarnings("Deprecated field [common] used, replaced by [" + CommonTermsQueryBuilder.COMMON_TERMS_QUERY_DEPRECATION_MSG + "]"); } } diff --git a/server/src/test/java/org/elasticsearch/index/query/CommonTermsQueryParserTests.java b/server/src/test/java/org/elasticsearch/index/query/CommonTermsQueryParserTests.java index 2be41f9002015..f393683a10f7f 100644 --- a/server/src/test/java/org/elasticsearch/index/query/CommonTermsQueryParserTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/CommonTermsQueryParserTests.java @@ -22,10 +22,8 @@ import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.test.ESSingleNodeTestCase; -import java.io.IOException; - public class CommonTermsQueryParserTests extends ESSingleNodeTestCase { - public void testWhenParsedQueryIsNullNoNullPointerExceptionIsThrown() throws IOException { + public void testWhenParsedQueryIsNullNoNullPointerExceptionIsThrown() { final String index = "test-index"; final String type = "test-type"; client() diff --git a/server/src/test/java/org/elasticsearch/index/query/MatchQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/MatchQueryBuilderTests.java index a7aad3dbc3e42..f79bbb86242d9 100644 --- a/server/src/test/java/org/elasticsearch/index/query/MatchQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/MatchQueryBuilderTests.java @@ -124,10 +124,6 @@ protected MatchQueryBuilder doCreateTestQueryBuilder() { matchQuery.zeroTermsQuery(randomFrom(ZeroTermsQuery.ALL, ZeroTermsQuery.NONE)); } - if (randomBoolean()) { - matchQuery.cutoffFrequency((float) 10 / randomIntBetween(1, 100)); - } - if (randomBoolean()) { matchQuery.autoGenerateSynonymsPhraseQuery(randomBoolean()); } @@ -478,7 +474,7 @@ public void testMaxBooleanClause() { query.setAnalyzer(new MockGraphAnalyzer(createGiantGraphMultiTerms())); expectThrows(BooleanQuery.TooManyClauses.class, () -> query.parse(Type.PHRASE, STRING_FIELD_NAME, "")); } - + private static class MockGraphAnalyzer extends Analyzer { CannedBinaryTokenStream tokenStream; diff --git a/server/src/test/java/org/elasticsearch/index/query/MultiMatchQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/MultiMatchQueryBuilderTests.java index 6590a5609353a..970a4c3a37ecb 100644 --- a/server/src/test/java/org/elasticsearch/index/query/MultiMatchQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/MultiMatchQueryBuilderTests.java @@ -134,9 +134,6 @@ protected MultiMatchQueryBuilder doCreateTestQueryBuilder() { if (randomBoolean()) { query.tieBreaker(randomFloat()); } - if (randomBoolean() && query.type() != Type.BOOL_PREFIX) { - query.cutoffFrequency((float) 10 / randomIntBetween(1, 100)); - } if (randomBoolean()) { query.zeroTermsQuery(randomFrom(MatchQuery.ZeroTermsQuery.NONE, MatchQuery.ZeroTermsQuery.ALL)); } diff --git a/server/src/test/java/org/elasticsearch/search/SearchModuleTests.java b/server/src/test/java/org/elasticsearch/search/SearchModuleTests.java index b7755dd321416..2e019d1e2c432 100644 --- a/server/src/test/java/org/elasticsearch/search/SearchModuleTests.java +++ b/server/src/test/java/org/elasticsearch/search/SearchModuleTests.java @@ -236,7 +236,7 @@ public Map getHighlighters() { assertSame(highlighters.get("custom"), customHighlighter); } - public void testRegisteredQueries() throws IOException { + public void testRegisteredQueries() { List allSupportedQueries = new ArrayList<>(); Collections.addAll(allSupportedQueries, NON_DEPRECATED_QUERIES); Collections.addAll(allSupportedQueries, DEPRECATED_QUERIES); @@ -244,6 +244,7 @@ public void testRegisteredQueries() throws IOException { Set registeredNonDeprecated = module.getNamedXContents().stream() .filter(e -> e.categoryClass.equals(QueryBuilder.class)) + .filter(e -> e.name.getDeprecatedNames().length == 0) .map(e -> e.name.getPreferredName()) .collect(toSet()); Set registeredAll = module.getNamedXContents().stream() @@ -306,7 +307,6 @@ public List> getRescorers() { private static final String[] NON_DEPRECATED_QUERIES = new String[] { "bool", "boosting", - "common", "constant_score", "dis_max", "exists", @@ -354,7 +354,7 @@ public List> getRescorers() { }; //add here deprecated queries to make sure we log a deprecation warnings when they are used - private static final String[] DEPRECATED_QUERIES = new String[] {}; + private static final String[] DEPRECATED_QUERIES = new String[] {"common"}; /** * Dummy test {@link AggregationBuilder} used to test registering aggregation builders. diff --git a/server/src/test/java/org/elasticsearch/search/profile/query/RandomQueryGenerator.java b/server/src/test/java/org/elasticsearch/search/profile/query/RandomQueryGenerator.java index 00b859394c65f..ea9ef964153b7 100644 --- a/server/src/test/java/org/elasticsearch/search/profile/query/RandomQueryGenerator.java +++ b/server/src/test/java/org/elasticsearch/search/profile/query/RandomQueryGenerator.java @@ -22,11 +22,9 @@ import org.apache.lucene.util.English; import org.elasticsearch.common.unit.Fuzziness; import org.elasticsearch.index.query.BoolQueryBuilder; -import org.elasticsearch.index.query.CommonTermsQueryBuilder; import org.elasticsearch.index.query.DisMaxQueryBuilder; import org.elasticsearch.index.query.FuzzyQueryBuilder; import org.elasticsearch.index.query.IdsQueryBuilder; -import org.elasticsearch.index.query.Operator; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.index.query.RangeQueryBuilder; @@ -72,7 +70,7 @@ public static QueryBuilder randomQueryBuilder(List stringFields, List stringFields, List numericFields, int numDocs) { - switch (randomIntBetween(0,6)) { + switch (randomIntBetween(0,5)) { case 0: return randomTermQuery(stringFields, numDocs); case 1: @@ -82,10 +80,8 @@ private static QueryBuilder randomTerminalQuery(List stringFields, List< case 3: return QueryBuilders.matchAllQuery(); case 4: - return randomCommonTermsQuery(stringFields, numDocs); - case 5: return randomFuzzyQuery(stringFields); - case 6: + case 5: return randomIDsQuery(); default: return randomTermQuery(stringFields, numDocs); @@ -169,31 +165,6 @@ private static QueryBuilder randomConstantScoreQuery(List stringFields, return QueryBuilders.constantScoreQuery(randomQueryBuilder(stringFields, numericFields, numDocs, depth - 1)); } - private static QueryBuilder randomCommonTermsQuery(List fields, int numDocs) { - int numTerms = randomInt(numDocs); - - QueryBuilder q = QueryBuilders.commonTermsQuery(randomField(fields), randomQueryString(numTerms)); - if (randomBoolean()) { - ((CommonTermsQueryBuilder)q).boost(randomFloat()); - } - - if (randomBoolean()) { - ((CommonTermsQueryBuilder)q).cutoffFrequency(randomFloat()); - } - - if (randomBoolean()) { - ((CommonTermsQueryBuilder)q).highFreqMinimumShouldMatch(Integer.toString(randomInt(numTerms))) - .highFreqOperator(randomBoolean() ? Operator.AND : Operator.OR); - } - - if (randomBoolean()) { - ((CommonTermsQueryBuilder)q).lowFreqMinimumShouldMatch(Integer.toString(randomInt(numTerms))) - .lowFreqOperator(randomBoolean() ? Operator.AND : Operator.OR); - } - - return q; - } - private static QueryBuilder randomFuzzyQuery(List fields) { QueryBuilder q = QueryBuilders.fuzzyQuery(randomField(fields), randomQueryString(1)); diff --git a/test/framework/src/main/java/org/elasticsearch/test/AbstractQueryTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/AbstractQueryTestCase.java index 54f6df6ac0cca..847fb58eca3e7 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/AbstractQueryTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/AbstractQueryTestCase.java @@ -20,7 +20,6 @@ package org.elasticsearch.test; import com.fasterxml.jackson.core.io.JsonStringEncoder; - import org.apache.lucene.search.BoostQuery; import org.apache.lucene.search.Query; import org.apache.lucene.search.TermQuery; @@ -165,7 +164,7 @@ public void testUnknownField() throws IOException { * parse exception. Some specific objects do not cause any exception as they can hold arbitrary content; they can be * declared by overriding {@link #getObjectsHoldingArbitraryContent()}. */ - public final void testUnknownObjectException() throws IOException { + public void testUnknownObjectException() throws IOException { Set candidates = new HashSet<>(); // Adds the valid query to the list of queries to modify and test candidates.add(createTestQueryBuilder().toString()); From 67be68d21a2c1fe8263d71cc1f223182359077ac Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Thu, 30 May 2019 12:20:38 +0200 Subject: [PATCH 188/224] Fix Class Load Order in Netty4Plugin (#42591) * Don't force the logger in the Netty4Plugin class already, at this point log4j might not be fully initialized. * The call was redundant anyway since we do the same thing in the Netty4Transport and Netty4HttpServerTransport classes already and there we do it properly after setting up log4j by initilizing the loggers * Relates #42532 --- .../main/java/org/elasticsearch/transport/Netty4Plugin.java | 5 ----- 1 file changed, 5 deletions(-) diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/Netty4Plugin.java b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/Netty4Plugin.java index 609186fc3c30e..bcfd5e0b326d3 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/Netty4Plugin.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/Netty4Plugin.java @@ -35,7 +35,6 @@ import org.elasticsearch.plugins.Plugin; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.netty4.Netty4Transport; -import org.elasticsearch.transport.netty4.Netty4Utils; import java.util.Arrays; import java.util.Collections; @@ -45,10 +44,6 @@ public class Netty4Plugin extends Plugin implements NetworkPlugin { - static { - Netty4Utils.setup(); - } - public static final String NETTY_TRANSPORT_NAME = "netty4"; public static final String NETTY_HTTP_TRANSPORT_NAME = "netty4"; From 7f3e0806dc70c5328af8ad1144af612f9316eee3 Mon Sep 17 00:00:00 2001 From: James Rodewig Date: Thu, 30 May 2019 08:30:30 -0400 Subject: [PATCH 189/224] [DOCS] Rewrite 'wildcard' query (#42670) --- .../query-dsl/wildcard-query.asciidoc | 86 +++++++++++-------- 1 file changed, 51 insertions(+), 35 deletions(-) diff --git a/docs/reference/query-dsl/wildcard-query.asciidoc b/docs/reference/query-dsl/wildcard-query.asciidoc index ba1c72bb1e53b..b2e8eb0adf772 100644 --- a/docs/reference/query-dsl/wildcard-query.asciidoc +++ b/docs/reference/query-dsl/wildcard-query.asciidoc @@ -1,51 +1,67 @@ [[query-dsl-wildcard-query]] === Wildcard Query +Returns documents that contain terms matching a wildcard pattern. -Matches documents that have fields matching a wildcard expression (*not -analyzed*). Supported wildcards are `*`, which matches any character -sequence (including the empty one), and `?`, which matches any single -character. Note that this query can be slow, as it needs to iterate over many -terms. In order to prevent extremely slow wildcard queries, a wildcard -term should not start with one of the wildcards `*` or `?`. The wildcard -query maps to Lucene `WildcardQuery`. +A wildcard operator is a placeholder that matches one or more characters. For +example, the `*` wildcard operator matches zero or more characters. You can +combine wildcard operators with other characters to create a wildcard pattern. -[source,js] --------------------------------------------------- -GET /_search -{ - "query": { - "wildcard" : { "user" : "ki*y" } - } -} --------------------------------------------------- -// CONSOLE +[[wildcard-query-ex-request]] +==== Example request -A boost can also be associated with the query: +The following search returns documents where the `user` field contains a term +that begins with `ki` and ends with `y`. These matching terms can include `kiy`, +`kity`, or `kimchy`. [source,js] --------------------------------------------------- +---- GET /_search { "query": { - "wildcard" : { "user" : { "value" : "ki*y", "boost" : 2.0 } } + "wildcard": { + "user": { + "value": "ki*y", + "boost": 1.0, + "rewrite": "constant_score" + } + } } } --------------------------------------------------- +---- // CONSOLE -Or : +[[wildcard-top-level-params]] +==== Top-level parameters for `wildcard` +``:: +Field you wish to search. -[source,js] --------------------------------------------------- -GET /_search -{ - "query": { - "wildcard" : { "user" : { "wildcard" : "ki*y", "boost" : 2.0 } } - } -} --------------------------------------------------- -// CONSOLE +[[wildcard-query-field-params]] +==== Parameters for `` +`value`:: +Wildcard pattern for terms you wish to find in the provided ``. ++ +-- +This parameter supports two wildcard operators: + +* `?`, which matches any single character +* `*`, which can match zero or more characters, including an empty one + +WARNING: Avoid beginning patterns with `*` or `?`. This can increase +the iterations needed to find matching terms and slow search performance. +-- + +`boost`:: +Floating point number used to decrease or increase the +<> of a query. Default is `1.0`. +Optional. ++ +You can use the `boost` parameter to adjust relevance scores for searches +containing two or more queries. ++ +Boost values are relative to the default value of `1.0`. A boost value between +`0` and `1.0` decreases the relevance score. A value greater than `1.0` +increases the relevance score. -This multi term query allows to control how it gets rewritten using the -<> -parameter. +`rewrite` (Expert):: +Method used to rewrite the query. For valid values and more information, see the +<>. Optional. \ No newline at end of file From 588228816afd6881d5c5e86e1db3753d19310981 Mon Sep 17 00:00:00 2001 From: Peter Dyson Date: Tue, 21 May 2019 09:43:01 +1000 Subject: [PATCH 190/224] [DOCS] path_hierarchy tokenizer examples (#39630) Closes #17138 --- docs/reference/analysis/tokenizers.asciidoc | 4 + .../pathhierarchy-tokenizer-examples.asciidoc | 193 ++++++++++++++++++ .../pathhierarchy-tokenizer.asciidoc | 4 + 3 files changed, 201 insertions(+) create mode 100644 docs/reference/analysis/tokenizers/pathhierarchy-tokenizer-examples.asciidoc diff --git a/docs/reference/analysis/tokenizers.asciidoc b/docs/reference/analysis/tokenizers.asciidoc index d6f15ded05fab..628afebfdcbb8 100644 --- a/docs/reference/analysis/tokenizers.asciidoc +++ b/docs/reference/analysis/tokenizers.asciidoc @@ -155,3 +155,7 @@ include::tokenizers/simplepattern-tokenizer.asciidoc[] include::tokenizers/simplepatternsplit-tokenizer.asciidoc[] include::tokenizers/pathhierarchy-tokenizer.asciidoc[] + +include::tokenizers/pathhierarchy-tokenizer-examples.asciidoc[] + + diff --git a/docs/reference/analysis/tokenizers/pathhierarchy-tokenizer-examples.asciidoc b/docs/reference/analysis/tokenizers/pathhierarchy-tokenizer-examples.asciidoc new file mode 100644 index 0000000000000..c93d918822168 --- /dev/null +++ b/docs/reference/analysis/tokenizers/pathhierarchy-tokenizer-examples.asciidoc @@ -0,0 +1,193 @@ +[[analysis-pathhierarchy-tokenizer-examples]] +=== Path Hierarchy Tokenizer Examples + +A common use-case for the `path_hierarchy` tokenizer is filtering results by +file paths. If indexing a file path along with the data, the use of the +`path_hierarchy` tokenizer to analyze the path allows filtering the results +by different parts of the file path string. + + +This example configures an index to have two custom analyzers and applies +those analyzers to multifields of the `file_path` text field that will +store filenames. One of the two analyzers uses reverse tokenization. +Some sample documents are then indexed to represent some file paths +for photos inside photo folders of two different users. + + +[source,js] +-------------------------------------------------- +PUT file-path-test +{ + "settings": { + "analysis": { + "analyzer": { + "custom_path_tree": { + "tokenizer": "custom_hierarchy" + }, + "custom_path_tree_reversed": { + "tokenizer": "custom_hierarchy_reversed" + } + }, + "tokenizer": { + "custom_hierarchy": { + "type": "path_hierarchy", + "delimiter": "/" + }, + "custom_hierarchy_reversed": { + "type": "path_hierarchy", + "delimiter": "/", + "reverse": "true" + } + } + } + }, + "mappings": { + "_doc": { + "properties": { + "file_path": { + "type": "text", + "fields": { + "tree": { + "type": "text", + "analyzer": "custom_path_tree" + }, + "tree_reversed": { + "type": "text", + "analyzer": "custom_path_tree_reversed" + } + } + } + } + } + } +} + +POST file-path-test/_doc/1 +{ + "file_path": "/User/alice/photos/2017/05/16/my_photo1.jpg" +} + +POST file-path-test/_doc/2 +{ + "file_path": "/User/alice/photos/2017/05/16/my_photo2.jpg" +} + +POST file-path-test/_doc/3 +{ + "file_path": "/User/alice/photos/2017/05/16/my_photo3.jpg" +} + +POST file-path-test/_doc/4 +{ + "file_path": "/User/alice/photos/2017/05/15/my_photo1.jpg" +} + +POST file-path-test/_doc/5 +{ + "file_path": "/User/bob/photos/2017/05/16/my_photo1.jpg" +} +-------------------------------------------------- +// CONSOLE +// TESTSETUP + + +A search for a particular file path string against the text field matches all +the example documents, with Bob's documents ranking highest due to `bob` also +being one of the terms created by the standard analyzer boosting relevance for +Bob's documents. + +[source,js] +-------------------------------------------------- +GET file-path-test/_search +{ + "query": { + "match": { + "file_path": "/User/bob/photos/2017/05" + } + } +} +-------------------------------------------------- +// CONSOLE + + +It's simple to match or filter documents with file paths that exist within a +particular directory using the `file_path.tree` field. + +[source,js] +-------------------------------------------------- +GET file-path-test/_search +{ + "query": { + "term": { + "file_path.tree": "/User/alice/photos/2017/05/16" + } + } +} +-------------------------------------------------- +// CONSOLE + +With the reverse parameter for this tokenizer, it's also possible to match +from the other end of the file path, such as individual file names or a deep +level subdirectory. The following example shows a search for all files named +`my_photo1.jpg` within any directory via the `file_path.tree_reversed` field +configured to use the reverse parameter in the mapping. + + +[source,js] +-------------------------------------------------- +GET file-path-test/_search +{ + "query": { + "term": { + "file_path.tree_reversed": { + "value": "my_photo1.jpg" + } + } + } +} +-------------------------------------------------- +// CONSOLE + + +Viewing the tokens generated with both forward and reverse is instructive +in showing the tokens created for the same file path value. + + +[source,js] +-------------------------------------------------- +POST file-path-test/_analyze +{ + "analyzer": "custom_path_tree", + "text": "/User/alice/photos/2017/05/16/my_photo1.jpg" +} + +POST file-path-test/_analyze +{ + "analyzer": "custom_path_tree_reversed", + "text": "/User/alice/photos/2017/05/16/my_photo1.jpg" +} +-------------------------------------------------- +// CONSOLE + + +It's also useful to be able to filter with file paths when combined with other +types of searches, such as this example looking for any files paths with `16` +that also must be in Alice's photo directory. + +[source,js] +-------------------------------------------------- +GET file-path-test/_search +{ + "query": { + "bool" : { + "must" : { + "match" : { "file_path" : "16" } + }, + "filter": { + "term" : { "file_path.tree" : "/User/alice" } + } + } + } +} +-------------------------------------------------- +// CONSOLE diff --git a/docs/reference/analysis/tokenizers/pathhierarchy-tokenizer.asciidoc b/docs/reference/analysis/tokenizers/pathhierarchy-tokenizer.asciidoc index 55aa7d66da343..8d425197a2a6d 100644 --- a/docs/reference/analysis/tokenizers/pathhierarchy-tokenizer.asciidoc +++ b/docs/reference/analysis/tokenizers/pathhierarchy-tokenizer.asciidoc @@ -170,3 +170,7 @@ If we were to set `reverse` to `true`, it would produce the following: --------------------------- [ one/two/three/, two/three/, three/ ] --------------------------- + +[float] +=== Detailed Examples +See <>. From 6f12eb168f390f48ea41df60ff2763149d3ea7e4 Mon Sep 17 00:00:00 2001 From: Mayya Sharipova Date: Thu, 30 May 2019 10:06:38 -0400 Subject: [PATCH 191/224] Fix error with mapping in docs --- .../pathhierarchy-tokenizer-examples.asciidoc | 24 +++++++++---------- 1 file changed, 11 insertions(+), 13 deletions(-) diff --git a/docs/reference/analysis/tokenizers/pathhierarchy-tokenizer-examples.asciidoc b/docs/reference/analysis/tokenizers/pathhierarchy-tokenizer-examples.asciidoc index c93d918822168..ee02d66e4034b 100644 --- a/docs/reference/analysis/tokenizers/pathhierarchy-tokenizer-examples.asciidoc +++ b/docs/reference/analysis/tokenizers/pathhierarchy-tokenizer-examples.asciidoc @@ -42,19 +42,17 @@ PUT file-path-test } }, "mappings": { - "_doc": { - "properties": { - "file_path": { - "type": "text", - "fields": { - "tree": { - "type": "text", - "analyzer": "custom_path_tree" - }, - "tree_reversed": { - "type": "text", - "analyzer": "custom_path_tree_reversed" - } + "properties": { + "file_path": { + "type": "text", + "fields": { + "tree": { + "type": "text", + "analyzer": "custom_path_tree" + }, + "tree_reversed": { + "type": "text", + "analyzer": "custom_path_tree_reversed" } } } From 1991ee875c53b0d5616772c9aa3d5630494d097f Mon Sep 17 00:00:00 2001 From: Ioannis Kakavas Date: Thu, 30 May 2019 17:33:06 +0300 Subject: [PATCH 192/224] Fix refresh remote JWKS logic (#42662) This change ensures that: - We only attempt to refresh the remote JWKS when there is a signature related error only ( BadJWSException instead of the geric BadJOSEException ) - We do call OpenIDConnectAuthenticator#getUserClaims upon successful refresh. - We test this in OpenIdConnectAuthenticatorTests. Without this fix, when using the OpenID Connect realm with a remote JWKSet configured in `op.jwks_path`, the refresh would be triggered for most configuration errors ( i.e. wrong value for `op.issuer` ) and the kibana wouldn't get a response and timeout since `getUserClaims` wouldn't be called because `ReloadableJWKSource#reloadAsync` wouldn't call `onResponse` on the future. --- .../authc/oidc/OpenIdConnectAuthenticator.java | 6 ++++-- .../oidc/OpenIdConnectAuthenticatorTests.java | 15 +++++++++++++++ 2 files changed, 19 insertions(+), 2 deletions(-) diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectAuthenticator.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectAuthenticator.java index c652a39b90912..6de933804f307 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectAuthenticator.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectAuthenticator.java @@ -12,6 +12,7 @@ import com.nimbusds.jose.jwk.JWKSet; import com.nimbusds.jose.jwk.source.JWKSource; import com.nimbusds.jose.proc.BadJOSEException; +import com.nimbusds.jose.proc.BadJWSException; import com.nimbusds.jose.proc.JWSVerificationKeySelector; import com.nimbusds.jose.proc.SecurityContext; import com.nimbusds.jose.util.IOUtils; @@ -240,7 +241,7 @@ private void getUserClaims(@Nullable AccessToken accessToken, JWT idToken, Nonce } claimsListener.onResponse(enrichedVerifiedIdTokenClaims); } - } catch (BadJOSEException e) { + } catch (BadJWSException e) { // We only try to update the cached JWK set once if a remote source is used and // RSA or ECDSA is used for signatures if (shouldRetry @@ -256,7 +257,7 @@ private void getUserClaims(@Nullable AccessToken accessToken, JWT idToken, Nonce } else { claimsListener.onFailure(new ElasticsearchSecurityException("Failed to parse or validate the ID Token", e)); } - } catch (com.nimbusds.oauth2.sdk.ParseException | ParseException | JOSEException e) { + } catch (com.nimbusds.oauth2.sdk.ParseException | ParseException | BadJOSEException | JOSEException e) { claimsListener.onFailure(new ElasticsearchSecurityException("Failed to parse or validate the ID Token", e)); } } @@ -777,6 +778,7 @@ public void completed(HttpResponse result) { StandardCharsets.UTF_8)); reloadFutureRef.set(null); LOGGER.trace("Successfully refreshed and cached remote JWKSet"); + future.onResponse(null); } catch (IOException | ParseException e) { failed(e); } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectAuthenticatorTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectAuthenticatorTests.java index 43b58b8d4b521..7a2fa9af03940 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectAuthenticatorTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectAuthenticatorTests.java @@ -88,6 +88,7 @@ public class OpenIdConnectAuthenticatorTests extends OpenIdConnectTestCase { private Settings globalSettings; private Environment env; private ThreadContext threadContext; + private int callsToReloadJwk; @Before public void setup() { @@ -95,6 +96,7 @@ public void setup() { .put("xpack.security.authc.realms.oidc.oidc-realm.ssl.verification_mode", "certificate").build(); env = TestEnvironment.newEnvironment(globalSettings); threadContext = new ThreadContext(globalSettings); + callsToReloadJwk = 0; } @After @@ -278,6 +280,7 @@ public void testClockSkewIsHonored() throws Exception { authenticator.authenticate(token, future); JWTClaimsSet claimsSet = future.actionGet(); assertThat(claimsSet.getSubject(), equalTo(subject)); + assertThat(callsToReloadJwk, equalTo(0)); } public void testImplicitFlowFailsWithExpiredToken() throws Exception { @@ -317,6 +320,7 @@ public void testImplicitFlowFailsWithExpiredToken() throws Exception { assertThat(e.getMessage(), containsString("Failed to parse or validate the ID Token")); assertThat(e.getCause(), instanceOf(BadJWTException.class)); assertThat(e.getCause().getMessage(), containsString("Expired JWT")); + assertThat(callsToReloadJwk, equalTo(0)); } public void testImplicitFlowFailsNotYetIssuedToken() throws Exception { @@ -356,6 +360,7 @@ public void testImplicitFlowFailsNotYetIssuedToken() throws Exception { assertThat(e.getMessage(), containsString("Failed to parse or validate the ID Token")); assertThat(e.getCause(), instanceOf(BadJWTException.class)); assertThat(e.getCause().getMessage(), containsString("JWT issue time ahead of current time")); + assertThat(callsToReloadJwk, equalTo(0)); } public void testImplicitFlowFailsInvalidIssuer() throws Exception { @@ -394,6 +399,7 @@ public void testImplicitFlowFailsInvalidIssuer() throws Exception { assertThat(e.getMessage(), containsString("Failed to parse or validate the ID Token")); assertThat(e.getCause(), instanceOf(BadJWTException.class)); assertThat(e.getCause().getMessage(), containsString("Unexpected JWT issuer")); + assertThat(callsToReloadJwk, equalTo(0)); } public void testImplicitFlowFailsInvalidAudience() throws Exception { @@ -432,6 +438,7 @@ public void testImplicitFlowFailsInvalidAudience() throws Exception { assertThat(e.getMessage(), containsString("Failed to parse or validate the ID Token")); assertThat(e.getCause(), instanceOf(BadJWTException.class)); assertThat(e.getCause().getMessage(), containsString("Unexpected JWT audience")); + assertThat(callsToReloadJwk, equalTo(0)); } public void testAuthenticateImplicitFlowFailsWithForgedRsaIdToken() throws Exception { @@ -456,6 +463,7 @@ public void testAuthenticateImplicitFlowFailsWithForgedRsaIdToken() throws Excep assertThat(e.getMessage(), containsString("Failed to parse or validate the ID Token")); assertThat(e.getCause(), instanceOf(BadJWSException.class)); assertThat(e.getCause().getMessage(), containsString("Signed JWT rejected: Invalid signature")); + assertThat(callsToReloadJwk, equalTo(1)); } public void testAuthenticateImplicitFlowFailsWithForgedEcsdsaIdToken() throws Exception { @@ -480,6 +488,7 @@ public void testAuthenticateImplicitFlowFailsWithForgedEcsdsaIdToken() throws Ex assertThat(e.getMessage(), containsString("Failed to parse or validate the ID Token")); assertThat(e.getCause(), instanceOf(BadJWSException.class)); assertThat(e.getCause().getMessage(), containsString("Signed JWT rejected: Invalid signature")); + assertThat(callsToReloadJwk, equalTo(1)); } public void testAuthenticateImplicitFlowFailsWithForgedHmacIdToken() throws Exception { @@ -503,6 +512,7 @@ public void testAuthenticateImplicitFlowFailsWithForgedHmacIdToken() throws Exce assertThat(e.getMessage(), containsString("Failed to parse or validate the ID Token")); assertThat(e.getCause(), instanceOf(BadJWSException.class)); assertThat(e.getCause().getMessage(), containsString("Signed JWT rejected: Invalid signature")); + assertThat(callsToReloadJwk, equalTo(0)); } public void testAuthenticateImplicitFlowFailsWithForgedAccessToken() throws Exception { @@ -532,6 +542,7 @@ public void testAuthenticateImplicitFlowFailsWithForgedAccessToken() throws Exce assertThat(e.getMessage(), containsString("Failed to verify access token")); assertThat(e.getCause(), instanceOf(InvalidHashException.class)); assertThat(e.getCause().getMessage(), containsString("Access token hash (at_hash) mismatch")); + assertThat(callsToReloadJwk, equalTo(0)); } public void testImplicitFlowFailsWithNoneAlgorithm() throws Exception { @@ -569,6 +580,7 @@ public void testImplicitFlowFailsWithNoneAlgorithm() throws Exception { assertThat(e.getMessage(), containsString("Failed to parse or validate the ID Token")); assertThat(e.getCause(), instanceOf(BadJOSEException.class)); assertThat(e.getCause().getMessage(), containsString("Another algorithm expected, or no matching key(s) found")); + assertThat(callsToReloadJwk, equalTo(0)); } /** @@ -599,6 +611,7 @@ public void testImplicitFlowFailsWithAlgorithmMixupAttack() throws Exception { assertThat(e.getMessage(), containsString("Failed to parse or validate the ID Token")); assertThat(e.getCause(), instanceOf(BadJOSEException.class)); assertThat(e.getCause().getMessage(), containsString("Another algorithm expected, or no matching key(s) found")); + assertThat(callsToReloadJwk, equalTo(0)); } public void testImplicitFlowFailsWithUnsignedJwt() throws Exception { @@ -635,6 +648,7 @@ public void testImplicitFlowFailsWithUnsignedJwt() throws Exception { assertThat(e.getMessage(), containsString("Failed to parse or validate the ID Token")); assertThat(e.getCause(), instanceOf(BadJWTException.class)); assertThat(e.getCause().getMessage(), containsString("Signed ID token expected")); + assertThat(callsToReloadJwk, equalTo(0)); } public void testJsonObjectMerging() throws Exception { @@ -832,6 +846,7 @@ private OpenIdConnectAuthenticator.ReloadableJWKSource mockSource(JWK jwk) { Mockito.doAnswer(invocation -> { @SuppressWarnings("unchecked") ActionListener listener = (ActionListener) invocation.getArguments()[0]; + callsToReloadJwk += 1; listener.onResponse(null); return null; }).when(jwkSource).triggerReload(any(ActionListener.class)); From c76ab9c103171cafeac5eab39d631bffe7ae01f1 Mon Sep 17 00:00:00 2001 From: Benjamin Trent Date: Thu, 30 May 2019 09:48:29 -0500 Subject: [PATCH 193/224] [ML] [Data Frame] add support for weighted_avg agg (#42646) --- .../integration/DataFramePivotRestIT.java | 39 +++++++++++++++++++ .../transforms/pivot/Aggregations.java | 1 + .../transforms/pivot/SchemaUtil.java | 3 +- .../transforms/pivot/AggregationsTests.java | 4 ++ .../transforms/pivot/PivotTests.java | 10 +++++ 5 files changed, 56 insertions(+), 1 deletion(-) diff --git a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFramePivotRestIT.java b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFramePivotRestIT.java index a0bec6ec13c34..3c661a0f4aca4 100644 --- a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFramePivotRestIT.java +++ b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFramePivotRestIT.java @@ -473,6 +473,45 @@ public void testPivotWithGeoCentroidAgg() throws Exception { assertEquals((4 + 15), Double.valueOf(latlon[1]), 0.000001); } + public void testPivotWithWeightedAvgAgg() throws Exception { + String transformId = "weightedAvgAggTransform"; + String dataFrameIndex = "weighted_avg_pivot_reviews"; + setupDataAccessRole(DATA_ACCESS_ROLE, REVIEWS_INDEX_NAME, dataFrameIndex); + + final Request createDataframeTransformRequest = createRequestWithAuth("PUT", DATAFRAME_ENDPOINT + transformId, + BASIC_AUTH_VALUE_DATA_FRAME_ADMIN_WITH_SOME_DATA_ACCESS); + + String config = "{" + + " \"source\": {\"index\":\"" + REVIEWS_INDEX_NAME + "\"}," + + " \"dest\": {\"index\":\"" + dataFrameIndex + "\"},"; + + config += " \"pivot\": {" + + " \"group_by\": {" + + " \"reviewer\": {" + + " \"terms\": {" + + " \"field\": \"user_id\"" + + " } } }," + + " \"aggregations\": {" + + " \"avg_rating\": {" + + " \"weighted_avg\": {" + + " \"value\": {\"field\": \"stars\"}," + + " \"weight\": {\"field\": \"stars\"}" + + "} } } }" + + "}"; + + createDataframeTransformRequest.setJsonEntity(config); + Map createDataframeTransformResponse = entityAsMap(client().performRequest(createDataframeTransformRequest)); + assertThat(createDataframeTransformResponse.get("acknowledged"), equalTo(Boolean.TRUE)); + + startAndWaitForTransform(transformId, dataFrameIndex, BASIC_AUTH_VALUE_DATA_FRAME_ADMIN_WITH_SOME_DATA_ACCESS); + assertTrue(indexExists(dataFrameIndex)); + + Map searchResult = getAsMap(dataFrameIndex + "/_search?q=reviewer:user_4"); + assertEquals(1, XContentMapValues.extractValue("hits.total.value", searchResult)); + Number actual = (Number) ((List) XContentMapValues.extractValue("hits.hits._source.avg_rating", searchResult)).get(0); + assertEquals(4.47169811, actual.doubleValue(), 0.000001); + } + private void assertOnePivotValue(String query, double expected) throws IOException { Map searchResult = getAsMap(query); diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/pivot/Aggregations.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/pivot/Aggregations.java index 615c9b2e8d2e6..4e74f9085e3a6 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/pivot/Aggregations.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/pivot/Aggregations.java @@ -37,6 +37,7 @@ enum AggregationType { SUM("sum", SOURCE), GEO_CENTROID("geo_centroid", "geo_point"), SCRIPTED_METRIC("scripted_metric", DYNAMIC), + WEIGHTED_AVG("weighted_avg", DYNAMIC), BUCKET_SCRIPT("bucket_script", DYNAMIC); private final String aggregationType; diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/pivot/SchemaUtil.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/pivot/SchemaUtil.java index 304f35b8c4c8a..4ac77c38f7d5f 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/pivot/SchemaUtil.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/pivot/SchemaUtil.java @@ -17,6 +17,7 @@ import org.elasticsearch.search.aggregations.AggregationBuilder; import org.elasticsearch.search.aggregations.PipelineAggregationBuilder; import org.elasticsearch.search.aggregations.metrics.ScriptedMetricAggregationBuilder; +import org.elasticsearch.search.aggregations.support.MultiValuesSourceAggregationBuilder; import org.elasticsearch.search.aggregations.support.ValuesSourceAggregationBuilder; import org.elasticsearch.xpack.core.ClientHelper; import org.elasticsearch.xpack.core.dataframe.transforms.pivot.PivotConfig; @@ -77,7 +78,7 @@ public static void deduceMappings(final Client client, ValuesSourceAggregationBuilder valueSourceAggregation = (ValuesSourceAggregationBuilder) agg; aggregationSourceFieldNames.put(valueSourceAggregation.getName(), valueSourceAggregation.field()); aggregationTypes.put(valueSourceAggregation.getName(), valueSourceAggregation.getType()); - } else if(agg instanceof ScriptedMetricAggregationBuilder) { + } else if(agg instanceof ScriptedMetricAggregationBuilder || agg instanceof MultiValuesSourceAggregationBuilder) { aggregationTypes.put(agg.getName(), agg.getType()); } else { // execution should not reach this point diff --git a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/pivot/AggregationsTests.java b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/pivot/AggregationsTests.java index 8443699430a2a..ace42cb65fcaf 100644 --- a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/pivot/AggregationsTests.java +++ b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/pivot/AggregationsTests.java @@ -49,5 +49,9 @@ public void testResolveTargetMapping() { // bucket_script assertEquals("_dynamic", Aggregations.resolveTargetMapping("bucket_script", null)); assertEquals("_dynamic", Aggregations.resolveTargetMapping("bucket_script", "int")); + + // weighted_avg + assertEquals("_dynamic", Aggregations.resolveTargetMapping("weighted_avg", null)); + assertEquals("_dynamic", Aggregations.resolveTargetMapping("weighted_avg", "double")); } } diff --git a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/pivot/PivotTests.java b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/pivot/PivotTests.java index 20ea84502ed82..d54cbad97f726 100644 --- a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/pivot/PivotTests.java +++ b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/pivot/PivotTests.java @@ -215,6 +215,16 @@ private AggregationConfig getAggregationConfig(String agg) throws IOException { "\"buckets_path\":{\"param_1\":\"other_bucket\"}," + "\"script\":\"return params.param_1\"}}}"); } + if (agg.equals(AggregationType.WEIGHTED_AVG.getName())) { + return parseAggregations("{\n" + + "\"pivot_weighted_avg\": {\n" + + " \"weighted_avg\": {\n" + + " \"value\": {\"field\": \"values\"},\n" + + " \"weight\": {\"field\": \"weights\"}\n" + + " }\n" + + "}\n" + + "}"); + } return parseAggregations("{\n" + " \"pivot_" + agg + "\": {\n" + " \"" + agg + "\": {\n" + " \"field\": \"values\"\n" + " }\n" + " }" + "}"); } From 8322fb603a1e8b0f301bdfe57dd7547c019119ba Mon Sep 17 00:00:00 2001 From: Mark Vieira Date: Thu, 30 May 2019 08:37:58 -0700 Subject: [PATCH 194/224] Remove unused Gradle plugin (#42684) --- .../gradle/test/MessyTestPlugin.groovy | 63 ------------------- .../elasticsearch.messy-test.properties | 20 ------ 2 files changed, 83 deletions(-) delete mode 100644 buildSrc/src/main/groovy/org/elasticsearch/gradle/test/MessyTestPlugin.groovy delete mode 100644 buildSrc/src/main/resources/META-INF/gradle-plugins/elasticsearch.messy-test.properties diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/MessyTestPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/MessyTestPlugin.groovy deleted file mode 100644 index 1c0aec1bc00f3..0000000000000 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/MessyTestPlugin.groovy +++ /dev/null @@ -1,63 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.gradle.test - -import org.elasticsearch.gradle.plugin.PluginBuildPlugin -import org.gradle.api.Project -import org.gradle.api.artifacts.Dependency -import org.gradle.api.artifacts.ProjectDependency -import org.gradle.api.tasks.Copy - -/** - * A plugin to run messy tests, which are generally tests that depend on plugins. - * - * This plugin will add the same test configuration as standalone tests, except - * also add the plugin-metadata and properties files for each plugin project - * dependency. - */ -class MessyTestPlugin extends StandaloneTestPlugin { - @Override - public void apply(Project project) { - super.apply(project) - - project.configurations.testCompile.dependencies.all { Dependency dep -> - // this closure is run every time a compile dependency is added - if (dep instanceof ProjectDependency && dep.dependencyProject.plugins.hasPlugin(PluginBuildPlugin)) { - project.gradle.projectsEvaluated { - addPluginResources(project, dep.dependencyProject) - } - } - } - } - - private static addPluginResources(Project project, Project pluginProject) { - String outputDir = "${project.buildDir}/generated-resources/${pluginProject.name}" - String taskName = ClusterFormationTasks.pluginTaskName("copy", pluginProject.name, "Metadata") - Copy copyPluginMetadata = project.tasks.create(taskName, Copy.class) - copyPluginMetadata.into(outputDir) - copyPluginMetadata.from(pluginProject.tasks.pluginProperties) - copyPluginMetadata.from(pluginProject.file('src/main/plugin-metadata')) - project.sourceSets.test.output.dir(outputDir, builtBy: taskName) - - // add each generated dir to the test classpath in IDEs - project.idea.module.singleEntryLibraries= ['TEST': [project.file(outputDir)]] - // Eclipse doesn't need this because it gets the entire module as a dependency - } -} diff --git a/buildSrc/src/main/resources/META-INF/gradle-plugins/elasticsearch.messy-test.properties b/buildSrc/src/main/resources/META-INF/gradle-plugins/elasticsearch.messy-test.properties deleted file mode 100644 index 507a0f85a0468..0000000000000 --- a/buildSrc/src/main/resources/META-INF/gradle-plugins/elasticsearch.messy-test.properties +++ /dev/null @@ -1,20 +0,0 @@ -# -# Licensed to Elasticsearch under one or more contributor -# license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright -# ownership. Elasticsearch licenses this file to you under -# the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -# - -implementation-class=org.elasticsearch.gradle.test.MessyTestPlugin From ee828a3aaa7e385665c6103d2a9f26f7821492ad Mon Sep 17 00:00:00 2001 From: Mark Vieira Date: Thu, 30 May 2019 08:38:17 -0700 Subject: [PATCH 195/224] Remove usage of deprecated compare gradle builds plugin (#42687) * Remove usage of deprecated compare gradle builds plugin * Remove system property only used by build comparison --- build.gradle | 25 ------------------- .../elasticsearch/gradle/BuildPlugin.groovy | 4 --- 2 files changed, 29 deletions(-) diff --git a/build.gradle b/build.gradle index 1de3f919d9c49..c91c1554bd405 100644 --- a/build.gradle +++ b/build.gradle @@ -552,31 +552,6 @@ gradle.projectsEvaluated { } } -if (System.properties.get("build.compare") != null) { - apply plugin: 'compare-gradle-builds' - compareGradleBuilds { - ext.referenceProject = System.properties.get("build.compare") - doFirst { - if (file(referenceProject).exists() == false) { - throw new GradleException( - "Use git worktree to check out a version to compare against to ../elasticsearch_build_reference" - ) - } - } - sourceBuild { - gradleVersion = gradle.getGradleVersion() - projectDir = referenceProject - tasks = ["clean", "assemble"] - arguments = ["-Dbuild.compare_friendly=true"] - } - targetBuild { - tasks = ["clean", "assemble"] - // use -Dorg.gradle.java.home= to alter jdk versions - arguments = ["-Dbuild.compare_friendly=true"] - } - } -} - allprojects { task resolveAllDependencies { dependsOn tasks.matching { it.name == "pullFixture"} diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy index 92d11a8477436..fffbcd273dcb7 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy @@ -716,10 +716,6 @@ class BuildPlugin implements Plugin { jarTask.manifest.attributes('Change': shortHash) } } - // Force manifest entries that change by nature to a constant to be able to compare builds more effectively - if (System.properties.getProperty("build.compare_friendly", "false") == "true") { - jarTask.manifest.getAttributes().clear() - } } // add license/notice files From a66d7e8cf85ff7525c795605b1803b0f07ec0c7e Mon Sep 17 00:00:00 2001 From: David Turner Date: Thu, 30 May 2019 16:54:01 +0100 Subject: [PATCH 196/224] Prevent merging nodes' data paths (#42665) Today Elasticsearch does not prevent you from reconfiguring a node's `path.data` to point to data paths that previously belonged to more than one node. There's no good reason to be able to do this, and the consequences can be quietly disastrous. Furthermore, #42489 might result in a user trying to split up a previously-shared collection of data paths by hand and there's definitely scope for mixing the paths up across nodes when doing this. This change adds a check during startup to ensure that each data path belongs to the same node. --- .../elasticsearch/env/NodeEnvironment.java | 16 ++++++++++ .../elasticsearch/env/NodeEnvironmentIT.java | 32 ++++++++++++++++++- 2 files changed, 47 insertions(+), 1 deletion(-) diff --git a/server/src/main/java/org/elasticsearch/env/NodeEnvironment.java b/server/src/main/java/org/elasticsearch/env/NodeEnvironment.java index 75f39e70cfc7b..160662a63e5b3 100644 --- a/server/src/main/java/org/elasticsearch/env/NodeEnvironment.java +++ b/server/src/main/java/org/elasticsearch/env/NodeEnvironment.java @@ -72,6 +72,7 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; +import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; @@ -494,10 +495,25 @@ private void maybeLogHeapDetails() { private static NodeMetaData loadOrCreateNodeMetaData(Settings settings, Logger logger, NodePath... nodePaths) throws IOException { final Path[] paths = Arrays.stream(nodePaths).map(np -> np.path).toArray(Path[]::new); + + final Set nodeIds = new HashSet<>(); + for (final Path path : paths) { + final NodeMetaData metaData = NodeMetaData.FORMAT.loadLatestState(logger, NamedXContentRegistry.EMPTY, path); + if (metaData != null) { + nodeIds.add(metaData.nodeId()); + } + } + if (nodeIds.size() > 1) { + throw new IllegalStateException( + "data paths " + Arrays.toString(paths) + " belong to multiple nodes with IDs " + nodeIds); + } + NodeMetaData metaData = NodeMetaData.FORMAT.loadLatestState(logger, NamedXContentRegistry.EMPTY, paths); if (metaData == null) { + assert nodeIds.isEmpty() : nodeIds; metaData = new NodeMetaData(generateNodeId(settings), Version.CURRENT); } else { + assert nodeIds.equals(Collections.singleton(metaData.nodeId())) : nodeIds + " doesn't match " + metaData; metaData = metaData.upgradeToCurrentVersion(); } diff --git a/server/src/test/java/org/elasticsearch/env/NodeEnvironmentIT.java b/server/src/test/java/org/elasticsearch/env/NodeEnvironmentIT.java index 4d1848428e5a7..b65e40ec43e24 100644 --- a/server/src/test/java/org/elasticsearch/env/NodeEnvironmentIT.java +++ b/server/src/test/java/org/elasticsearch/env/NodeEnvironmentIT.java @@ -34,6 +34,7 @@ import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.StandardCopyOption; +import java.util.ArrayList; import java.util.List; import java.util.stream.Collectors; @@ -46,7 +47,7 @@ @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0) public class NodeEnvironmentIT extends ESIntegTestCase { - public void testStartFailureOnDataForNonDataNode() throws Exception { + public void testStartFailureOnDataForNonDataNode() { final String indexName = "test-fail-on-data"; logger.info("--> starting one node"); @@ -208,4 +209,33 @@ public void testUpgradeDataFolder() throws IOException, InterruptedException { ensureYellow("test"); assertHitCount(client().prepareSearch().setQuery(matchAllQuery()).get(), 1L); } + + public void testFailsToStartOnDataPathsFromMultipleNodes() throws IOException { + final List nodes = internalCluster().startNodes(2); + ensureStableCluster(2); + + final List node0DataPaths = Environment.PATH_DATA_SETTING.get(internalCluster().dataPathSettings(nodes.get(0))); + final List node1DataPaths = Environment.PATH_DATA_SETTING.get(internalCluster().dataPathSettings(nodes.get(1))); + + final List allDataPaths = new ArrayList<>(node0DataPaths); + allDataPaths.addAll(node1DataPaths); + + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(nodes.get(1))); + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(nodes.get(0))); + + final IllegalStateException illegalStateException = expectThrows(IllegalStateException.class, + () -> internalCluster().startNode(Settings.builder().putList(Environment.PATH_DATA_SETTING.getKey(), allDataPaths))); + + assertThat(illegalStateException.getMessage(), containsString("belong to multiple nodes with IDs")); + + final List node0DataPathsPlusOne = new ArrayList<>(node0DataPaths); + node0DataPathsPlusOne.add(createTempDir().toString()); + internalCluster().startNode(Settings.builder().putList(Environment.PATH_DATA_SETTING.getKey(), node0DataPathsPlusOne)); + + final List node1DataPathsPlusOne = new ArrayList<>(node1DataPaths); + node1DataPathsPlusOne.add(createTempDir().toString()); + internalCluster().startNode(Settings.builder().putList(Environment.PATH_DATA_SETTING.getKey(), node1DataPathsPlusOne)); + + ensureStableCluster(2); + } } From 2bc436309d47391daabe3d131ff7572618ba33aa Mon Sep 17 00:00:00 2001 From: Julie Tibshirani Date: Thu, 30 May 2019 09:23:38 -0700 Subject: [PATCH 197/224] Clarify the settings around limiting nested mappings. (#42686) * Previously, we mentioned multiple times that each nested object was indexed as its own document. This is repetitive, and is also a bit confusing in the context of `index.mapping.nested_fields.limit`, as that applies to the number of distinct `nested` types in the mappings, not the number of nested objects. We now just describe the issue once at the beginning of the section, to illustrate why `nested` types can be expensive. * Reference the ongoing example to clarify the meaning of the two settings. Addresses #28363. --- docs/reference/mapping.asciidoc | 10 ++--- docs/reference/mapping/types/nested.asciidoc | 42 +++++++++++++------- 2 files changed, 30 insertions(+), 22 deletions(-) diff --git a/docs/reference/mapping.asciidoc b/docs/reference/mapping.asciidoc index 2e09a0a8ca24a..d0a3c6e06cd66 100644 --- a/docs/reference/mapping.asciidoc +++ b/docs/reference/mapping.asciidoc @@ -87,15 +87,11 @@ causing a mapping explosion: `2`, etc. The default is `20`. `index.mapping.nested_fields.limit`:: - The maximum number of `nested` fields in an index, defaults to `50`. - Indexing 1 document with 100 nested fields actually indexes 101 documents - as each nested document is indexed as a separate hidden document. + The maximum number of distinct `nested` mappings in an index, defaults to `50`. `index.mapping.nested_objects.limit`:: - The maximum number of `nested` json objects within a single document across - all nested fields, defaults to 10000. Indexing one document with an array of - 100 objects within a nested field, will actually create 101 documents, as - each nested object will be indexed as a separate hidden document. + The maximum number of `nested` JSON objects within a single document across + all nested types, defaults to 10000. `index.mapping.field_name_length.limit`:: Setting for the maximum length of a field name. The default value is diff --git a/docs/reference/mapping/types/nested.asciidoc b/docs/reference/mapping/types/nested.asciidoc index fe150a69b4900..de0f3f2a5f1cd 100644 --- a/docs/reference/mapping/types/nested.asciidoc +++ b/docs/reference/mapping/types/nested.asciidoc @@ -193,20 +193,32 @@ phase. Instead, highlighting needs to be performed via ============================================= -[[limit-number-nested-fields]] -==== Limiting the number of `nested` fields - -Indexing a document with 100 nested fields actually indexes 101 documents as each nested -document is indexed as a separate document. To safeguard against ill-defined mappings -the number of nested fields that can be defined per index has been limited to 50. See -<>. - -[[limit-nested-json-objects-number]] -==== Limiting the number of `nested` json objects -Indexing a document with an array of 100 objects within a nested field, will actually -create 101 documents, as each nested object will be indexed as a separate document. -To prevent out of memory errors when a single document contains too many nested json -objects, the number of nested json objects that a single document may contain across all fields -has been limited to 10000. See <>. +[float] +=== Limits on `nested` mappings and objects + +As described earlier, each nested object is indexed as a separate document under the hood. +Continuing with the example above, if we indexed a single document containing 100 `user` objects, +then 101 Lucene documents would be created -- one for the parent document, and one for each +nested object. Because of the expense associated with `nested` mappings, Elasticsearch puts +settings in place to guard against performance problems: + +`index.mapping.nested_fields.limit`:: + + The `nested` type should only be used in special cases, when arrays of objects need to be + queried independently of each other. To safeguard against poorly designed mappings, this setting + limits the number of unique `nested` types per index. In our example, the `user` mapping would + count as only 1 towards this limit. Defaults to 50. + +`index.mapping.nested_objects.limit`:: + + This setting limits the number of nested objects that a single document may contain across all + `nested` types, in order to prevent out of memory errors when a document contains too many nested + objects. To illustrate how the setting works, say we added another `nested` type called `comments` + to our example mapping above. Then for each document, the combined number of `user` and `comment` + objects it contains must be below the limit. Defaults to 10000. + +Additional background on these settings, including information on their default values, can be found +in <>. + From 8d0bae67921b50ad3f5b4069d4b4d6b06824fbdd Mon Sep 17 00:00:00 2001 From: Jay Modi Date: Thu, 30 May 2019 10:27:17 -0600 Subject: [PATCH 198/224] Make hashed token ids url safe (#42651) This commit changes the way token ids are hashed so that the output is url safe without requiring encoding. This follows the pattern that we use for document ids that are autogenerated, see UUIDs and the associated classes for additional details. --- .../xpack/core/security/authc/support/Hasher.java | 4 ++-- .../xpack/security/authc/TokenService.java | 2 +- .../xpack/security/authc/TokenServiceTests.java | 12 +++++++++--- .../upgrades/TokenBackwardsCompatibilityIT.java | 2 ++ .../test/mixed_cluster/50_token_auth.yml | 6 ++++++ .../test/upgraded_cluster/50_token_auth.yml | 4 ++++ 6 files changed, 24 insertions(+), 6 deletions(-) diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/Hasher.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/Hasher.java index 5413a38bd6288..1b5b65e60c258 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/Hasher.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/Hasher.java @@ -360,14 +360,14 @@ public boolean verify(SecureString text, char[] hash) { public char[] hash(SecureString text) { MessageDigest md = MessageDigests.sha256(); md.update(CharArrays.toUtf8Bytes(text.getChars())); - return Base64.getEncoder().encodeToString(md.digest()).toCharArray(); + return Base64.getUrlEncoder().withoutPadding().encodeToString(md.digest()).toCharArray(); } @Override public boolean verify(SecureString text, char[] hash) { MessageDigest md = MessageDigests.sha256(); md.update(CharArrays.toUtf8Bytes(text.getChars())); - return CharArrays.constantTimeEquals(Base64.getEncoder().encodeToString(md.digest()).toCharArray(), hash); + return CharArrays.constantTimeEquals(Base64.getUrlEncoder().withoutPadding().encodeToString(md.digest()).toCharArray(), hash); } }, diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/TokenService.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/TokenService.java index a8f68870556e6..6472220482966 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/TokenService.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/TokenService.java @@ -181,7 +181,7 @@ public final class TokenService { TimeValue.MINUS_ONE, Property.NodeScope); static final String TOKEN_DOC_TYPE = "token"; - private static final int HASHED_TOKEN_LENGTH = 44; + private static final int HASHED_TOKEN_LENGTH = 43; // UUIDs are 16 bytes encoded base64 without padding, therefore the length is (16 / 3) * 4 + ((16 % 3) * 8 + 5) / 6 chars private static final int TOKEN_LENGTH = 22; private static final String TOKEN_DOC_ID_PREFIX = TOKEN_DOC_TYPE + "_"; diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/TokenServiceTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/TokenServiceTests.java index 4bc1efe772949..47e5b734c8042 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/TokenServiceTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/TokenServiceTests.java @@ -53,14 +53,17 @@ import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.core.watcher.watch.ClockMock; import org.elasticsearch.xpack.security.support.SecurityIndexManager; -import org.junit.After; import org.elasticsearch.xpack.security.test.SecurityMocks; import org.hamcrest.Matchers; +import org.junit.After; import org.junit.AfterClass; import org.junit.Before; import org.junit.BeforeClass; +import javax.crypto.SecretKey; import java.io.IOException; +import java.net.URLEncoder; +import java.nio.charset.StandardCharsets; import java.security.GeneralSecurityException; import java.time.Clock; import java.time.Instant; @@ -70,8 +73,6 @@ import java.util.HashMap; import java.util.Map; -import javax.crypto.SecretKey; - import static java.time.Clock.systemUTC; import static org.elasticsearch.repositories.ESBlobStoreTestCase.randomBytes; import static org.elasticsearch.test.ClusterServiceUtils.setState; @@ -721,6 +722,11 @@ public void testCannotValidateTokenIfLicenseDoesNotAllowTokens() throws Exceptio assertThat(authToken, Matchers.nullValue()); } + public void testHashedTokenIsUrlSafe() { + final String hashedId = TokenService.hashTokenString(UUIDs.randomBase64UUID()); + assertEquals(hashedId, URLEncoder.encode(hashedId, StandardCharsets.UTF_8)); + } + private TokenService createTokenService(Settings settings, Clock clock) throws GeneralSecurityException { return new TokenService(settings, clock, client, licenseState, securityMainIndex, securityTokensIndex, clusterService); } diff --git a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/TokenBackwardsCompatibilityIT.java b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/TokenBackwardsCompatibilityIT.java index 69c515d80a3d2..4a0639050d522 100644 --- a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/TokenBackwardsCompatibilityIT.java +++ b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/TokenBackwardsCompatibilityIT.java @@ -7,6 +7,7 @@ import org.apache.http.HttpHeaders; import org.apache.http.HttpHost; +import org.apache.lucene.util.LuceneTestCase.AwaitsFix; import org.elasticsearch.Version; import org.elasticsearch.client.Request; import org.elasticsearch.client.RequestOptions; @@ -25,6 +26,7 @@ import java.util.List; import java.util.Map; +@AwaitsFix(bugUrl = "need to backport #42651") public class TokenBackwardsCompatibilityIT extends AbstractUpgradeTestCase { private Collection twoClients = null; diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/50_token_auth.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/50_token_auth.yml index f426d9b2525b4..a34128579f3f8 100644 --- a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/50_token_auth.yml +++ b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/50_token_auth.yml @@ -2,6 +2,8 @@ "Get the indexed token and use if to authenticate": - skip: features: headers + version: " - 7.99.99" + reason: "Need to backport PR #42651" - do: cluster.health: @@ -59,6 +61,8 @@ "Get the indexed refreshed access token and use if to authenticate": - skip: features: headers + version: " - 7.99.99" + reason: "Need to backport PR #42651" - do: get: @@ -111,6 +115,8 @@ "Get the indexed refresh token and use it to get another access token and authenticate": - skip: features: headers + version: " - 7.99.99" + reason: "Need to backport PR #42651" - do: get: diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/50_token_auth.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/50_token_auth.yml index 430f94c1064d6..64897707c15d3 100644 --- a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/50_token_auth.yml +++ b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/50_token_auth.yml @@ -2,6 +2,8 @@ "Get the indexed token and use if to authenticate": - skip: features: headers + version: " - 8.0.0" + reason: "Need to backport PR #42651" - do: cluster.health: @@ -49,6 +51,8 @@ "Get the indexed refresh token and use if to get another access token and authenticate": - skip: features: headers + version: " - 8.0.0" + reason: "Need to backport PR #42651" - do: get: From 31121507448e34ee54c8040d1206c46652de0bd5 Mon Sep 17 00:00:00 2001 From: Lisa Cawley Date: Thu, 30 May 2019 12:12:37 -0700 Subject: [PATCH 199/224] [DOCS] Disable Metricbeat system module (#42601) --- .../monitoring/configuring-metricbeat.asciidoc | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/docs/reference/monitoring/configuring-metricbeat.asciidoc b/docs/reference/monitoring/configuring-metricbeat.asciidoc index 265eba5d480ab..0a3dad3f3785d 100644 --- a/docs/reference/monitoring/configuring-metricbeat.asciidoc +++ b/docs/reference/monitoring/configuring-metricbeat.asciidoc @@ -146,6 +146,23 @@ file. // end::remote-monitoring-user[] -- +. Optional: Disable the system module in {metricbeat}. ++ +-- +// tag::disable-system-module[] +By default, the {metricbeat-ref}/metricbeat-module-system.html[system module] is +enabled. The information it collects, however, is not shown on the *Monitoring* +page in {kib}. Unless you want to use that information for other purposes, run +the following command: + +["source","sh",subs="attributes,callouts"] +---------------------------------------------------------------------- +metricbeat modules disable system +---------------------------------------------------------------------- + +// end::disable-system-module[] +-- + . Identify where to send the monitoring data. + + -- From e586a218133b6e63c0e6b9c61ea28e97af08bf4e Mon Sep 17 00:00:00 2001 From: Jay Modi Date: Thu, 30 May 2019 13:38:38 -0600 Subject: [PATCH 200/224] Remove SecurityClient from x-pack (#42471) This commit removes the SecurityClient class from x-pack. This client class is a relic of the transport client, which is in the process of being removed. Some tests were changed to use the high level rest client and others use a client directly without the security client wrapping it. --- .../elasticsearch/client/RequestOptions.java | 9 +- .../elasticsearch/xpack/core/XPackClient.java | 7 - .../DeletePrivilegesRequestBuilder.java | 4 +- .../GetPrivilegesRequestBuilder.java | 4 +- .../PutPrivilegesRequestBuilder.java | 4 +- .../DeleteRoleMappingRequestBuilder.java | 5 +- .../GetRoleMappingsRequestBuilder.java | 4 +- .../PutRoleMappingRequestBuilder.java | 4 +- .../core/security/client/SecurityClient.java | 392 ------------- .../authc/esnative/NativeUsersStore.java | 12 +- .../mapper/NativeRoleMappingStore.java | 12 +- .../authz/store/NativePrivilegeStore.java | 10 +- .../authz/store/NativeRolesStore.java | 10 +- .../action/apikey/RestCreateApiKeyAction.java | 17 +- .../privilege/RestDeletePrivilegesAction.java | 32 +- .../privilege/RestGetPrivilegesAction.java | 48 +- .../privilege/RestPutPrivilegesAction.java | 7 +- .../realm/RestClearRealmCacheAction.java | 8 +- .../role/RestClearRolesCacheAction.java | 8 +- .../action/role/RestDeleteRoleAction.java | 23 +- .../rest/action/role/RestGetRolesAction.java | 6 +- .../rest/action/role/RestPutRoleAction.java | 9 +- .../RestDeleteRoleMappingAction.java | 21 +- .../RestGetRoleMappingsAction.java | 35 +- .../rolemapping/RestPutRoleMappingAction.java | 9 +- .../saml/RestSamlAuthenticateAction.java | 6 +- .../action/user/RestChangePasswordAction.java | 23 +- .../action/user/RestDeleteUserAction.java | 25 +- .../user/RestGetUserPrivilegesAction.java | 3 +- .../rest/action/user/RestGetUsersAction.java | 4 +- .../action/user/RestHasPrivilegesAction.java | 5 +- .../rest/action/user/RestPutUserAction.java | 9 +- .../action/user/RestSetEnabledAction.java | 18 +- .../integration/ClearRealmsCacheTests.java | 6 +- .../integration/ClearRolesCacheTests.java | 79 +-- .../MultipleIndicesPermissionsTests.java | 16 +- .../test/NativeRealmIntegTestCase.java | 11 +- .../test/SecurityIntegTestCase.java | 32 +- .../test/SecuritySettingsSource.java | 14 +- .../PutPrivilegesRequestBuilderTests.java | 8 +- .../PutRoleMappingRequestTests.java | 9 +- .../security/authc/ApiKeyIntegTests.java | 82 ++- .../security/authc/TokenAuthIntegTests.java | 522 +++++++----------- .../authc/esnative/NativeRealmIntegTests.java | 256 ++++----- .../esnative/ReservedRealmIntegTests.java | 42 +- .../security/authz/SecurityScrollTests.java | 9 +- .../authz/SnapshotUserRoleIntegTests.java | 42 +- .../SecurityIndexManagerIntegTests.java | 20 +- .../ldap/AbstractAdLdapRealmTestCase.java | 6 +- 49 files changed, 730 insertions(+), 1217 deletions(-) delete mode 100644 x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/client/SecurityClient.java diff --git a/client/rest/src/main/java/org/elasticsearch/client/RequestOptions.java b/client/rest/src/main/java/org/elasticsearch/client/RequestOptions.java index 0b2cdce3d52f7..3758b4c489ca2 100644 --- a/client/rest/src/main/java/org/elasticsearch/client/RequestOptions.java +++ b/client/rest/src/main/java/org/elasticsearch/client/RequestOptions.java @@ -170,10 +170,11 @@ public RequestOptions build() { /** * Add the provided header to the request. */ - public void addHeader(String name, String value) { + public Builder addHeader(String name, String value) { Objects.requireNonNull(name, "header name cannot be null"); Objects.requireNonNull(value, "header value cannot be null"); this.headers.add(new ReqHeader(name, value)); + return this; } /** @@ -182,9 +183,10 @@ public void addHeader(String name, String value) { * response body gets streamed from a non-blocking HTTP connection on the * client side. */ - public void setHttpAsyncResponseConsumerFactory(HttpAsyncResponseConsumerFactory httpAsyncResponseConsumerFactory) { + public Builder setHttpAsyncResponseConsumerFactory(HttpAsyncResponseConsumerFactory httpAsyncResponseConsumerFactory) { this.httpAsyncResponseConsumerFactory = Objects.requireNonNull(httpAsyncResponseConsumerFactory, "httpAsyncResponseConsumerFactory cannot be null"); + return this; } /** @@ -204,8 +206,9 @@ public void setHttpAsyncResponseConsumerFactory(HttpAsyncResponseConsumerFactory * fail the request if the warnings returned don't * exactly match some set. */ - public void setWarningsHandler(WarningsHandler warningsHandler) { + public Builder setWarningsHandler(WarningsHandler warningsHandler) { this.warningsHandler = warningsHandler; + return this; } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClient.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClient.java index e6cd2ed176c9c..acf97b63684ae 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClient.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClient.java @@ -21,7 +21,6 @@ import org.elasticsearch.xpack.core.indexlifecycle.client.ILMClient; import org.elasticsearch.xpack.core.ml.client.MachineLearningClient; import org.elasticsearch.xpack.core.monitoring.client.MonitoringClient; -import org.elasticsearch.xpack.core.security.client.SecurityClient; import org.elasticsearch.xpack.core.watcher.client.WatcherClient; import java.util.Collections; @@ -39,7 +38,6 @@ public class XPackClient { private final CcrClient ccrClient; private final LicensingClient licensingClient; private final MonitoringClient monitoringClient; - private final SecurityClient securityClient; private final WatcherClient watcherClient; private final MachineLearningClient machineLearning; private final ILMClient ilmClient; @@ -49,7 +47,6 @@ public XPackClient(Client client) { this.ccrClient = new CcrClient(client); this.licensingClient = new LicensingClient(client); this.monitoringClient = new MonitoringClient(client); - this.securityClient = new SecurityClient(client); this.watcherClient = new WatcherClient(client); this.machineLearning = new MachineLearningClient(client); this.ilmClient = new ILMClient(client); @@ -71,10 +68,6 @@ public MonitoringClient monitoring() { return monitoringClient; } - public SecurityClient security() { - return securityClient; - } - public WatcherClient watcher() { return watcherClient; } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/privilege/DeletePrivilegesRequestBuilder.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/privilege/DeletePrivilegesRequestBuilder.java index c1d364476ea3e..020d78bf3544d 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/privilege/DeletePrivilegesRequestBuilder.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/privilege/DeletePrivilegesRequestBuilder.java @@ -15,8 +15,8 @@ public final class DeletePrivilegesRequestBuilder extends ActionRequestBuilder implements WriteRequestBuilder { - public DeletePrivilegesRequestBuilder(ElasticsearchClient client, DeletePrivilegesAction action) { - super(client, action, new DeletePrivilegesRequest()); + public DeletePrivilegesRequestBuilder(ElasticsearchClient client) { + super(client, DeletePrivilegesAction.INSTANCE, new DeletePrivilegesRequest()); } public DeletePrivilegesRequestBuilder privileges(String[] privileges) { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/privilege/GetPrivilegesRequestBuilder.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/privilege/GetPrivilegesRequestBuilder.java index 305c8d1ff7946..5adfd5774fab8 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/privilege/GetPrivilegesRequestBuilder.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/privilege/GetPrivilegesRequestBuilder.java @@ -13,8 +13,8 @@ */ public final class GetPrivilegesRequestBuilder extends ActionRequestBuilder { - public GetPrivilegesRequestBuilder(ElasticsearchClient client, GetPrivilegesAction action) { - super(client, action, new GetPrivilegesRequest()); + public GetPrivilegesRequestBuilder(ElasticsearchClient client) { + super(client, GetPrivilegesAction.INSTANCE, new GetPrivilegesRequest()); } public GetPrivilegesRequestBuilder privileges(String... privileges) { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/privilege/PutPrivilegesRequestBuilder.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/privilege/PutPrivilegesRequestBuilder.java index 562e22a1eb925..b3ff11bba0ebf 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/privilege/PutPrivilegesRequestBuilder.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/privilege/PutPrivilegesRequestBuilder.java @@ -29,8 +29,8 @@ public final class PutPrivilegesRequestBuilder extends ActionRequestBuilder implements WriteRequestBuilder { - public PutPrivilegesRequestBuilder(ElasticsearchClient client, PutPrivilegesAction action) { - super(client, action, new PutPrivilegesRequest()); + public PutPrivilegesRequestBuilder(ElasticsearchClient client) { + super(client, PutPrivilegesAction.INSTANCE, new PutPrivilegesRequest()); } ApplicationPrivilegeDescriptor parsePrivilege(XContentParser parser, String applicationName, String privilegeName) throws IOException { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/rolemapping/DeleteRoleMappingRequestBuilder.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/rolemapping/DeleteRoleMappingRequestBuilder.java index 2ca23d31f693a..608b034e4e259 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/rolemapping/DeleteRoleMappingRequestBuilder.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/rolemapping/DeleteRoleMappingRequestBuilder.java @@ -16,9 +16,8 @@ public class DeleteRoleMappingRequestBuilder extends ActionRequestBuilder implements WriteRequestBuilder { - public DeleteRoleMappingRequestBuilder(ElasticsearchClient client, - DeleteRoleMappingAction action) { - super(client, action, new DeleteRoleMappingRequest()); + public DeleteRoleMappingRequestBuilder(ElasticsearchClient client) { + super(client, DeleteRoleMappingAction.INSTANCE, new DeleteRoleMappingRequest()); } public DeleteRoleMappingRequestBuilder name(String name) { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/rolemapping/GetRoleMappingsRequestBuilder.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/rolemapping/GetRoleMappingsRequestBuilder.java index db4f8b7d88746..23b94967fb1e1 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/rolemapping/GetRoleMappingsRequestBuilder.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/rolemapping/GetRoleMappingsRequestBuilder.java @@ -15,8 +15,8 @@ */ public class GetRoleMappingsRequestBuilder extends ActionRequestBuilder { - public GetRoleMappingsRequestBuilder(ElasticsearchClient client, GetRoleMappingsAction action) { - super(client, action, new GetRoleMappingsRequest()); + public GetRoleMappingsRequestBuilder(ElasticsearchClient client) { + super(client, GetRoleMappingsAction.INSTANCE, new GetRoleMappingsRequest()); } public GetRoleMappingsRequestBuilder names(String... names) { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/rolemapping/PutRoleMappingRequestBuilder.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/rolemapping/PutRoleMappingRequestBuilder.java index 14f722d169410..8e1b63c2ab8d9 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/rolemapping/PutRoleMappingRequestBuilder.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/rolemapping/PutRoleMappingRequestBuilder.java @@ -26,8 +26,8 @@ public class PutRoleMappingRequestBuilder extends ActionRequestBuilder implements WriteRequestBuilder { - public PutRoleMappingRequestBuilder(ElasticsearchClient client, PutRoleMappingAction action) { - super(client, action, new PutRoleMappingRequest()); + public PutRoleMappingRequestBuilder(ElasticsearchClient client) { + super(client, PutRoleMappingAction.INSTANCE, new PutRoleMappingRequest()); } /** diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/client/SecurityClient.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/client/SecurityClient.java deleted file mode 100644 index 4619035d0daaf..0000000000000 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/client/SecurityClient.java +++ /dev/null @@ -1,392 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ -package org.elasticsearch.xpack.core.security.client; - -import org.elasticsearch.action.ActionFuture; -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.client.ElasticsearchClient; -import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.xcontent.XContentType; -import org.elasticsearch.xpack.core.security.action.CreateApiKeyAction; -import org.elasticsearch.xpack.core.security.action.CreateApiKeyRequest; -import org.elasticsearch.xpack.core.security.action.CreateApiKeyRequestBuilder; -import org.elasticsearch.xpack.core.security.action.CreateApiKeyResponse; -import org.elasticsearch.xpack.core.security.action.GetApiKeyAction; -import org.elasticsearch.xpack.core.security.action.GetApiKeyRequest; -import org.elasticsearch.xpack.core.security.action.GetApiKeyResponse; -import org.elasticsearch.xpack.core.security.action.InvalidateApiKeyAction; -import org.elasticsearch.xpack.core.security.action.InvalidateApiKeyRequest; -import org.elasticsearch.xpack.core.security.action.InvalidateApiKeyResponse; -import org.elasticsearch.xpack.core.security.action.privilege.DeletePrivilegesAction; -import org.elasticsearch.xpack.core.security.action.privilege.DeletePrivilegesRequestBuilder; -import org.elasticsearch.xpack.core.security.action.privilege.GetPrivilegesAction; -import org.elasticsearch.xpack.core.security.action.privilege.GetPrivilegesRequestBuilder; -import org.elasticsearch.xpack.core.security.action.privilege.PutPrivilegesAction; -import org.elasticsearch.xpack.core.security.action.privilege.PutPrivilegesRequestBuilder; -import org.elasticsearch.xpack.core.security.action.realm.ClearRealmCacheAction; -import org.elasticsearch.xpack.core.security.action.realm.ClearRealmCacheRequest; -import org.elasticsearch.xpack.core.security.action.realm.ClearRealmCacheRequestBuilder; -import org.elasticsearch.xpack.core.security.action.realm.ClearRealmCacheResponse; -import org.elasticsearch.xpack.core.security.action.role.ClearRolesCacheAction; -import org.elasticsearch.xpack.core.security.action.role.ClearRolesCacheRequest; -import org.elasticsearch.xpack.core.security.action.role.ClearRolesCacheRequestBuilder; -import org.elasticsearch.xpack.core.security.action.role.ClearRolesCacheResponse; -import org.elasticsearch.xpack.core.security.action.role.DeleteRoleAction; -import org.elasticsearch.xpack.core.security.action.role.DeleteRoleRequest; -import org.elasticsearch.xpack.core.security.action.role.DeleteRoleRequestBuilder; -import org.elasticsearch.xpack.core.security.action.role.DeleteRoleResponse; -import org.elasticsearch.xpack.core.security.action.role.GetRolesAction; -import org.elasticsearch.xpack.core.security.action.role.GetRolesRequest; -import org.elasticsearch.xpack.core.security.action.role.GetRolesRequestBuilder; -import org.elasticsearch.xpack.core.security.action.role.GetRolesResponse; -import org.elasticsearch.xpack.core.security.action.role.PutRoleAction; -import org.elasticsearch.xpack.core.security.action.role.PutRoleRequest; -import org.elasticsearch.xpack.core.security.action.role.PutRoleRequestBuilder; -import org.elasticsearch.xpack.core.security.action.role.PutRoleResponse; -import org.elasticsearch.xpack.core.security.action.rolemapping.DeleteRoleMappingAction; -import org.elasticsearch.xpack.core.security.action.rolemapping.DeleteRoleMappingRequestBuilder; -import org.elasticsearch.xpack.core.security.action.rolemapping.GetRoleMappingsAction; -import org.elasticsearch.xpack.core.security.action.rolemapping.GetRoleMappingsRequest; -import org.elasticsearch.xpack.core.security.action.rolemapping.GetRoleMappingsRequestBuilder; -import org.elasticsearch.xpack.core.security.action.rolemapping.GetRoleMappingsResponse; -import org.elasticsearch.xpack.core.security.action.rolemapping.PutRoleMappingAction; -import org.elasticsearch.xpack.core.security.action.rolemapping.PutRoleMappingRequestBuilder; -import org.elasticsearch.xpack.core.security.action.saml.SamlAuthenticateAction; -import org.elasticsearch.xpack.core.security.action.saml.SamlAuthenticateRequest; -import org.elasticsearch.xpack.core.security.action.saml.SamlAuthenticateRequestBuilder; -import org.elasticsearch.xpack.core.security.action.saml.SamlAuthenticateResponse; -import org.elasticsearch.xpack.core.security.action.saml.SamlPrepareAuthenticationRequestBuilder; -import org.elasticsearch.xpack.core.security.action.token.CreateTokenAction; -import org.elasticsearch.xpack.core.security.action.token.CreateTokenRequest; -import org.elasticsearch.xpack.core.security.action.token.CreateTokenRequestBuilder; -import org.elasticsearch.xpack.core.security.action.token.CreateTokenResponse; -import org.elasticsearch.xpack.core.security.action.token.InvalidateTokenAction; -import org.elasticsearch.xpack.core.security.action.token.InvalidateTokenRequest; -import org.elasticsearch.xpack.core.security.action.token.InvalidateTokenRequestBuilder; -import org.elasticsearch.xpack.core.security.action.token.InvalidateTokenResponse; -import org.elasticsearch.xpack.core.security.action.token.RefreshTokenAction; -import org.elasticsearch.xpack.core.security.action.user.ChangePasswordAction; -import org.elasticsearch.xpack.core.security.action.user.ChangePasswordRequest; -import org.elasticsearch.xpack.core.security.action.user.ChangePasswordRequestBuilder; -import org.elasticsearch.xpack.core.security.action.user.ChangePasswordResponse; -import org.elasticsearch.xpack.core.security.action.user.DeleteUserAction; -import org.elasticsearch.xpack.core.security.action.user.DeleteUserRequest; -import org.elasticsearch.xpack.core.security.action.user.DeleteUserRequestBuilder; -import org.elasticsearch.xpack.core.security.action.user.DeleteUserResponse; -import org.elasticsearch.xpack.core.security.action.user.GetUsersAction; -import org.elasticsearch.xpack.core.security.action.user.GetUsersRequest; -import org.elasticsearch.xpack.core.security.action.user.GetUsersRequestBuilder; -import org.elasticsearch.xpack.core.security.action.user.GetUsersResponse; -import org.elasticsearch.xpack.core.security.action.user.HasPrivilegesAction; -import org.elasticsearch.xpack.core.security.action.user.HasPrivilegesRequest; -import org.elasticsearch.xpack.core.security.action.user.HasPrivilegesRequestBuilder; -import org.elasticsearch.xpack.core.security.action.user.HasPrivilegesResponse; -import org.elasticsearch.xpack.core.security.action.user.GetUserPrivilegesAction; -import org.elasticsearch.xpack.core.security.action.user.GetUserPrivilegesRequest; -import org.elasticsearch.xpack.core.security.action.user.GetUserPrivilegesRequestBuilder; -import org.elasticsearch.xpack.core.security.action.user.GetUserPrivilegesResponse; -import org.elasticsearch.xpack.core.security.action.user.PutUserAction; -import org.elasticsearch.xpack.core.security.action.user.PutUserRequest; -import org.elasticsearch.xpack.core.security.action.user.PutUserRequestBuilder; -import org.elasticsearch.xpack.core.security.action.user.PutUserResponse; -import org.elasticsearch.xpack.core.security.action.user.SetEnabledAction; -import org.elasticsearch.xpack.core.security.action.user.SetEnabledRequest; -import org.elasticsearch.xpack.core.security.action.user.SetEnabledRequestBuilder; -import org.elasticsearch.xpack.core.security.action.user.SetEnabledResponse; -import org.elasticsearch.xpack.core.security.authc.support.Hasher; - -import java.io.IOException; -import java.util.List; - -/** - * A wrapper to elasticsearch clients that exposes all Security related APIs - */ -public class SecurityClient { - - private final ElasticsearchClient client; - - public SecurityClient(ElasticsearchClient client) { - this.client = client; - } - - /**************** - * authc things * - ****************/ - - /** - * Clears the realm caches. It's possible to clear all user entries from all realms in the cluster or alternatively - * select the realms (by their unique names) and/or users (by their usernames) that should be evicted. - */ - public ClearRealmCacheRequestBuilder prepareClearRealmCache() { - return new ClearRealmCacheRequestBuilder(client); - } - - /** - * Clears the realm caches. It's possible to clear all user entries from all realms in the cluster or alternatively - * select the realms (by their unique names) and/or users (by their usernames) that should be evicted. - */ - public void clearRealmCache(ClearRealmCacheRequest request, ActionListener listener) { - client.execute(ClearRealmCacheAction.INSTANCE, request, listener); - } - - /** - * Clears the realm caches. It's possible to clear all user entries from all realms in the cluster or alternatively - * select the realms (by their unique names) and/or users (by their usernames) that should be evicted. - */ - public ActionFuture clearRealmCache(ClearRealmCacheRequest request) { - return client.execute(ClearRealmCacheAction.INSTANCE, request); - } - - /**************** - * authz things * - ****************/ - - /** - * Clears the roles cache. This API only works for the naitve roles that are stored in an elasticsearch index. It is - * possible to clear the cache of all roles or to specify the names of individual roles that should have their cache - * cleared. - */ - public ClearRolesCacheRequestBuilder prepareClearRolesCache() { - return new ClearRolesCacheRequestBuilder(client); - } - - /** - * Clears the roles cache. This API only works for the naitve roles that are stored in an elasticsearch index. It is - * possible to clear the cache of all roles or to specify the names of individual roles that should have their cache - * cleared. - */ - public void clearRolesCache(ClearRolesCacheRequest request, ActionListener listener) { - client.execute(ClearRolesCacheAction.INSTANCE, request, listener); - } - - /** - * Clears the roles cache. This API only works for the naitve roles that are stored in an elasticsearch index. It is - * possible to clear the cache of all roles or to specify the names of individual roles that should have their cache - * cleared. - */ - public ActionFuture clearRolesCache(ClearRolesCacheRequest request) { - return client.execute(ClearRolesCacheAction.INSTANCE, request); - } - - /** - * Permissions / Privileges - */ - public HasPrivilegesRequestBuilder prepareHasPrivileges(String username) { - return new HasPrivilegesRequestBuilder(client).username(username); - } - - public HasPrivilegesRequestBuilder prepareHasPrivileges(String username, BytesReference source, XContentType xContentType) - throws IOException { - return new HasPrivilegesRequestBuilder(client).source(username, source, xContentType); - } - - public void hasPrivileges(HasPrivilegesRequest request, ActionListener listener) { - client.execute(HasPrivilegesAction.INSTANCE, request, listener); - } - - public GetUserPrivilegesRequestBuilder prepareGetUserPrivileges(String username) { - return new GetUserPrivilegesRequestBuilder(client).username(username); - } - - public void listUserPrivileges(GetUserPrivilegesRequest request, ActionListener listener) { - client.execute(GetUserPrivilegesAction.INSTANCE, request, listener); - } - - /** - * User Management - */ - - public GetUsersRequestBuilder prepareGetUsers(String... usernames) { - return new GetUsersRequestBuilder(client).usernames(usernames); - } - - public void getUsers(GetUsersRequest request, ActionListener listener) { - client.execute(GetUsersAction.INSTANCE, request, listener); - } - - public DeleteUserRequestBuilder prepareDeleteUser(String username) { - return new DeleteUserRequestBuilder(client).username(username); - } - - public void deleteUser(DeleteUserRequest request, ActionListener listener) { - client.execute(DeleteUserAction.INSTANCE, request, listener); - } - - public PutUserRequestBuilder preparePutUser(String username, BytesReference source, XContentType xContentType, Hasher hasher) - throws IOException { - return new PutUserRequestBuilder(client).source(username, source, xContentType, hasher); - } - - public PutUserRequestBuilder preparePutUser(String username, char[] password, Hasher hasher, String... roles) { - return new PutUserRequestBuilder(client).username(username).password(password, hasher).roles(roles); - } - - public void putUser(PutUserRequest request, ActionListener listener) { - client.execute(PutUserAction.INSTANCE, request, listener); - } - - /** - * Populates the {@link ChangePasswordRequest} with the username and password. Note: the passed in char[] will be cleared by this - * method. - */ - public ChangePasswordRequestBuilder prepareChangePassword(String username, char[] password, Hasher hasher) { - return new ChangePasswordRequestBuilder(client).username(username).password(password, hasher); - } - - public ChangePasswordRequestBuilder prepareChangePassword(String username, BytesReference source, XContentType xContentType, - Hasher hasher) throws IOException { - return new ChangePasswordRequestBuilder(client).username(username).source(source, xContentType, hasher); - } - - public void changePassword(ChangePasswordRequest request, ActionListener listener) { - client.execute(ChangePasswordAction.INSTANCE, request, listener); - } - - public SetEnabledRequestBuilder prepareSetEnabled(String username, boolean enabled) { - return new SetEnabledRequestBuilder(client).username(username).enabled(enabled); - } - - public void setEnabled(SetEnabledRequest request, ActionListener listener) { - client.execute(SetEnabledAction.INSTANCE, request, listener); - } - - /** - * Role Management - */ - - public GetRolesRequestBuilder prepareGetRoles(String... names) { - return new GetRolesRequestBuilder(client).names(names); - } - - public void getRoles(GetRolesRequest request, ActionListener listener) { - client.execute(GetRolesAction.INSTANCE, request, listener); - } - - public DeleteRoleRequestBuilder prepareDeleteRole(String name) { - return new DeleteRoleRequestBuilder(client).name(name); - } - - public void deleteRole(DeleteRoleRequest request, ActionListener listener) { - client.execute(DeleteRoleAction.INSTANCE, request, listener); - } - - public PutRoleRequestBuilder preparePutRole(String name) { - return new PutRoleRequestBuilder(client).name(name); - } - - public PutRoleRequestBuilder preparePutRole(String name, BytesReference source, XContentType xContentType) throws IOException { - return new PutRoleRequestBuilder(client).source(name, source, xContentType); - } - - public void putRole(PutRoleRequest request, ActionListener listener) { - client.execute(PutRoleAction.INSTANCE, request, listener); - } - - /** - * Role Mappings - */ - - public GetRoleMappingsRequestBuilder prepareGetRoleMappings(String... names) { - return new GetRoleMappingsRequestBuilder(client, GetRoleMappingsAction.INSTANCE) - .names(names); - } - - public void getRoleMappings(GetRoleMappingsRequest request, - ActionListener listener) { - client.execute(GetRoleMappingsAction.INSTANCE, request, listener); - } - - public PutRoleMappingRequestBuilder preparePutRoleMapping( - String name, BytesReference content, XContentType xContentType) throws IOException { - return new PutRoleMappingRequestBuilder(client, PutRoleMappingAction.INSTANCE).source(name, content, xContentType); - } - - public DeleteRoleMappingRequestBuilder prepareDeleteRoleMapping(String name) { - return new DeleteRoleMappingRequestBuilder(client, DeleteRoleMappingAction.INSTANCE) - .name(name); - } - - /* -- Application Privileges -- */ - public GetPrivilegesRequestBuilder prepareGetPrivileges(String applicationName, String[] privileges) { - return new GetPrivilegesRequestBuilder(client, GetPrivilegesAction.INSTANCE).application(applicationName).privileges(privileges); - } - - public PutPrivilegesRequestBuilder preparePutPrivileges(BytesReference bytesReference, XContentType xContentType) throws IOException { - return new PutPrivilegesRequestBuilder(client, PutPrivilegesAction.INSTANCE).source(bytesReference, xContentType); - } - - public DeletePrivilegesRequestBuilder prepareDeletePrivileges(String applicationName, String[] privileges) { - return new DeletePrivilegesRequestBuilder(client, DeletePrivilegesAction.INSTANCE) - .application(applicationName) - .privileges(privileges); - } - - public CreateTokenRequestBuilder prepareCreateToken() { - return new CreateTokenRequestBuilder(client, CreateTokenAction.INSTANCE); - } - - public void createToken(CreateTokenRequest request, ActionListener listener) { - client.execute(CreateTokenAction.INSTANCE, request, listener); - } - - public InvalidateTokenRequestBuilder prepareInvalidateToken(String token) { - return new InvalidateTokenRequestBuilder(client).setTokenString(token); - } - - public InvalidateTokenRequestBuilder prepareInvalidateToken() { - return new InvalidateTokenRequestBuilder(client); - } - - public void invalidateToken(InvalidateTokenRequest request, ActionListener listener) { - client.execute(InvalidateTokenAction.INSTANCE, request, listener); - } - - /* -- Api Keys -- */ - public CreateApiKeyRequestBuilder prepareCreateApiKey() { - return new CreateApiKeyRequestBuilder(client); - } - - public CreateApiKeyRequestBuilder prepareCreateApiKey(BytesReference bytesReference, XContentType xContentType) throws IOException { - return new CreateApiKeyRequestBuilder(client).source(bytesReference, xContentType); - } - - public void createApiKey(CreateApiKeyRequest request, ActionListener listener) { - client.execute(CreateApiKeyAction.INSTANCE, request, listener); - } - - public void invalidateApiKey(InvalidateApiKeyRequest request, ActionListener listener) { - client.execute(InvalidateApiKeyAction.INSTANCE, request, listener); - } - - public void getApiKey(GetApiKeyRequest request, ActionListener listener) { - client.execute(GetApiKeyAction.INSTANCE, request, listener); - } - - public SamlAuthenticateRequestBuilder prepareSamlAuthenticate(byte[] xmlContent, List validIds) { - final SamlAuthenticateRequestBuilder builder = new SamlAuthenticateRequestBuilder(client); - builder.saml(xmlContent); - builder.validRequestIds(validIds); - return builder; - } - - public void samlAuthenticate(SamlAuthenticateRequest request, ActionListener listener) { - client.execute(SamlAuthenticateAction.INSTANCE, request, listener); - } - - public SamlPrepareAuthenticationRequestBuilder prepareSamlPrepareAuthentication() { - return new SamlPrepareAuthenticationRequestBuilder(client); - } - - public CreateTokenRequestBuilder prepareRefreshToken(String refreshToken) { - return new CreateTokenRequestBuilder(client, RefreshTokenAction.INSTANCE) - .setRefreshToken(refreshToken) - .setGrantType("refresh_token"); - } - - public void refreshToken(CreateTokenRequest request, ActionListener listener) { - client.execute(RefreshTokenAction.INSTANCE, request, listener); - } -} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/NativeUsersStore.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/NativeUsersStore.java index 0270e31216c42..b8495e20b10bd 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/NativeUsersStore.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/NativeUsersStore.java @@ -38,6 +38,7 @@ import org.elasticsearch.search.SearchHit; import org.elasticsearch.xpack.core.XPackSettings; import org.elasticsearch.xpack.core.security.ScrollHelper; +import org.elasticsearch.xpack.core.security.action.realm.ClearRealmCacheAction; import org.elasticsearch.xpack.core.security.action.realm.ClearRealmCacheRequest; import org.elasticsearch.xpack.core.security.action.realm.ClearRealmCacheResponse; import org.elasticsearch.xpack.core.security.action.user.ChangePasswordRequest; @@ -46,7 +47,6 @@ import org.elasticsearch.xpack.core.security.authc.AuthenticationResult; import org.elasticsearch.xpack.core.security.authc.esnative.ClientReservedRealm; import org.elasticsearch.xpack.core.security.authc.support.Hasher; -import org.elasticsearch.xpack.core.security.client.SecurityClient; import org.elasticsearch.xpack.core.security.user.SystemUser; import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.core.security.user.User.Fields; @@ -617,11 +617,9 @@ public void onFailure(Exception e) { } private void clearRealmCache(String username, ActionListener listener, Response response) { - SecurityClient securityClient = new SecurityClient(client); - ClearRealmCacheRequest request = securityClient.prepareClearRealmCache() - .usernames(username).request(); - executeAsyncWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN, request, - new ActionListener() { + ClearRealmCacheRequest request = new ClearRealmCacheRequest().usernames(username); + executeAsyncWithOrigin(client, SECURITY_ORIGIN, ClearRealmCacheAction.INSTANCE, request, + new ActionListener<>() { @Override public void onResponse(ClearRealmCacheResponse nodes) { listener.onResponse(response); @@ -634,7 +632,7 @@ public void onFailure(Exception e) { + "] failed. please clear the realm cache manually", e); listener.onFailure(exception); } - }, securityClient::clearRealmCache); + }); } @Nullable diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStore.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStore.java index bb98dddbe1ddf..0c425d771a4c3 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStore.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStore.java @@ -30,12 +30,11 @@ import org.elasticsearch.script.ScriptService; import org.elasticsearch.xpack.core.security.ScrollHelper; import org.elasticsearch.xpack.core.security.action.realm.ClearRealmCacheAction; -import org.elasticsearch.xpack.core.security.action.realm.ClearRealmCacheResponse; +import org.elasticsearch.xpack.core.security.action.realm.ClearRealmCacheRequest; import org.elasticsearch.xpack.core.security.action.rolemapping.DeleteRoleMappingRequest; import org.elasticsearch.xpack.core.security.action.rolemapping.PutRoleMappingRequest; import org.elasticsearch.xpack.core.security.authc.support.mapper.ExpressionRoleMapping; import org.elasticsearch.xpack.core.security.authc.support.mapper.expressiondsl.ExpressionModel; -import org.elasticsearch.xpack.core.security.client.SecurityClient; import org.elasticsearch.xpack.core.security.index.RestrictedIndicesNames; import org.elasticsearch.xpack.security.authc.support.CachingRealm; import org.elasticsearch.xpack.security.authc.support.UserRoleMapper; @@ -336,10 +335,8 @@ private void refreshRealms(ActionListener listener, Result resu } final String[] realmNames = this.realmsToRefresh.toArray(Strings.EMPTY_ARRAY); - final SecurityClient securityClient = new SecurityClient(client); - executeAsyncWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN, - securityClient.prepareClearRealmCache().realms(realmNames).request(), - ActionListener.wrap( + executeAsyncWithOrigin(client, SECURITY_ORIGIN, ClearRealmCacheAction.INSTANCE, new ClearRealmCacheRequest().realms(realmNames), + ActionListener.wrap( response -> { logger.debug((org.apache.logging.log4j.util.Supplier) () -> new ParameterizedMessage( "Cleared cached in realms [{}] due to role mapping change", Arrays.toString(realmNames))); @@ -348,8 +345,7 @@ private void refreshRealms(ActionListener listener, Result resu ex -> { logger.warn(new ParameterizedMessage("Failed to clear cache for realms [{}]", Arrays.toString(realmNames)), ex); listener.onFailure(ex); - }), - securityClient::clearRealmCache); + })); } @Override diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/NativePrivilegeStore.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/NativePrivilegeStore.java index 7e70f6116cee7..64c9df8e7005a 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/NativePrivilegeStore.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/NativePrivilegeStore.java @@ -41,10 +41,10 @@ import org.elasticsearch.index.query.TermsQueryBuilder; import org.elasticsearch.xpack.core.ClientHelper; import org.elasticsearch.xpack.core.security.ScrollHelper; +import org.elasticsearch.xpack.core.security.action.role.ClearRolesCacheAction; import org.elasticsearch.xpack.core.security.action.role.ClearRolesCacheRequest; import org.elasticsearch.xpack.core.security.action.role.ClearRolesCacheResponse; import org.elasticsearch.xpack.core.security.authz.privilege.ApplicationPrivilegeDescriptor; -import org.elasticsearch.xpack.core.security.client.SecurityClient; import org.elasticsearch.xpack.security.support.SecurityIndexManager; import java.io.IOException; @@ -83,13 +83,11 @@ public class NativePrivilegeStore { private final Settings settings; private final Client client; - private final SecurityClient securityClient; private final SecurityIndexManager securityIndexManager; public NativePrivilegeStore(Settings settings, Client client, SecurityIndexManager securityIndexManager) { this.settings = settings; this.client = client; - this.securityClient = new SecurityClient(client); this.securityIndexManager = securityIndexManager; } @@ -295,8 +293,8 @@ public void deletePrivileges(String application, Collection names, Write private void clearRolesCache(ActionListener listener, T value) { // This currently clears _all_ roles, but could be improved to clear only those roles that reference the affected application ClearRolesCacheRequest request = new ClearRolesCacheRequest(); - executeAsyncWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN, request, - new ActionListener() { + executeAsyncWithOrigin(client, SECURITY_ORIGIN, ClearRolesCacheAction.INSTANCE, request, + new ActionListener<>() { @Override public void onResponse(ClearRolesCacheResponse nodes) { listener.onResponse(value); @@ -308,7 +306,7 @@ public void onFailure(Exception e) { listener.onFailure( new ElasticsearchException("clearing the role cache failed. please clear the role cache manually", e)); } - }, securityClient::clearRolesCache); + }); } private ApplicationPrivilegeDescriptor buildPrivilege(String docId, BytesReference source) { diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/NativeRolesStore.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/NativeRolesStore.java index 2169f84082b7f..b962e853cc629 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/NativeRolesStore.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/NativeRolesStore.java @@ -36,6 +36,7 @@ import org.elasticsearch.license.LicenseUtils; import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.xpack.core.security.ScrollHelper; +import org.elasticsearch.xpack.core.security.action.role.ClearRolesCacheAction; import org.elasticsearch.xpack.core.security.action.role.ClearRolesCacheRequest; import org.elasticsearch.xpack.core.security.action.role.ClearRolesCacheResponse; import org.elasticsearch.xpack.core.security.action.role.DeleteRoleRequest; @@ -43,7 +44,6 @@ import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; import org.elasticsearch.xpack.core.security.authz.RoleDescriptor.IndicesPrivileges; import org.elasticsearch.xpack.core.security.authz.store.RoleRetrievalResult; -import org.elasticsearch.xpack.core.security.client.SecurityClient; import org.elasticsearch.xpack.security.support.SecurityIndexManager; import java.io.IOException; @@ -84,13 +84,11 @@ public class NativeRolesStore implements BiConsumer, ActionListener< private final Client client; private final XPackLicenseState licenseState; - private SecurityClient securityClient; private final SecurityIndexManager securityIndex; public NativeRolesStore(Settings settings, Client client, XPackLicenseState licenseState, SecurityIndexManager securityIndex) { this.settings = settings; this.client = client; - this.securityClient = new SecurityClient(client); this.licenseState = licenseState; this.securityIndex = securityIndex; } @@ -338,8 +336,8 @@ private void executeGetRoleRequest(String role, ActionListener list private void clearRoleCache(final String role, ActionListener listener, Response response) { ClearRolesCacheRequest request = new ClearRolesCacheRequest().names(role); - executeAsyncWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN, request, - new ActionListener() { + executeAsyncWithOrigin(client, SECURITY_ORIGIN, ClearRolesCacheAction.INSTANCE, request, + new ActionListener<>() { @Override public void onResponse(ClearRolesCacheResponse nodes) { listener.onResponse(response); @@ -352,7 +350,7 @@ public void onFailure(Exception e) { + "] failed. please clear the role cache manually", e); listener.onFailure(exception); } - }, securityClient::clearRolesCache); + }); } @Nullable diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/apikey/RestCreateApiKeyAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/apikey/RestCreateApiKeyAction.java index 14d4726553dff..b75de301e7e4a 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/apikey/RestCreateApiKeyAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/apikey/RestCreateApiKeyAction.java @@ -9,15 +9,12 @@ import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.action.RestToXContentListener; import org.elasticsearch.xpack.core.security.action.CreateApiKeyRequest; import org.elasticsearch.xpack.core.security.action.CreateApiKeyRequestBuilder; -import org.elasticsearch.xpack.core.security.action.CreateApiKeyResponse; -import org.elasticsearch.xpack.core.security.client.SecurityClient; import java.io.IOException; @@ -44,13 +41,11 @@ public String getName() { @Override protected RestChannelConsumer innerPrepareRequest(final RestRequest request, final NodeClient client) throws IOException { - try (XContentParser parser = request.contentParser()) { - String refresh = request.param("refresh"); - CreateApiKeyRequestBuilder builder = new SecurityClient(client) - .prepareCreateApiKey(request.requiredContent(), request.getXContentType()) - .setRefreshPolicy((refresh != null) ? WriteRequest.RefreshPolicy.parse(request.param("refresh")) - : CreateApiKeyRequest.DEFAULT_REFRESH_POLICY); - return channel -> builder.execute(new RestToXContentListener(channel)); - } + String refresh = request.param("refresh"); + CreateApiKeyRequestBuilder builder = new CreateApiKeyRequestBuilder(client) + .source(request.requiredContent(), request.getXContentType()) + .setRefreshPolicy((refresh != null) ? + WriteRequest.RefreshPolicy.parse(request.param("refresh")) : CreateApiKeyRequest.DEFAULT_REFRESH_POLICY); + return channel -> builder.execute(new RestToXContentListener<>(channel)); } } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/privilege/RestDeletePrivilegesAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/privilege/RestDeletePrivilegesAction.java index 1f96df23e0e77..41758aa26016f 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/privilege/RestDeletePrivilegesAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/privilege/RestDeletePrivilegesAction.java @@ -17,8 +17,8 @@ import org.elasticsearch.rest.RestResponse; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.rest.action.RestBuilderListener; +import org.elasticsearch.xpack.core.security.action.privilege.DeletePrivilegesRequestBuilder; import org.elasticsearch.xpack.core.security.action.privilege.DeletePrivilegesResponse; -import org.elasticsearch.xpack.core.security.client.SecurityClient; import org.elasticsearch.xpack.security.rest.action.SecurityBaseRestHandler; import java.io.IOException; @@ -54,21 +54,23 @@ public RestChannelConsumer innerPrepareRequest(RestRequest request, NodeClient c final String application = request.param("application"); final String[] privileges = request.paramAsStringArray("privilege", null); final String refresh = request.param("refresh"); - return channel -> new SecurityClient(client).prepareDeletePrivileges(application, privileges) - .setRefreshPolicy(refresh) - .execute(new RestBuilderListener(channel) { - @Override - public RestResponse buildResponse(DeletePrivilegesResponse response, XContentBuilder builder) throws Exception { - builder.startObject(); - builder.startObject(application); - for (String privilege : new HashSet<>(Arrays.asList(privileges))) { - builder.field(privilege, Collections.singletonMap("found", response.found().contains(privilege))); - } - builder.endObject(); - builder.endObject(); - return new BytesRestResponse(response.found().isEmpty() ? RestStatus.NOT_FOUND : RestStatus.OK, builder); + return channel -> new DeletePrivilegesRequestBuilder(client) + .application(application) + .privileges(privileges) + .setRefreshPolicy(refresh) + .execute(new RestBuilderListener<>(channel) { + @Override + public RestResponse buildResponse(DeletePrivilegesResponse response, XContentBuilder builder) throws Exception { + builder.startObject(); + builder.startObject(application); + for (String privilege : new HashSet<>(Arrays.asList(privileges))) { + builder.field(privilege, Collections.singletonMap("found", response.found().contains(privilege))); } - }); + builder.endObject(); + builder.endObject(); + return new BytesRestResponse(response.found().isEmpty() ? RestStatus.NOT_FOUND : RestStatus.OK, builder); + } + }); } } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/privilege/RestGetPrivilegesAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/privilege/RestGetPrivilegesAction.java index e0d0898704e3b..4e390dda0ac72 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/privilege/RestGetPrivilegesAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/privilege/RestGetPrivilegesAction.java @@ -19,9 +19,9 @@ import org.elasticsearch.rest.RestResponse; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.rest.action.RestBuilderListener; +import org.elasticsearch.xpack.core.security.action.privilege.GetPrivilegesRequestBuilder; import org.elasticsearch.xpack.core.security.action.privilege.GetPrivilegesResponse; import org.elasticsearch.xpack.core.security.authz.privilege.ApplicationPrivilegeDescriptor; -import org.elasticsearch.xpack.core.security.client.SecurityClient; import org.elasticsearch.xpack.security.rest.action.SecurityBaseRestHandler; import java.io.IOException; @@ -64,32 +64,34 @@ public RestChannelConsumer innerPrepareRequest(RestRequest request, NodeClient c final String application = request.param("application"); final String[] privileges = request.paramAsStringArray("privilege", Strings.EMPTY_ARRAY); - return channel -> new SecurityClient(client).prepareGetPrivileges(application, privileges) - .execute(new RestBuilderListener(channel) { - @Override - public RestResponse buildResponse(GetPrivilegesResponse response, XContentBuilder builder) throws Exception { - final Map> privsByApp = groupByApplicationName(response.privileges()); - builder.startObject(); - for (String app : privsByApp.keySet()) { - builder.startObject(app); - for (ApplicationPrivilegeDescriptor privilege : privsByApp.get(app)) { - builder.field(privilege.getName(), privilege); - } - builder.endObject(); + return channel -> new GetPrivilegesRequestBuilder(client) + .application(application) + .privileges(privileges) + .execute(new RestBuilderListener<>(channel) { + @Override + public RestResponse buildResponse(GetPrivilegesResponse response, XContentBuilder builder) throws Exception { + final Map> privsByApp = groupByApplicationName(response.privileges()); + builder.startObject(); + for (String app : privsByApp.keySet()) { + builder.startObject(app); + for (ApplicationPrivilegeDescriptor privilege : privsByApp.get(app)) { + builder.field(privilege.getName(), privilege); } builder.endObject(); + } + builder.endObject(); - // if the user asked for specific privileges, but none of them were found - // we'll return an empty result and 404 status code - if (privileges.length != 0 && response.privileges().length == 0) { - return new BytesRestResponse(RestStatus.NOT_FOUND, builder); - } - - // either the user asked for all privileges, or at least one of the privileges - // was found - return new BytesRestResponse(RestStatus.OK, builder); + // if the user asked for specific privileges, but none of them were found + // we'll return an empty result and 404 status code + if (privileges.length != 0 && response.privileges().length == 0) { + return new BytesRestResponse(RestStatus.NOT_FOUND, builder); } - }); + + // either the user asked for all privileges, or at least one of the privileges + // was found + return new BytesRestResponse(RestStatus.OK, builder); + } + }); } static Map> groupByApplicationName(ApplicationPrivilegeDescriptor[] privileges) { diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/privilege/RestPutPrivilegesAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/privilege/RestPutPrivilegesAction.java index abc2f00afcd81..082751affb8b5 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/privilege/RestPutPrivilegesAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/privilege/RestPutPrivilegesAction.java @@ -21,7 +21,6 @@ import org.elasticsearch.xpack.core.security.action.privilege.PutPrivilegesResponse; import org.elasticsearch.xpack.core.security.authz.privilege.ApplicationPrivilege; import org.elasticsearch.xpack.core.security.authz.privilege.ApplicationPrivilegeDescriptor; -import org.elasticsearch.xpack.core.security.client.SecurityClient; import org.elasticsearch.xpack.security.rest.action.SecurityBaseRestHandler; import java.io.IOException; @@ -57,9 +56,9 @@ public String getName() { @Override public RestChannelConsumer innerPrepareRequest(RestRequest request, NodeClient client) throws IOException { - PutPrivilegesRequestBuilder requestBuilder = new SecurityClient(client) - .preparePutPrivileges(request.requiredContent(), request.getXContentType()) - .setRefreshPolicy(request.param("refresh")); + PutPrivilegesRequestBuilder requestBuilder = new PutPrivilegesRequestBuilder(client) + .source(request.requiredContent(), request.getXContentType()) + .setRefreshPolicy(request.param("refresh")); return execute(requestBuilder); } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/realm/RestClearRealmCacheAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/realm/RestClearRealmCacheAction.java index da9b1a1828b66..e1590c39fe90e 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/realm/RestClearRealmCacheAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/realm/RestClearRealmCacheAction.java @@ -13,12 +13,10 @@ import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.action.RestActions.NodesResponseRestListener; +import org.elasticsearch.xpack.core.security.action.realm.ClearRealmCacheAction; import org.elasticsearch.xpack.core.security.action.realm.ClearRealmCacheRequest; -import org.elasticsearch.xpack.core.security.client.SecurityClient; import org.elasticsearch.xpack.security.rest.action.SecurityBaseRestHandler; -import java.io.IOException; - import static org.elasticsearch.rest.RestRequest.Method.POST; public final class RestClearRealmCacheAction extends SecurityBaseRestHandler { @@ -39,13 +37,13 @@ public String getName() { } @Override - public RestChannelConsumer innerPrepareRequest(RestRequest request, NodeClient client) throws IOException { + public RestChannelConsumer innerPrepareRequest(RestRequest request, NodeClient client) { String[] realms = request.paramAsStringArrayOrEmptyIfAll("realms"); String[] usernames = request.paramAsStringArrayOrEmptyIfAll("usernames"); ClearRealmCacheRequest req = new ClearRealmCacheRequest().realms(realms).usernames(usernames); - return channel -> new SecurityClient(client).clearRealmCache(req, new NodesResponseRestListener<>(channel)); + return channel -> client.execute(ClearRealmCacheAction.INSTANCE, req, new NodesResponseRestListener<>(channel)); } } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/role/RestClearRolesCacheAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/role/RestClearRolesCacheAction.java index a51db748ccc29..0fd15051e8231 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/role/RestClearRolesCacheAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/role/RestClearRolesCacheAction.java @@ -13,12 +13,10 @@ import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.action.RestActions.NodesResponseRestListener; +import org.elasticsearch.xpack.core.security.action.role.ClearRolesCacheAction; import org.elasticsearch.xpack.core.security.action.role.ClearRolesCacheRequest; -import org.elasticsearch.xpack.core.security.client.SecurityClient; import org.elasticsearch.xpack.security.rest.action.SecurityBaseRestHandler; -import java.io.IOException; - import static org.elasticsearch.rest.RestRequest.Method.POST; public final class RestClearRolesCacheAction extends SecurityBaseRestHandler { @@ -39,11 +37,11 @@ public String getName() { } @Override - public RestChannelConsumer innerPrepareRequest(RestRequest request, NodeClient client) throws IOException { + public RestChannelConsumer innerPrepareRequest(RestRequest request, NodeClient client) { String[] roles = request.paramAsStringArrayOrEmptyIfAll("name"); ClearRolesCacheRequest req = new ClearRolesCacheRequest().names(roles); - return channel -> new SecurityClient(client).clearRolesCache(req, new NodesResponseRestListener<>(channel)); + return channel -> client.execute(ClearRolesCacheAction.INSTANCE, req, new NodesResponseRestListener<>(channel)); } } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/role/RestDeleteRoleAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/role/RestDeleteRoleAction.java index 26399f4da9118..957cac123486c 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/role/RestDeleteRoleAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/role/RestDeleteRoleAction.java @@ -17,8 +17,8 @@ import org.elasticsearch.rest.RestResponse; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.rest.action.RestBuilderListener; +import org.elasticsearch.xpack.core.security.action.role.DeleteRoleRequestBuilder; import org.elasticsearch.xpack.core.security.action.role.DeleteRoleResponse; -import org.elasticsearch.xpack.core.security.client.SecurityClient; import org.elasticsearch.xpack.security.rest.action.SecurityBaseRestHandler; import java.io.IOException; @@ -50,15 +50,16 @@ public RestChannelConsumer innerPrepareRequest(RestRequest request, NodeClient c final String name = request.param("name"); final String refresh = request.param("refresh"); - return channel -> new SecurityClient(client).prepareDeleteRole(name) - .setRefreshPolicy(refresh) - .execute(new RestBuilderListener(channel) { - @Override - public RestResponse buildResponse(DeleteRoleResponse response, XContentBuilder builder) throws Exception { - return new BytesRestResponse( - response.found() ? RestStatus.OK : RestStatus.NOT_FOUND, - builder.startObject().field("found", response.found()).endObject()); - } - }); + return channel -> new DeleteRoleRequestBuilder(client) + .name(name) + .setRefreshPolicy(refresh) + .execute(new RestBuilderListener<>(channel) { + @Override + public RestResponse buildResponse(DeleteRoleResponse response, XContentBuilder builder) throws Exception { + return new BytesRestResponse( + response.found() ? RestStatus.OK : RestStatus.NOT_FOUND, + builder.startObject().field("found", response.found()).endObject()); + } + }); } } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/role/RestGetRolesAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/role/RestGetRolesAction.java index 850709eea8634..a5141f25f37a1 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/role/RestGetRolesAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/role/RestGetRolesAction.java @@ -18,9 +18,9 @@ import org.elasticsearch.rest.RestResponse; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.rest.action.RestBuilderListener; +import org.elasticsearch.xpack.core.security.action.role.GetRolesRequestBuilder; import org.elasticsearch.xpack.core.security.action.role.GetRolesResponse; import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; -import org.elasticsearch.xpack.core.security.client.SecurityClient; import org.elasticsearch.xpack.security.rest.action.SecurityBaseRestHandler; import java.io.IOException; @@ -53,7 +53,9 @@ public String getName() { @Override public RestChannelConsumer innerPrepareRequest(RestRequest request, NodeClient client) throws IOException { final String[] roles = request.paramAsStringArray("name", Strings.EMPTY_ARRAY); - return channel -> new SecurityClient(client).prepareGetRoles(roles).execute(new RestBuilderListener(channel) { + return channel -> new GetRolesRequestBuilder(client) + .names(roles) + .execute(new RestBuilderListener<>(channel) { @Override public RestResponse buildResponse(GetRolesResponse response, XContentBuilder builder) throws Exception { builder.startObject(); diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/role/RestPutRoleAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/role/RestPutRoleAction.java index 10673cd79fac7..e8b2e6a778386 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/role/RestPutRoleAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/role/RestPutRoleAction.java @@ -19,7 +19,6 @@ import org.elasticsearch.rest.action.RestBuilderListener; import org.elasticsearch.xpack.core.security.action.role.PutRoleRequestBuilder; import org.elasticsearch.xpack.core.security.action.role.PutRoleResponse; -import org.elasticsearch.xpack.core.security.client.SecurityClient; import org.elasticsearch.xpack.security.rest.action.SecurityBaseRestHandler; import java.io.IOException; @@ -52,10 +51,10 @@ public String getName() { @Override public RestChannelConsumer innerPrepareRequest(RestRequest request, NodeClient client) throws IOException { - PutRoleRequestBuilder requestBuilder = new SecurityClient(client) - .preparePutRole(request.param("name"), request.requiredContent(), request.getXContentType()) - .setRefreshPolicy(request.param("refresh")); - return channel -> requestBuilder.execute(new RestBuilderListener(channel) { + PutRoleRequestBuilder requestBuilder = new PutRoleRequestBuilder(client) + .source(request.param("name"), request.requiredContent(), request.getXContentType()) + .setRefreshPolicy(request.param("refresh")); + return channel -> requestBuilder.execute(new RestBuilderListener<>(channel) { @Override public RestResponse buildResponse(PutRoleResponse putRoleResponse, XContentBuilder builder) throws Exception { return new BytesRestResponse(RestStatus.OK, builder.startObject().field("role", putRoleResponse).endObject()); diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/rolemapping/RestDeleteRoleMappingAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/rolemapping/RestDeleteRoleMappingAction.java index a2be9ba9e041e..a2383534115cc 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/rolemapping/RestDeleteRoleMappingAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/rolemapping/RestDeleteRoleMappingAction.java @@ -17,8 +17,8 @@ import org.elasticsearch.rest.RestResponse; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.rest.action.RestBuilderListener; +import org.elasticsearch.xpack.core.security.action.rolemapping.DeleteRoleMappingRequestBuilder; import org.elasticsearch.xpack.core.security.action.rolemapping.DeleteRoleMappingResponse; -import org.elasticsearch.xpack.core.security.client.SecurityClient; import org.elasticsearch.xpack.security.rest.action.SecurityBaseRestHandler; import java.io.IOException; @@ -51,14 +51,15 @@ public RestChannelConsumer innerPrepareRequest(RestRequest request, NodeClient c final String name = request.param("name"); final String refresh = request.param("refresh"); - return channel -> new SecurityClient(client).prepareDeleteRoleMapping(name) - .setRefreshPolicy(refresh) - .execute(new RestBuilderListener(channel) { - @Override - public RestResponse buildResponse(DeleteRoleMappingResponse response, XContentBuilder builder) throws Exception { - return new BytesRestResponse(response.isFound() ? RestStatus.OK : RestStatus.NOT_FOUND, - builder.startObject().field("found", response.isFound()).endObject()); - } - }); + return channel -> new DeleteRoleMappingRequestBuilder(client) + .name(name) + .setRefreshPolicy(refresh) + .execute(new RestBuilderListener<>(channel) { + @Override + public RestResponse buildResponse(DeleteRoleMappingResponse response, XContentBuilder builder) throws Exception { + return new BytesRestResponse(response.isFound() ? RestStatus.OK : RestStatus.NOT_FOUND, + builder.startObject().field("found", response.isFound()).endObject()); + } + }); } } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/rolemapping/RestGetRoleMappingsAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/rolemapping/RestGetRoleMappingsAction.java index ceabeb3a62eb2..aeac89e8ffb9a 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/rolemapping/RestGetRoleMappingsAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/rolemapping/RestGetRoleMappingsAction.java @@ -17,9 +17,9 @@ import org.elasticsearch.rest.RestResponse; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.rest.action.RestBuilderListener; +import org.elasticsearch.xpack.core.security.action.rolemapping.GetRoleMappingsRequestBuilder; import org.elasticsearch.xpack.core.security.action.rolemapping.GetRoleMappingsResponse; import org.elasticsearch.xpack.core.security.authc.support.mapper.ExpressionRoleMapping; -import org.elasticsearch.xpack.core.security.client.SecurityClient; import org.elasticsearch.xpack.security.rest.action.SecurityBaseRestHandler; import java.io.IOException; @@ -52,23 +52,24 @@ public String getName() { @Override public RestChannelConsumer innerPrepareRequest(RestRequest request, NodeClient client) throws IOException { final String[] names = request.paramAsStringArrayOrEmptyIfAll("name"); - return channel -> new SecurityClient(client).prepareGetRoleMappings(names) - .execute(new RestBuilderListener(channel) { - @Override - public RestResponse buildResponse(GetRoleMappingsResponse response, XContentBuilder builder) throws Exception { - builder.startObject(); - for (ExpressionRoleMapping mapping : response.mappings()) { - builder.field(mapping.getName(), mapping); - } - builder.endObject(); + return channel -> new GetRoleMappingsRequestBuilder(client) + .names(names) + .execute(new RestBuilderListener<>(channel) { + @Override + public RestResponse buildResponse(GetRoleMappingsResponse response, XContentBuilder builder) throws Exception { + builder.startObject(); + for (ExpressionRoleMapping mapping : response.mappings()) { + builder.field(mapping.getName(), mapping); + } + builder.endObject(); - // if the request specified mapping names, but nothing was found then return a 404 result - if (names.length != 0 && response.mappings().length == 0) { - return new BytesRestResponse(RestStatus.NOT_FOUND, builder); - } else { - return new BytesRestResponse(RestStatus.OK, builder); - } + // if the request specified mapping names, but nothing was found then return a 404 result + if (names.length != 0 && response.mappings().length == 0) { + return new BytesRestResponse(RestStatus.NOT_FOUND, builder); + } else { + return new BytesRestResponse(RestStatus.OK, builder); } - }); + } + }); } } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/rolemapping/RestPutRoleMappingAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/rolemapping/RestPutRoleMappingAction.java index 64c4bc9c52bc1..532a12644a7bd 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/rolemapping/RestPutRoleMappingAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/rolemapping/RestPutRoleMappingAction.java @@ -19,7 +19,6 @@ import org.elasticsearch.rest.action.RestBuilderListener; import org.elasticsearch.xpack.core.security.action.rolemapping.PutRoleMappingRequestBuilder; import org.elasticsearch.xpack.core.security.action.rolemapping.PutRoleMappingResponse; -import org.elasticsearch.xpack.core.security.client.SecurityClient; import org.elasticsearch.xpack.security.rest.action.SecurityBaseRestHandler; import java.io.IOException; @@ -55,11 +54,11 @@ public String getName() { @Override public RestChannelConsumer innerPrepareRequest(RestRequest request, NodeClient client) throws IOException { final String name = request.param("name"); - PutRoleMappingRequestBuilder requestBuilder = new SecurityClient(client) - .preparePutRoleMapping(name, request.requiredContent(), request.getXContentType()) - .setRefreshPolicy(request.param("refresh")); + PutRoleMappingRequestBuilder requestBuilder = new PutRoleMappingRequestBuilder(client) + .source(name, request.requiredContent(), request.getXContentType()) + .setRefreshPolicy(request.param("refresh")); return channel -> requestBuilder.execute( - new RestBuilderListener(channel) { + new RestBuilderListener<>(channel) { @Override public RestResponse buildResponse(PutRoleMappingResponse response, XContentBuilder builder) throws Exception { return new BytesRestResponse(RestStatus.OK, builder.startObject().field("role_mapping", response).endObject()); diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/saml/RestSamlAuthenticateAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/saml/RestSamlAuthenticateAction.java index b73fb70c3fa9d..a61aaf650f99d 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/saml/RestSamlAuthenticateAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/saml/RestSamlAuthenticateAction.java @@ -24,7 +24,6 @@ import org.elasticsearch.rest.action.RestBuilderListener; import org.elasticsearch.xpack.core.security.action.saml.SamlAuthenticateRequestBuilder; import org.elasticsearch.xpack.core.security.action.saml.SamlAuthenticateResponse; -import org.elasticsearch.xpack.core.security.client.SecurityClient; import java.io.IOException; import java.util.Base64; @@ -80,8 +79,9 @@ public RestChannelConsumer innerPrepareRequest(RestRequest request, NodeClient c logger.trace("SAML Authenticate: [{}...] [{}]", Strings.cleanTruncate(input.content, 128), input.ids); return channel -> { final byte[] bytes = decodeBase64(input.content); - final SamlAuthenticateRequestBuilder requestBuilder = new SecurityClient(client).prepareSamlAuthenticate(bytes, input.ids); - requestBuilder.execute(new RestBuilderListener(channel) { + final SamlAuthenticateRequestBuilder requestBuilder = + new SamlAuthenticateRequestBuilder(client).saml(bytes).validRequestIds(input.ids); + requestBuilder.execute(new RestBuilderListener<>(channel) { @Override public RestResponse buildResponse(SamlAuthenticateResponse response, XContentBuilder builder) throws Exception { builder.startObject() diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/user/RestChangePasswordAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/user/RestChangePasswordAction.java index 53341ce0b7283..4a37df0f6fa77 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/user/RestChangePasswordAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/user/RestChangePasswordAction.java @@ -19,9 +19,9 @@ import org.elasticsearch.rest.action.RestBuilderListener; import org.elasticsearch.xpack.core.XPackSettings; import org.elasticsearch.xpack.core.security.SecurityContext; +import org.elasticsearch.xpack.core.security.action.user.ChangePasswordRequestBuilder; import org.elasticsearch.xpack.core.security.action.user.ChangePasswordResponse; import org.elasticsearch.xpack.core.security.authc.support.Hasher; -import org.elasticsearch.xpack.core.security.client.SecurityClient; import org.elasticsearch.xpack.core.security.rest.RestRequestFilter; import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.security.rest.action.SecurityBaseRestHandler; @@ -75,17 +75,16 @@ public RestChannelConsumer innerPrepareRequest(RestRequest request, NodeClient c } final String refresh = request.param("refresh"); - return channel -> - new SecurityClient(client) - .prepareChangePassword(username, request.requiredContent(), request.getXContentType(), passwordHasher) - .setRefreshPolicy(refresh) - .execute(new RestBuilderListener(channel) { - @Override - public RestResponse buildResponse(ChangePasswordResponse changePasswordResponse, - XContentBuilder builder) throws Exception { - return new BytesRestResponse(RestStatus.OK, builder.startObject().endObject()); - } - }); + return channel -> new ChangePasswordRequestBuilder(client) + .username(username) + .source(request.requiredContent(), request.getXContentType(), passwordHasher) + .setRefreshPolicy(refresh) + .execute(new RestBuilderListener<>(channel) { + @Override + public RestResponse buildResponse(ChangePasswordResponse response, XContentBuilder builder) throws Exception { + return new BytesRestResponse(RestStatus.OK, builder.startObject().endObject()); + } + }); } private static final Set FILTERED_FIELDS = Collections.singleton("password"); diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/user/RestDeleteUserAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/user/RestDeleteUserAction.java index 3c810ca230830..bc0eb728c6bf6 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/user/RestDeleteUserAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/user/RestDeleteUserAction.java @@ -17,8 +17,8 @@ import org.elasticsearch.rest.RestResponse; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.rest.action.RestBuilderListener; +import org.elasticsearch.xpack.core.security.action.user.DeleteUserRequestBuilder; import org.elasticsearch.xpack.core.security.action.user.DeleteUserResponse; -import org.elasticsearch.xpack.core.security.client.SecurityClient; import org.elasticsearch.xpack.security.rest.action.SecurityBaseRestHandler; import java.io.IOException; @@ -49,16 +49,17 @@ public String getName() { public RestChannelConsumer innerPrepareRequest(RestRequest request, NodeClient client) throws IOException { final String username = request.param("username"); final String refresh = request.param("refresh"); - return channel -> new SecurityClient(client).prepareDeleteUser(username) - .setRefreshPolicy(refresh) - .execute(new RestBuilderListener(channel) { - @Override - public RestResponse buildResponse(DeleteUserResponse response, XContentBuilder builder) throws Exception { - return new BytesRestResponse(response.found() ? RestStatus.OK : RestStatus.NOT_FOUND, - builder.startObject() - .field("found", response.found()) - .endObject()); - } - }); + return channel -> new DeleteUserRequestBuilder(client) + .username(username) + .setRefreshPolicy(refresh) + .execute(new RestBuilderListener<>(channel) { + @Override + public RestResponse buildResponse(DeleteUserResponse response, XContentBuilder builder) throws Exception { + return new BytesRestResponse(response.found() ? RestStatus.OK : RestStatus.NOT_FOUND, + builder.startObject() + .field("found", response.found()) + .endObject()); + } + }); } } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/user/RestGetUserPrivilegesAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/user/RestGetUserPrivilegesAction.java index b073349d842a3..1f32c866cb3c8 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/user/RestGetUserPrivilegesAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/user/RestGetUserPrivilegesAction.java @@ -26,7 +26,6 @@ import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; import org.elasticsearch.xpack.core.security.authz.privilege.ConditionalClusterPrivilege; import org.elasticsearch.xpack.core.security.authz.privilege.ConditionalClusterPrivileges; -import org.elasticsearch.xpack.core.security.client.SecurityClient; import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.security.rest.action.SecurityBaseRestHandler; @@ -66,7 +65,7 @@ public RestChannelConsumer innerPrepareRequest(RestRequest request, NodeClient c return restChannel -> { throw new ElasticsearchSecurityException("there is no authenticated user"); }; } final String username = user.principal(); - final GetUserPrivilegesRequestBuilder requestBuilder = new SecurityClient(client).prepareGetUserPrivileges(username); + final GetUserPrivilegesRequestBuilder requestBuilder = new GetUserPrivilegesRequestBuilder(client).username(username); return channel -> requestBuilder.execute(new RestListener(channel)); } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/user/RestGetUsersAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/user/RestGetUsersAction.java index 2751022d9f007..eef11d478ca65 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/user/RestGetUsersAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/user/RestGetUsersAction.java @@ -18,8 +18,8 @@ import org.elasticsearch.rest.RestResponse; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.rest.action.RestBuilderListener; +import org.elasticsearch.xpack.core.security.action.user.GetUsersRequestBuilder; import org.elasticsearch.xpack.core.security.action.user.GetUsersResponse; -import org.elasticsearch.xpack.core.security.client.SecurityClient; import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.security.rest.action.SecurityBaseRestHandler; @@ -54,7 +54,7 @@ public String getName() { public RestChannelConsumer innerPrepareRequest(RestRequest request, NodeClient client) throws IOException { String[] usernames = request.paramAsStringArray("username", Strings.EMPTY_ARRAY); - return channel -> new SecurityClient(client).prepareGetUsers(usernames).execute(new RestBuilderListener(channel) { + return channel -> new GetUsersRequestBuilder(client).usernames(usernames).execute(new RestBuilderListener<>(channel) { @Override public RestResponse buildResponse(GetUsersResponse response, XContentBuilder builder) throws Exception { builder.startObject(); diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/user/RestHasPrivilegesAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/user/RestHasPrivilegesAction.java index d26d8db206936..975f25a6d2055 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/user/RestHasPrivilegesAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/user/RestHasPrivilegesAction.java @@ -26,7 +26,6 @@ import org.elasticsearch.xpack.core.security.action.user.HasPrivilegesRequestBuilder; import org.elasticsearch.xpack.core.security.action.user.HasPrivilegesResponse; import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; -import org.elasticsearch.xpack.core.security.client.SecurityClient; import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.security.rest.action.SecurityBaseRestHandler; @@ -75,8 +74,8 @@ public RestChannelConsumer innerPrepareRequest(RestRequest request, NodeClient c return restChannel -> { throw new ElasticsearchSecurityException("there is no authenticated user"); }; } final Tuple content = request.contentOrSourceParam(); - HasPrivilegesRequestBuilder requestBuilder = new SecurityClient(client).prepareHasPrivileges(username, content.v2(), content.v1()); - return channel -> requestBuilder.execute(new RestBuilderListener(channel) { + HasPrivilegesRequestBuilder requestBuilder = new HasPrivilegesRequestBuilder(client).source(username, content.v2(), content.v1()); + return channel -> requestBuilder.execute(new RestBuilderListener<>(channel) { @Override public RestResponse buildResponse(HasPrivilegesResponse response, XContentBuilder builder) throws Exception { response.toXContent(builder, ToXContent.EMPTY_PARAMS); diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/user/RestPutUserAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/user/RestPutUserAction.java index b56daba0bfa4a..39cd5f35fa54b 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/user/RestPutUserAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/user/RestPutUserAction.java @@ -22,7 +22,6 @@ import org.elasticsearch.xpack.core.security.action.user.PutUserRequestBuilder; import org.elasticsearch.xpack.core.security.action.user.PutUserResponse; import org.elasticsearch.xpack.core.security.authc.support.Hasher; -import org.elasticsearch.xpack.core.security.client.SecurityClient; import org.elasticsearch.xpack.core.security.rest.RestRequestFilter; import org.elasticsearch.xpack.security.rest.action.SecurityBaseRestHandler; @@ -60,11 +59,11 @@ public String getName() { @Override public RestChannelConsumer innerPrepareRequest(RestRequest request, NodeClient client) throws IOException { - PutUserRequestBuilder requestBuilder = new SecurityClient(client) - .preparePutUser(request.param("username"), request.requiredContent(), request.getXContentType(), passwordHasher) - .setRefreshPolicy(request.param("refresh")); + PutUserRequestBuilder requestBuilder = new PutUserRequestBuilder(client) + .source(request.param("username"), request.requiredContent(), request.getXContentType(), passwordHasher) + .setRefreshPolicy(request.param("refresh")); - return channel -> requestBuilder.execute(new RestBuilderListener(channel) { + return channel -> requestBuilder.execute(new RestBuilderListener<>(channel) { @Override public RestResponse buildResponse(PutUserResponse putUserResponse, XContentBuilder builder) throws Exception { putUserResponse.toXContent(builder, request); diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/user/RestSetEnabledAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/user/RestSetEnabledAction.java index a78b2c85aa62c..b13f0b328d993 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/user/RestSetEnabledAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/user/RestSetEnabledAction.java @@ -17,8 +17,8 @@ import org.elasticsearch.rest.RestResponse; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.rest.action.RestBuilderListener; +import org.elasticsearch.xpack.core.security.action.user.SetEnabledRequestBuilder; import org.elasticsearch.xpack.core.security.action.user.SetEnabledResponse; -import org.elasticsearch.xpack.core.security.client.SecurityClient; import org.elasticsearch.xpack.security.rest.action.SecurityBaseRestHandler; import java.io.IOException; @@ -61,12 +61,14 @@ public RestChannelConsumer innerPrepareRequest(RestRequest request, NodeClient c final boolean enabled = request.path().endsWith("_enable"); assert enabled || request.path().endsWith("_disable"); final String username = request.param("username"); - return channel -> new SecurityClient(client).prepareSetEnabled(username, enabled) - .execute(new RestBuilderListener(channel) { - @Override - public RestResponse buildResponse(SetEnabledResponse setEnabledResponse, XContentBuilder builder) throws Exception { - return new BytesRestResponse(RestStatus.OK, builder.startObject().endObject()); - } - }); + return channel -> new SetEnabledRequestBuilder(client) + .username(username) + .enabled(enabled) + .execute(new RestBuilderListener<>(channel) { + @Override + public RestResponse buildResponse(SetEnabledResponse setEnabledResponse, XContentBuilder builder) throws Exception { + return new BytesRestResponse(RestStatus.OK, builder.startObject().endObject()); + } + }); } } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/ClearRealmsCacheTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/ClearRealmsCacheTests.java index 145213094d1d8..557fd5813bf2e 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/ClearRealmsCacheTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/ClearRealmsCacheTests.java @@ -16,12 +16,12 @@ import org.elasticsearch.test.SecurityIntegTestCase; import org.elasticsearch.test.SecuritySettingsSource; import org.elasticsearch.test.SecuritySettingsSourceField; +import org.elasticsearch.xpack.core.security.action.realm.ClearRealmCacheAction; import org.elasticsearch.xpack.core.security.action.realm.ClearRealmCacheRequest; import org.elasticsearch.xpack.core.security.action.realm.ClearRealmCacheResponse; import org.elasticsearch.xpack.core.security.authc.AuthenticationResult; import org.elasticsearch.xpack.core.security.authc.Realm; import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken; -import org.elasticsearch.xpack.core.security.client.SecurityClient; import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.security.authc.Realms; import org.junit.BeforeClass; @@ -133,11 +133,9 @@ public void executeRequest() throws Exception { public abstract void executeRequest() throws Exception; static void executeTransportRequest(ClearRealmCacheRequest request) throws Exception { - SecurityClient securityClient = securityClient(client()); - final CountDownLatch latch = new CountDownLatch(1); final AtomicReference error = new AtomicReference<>(); - securityClient.clearRealmCache(request, new ActionListener() { + client().execute(ClearRealmCacheAction.INSTANCE, request, new ActionListener<>() { @Override public void onResponse(ClearRealmCacheResponse response) { assertThat(response.getNodes().size(), equalTo(internalCluster().getNodeNames().length)); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/ClearRolesCacheTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/ClearRolesCacheTests.java index 2a9f2017c50f3..5eeab715ec148 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/ClearRolesCacheTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/ClearRolesCacheTests.java @@ -6,29 +6,33 @@ package org.elasticsearch.integration; import org.elasticsearch.action.support.PlainActionFuture; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.RestHighLevelClient; +import org.elasticsearch.client.security.DeleteRoleRequest; +import org.elasticsearch.client.security.DeleteRoleResponse; +import org.elasticsearch.client.security.GetRolesRequest; +import org.elasticsearch.client.security.GetRolesResponse; +import org.elasticsearch.client.security.PutRoleRequest; +import org.elasticsearch.client.security.PutRoleResponse; +import org.elasticsearch.client.security.RefreshPolicy; +import org.elasticsearch.client.security.user.privileges.IndicesPrivileges; +import org.elasticsearch.client.security.user.privileges.Role; import org.elasticsearch.test.NativeRealmIntegTestCase; -import org.elasticsearch.xpack.core.security.action.role.DeleteRoleResponse; -import org.elasticsearch.xpack.core.security.action.role.GetRolesResponse; -import org.elasticsearch.xpack.core.security.action.role.PutRoleResponse; import org.elasticsearch.xpack.core.security.authz.store.RoleRetrievalResult; -import org.elasticsearch.xpack.core.security.client.SecurityClient; import org.elasticsearch.xpack.core.security.index.RestrictedIndicesNames; import org.elasticsearch.xpack.security.authz.store.NativeRolesStore; import org.junit.Before; import org.junit.BeforeClass; +import java.io.IOException; import java.util.Arrays; import java.util.HashSet; import java.util.List; import java.util.Set; -import static org.elasticsearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; -import static org.elasticsearch.action.support.WriteRequest.RefreshPolicy.NONE; +import static org.elasticsearch.test.SecuritySettingsSource.SECURITY_REQUEST_OPTIONS; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.notNullValue; - /** * Test for the clear roles API */ @@ -45,14 +49,17 @@ public static void init() throws Exception { } @Before - public void setupForTests() { - SecurityClient c = securityClient(); + public void setupForTests() throws IOException { + final RestHighLevelClient restClient = new TestRestHighLevelClient(); // create roles for (String role : roles) { - c.preparePutRole(role) - .cluster("none") - .addIndices(new String[] { "*" }, new String[] { "ALL" }, null, null, null, randomBoolean()) - .get(); + restClient.security().putRole(new PutRoleRequest( + Role.builder().name(role) + .clusterPrivileges("none") + .indicesPrivileges( + IndicesPrivileges.builder().indices("*").privileges("ALL").allowRestrictedIndices(randomBoolean()).build()) + .build(), RefreshPolicy.IMMEDIATE), + SECURITY_REQUEST_OPTIONS); logger.debug("--> created role [{}]", role); } @@ -74,50 +81,50 @@ protected boolean addMockHttpTransport() { } public void testModifyingViaApiClearsCache() throws Exception { - Client client = client(); - SecurityClient securityClient = securityClient(client); - + final RestHighLevelClient restClient = new TestRestHighLevelClient(); int modifiedRolesCount = randomIntBetween(1, roles.length); List toModify = randomSubsetOf(modifiedRolesCount, roles); logger.debug("--> modifying roles {} to have run_as", toModify); for (String role : toModify) { - PutRoleResponse response = securityClient.preparePutRole(role) - .cluster("none") - .addIndices(new String[] { "*" }, new String[] { "ALL" }, null, null, null, randomBoolean()) - .runAs(role) - .setRefreshPolicy(randomBoolean() ? IMMEDIATE : NONE) - .get(); + PutRoleResponse response = restClient.security().putRole(new PutRoleRequest(Role.builder().name(role) + .clusterPrivileges("none") + .indicesPrivileges( + IndicesPrivileges.builder().indices("*").privileges("ALL").allowRestrictedIndices(randomBoolean()).build()) + .runAsPrivilege(role) + .build(), randomBoolean() ? RefreshPolicy.IMMEDIATE : RefreshPolicy.NONE), SECURITY_REQUEST_OPTIONS); assertThat(response.isCreated(), is(false)); logger.debug("--> updated role [{}] with run_as", role); } - assertRolesAreCorrect(securityClient, toModify); + assertRolesAreCorrect(restClient, toModify); } public void testDeletingViaApiClearsCache() throws Exception { + final RestHighLevelClient restClient = new TestRestHighLevelClient(); final int rolesToDelete = randomIntBetween(1, roles.length - 1); List toDelete = randomSubsetOf(rolesToDelete, roles); for (String role : toDelete) { - DeleteRoleResponse response = securityClient().prepareDeleteRole(role).get(); - assertTrue(response.found()); + DeleteRoleResponse response = restClient.security() + .deleteRole(new DeleteRoleRequest(role, RefreshPolicy.IMMEDIATE), SECURITY_REQUEST_OPTIONS); + assertTrue(response.isFound()); } - GetRolesResponse roleResponse = securityClient().prepareGetRoles().names(roles).get(); - assertTrue(roleResponse.hasRoles()); - assertThat(roleResponse.roles().length, is(roles.length - rolesToDelete)); + GetRolesResponse roleResponse = restClient.security().getRoles(new GetRolesRequest(roles), SECURITY_REQUEST_OPTIONS); + assertFalse(roleResponse.getRoles().isEmpty()); + assertThat(roleResponse.getRoles().size(), is(roles.length - rolesToDelete)); } - private void assertRolesAreCorrect(SecurityClient securityClient, List toModify) { + private void assertRolesAreCorrect(RestHighLevelClient restClient, List toModify) throws IOException { for (String role : roles) { logger.debug("--> getting role [{}]", role); - GetRolesResponse roleResponse = securityClient.prepareGetRoles().names(role).get(); - assertThat(roleResponse.hasRoles(), is(true)); - final String[] runAs = roleResponse.roles()[0].getRunAs(); + GetRolesResponse roleResponse = restClient.security().getRoles(new GetRolesRequest(role), SECURITY_REQUEST_OPTIONS); + assertThat(roleResponse.getRoles().isEmpty(), is(false)); + final Set runAs = roleResponse.getRoles().get(0).getRunAsPrivilege(); if (toModify.contains(role)) { - assertThat("role [" + role + "] should be modified and have run as", runAs == null || runAs.length == 0, is(false)); - assertThat(Arrays.asList(runAs).contains(role), is(true)); + assertThat("role [" + role + "] should be modified and have run as", runAs == null || runAs.size() == 0, is(false)); + assertThat(runAs.contains(role), is(true)); } else { - assertThat("role [" + role + "] should be cached and not have run as set but does!", runAs == null || runAs.length == 0, + assertThat("role [" + role + "] should be cached and not have run as set but does!", runAs == null || runAs.size() == 0, is(true)); } } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/MultipleIndicesPermissionsTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/MultipleIndicesPermissionsTests.java index 2035a8b6c19dd..71cdd6c03967e 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/MultipleIndicesPermissionsTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/MultipleIndicesPermissionsTests.java @@ -20,25 +20,29 @@ import org.elasticsearch.client.Request; import org.elasticsearch.client.RequestOptions; import org.elasticsearch.client.Response; +import org.elasticsearch.client.security.PutUserRequest; +import org.elasticsearch.client.security.RefreshPolicy; +import org.elasticsearch.client.security.user.User; import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.test.SecurityIntegTestCase; import org.elasticsearch.test.SecuritySettingsSource; -import org.elasticsearch.xpack.core.security.authc.support.Hasher; import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken; import org.junit.After; import org.junit.Before; import java.util.Collections; +import java.util.List; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; -import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.BASIC_AUTH_HEADER; -import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue; +import static org.elasticsearch.test.SecuritySettingsSource.SECURITY_REQUEST_OPTIONS; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; -import static org.hamcrest.Matchers.is; +import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.BASIC_AUTH_HEADER; +import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue; import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.is; public class MultipleIndicesPermissionsTests extends SecurityIntegTestCase { @@ -47,7 +51,9 @@ public class MultipleIndicesPermissionsTests extends SecurityIntegTestCase { @Before public void waitForSecurityIndexWritable() throws Exception { // adds a dummy user to the native realm to force .security index creation - securityClient().preparePutUser("dummy_user", "password".toCharArray(), Hasher.BCRYPT, "missing_role").get(); + new TestRestHighLevelClient().security().putUser( + PutUserRequest.withPassword(new User("dummy_user", List.of("missing_role")), "password".toCharArray(), true, + RefreshPolicy.IMMEDIATE), SECURITY_REQUEST_OPTIONS); assertSecurityIndexActive(); } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/test/NativeRealmIntegTestCase.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/test/NativeRealmIntegTestCase.java index 78d95ecbca0b8..312886b7dc450 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/test/NativeRealmIntegTestCase.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/test/NativeRealmIntegTestCase.java @@ -8,10 +8,11 @@ import org.elasticsearch.client.Request; import org.elasticsearch.client.RequestOptions; import org.elasticsearch.client.RestClient; +import org.elasticsearch.client.RestHighLevelClient; +import org.elasticsearch.client.security.ClearRealmCacheRequest; import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken; -import org.elasticsearch.xpack.core.security.client.SecurityClient; import org.elasticsearch.xpack.core.security.user.APMSystemUser; import org.elasticsearch.xpack.core.security.user.BeatsSystemUser; import org.elasticsearch.xpack.core.security.user.ElasticUser; @@ -24,9 +25,12 @@ import java.io.IOException; import java.util.Arrays; +import java.util.Collections; import java.util.List; import java.util.Set; +import static org.elasticsearch.test.SecuritySettingsSource.SECURITY_REQUEST_OPTIONS; + /** * Test case with method to handle the starting and stopping the stores for native users and roles */ @@ -46,8 +50,9 @@ public void stopESNativeStores() throws Exception { if (getCurrentClusterScope() == Scope.SUITE) { // Clear the realm cache for all realms since we use a SUITE scoped cluster - SecurityClient client = securityClient(client()); - client.prepareClearRealmCache().get(); + RestHighLevelClient restClient = new TestRestHighLevelClient(); + restClient.security() + .clearRealmCache(new ClearRealmCacheRequest(Collections.emptyList(), Collections.emptyList()), SECURITY_REQUEST_OPTIONS); } } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/test/SecurityIntegTestCase.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/test/SecurityIntegTestCase.java index a3b75b188a278..2a3b706058cb7 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/test/SecurityIntegTestCase.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/test/SecurityIntegTestCase.java @@ -17,6 +17,7 @@ import org.elasticsearch.action.admin.indices.get.GetIndexResponse; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.client.Client; +import org.elasticsearch.client.RestHighLevelClient; import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.health.ClusterHealthStatus; @@ -24,11 +25,9 @@ import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.routing.IndexRoutingTable; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.network.NetworkAddress; import org.elasticsearch.common.settings.MockSecureSettings; import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.json.JsonXContent; @@ -36,11 +35,8 @@ import org.elasticsearch.index.Index; import org.elasticsearch.license.LicenseService; import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.xpack.core.XPackClient; -import org.elasticsearch.xpack.core.XPackSettings; import org.elasticsearch.xpack.core.security.authc.support.Hasher; import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken; -import org.elasticsearch.xpack.core.security.client.SecurityClient; import org.elasticsearch.xpack.security.LocalStateSecurity; import org.elasticsearch.xpack.security.support.SecurityIndexManager; import org.junit.AfterClass; @@ -50,7 +46,6 @@ import org.junit.Rule; import org.junit.rules.ExternalResource; -import java.net.InetSocketAddress; import java.nio.file.Path; import java.util.Collection; import java.util.Collections; @@ -406,25 +401,6 @@ protected Function getClientWrapper() { return client -> (client instanceof NodeClient) ? client.filterWithHeader(headers) : client; } - protected SecurityClient securityClient() { - return securityClient(client()); - } - - public static SecurityClient securityClient(Client client) { - return randomBoolean() ? new XPackClient(client).security() : new SecurityClient(client); - } - - protected String getHttpURL() { - final NodesInfoResponse nodeInfos = client().admin().cluster().prepareNodesInfo().get(); - final List nodes = nodeInfos.getNodes(); - assertTrue("there is at least one node", nodes.size() > 0); - NodeInfo ni = randomFrom(nodes); - boolean useSSL = XPackSettings.HTTP_SSL_ENABLED.get(ni.getSettings()); - TransportAddress publishAddress = ni.getHttp().address().publishAddress(); - InetSocketAddress address = publishAddress.address(); - return (useSSL ? "https://" : "http://") + NetworkAddress.format(address.getAddress()) + ":" + address.getPort(); - } - public void assertSecurityIndexActive() throws Exception { assertSecurityIndexActive(cluster()); } @@ -480,4 +456,10 @@ protected boolean isTransportSSLEnabled() { protected static Hasher getFastStoredHashAlgoForTests() { return Hasher.resolve(randomFrom("pbkdf2", "pbkdf2_1000", "bcrypt", "bcrypt9")); } + + protected class TestRestHighLevelClient extends RestHighLevelClient { + public TestRestHighLevelClient() { + super(getRestClient(), client -> {}, List.of()); + } + } } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/test/SecuritySettingsSource.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/test/SecuritySettingsSource.java index 09e1c00cb543e..a4141207ba658 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/test/SecuritySettingsSource.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/test/SecuritySettingsSource.java @@ -7,6 +7,7 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.analysis.common.CommonAnalysisPlugin; +import org.elasticsearch.client.RequestOptions; import org.elasticsearch.common.Strings; import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.settings.MockSecureSettings; @@ -30,11 +31,13 @@ import java.io.IOException; import java.io.InputStream; import java.io.UncheckedIOException; +import java.nio.charset.StandardCharsets; import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.StandardCopyOption; import java.util.ArrayList; import java.util.Arrays; +import java.util.Base64; import java.util.Collection; import java.util.List; import java.util.function.Consumer; @@ -43,6 +46,7 @@ import static org.apache.lucene.util.LuceneTestCase.createTempFile; import static org.elasticsearch.test.ESTestCase.inFipsJvm; import static org.elasticsearch.test.ESTestCase.randomFrom; +import static org.elasticsearch.test.SecuritySettingsSourceField.TEST_PASSWORD; import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue; import static org.elasticsearch.xpack.security.test.SecurityTestUtils.writeFile; @@ -56,9 +60,13 @@ public class SecuritySettingsSource extends NodeConfigurationSource { public static final String TEST_USER_NAME = "test_user"; public static final String TEST_PASSWORD_HASHED = new String(Hasher.resolve(randomFrom("pbkdf2", "pbkdf2_1000", "bcrypt9", "bcrypt8", "bcrypt")). - hash(new SecureString(SecuritySettingsSourceField.TEST_PASSWORD.toCharArray()))); + hash(new SecureString(TEST_PASSWORD.toCharArray()))); public static final String TEST_ROLE = "user"; public static final String TEST_SUPERUSER = "test_superuser"; + public static final RequestOptions SECURITY_REQUEST_OPTIONS = RequestOptions.DEFAULT.toBuilder() + .addHeader("Authorization", + "Basic " + Base64.getEncoder().encodeToString((TEST_USER_NAME + ":" + TEST_PASSWORD).getBytes(StandardCharsets.UTF_8))) + .build(); public static final String DEFAULT_TRANSPORT_CLIENT_ROLE = "transport_client"; public static final String DEFAULT_TRANSPORT_CLIENT_USER_NAME = "test_trans_client_user"; @@ -199,7 +207,7 @@ protected String nodeClientUsername() { } protected SecureString nodeClientPassword() { - return new SecureString(SecuritySettingsSourceField.TEST_PASSWORD.toCharArray()); + return new SecureString(TEST_PASSWORD.toCharArray()); } protected String transportClientUsername() { @@ -207,7 +215,7 @@ protected String transportClientUsername() { } protected SecureString transportClientPassword() { - return new SecureString(SecuritySettingsSourceField.TEST_PASSWORD.toCharArray()); + return new SecureString(TEST_PASSWORD.toCharArray()); } public static void addSSLSettingsForNodePEMFiles(Settings.Builder builder, String prefix, boolean hostnameVerificationEnabled) { diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/core/security/action/privilege/PutPrivilegesRequestBuilderTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/core/security/action/privilege/PutPrivilegesRequestBuilderTests.java index 2ece398d3d19f..98bae94a33a72 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/core/security/action/privilege/PutPrivilegesRequestBuilderTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/core/security/action/privilege/PutPrivilegesRequestBuilderTests.java @@ -23,7 +23,7 @@ public class PutPrivilegesRequestBuilderTests extends ESTestCase { public void testBuildRequestWithMultipleElements() throws Exception { - final PutPrivilegesRequestBuilder builder = new PutPrivilegesRequestBuilder(null, PutPrivilegesAction.INSTANCE); + final PutPrivilegesRequestBuilder builder = new PutPrivilegesRequestBuilder(null); builder.source(new BytesArray("{ " + "\"foo\":{" + " \"read\":{ \"application\":\"foo\", \"name\":\"read\", \"actions\":[ \"data:/read/*\", \"admin:/read/*\" ] }," @@ -53,7 +53,7 @@ private ApplicationPrivilegeDescriptor descriptor(String app, String name, Strin } public void testPrivilegeNameValidationOfMultipleElement() throws Exception { - final PutPrivilegesRequestBuilder builder = new PutPrivilegesRequestBuilder(null, PutPrivilegesAction.INSTANCE); + final PutPrivilegesRequestBuilder builder = new PutPrivilegesRequestBuilder(null); final IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> builder.source(new BytesArray("{ \"foo\":{" + "\"write\":{ \"application\":\"foo\", \"name\":\"read\", \"actions\":[\"data:/read/*\",\"admin:/read/*\"] }," @@ -65,7 +65,7 @@ public void testPrivilegeNameValidationOfMultipleElement() throws Exception { } public void testApplicationNameValidationOfMultipleElement() throws Exception { - final PutPrivilegesRequestBuilder builder = new PutPrivilegesRequestBuilder(null, PutPrivilegesAction.INSTANCE); + final PutPrivilegesRequestBuilder builder = new PutPrivilegesRequestBuilder(null); final IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> builder.source(new BytesArray("{ \"bar\":{" + "\"read\":{ \"application\":\"foo\", \"name\":\"read\", \"actions\":[ \"data:/read/*\", \"admin:/read/*\" ] }," @@ -78,7 +78,7 @@ public void testApplicationNameValidationOfMultipleElement() throws Exception { } public void testInferApplicationNameAndPrivilegeName() throws Exception { - final PutPrivilegesRequestBuilder builder = new PutPrivilegesRequestBuilder(null, PutPrivilegesAction.INSTANCE); + final PutPrivilegesRequestBuilder builder = new PutPrivilegesRequestBuilder(null); builder.source(new BytesArray("{ \"foo\":{" + "\"read\":{ \"actions\":[ \"data:/read/*\", \"admin:/read/*\" ] }," + "\"write\":{ \"actions\":[ \"data:/write/*\", \"admin:/*\" ] }," diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/rolemapping/PutRoleMappingRequestTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/rolemapping/PutRoleMappingRequestTests.java index 3079a17b77424..bb12a9872169c 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/rolemapping/PutRoleMappingRequestTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/rolemapping/PutRoleMappingRequestTests.java @@ -5,18 +5,17 @@ */ package org.elasticsearch.xpack.security.action.rolemapping; -import java.util.Collections; - import org.elasticsearch.client.ElasticsearchClient; import org.elasticsearch.common.ValidationException; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.xpack.core.security.action.rolemapping.PutRoleMappingAction; import org.elasticsearch.xpack.core.security.action.rolemapping.PutRoleMappingRequest; import org.elasticsearch.xpack.core.security.action.rolemapping.PutRoleMappingRequestBuilder; import org.elasticsearch.xpack.core.security.authc.support.mapper.expressiondsl.RoleMapperExpression; import org.junit.Before; import org.mockito.Mockito; +import java.util.Collections; + import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.notNullValue; @@ -27,7 +26,7 @@ public class PutRoleMappingRequestTests extends ESTestCase { @Before public void setupBuilder() { final ElasticsearchClient client = Mockito.mock(ElasticsearchClient.class); - builder = new PutRoleMappingRequestBuilder(client, PutRoleMappingAction.INSTANCE); + builder = new PutRoleMappingRequestBuilder(client); } public void testValidateMissingName() throws Exception { @@ -70,4 +69,4 @@ private void assertValidationFailure(PutRoleMappingRequest request, String expec assertThat(ve.getMessage(), containsString(expectedMessage)); } -} \ No newline at end of file +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ApiKeyIntegTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ApiKeyIntegTests.java index c8cea45037942..5c6c04b6ad491 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ApiKeyIntegTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ApiKeyIntegTests.java @@ -7,7 +7,6 @@ package org.elasticsearch.xpack.security.authc; import com.google.common.collect.Sets; - import org.elasticsearch.ElasticsearchSecurityException; import org.elasticsearch.action.DocWriteResponse; import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; @@ -26,14 +25,16 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xpack.core.XPackSettings; import org.elasticsearch.xpack.core.security.action.ApiKey; +import org.elasticsearch.xpack.core.security.action.CreateApiKeyRequestBuilder; import org.elasticsearch.xpack.core.security.action.CreateApiKeyResponse; +import org.elasticsearch.xpack.core.security.action.GetApiKeyAction; import org.elasticsearch.xpack.core.security.action.GetApiKeyRequest; import org.elasticsearch.xpack.core.security.action.GetApiKeyResponse; +import org.elasticsearch.xpack.core.security.action.InvalidateApiKeyAction; import org.elasticsearch.xpack.core.security.action.InvalidateApiKeyRequest; import org.elasticsearch.xpack.core.security.action.InvalidateApiKeyResponse; import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken; import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; -import org.elasticsearch.xpack.core.security.client.SecurityClient; import org.elasticsearch.xpack.security.transport.filter.IPFilter; import org.junit.After; import org.junit.Before; @@ -100,8 +101,7 @@ public void testCreateApiKey() { Client client = client().filterWithHeader(Collections.singletonMap("Authorization", UsernamePasswordToken.basicAuthHeaderValue(SecuritySettingsSource.TEST_SUPERUSER, SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING))); - SecurityClient securityClient = new SecurityClient(client); - final CreateApiKeyResponse response = securityClient.prepareCreateApiKey() + final CreateApiKeyResponse response = new CreateApiKeyRequestBuilder(client) .setName("test key") .setExpiration(TimeValue.timeValueHours(TimeUnit.DAYS.toHours(7L))) .setRoleDescriptors(Collections.singletonList(descriptor)) @@ -115,7 +115,7 @@ public void testCreateApiKey() { assertThat(daysBetween, is(7L)); // create simple api key - final CreateApiKeyResponse simple = securityClient.prepareCreateApiKey().setName("simple").get(); + final CreateApiKeyResponse simple = new CreateApiKeyRequestBuilder(client).setName("simple").get(); assertEquals("simple", simple.getName()); assertNotNull(simple.getId()); assertNotNull(simple.getKey()); @@ -151,8 +151,7 @@ public void testCreateApiKeyFailsWhenApiKeyWithSameNameAlreadyExists() throws In final RoleDescriptor descriptor = new RoleDescriptor("role", new String[] { "monitor" }, null, null); Client client = client().filterWithHeader(Collections.singletonMap("Authorization", UsernamePasswordToken .basicAuthHeaderValue(SecuritySettingsSource.TEST_SUPERUSER, SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING))); - SecurityClient securityClient = new SecurityClient(client); - final CreateApiKeyResponse response = securityClient.prepareCreateApiKey().setName(keyName).setExpiration(null) + final CreateApiKeyResponse response = new CreateApiKeyRequestBuilder(client).setName(keyName).setExpiration(null) .setRoleDescriptors(Collections.singletonList(descriptor)).get(); assertNotNull(response.getId()); assertNotNull(response.getKey()); @@ -163,8 +162,7 @@ public void testCreateApiKeyFailsWhenApiKeyWithSameNameAlreadyExists() throws In Client client = client().filterWithHeader(Collections.singletonMap("Authorization", UsernamePasswordToken.basicAuthHeaderValue(SecuritySettingsSource.TEST_SUPERUSER, SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING))); - SecurityClient securityClient = new SecurityClient(client); - ElasticsearchSecurityException e = expectThrows(ElasticsearchSecurityException.class, () -> securityClient.prepareCreateApiKey() + ElasticsearchSecurityException e = expectThrows(ElasticsearchSecurityException.class, () -> new CreateApiKeyRequestBuilder(client) .setName(keyName) .setExpiration(TimeValue.timeValueHours(TimeUnit.DAYS.toHours(7L))) .setRoleDescriptors(Collections.singletonList(descriptor)) @@ -173,12 +171,12 @@ public void testCreateApiKeyFailsWhenApiKeyWithSameNameAlreadyExists() throws In // Now invalidate the API key PlainActionFuture listener = new PlainActionFuture<>(); - securityClient.invalidateApiKey(InvalidateApiKeyRequest.usingApiKeyName(keyName), listener); + client.execute(InvalidateApiKeyAction.INSTANCE, InvalidateApiKeyRequest.usingApiKeyName(keyName), listener); InvalidateApiKeyResponse invalidateResponse = listener.get(); verifyInvalidateResponse(1, responses, invalidateResponse); // try to create API key with same name, should succeed now - CreateApiKeyResponse createResponse = securityClient.prepareCreateApiKey().setName(keyName) + CreateApiKeyResponse createResponse = new CreateApiKeyRequestBuilder(client).setName(keyName) .setExpiration(TimeValue.timeValueHours(TimeUnit.DAYS.toHours(7L))) .setRoleDescriptors(Collections.singletonList(descriptor)).get(); assertNotNull(createResponse.getId()); @@ -190,9 +188,8 @@ public void testInvalidateApiKeysForRealm() throws InterruptedException, Executi List responses = createApiKeys(noOfApiKeys, null); Client client = client().filterWithHeader(Collections.singletonMap("Authorization", UsernamePasswordToken .basicAuthHeaderValue(SecuritySettingsSource.TEST_SUPERUSER, SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING))); - SecurityClient securityClient = new SecurityClient(client); PlainActionFuture listener = new PlainActionFuture<>(); - securityClient.invalidateApiKey(InvalidateApiKeyRequest.usingRealmName("file"), listener); + client.execute(InvalidateApiKeyAction.INSTANCE, InvalidateApiKeyRequest.usingRealmName("file"), listener); InvalidateApiKeyResponse invalidateResponse = listener.get(); verifyInvalidateResponse(noOfApiKeys, responses, invalidateResponse); } @@ -202,9 +199,9 @@ public void testInvalidateApiKeysForUser() throws Exception { List responses = createApiKeys(noOfApiKeys, null); Client client = client().filterWithHeader(Collections.singletonMap("Authorization", UsernamePasswordToken .basicAuthHeaderValue(SecuritySettingsSource.TEST_SUPERUSER, SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING))); - SecurityClient securityClient = new SecurityClient(client); PlainActionFuture listener = new PlainActionFuture<>(); - securityClient.invalidateApiKey(InvalidateApiKeyRequest.usingUserName(SecuritySettingsSource.TEST_SUPERUSER), listener); + client.execute(InvalidateApiKeyAction.INSTANCE, + InvalidateApiKeyRequest.usingUserName(SecuritySettingsSource.TEST_SUPERUSER), listener); InvalidateApiKeyResponse invalidateResponse = listener.get(); verifyInvalidateResponse(noOfApiKeys, responses, invalidateResponse); } @@ -213,10 +210,9 @@ public void testInvalidateApiKeysForRealmAndUser() throws InterruptedException, List responses = createApiKeys(1, null); Client client = client().filterWithHeader(Collections.singletonMap("Authorization", UsernamePasswordToken .basicAuthHeaderValue(SecuritySettingsSource.TEST_SUPERUSER, SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING))); - SecurityClient securityClient = new SecurityClient(client); PlainActionFuture listener = new PlainActionFuture<>(); - securityClient.invalidateApiKey(InvalidateApiKeyRequest.usingRealmAndUserName("file", SecuritySettingsSource.TEST_SUPERUSER), - listener); + client.execute(InvalidateApiKeyAction.INSTANCE, + InvalidateApiKeyRequest.usingRealmAndUserName("file", SecuritySettingsSource.TEST_SUPERUSER), listener); InvalidateApiKeyResponse invalidateResponse = listener.get(); verifyInvalidateResponse(1, responses, invalidateResponse); } @@ -225,9 +221,8 @@ public void testInvalidateApiKeysForApiKeyId() throws InterruptedException, Exec List responses = createApiKeys(1, null); Client client = client().filterWithHeader(Collections.singletonMap("Authorization", UsernamePasswordToken .basicAuthHeaderValue(SecuritySettingsSource.TEST_SUPERUSER, SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING))); - SecurityClient securityClient = new SecurityClient(client); PlainActionFuture listener = new PlainActionFuture<>(); - securityClient.invalidateApiKey(InvalidateApiKeyRequest.usingApiKeyId(responses.get(0).getId()), listener); + client.execute(InvalidateApiKeyAction.INSTANCE, InvalidateApiKeyRequest.usingApiKeyId(responses.get(0).getId()), listener); InvalidateApiKeyResponse invalidateResponse = listener.get(); verifyInvalidateResponse(1, responses, invalidateResponse); } @@ -236,9 +231,8 @@ public void testInvalidateApiKeysForApiKeyName() throws InterruptedException, Ex List responses = createApiKeys(1, null); Client client = client().filterWithHeader(Collections.singletonMap("Authorization", UsernamePasswordToken .basicAuthHeaderValue(SecuritySettingsSource.TEST_SUPERUSER, SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING))); - SecurityClient securityClient = new SecurityClient(client); PlainActionFuture listener = new PlainActionFuture<>(); - securityClient.invalidateApiKey(InvalidateApiKeyRequest.usingApiKeyName(responses.get(0).getName()), listener); + client.execute(InvalidateApiKeyAction.INSTANCE, InvalidateApiKeyRequest.usingApiKeyName(responses.get(0).getName()), listener); InvalidateApiKeyResponse invalidateResponse = listener.get(); verifyInvalidateResponse(1, responses, invalidateResponse); } @@ -259,27 +253,24 @@ public void testInvalidatedApiKeysDeletedByRemover() throws Exception { List createdApiKeys = createApiKeys(2, null); - SecurityClient securityClient = new SecurityClient(client); - PlainActionFuture listener = new PlainActionFuture<>(); - securityClient.invalidateApiKey(InvalidateApiKeyRequest.usingApiKeyId(createdApiKeys.get(0).getId()), listener); + client.execute(InvalidateApiKeyAction.INSTANCE, InvalidateApiKeyRequest.usingApiKeyId(createdApiKeys.get(0).getId()), listener); InvalidateApiKeyResponse invalidateResponse = listener.get(); assertThat(invalidateResponse.getInvalidatedApiKeys().size(), equalTo(1)); assertThat(invalidateResponse.getPreviouslyInvalidatedApiKeys().size(), equalTo(0)); assertThat(invalidateResponse.getErrors().size(), equalTo(0)); PlainActionFuture getApiKeyResponseListener = new PlainActionFuture<>(); - securityClient.getApiKey(GetApiKeyRequest.usingRealmName("file"), getApiKeyResponseListener); + client.execute(GetApiKeyAction.INSTANCE, GetApiKeyRequest.usingRealmName("file"), getApiKeyResponseListener); assertThat(getApiKeyResponseListener.get().getApiKeyInfos().length, is(2)); client = waitForExpiredApiKeysRemoverTriggerReadyAndGetClient().filterWithHeader( Collections.singletonMap("Authorization", UsernamePasswordToken.basicAuthHeaderValue(SecuritySettingsSource.TEST_SUPERUSER, SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING))); - securityClient = new SecurityClient(client); // invalidate API key to trigger remover listener = new PlainActionFuture<>(); - securityClient.invalidateApiKey(InvalidateApiKeyRequest.usingApiKeyId(createdApiKeys.get(1).getId()), listener); + client.execute(InvalidateApiKeyAction.INSTANCE, InvalidateApiKeyRequest.usingApiKeyId(createdApiKeys.get(1).getId()), listener); assertThat(listener.get().getInvalidatedApiKeys().size(), is(1)); awaitApiKeysRemoverCompletion(); @@ -288,7 +279,7 @@ public void testInvalidatedApiKeysDeletedByRemover() throws Exception { // Verify that 1st invalidated API key is deleted whereas the next one is not getApiKeyResponseListener = new PlainActionFuture<>(); - securityClient.getApiKey(GetApiKeyRequest.usingRealmName("file"), getApiKeyResponseListener); + client.execute(GetApiKeyAction.INSTANCE, GetApiKeyRequest.usingRealmName("file"), getApiKeyResponseListener); assertThat(getApiKeyResponseListener.get().getApiKeyInfos().length, is(1)); ApiKey apiKey = getApiKeyResponseListener.get().getApiKeyInfos()[0]; assertThat(apiKey.getId(), is(createdApiKeys.get(1).getId())); @@ -325,10 +316,8 @@ public void testExpiredApiKeysBehaviorWhenKeysExpired1WeekBeforeAnd1DayBefore() List createdApiKeys = createApiKeys(noOfKeys, null); Instant created = Instant.now(); - SecurityClient securityClient = new SecurityClient(client); - PlainActionFuture getApiKeyResponseListener = new PlainActionFuture<>(); - securityClient.getApiKey(GetApiKeyRequest.usingRealmName("file"), getApiKeyResponseListener); + client.execute(GetApiKeyAction.INSTANCE, GetApiKeyRequest.usingRealmName("file"), getApiKeyResponseListener); assertThat(getApiKeyResponseListener.get().getApiKeyInfos().length, is(noOfKeys)); // Expire the 1st key such that it cannot be deleted by the remover @@ -354,7 +343,7 @@ public void testExpiredApiKeysBehaviorWhenKeysExpired1WeekBeforeAnd1DayBefore() // Invalidate to trigger the remover PlainActionFuture listener = new PlainActionFuture<>(); - securityClient.invalidateApiKey(InvalidateApiKeyRequest.usingApiKeyId(createdApiKeys.get(2).getId()), listener); + client.execute(InvalidateApiKeyAction.INSTANCE, InvalidateApiKeyRequest.usingApiKeyId(createdApiKeys.get(2).getId()), listener); assertThat(listener.get().getInvalidatedApiKeys().size(), is(1)); awaitApiKeysRemoverCompletion(); @@ -363,7 +352,7 @@ public void testExpiredApiKeysBehaviorWhenKeysExpired1WeekBeforeAnd1DayBefore() // Verify get API keys does not return expired and deleted key getApiKeyResponseListener = new PlainActionFuture<>(); - securityClient.getApiKey(GetApiKeyRequest.usingRealmName("file"), getApiKeyResponseListener); + client.execute(GetApiKeyAction.INSTANCE, GetApiKeyRequest.usingRealmName("file"), getApiKeyResponseListener); assertThat(getApiKeyResponseListener.get().getApiKeyInfos().length, is(3)); Set expectedKeyIds = Sets.newHashSet(createdApiKeys.get(0).getId(), createdApiKeys.get(2).getId(), @@ -400,17 +389,16 @@ public void testActiveApiKeysWithNoExpirationNeverGetDeletedByRemover() throws E Client client = client().filterWithHeader(Collections.singletonMap("Authorization", UsernamePasswordToken .basicAuthHeaderValue(SecuritySettingsSource.TEST_SUPERUSER, SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING))); - SecurityClient securityClient = new SecurityClient(client); PlainActionFuture listener = new PlainActionFuture<>(); // trigger expired keys remover - securityClient.invalidateApiKey(InvalidateApiKeyRequest.usingApiKeyId(responses.get(1).getId()), listener); + client.execute(InvalidateApiKeyAction.INSTANCE, InvalidateApiKeyRequest.usingApiKeyId(responses.get(1).getId()), listener); InvalidateApiKeyResponse invalidateResponse = listener.get(); assertThat(invalidateResponse.getInvalidatedApiKeys().size(), equalTo(1)); assertThat(invalidateResponse.getPreviouslyInvalidatedApiKeys().size(), equalTo(0)); assertThat(invalidateResponse.getErrors().size(), equalTo(0)); PlainActionFuture getApiKeyResponseListener = new PlainActionFuture<>(); - securityClient.getApiKey(GetApiKeyRequest.usingRealmName("file"), getApiKeyResponseListener); + client.execute(GetApiKeyAction.INSTANCE, GetApiKeyRequest.usingRealmName("file"), getApiKeyResponseListener); GetApiKeyResponse response = getApiKeyResponseListener.get(); verifyGetResponse(2, responses, response, Collections.singleton(responses.get(0).getId()), Collections.singletonList(responses.get(1).getId())); @@ -421,13 +409,12 @@ public void testGetApiKeysForRealm() throws InterruptedException, ExecutionExcep List responses = createApiKeys(noOfApiKeys, null); Client client = client().filterWithHeader(Collections.singletonMap("Authorization", UsernamePasswordToken .basicAuthHeaderValue(SecuritySettingsSource.TEST_SUPERUSER, SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING))); - SecurityClient securityClient = new SecurityClient(client); boolean invalidate= randomBoolean(); List invalidatedApiKeyIds = null; Set expectedValidKeyIds = null; if (invalidate) { PlainActionFuture listener = new PlainActionFuture<>(); - securityClient.invalidateApiKey(InvalidateApiKeyRequest.usingApiKeyId(responses.get(0).getId()), listener); + client.execute(InvalidateApiKeyAction.INSTANCE, InvalidateApiKeyRequest.usingApiKeyId(responses.get(0).getId()), listener); InvalidateApiKeyResponse invalidateResponse = listener.get(); invalidatedApiKeyIds = invalidateResponse.getInvalidatedApiKeys(); expectedValidKeyIds = responses.stream().filter(o -> !o.getId().equals(responses.get(0).getId())).map(o -> o.getId()) @@ -438,7 +425,7 @@ public void testGetApiKeysForRealm() throws InterruptedException, ExecutionExcep } PlainActionFuture listener = new PlainActionFuture<>(); - securityClient.getApiKey(GetApiKeyRequest.usingRealmName("file"), listener); + client.execute(GetApiKeyAction.INSTANCE, GetApiKeyRequest.usingRealmName("file"), listener); GetApiKeyResponse response = listener.get(); verifyGetResponse(noOfApiKeys, responses, response, expectedValidKeyIds, @@ -450,9 +437,8 @@ public void testGetApiKeysForUser() throws Exception { List responses = createApiKeys(noOfApiKeys, null); Client client = client().filterWithHeader(Collections.singletonMap("Authorization", UsernamePasswordToken .basicAuthHeaderValue(SecuritySettingsSource.TEST_SUPERUSER, SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING))); - SecurityClient securityClient = new SecurityClient(client); PlainActionFuture listener = new PlainActionFuture<>(); - securityClient.getApiKey(GetApiKeyRequest.usingUserName(SecuritySettingsSource.TEST_SUPERUSER), listener); + client.execute(GetApiKeyAction.INSTANCE, GetApiKeyRequest.usingUserName(SecuritySettingsSource.TEST_SUPERUSER), listener); GetApiKeyResponse response = listener.get(); verifyGetResponse(noOfApiKeys, responses, response, responses.stream().map(o -> o.getId()).collect(Collectors.toSet()), null); } @@ -461,9 +447,8 @@ public void testGetApiKeysForRealmAndUser() throws InterruptedException, Executi List responses = createApiKeys(1, null); Client client = client().filterWithHeader(Collections.singletonMap("Authorization", UsernamePasswordToken .basicAuthHeaderValue(SecuritySettingsSource.TEST_SUPERUSER, SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING))); - SecurityClient securityClient = new SecurityClient(client); PlainActionFuture listener = new PlainActionFuture<>(); - securityClient.getApiKey(GetApiKeyRequest.usingRealmAndUserName("file", SecuritySettingsSource.TEST_SUPERUSER), + client.execute(GetApiKeyAction.INSTANCE, GetApiKeyRequest.usingRealmAndUserName("file", SecuritySettingsSource.TEST_SUPERUSER), listener); GetApiKeyResponse response = listener.get(); verifyGetResponse(1, responses, response, Collections.singleton(responses.get(0).getId()), null); @@ -473,9 +458,8 @@ public void testGetApiKeysForApiKeyId() throws InterruptedException, ExecutionEx List responses = createApiKeys(1, null); Client client = client().filterWithHeader(Collections.singletonMap("Authorization", UsernamePasswordToken .basicAuthHeaderValue(SecuritySettingsSource.TEST_SUPERUSER, SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING))); - SecurityClient securityClient = new SecurityClient(client); PlainActionFuture listener = new PlainActionFuture<>(); - securityClient.getApiKey(GetApiKeyRequest.usingApiKeyId(responses.get(0).getId()), listener); + client.execute(GetApiKeyAction.INSTANCE, GetApiKeyRequest.usingApiKeyId(responses.get(0).getId()), listener); GetApiKeyResponse response = listener.get(); verifyGetResponse(1, responses, response, Collections.singleton(responses.get(0).getId()), null); } @@ -484,9 +468,8 @@ public void testGetApiKeysForApiKeyName() throws InterruptedException, Execution List responses = createApiKeys(1, null); Client client = client().filterWithHeader(Collections.singletonMap("Authorization", UsernamePasswordToken .basicAuthHeaderValue(SecuritySettingsSource.TEST_SUPERUSER, SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING))); - SecurityClient securityClient = new SecurityClient(client); PlainActionFuture listener = new PlainActionFuture<>(); - securityClient.getApiKey(GetApiKeyRequest.usingApiKeyName(responses.get(0).getName()), listener); + client.execute(GetApiKeyAction.INSTANCE, GetApiKeyRequest.usingApiKeyName(responses.get(0).getName()), listener); GetApiKeyResponse response = listener.get(); verifyGetResponse(1, responses, response, Collections.singleton(responses.get(0).getId()), null); } @@ -524,8 +507,7 @@ private List createApiKeys(int noOfApiKeys, TimeValue expi final RoleDescriptor descriptor = new RoleDescriptor("role", new String[] { "monitor" }, null, null); Client client = client().filterWithHeader(Collections.singletonMap("Authorization", UsernamePasswordToken .basicAuthHeaderValue(SecuritySettingsSource.TEST_SUPERUSER, SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING))); - SecurityClient securityClient = new SecurityClient(client); - final CreateApiKeyResponse response = securityClient.prepareCreateApiKey() + final CreateApiKeyResponse response = new CreateApiKeyRequestBuilder(client) .setName("test-key-" + randomAlphaOfLengthBetween(5, 9) + i).setExpiration(expiration) .setRoleDescriptors(Collections.singletonList(descriptor)).get(); assertNotNull(response.getId()); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/TokenAuthIntegTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/TokenAuthIntegTests.java index 7bac18cfcfb67..bfc7508857120 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/TokenAuthIntegTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/TokenAuthIntegTests.java @@ -6,16 +6,23 @@ package org.elasticsearch.xpack.security.authc; import org.apache.directory.api.util.Strings; -import org.elasticsearch.ElasticsearchSecurityException; -import org.elasticsearch.action.ActionListener; +import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; +import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; +import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.action.support.WriteRequest; +import org.elasticsearch.action.update.UpdateRequest; import org.elasticsearch.action.update.UpdateResponse; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.RequestOptions; +import org.elasticsearch.client.RestHighLevelClient; +import org.elasticsearch.client.security.AuthenticateResponse; +import org.elasticsearch.client.security.CreateTokenRequest; +import org.elasticsearch.client.security.CreateTokenResponse; +import org.elasticsearch.client.security.InvalidateTokenRequest; +import org.elasticsearch.client.security.InvalidateTokenResponse; import org.elasticsearch.cluster.ack.ClusterStateUpdateResponse; -import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.index.query.QueryBuilders; @@ -26,20 +33,13 @@ import org.elasticsearch.test.SecuritySettingsSourceField; import org.elasticsearch.test.junit.annotations.TestLogging; import org.elasticsearch.xpack.core.XPackSettings; -import org.elasticsearch.xpack.core.security.action.token.CreateTokenRequest; -import org.elasticsearch.xpack.core.security.action.token.CreateTokenResponse; -import org.elasticsearch.xpack.core.security.action.token.InvalidateTokenRequest; -import org.elasticsearch.xpack.core.security.action.token.InvalidateTokenResponse; -import org.elasticsearch.xpack.core.security.action.user.AuthenticateAction; -import org.elasticsearch.xpack.core.security.action.user.AuthenticateRequest; -import org.elasticsearch.xpack.core.security.action.user.AuthenticateResponse; import org.elasticsearch.xpack.core.security.authc.TokenMetaData; import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken; -import org.elasticsearch.xpack.core.security.client.SecurityClient; import org.elasticsearch.xpack.core.security.index.RestrictedIndicesNames; import org.junit.After; import org.junit.Before; +import java.io.IOException; import java.time.Clock; import java.time.Instant; import java.time.temporal.ChronoUnit; @@ -53,8 +53,9 @@ import java.util.concurrent.atomic.AtomicReference; import java.util.stream.Collectors; -import static org.elasticsearch.index.mapper.MapperService.SINGLE_MAPPING_NAME; +import static org.elasticsearch.test.SecuritySettingsSource.SECURITY_REQUEST_OPTIONS; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoTimeout; +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; @TestLogging("org.elasticsearch.xpack.security.authz.store.FileRolesStore:DEBUG") @@ -77,48 +78,45 @@ protected int maxNumberOfNodes() { return defaultMaxNumberOfNodes() + 1; } + @Override + protected boolean addMockHttpTransport() { + return false; // need real http + } + public void testTokenServiceBootstrapOnNodeJoin() throws Exception { - final Client client = client(); - SecurityClient securityClient = new SecurityClient(client); - CreateTokenResponse response = securityClient.prepareCreateToken() - .setGrantType("password") - .setUsername(SecuritySettingsSource.TEST_USER_NAME) - .setPassword(new SecureString(SecuritySettingsSourceField.TEST_PASSWORD.toCharArray())) - .get(); + final RestHighLevelClient restClient = new TestRestHighLevelClient(); + CreateTokenResponse response = restClient.security().createToken(CreateTokenRequest.passwordGrant( + SecuritySettingsSource.TEST_USER_NAME, SecuritySettingsSourceField.TEST_PASSWORD.toCharArray()), SECURITY_REQUEST_OPTIONS); for (TokenService tokenService : internalCluster().getInstances(TokenService.class)) { PlainActionFuture userTokenFuture = new PlainActionFuture<>(); - tokenService.decodeToken(response.getTokenString(), userTokenFuture); + tokenService.decodeToken(response.getAccessToken(), userTokenFuture); assertNotNull(userTokenFuture.actionGet()); } // start a new node and see if it can decrypt the token String nodeName = internalCluster().startNode(); for (TokenService tokenService : internalCluster().getInstances(TokenService.class)) { PlainActionFuture userTokenFuture = new PlainActionFuture<>(); - tokenService.decodeToken(response.getTokenString(), userTokenFuture); + tokenService.decodeToken(response.getAccessToken(), userTokenFuture); assertNotNull(userTokenFuture.actionGet()); } TokenService tokenService = internalCluster().getInstance(TokenService.class, nodeName); PlainActionFuture userTokenFuture = new PlainActionFuture<>(); - tokenService.decodeToken(response.getTokenString(), userTokenFuture); + tokenService.decodeToken(response.getAccessToken(), userTokenFuture); assertNotNull(userTokenFuture.actionGet()); } public void testTokenServiceCanRotateKeys() throws Exception { - final Client client = client(); - SecurityClient securityClient = new SecurityClient(client); - CreateTokenResponse response = securityClient.prepareCreateToken() - .setGrantType("password") - .setUsername(SecuritySettingsSource.TEST_USER_NAME) - .setPassword(new SecureString(SecuritySettingsSourceField.TEST_PASSWORD.toCharArray())) - .get(); + final RestHighLevelClient restClient = new TestRestHighLevelClient(); + CreateTokenResponse response = restClient.security().createToken(CreateTokenRequest.passwordGrant( + SecuritySettingsSource.TEST_USER_NAME, SecuritySettingsSourceField.TEST_PASSWORD.toCharArray()), SECURITY_REQUEST_OPTIONS); String masterName = internalCluster().getMasterName(); TokenService masterTokenService = internalCluster().getInstance(TokenService.class, masterName); String activeKeyHash = masterTokenService.getActiveKeyHash(); for (TokenService tokenService : internalCluster().getInstances(TokenService.class)) { PlainActionFuture userTokenFuture = new PlainActionFuture<>(); - tokenService.decodeToken(response.getTokenString(), userTokenFuture); + tokenService.decodeToken(response.getAccessToken(), userTokenFuture); assertNotNull(userTokenFuture.actionGet()); assertEquals(activeKeyHash, tokenService.getActiveKeyHash()); } @@ -131,7 +129,7 @@ public void testTokenServiceCanRotateKeys() throws Exception { for (TokenService tokenService : internalCluster().getInstances(TokenService.class)) { PlainActionFuture userTokenFuture = new PlainActionFuture<>(); - tokenService.decodeToken(response.getTokenString(), userTokenFuture); + tokenService.decodeToken(response.getAccessToken(), userTokenFuture); assertNotNull(userTokenFuture.actionGet()); assertNotEquals(activeKeyHash, tokenService.getActiveKeyHash()); } @@ -139,33 +137,24 @@ public void testTokenServiceCanRotateKeys() throws Exception { @TestLogging("org.elasticsearch.xpack.security.authc:DEBUG") public void testExpiredTokensDeletedAfterExpiration() throws Exception { - final Client client = client().filterWithHeader(Collections.singletonMap("Authorization", - UsernamePasswordToken.basicAuthHeaderValue(SecuritySettingsSource.TEST_SUPERUSER, - SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING))); - SecurityClient securityClient = new SecurityClient(client); - CreateTokenResponse response = securityClient.prepareCreateToken() - .setGrantType("password") - .setUsername(SecuritySettingsSource.TEST_USER_NAME) - .setPassword(new SecureString(SecuritySettingsSourceField.TEST_PASSWORD.toCharArray())) - .get(); + final RestHighLevelClient restClient = new TestRestHighLevelClient(); + CreateTokenResponse response = restClient.security().createToken(CreateTokenRequest.passwordGrant( + SecuritySettingsSource.TEST_USER_NAME, SecuritySettingsSourceField.TEST_PASSWORD.toCharArray()), SECURITY_REQUEST_OPTIONS); Instant created = Instant.now(); - InvalidateTokenResponse invalidateResponse = securityClient - .prepareInvalidateToken(response.getTokenString()) - .setType(InvalidateTokenRequest.Type.ACCESS_TOKEN) - .get(); - assertThat(invalidateResponse.getResult().getInvalidatedTokens().size(), equalTo(1)); - assertThat(invalidateResponse.getResult().getPreviouslyInvalidatedTokens().size(), equalTo(0)); - assertThat(invalidateResponse.getResult().getErrors().size(), equalTo(0)); + InvalidateTokenResponse invalidateResponse = restClient.security().invalidateToken( + new InvalidateTokenRequest(response.getAccessToken(), null, null, null), SECURITY_REQUEST_OPTIONS); + assertThat(invalidateResponse.getInvalidatedTokens(), equalTo(1)); + assertThat(invalidateResponse.getPreviouslyInvalidatedTokens(), equalTo(0)); + assertThat(invalidateResponse.getErrors().size(), equalTo(0)); AtomicReference docId = new AtomicReference<>(); assertBusy(() -> { - SearchResponse searchResponse = client.prepareSearch(RestrictedIndicesNames.SECURITY_TOKENS_ALIAS) - .setSource(SearchSourceBuilder.searchSource() - .query(QueryBuilders.termQuery("doc_type", "token"))) - .setSize(1) - .setTerminateAfter(1) - .get(); + SearchResponse searchResponse = restClient.search(new SearchRequest(RestrictedIndicesNames.SECURITY_TOKENS_ALIAS) + .source(SearchSourceBuilder.searchSource() + .size(1) + .terminateAfter(1) + .query(QueryBuilders.termQuery("doc_type", "token"))), SECURITY_REQUEST_OPTIONS); assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L)); docId.set(searchResponse.getHits().getAt(0).getId()); }); @@ -173,198 +162,153 @@ public void testExpiredTokensDeletedAfterExpiration() throws Exception { // hack doc to modify the creation time to the day before Instant yesterday = created.minus(36L, ChronoUnit.HOURS); assertTrue(Instant.now().isAfter(yesterday)); - client.prepareUpdate(RestrictedIndicesNames.SECURITY_TOKENS_ALIAS, SINGLE_MAPPING_NAME, docId.get()) - .setDoc("creation_time", yesterday.toEpochMilli()) - .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) - .get(); + restClient.update(new UpdateRequest(RestrictedIndicesNames.SECURITY_TOKENS_ALIAS, docId.get()) + .doc("creation_time", yesterday.toEpochMilli()) + .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE), SECURITY_REQUEST_OPTIONS); AtomicBoolean deleteTriggered = new AtomicBoolean(false); assertBusy(() -> { if (deleteTriggered.compareAndSet(false, true)) { // invalidate a invalid token... doesn't matter that it is bad... we just want this action to trigger the deletion - try { - securityClient.prepareInvalidateToken("fooobar") - .setType(randomFrom(InvalidateTokenRequest.Type.values())) - .execute() - .actionGet(); - } catch (ElasticsearchSecurityException e) { - assertEquals("token malformed", e.getMessage()); - } + ElasticsearchStatusException e = expectThrows(ElasticsearchStatusException.class, () -> + restClient.security().invalidateToken(new InvalidateTokenRequest("fooobar", null, null, null), + SECURITY_REQUEST_OPTIONS)); + assertThat(e.getMessage(), containsString("token malformed")); } - client.admin().indices().prepareRefresh(RestrictedIndicesNames.SECURITY_TOKENS_ALIAS).get(); - SearchResponse searchResponse = client.prepareSearch(RestrictedIndicesNames.SECURITY_TOKENS_ALIAS) - .setSource(SearchSourceBuilder.searchSource() - .query(QueryBuilders.termQuery("doc_type", "token"))) - .setTerminateAfter(1) - .get(); + restClient.indices().refresh(new RefreshRequest(RestrictedIndicesNames.SECURITY_TOKENS_ALIAS), SECURITY_REQUEST_OPTIONS); + SearchResponse searchResponse = restClient.search(new SearchRequest(RestrictedIndicesNames.SECURITY_TOKENS_ALIAS) + .source(SearchSourceBuilder.searchSource() + .query(QueryBuilders.termQuery("doc_type", "token")).terminateAfter(1)), SECURITY_REQUEST_OPTIONS); assertThat(searchResponse.getHits().getTotalHits().value, equalTo(0L)); }, 30, TimeUnit.SECONDS); } - public void testInvalidateAllTokensForUser() throws Exception{ + public void testInvalidateAllTokensForUser() throws Exception { + final RestHighLevelClient restClient = new TestRestHighLevelClient(); final int numOfRequests = randomIntBetween(5, 10); for (int i = 0; i < numOfRequests; i++) { - securityClient().prepareCreateToken() - .setGrantType("password") - .setUsername(SecuritySettingsSource.TEST_USER_NAME) - .setPassword(new SecureString(SecuritySettingsSourceField.TEST_PASSWORD.toCharArray())) - .get(); + restClient.security().createToken(CreateTokenRequest.passwordGrant(SecuritySettingsSource.TEST_USER_NAME, + SecuritySettingsSourceField.TEST_PASSWORD.toCharArray()), SECURITY_REQUEST_OPTIONS); } - Client client = client().filterWithHeader(Collections.singletonMap("Authorization", - UsernamePasswordToken.basicAuthHeaderValue(SecuritySettingsSource.TEST_SUPERUSER, - SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING))); - SecurityClient securityClientSuperuser = new SecurityClient(client); - InvalidateTokenResponse invalidateResponse = securityClientSuperuser - .prepareInvalidateToken() - .setUserName(SecuritySettingsSource.TEST_USER_NAME) - .get(); - assertThat(invalidateResponse.getResult().getInvalidatedTokens().size(), equalTo(2 * (numOfRequests))); - assertThat(invalidateResponse.getResult().getPreviouslyInvalidatedTokens().size(), equalTo(0)); - assertThat(invalidateResponse.getResult().getErrors().size(), equalTo(0)); + InvalidateTokenResponse invalidateResponse = restClient.security().invalidateToken( + new InvalidateTokenRequest(null, null, null, SecuritySettingsSource.TEST_USER_NAME), + RequestOptions.DEFAULT.toBuilder().addHeader("Authorization", + UsernamePasswordToken.basicAuthHeaderValue(SecuritySettingsSource.TEST_SUPERUSER, + SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING)).build()); + assertThat(invalidateResponse.getInvalidatedTokens(), equalTo(2 * (numOfRequests))); + assertThat(invalidateResponse.getPreviouslyInvalidatedTokens(), equalTo(0)); + assertThat(invalidateResponse.getErrors().size(), equalTo(0)); } - public void testInvalidateAllTokensForRealm() throws Exception{ + public void testInvalidateAllTokensForRealm() throws Exception { + final RestHighLevelClient restClient = new TestRestHighLevelClient(); final int numOfRequests = randomIntBetween(5, 10); for (int i = 0; i < numOfRequests; i++) { - securityClient().prepareCreateToken() - .setGrantType("password") - .setUsername(SecuritySettingsSource.TEST_USER_NAME) - .setPassword(new SecureString(SecuritySettingsSourceField.TEST_PASSWORD.toCharArray())) - .get(); + restClient.security().createToken(CreateTokenRequest.passwordGrant(SecuritySettingsSource.TEST_USER_NAME, + SecuritySettingsSourceField.TEST_PASSWORD.toCharArray()), SECURITY_REQUEST_OPTIONS); } - Client client = client().filterWithHeader(Collections.singletonMap("Authorization", - UsernamePasswordToken.basicAuthHeaderValue(SecuritySettingsSource.TEST_SUPERUSER, - SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING))); - SecurityClient securityClientSuperuser = new SecurityClient(client); - InvalidateTokenResponse invalidateResponse = securityClientSuperuser - .prepareInvalidateToken() - .setRealmName("file") - .get(); - assertThat(invalidateResponse.getResult().getInvalidatedTokens().size(), equalTo(2 * (numOfRequests))); - assertThat(invalidateResponse.getResult().getPreviouslyInvalidatedTokens().size(), equalTo(0)); - assertThat(invalidateResponse.getResult().getErrors().size(), equalTo(0)); + InvalidateTokenResponse invalidateResponse = restClient.security().invalidateToken( + new InvalidateTokenRequest(null, null, "file", null), + RequestOptions.DEFAULT.toBuilder().addHeader("Authorization", + UsernamePasswordToken.basicAuthHeaderValue(SecuritySettingsSource.TEST_SUPERUSER, + SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING)).build()); + assertThat(invalidateResponse.getInvalidatedTokens(), equalTo(2 * (numOfRequests))); + assertThat(invalidateResponse.getPreviouslyInvalidatedTokens(), equalTo(0)); + assertThat(invalidateResponse.getErrors().size(), equalTo(0)); } - public void testInvalidateAllTokensForRealmThatHasNone() { + public void testInvalidateAllTokensForRealmThatHasNone() throws IOException { + final RestHighLevelClient restClient = new TestRestHighLevelClient(); final int numOfRequests = randomIntBetween(2, 4); for (int i = 0; i < numOfRequests; i++) { - securityClient().prepareCreateToken() - .setGrantType("password") - .setUsername(SecuritySettingsSource.TEST_USER_NAME) - .setPassword(new SecureString(SecuritySettingsSourceField.TEST_PASSWORD.toCharArray())) - .get(); + restClient.security().createToken(CreateTokenRequest.passwordGrant(SecuritySettingsSource.TEST_USER_NAME, + SecuritySettingsSourceField.TEST_PASSWORD.toCharArray()), SECURITY_REQUEST_OPTIONS); } - Client client = client().filterWithHeader(Collections.singletonMap("Authorization", - UsernamePasswordToken.basicAuthHeaderValue(SecuritySettingsSource.TEST_SUPERUSER, - SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING))); - SecurityClient securityClientSuperuser = new SecurityClient(client); - InvalidateTokenResponse invalidateResponse = securityClientSuperuser - .prepareInvalidateToken() - .setRealmName("saml") - .get(); - assertThat(invalidateResponse.getResult().getInvalidatedTokens().size(), equalTo(0)); - assertThat(invalidateResponse.getResult().getPreviouslyInvalidatedTokens().size(), equalTo(0)); - assertThat(invalidateResponse.getResult().getErrors().size(), equalTo(0)); + InvalidateTokenResponse invalidateResponse = restClient.security().invalidateToken( + new InvalidateTokenRequest(null, null, "saml", null), + RequestOptions.DEFAULT.toBuilder().addHeader("Authorization", + UsernamePasswordToken.basicAuthHeaderValue(SecuritySettingsSource.TEST_SUPERUSER, + SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING)).build()); + assertThat(invalidateResponse.getInvalidatedTokens(), equalTo(0)); + assertThat(invalidateResponse.getPreviouslyInvalidatedTokens(), equalTo(0)); + assertThat(invalidateResponse.getErrors().size(), equalTo(0)); } - public void testExpireMultipleTimes() { - CreateTokenResponse response = securityClient().prepareCreateToken() - .setGrantType("password") - .setUsername(SecuritySettingsSource.TEST_USER_NAME) - .setPassword(new SecureString(SecuritySettingsSourceField.TEST_PASSWORD.toCharArray())) - .get(); - - InvalidateTokenResponse invalidateResponse = securityClient() - .prepareInvalidateToken(response.getTokenString()) - .setType(InvalidateTokenRequest.Type.ACCESS_TOKEN) - .get(); - assertThat(invalidateResponse.getResult().getInvalidatedTokens().size(), equalTo(1)); - assertThat(invalidateResponse.getResult().getPreviouslyInvalidatedTokens().size(), equalTo(0)); - assertThat(invalidateResponse.getResult().getErrors().size(), equalTo(0)); - InvalidateTokenResponse invalidateAgainResponse = securityClient() - .prepareInvalidateToken(response.getTokenString()) - .setType(InvalidateTokenRequest.Type.ACCESS_TOKEN) - .get(); - assertThat(invalidateAgainResponse.getResult().getInvalidatedTokens().size(), equalTo(0)); - assertThat(invalidateAgainResponse.getResult().getPreviouslyInvalidatedTokens().size(), equalTo(1)); - assertThat(invalidateAgainResponse.getResult().getErrors().size(), equalTo(0)); + public void testExpireMultipleTimes() throws IOException { + final RestHighLevelClient restClient = new TestRestHighLevelClient(); + CreateTokenResponse response = restClient.security().createToken(CreateTokenRequest.passwordGrant( + SecuritySettingsSource.TEST_USER_NAME, SecuritySettingsSourceField.TEST_PASSWORD.toCharArray()), SECURITY_REQUEST_OPTIONS); + + InvalidateTokenResponse invalidateResponse = restClient.security() + .invalidateToken(new InvalidateTokenRequest(response.getAccessToken(), null, null, null), SECURITY_REQUEST_OPTIONS); + assertThat(invalidateResponse.getInvalidatedTokens(), equalTo(1)); + assertThat(invalidateResponse.getPreviouslyInvalidatedTokens(), equalTo(0)); + assertThat(invalidateResponse.getErrors().size(), equalTo(0)); + InvalidateTokenResponse invalidateAgainResponse = restClient.security() + .invalidateToken(new InvalidateTokenRequest(response.getAccessToken(), null, null, null), SECURITY_REQUEST_OPTIONS); + assertThat(invalidateAgainResponse.getInvalidatedTokens(), equalTo(0)); + assertThat(invalidateAgainResponse.getPreviouslyInvalidatedTokens(), equalTo(1)); + assertThat(invalidateAgainResponse.getErrors().size(), equalTo(0)); } - public void testRefreshingToken() { - Client client = client().filterWithHeader(Collections.singletonMap("Authorization", - UsernamePasswordToken.basicAuthHeaderValue(SecuritySettingsSource.TEST_USER_NAME, - SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING))); - SecurityClient securityClient = new SecurityClient(client); - CreateTokenResponse createTokenResponse = securityClient.prepareCreateToken() - .setGrantType("password") - .setUsername(SecuritySettingsSource.TEST_USER_NAME) - .setPassword(new SecureString(SecuritySettingsSourceField.TEST_PASSWORD.toCharArray())) - .get(); - assertNotNull(createTokenResponse.getRefreshToken()); + public void testRefreshingToken() throws IOException { + final RestHighLevelClient restClient = new TestRestHighLevelClient(); + CreateTokenResponse response = restClient.security().createToken(CreateTokenRequest.passwordGrant( + SecuritySettingsSource.TEST_USER_NAME, SecuritySettingsSourceField.TEST_PASSWORD.toCharArray()), SECURITY_REQUEST_OPTIONS); + assertNotNull(response.getRefreshToken()); // get cluster health with token assertNoTimeout(client() - .filterWithHeader(Collections.singletonMap("Authorization", "Bearer " + createTokenResponse.getTokenString())) + .filterWithHeader(Collections.singletonMap("Authorization", "Bearer " + response.getAccessToken())) .admin().cluster().prepareHealth().get()); - CreateTokenResponse refreshResponse = securityClient.prepareRefreshToken(createTokenResponse.getRefreshToken()).get(); + CreateTokenResponse refreshResponse = restClient.security() + .createToken(CreateTokenRequest.refreshTokenGrant(response.getRefreshToken()), SECURITY_REQUEST_OPTIONS); assertNotNull(refreshResponse.getRefreshToken()); - assertNotEquals(refreshResponse.getRefreshToken(), createTokenResponse.getRefreshToken()); - assertNotEquals(refreshResponse.getTokenString(), createTokenResponse.getTokenString()); + assertNotEquals(refreshResponse.getRefreshToken(), response.getRefreshToken()); + assertNotEquals(refreshResponse.getAccessToken(), response.getAccessToken()); - assertNoTimeout(client().filterWithHeader(Collections.singletonMap("Authorization", "Bearer " + refreshResponse.getTokenString())) + assertNoTimeout(client().filterWithHeader(Collections.singletonMap("Authorization", "Bearer " + refreshResponse.getAccessToken())) .admin().cluster().prepareHealth().get()); } - public void testRefreshingInvalidatedToken() { - Client client = client().filterWithHeader(Collections.singletonMap("Authorization", - UsernamePasswordToken.basicAuthHeaderValue(SecuritySettingsSource.TEST_USER_NAME, - SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING))); - SecurityClient securityClient = new SecurityClient(client); - CreateTokenResponse createTokenResponse = securityClient.prepareCreateToken() - .setGrantType("password") - .setUsername(SecuritySettingsSource.TEST_USER_NAME) - .setPassword(new SecureString(SecuritySettingsSourceField.TEST_PASSWORD.toCharArray())) - .get(); + public void testRefreshingInvalidatedToken() throws IOException { + final RestHighLevelClient restClient = new TestRestHighLevelClient(); + CreateTokenResponse createTokenResponse = restClient.security().createToken(CreateTokenRequest.passwordGrant( + SecuritySettingsSource.TEST_USER_NAME, SecuritySettingsSourceField.TEST_PASSWORD.toCharArray()), SECURITY_REQUEST_OPTIONS); assertNotNull(createTokenResponse.getRefreshToken()); - InvalidateTokenResponse invalidateResponse = securityClient - .prepareInvalidateToken(createTokenResponse.getRefreshToken()) - .setType(InvalidateTokenRequest.Type.REFRESH_TOKEN) - .get(); - assertThat(invalidateResponse.getResult().getInvalidatedTokens().size(), equalTo(1)); - assertThat(invalidateResponse.getResult().getPreviouslyInvalidatedTokens().size(), equalTo(0)); - assertThat(invalidateResponse.getResult().getErrors().size(), equalTo(0)); - - ElasticsearchSecurityException e = expectThrows(ElasticsearchSecurityException.class, - () -> securityClient.prepareRefreshToken(createTokenResponse.getRefreshToken()).get()); - assertEquals("invalid_grant", e.getMessage()); + InvalidateTokenResponse invalidateResponse = restClient.security() + .invalidateToken(new InvalidateTokenRequest(null, createTokenResponse.getRefreshToken(), null, null), SECURITY_REQUEST_OPTIONS); + assertThat(invalidateResponse.getInvalidatedTokens(), equalTo(1)); + assertThat(invalidateResponse.getPreviouslyInvalidatedTokens(), equalTo(0)); + assertThat(invalidateResponse.getErrors().size(), equalTo(0)); + + ElasticsearchStatusException e = expectThrows(ElasticsearchStatusException.class, + () -> restClient.security().createToken( + CreateTokenRequest.refreshTokenGrant(createTokenResponse.getRefreshToken()), SECURITY_REQUEST_OPTIONS)); + assertThat(e.getCause().getMessage(), containsString("invalid_grant")); assertEquals(RestStatus.BAD_REQUEST, e.status()); - assertEquals("token has been invalidated", e.getHeader("error_description").get(0)); + assertThat(e.getCause().getMessage(), containsString("token has been invalidated")); } public void testRefreshingMultipleTimesFails() throws Exception { - Client client = client().filterWithHeader(Collections.singletonMap("Authorization", - UsernamePasswordToken.basicAuthHeaderValue(SecuritySettingsSource.TEST_USER_NAME, - SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING))); - SecurityClient securityClient = new SecurityClient(client); - CreateTokenResponse createTokenResponse = securityClient.prepareCreateToken() - .setGrantType("password") - .setUsername(SecuritySettingsSource.TEST_USER_NAME) - .setPassword(new SecureString(SecuritySettingsSourceField.TEST_PASSWORD.toCharArray())) - .get(); + final RestHighLevelClient restClient = new TestRestHighLevelClient(); + CreateTokenResponse createTokenResponse = restClient.security().createToken(CreateTokenRequest.passwordGrant( + SecuritySettingsSource.TEST_USER_NAME, SecuritySettingsSourceField.TEST_PASSWORD.toCharArray()), SECURITY_REQUEST_OPTIONS); assertNotNull(createTokenResponse.getRefreshToken()); - CreateTokenResponse refreshResponse = securityClient.prepareRefreshToken(createTokenResponse.getRefreshToken()).get(); + + CreateTokenResponse refreshResponse = restClient.security() + .createToken(CreateTokenRequest.refreshTokenGrant(createTokenResponse.getRefreshToken()), SECURITY_REQUEST_OPTIONS); assertNotNull(refreshResponse); // We now have two documents, the original(now refreshed) token doc and the new one with the new access doc AtomicReference docId = new AtomicReference<>(); assertBusy(() -> { - SearchResponse searchResponse = client.prepareSearch(RestrictedIndicesNames.SECURITY_TOKENS_ALIAS) - .setSource(SearchSourceBuilder.searchSource() + SearchResponse searchResponse = restClient.search(new SearchRequest(RestrictedIndicesNames.SECURITY_TOKENS_ALIAS) + .source(SearchSourceBuilder.searchSource() .query(QueryBuilders.boolQuery() .must(QueryBuilders.termQuery("doc_type", TokenService.TOKEN_DOC_TYPE)) - .must(QueryBuilders.termQuery("refresh_token.refreshed", "true")))) - .setSize(1) - .setTerminateAfter(1) - .get(); + .must(QueryBuilders.termQuery("refresh_token.refreshed", "true"))) + .size(1) + .terminateAfter(1)), SECURITY_REQUEST_OPTIONS); assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L)); docId.set(searchResponse.getHits().getAt(0).getId()); }); @@ -373,34 +317,27 @@ public void testRefreshingMultipleTimesFails() throws Exception { Instant refreshed = Instant.now(); Instant aWhileAgo = refreshed.minus(50L, ChronoUnit.SECONDS); assertTrue(Instant.now().isAfter(aWhileAgo)); - UpdateResponse updateResponse = client.prepareUpdate(RestrictedIndicesNames.SECURITY_TOKENS_ALIAS, SINGLE_MAPPING_NAME, docId.get()) - .setDoc("refresh_token", Collections.singletonMap("refresh_time", aWhileAgo.toEpochMilli())) + UpdateResponse updateResponse = restClient.update(new UpdateRequest(RestrictedIndicesNames.SECURITY_TOKENS_ALIAS, docId.get()) + .doc("refresh_token", Collections.singletonMap("refresh_time", aWhileAgo.toEpochMilli())) .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) - .setFetchSource("refresh_token", Strings.EMPTY_STRING) - .get(); + .fetchSource("refresh_token", Strings.EMPTY_STRING), SECURITY_REQUEST_OPTIONS); assertNotNull(updateResponse); Map refreshTokenMap = (Map) updateResponse.getGetResult().sourceAsMap().get("refresh_token"); assertTrue( Instant.ofEpochMilli((long) refreshTokenMap.get("refresh_time")).isBefore(Instant.now().minus(30L, ChronoUnit.SECONDS))); - ElasticsearchSecurityException e = expectThrows(ElasticsearchSecurityException.class, - () -> securityClient.prepareRefreshToken(createTokenResponse.getRefreshToken()).get()); - assertEquals("invalid_grant", e.getMessage()); + ElasticsearchStatusException e = expectThrows(ElasticsearchStatusException.class, () -> restClient.security() + .createToken(CreateTokenRequest.refreshTokenGrant(createTokenResponse.getRefreshToken()), SECURITY_REQUEST_OPTIONS)); + assertThat(e.getCause().getMessage(), containsString("invalid_grant")); assertEquals(RestStatus.BAD_REQUEST, e.status()); - assertEquals("token has already been refreshed more than 30 seconds in the past", e.getHeader("error_description").get(0)); + assertThat(e.getCause().getMessage(), containsString("token has already been refreshed more than 30 seconds in the past")); } public void testRefreshingMultipleTimesWithinWindowSucceeds() throws Exception { final Clock clock = Clock.systemUTC(); - Client client = client().filterWithHeader(Collections.singletonMap("Authorization", - UsernamePasswordToken.basicAuthHeaderValue(SecuritySettingsSource.TEST_USER_NAME, - SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING))); - SecurityClient securityClient = new SecurityClient(client); final List tokens = Collections.synchronizedList(new ArrayList<>()); - CreateTokenResponse createTokenResponse = securityClient.prepareCreateToken() - .setGrantType("password") - .setUsername(SecuritySettingsSource.TEST_USER_NAME) - .setPassword(new SecureString(SecuritySettingsSourceField.TEST_PASSWORD.toCharArray())) - .get(); + final RestHighLevelClient restClient = new TestRestHighLevelClient(); + CreateTokenResponse createTokenResponse = restClient.security().createToken(CreateTokenRequest.passwordGrant( + SecuritySettingsSource.TEST_USER_NAME, SecuritySettingsSourceField.TEST_PASSWORD.toCharArray()), SECURITY_REQUEST_OPTIONS); assertNotNull(createTokenResponse.getRefreshToken()); final int numberOfProcessors = Runtime.getRuntime().availableProcessors(); final int numberOfThreads = scaledRandomIntBetween((numberOfProcessors + 1) / 2, numberOfProcessors * 3); @@ -411,13 +348,6 @@ public void testRefreshingMultipleTimesWithinWindowSucceeds() throws Exception { final Instant t1 = clock.instant(); for (int i = 0; i < numberOfThreads; i++) { threads.add(new Thread(() -> { - // Each thread gets its own client so that more than one nodes will be hit - Client threadClient = client().filterWithHeader(Collections.singletonMap("Authorization", - UsernamePasswordToken.basicAuthHeaderValue(SecuritySettingsSource.TEST_USER_NAME, - SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING))); - SecurityClient threadSecurityClient = new SecurityClient(threadClient); - CreateTokenRequest refreshRequest = - threadSecurityClient.prepareRefreshToken(createTokenResponse.getRefreshToken()).request(); readyLatch.countDown(); try { readyLatch.await(); @@ -426,21 +356,25 @@ public void testRefreshingMultipleTimesWithinWindowSucceeds() throws Exception { completedLatch.countDown(); return; } - threadSecurityClient.refreshToken(refreshRequest, ActionListener.wrap(result -> { + + try { + // safe to use same rest client across threads since it round robins between nodes + CreateTokenResponse result = restClient.security() + .createToken(CreateTokenRequest.refreshTokenGrant(createTokenResponse.getRefreshToken()), SECURITY_REQUEST_OPTIONS); final Instant t2 = clock.instant(); - if (t1.plusSeconds(30L).isBefore(t2)){ + if (t1.plusSeconds(30L).isBefore(t2)) { logger.warn("Tokens [{}], [{}] were received more than 30 seconds after the request, not checking them", - result.getTokenString(), result.getRefreshToken()); + result.getAccessToken(), result.getRefreshToken()); } else { - tokens.add(result.getTokenString() + result.getRefreshToken()); + tokens.add(result.getAccessToken() + result.getRefreshToken()); } - logger.info("received access token [{}] and refresh token [{}]", result.getTokenString(), result.getRefreshToken()); + logger.info("received access token [{}] and refresh token [{}]", result.getAccessToken(), result.getRefreshToken()); completedLatch.countDown(); - }, e -> { + } catch (IOException e) { failed.set(true); completedLatch.countDown(); logger.error("caught exception", e); - })); + } })); } for (Thread thread : threads) { @@ -457,102 +391,74 @@ public void testRefreshingMultipleTimesWithinWindowSucceeds() throws Exception { assertThat(tokens.stream().distinct().collect(Collectors.toList()).size(), equalTo(1)); } - public void testRefreshAsDifferentUser() { - Client client = client().filterWithHeader(Collections.singletonMap("Authorization", - UsernamePasswordToken.basicAuthHeaderValue(SecuritySettingsSource.TEST_USER_NAME, - SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING))); - SecurityClient securityClient = new SecurityClient(client); - CreateTokenResponse createTokenResponse = securityClient.prepareCreateToken() - .setGrantType("password") - .setUsername(SecuritySettingsSource.TEST_USER_NAME) - .setPassword(new SecureString(SecuritySettingsSourceField.TEST_PASSWORD.toCharArray())) - .get(); + public void testRefreshAsDifferentUser() throws IOException { + final RestHighLevelClient restClient = new TestRestHighLevelClient(); + CreateTokenResponse createTokenResponse = restClient.security().createToken(CreateTokenRequest.passwordGrant( + SecuritySettingsSource.TEST_USER_NAME, SecuritySettingsSourceField.TEST_PASSWORD.toCharArray()), SECURITY_REQUEST_OPTIONS); assertNotNull(createTokenResponse.getRefreshToken()); - ElasticsearchSecurityException e = expectThrows(ElasticsearchSecurityException.class, - () -> new SecurityClient(client() - .filterWithHeader(Collections.singletonMap("Authorization", - UsernamePasswordToken.basicAuthHeaderValue(SecuritySettingsSource.TEST_SUPERUSER, - SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING)))) - .prepareRefreshToken(createTokenResponse.getRefreshToken()).get()); - assertEquals("invalid_grant", e.getMessage()); + ElasticsearchStatusException e = expectThrows(ElasticsearchStatusException.class, + () -> restClient.security().createToken(CreateTokenRequest.refreshTokenGrant(createTokenResponse.getRefreshToken()), + RequestOptions.DEFAULT.toBuilder().addHeader("Authorization", + UsernamePasswordToken.basicAuthHeaderValue(SecuritySettingsSource.TEST_SUPERUSER, + SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING)).build())); + assertThat(e.getCause().getMessage(), containsString("invalid_grant")); assertEquals(RestStatus.BAD_REQUEST, e.status()); - assertEquals("tokens must be refreshed by the creating client", e.getHeader("error_description").get(0)); + assertThat(e.getCause().getMessage(), containsString("tokens must be refreshed by the creating client")); } - public void testCreateThenRefreshAsDifferentUser() { - Client client = client().filterWithHeader(Collections.singletonMap("Authorization", - UsernamePasswordToken.basicAuthHeaderValue(SecuritySettingsSource.TEST_SUPERUSER, - SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING))); - SecurityClient securityClient = new SecurityClient(client); - CreateTokenResponse createTokenResponse = securityClient.prepareCreateToken() - .setGrantType("password") - .setUsername(SecuritySettingsSource.TEST_USER_NAME) - .setPassword(new SecureString(SecuritySettingsSourceField.TEST_PASSWORD.toCharArray())) - .get(); + public void testCreateThenRefreshAsDifferentUser() throws IOException { + final RequestOptions superuserOptions = RequestOptions.DEFAULT.toBuilder().addHeader("Authorization", + UsernamePasswordToken.basicAuthHeaderValue(SecuritySettingsSource.TEST_SUPERUSER, + SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING)).build(); + final RestHighLevelClient restClient = new TestRestHighLevelClient(); + CreateTokenResponse createTokenResponse = restClient.security().createToken(CreateTokenRequest.passwordGrant( + SecuritySettingsSource.TEST_USER_NAME, SecuritySettingsSourceField.TEST_PASSWORD.toCharArray()), superuserOptions); assertNotNull(createTokenResponse.getRefreshToken()); - CreateTokenResponse refreshResponse = securityClient.prepareRefreshToken(createTokenResponse.getRefreshToken()).get(); - assertNotEquals(refreshResponse.getTokenString(), createTokenResponse.getTokenString()); + CreateTokenResponse refreshResponse = restClient.security() + .createToken(CreateTokenRequest.refreshTokenGrant(createTokenResponse.getRefreshToken()), superuserOptions); + assertNotEquals(refreshResponse.getAccessToken(), createTokenResponse.getAccessToken()); assertNotEquals(refreshResponse.getRefreshToken(), createTokenResponse.getRefreshToken()); - PlainActionFuture authFuture = new PlainActionFuture<>(); - AuthenticateRequest request = new AuthenticateRequest(); - request.username(SecuritySettingsSource.TEST_SUPERUSER); - client.execute(AuthenticateAction.INSTANCE, request, authFuture); - AuthenticateResponse response = authFuture.actionGet(); - assertEquals(SecuritySettingsSource.TEST_SUPERUSER, response.authentication().getUser().principal()); - - authFuture = new PlainActionFuture<>(); - request = new AuthenticateRequest(); - request.username(SecuritySettingsSource.TEST_USER_NAME); - client.filterWithHeader(Collections.singletonMap("Authorization", "Bearer " + createTokenResponse.getTokenString())) - .execute(AuthenticateAction.INSTANCE, request, authFuture); - response = authFuture.actionGet(); - assertEquals(SecuritySettingsSource.TEST_USER_NAME, response.authentication().getUser().principal()); - - authFuture = new PlainActionFuture<>(); - request = new AuthenticateRequest(); - request.username(SecuritySettingsSource.TEST_USER_NAME); - client.filterWithHeader(Collections.singletonMap("Authorization", "Bearer " + refreshResponse.getTokenString())) - .execute(AuthenticateAction.INSTANCE, request, authFuture); - response = authFuture.actionGet(); - assertEquals(SecuritySettingsSource.TEST_USER_NAME, response.authentication().getUser().principal()); + AuthenticateResponse response = restClient.security().authenticate(superuserOptions);; + assertEquals(SecuritySettingsSource.TEST_SUPERUSER, response.getUser().getUsername()); + + RequestOptions tokenAuthOptions = + RequestOptions.DEFAULT.toBuilder().addHeader("Authorization", "Bearer " + createTokenResponse.getAccessToken()).build(); + response = restClient.security().authenticate(tokenAuthOptions); + assertEquals(SecuritySettingsSource.TEST_USER_NAME, response.getUser().getUsername()); + + RequestOptions refreshedTokenAuthOptions = + RequestOptions.DEFAULT.toBuilder().addHeader("Authorization", "Bearer " + refreshResponse.getAccessToken()).build(); + response = restClient.security().authenticate(refreshedTokenAuthOptions); + assertEquals(SecuritySettingsSource.TEST_USER_NAME, response.getUser().getUsername()); } public void testClientCredentialsGrant() throws Exception { - Client client = client().filterWithHeader(Collections.singletonMap("Authorization", + final RequestOptions superuserOptions = RequestOptions.DEFAULT.toBuilder().addHeader("Authorization", UsernamePasswordToken.basicAuthHeaderValue(SecuritySettingsSource.TEST_SUPERUSER, - SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING))); - SecurityClient securityClient = new SecurityClient(client); - CreateTokenResponse createTokenResponse = securityClient.prepareCreateToken() - .setGrantType("client_credentials") - .get(); + SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING)).build(); + final RestHighLevelClient restClient = new TestRestHighLevelClient(); + CreateTokenResponse createTokenResponse = + restClient.security().createToken(CreateTokenRequest.clientCredentialsGrant(), superuserOptions); assertNull(createTokenResponse.getRefreshToken()); - AuthenticateRequest request = new AuthenticateRequest(); - request.username(SecuritySettingsSource.TEST_SUPERUSER); - PlainActionFuture authFuture = new PlainActionFuture<>(); - client.filterWithHeader(Collections.singletonMap("Authorization", "Bearer " + createTokenResponse.getTokenString())) - .execute(AuthenticateAction.INSTANCE, request, authFuture); - AuthenticateResponse response = authFuture.get(); - assertEquals(SecuritySettingsSource.TEST_SUPERUSER, response.authentication().getUser().principal()); + RequestOptions tokenAuthOptions = + RequestOptions.DEFAULT.toBuilder().addHeader("Authorization", "Bearer " + createTokenResponse.getAccessToken()).build(); + AuthenticateResponse response = restClient.security().authenticate(tokenAuthOptions); + assertEquals(SecuritySettingsSource.TEST_SUPERUSER, response.getUser().getUsername()); // invalidate - PlainActionFuture invalidateResponseFuture = new PlainActionFuture<>(); - InvalidateTokenRequest invalidateTokenRequest = - new InvalidateTokenRequest(createTokenResponse.getTokenString(), InvalidateTokenRequest.Type.ACCESS_TOKEN.getValue()); - securityClient.invalidateToken(invalidateTokenRequest, invalidateResponseFuture); - assertThat(invalidateResponseFuture.get().getResult().getInvalidatedTokens().size(), equalTo(1)); - assertThat(invalidateResponseFuture.get().getResult().getPreviouslyInvalidatedTokens().size(), equalTo(0)); - assertThat(invalidateResponseFuture.get().getResult().getErrors().size(), equalTo(0)); - - ElasticsearchSecurityException e = expectThrows(ElasticsearchSecurityException.class, () -> { - PlainActionFuture responseFuture = new PlainActionFuture<>(); - client.filterWithHeader(Collections.singletonMap("Authorization", "Bearer " + createTokenResponse.getTokenString())) - .execute(AuthenticateAction.INSTANCE, request, responseFuture); - responseFuture.actionGet(); - }); + InvalidateTokenResponse invalidateTokenResponse = restClient.security() + .invalidateToken(InvalidateTokenRequest.accessToken(createTokenResponse.getAccessToken()), superuserOptions); + assertThat(invalidateTokenResponse.getInvalidatedTokens(), equalTo(1)); + assertThat(invalidateTokenResponse.getPreviouslyInvalidatedTokens(), equalTo(0)); + assertThat(invalidateTokenResponse.getErrors().size(), equalTo(0)); + + ElasticsearchStatusException e = expectThrows(ElasticsearchStatusException.class, + () -> restClient.security().authenticate(tokenAuthOptions)); + assertEquals(RestStatus.UNAUTHORIZED, e.status()); } @Before diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/NativeRealmIntegTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/NativeRealmIntegTests.java index 7021873a02878..ac3367708e3ac 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/NativeRealmIntegTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/NativeRealmIntegTests.java @@ -30,20 +30,29 @@ import org.elasticsearch.xpack.core.action.XPackUsageRequestBuilder; import org.elasticsearch.xpack.core.action.XPackUsageResponse; import org.elasticsearch.xpack.core.security.SecurityFeatureSetUsage; +import org.elasticsearch.xpack.core.security.action.realm.ClearRealmCacheRequestBuilder; +import org.elasticsearch.xpack.core.security.action.role.ClearRolesCacheRequestBuilder; +import org.elasticsearch.xpack.core.security.action.role.DeleteRoleRequestBuilder; import org.elasticsearch.xpack.core.security.action.role.DeleteRoleResponse; +import org.elasticsearch.xpack.core.security.action.role.GetRolesRequestBuilder; import org.elasticsearch.xpack.core.security.action.role.GetRolesResponse; +import org.elasticsearch.xpack.core.security.action.role.PutRoleRequestBuilder; import org.elasticsearch.xpack.core.security.action.role.PutRoleResponse; import org.elasticsearch.xpack.core.security.action.user.AuthenticateAction; import org.elasticsearch.xpack.core.security.action.user.AuthenticateRequest; import org.elasticsearch.xpack.core.security.action.user.AuthenticateResponse; +import org.elasticsearch.xpack.core.security.action.user.ChangePasswordRequestBuilder; import org.elasticsearch.xpack.core.security.action.user.ChangePasswordResponse; +import org.elasticsearch.xpack.core.security.action.user.DeleteUserRequestBuilder; import org.elasticsearch.xpack.core.security.action.user.DeleteUserResponse; +import org.elasticsearch.xpack.core.security.action.user.GetUsersRequestBuilder; import org.elasticsearch.xpack.core.security.action.user.GetUsersResponse; +import org.elasticsearch.xpack.core.security.action.user.PutUserRequestBuilder; +import org.elasticsearch.xpack.core.security.action.user.SetEnabledRequestBuilder; import org.elasticsearch.xpack.core.security.authc.support.Hasher; import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; import org.elasticsearch.xpack.core.security.authz.permission.Role; import org.elasticsearch.xpack.core.security.authz.store.ReservedRolesStore; -import org.elasticsearch.xpack.core.security.client.SecurityClient; import org.elasticsearch.xpack.core.security.index.RestrictedIndicesNames; import org.elasticsearch.xpack.core.security.user.AnonymousUser; import org.elasticsearch.xpack.core.security.user.ElasticUser; @@ -66,8 +75,8 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoTimeout; import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue; -import static org.elasticsearch.xpack.core.security.index.RestrictedIndicesNames.SECURITY_MAIN_ALIAS; import static org.elasticsearch.xpack.core.security.index.RestrictedIndicesNames.INTERNAL_SECURITY_MAIN_INDEX_7; +import static org.elasticsearch.xpack.core.security.index.RestrictedIndicesNames.SECURITY_MAIN_ALIAS; import static org.hamcrest.Matchers.arrayContaining; import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.containsString; @@ -108,11 +117,11 @@ public void setupAnonymousRoleIfNecessary() throws Exception { if (anonymousEnabled) { if (roleExists) { logger.info("anonymous is enabled. creating [native_anonymous] role"); - PutRoleResponse response = securityClient() - .preparePutRole("native_anonymous") - .cluster("ALL") - .addIndices(new String[]{"*"}, new String[]{"ALL"}, null, null, null, randomBoolean()) - .get(); + PutRoleResponse response = new PutRoleRequestBuilder(client()) + .name("native_anonymous") + .cluster("ALL") + .addIndices(new String[]{"*"}, new String[]{"ALL"}, null, null, null, randomBoolean()) + .get(); assertTrue(response.isCreated()); } else { logger.info("anonymous is enabled, but configured with a missing role"); @@ -121,42 +130,39 @@ public void setupAnonymousRoleIfNecessary() throws Exception { } public void testDeletingNonexistingUserAndRole() throws Exception { - SecurityClient c = securityClient(); // first create the index so it exists - c.preparePutUser("joe", "s3kirt".toCharArray(), hasher, "role1", "user").get(); - DeleteUserResponse resp = c.prepareDeleteUser("missing").get(); + new PutUserRequestBuilder(client()).username("joe").password("s3kirt".toCharArray(), hasher).roles("role1", "user").get(); + DeleteUserResponse resp = new DeleteUserRequestBuilder(client()).username("missing").get(); assertFalse("user shouldn't be found", resp.found()); - DeleteRoleResponse resp2 = c.prepareDeleteRole("role").get(); + DeleteRoleResponse resp2 = new DeleteRoleRequestBuilder(client()).name("role").get(); assertFalse("role shouldn't be found", resp2.found()); } public void testGettingUserThatDoesntExist() throws Exception { - SecurityClient c = securityClient(); - GetUsersResponse resp = c.prepareGetUsers("joe").get(); + GetUsersResponse resp = new GetUsersRequestBuilder(client()).usernames("joe").get(); assertFalse("user should not exist", resp.hasUsers()); - GetRolesResponse resp2 = c.prepareGetRoles().names("role").get(); + GetRolesResponse resp2 = new GetRolesRequestBuilder(client()).names("role").get(); assertFalse("role should not exist", resp2.hasRoles()); } public void testAddAndGetUser() throws Exception { - SecurityClient c = securityClient(); - final List existingUsers = Arrays.asList(c.prepareGetUsers().get().users()); + final List existingUsers = Arrays.asList(new GetUsersRequestBuilder(client()).get().users()); final int existing = existingUsers.size(); logger.error("--> creating user"); - c.preparePutUser("joe", "s3kirt".toCharArray(), hasher, "role1", "user").get(); + preparePutUser("joe", "s3kirt".toCharArray(), hasher, "role1", "user").get(); logger.error("--> waiting for .security index"); ensureGreen(SECURITY_MAIN_ALIAS); logger.info("--> retrieving user"); - GetUsersResponse resp = c.prepareGetUsers("joe").get(); + GetUsersResponse resp = new GetUsersRequestBuilder(client()).usernames("joe").get(); assertTrue("user should exist", resp.hasUsers()); User joe = resp.users()[0]; assertEquals("joe", joe.principal()); assertArrayEquals(joe.roles(), new String[]{"role1", "user"}); logger.info("--> adding two more users"); - c.preparePutUser("joe2", "s3kirt2".toCharArray(), hasher, "role2", "user").get(); - c.preparePutUser("joe3", "s3kirt3".toCharArray(), hasher, "role3", "user").get(); - GetUsersResponse allUsersResp = c.prepareGetUsers().get(); + preparePutUser("joe2", "s3kirt2".toCharArray(), hasher, "role2", "user").get(); + preparePutUser("joe3", "s3kirt3".toCharArray(), hasher, "role3", "user").get(); + GetUsersResponse allUsersResp = new GetUsersRequestBuilder(client()).get(); assertTrue("users should exist", allUsersResp.hasUsers()); assertEquals("should be " + (3 + existing) + " users total", 3 + existing, allUsersResp.users().length); List names = new ArrayList<>(3); @@ -168,7 +174,7 @@ public void testAddAndGetUser() throws Exception { CollectionUtil.timSort(names); assertArrayEquals(new String[] { "joe", "joe2", "joe3" }, names.toArray(Strings.EMPTY_ARRAY)); - GetUsersResponse someUsersResp = c.prepareGetUsers("joe", "joe3").get(); + GetUsersResponse someUsersResp = new GetUsersRequestBuilder(client()).usernames("joe", "joe3").get(); assertTrue("users should exist", someUsersResp.hasUsers()); assertEquals("should be 2 users returned", 2, someUsersResp.users().length); names = new ArrayList<>(2); @@ -179,20 +185,19 @@ public void testAddAndGetUser() throws Exception { assertArrayEquals(new String[]{"joe", "joe3"}, names.toArray(Strings.EMPTY_ARRAY)); logger.info("--> deleting user"); - DeleteUserResponse delResp = c.prepareDeleteUser("joe").get(); + DeleteUserResponse delResp = new DeleteUserRequestBuilder(client()).username("joe").get(); assertTrue(delResp.found()); logger.info("--> retrieving user"); - resp = c.prepareGetUsers("joe").get(); + resp = new GetUsersRequestBuilder(client()).usernames("joe").get(); assertFalse("user should not exist after being deleted", resp.hasUsers()); } public void testAddAndGetRole() throws Exception { - SecurityClient c = securityClient(); - final List existingRoles = Arrays.asList(c.prepareGetRoles().get().roles()); + final List existingRoles = Arrays.asList(new GetRolesRequestBuilder(client()).get().roles()); final int existing = existingRoles.size(); final Map metadata = Collections.singletonMap("key", randomAlphaOfLengthBetween(1, 10)); logger.error("--> creating role"); - c.preparePutRole("test_role") + preparePutRole("test_role") .cluster("all", "none") .runAs("root", "nobody") .addIndices(new String[]{"index"}, new String[]{"read"}, new String[]{"body", "title"}, null, @@ -202,20 +207,20 @@ public void testAddAndGetRole() throws Exception { logger.error("--> waiting for .security index"); ensureGreen(SECURITY_MAIN_ALIAS); logger.info("--> retrieving role"); - GetRolesResponse resp = c.prepareGetRoles().names("test_role").get(); + GetRolesResponse resp = new GetRolesRequestBuilder(client()).names("test_role").get(); assertTrue("role should exist", resp.hasRoles()); RoleDescriptor testRole = resp.roles()[0]; assertNotNull(testRole); assertThat(testRole.getMetadata().size(), is(1)); assertThat(testRole.getMetadata().get("key"), is(metadata.get("key"))); - c.preparePutRole("test_role2") + preparePutRole("test_role2") .cluster("all", "none") .runAs("root", "nobody") .addIndices(new String[]{"index"}, new String[]{"read"}, new String[]{"body", "title"}, null, new BytesArray("{\"query\": {\"match_all\": {}}}"), randomBoolean()) .get(); - c.preparePutRole("test_role3") + preparePutRole("test_role3") .cluster("all", "none") .runAs("root", "nobody") .addIndices(new String[]{"index"}, new String[]{"read"}, new String[]{"body", "title"}, null, @@ -223,37 +228,36 @@ public void testAddAndGetRole() throws Exception { .get(); logger.info("--> retrieving all roles"); - GetRolesResponse allRolesResp = c.prepareGetRoles().get(); + GetRolesResponse allRolesResp = new GetRolesRequestBuilder(client()).get(); assertTrue("roles should exist", allRolesResp.hasRoles()); assertEquals("should be " + (3 + existing) + " roles total", 3 + existing, allRolesResp.roles().length); logger.info("--> retrieving test_role and test_role3"); - GetRolesResponse someRolesResp = c.prepareGetRoles().names("test_role", "test_role3").get(); + GetRolesResponse someRolesResp = new GetRolesRequestBuilder(client()).names("test_role", "test_role3").get(); assertTrue("roles should exist", someRolesResp.hasRoles()); assertEquals("should be 2 roles total", 2, someRolesResp.roles().length); logger.info("--> deleting role"); - DeleteRoleResponse delResp = c.prepareDeleteRole("test_role").get(); + DeleteRoleResponse delResp = new DeleteRoleRequestBuilder(client()).name("test_role").get(); assertTrue(delResp.found()); logger.info("--> retrieving role"); - GetRolesResponse resp2 = c.prepareGetRoles().names("test_role").get(); + GetRolesResponse resp2 = new GetRolesRequestBuilder(client()).names("test_role").get(); assertFalse("role should not exist after being deleted", resp2.hasRoles()); } public void testAddUserAndRoleThenAuth() throws Exception { - SecurityClient c = securityClient(); logger.error("--> creating role"); - c.preparePutRole("test_role") + preparePutRole("test_role") .cluster("all") .addIndices(new String[] { "*" }, new String[] { "read" }, new String[]{"body", "title"}, null, new BytesArray("{\"match_all\": {}}"), randomBoolean()) .get(); logger.error("--> creating user"); - c.preparePutUser("joe", "s3krit".toCharArray(), hasher, "test_role").get(); + preparePutUser("joe", "s3krit".toCharArray(), hasher, "test_role").get(); logger.error("--> waiting for .security index"); ensureGreen(SECURITY_MAIN_ALIAS); logger.info("--> retrieving user"); - GetUsersResponse resp = c.prepareGetUsers("joe").get(); + GetUsersResponse resp = new GetUsersRequestBuilder(client()).usernames("joe").get(); assertTrue("user should exist", resp.hasUsers()); createIndex("idx"); @@ -268,13 +272,12 @@ public void testAddUserAndRoleThenAuth() throws Exception { } public void testUpdatingUserAndAuthentication() throws Exception { - SecurityClient c = securityClient(); logger.error("--> creating user"); - c.preparePutUser("joe", "s3krit".toCharArray(), hasher, SecuritySettingsSource.TEST_ROLE).get(); + preparePutUser("joe", "s3krit".toCharArray(), hasher, SecuritySettingsSource.TEST_ROLE).get(); logger.error("--> waiting for .security index"); ensureGreen(SECURITY_MAIN_ALIAS); logger.info("--> retrieving user"); - GetUsersResponse resp = c.prepareGetUsers("joe").get(); + GetUsersResponse resp = new GetUsersRequestBuilder(client()).usernames("joe").get(); assertTrue("user should exist", resp.hasUsers()); assertThat(resp.users()[0].roles(), arrayContaining(SecuritySettingsSource.TEST_ROLE)); @@ -287,7 +290,7 @@ public void testUpdatingUserAndAuthentication() throws Exception { assertEquals(1L, searchResp.getHits().getTotalHits().value); - c.preparePutUser("joe", "s3krit2".toCharArray(), hasher, SecuritySettingsSource.TEST_ROLE).get(); + preparePutUser("joe", "s3krit2".toCharArray(), hasher, SecuritySettingsSource.TEST_ROLE).get(); try { client().filterWithHeader(Collections.singletonMap("Authorization", token)).prepareSearch("idx").get(); @@ -303,14 +306,13 @@ public void testUpdatingUserAndAuthentication() throws Exception { } public void testCreateDeleteAuthenticate() { - SecurityClient c = securityClient(); logger.error("--> creating user"); - c.preparePutUser("joe", "s3krit".toCharArray(), hasher, + preparePutUser("joe", "s3krit".toCharArray(), hasher, SecuritySettingsSource.TEST_ROLE).get(); logger.error("--> waiting for .security index"); ensureGreen(SECURITY_MAIN_ALIAS); logger.info("--> retrieving user"); - GetUsersResponse resp = c.prepareGetUsers("joe").get(); + GetUsersResponse resp = new GetUsersRequestBuilder(client()).usernames("joe").get(); assertTrue("user should exist", resp.hasUsers()); assertThat(resp.users()[0].roles(), arrayContaining(SecuritySettingsSource.TEST_ROLE)); @@ -323,7 +325,7 @@ public void testCreateDeleteAuthenticate() { assertEquals(1L, searchResp.getHits().getTotalHits().value); - DeleteUserResponse response = c.prepareDeleteUser("joe").get(); + DeleteUserResponse response = new DeleteUserRequestBuilder(client()).username("joe").get(); assertThat(response.found(), is(true)); try { client().filterWithHeader(Collections.singletonMap("Authorization", token)).prepareSearch("idx").get(); @@ -336,15 +338,14 @@ public void testCreateDeleteAuthenticate() { public void testCreateAndUpdateRole() { final boolean authenticate = randomBoolean(); - SecurityClient c = securityClient(); logger.error("--> creating role"); - c.preparePutRole("test_role") + preparePutRole("test_role") .cluster("all") .addIndices(new String[]{"*"}, new String[]{"read"}, new String[]{"body", "title"}, null, new BytesArray("{\"match_all\": {}}"), randomBoolean()) .get(); logger.error("--> creating user"); - c.preparePutUser("joe", "s3krit".toCharArray(), hasher, "test_role").get(); + preparePutUser("joe", "s3krit".toCharArray(), hasher, "test_role").get(); logger.error("--> waiting for .security index"); ensureGreen(SECURITY_MAIN_ALIAS); @@ -353,7 +354,7 @@ public void testCreateAndUpdateRole() { ClusterHealthResponse response = client().filterWithHeader(Collections.singletonMap("Authorization", token)).admin().cluster() .prepareHealth().get(); assertFalse(response.isTimedOut()); - c.preparePutRole("test_role") + preparePutRole("test_role") .cluster("none") .addIndices(new String[]{"*"}, new String[]{"read"}, new String[]{"body", "title"}, null, new BytesArray("{\"match_all\": {}}"), randomBoolean()) @@ -368,17 +369,17 @@ public void testCreateAndUpdateRole() { } } else { final TransportRequest request = mock(TransportRequest.class); - GetRolesResponse getRolesResponse = c.prepareGetRoles().names("test_role").get(); + GetRolesResponse getRolesResponse = new GetRolesRequestBuilder(client()).names("test_role").get(); assertTrue("test_role does not exist!", getRolesResponse.hasRoles()); assertTrue("any cluster permission should be authorized", Role.builder(getRolesResponse.roles()[0], null).build().cluster().check("cluster:admin/foo", request)); - c.preparePutRole("test_role") + preparePutRole("test_role") .cluster("none") .addIndices(new String[]{"*"}, new String[]{"read"}, new String[]{"body", "title"}, null, new BytesArray("{\"match_all\": {}}"), randomBoolean()) .get(); - getRolesResponse = c.prepareGetRoles().names("test_role").get(); + getRolesResponse = new GetRolesRequestBuilder(client()).names("test_role").get(); assertTrue("test_role does not exist!", getRolesResponse.hasRoles()); assertFalse("no cluster permission should be authorized", @@ -388,12 +389,12 @@ public void testCreateAndUpdateRole() { public void testSnapshotDeleteRestore() { logger.error("--> creating role"); - securityClient().preparePutRole("test_role") - .cluster("all") - .addIndices(new String[]{"*"}, new String[]{"create_index"}, null, null, null, true) - .get(); + preparePutRole("test_role") + .cluster("all") + .addIndices(new String[]{"*"}, new String[]{"create_index"}, null, null, null, true) + .get(); logger.error("--> creating user"); - securityClient().preparePutUser("joe", "s3krit".toCharArray(), hasher, "test_role", "snapshot_user").get(); + preparePutUser("joe", "s3krit".toCharArray(), hasher, "test_role", "snapshot_user").get(); logger.error("--> waiting for .security index"); ensureGreen(SECURITY_MAIN_ALIAS); logger.info("--> creating repository"); @@ -415,15 +416,15 @@ public void testSnapshotDeleteRestore() { assertThat(snapshotInfo.indices(), contains(INTERNAL_SECURITY_MAIN_INDEX_7)); deleteSecurityIndex(); // the realm cache should clear itself but we don't wish to race it - securityClient().prepareClearRealmCache().get(); + new ClearRolesCacheRequestBuilder(client()).get(); // authn fails final ElasticsearchSecurityException e = expectThrows(ElasticsearchSecurityException.class, () -> client() .filterWithHeader(Collections.singletonMap("Authorization", token)).admin().indices().prepareCreate("idx").get()); assertThat(e.status(), is(RestStatus.UNAUTHORIZED)); // users and roles are missing - GetUsersResponse getUsersResponse = securityClient().prepareGetUsers("joe").get(); + GetUsersResponse getUsersResponse = new GetUsersRequestBuilder(client()).usernames("joe").get(); assertThat(getUsersResponse.users().length, is(0)); - GetRolesResponse getRolesResponse = securityClient().prepareGetRoles("test_role").get(); + GetRolesResponse getRolesResponse = new GetRolesRequestBuilder(client()).names("test_role").get(); assertThat(getRolesResponse.roles().length, is(0)); // restore RestoreSnapshotResponse response = client().admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap-1") @@ -431,12 +432,12 @@ public void testSnapshotDeleteRestore() { assertThat(response.status(), equalTo(RestStatus.OK)); assertThat(response.getRestoreInfo().indices(), contains(RestrictedIndicesNames.INTERNAL_SECURITY_MAIN_INDEX_7)); // the realm cache should clear itself but we don't wish to race it - securityClient().prepareClearRealmCache().get(); + new ClearRealmCacheRequestBuilder(client()).get(); // users and roles are retrievable - getUsersResponse = securityClient().prepareGetUsers("joe").get(); + getUsersResponse = new GetUsersRequestBuilder(client()).usernames("joe").get(); assertThat(getUsersResponse.users().length, is(1)); assertThat(Arrays.asList(getUsersResponse.users()[0].roles()), contains("test_role", "snapshot_user")); - getRolesResponse = securityClient().prepareGetRoles("test_role").get(); + getRolesResponse = new GetRolesRequestBuilder(client()).names("test_role").get(); assertThat(getRolesResponse.roles().length, is(1)); assertThat(Arrays.asList(getRolesResponse.roles()[0].getClusterPrivileges()), contains("all")); assertThat(getRolesResponse.roles()[0].getIndicesPrivileges().length, is(1)); @@ -450,14 +451,13 @@ public void testSnapshotDeleteRestore() { } public void testAuthenticateWithDeletedRole() { - SecurityClient c = securityClient(); logger.error("--> creating role"); - c.preparePutRole("test_role") + preparePutRole("test_role") .cluster("all") .addIndices(new String[]{"*"}, new String[]{"read"}, new String[]{"body", "title"}, null, new BytesArray("{\"match_all\": {}}"), randomBoolean()) .get(); - c.preparePutUser("joe", "s3krit".toCharArray(), hasher, "test_role").get(); + preparePutUser("joe", "s3krit".toCharArray(), hasher, "test_role").get(); logger.error("--> waiting for .security index"); ensureGreen(SECURITY_MAIN_ALIAS); @@ -465,7 +465,7 @@ public void testAuthenticateWithDeletedRole() { ClusterHealthResponse response = client().filterWithHeader(Collections.singletonMap("Authorization", token)).admin().cluster() .prepareHealth().get(); assertFalse(response.isTimedOut()); - c.prepareDeleteRole("test_role").get(); + new DeleteRoleRequestBuilder(client()).name("test_role").get(); if (anonymousEnabled && roleExists) { assertNoTimeout( client().filterWithHeader(Collections.singletonMap("Authorization", token)).admin().cluster().prepareHealth().get()); @@ -477,40 +477,39 @@ public void testAuthenticateWithDeletedRole() { } public void testPutUserWithoutPassword() { - SecurityClient client = securityClient(); // create some roles - client.preparePutRole("admin_role") + preparePutRole("admin_role") .cluster("all") .addIndices(new String[]{"*"}, new String[]{"all"}, null, null, null, randomBoolean()) .get(); - client.preparePutRole("read_role") + preparePutRole("read_role") .cluster("none") .addIndices(new String[]{"*"}, new String[]{"read"}, null, null, null, randomBoolean()) .get(); - assertThat(client.prepareGetUsers("joes").get().hasUsers(), is(false)); + assertThat(new GetUsersRequestBuilder(client()).usernames("joes").get().hasUsers(), is(false)); // check that putting a user without a password fails if the user doesn't exist try { - client.preparePutUser("joe", null, hasher, "admin_role").get(); + preparePutUser("joe", null, hasher, "admin_role").get(); fail("cannot create a user without a password"); } catch (IllegalArgumentException e) { assertThat(e.getMessage(), containsString("password must be specified")); } - assertThat(client.prepareGetUsers("joes").get().hasUsers(), is(false)); + assertThat(new GetUsersRequestBuilder(client()).usernames("joes").get().hasUsers(), is(false)); // create joe with a password and verify the user works - client.preparePutUser("joe", SecuritySettingsSourceField.TEST_PASSWORD.toCharArray(), + preparePutUser("joe", SecuritySettingsSourceField.TEST_PASSWORD.toCharArray(), hasher, "admin_role").get(); - assertThat(client.prepareGetUsers("joe").get().hasUsers(), is(true)); + assertThat(new GetUsersRequestBuilder(client()).usernames("joe").get().hasUsers(), is(true)); final String token = basicAuthHeaderValue("joe", SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING); ClusterHealthResponse response = client().filterWithHeader(Collections.singletonMap("Authorization", token)).admin().cluster() .prepareHealth().get(); assertFalse(response.isTimedOut()); // modify joe without sending the password - client.preparePutUser("joe", null, hasher, "read_role").fullName("Joe Smith").get(); - GetUsersResponse getUsersResponse = client.prepareGetUsers("joe").get(); + preparePutUser("joe", null, hasher, "read_role").fullName("Joe Smith").get(); + GetUsersResponse getUsersResponse = new GetUsersRequestBuilder(client()).usernames("joe").get(); assertThat(getUsersResponse.hasUsers(), is(true)); assertThat(getUsersResponse.users().length, is(1)); User joe = getUsersResponse.users()[0]; @@ -530,9 +529,9 @@ public void testPutUserWithoutPassword() { // update the user with password and admin role again String secondPassword = SecuritySettingsSourceField.TEST_PASSWORD + "2"; - client.preparePutUser("joe", secondPassword.toCharArray(), hasher, "admin_role"). + preparePutUser("joe", secondPassword.toCharArray(), hasher, "admin_role"). fullName("Joe Smith").get(); - getUsersResponse = client.prepareGetUsers("joe").get(); + getUsersResponse = new GetUsersRequestBuilder(client()).usernames("joe").get(); assertThat(getUsersResponse.hasUsers(), is(true)); assertThat(getUsersResponse.users().length, is(1)); joe = getUsersResponse.users()[0]; @@ -557,9 +556,8 @@ public void testPutUserWithoutPassword() { } public void testCannotCreateUserWithShortPassword() throws Exception { - SecurityClient client = securityClient(); try { - client.preparePutUser("joe", randomAlphaOfLengthBetween(0, 5).toCharArray(), hasher, + preparePutUser("joe", randomAlphaOfLengthBetween(0, 5).toCharArray(), hasher, "admin_role").get(); fail("cannot create a user without a password < 6 characters"); } catch (IllegalArgumentException v) { @@ -568,9 +566,8 @@ public void testCannotCreateUserWithShortPassword() throws Exception { } public void testCannotCreateUserWithInvalidCharactersInName() throws Exception { - SecurityClient client = securityClient(); IllegalArgumentException v = expectThrows(IllegalArgumentException.class, - () -> client.preparePutUser("fóóbár", "my-am@zing-password".toCharArray(), hasher, + () -> preparePutUser("fóóbár", "my-am@zing-password".toCharArray(), hasher, "admin_role").get() ); assertThat(v.getMessage(), containsString("names must be")); @@ -579,15 +576,14 @@ public void testCannotCreateUserWithInvalidCharactersInName() throws Exception { public void testUsersAndRolesDoNotInterfereWithIndicesStats() throws Exception { client().prepareIndex("foo", "bar").setSource("ignore", "me").get(); - SecurityClient client = securityClient(); if (randomBoolean()) { - client.preparePutUser("joe", "s3krit".toCharArray(), hasher, + preparePutUser("joe", "s3krit".toCharArray(), hasher, SecuritySettingsSource.TEST_ROLE).get(); } else { - client.preparePutRole("read_role") - .cluster("none") - .addIndices(new String[]{"*"}, new String[]{"read"}, null, null, null, randomBoolean()) - .get(); + preparePutRole("read_role") + .cluster("none") + .addIndices(new String[]{"*"}, new String[]{"read"}, null, null, null, randomBoolean()) + .get(); } IndicesStatsResponse response = client().admin().indices().prepareStats("foo", SECURITY_MAIN_ALIAS).get(); @@ -601,43 +597,43 @@ public void testUsersAndRolesDoNotInterfereWithIndicesStats() throws Exception { public void testOperationsOnReservedUsers() throws Exception { final String username = randomFrom(ElasticUser.NAME, KibanaUser.NAME); IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, - () -> securityClient().preparePutUser(username, randomBoolean() ? SecuritySettingsSourceField.TEST_PASSWORD.toCharArray() + () -> preparePutUser(username, randomBoolean() ? SecuritySettingsSourceField.TEST_PASSWORD.toCharArray() : null, hasher, "admin").get()); assertThat(exception.getMessage(), containsString("user [" + username + "] is reserved")); exception = expectThrows(IllegalArgumentException.class, - () -> securityClient().prepareDeleteUser(username).get()); + () -> new DeleteUserRequestBuilder(client()).username(username).get()); assertThat(exception.getMessage(), containsString("user [" + username + "] is reserved")); exception = expectThrows(IllegalArgumentException.class, - () -> securityClient().prepareDeleteUser(AnonymousUser.DEFAULT_ANONYMOUS_USERNAME).get()); + () -> new DeleteUserRequestBuilder(client()).username(AnonymousUser.DEFAULT_ANONYMOUS_USERNAME).get()); assertThat(exception.getMessage(), containsString("user [" + AnonymousUser.DEFAULT_ANONYMOUS_USERNAME + "] is anonymous")); exception = expectThrows(IllegalArgumentException.class, - () -> securityClient().prepareChangePassword(AnonymousUser.DEFAULT_ANONYMOUS_USERNAME, "foobar".toCharArray(), - hasher).get()); + () -> new ChangePasswordRequestBuilder(client()).username(AnonymousUser.DEFAULT_ANONYMOUS_USERNAME) + .password("foobar".toCharArray(), hasher).get()); assertThat(exception.getMessage(), containsString("user [" + AnonymousUser.DEFAULT_ANONYMOUS_USERNAME + "] is anonymous")); exception = expectThrows(IllegalArgumentException.class, - () -> securityClient().preparePutUser(AnonymousUser.DEFAULT_ANONYMOUS_USERNAME, "foobar".toCharArray(), + () -> preparePutUser(AnonymousUser.DEFAULT_ANONYMOUS_USERNAME, "foobar".toCharArray(), hasher).get()); assertThat(exception.getMessage(), containsString("user [" + AnonymousUser.DEFAULT_ANONYMOUS_USERNAME + "] is anonymous")); exception = expectThrows(IllegalArgumentException.class, - () -> securityClient().preparePutUser(SystemUser.NAME, "foobar".toCharArray(), hasher).get()); + () -> preparePutUser(SystemUser.NAME, "foobar".toCharArray(), hasher).get()); assertThat(exception.getMessage(), containsString("user [" + SystemUser.NAME + "] is internal")); exception = expectThrows(IllegalArgumentException.class, - () -> securityClient().prepareChangePassword(SystemUser.NAME, "foobar".toCharArray(), - hasher).get()); + () -> new ChangePasswordRequestBuilder(client()).username(SystemUser.NAME) + .password("foobar".toCharArray(), hasher).get()); assertThat(exception.getMessage(), containsString("user [" + SystemUser.NAME + "] is internal")); exception = expectThrows(IllegalArgumentException.class, - () -> securityClient().prepareDeleteUser(SystemUser.NAME).get()); + () -> new DeleteUserRequestBuilder(client()).username(SystemUser.NAME).get()); assertThat(exception.getMessage(), containsString("user [" + SystemUser.NAME + "] is internal")); // get should work - GetUsersResponse response = securityClient().prepareGetUsers(username).get(); + GetUsersResponse response = new GetUsersRequestBuilder(client()).usernames(username).get(); assertThat(response.hasUsers(), is(true)); assertThat(response.users()[0].principal(), is(username)); @@ -656,30 +652,30 @@ public void testOperationsOnReservedUsers() throws Exception { public void testOperationsOnReservedRoles() throws Exception { final String name = randomFrom(ReservedRolesStore.names()); IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, - () -> securityClient().preparePutRole(name).cluster("monitor").get()); + () -> preparePutRole(name).cluster("monitor").get()); assertThat(exception.getMessage(), containsString("role [" + name + "] is reserved")); exception = expectThrows(IllegalArgumentException.class, - () -> securityClient().prepareDeleteRole(name).get()); + () -> new DeleteRoleRequestBuilder(client()).name(name).get()); assertThat(exception.getMessage(), containsString("role [" + name + "] is reserved")); // get role is allowed - GetRolesResponse response = securityClient().prepareGetRoles(name).get(); + GetRolesResponse response = new GetRolesRequestBuilder(client()).names(name).get(); assertThat(response.hasRoles(), is(true)); assertThat(response.roles()[0].getName(), is(name)); } public void testCreateAndChangePassword() throws Exception { - securityClient().preparePutUser("joe", "s3krit".toCharArray(), hasher, + preparePutUser("joe", "s3krit".toCharArray(), hasher, SecuritySettingsSource.TEST_ROLE).get(); final String token = basicAuthHeaderValue("joe", new SecureString("s3krit".toCharArray())); ClusterHealthResponse response = client().filterWithHeader(Collections.singletonMap("Authorization", token)) .admin().cluster().prepareHealth().get(); assertThat(response.isTimedOut(), is(false)); - ChangePasswordResponse passwordResponse = securityClient( - client().filterWithHeader(Collections.singletonMap("Authorization", token))) - .prepareChangePassword("joe", SecuritySettingsSourceField.TEST_PASSWORD.toCharArray(), hasher).get(); + ChangePasswordResponse passwordResponse = + new ChangePasswordRequestBuilder(client().filterWithHeader(Collections.singletonMap("Authorization", token))) + .username("joe").password(SecuritySettingsSourceField.TEST_PASSWORD.toCharArray(), hasher).get(); assertThat(passwordResponse, notNullValue()); @@ -709,8 +705,7 @@ public void testRolesUsageStats() throws Exception { final boolean fls = randomBoolean(); final boolean dls = randomBoolean(); - SecurityClient client = new SecurityClient(client()); - PutRoleResponse putRoleResponse = client.preparePutRole("admin_role") + PutRoleResponse putRoleResponse = new PutRoleRequestBuilder(client()).name("admin_role") .cluster("all") .addIndices(new String[]{"*"}, new String[]{"all"}, null, null, null, randomBoolean()) .get(); @@ -728,7 +723,7 @@ public void testRolesUsageStats() throws Exception { grantedFields = null; deniedFields = fields; } - roleResponse = client.preparePutRole("admin_role_fls") + roleResponse = new PutRoleRequestBuilder(client()).name("admin_role_fls") .cluster("all") .addIndices(new String[]{"*"}, new String[]{"all"}, grantedFields, deniedFields, null, randomBoolean()) .get(); @@ -737,7 +732,7 @@ public void testRolesUsageStats() throws Exception { } if (dls) { - PutRoleResponse roleResponse = client.preparePutRole("admin_role_dls") + PutRoleResponse roleResponse = new PutRoleRequestBuilder(client()).name("admin_role_dls") .cluster("all") .addIndices(new String[]{"*"}, new String[]{"all"}, null, null, new BytesArray("{\"match_all\": {}}"), randomBoolean()) .get(); @@ -745,7 +740,7 @@ public void testRolesUsageStats() throws Exception { roles++; } - client.prepareClearRolesCache().get(); + new ClearRolesCacheRequestBuilder(client()).get(); future = new PlainActionFuture<>(); rolesStore.usageStats(future); @@ -757,10 +752,11 @@ public void testRolesUsageStats() throws Exception { public void testRealmUsageStats() { final int numNativeUsers = scaledRandomIntBetween(1, 32); - SecurityClient securityClient = new SecurityClient(client()); for (int i = 0; i < numNativeUsers; i++) { - securityClient.preparePutUser("joe" + i, "s3krit".toCharArray(), hasher, - "superuser").get(); + new PutUserRequestBuilder(client()).username("joe" + i) + .password("s3krit".toCharArray(), hasher) + .roles("superuser") + .get(); } XPackUsageResponse response = new XPackUsageRequestBuilder(client()).get(); @@ -779,33 +775,31 @@ public void testRealmUsageStats() { } public void testSetEnabled() throws Exception { - - securityClient().preparePutUser("joe", "s3krit".toCharArray(), hasher, + preparePutUser("joe", "s3krit".toCharArray(), hasher, SecuritySettingsSource.TEST_ROLE).get(); final String token = basicAuthHeaderValue("joe", new SecureString("s3krit".toCharArray())); ClusterHealthResponse response = client().filterWithHeader(Collections.singletonMap("Authorization", token)) .admin().cluster().prepareHealth().get(); assertThat(response.isTimedOut(), is(false)); - securityClient(client()).prepareSetEnabled("joe", false).get(); + new SetEnabledRequestBuilder(client()).username("joe").enabled(false).get(); ElasticsearchSecurityException expected = expectThrows(ElasticsearchSecurityException.class, () -> client().filterWithHeader(Collections.singletonMap("Authorization", token)).admin().cluster().prepareHealth().get()); assertThat(expected.status(), is(RestStatus.UNAUTHORIZED)); - securityClient(client()).prepareSetEnabled("joe", true).get(); + new SetEnabledRequestBuilder(client()).username("joe").enabled(true).get(); response = client().filterWithHeader(Collections.singletonMap("Authorization", token)).admin().cluster().prepareHealth().get(); assertThat(response.isTimedOut(), is(false)); IllegalArgumentException e = expectThrows(IllegalArgumentException.class, - () -> securityClient(client()).prepareSetEnabled("not_a_real_user", false).get()); + () -> new SetEnabledRequestBuilder(client()).username("not_a_real_user").enabled(false).get()); assertThat(e.getMessage(), containsString("only existing users can be disabled")); } public void testNegativeLookupsThenCreateRole() throws Exception { - SecurityClient securityClient = new SecurityClient(client()); - securityClient.preparePutUser("joe", "s3krit".toCharArray(), hasher, "unknown_role").get(); + preparePutUser("joe", "s3krit".toCharArray(), hasher, "unknown_role").get(); final int negativeLookups = scaledRandomIntBetween(1, 10); for (int i = 0; i < negativeLookups; i++) { @@ -824,7 +818,7 @@ public void testNegativeLookupsThenCreateRole() throws Exception { } } - securityClient.preparePutRole("unknown_role").cluster("all").get(); + preparePutRole("unknown_role").cluster("all").get(); ClusterHealthResponse response = client() .filterWithHeader(Collections.singletonMap("Authorization", basicAuthHeaderValue("joe", new SecureString("s3krit".toCharArray())))) @@ -841,9 +835,8 @@ public void testNegativeLookupsThenCreateRole() throws Exception { * the loader returned a null value, while the other caller(s) would get a null value unexpectedly */ public void testConcurrentRunAs() throws Exception { - securityClient().preparePutUser("joe", "s3krit".toCharArray(), hasher, SecuritySettingsSource - .TEST_ROLE).get(); - securityClient().preparePutUser("executor", "s3krit".toCharArray(), hasher, "superuser").get(); + preparePutUser("joe", "s3krit".toCharArray(), hasher, SecuritySettingsSource.TEST_ROLE).get(); + preparePutUser("executor", "s3krit".toCharArray(), hasher, "superuser").get(); final String token = basicAuthHeaderValue("executor", new SecureString("s3krit".toCharArray())); final Client client = client().filterWithHeader(Map.of( "Authorization", token, @@ -862,6 +855,7 @@ public void testConcurrentRunAs() throws Exception { assertNoTimeout(response); } } catch (InterruptedException e) { + Thread.currentThread().interrupt(); } })); } @@ -874,4 +868,12 @@ public void testConcurrentRunAs() throws Exception { thread.join(); } } + + private PutUserRequestBuilder preparePutUser(String username, char[] password, Hasher hasher, String... roles) { + return new PutUserRequestBuilder(client()).username(username).password(password, hasher).roles(roles); + } + + private PutRoleRequestBuilder preparePutRole(String name) { + return new PutRoleRequestBuilder(client()).name(name); + } } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/ReservedRealmIntegTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/ReservedRealmIntegTests.java index 59612d6227a71..137ba2279d96e 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/ReservedRealmIntegTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/ReservedRealmIntegTests.java @@ -7,12 +7,15 @@ import org.elasticsearch.ElasticsearchSecurityException; import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; +import org.elasticsearch.client.RestHighLevelClient; +import org.elasticsearch.client.security.ChangePasswordRequest; +import org.elasticsearch.client.security.DisableUserRequest; +import org.elasticsearch.client.security.EnableUserRequest; +import org.elasticsearch.client.security.RefreshPolicy; import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.test.NativeRealmIntegTestCase; -import org.elasticsearch.xpack.core.security.action.user.ChangePasswordResponse; import org.elasticsearch.xpack.core.security.authc.support.Hasher; -import org.elasticsearch.xpack.core.security.client.SecurityClient; import org.elasticsearch.xpack.core.security.user.APMSystemUser; import org.elasticsearch.xpack.core.security.user.BeatsSystemUser; import org.elasticsearch.xpack.core.security.user.ElasticUser; @@ -21,14 +24,15 @@ import org.elasticsearch.xpack.core.security.user.RemoteMonitoringUser; import org.junit.BeforeClass; +import java.io.IOException; import java.util.Arrays; import java.util.List; import static java.util.Collections.singletonMap; +import static org.elasticsearch.test.SecuritySettingsSource.SECURITY_REQUEST_OPTIONS; import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.is; -import static org.hamcrest.Matchers.notNullValue; /** * Integration tests for the built in realm @@ -51,6 +55,11 @@ public Settings nodeSettings(int nodeOrdinal) { return settings; } + @Override + protected boolean addMockHttpTransport() { + return false; + } + public void testAuthenticate() { final List usernames = Arrays.asList(ElasticUser.NAME, KibanaUser.NAME, LogstashSystemUser.NAME, BeatsSystemUser.NAME, APMSystemUser.NAME, RemoteMonitoringUser.NAME); @@ -70,12 +79,12 @@ public void testAuthenticate() { * Enabling a user forces a doc to be written to the security index, and "user doc with empty password" has a special case code in * the reserved realm. */ - public void testAuthenticateAfterEnablingUser() { - final SecurityClient c = securityClient(); + public void testAuthenticateAfterEnablingUser() throws IOException { + final RestHighLevelClient restClient = new TestRestHighLevelClient(); final List usernames = Arrays.asList(ElasticUser.NAME, KibanaUser.NAME, LogstashSystemUser.NAME, BeatsSystemUser.NAME, APMSystemUser.NAME, RemoteMonitoringUser.NAME); for (String username : usernames) { - c.prepareSetEnabled(username, true).get(); + restClient.security().enableUser(new EnableUserRequest(username, RefreshPolicy.getDefault()), SECURITY_REQUEST_OPTIONS); ClusterHealthResponse response = client() .filterWithHeader(singletonMap("Authorization", basicAuthHeaderValue(username, getReservedPassword()))) .admin() @@ -87,7 +96,7 @@ public void testAuthenticateAfterEnablingUser() { } } - public void testChangingPassword() { + public void testChangingPassword() throws IOException { String username = randomFrom(ElasticUser.NAME, KibanaUser.NAME, LogstashSystemUser.NAME, BeatsSystemUser.NAME, APMSystemUser.NAME, RemoteMonitoringUser.NAME); final char[] newPassword = "supersecretvalue".toCharArray(); @@ -102,10 +111,11 @@ public void testChangingPassword() { assertThat(response.getClusterName(), is(cluster().getClusterName())); } - ChangePasswordResponse response = securityClient() - .prepareChangePassword(username, Arrays.copyOf(newPassword, newPassword.length), hasher) - .get(); - assertThat(response, notNullValue()); + final RestHighLevelClient restClient = new TestRestHighLevelClient(); + final boolean changed = restClient.security() + .changePassword(new ChangePasswordRequest(username, Arrays.copyOf(newPassword, newPassword.length), RefreshPolicy.IMMEDIATE), + SECURITY_REQUEST_OPTIONS); + assertTrue(changed); ElasticsearchSecurityException elasticsearchSecurityException = expectThrows(ElasticsearchSecurityException.class, () -> client() .filterWithHeader(singletonMap("Authorization", basicAuthHeaderValue(username, getReservedPassword()))) @@ -125,6 +135,7 @@ public void testChangingPassword() { } public void testDisablingUser() throws Exception { + final RestHighLevelClient restClient = new TestRestHighLevelClient(); // validate the user works ClusterHealthResponse response = client() .filterWithHeader(singletonMap("Authorization", basicAuthHeaderValue(ElasticUser.NAME, getReservedPassword()))) @@ -135,7 +146,10 @@ public void testDisablingUser() throws Exception { assertThat(response.getClusterName(), is(cluster().getClusterName())); // disable user - securityClient().prepareSetEnabled(ElasticUser.NAME, false).get(); + final boolean disabled = + restClient.security().disableUser(new DisableUserRequest(ElasticUser.NAME, RefreshPolicy.getDefault()), + SECURITY_REQUEST_OPTIONS); + assertTrue(disabled); ElasticsearchSecurityException elasticsearchSecurityException = expectThrows(ElasticsearchSecurityException.class, () -> client() .filterWithHeader(singletonMap("Authorization", basicAuthHeaderValue(ElasticUser.NAME, getReservedPassword()))) .admin() @@ -145,7 +159,9 @@ public void testDisablingUser() throws Exception { assertThat(elasticsearchSecurityException.getMessage(), containsString("authenticate")); //enable - securityClient().prepareSetEnabled(ElasticUser.NAME, true).get(); + final boolean enabled = + restClient.security().enableUser(new EnableUserRequest(ElasticUser.NAME, RefreshPolicy.getDefault()), SECURITY_REQUEST_OPTIONS); + assertTrue(enabled); response = client() .filterWithHeader(singletonMap("Authorization", basicAuthHeaderValue(ElasticUser.NAME, getReservedPassword()))) .admin() diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/SecurityScrollTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/SecurityScrollTests.java index 3290aba27e37f..8e4dd66527c27 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/SecurityScrollTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/SecurityScrollTests.java @@ -14,6 +14,8 @@ import org.elasticsearch.search.SearchContextMissingException; import org.elasticsearch.test.SecurityIntegTestCase; import org.elasticsearch.test.SecuritySettingsSourceField; +import org.elasticsearch.xpack.core.security.action.role.PutRoleRequestBuilder; +import org.elasticsearch.xpack.core.security.action.user.PutUserRequestBuilder; import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken; import org.junit.After; @@ -28,11 +30,12 @@ public class SecurityScrollTests extends SecurityIntegTestCase { public void testScrollIsPerUser() throws Exception { assertSecurityIndexActive(); - securityClient().preparePutRole("scrollable") + new PutRoleRequestBuilder(client()).name("scrollable") .addIndices(new String[] { randomAlphaOfLengthBetween(4, 12) }, new String[] { "read" }, null, null, null, randomBoolean()) .get(); - securityClient().preparePutUser("other", SecuritySettingsSourceField.TEST_PASSWORD.toCharArray(), getFastStoredHashAlgoForTests(), - "scrollable") + new PutUserRequestBuilder(client()).username("other") + .password(SecuritySettingsSourceField.TEST_PASSWORD.toCharArray(), getFastStoredHashAlgoForTests()) + .roles("scrollable") .get(); final int numDocs = randomIntBetween(4, 16); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/SnapshotUserRoleIntegTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/SnapshotUserRoleIntegTests.java index 66ea23b518c29..0f533477b94fd 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/SnapshotUserRoleIntegTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/SnapshotUserRoleIntegTests.java @@ -6,31 +6,41 @@ package org.elasticsearch.xpack.security.authz; +import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesResponse; import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; import org.elasticsearch.action.admin.cluster.snapshots.get.GetSnapshotsResponse; import org.elasticsearch.action.admin.indices.get.GetIndexResponse; import org.elasticsearch.client.Client; +import org.elasticsearch.client.RestHighLevelClient; +import org.elasticsearch.client.security.DeleteRoleRequest; +import org.elasticsearch.client.security.PutRoleRequest; +import org.elasticsearch.client.security.PutUserRequest; +import org.elasticsearch.client.security.PutUserResponse; +import org.elasticsearch.client.security.RefreshPolicy; +import org.elasticsearch.client.security.user.User; +import org.elasticsearch.client.security.user.privileges.Role; import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.snapshots.SnapshotState; import org.elasticsearch.test.NativeRealmIntegTestCase; -import org.elasticsearch.xpack.core.security.authc.support.Hasher; import org.junit.Before; +import java.io.IOException; import java.util.Arrays; import java.util.Collections; +import java.util.List; import java.util.Locale; -import static org.elasticsearch.xpack.core.security.index.RestrictedIndicesNames.INTERNAL_SECURITY_MAIN_INDEX_7; -import static org.elasticsearch.xpack.core.security.index.RestrictedIndicesNames.SECURITY_MAIN_ALIAS; - +import static org.elasticsearch.test.SecuritySettingsSource.SECURITY_REQUEST_OPTIONS; +import static org.elasticsearch.test.SecurityTestsUtils.assertThrowsAuthorizationException; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue; -import static org.hamcrest.Matchers.is; -import static org.hamcrest.Matchers.containsString; +import static org.elasticsearch.xpack.core.security.index.RestrictedIndicesNames.INTERNAL_SECURITY_MAIN_INDEX_7; +import static org.elasticsearch.xpack.core.security.index.RestrictedIndicesNames.SECURITY_MAIN_ALIAS; import static org.hamcrest.Matchers.containsInAnyOrder; -import static org.elasticsearch.test.SecurityTestsUtils.assertThrowsAuthorizationException; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.is; public class SnapshotUserRoleIntegTests extends NativeRealmIntegTestCase { @@ -38,7 +48,7 @@ public class SnapshotUserRoleIntegTests extends NativeRealmIntegTestCase { private String ordinaryIndex; @Before - public void setupClusterBeforeSnapshot() { + public void setupClusterBeforeSnapshot() throws IOException { logger.info("--> creating repository"); assertAcked(client().admin().cluster().preparePutRepository("repo") .setType("fs") @@ -55,7 +65,10 @@ public void setupClusterBeforeSnapshot() { final char[] password = new char[] {'p', 'a', 's', 's', 'w', 'o', 'r', 'd'}; final String snapshotUserToken = basicAuthHeaderValue(user, new SecureString(password)); client = client().filterWithHeader(Collections.singletonMap("Authorization", snapshotUserToken)); - securityClient().preparePutUser(user, password, Hasher.BCRYPT, "snapshot_user").get(); + PutUserResponse response = new TestRestHighLevelClient().security().putUser( + PutUserRequest.withPassword(new User(user, List.of("snapshot_user")), password, true, RefreshPolicy.IMMEDIATE), + SECURITY_REQUEST_OPTIONS); + assertTrue(response.isCreated()); ensureGreen(INTERNAL_SECURITY_MAIN_INDEX_7); } @@ -81,11 +94,14 @@ public void testSnapshotUserRoleCanSnapshotAndSeeAllIndices() { } public void testSnapshotUserRoleIsReserved() { - IllegalArgumentException e = expectThrows(IllegalArgumentException.class, - () -> securityClient().preparePutRole("snapshot_user").get()); + final RestHighLevelClient restClient = new TestRestHighLevelClient(); + ElasticsearchStatusException e = expectThrows(ElasticsearchStatusException.class, + () -> restClient.security().putRole( + new PutRoleRequest(Role.builder().name("snapshot_user").build(), RefreshPolicy.IMMEDIATE), SECURITY_REQUEST_OPTIONS)); assertThat(e.getMessage(), containsString("role [snapshot_user] is reserved and cannot be modified")); - e = expectThrows(IllegalArgumentException.class, - () -> securityClient().prepareDeleteRole("snapshot_user").get()); + e = expectThrows(ElasticsearchStatusException.class, + () -> restClient.security().deleteRole( + new DeleteRoleRequest("snapshot_user", RefreshPolicy.IMMEDIATE), SECURITY_REQUEST_OPTIONS)); assertThat(e.getMessage(), containsString("role [snapshot_user] is reserved and cannot be deleted")); } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/support/SecurityIndexManagerIntegTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/support/SecurityIndexManagerIntegTests.java index 55cd659509bfe..f5bd4336fa425 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/support/SecurityIndexManagerIntegTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/support/SecurityIndexManagerIntegTests.java @@ -6,10 +6,9 @@ package org.elasticsearch.xpack.security.support; import org.elasticsearch.action.ActionFuture; -import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.test.SecurityIntegTestCase; -import org.elasticsearch.xpack.core.security.action.user.PutUserRequest; +import org.elasticsearch.xpack.core.security.action.user.PutUserRequestBuilder; import org.elasticsearch.xpack.core.security.action.user.PutUserResponse; import org.hamcrest.Matchers; import org.junit.After; @@ -44,21 +43,18 @@ public void onFailure(Exception e) { @Override protected void doRun() throws Exception { - final List requests = new ArrayList<>(numRequests); + final List requests = new ArrayList<>(numRequests); for (int i = 0; i < numRequests; i++) { - requests.add(securityClient() - .preparePutUser("user" + userNumber.getAndIncrement(), "password".toCharArray(), - getFastStoredHashAlgoForTests(), - randomAlphaOfLengthBetween(1, 16)) - .request()); + requests.add(new PutUserRequestBuilder(client()) + .username("user" + userNumber.getAndIncrement()) + .password("password".toCharArray(), getFastStoredHashAlgoForTests()) + .roles(randomAlphaOfLengthBetween(1, 16))); } barrier.await(10L, TimeUnit.SECONDS); - for (PutUserRequest request : requests) { - PlainActionFuture responsePlainActionFuture = new PlainActionFuture<>(); - securityClient().putUser(request, responsePlainActionFuture); - futures.add(responsePlainActionFuture); + for (PutUserRequestBuilder request : requests) { + futures.add(request.execute()); } } }, "create_users_thread" + i); diff --git a/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/AbstractAdLdapRealmTestCase.java b/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/AbstractAdLdapRealmTestCase.java index 86d411053afe9..94c23b77d3ab0 100644 --- a/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/AbstractAdLdapRealmTestCase.java +++ b/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/AbstractAdLdapRealmTestCase.java @@ -26,7 +26,6 @@ import org.elasticsearch.xpack.core.security.authc.ldap.ActiveDirectorySessionFactorySettings; import org.elasticsearch.xpack.core.security.authc.ldap.LdapRealmSettings; import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken; -import org.elasticsearch.xpack.core.security.client.SecurityClient; import org.elasticsearch.xpack.security.support.SecurityIndexManager; import org.junit.After; import org.junit.AfterClass; @@ -177,12 +176,11 @@ public void setupRoleMappings() throws Exception { if (content.isEmpty()) { return; } - SecurityClient securityClient = securityClient(); Map> futures = new LinkedHashMap<>(content.size()); for (int i = 0; i < content.size(); i++) { final String name = "external_" + i; - final PutRoleMappingRequestBuilder builder = securityClient.preparePutRoleMapping( - name, new BytesArray(content.get(i)), XContentType.JSON); + final PutRoleMappingRequestBuilder builder = new PutRoleMappingRequestBuilder(client()) + .source(name, new BytesArray(content.get(i)), XContentType.JSON); futures.put(name, builder.execute()); } for (String mappingName : futures.keySet()) { From c9fb1ee0dbfc4c7265fba0247221125c27a1a98e Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Thu, 30 May 2019 16:06:11 -0400 Subject: [PATCH 201/224] Remove Log4j 1.2 API as a dependency (#42702) We had this as a dependency for legacy dependencies that still needed the Log4j 1.2 API. This appears to no longer be necessary, so this commit removes this artifact as a dependency. To remove this dependency, we had to fix a few places where we were accidentally relying on Log4j 1.2 instead of Log4j 2 (easy to do, since both APIs were on the compile-time classpath). Finally, we can remove our custom Netty logger factory. This was needed when we were on Log4j 1.2 and handled logging in our own unique way. When we migrated to Log4j 2 we could have dropped this dependency. However, even then Netty would still pick up Log4j 1.2 since it was on the classpath, thus the advantage to removing this as a dependency now. --- modules/transport-netty4/build.gradle | 4 + .../netty4/Netty4HttpServerTransport.java | 4 - .../netty4/Netty4InternalESLogger.java | 187 ---------------- .../transport/netty4/Netty4Transport.java | 4 - .../transport/netty4/Netty4Utils.java | 17 -- plugins/discovery-azure-classic/build.gradle | 1 + .../licenses/log4j-1.2-api-2.11.1.jar.sha1 | 0 .../licenses/log4j-LICENSE.txt | 0 .../licenses/log4j-NOTICE.txt | 0 plugins/discovery-ec2/build.gradle | 1 + .../licenses/log4j-1.2-api-2.11.1.jar.sha1 | 1 + .../discovery-ec2/licenses/log4j-LICENSE.txt | 202 ++++++++++++++++++ .../discovery-ec2/licenses/log4j-NOTICE.txt | 5 + plugins/discovery-gce/build.gradle | 1 + .../licenses/log4j-1.2-api-2.11.1.jar.sha1 | 1 + .../discovery-gce/licenses/log4j-LICENSE.txt | 202 ++++++++++++++++++ .../discovery-gce/licenses/log4j-NOTICE.txt | 5 + plugins/repository-gcs/build.gradle | 1 + .../licenses/log4j-1.2-api-2.11.1.jar.sha1 | 1 + .../repository-gcs/licenses/log4j-LICENSE.txt | 202 ++++++++++++++++++ .../repository-gcs/licenses/log4j-NOTICE.txt | 5 + plugins/repository-hdfs/build.gradle | 1 + .../licenses/log4j-1.2-api-2.11.1.jar.sha1 | 1 + .../licenses/log4j-LICENSE.txt | 202 ++++++++++++++++++ .../repository-hdfs/licenses/log4j-NOTICE.txt | 5 + plugins/repository-s3/build.gradle | 1 + .../licenses/log4j-1.2-api-2.11.1.jar.sha1 | 1 + .../repository-s3/licenses/log4j-LICENSE.txt | 202 ++++++++++++++++++ .../repository-s3/licenses/log4j-NOTICE.txt | 5 + plugins/transport-nio/build.gradle | 4 + .../common/logging/JsonLoggerTests.java | 2 +- .../org/elasticsearch/wildfly/WildflyIT.java | 5 +- server/build.gradle | 2 - .../plugins/PluginsServiceTests.java | 2 +- test/framework/build.gradle | 6 +- x-pack/plugin/core/build.gradle | 1 + .../licenses/log4j-1.2-api-2.11.1.jar.sha1 | 1 + x-pack/plugin/core/licenses/log4j-LICENSE.txt | 202 ++++++++++++++++++ x-pack/plugin/core/licenses/log4j-NOTICE.txt | 5 + .../core/indexing/AsyncTwoPhaseIndexer.java | 5 +- ...ransportStartDataFrameTransformAction.java | 4 +- .../xpack/rollup/job/IndexerUtils.java | 5 +- .../xpack/rollup/job/RollupJobTask.java | 5 +- 43 files changed, 1284 insertions(+), 227 deletions(-) delete mode 100644 modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4InternalESLogger.java rename {server => plugins/discovery-azure-classic}/licenses/log4j-1.2-api-2.11.1.jar.sha1 (100%) rename {server => plugins/discovery-azure-classic}/licenses/log4j-LICENSE.txt (100%) rename {server => plugins/discovery-azure-classic}/licenses/log4j-NOTICE.txt (100%) create mode 100644 plugins/discovery-ec2/licenses/log4j-1.2-api-2.11.1.jar.sha1 create mode 100644 plugins/discovery-ec2/licenses/log4j-LICENSE.txt create mode 100644 plugins/discovery-ec2/licenses/log4j-NOTICE.txt create mode 100644 plugins/discovery-gce/licenses/log4j-1.2-api-2.11.1.jar.sha1 create mode 100644 plugins/discovery-gce/licenses/log4j-LICENSE.txt create mode 100644 plugins/discovery-gce/licenses/log4j-NOTICE.txt create mode 100644 plugins/repository-gcs/licenses/log4j-1.2-api-2.11.1.jar.sha1 create mode 100644 plugins/repository-gcs/licenses/log4j-LICENSE.txt create mode 100644 plugins/repository-gcs/licenses/log4j-NOTICE.txt create mode 100644 plugins/repository-hdfs/licenses/log4j-1.2-api-2.11.1.jar.sha1 create mode 100644 plugins/repository-hdfs/licenses/log4j-LICENSE.txt create mode 100644 plugins/repository-hdfs/licenses/log4j-NOTICE.txt create mode 100644 plugins/repository-s3/licenses/log4j-1.2-api-2.11.1.jar.sha1 create mode 100644 plugins/repository-s3/licenses/log4j-LICENSE.txt create mode 100644 plugins/repository-s3/licenses/log4j-NOTICE.txt create mode 100644 x-pack/plugin/core/licenses/log4j-1.2-api-2.11.1.jar.sha1 create mode 100644 x-pack/plugin/core/licenses/log4j-LICENSE.txt create mode 100644 x-pack/plugin/core/licenses/log4j-NOTICE.txt diff --git a/modules/transport-netty4/build.gradle b/modules/transport-netty4/build.gradle index d64e0aff7749d..b4ec74355d2af 100644 --- a/modules/transport-netty4/build.gradle +++ b/modules/transport-netty4/build.gradle @@ -76,6 +76,10 @@ thirdPartyAudit { 'org.apache.commons.logging.Log', 'org.apache.commons.logging.LogFactory', + // from Log4j (deliberate, Netty will fallback to Log4j 2) + 'org.apache.log4j.Level', + 'org.apache.log4j.Logger', + // from io.netty.handler.ssl.OpenSslEngine (netty) 'io.netty.internal.tcnative.Buffer', 'io.netty.internal.tcnative.Library', diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java index 8b31e0bcb28a2..5602aaba912d1 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java @@ -96,10 +96,6 @@ public class Netty4HttpServerTransport extends AbstractHttpServerTransport { private static final Logger logger = LogManager.getLogger(Netty4HttpServerTransport.class); - static { - Netty4Utils.setup(); - } - /* * Size in bytes of an individual message received by io.netty.handler.codec.MessageAggregator which accumulates the content for an * HTTP request. This number is used for estimating the maximum number of allowed buffers before the MessageAggregator's internal diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4InternalESLogger.java b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4InternalESLogger.java deleted file mode 100644 index 4eca1803b6381..0000000000000 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4InternalESLogger.java +++ /dev/null @@ -1,187 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.transport.netty4; - -import io.netty.util.internal.logging.AbstractInternalLogger; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.elasticsearch.common.SuppressLoggerChecks; - -@SuppressLoggerChecks(reason = "safely delegates to logger") -class Netty4InternalESLogger extends AbstractInternalLogger { - - private final Logger logger; - - Netty4InternalESLogger(final String name) { - super(name); - this.logger = LogManager.getLogger(name); - } - - @Override - public boolean isTraceEnabled() { - return logger.isTraceEnabled(); - } - - @Override - public void trace(String msg) { - logger.trace(msg); - } - - @Override - public void trace(String format, Object arg) { - logger.trace(format, arg); - } - - @Override - public void trace(String format, Object argA, Object argB) { - logger.trace(format, argA, argB); - } - - @Override - public void trace(String format, Object... arguments) { - logger.trace(format, arguments); - } - - @Override - public void trace(String msg, Throwable t) { - logger.trace(msg, t); - } - - @Override - public boolean isDebugEnabled() { - return logger.isDebugEnabled(); - } - - @Override - public void debug(String msg) { - logger.debug(msg); - } - - @Override - public void debug(String format, Object arg) { - logger.debug(format, arg); - } - - @Override - public void debug(String format, Object argA, Object argB) { - logger.debug(format, argA, argB); - } - - @Override - public void debug(String format, Object... arguments) { - logger.debug(format, arguments); - } - - @Override - public void debug(String msg, Throwable t) { - logger.debug(msg, t); - } - - @Override - public boolean isInfoEnabled() { - return logger.isInfoEnabled(); - } - - @Override - public void info(String msg) { - logger.info(msg); - } - - @Override - public void info(String format, Object arg) { - logger.info(format, arg); - } - - @Override - public void info(String format, Object argA, Object argB) { - logger.info(format, argA, argB); - } - - @Override - public void info(String format, Object... arguments) { - logger.info(format, arguments); - } - - @Override - public void info(String msg, Throwable t) { - logger.info(msg, t); - } - - @Override - public boolean isWarnEnabled() { - return logger.isWarnEnabled(); - } - - @Override - public void warn(String msg) { - logger.warn(msg); - } - - @Override - public void warn(String format, Object arg) { - logger.warn(format, arg); - } - - @Override - public void warn(String format, Object... arguments) { - logger.warn(format, arguments); - } - - @Override - public void warn(String format, Object argA, Object argB) { - logger.warn(format, argA, argB); - } - - @Override - public void warn(String msg, Throwable t) { - logger.warn(msg, t); - } - - @Override - public boolean isErrorEnabled() { - return logger.isErrorEnabled(); - } - - @Override - public void error(String msg) { - logger.error(msg); - } - - @Override - public void error(String format, Object arg) { - logger.error(format, arg); - } - - @Override - public void error(String format, Object argA, Object argB) { - logger.error(format, argA, argB); - } - - @Override - public void error(String format, Object... arguments) { - logger.error(format, arguments); - } - - @Override - public void error(String msg, Throwable t) { - logger.error(msg, t); - } - -} diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java index f2871ff34e8b7..5f29c51a1ce60 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java @@ -78,10 +78,6 @@ public class Netty4Transport extends TcpTransport { private static final Logger logger = LogManager.getLogger(Netty4Transport.class); - static { - Netty4Utils.setup(); - } - public static final Setting WORKER_COUNT = new Setting<>("transport.netty.worker_count", (s) -> Integer.toString(EsExecutors.numberOfProcessors(s) * 2), diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Utils.java b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Utils.java index 35928b6c3c8a2..211a574dc980d 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Utils.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Utils.java @@ -23,8 +23,6 @@ import io.netty.buffer.CompositeByteBuf; import io.netty.buffer.Unpooled; import io.netty.util.NettyRuntime; -import io.netty.util.internal.logging.InternalLogger; -import io.netty.util.internal.logging.InternalLoggerFactory; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefIterator; import org.elasticsearch.common.Booleans; @@ -38,21 +36,6 @@ public class Netty4Utils { - static { - InternalLoggerFactory.setDefaultFactory(new InternalLoggerFactory() { - - @Override - public InternalLogger newInstance(final String name) { - return new Netty4InternalESLogger(name); - } - - }); - } - - public static void setup() { - - } - private static AtomicBoolean isAvailableProcessorsSet = new AtomicBoolean(); /** diff --git a/plugins/discovery-azure-classic/build.gradle b/plugins/discovery-azure-classic/build.gradle index cb024d8ac00d7..8a9f207f0b467 100644 --- a/plugins/discovery-azure-classic/build.gradle +++ b/plugins/discovery-azure-classic/build.gradle @@ -35,6 +35,7 @@ dependencies { compile "org.apache.httpcomponents:httpclient:${versions.httpclient}" compile "org.apache.httpcomponents:httpcore:${versions.httpcore}" compile "commons-logging:commons-logging:${versions.commonslogging}" + compile "org.apache.logging.log4j:log4j-1.2-api:${versions.log4j}" compile "commons-codec:commons-codec:${versions.commonscodec}" compile "commons-lang:commons-lang:2.6" compile "commons-io:commons-io:2.4" diff --git a/server/licenses/log4j-1.2-api-2.11.1.jar.sha1 b/plugins/discovery-azure-classic/licenses/log4j-1.2-api-2.11.1.jar.sha1 similarity index 100% rename from server/licenses/log4j-1.2-api-2.11.1.jar.sha1 rename to plugins/discovery-azure-classic/licenses/log4j-1.2-api-2.11.1.jar.sha1 diff --git a/server/licenses/log4j-LICENSE.txt b/plugins/discovery-azure-classic/licenses/log4j-LICENSE.txt similarity index 100% rename from server/licenses/log4j-LICENSE.txt rename to plugins/discovery-azure-classic/licenses/log4j-LICENSE.txt diff --git a/server/licenses/log4j-NOTICE.txt b/plugins/discovery-azure-classic/licenses/log4j-NOTICE.txt similarity index 100% rename from server/licenses/log4j-NOTICE.txt rename to plugins/discovery-azure-classic/licenses/log4j-NOTICE.txt diff --git a/plugins/discovery-ec2/build.gradle b/plugins/discovery-ec2/build.gradle index 50dc6ac5d85b5..ffa9da733ac55 100644 --- a/plugins/discovery-ec2/build.gradle +++ b/plugins/discovery-ec2/build.gradle @@ -32,6 +32,7 @@ dependencies { compile "org.apache.httpcomponents:httpclient:${versions.httpclient}" compile "org.apache.httpcomponents:httpcore:${versions.httpcore}" compile "commons-logging:commons-logging:${versions.commonslogging}" + compile "org.apache.logging.log4j:log4j-1.2-api:${versions.log4j}" compile "commons-codec:commons-codec:${versions.commonscodec}" compile 'com.fasterxml.jackson.core:jackson-databind:2.8.11.3' compile "com.fasterxml.jackson.core:jackson-annotations:${versions.jackson}" diff --git a/plugins/discovery-ec2/licenses/log4j-1.2-api-2.11.1.jar.sha1 b/plugins/discovery-ec2/licenses/log4j-1.2-api-2.11.1.jar.sha1 new file mode 100644 index 0000000000000..575d75dbda8c5 --- /dev/null +++ b/plugins/discovery-ec2/licenses/log4j-1.2-api-2.11.1.jar.sha1 @@ -0,0 +1 @@ +3aba3398fe064a3eab4331f88161c7480e848418 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/log4j-LICENSE.txt b/plugins/discovery-ec2/licenses/log4j-LICENSE.txt new file mode 100644 index 0000000000000..6279e5206de13 --- /dev/null +++ b/plugins/discovery-ec2/licenses/log4j-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 1999-2005 The Apache Software Foundation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/plugins/discovery-ec2/licenses/log4j-NOTICE.txt b/plugins/discovery-ec2/licenses/log4j-NOTICE.txt new file mode 100644 index 0000000000000..0375732360047 --- /dev/null +++ b/plugins/discovery-ec2/licenses/log4j-NOTICE.txt @@ -0,0 +1,5 @@ +Apache log4j +Copyright 2007 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). \ No newline at end of file diff --git a/plugins/discovery-gce/build.gradle b/plugins/discovery-gce/build.gradle index 697cc3780a1fd..25baa4b17ce4a 100644 --- a/plugins/discovery-gce/build.gradle +++ b/plugins/discovery-gce/build.gradle @@ -17,6 +17,7 @@ dependencies { compile "org.apache.httpcomponents:httpclient:${versions.httpclient}" compile "org.apache.httpcomponents:httpcore:${versions.httpcore}" compile "commons-logging:commons-logging:${versions.commonslogging}" + compile "org.apache.logging.log4j:log4j-1.2-api:${versions.log4j}" compile "commons-codec:commons-codec:${versions.commonscodec}" } diff --git a/plugins/discovery-gce/licenses/log4j-1.2-api-2.11.1.jar.sha1 b/plugins/discovery-gce/licenses/log4j-1.2-api-2.11.1.jar.sha1 new file mode 100644 index 0000000000000..575d75dbda8c5 --- /dev/null +++ b/plugins/discovery-gce/licenses/log4j-1.2-api-2.11.1.jar.sha1 @@ -0,0 +1 @@ +3aba3398fe064a3eab4331f88161c7480e848418 \ No newline at end of file diff --git a/plugins/discovery-gce/licenses/log4j-LICENSE.txt b/plugins/discovery-gce/licenses/log4j-LICENSE.txt new file mode 100644 index 0000000000000..6279e5206de13 --- /dev/null +++ b/plugins/discovery-gce/licenses/log4j-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 1999-2005 The Apache Software Foundation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/plugins/discovery-gce/licenses/log4j-NOTICE.txt b/plugins/discovery-gce/licenses/log4j-NOTICE.txt new file mode 100644 index 0000000000000..0375732360047 --- /dev/null +++ b/plugins/discovery-gce/licenses/log4j-NOTICE.txt @@ -0,0 +1,5 @@ +Apache log4j +Copyright 2007 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). \ No newline at end of file diff --git a/plugins/repository-gcs/build.gradle b/plugins/repository-gcs/build.gradle index 288ab3c99f17b..ff1f5bc61edfd 100644 --- a/plugins/repository-gcs/build.gradle +++ b/plugins/repository-gcs/build.gradle @@ -33,6 +33,7 @@ dependencies { compile "org.apache.httpcomponents:httpclient:${versions.httpclient}" compile "org.apache.httpcomponents:httpcore:${versions.httpcore}" compile "commons-logging:commons-logging:${versions.commonslogging}" + compile "org.apache.logging.log4j:log4j-1.2-api:${versions.log4j}" compile "commons-codec:commons-codec:${versions.commonscodec}" compile 'com.google.api:api-common:1.7.0' compile 'com.google.api:gax:1.30.0' diff --git a/plugins/repository-gcs/licenses/log4j-1.2-api-2.11.1.jar.sha1 b/plugins/repository-gcs/licenses/log4j-1.2-api-2.11.1.jar.sha1 new file mode 100644 index 0000000000000..575d75dbda8c5 --- /dev/null +++ b/plugins/repository-gcs/licenses/log4j-1.2-api-2.11.1.jar.sha1 @@ -0,0 +1 @@ +3aba3398fe064a3eab4331f88161c7480e848418 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/log4j-LICENSE.txt b/plugins/repository-gcs/licenses/log4j-LICENSE.txt new file mode 100644 index 0000000000000..6279e5206de13 --- /dev/null +++ b/plugins/repository-gcs/licenses/log4j-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 1999-2005 The Apache Software Foundation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/plugins/repository-gcs/licenses/log4j-NOTICE.txt b/plugins/repository-gcs/licenses/log4j-NOTICE.txt new file mode 100644 index 0000000000000..0375732360047 --- /dev/null +++ b/plugins/repository-gcs/licenses/log4j-NOTICE.txt @@ -0,0 +1,5 @@ +Apache log4j +Copyright 2007 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). \ No newline at end of file diff --git a/plugins/repository-hdfs/build.gradle b/plugins/repository-hdfs/build.gradle index 3d6dcd29d6d19..d2aef6274fa5c 100644 --- a/plugins/repository-hdfs/build.gradle +++ b/plugins/repository-hdfs/build.gradle @@ -52,6 +52,7 @@ dependencies { compile 'com.google.guava:guava:11.0.2' compile 'com.google.protobuf:protobuf-java:2.5.0' compile 'commons-logging:commons-logging:1.1.3' + compile "org.apache.logging.log4j:log4j-1.2-api:${versions.log4j}" compile 'commons-cli:commons-cli:1.2' compile "commons-codec:commons-codec:${versions.commonscodec}" compile 'commons-collections:commons-collections:3.2.2' diff --git a/plugins/repository-hdfs/licenses/log4j-1.2-api-2.11.1.jar.sha1 b/plugins/repository-hdfs/licenses/log4j-1.2-api-2.11.1.jar.sha1 new file mode 100644 index 0000000000000..575d75dbda8c5 --- /dev/null +++ b/plugins/repository-hdfs/licenses/log4j-1.2-api-2.11.1.jar.sha1 @@ -0,0 +1 @@ +3aba3398fe064a3eab4331f88161c7480e848418 \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/log4j-LICENSE.txt b/plugins/repository-hdfs/licenses/log4j-LICENSE.txt new file mode 100644 index 0000000000000..6279e5206de13 --- /dev/null +++ b/plugins/repository-hdfs/licenses/log4j-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 1999-2005 The Apache Software Foundation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/plugins/repository-hdfs/licenses/log4j-NOTICE.txt b/plugins/repository-hdfs/licenses/log4j-NOTICE.txt new file mode 100644 index 0000000000000..0375732360047 --- /dev/null +++ b/plugins/repository-hdfs/licenses/log4j-NOTICE.txt @@ -0,0 +1,5 @@ +Apache log4j +Copyright 2007 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). \ No newline at end of file diff --git a/plugins/repository-s3/build.gradle b/plugins/repository-s3/build.gradle index bf32b99f9dc38..e1c4da54ec182 100644 --- a/plugins/repository-s3/build.gradle +++ b/plugins/repository-s3/build.gradle @@ -39,6 +39,7 @@ dependencies { compile "org.apache.httpcomponents:httpclient:${versions.httpclient}" compile "org.apache.httpcomponents:httpcore:${versions.httpcore}" compile "commons-logging:commons-logging:${versions.commonslogging}" + compile "org.apache.logging.log4j:log4j-1.2-api:${versions.log4j}" compile "commons-codec:commons-codec:${versions.commonscodec}" compile "com.fasterxml.jackson.core:jackson-core:${versions.jackson}" compile 'com.fasterxml.jackson.core:jackson-databind:2.8.11.3' diff --git a/plugins/repository-s3/licenses/log4j-1.2-api-2.11.1.jar.sha1 b/plugins/repository-s3/licenses/log4j-1.2-api-2.11.1.jar.sha1 new file mode 100644 index 0000000000000..575d75dbda8c5 --- /dev/null +++ b/plugins/repository-s3/licenses/log4j-1.2-api-2.11.1.jar.sha1 @@ -0,0 +1 @@ +3aba3398fe064a3eab4331f88161c7480e848418 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/log4j-LICENSE.txt b/plugins/repository-s3/licenses/log4j-LICENSE.txt new file mode 100644 index 0000000000000..6279e5206de13 --- /dev/null +++ b/plugins/repository-s3/licenses/log4j-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 1999-2005 The Apache Software Foundation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/plugins/repository-s3/licenses/log4j-NOTICE.txt b/plugins/repository-s3/licenses/log4j-NOTICE.txt new file mode 100644 index 0000000000000..0375732360047 --- /dev/null +++ b/plugins/repository-s3/licenses/log4j-NOTICE.txt @@ -0,0 +1,5 @@ +Apache log4j +Copyright 2007 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). \ No newline at end of file diff --git a/plugins/transport-nio/build.gradle b/plugins/transport-nio/build.gradle index 7800ff6951a89..a982758482cb6 100644 --- a/plugins/transport-nio/build.gradle +++ b/plugins/transport-nio/build.gradle @@ -53,6 +53,10 @@ thirdPartyAudit { 'org.apache.commons.logging.Log', 'org.apache.commons.logging.LogFactory', + // from Log4j (deliberate, Netty will fallback to Log4j 2) + 'org.apache.log4j.Level', + 'org.apache.log4j.Logger', + // from io.netty.handler.ssl.util.BouncyCastleSelfSignedCertGenerator (netty) 'org.bouncycastle.cert.X509v3CertificateBuilder', 'org.bouncycastle.cert.jcajce.JcaX509CertificateConverter', diff --git a/qa/logging-config/src/test/java/org/elasticsearch/common/logging/JsonLoggerTests.java b/qa/logging-config/src/test/java/org/elasticsearch/common/logging/JsonLoggerTests.java index b62e1a78e82ca..2416eb02bfd4e 100644 --- a/qa/logging-config/src/test/java/org/elasticsearch/common/logging/JsonLoggerTests.java +++ b/qa/logging-config/src/test/java/org/elasticsearch/common/logging/JsonLoggerTests.java @@ -19,7 +19,7 @@ package org.elasticsearch.common.logging; -import org.apache.log4j.Level; +import org.apache.logging.log4j.Level; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.core.LoggerContext; diff --git a/qa/wildfly/src/test/java/org/elasticsearch/wildfly/WildflyIT.java b/qa/wildfly/src/test/java/org/elasticsearch/wildfly/WildflyIT.java index 28e11f021a1c7..b201563f4dce3 100644 --- a/qa/wildfly/src/test/java/org/elasticsearch/wildfly/WildflyIT.java +++ b/qa/wildfly/src/test/java/org/elasticsearch/wildfly/WildflyIT.java @@ -27,7 +27,8 @@ import org.apache.http.impl.client.CloseableHttpClient; import org.apache.http.impl.client.HttpClientBuilder; import org.apache.http.util.EntityUtils; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.apache.lucene.util.LuceneTestCase; import org.apache.lucene.util.TestRuleLimitSysouts; import org.elasticsearch.cluster.ClusterModule; @@ -53,7 +54,7 @@ @TestRuleLimitSysouts.Limit(bytes = 14000) public class WildflyIT extends LuceneTestCase { - private Logger logger = Logger.getLogger(WildflyIT.class); + private Logger logger = LogManager.getLogger(WildflyIT.class); public void testTransportClient() throws URISyntaxException, IOException { try (CloseableHttpClient client = HttpClientBuilder.create().build()) { diff --git a/server/build.gradle b/server/build.gradle index 391fdf46469f0..d05d4a9f01c08 100644 --- a/server/build.gradle +++ b/server/build.gradle @@ -118,8 +118,6 @@ dependencies { // logging compile "org.apache.logging.log4j:log4j-api:${versions.log4j}" compile "org.apache.logging.log4j:log4j-core:${versions.log4j}", optional - // to bridge dependencies that are still on Log4j 1 to Log4j 2 - compile "org.apache.logging.log4j:log4j-1.2-api:${versions.log4j}", optional // repackaged jna with native bits linked against all elastic supported platforms compile "org.elasticsearch:jna:${versions.jna}" diff --git a/server/src/test/java/org/elasticsearch/plugins/PluginsServiceTests.java b/server/src/test/java/org/elasticsearch/plugins/PluginsServiceTests.java index 562bb3b4d9ecb..b9459b926d372 100644 --- a/server/src/test/java/org/elasticsearch/plugins/PluginsServiceTests.java +++ b/server/src/test/java/org/elasticsearch/plugins/PluginsServiceTests.java @@ -19,7 +19,7 @@ package org.elasticsearch.plugins; -import org.apache.log4j.Level; +import org.apache.logging.log4j.Level; import org.apache.lucene.util.Constants; import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.Version; diff --git a/test/framework/build.gradle b/test/framework/build.gradle index 686756c6e53ba..18978bd2d7560 100644 --- a/test/framework/build.gradle +++ b/test/framework/build.gradle @@ -52,7 +52,11 @@ thirdPartyAudit.ignoreMissingClasses ( 'javax.servlet.ServletContextListener', 'org.apache.avalon.framework.logger.Logger', 'org.apache.log.Hierarchy', - 'org.apache.log.Logger' + 'org.apache.log.Logger', + 'org.apache.log4j.Category', + 'org.apache.log4j.Level', + 'org.apache.log4j.Logger', + 'org.apache.log4j.Priority' ) test { diff --git a/x-pack/plugin/core/build.gradle b/x-pack/plugin/core/build.gradle index d805a491e093a..611e3fe7ade36 100644 --- a/x-pack/plugin/core/build.gradle +++ b/x-pack/plugin/core/build.gradle @@ -30,6 +30,7 @@ dependencies { compile "org.apache.httpcomponents:httpasyncclient:${versions.httpasyncclient}" compile "commons-logging:commons-logging:${versions.commonslogging}" + compile "org.apache.logging.log4j:log4j-1.2-api:${versions.log4j}" compile "commons-codec:commons-codec:${versions.commonscodec}" // security deps diff --git a/x-pack/plugin/core/licenses/log4j-1.2-api-2.11.1.jar.sha1 b/x-pack/plugin/core/licenses/log4j-1.2-api-2.11.1.jar.sha1 new file mode 100644 index 0000000000000..575d75dbda8c5 --- /dev/null +++ b/x-pack/plugin/core/licenses/log4j-1.2-api-2.11.1.jar.sha1 @@ -0,0 +1 @@ +3aba3398fe064a3eab4331f88161c7480e848418 \ No newline at end of file diff --git a/x-pack/plugin/core/licenses/log4j-LICENSE.txt b/x-pack/plugin/core/licenses/log4j-LICENSE.txt new file mode 100644 index 0000000000000..6279e5206de13 --- /dev/null +++ b/x-pack/plugin/core/licenses/log4j-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 1999-2005 The Apache Software Foundation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/x-pack/plugin/core/licenses/log4j-NOTICE.txt b/x-pack/plugin/core/licenses/log4j-NOTICE.txt new file mode 100644 index 0000000000000..0375732360047 --- /dev/null +++ b/x-pack/plugin/core/licenses/log4j-NOTICE.txt @@ -0,0 +1,5 @@ +Apache log4j +Copyright 2007 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). \ No newline at end of file diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexer.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexer.java index 80b0378ae35ff..0c4477b6b700e 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexer.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexer.java @@ -6,7 +6,8 @@ package org.elasticsearch.xpack.core.indexing; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.bulk.BulkRequest; import org.elasticsearch.action.bulk.BulkResponse; @@ -35,7 +36,7 @@ * @param Type that defines a job position to be defined by the implementation. */ public abstract class AsyncTwoPhaseIndexer { - private static final Logger logger = Logger.getLogger(AsyncTwoPhaseIndexer.class.getName()); + private static final Logger logger = LogManager.getLogger(AsyncTwoPhaseIndexer.class.getName()); private final JobStats stats; diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportStartDataFrameTransformAction.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportStartDataFrameTransformAction.java index 39c0c74bbd504..8b7bcb8d764e3 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportStartDataFrameTransformAction.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportStartDataFrameTransformAction.java @@ -6,8 +6,8 @@ package org.elasticsearch.xpack.dataframe.action; -import org.apache.log4j.LogManager; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.action.ActionListener; diff --git a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/IndexerUtils.java b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/IndexerUtils.java index d8dbc0ceade7a..4e3789afa0133 100644 --- a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/IndexerUtils.java +++ b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/IndexerUtils.java @@ -5,7 +5,8 @@ */ package org.elasticsearch.xpack.rollup.job; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.search.aggregations.Aggregation; @@ -32,7 +33,7 @@ * They are extracted out as static classes mainly to make testing easier. */ class IndexerUtils { - private static final Logger logger = Logger.getLogger(IndexerUtils.class.getName()); + private static final Logger logger = LogManager.getLogger(IndexerUtils.class); /** * The only entry point in this class. You hand this method an aggregation and an index diff --git a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/RollupJobTask.java b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/RollupJobTask.java index 038c69365c839..93d7c5065cc29 100644 --- a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/RollupJobTask.java +++ b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/RollupJobTask.java @@ -5,7 +5,8 @@ */ package org.elasticsearch.xpack.rollup.job; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.bulk.BulkAction; @@ -45,7 +46,7 @@ * Each RollupJobTask also registers itself into the Scheduler so that it can be triggered on the cron's interval. */ public class RollupJobTask extends AllocatedPersistentTask implements SchedulerEngine.Listener { - private static final Logger logger = Logger.getLogger(RollupJobTask.class.getName()); + private static final Logger logger = LogManager.getLogger(RollupJobTask.class.getName()); static final String SCHEDULE_NAME = RollupField.TASK_NAME + "/schedule"; From e917390e9983d4382833884e57fdc1d28e544fd7 Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Thu, 30 May 2019 13:21:48 -0700 Subject: [PATCH 202/224] Remove client jar support from build (#42640) The client jars were a way for modules and plugins to produce an additional jar that contained classes for use by the transport client. This commit removes that configuration as the transport client is being removed. relates #42638 --- .../gradle/plugin/PluginBuildPlugin.groovy | 26 +++---------------- .../plugin/PluginPropertiesExtension.groovy | 4 --- modules/aggs-matrix-stats/build.gradle | 1 - modules/lang-mustache/build.gradle | 1 - modules/parent-join/build.gradle | 1 - modules/percolator/build.gradle | 1 - modules/rank-eval/build.gradle | 1 - modules/reindex/build.gradle | 1 - modules/transport-netty4/build.gradle | 1 - plugins/analysis-icu/build.gradle | 1 - plugins/transport-nio/build.gradle | 1 - 11 files changed, 3 insertions(+), 36 deletions(-) diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy index e04d0966c412d..c391757099097 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy @@ -19,7 +19,6 @@ package org.elasticsearch.gradle.plugin import com.github.jengelman.gradle.plugins.shadow.ShadowPlugin -import nebula.plugin.publishing.maven.MavenScmPlugin import org.elasticsearch.gradle.BuildPlugin import org.elasticsearch.gradle.NoticeTask import org.elasticsearch.gradle.Version @@ -33,11 +32,9 @@ import org.gradle.api.Project import org.gradle.api.Task import org.gradle.api.publish.maven.MavenPublication import org.gradle.api.publish.maven.plugins.MavenPublishPlugin -import org.gradle.api.publish.maven.tasks.GenerateMavenPom import org.gradle.api.tasks.Copy import org.gradle.api.tasks.SourceSet import org.gradle.api.tasks.bundling.Zip -import org.gradle.jvm.tasks.Jar import java.util.regex.Matcher import java.util.regex.Pattern @@ -131,25 +128,10 @@ class PluginBuildPlugin implements Plugin { } private void configurePublishing(Project project, PluginPropertiesExtension extension) { - // Only configure publishing if applied externally - if (extension.hasClientJar) { - project.plugins.apply(MavenScmPlugin.class) - // Only change Jar tasks, we don't want a -client zip so we can't change archivesBaseName - project.tasks.withType(Jar) { - baseName = baseName + "-client" - } - // always configure publishing for client jars - project.plugins.apply(MavenScmPlugin.class) - project.publishing.publications.nebula(MavenPublication).artifactId(extension.name + "-client") - project.tasks.withType(GenerateMavenPom.class) { GenerateMavenPom generatePOMTask -> - generatePOMTask.ext.pomFileName = "${project.archivesBaseName}-client-${project.versions.elasticsearch}.pom" - } - } else { - if (project.plugins.hasPlugin(MavenPublishPlugin)) { - project.publishing.publications.nebula(MavenPublication).artifactId(extension.name) - } - + if (project.plugins.hasPlugin(MavenPublishPlugin)) { + project.publishing.publications.nebula(MavenPublication).artifactId(extension.name) } + } private static void configureDependencies(Project project) { @@ -260,8 +242,6 @@ class PluginBuildPlugin implements Plugin { project.artifacts.add('zip', bundle) } - /** Adds a task to move jar and associated files to a "-client" name. */ - static final Pattern GIT_PATTERN = Pattern.compile(/git@([^:]+):([^\.]+)\.git/) /** Find the reponame. */ diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginPropertiesExtension.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginPropertiesExtension.groovy index c250d7695a832..953aa257496ec 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginPropertiesExtension.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginPropertiesExtension.groovy @@ -47,10 +47,6 @@ class PluginPropertiesExtension { @Input boolean hasNativeController = false - /** Indicates whether the plugin jar should be made available for the transport client. */ - @Input - boolean hasClientJar = false - /** True if the plugin requires the elasticsearch keystore to exist, false otherwise. */ @Input boolean requiresKeystore = false diff --git a/modules/aggs-matrix-stats/build.gradle b/modules/aggs-matrix-stats/build.gradle index d3af124b8af71..b3060fa178621 100644 --- a/modules/aggs-matrix-stats/build.gradle +++ b/modules/aggs-matrix-stats/build.gradle @@ -20,5 +20,4 @@ esplugin { description 'Adds aggregations whose input are a list of numeric fields and output includes a matrix.' classname 'org.elasticsearch.search.aggregations.matrix.MatrixAggregationPlugin' - hasClientJar = true } diff --git a/modules/lang-mustache/build.gradle b/modules/lang-mustache/build.gradle index 2a46bd9ed2efa..f493a85ab67d4 100644 --- a/modules/lang-mustache/build.gradle +++ b/modules/lang-mustache/build.gradle @@ -20,7 +20,6 @@ esplugin { description 'Mustache scripting integration for Elasticsearch' classname 'org.elasticsearch.script.mustache.MustachePlugin' - hasClientJar = true // For the template apis and query } dependencies { diff --git a/modules/parent-join/build.gradle b/modules/parent-join/build.gradle index 67bcc9d54e8e7..756a65a371a9b 100644 --- a/modules/parent-join/build.gradle +++ b/modules/parent-join/build.gradle @@ -20,5 +20,4 @@ esplugin { description 'This module adds the support parent-child queries and aggregations' classname 'org.elasticsearch.join.ParentJoinPlugin' - hasClientJar = true } diff --git a/modules/percolator/build.gradle b/modules/percolator/build.gradle index 0826c91676dea..1296491748c35 100644 --- a/modules/percolator/build.gradle +++ b/modules/percolator/build.gradle @@ -20,7 +20,6 @@ esplugin { description 'Percolator module adds capability to index queries and query these queries by specifying documents' classname 'org.elasticsearch.percolator.PercolatorPlugin' - hasClientJar = true } dependencies { diff --git a/modules/rank-eval/build.gradle b/modules/rank-eval/build.gradle index a0fe3bef0a617..3e8e40ae89c40 100644 --- a/modules/rank-eval/build.gradle +++ b/modules/rank-eval/build.gradle @@ -20,7 +20,6 @@ esplugin { description 'The Rank Eval module adds APIs to evaluate ranking quality.' classname 'org.elasticsearch.index.rankeval.RankEvalPlugin' - hasClientJar = true } testClusters.integTest { diff --git a/modules/reindex/build.gradle b/modules/reindex/build.gradle index 957d502f6fcea..33de853e839e3 100644 --- a/modules/reindex/build.gradle +++ b/modules/reindex/build.gradle @@ -26,7 +26,6 @@ apply plugin: 'elasticsearch.test-with-dependencies' esplugin { description 'The Reindex module adds APIs to reindex from one index to another or update documents in place.' classname 'org.elasticsearch.index.reindex.ReindexPlugin' - hasClientJar = true } testClusters.integTest { diff --git a/modules/transport-netty4/build.gradle b/modules/transport-netty4/build.gradle index b4ec74355d2af..49d9f3693fdc7 100644 --- a/modules/transport-netty4/build.gradle +++ b/modules/transport-netty4/build.gradle @@ -26,7 +26,6 @@ esplugin { description 'Netty 4 based transport implementation' classname 'org.elasticsearch.transport.Netty4Plugin' - hasClientJar = true } compileTestJava.options.compilerArgs << "-Xlint:-cast,-rawtypes,-unchecked" diff --git a/plugins/analysis-icu/build.gradle b/plugins/analysis-icu/build.gradle index 90132e2c58fcd..f818959e706de 100644 --- a/plugins/analysis-icu/build.gradle +++ b/plugins/analysis-icu/build.gradle @@ -22,7 +22,6 @@ import de.thetaphi.forbiddenapis.gradle.CheckForbiddenApis esplugin { description 'The ICU Analysis plugin integrates Lucene ICU module into elasticsearch, adding ICU relates analysis components.' classname 'org.elasticsearch.plugin.analysis.icu.AnalysisICUPlugin' - hasClientJar = true } tasks.withType(CheckForbiddenApis) { diff --git a/plugins/transport-nio/build.gradle b/plugins/transport-nio/build.gradle index a982758482cb6..dd9bccf3799d0 100644 --- a/plugins/transport-nio/build.gradle +++ b/plugins/transport-nio/build.gradle @@ -21,7 +21,6 @@ apply plugin: "nebula.maven-scm" esplugin { description 'The nio transport.' classname 'org.elasticsearch.transport.nio.NioTransportPlugin' - hasClientJar = true } dependencies { From 76809d69e58dcd7896e6b8acd9097d244a3c4eaa Mon Sep 17 00:00:00 2001 From: Tal Levy Date: Thu, 30 May 2019 14:07:12 -0700 Subject: [PATCH 203/224] mute failing search template test (#42730) tracking issue #42664. --- .../rest-api-spec/test/lang_mustache/30_search_template.yml | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/modules/lang-mustache/src/test/resources/rest-api-spec/test/lang_mustache/30_search_template.yml b/modules/lang-mustache/src/test/resources/rest-api-spec/test/lang_mustache/30_search_template.yml index c0cc784250b90..58f42c5d870bb 100644 --- a/modules/lang-mustache/src/test/resources/rest-api-spec/test/lang_mustache/30_search_template.yml +++ b/modules/lang-mustache/src/test/resources/rest-api-spec/test/lang_mustache/30_search_template.yml @@ -128,6 +128,11 @@ --- "Test with new response format": + + - skip: + version: "all" + reason: "AwaitsFix https://github.com/elastic/elasticsearch/issues/42664" + - do: index: index: test From 622cfa80e7ad6a0c6cdd911ed5d4dc0f97012616 Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Thu, 30 May 2019 14:38:09 -0700 Subject: [PATCH 204/224] Remove groovy client docs (#42731) The groovy client api was a wrapper around the transport client. However, it has not been published since 2.4, as it had many issues with the java security manager. This commit removes the docs from master for the groovy client. relates #42638 --- docs/groovy-api/anatomy.asciidoc | 102 --------------------------- docs/groovy-api/client.asciidoc | 59 ---------------- docs/groovy-api/delete.asciidoc | 16 ----- docs/groovy-api/get.asciidoc | 19 ----- docs/groovy-api/index.asciidoc | 48 ------------- docs/groovy-api/index_.asciidoc | 32 --------- docs/groovy-api/search.asciidoc | 116 ------------------------------- 7 files changed, 392 deletions(-) delete mode 100644 docs/groovy-api/anatomy.asciidoc delete mode 100644 docs/groovy-api/client.asciidoc delete mode 100644 docs/groovy-api/delete.asciidoc delete mode 100644 docs/groovy-api/get.asciidoc delete mode 100644 docs/groovy-api/index.asciidoc delete mode 100644 docs/groovy-api/index_.asciidoc delete mode 100644 docs/groovy-api/search.asciidoc diff --git a/docs/groovy-api/anatomy.asciidoc b/docs/groovy-api/anatomy.asciidoc deleted file mode 100644 index ba7cf83bb00cf..0000000000000 --- a/docs/groovy-api/anatomy.asciidoc +++ /dev/null @@ -1,102 +0,0 @@ -[[anatomy]] -== API Anatomy - -Once a <> has been -obtained, all of Elasticsearch APIs can be executed on it. Each Groovy -API is exposed using three different mechanisms. - - -[[closure]] -=== Closure Request - -The first type is to simply provide the request as a Closure, which -automatically gets resolved into the respective request instance (for -the index API, its the `IndexRequest` class). The API returns a special -future, called `GActionFuture`. This is a groovier version of -Elasticsearch Java `ActionFuture` (in turn a nicer extension to Java own -`Future`) which allows to register listeners (closures) on it for -success and failures, as well as blocking for the response. For example: - -[source,groovy] --------------------------------------------------- -def indexR = client.index { - index "test" - type "_doc" - id "1" - source { - test = "value" - complex { - value1 = "value1" - value2 = "value2" - } - } -} - -println "Indexed $indexR.response.id into $indexR.response.index/$indexR.response.type" --------------------------------------------------- - -In the above example, calling `indexR.response` will simply block for -the response. We can also block for the response for a specific timeout: - -[source,groovy] --------------------------------------------------- -IndexResponse response = indexR.response "5s" // block for 5 seconds, same as: -response = indexR.response 5, TimeValue.SECONDS // --------------------------------------------------- - -We can also register closures that will be called on success and on -failure: - -[source,groovy] --------------------------------------------------- -indexR.success = {IndexResponse response -> - println "Indexed $response.id into $response.index/$response.type" -} -indexR.failure = {Throwable t -> - println "Failed to index: $t.message" -} --------------------------------------------------- - - -[[request]] -=== Request - -This option allows to pass the actual instance of the request (instead -of a closure) as a parameter. The rest is similar to the closure as a -parameter option (the `GActionFuture` handling). For example: - -[source,groovy] --------------------------------------------------- -def indexR = client.index (new IndexRequest( - index: "test", - type: "_doc", - id: "1", - source: { - test = "value" - complex { - value1 = "value1" - value2 = "value2" - } - })) - -println "Indexed $indexR.response.id into $indexR.response.index/$indexR.response.type" --------------------------------------------------- - - -[[java-like]] -=== Java Like - -The last option is to provide an actual instance of the API request, and -an `ActionListener` for the callback. This is exactly like the Java API -with the added `gexecute` which returns the `GActionFuture`: - -[source,groovy] --------------------------------------------------- -def indexR = node.client.prepareIndex("test", "_doc", "1").setSource({ - test = "value" - complex { - value1 = "value1" - value2 = "value2" - } -}).gexecute() --------------------------------------------------- diff --git a/docs/groovy-api/client.asciidoc b/docs/groovy-api/client.asciidoc deleted file mode 100644 index c3c89e71bc535..0000000000000 --- a/docs/groovy-api/client.asciidoc +++ /dev/null @@ -1,59 +0,0 @@ -[[client]] -== Client - -Obtaining an Elasticsearch Groovy `GClient` (a `GClient` is a simple -wrapper on top of the Java `Client`) is simple. The most common way to -get a client is by starting an embedded `Node` which acts as a node -within the cluster. - - -[[node-client]] -=== Node Client - -A Node based client is the simplest form to get a `GClient` to start -executing operations against Elasticsearch. - -[source,groovy] --------------------------------------------------- -import org.elasticsearch.groovy.client.GClient -import org.elasticsearch.groovy.node.GNode -import static org.elasticsearch.groovy.node.GNodeBuilder.nodeBuilder - -// on startup - -GNode node = nodeBuilder().node(); -GClient client = node.client(); - -// on shutdown - -node.close(); --------------------------------------------------- - -Since Elasticsearch allows to configure it using JSON based settings, -the configuration itself can be done using a closure that represent the -JSON: - -[source,groovy] --------------------------------------------------- -import org.elasticsearch.groovy.node.GNode -import org.elasticsearch.groovy.node.GNodeBuilder -import static org.elasticsearch.groovy.node.GNodeBuilder.* - -// on startup - -GNodeBuilder nodeBuilder = nodeBuilder(); -nodeBuilder.settings { - node { - client = true - } - cluster { - name = "test" - } -} - -GNode node = nodeBuilder.node() - -// on shutdown - -node.stop().close() --------------------------------------------------- diff --git a/docs/groovy-api/delete.asciidoc b/docs/groovy-api/delete.asciidoc deleted file mode 100644 index 3d6547820041a..0000000000000 --- a/docs/groovy-api/delete.asciidoc +++ /dev/null @@ -1,16 +0,0 @@ -[[delete]] -== Delete API - -The delete API is very similar to the -// {javaclient}/java-docs-delete.html[] -Java delete API, here is an -example: - -[source,groovy] --------------------------------------------------- -def deleteF = node.client.delete { - index "test" - type "_doc" - id "1" -} --------------------------------------------------- diff --git a/docs/groovy-api/get.asciidoc b/docs/groovy-api/get.asciidoc deleted file mode 100644 index 2cac8429c3e79..0000000000000 --- a/docs/groovy-api/get.asciidoc +++ /dev/null @@ -1,19 +0,0 @@ -[[get]] -== Get API - -The get API is very similar to the -// {javaclient}/java-docs-get.html[] -Java get API. The main benefit -of using groovy is handling the source content. It can be automatically -converted to a `Map` which means using Groovy to navigate it is simple: - -[source,groovy] --------------------------------------------------- -def getF = node.client.get { - index "test" - type "_doc" - id "1" -} - -println "Result of field2: $getF.response.source.complex.field2" --------------------------------------------------- diff --git a/docs/groovy-api/index.asciidoc b/docs/groovy-api/index.asciidoc deleted file mode 100644 index e1bb81856f15a..0000000000000 --- a/docs/groovy-api/index.asciidoc +++ /dev/null @@ -1,48 +0,0 @@ -= Groovy API - -include::../Versions.asciidoc[] - -[preface] -== Preface - -This section describes the http://groovy-lang.org/[Groovy] API -Elasticsearch provides. All Elasticsearch APIs are executed using a -<>, and are completely -asynchronous in nature (they either accept a listener, or return a -future). - -The Groovy API is a wrapper on top of the -{javaclient}[Java API] exposing it in a groovier -manner. The execution options for each API follow a similar manner and -covered in <>. - - -[[maven]] -=== Maven Repository - -The Groovy API is hosted on -http://search.maven.org/#search%7Cga%7C1%7Ca%3A%22elasticsearch-groovy%22[Maven -Central]. - -For example, you can define the latest version in your `pom.xml` file: - -["source","xml",subs="attributes"] --------------------------------------------------- - - org.elasticsearch - elasticsearch-groovy - {version} - --------------------------------------------------- - -include::anatomy.asciidoc[] - -include::client.asciidoc[] - -include::index_.asciidoc[] - -include::get.asciidoc[] - -include::delete.asciidoc[] - -include::search.asciidoc[] diff --git a/docs/groovy-api/index_.asciidoc b/docs/groovy-api/index_.asciidoc deleted file mode 100644 index deefb30e031a3..0000000000000 --- a/docs/groovy-api/index_.asciidoc +++ /dev/null @@ -1,32 +0,0 @@ -[[index_]] -== Index API - -The index API is very similar to the -// {javaclient}/java-docs-index.html[] -Java index API. The Groovy -extension to it is the ability to provide the indexed source using a -closure. For example: - -[source,groovy] --------------------------------------------------- -def indexR = client.index { - index "test" - type "_doc" - id "1" - source { - test = "value" - complex { - value1 = "value1" - value2 = "value2" - } - } -} --------------------------------------------------- - -In the above example, the source closure itself gets transformed into an -XContent (defaults to JSON). In order to change how the source closure -is serialized, a global (static) setting can be set on the `GClient` by -changing the `indexContentType` field. - -Note also that the `source` can be set using the typical Java based -APIs, the `Closure` option is a Groovy extension. diff --git a/docs/groovy-api/search.asciidoc b/docs/groovy-api/search.asciidoc deleted file mode 100644 index 7834e45abc81a..0000000000000 --- a/docs/groovy-api/search.asciidoc +++ /dev/null @@ -1,116 +0,0 @@ -[[search]] -== Search API - -The search API is very similar to the -// {javaclient}/java-search.html[] -Java search API. The Groovy -extension allows to provide the search source to execute as a `Closure` -including the query itself (similar to GORM criteria builder): - -[source,groovy] --------------------------------------------------- -def search = node.client.search { - indices "test" - types "_doc" - source { - query { - term(test: "value") - } - } -} - -search.response.hits.each {SearchHit hit -> - println "Got hit $hit.id from $hit.index/$hit.type" -} --------------------------------------------------- - -It can also be executed using the "Java API" while still using a closure -for the query: - -[source,groovy] --------------------------------------------------- -def search = node.client.prepareSearch("test").setQuery({ - term(test: "value") -}).gexecute(); - -search.response.hits.each {SearchHit hit -> - println "Got hit $hit.id from $hit.index/$hit.type" -} --------------------------------------------------- - -The format of the search `Closure` follows the same JSON syntax as the -{ref}/search-search.html[Search API] request. - - -[[more-examples]] -=== More examples - -Term query where multiple values are provided (see -{ref}/query-dsl-terms-query.html[terms]): - -[source,groovy] --------------------------------------------------- -def search = node.client.search { - indices "test" - types "_doc" - source { - query { - terms(test: ["value1", "value2"]) - } - } -} --------------------------------------------------- - -Query string (see -{ref}/query-dsl-query-string-query.html[query string]): - -[source,groovy] --------------------------------------------------- -def search = node.client.search { - indices "test" - types "_doc" - source { - query { - query_string( - fields: ["test"], - query: "value1 value2") - } - } -} --------------------------------------------------- - -Pagination (see -{ref}/search-request-from-size.html[from/size]): - -[source,groovy] --------------------------------------------------- -def search = node.client.search { - indices "test" - types "_doc" - source { - from = 0 - size = 10 - query { - term(test: "value") - } - } -} --------------------------------------------------- - -Sorting (see {ref}/search-request-sort.html[sort]): - -[source,groovy] --------------------------------------------------- -def search = node.client.search { - indices "test" - types "_doc" - source { - query { - term(test: "value") - } - sort = [ - date : [ order: "desc"] - ] - } -} --------------------------------------------------- From 88befb54c53a71b4fc8d8d2cf881b087706dbd40 Mon Sep 17 00:00:00 2001 From: Alex Pang Date: Thu, 30 May 2019 18:01:04 -0400 Subject: [PATCH 205/224] Fix docs typo in the certutil CSR mode (#42593) Changes the mention of `cert` to `csr`. Co-Authored-By: Alex Pang --- docs/reference/commands/certutil.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/reference/commands/certutil.asciidoc b/docs/reference/commands/certutil.asciidoc index 6f4d3224d7aeb..07a3f96738dac 100644 --- a/docs/reference/commands/certutil.asciidoc +++ b/docs/reference/commands/certutil.asciidoc @@ -103,7 +103,7 @@ which prompts you for details about each instance. Alternatively, you can use the `--in` parameter to specify a YAML file that contains details about the instances. -The `cert` mode produces a single zip file which contains the CSRs and the +The `csr` mode produces a single zip file which contains the CSRs and the private keys for each instance. Each CSR is provided as a standard PEM encoding of a PKCS#10 CSR. Each key is provided as a PEM encoding of an RSA private key. From 9ffed176940ec2ea8f0432fadc3d549dbb800486 Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Thu, 30 May 2019 15:03:48 -0700 Subject: [PATCH 206/224] Remove transport client docs (#42483) This commit removes the transport client documentation. --- docs/java-api/admin/cluster/health.asciidoc | 76 ------ docs/java-api/admin/cluster/index.asciidoc | 16 -- .../admin/cluster/stored-scripts.asciidoc | 29 -- docs/java-api/admin/index.asciidoc | 18 -- .../admin/indices/create-index.asciidoc | 28 -- .../admin/indices/get-settings.asciidoc | 22 -- docs/java-api/admin/indices/index.asciidoc | 21 -- .../admin/indices/put-mapping.asciidoc | 30 --- docs/java-api/admin/indices/refresh.asciidoc | 19 -- .../admin/indices/update-settings.asciidoc | 16 -- docs/java-api/aggregations/bucket.asciidoc | 33 --- .../bucket/children-aggregation.asciidoc | 35 --- .../bucket/datehistogram-aggregation.asciidoc | 73 ----- .../bucket/daterange-aggregation.asciidoc | 59 ----- .../bucket/filter-aggregation.asciidoc | 34 --- .../bucket/filters-aggregation.asciidoc | 51 ---- .../bucket/geodistance-aggregation.asciidoc | 58 ---- .../bucket/geohashgrid-aggregation.asciidoc | 57 ---- .../bucket/global-aggregation.asciidoc | 35 --- .../bucket/histogram-aggregation.asciidoc | 48 ---- .../bucket/iprange-aggregation.asciidoc | 79 ------ .../bucket/missing-aggregation.asciidoc | 34 --- .../bucket/nested-aggregation.asciidoc | 34 --- .../bucket/range-aggregation.asciidoc | 58 ---- .../reverse-nested-aggregation.asciidoc | 50 ---- .../significantterms-aggregation.asciidoc | 47 ---- .../bucket/terms-aggregation.asciidoc | 97 ------- docs/java-api/aggregations/metrics.asciidoc | 27 -- .../metrics/avg-aggregation.asciidoc | 37 --- .../metrics/cardinality-aggregation.asciidoc | 38 --- .../extendedstats-aggregation.asciidoc | 44 --- .../metrics/geobounds-aggregation.asciidoc | 46 ---- .../metrics/max-aggregation.asciidoc | 37 --- .../metrics/min-aggregation.asciidoc | 37 --- .../metrics/percentile-aggregation.asciidoc | 68 ----- .../percentile-rank-aggregation.asciidoc | 55 ---- .../scripted-metric-aggregation.asciidoc | 100 ------- .../metrics/stats-aggregation.asciidoc | 41 --- .../metrics/sum-aggregation.asciidoc | 37 --- .../metrics/tophits-aggregation.asciidoc | 79 ------ .../metrics/valuecount-aggregation.asciidoc | 37 --- docs/java-api/aggs.asciidoc | 63 ----- docs/java-api/client.asciidoc | 110 -------- docs/java-api/docs.asciidoc | 36 --- docs/java-api/docs/bulk.asciidoc | 190 ------------- docs/java-api/docs/delete.asciidoc | 42 --- docs/java-api/docs/get.asciidoc | 14 - docs/java-api/docs/index_.asciidoc | 167 ------------ docs/java-api/docs/multi-get.asciidoc | 30 --- docs/java-api/docs/reindex.asciidoc | 11 - docs/java-api/docs/update-by-query.asciidoc | 166 ------------ docs/java-api/docs/update.asciidoc | 118 --------- docs/java-api/index.asciidoc | 149 ----------- docs/java-api/query-dsl.asciidoc | 40 --- docs/java-api/query-dsl/bool-query.asciidoc | 13 - .../query-dsl/boosting-query.asciidoc | 12 - .../query-dsl/common-terms-query.asciidoc | 11 - .../query-dsl/compound-queries.asciidoc | 45 ---- .../query-dsl/constant-score-query.asciidoc | 11 - .../java-api/query-dsl/dis-max-query.asciidoc | 13 - docs/java-api/query-dsl/exists-query.asciidoc | 10 - .../query-dsl/full-text-queries.asciidoc | 44 --- .../query-dsl/function-score-query.asciidoc | 19 -- docs/java-api/query-dsl/fuzzy-query.asciidoc | 11 - .../query-dsl/geo-bounding-box-query.asciidoc | 12 - .../query-dsl/geo-distance-query.asciidoc | 12 - .../query-dsl/geo-polygon-query.asciidoc | 11 - docs/java-api/query-dsl/geo-queries.asciidoc | 34 --- .../query-dsl/geo-shape-query.asciidoc | 56 ---- .../query-dsl/has-child-query.asciidoc | 23 -- .../query-dsl/has-parent-query.asciidoc | 23 -- docs/java-api/query-dsl/ids-query.asciidoc | 10 - .../query-dsl/joining-queries.asciidoc | 28 -- .../query-dsl/match-all-query.asciidoc | 9 - docs/java-api/query-dsl/match-query.asciidoc | 11 - docs/java-api/query-dsl/mlt-query.asciidoc | 13 - .../query-dsl/multi-match-query.asciidoc | 11 - docs/java-api/query-dsl/nested-query.asciidoc | 12 - .../query-dsl/percolate-query.asciidoc | 61 ----- docs/java-api/query-dsl/prefix-query.asciidoc | 11 - .../query-dsl/query-string-query.asciidoc | 9 - docs/java-api/query-dsl/range-query.asciidoc | 22 -- docs/java-api/query-dsl/regexp-query.asciidoc | 11 - docs/java-api/query-dsl/script-query.asciidoc | 29 -- .../simple-query-string-query.asciidoc | 9 - .../query-dsl/span-containing-query.asciidoc | 11 - .../query-dsl/span-first-query.asciidoc | 11 - .../query-dsl/span-multi-term-query.asciidoc | 11 - .../query-dsl/span-near-query.asciidoc | 12 - .../query-dsl/span-not-query.asciidoc | 11 - .../java-api/query-dsl/span-or-query.asciidoc | 10 - docs/java-api/query-dsl/span-queries.asciidoc | 65 ----- .../query-dsl/span-term-query.asciidoc | 11 - .../query-dsl/span-within-query.asciidoc | 11 - .../query-dsl/special-queries.asciidoc | 31 --- .../query-dsl/term-level-queries.asciidoc | 77 ------ docs/java-api/query-dsl/term-query.asciidoc | 11 - docs/java-api/query-dsl/terms-query.asciidoc | 11 - .../query-dsl/wildcard-query.asciidoc | 11 - .../java-api/query-dsl/wrapper-query.asciidoc | 11 - docs/java-api/search.asciidoc | 250 ------------------ docs/reference/modules/network.asciidoc | 7 +- docs/reference/modules/node.asciidoc | 5 +- docs/reference/redirects.asciidoc | 8 +- 104 files changed, 8 insertions(+), 4149 deletions(-) delete mode 100644 docs/java-api/admin/cluster/health.asciidoc delete mode 100644 docs/java-api/admin/cluster/index.asciidoc delete mode 100644 docs/java-api/admin/cluster/stored-scripts.asciidoc delete mode 100644 docs/java-api/admin/index.asciidoc delete mode 100644 docs/java-api/admin/indices/create-index.asciidoc delete mode 100644 docs/java-api/admin/indices/get-settings.asciidoc delete mode 100644 docs/java-api/admin/indices/index.asciidoc delete mode 100644 docs/java-api/admin/indices/put-mapping.asciidoc delete mode 100644 docs/java-api/admin/indices/refresh.asciidoc delete mode 100644 docs/java-api/admin/indices/update-settings.asciidoc delete mode 100644 docs/java-api/aggregations/bucket.asciidoc delete mode 100644 docs/java-api/aggregations/bucket/children-aggregation.asciidoc delete mode 100644 docs/java-api/aggregations/bucket/datehistogram-aggregation.asciidoc delete mode 100644 docs/java-api/aggregations/bucket/daterange-aggregation.asciidoc delete mode 100644 docs/java-api/aggregations/bucket/filter-aggregation.asciidoc delete mode 100644 docs/java-api/aggregations/bucket/filters-aggregation.asciidoc delete mode 100644 docs/java-api/aggregations/bucket/geodistance-aggregation.asciidoc delete mode 100644 docs/java-api/aggregations/bucket/geohashgrid-aggregation.asciidoc delete mode 100644 docs/java-api/aggregations/bucket/global-aggregation.asciidoc delete mode 100644 docs/java-api/aggregations/bucket/histogram-aggregation.asciidoc delete mode 100644 docs/java-api/aggregations/bucket/iprange-aggregation.asciidoc delete mode 100644 docs/java-api/aggregations/bucket/missing-aggregation.asciidoc delete mode 100644 docs/java-api/aggregations/bucket/nested-aggregation.asciidoc delete mode 100644 docs/java-api/aggregations/bucket/range-aggregation.asciidoc delete mode 100644 docs/java-api/aggregations/bucket/reverse-nested-aggregation.asciidoc delete mode 100644 docs/java-api/aggregations/bucket/significantterms-aggregation.asciidoc delete mode 100644 docs/java-api/aggregations/bucket/terms-aggregation.asciidoc delete mode 100644 docs/java-api/aggregations/metrics.asciidoc delete mode 100644 docs/java-api/aggregations/metrics/avg-aggregation.asciidoc delete mode 100644 docs/java-api/aggregations/metrics/cardinality-aggregation.asciidoc delete mode 100644 docs/java-api/aggregations/metrics/extendedstats-aggregation.asciidoc delete mode 100644 docs/java-api/aggregations/metrics/geobounds-aggregation.asciidoc delete mode 100644 docs/java-api/aggregations/metrics/max-aggregation.asciidoc delete mode 100644 docs/java-api/aggregations/metrics/min-aggregation.asciidoc delete mode 100644 docs/java-api/aggregations/metrics/percentile-aggregation.asciidoc delete mode 100644 docs/java-api/aggregations/metrics/percentile-rank-aggregation.asciidoc delete mode 100644 docs/java-api/aggregations/metrics/scripted-metric-aggregation.asciidoc delete mode 100644 docs/java-api/aggregations/metrics/stats-aggregation.asciidoc delete mode 100644 docs/java-api/aggregations/metrics/sum-aggregation.asciidoc delete mode 100644 docs/java-api/aggregations/metrics/tophits-aggregation.asciidoc delete mode 100644 docs/java-api/aggregations/metrics/valuecount-aggregation.asciidoc delete mode 100644 docs/java-api/aggs.asciidoc delete mode 100644 docs/java-api/client.asciidoc delete mode 100644 docs/java-api/docs.asciidoc delete mode 100644 docs/java-api/docs/bulk.asciidoc delete mode 100644 docs/java-api/docs/delete.asciidoc delete mode 100644 docs/java-api/docs/get.asciidoc delete mode 100644 docs/java-api/docs/index_.asciidoc delete mode 100644 docs/java-api/docs/multi-get.asciidoc delete mode 100644 docs/java-api/docs/reindex.asciidoc delete mode 100644 docs/java-api/docs/update-by-query.asciidoc delete mode 100644 docs/java-api/docs/update.asciidoc delete mode 100644 docs/java-api/index.asciidoc delete mode 100644 docs/java-api/query-dsl.asciidoc delete mode 100644 docs/java-api/query-dsl/bool-query.asciidoc delete mode 100644 docs/java-api/query-dsl/boosting-query.asciidoc delete mode 100644 docs/java-api/query-dsl/common-terms-query.asciidoc delete mode 100644 docs/java-api/query-dsl/compound-queries.asciidoc delete mode 100644 docs/java-api/query-dsl/constant-score-query.asciidoc delete mode 100644 docs/java-api/query-dsl/dis-max-query.asciidoc delete mode 100644 docs/java-api/query-dsl/exists-query.asciidoc delete mode 100644 docs/java-api/query-dsl/full-text-queries.asciidoc delete mode 100644 docs/java-api/query-dsl/function-score-query.asciidoc delete mode 100644 docs/java-api/query-dsl/fuzzy-query.asciidoc delete mode 100644 docs/java-api/query-dsl/geo-bounding-box-query.asciidoc delete mode 100644 docs/java-api/query-dsl/geo-distance-query.asciidoc delete mode 100644 docs/java-api/query-dsl/geo-polygon-query.asciidoc delete mode 100644 docs/java-api/query-dsl/geo-queries.asciidoc delete mode 100644 docs/java-api/query-dsl/geo-shape-query.asciidoc delete mode 100644 docs/java-api/query-dsl/has-child-query.asciidoc delete mode 100644 docs/java-api/query-dsl/has-parent-query.asciidoc delete mode 100644 docs/java-api/query-dsl/ids-query.asciidoc delete mode 100644 docs/java-api/query-dsl/joining-queries.asciidoc delete mode 100644 docs/java-api/query-dsl/match-all-query.asciidoc delete mode 100644 docs/java-api/query-dsl/match-query.asciidoc delete mode 100644 docs/java-api/query-dsl/mlt-query.asciidoc delete mode 100644 docs/java-api/query-dsl/multi-match-query.asciidoc delete mode 100644 docs/java-api/query-dsl/nested-query.asciidoc delete mode 100644 docs/java-api/query-dsl/percolate-query.asciidoc delete mode 100644 docs/java-api/query-dsl/prefix-query.asciidoc delete mode 100644 docs/java-api/query-dsl/query-string-query.asciidoc delete mode 100644 docs/java-api/query-dsl/range-query.asciidoc delete mode 100644 docs/java-api/query-dsl/regexp-query.asciidoc delete mode 100644 docs/java-api/query-dsl/script-query.asciidoc delete mode 100644 docs/java-api/query-dsl/simple-query-string-query.asciidoc delete mode 100644 docs/java-api/query-dsl/span-containing-query.asciidoc delete mode 100644 docs/java-api/query-dsl/span-first-query.asciidoc delete mode 100644 docs/java-api/query-dsl/span-multi-term-query.asciidoc delete mode 100644 docs/java-api/query-dsl/span-near-query.asciidoc delete mode 100644 docs/java-api/query-dsl/span-not-query.asciidoc delete mode 100644 docs/java-api/query-dsl/span-or-query.asciidoc delete mode 100644 docs/java-api/query-dsl/span-queries.asciidoc delete mode 100644 docs/java-api/query-dsl/span-term-query.asciidoc delete mode 100644 docs/java-api/query-dsl/span-within-query.asciidoc delete mode 100644 docs/java-api/query-dsl/special-queries.asciidoc delete mode 100644 docs/java-api/query-dsl/term-level-queries.asciidoc delete mode 100644 docs/java-api/query-dsl/term-query.asciidoc delete mode 100644 docs/java-api/query-dsl/terms-query.asciidoc delete mode 100644 docs/java-api/query-dsl/wildcard-query.asciidoc delete mode 100644 docs/java-api/query-dsl/wrapper-query.asciidoc delete mode 100644 docs/java-api/search.asciidoc diff --git a/docs/java-api/admin/cluster/health.asciidoc b/docs/java-api/admin/cluster/health.asciidoc deleted file mode 100644 index 615a011cf72c9..0000000000000 --- a/docs/java-api/admin/cluster/health.asciidoc +++ /dev/null @@ -1,76 +0,0 @@ -[[java-admin-cluster-health]] -==== Cluster Health - -[[java-admin-cluster-health-health]] -===== Health - -The cluster health API allows to get a very simple status on the health of the cluster and also can give you -some technical information about the cluster status per index: - -[source,java] --------------------------------------------------- -ClusterHealthResponse healths = client.admin().cluster().prepareHealth().get(); <1> -String clusterName = healths.getClusterName(); <2> -int numberOfDataNodes = healths.getNumberOfDataNodes(); <3> -int numberOfNodes = healths.getNumberOfNodes(); <4> - -for (ClusterIndexHealth health : healths.getIndices().values()) { <5> - String index = health.getIndex(); <6> - int numberOfShards = health.getNumberOfShards(); <7> - int numberOfReplicas = health.getNumberOfReplicas(); <8> - ClusterHealthStatus status = health.getStatus(); <9> -} --------------------------------------------------- -<1> Get information for all indices -<2> Access the cluster name -<3> Get the total number of data nodes -<4> Get the total number of nodes -<5> Iterate over all indices -<6> Index name -<7> Number of shards -<8> Number of replicas -<9> Index status - -[[java-admin-cluster-health-wait-status]] -===== Wait for status - -You can use the cluster health API to wait for a specific status for the whole cluster or for a given index: - -[source,java] --------------------------------------------------- -client.admin().cluster().prepareHealth() <1> - .setWaitForYellowStatus() <2> - .get(); -client.admin().cluster().prepareHealth("company") <3> - .setWaitForGreenStatus() <4> - .get(); - -client.admin().cluster().prepareHealth("employee") <5> - .setWaitForGreenStatus() <6> - .setTimeout(TimeValue.timeValueSeconds(2)) <7> - .get(); --------------------------------------------------- -<1> Prepare a health request -<2> Wait for the cluster being yellow -<3> Prepare the health request for index `company` -<4> Wait for the index being green -<5> Prepare the health request for index `employee` -<6> Wait for the index being green -<7> Wait at most for 2 seconds - -If the index does not have the expected status and you want to fail in that case, you need -to explicitly interpret the result: - -[source,java] --------------------------------------------------- -ClusterHealthResponse response = client.admin().cluster().prepareHealth("company") - .setWaitForGreenStatus() <1> - .get(); - -ClusterHealthStatus status = response.getIndices().get("company").getStatus(); -if (!status.equals(ClusterHealthStatus.GREEN)) { - throw new RuntimeException("Index is in " + status + " state"); <2> -} --------------------------------------------------- -<1> Wait for the index being green -<2> Throw an exception if not `GREEN` diff --git a/docs/java-api/admin/cluster/index.asciidoc b/docs/java-api/admin/cluster/index.asciidoc deleted file mode 100644 index 4e1850a34fe47..0000000000000 --- a/docs/java-api/admin/cluster/index.asciidoc +++ /dev/null @@ -1,16 +0,0 @@ -[[java-admin-cluster]] -=== Cluster Administration - -To access cluster Java API, you need to call `cluster()` method from an <>: - -[source,java] --------------------------------------------------- -ClusterAdminClient clusterAdminClient = client.admin().cluster(); --------------------------------------------------- - -[NOTE] -In the rest of this guide, we will use `client.admin().cluster()`. - -include::health.asciidoc[] - -include::stored-scripts.asciidoc[] diff --git a/docs/java-api/admin/cluster/stored-scripts.asciidoc b/docs/java-api/admin/cluster/stored-scripts.asciidoc deleted file mode 100644 index 5ebf89e92be55..0000000000000 --- a/docs/java-api/admin/cluster/stored-scripts.asciidoc +++ /dev/null @@ -1,29 +0,0 @@ -[[stored-scripts]] -==== Stored Scripts API - -The stored script API allows one to interact with scripts and templates -stored in Elasticsearch. It can be used to create, update, get, -and delete stored scripts and templates. - -[source,java] --------------------------------------------------- -PutStoredScriptResponse response = client.admin().cluster().preparePutStoredScript() - .setId("script1") - .setContent(new BytesArray("{\"script\": {\"lang\": \"painless\", \"source\": \"_score * doc['my_numeric_field'].value\"} }"), XContentType.JSON) - .get(); - -GetStoredScriptResponse response = client().admin().cluster().prepareGetStoredScript() - .setId("script1") - .get(); - -DeleteStoredScriptResponse response = client().admin().cluster().prepareDeleteStoredScript() - .setId("script1") - .get(); --------------------------------------------------- - -To store templates simply use "mustache" for the scriptLang. - -===== Script Language - -The put stored script API allows one to set the language of the stored script. -If one is not provided the default scripting language will be used. diff --git a/docs/java-api/admin/index.asciidoc b/docs/java-api/admin/index.asciidoc deleted file mode 100644 index 41599a82c7b3a..0000000000000 --- a/docs/java-api/admin/index.asciidoc +++ /dev/null @@ -1,18 +0,0 @@ -[[java-admin]] -== Java API Administration - -Elasticsearch provides a full Java API to deal with administration tasks. - -To access them, you need to call `admin()` method from a client to get an `AdminClient`: - -[source,java] --------------------------------------------------- -AdminClient adminClient = client.admin(); --------------------------------------------------- - -[NOTE] -In the rest of this guide, we will use `client.admin()`. - -include::indices/index.asciidoc[] - -include::cluster/index.asciidoc[] diff --git a/docs/java-api/admin/indices/create-index.asciidoc b/docs/java-api/admin/indices/create-index.asciidoc deleted file mode 100644 index 34b776bd04e23..0000000000000 --- a/docs/java-api/admin/indices/create-index.asciidoc +++ /dev/null @@ -1,28 +0,0 @@ -[[java-admin-indices-create-index]] -==== Create Index - -Using an <>, you can create an index with all default settings and no mapping: - -[source,java] --------------------------------------------------- -client.admin().indices().prepareCreate("twitter").get(); --------------------------------------------------- - -[float] -[[java-admin-indices-create-index-settings]] -===== Index Settings - -Each index created can have specific settings associated with it. - -[source,java] --------------------------------------------------- -client.admin().indices().prepareCreate("twitter") - .setSettings(Settings.builder() <1> - .put("index.number_of_shards", 3) - .put("index.number_of_replicas", 2) - ) - .get(); <2> --------------------------------------------------- -<1> Settings for this index -<2> Execute the action and wait for the result - diff --git a/docs/java-api/admin/indices/get-settings.asciidoc b/docs/java-api/admin/indices/get-settings.asciidoc deleted file mode 100644 index 844aaf65ec9b5..0000000000000 --- a/docs/java-api/admin/indices/get-settings.asciidoc +++ /dev/null @@ -1,22 +0,0 @@ -[[java-admin-indices-get-settings]] -==== Get Settings - -The get settings API allows to retrieve settings of index/indices: - -[source,java] --------------------------------------------------- -GetSettingsResponse response = client.admin().indices() - .prepareGetSettings("company", "employee").get(); <1> -for (ObjectObjectCursor cursor : response.getIndexToSettings()) { <2> - String index = cursor.key; <3> - Settings settings = cursor.value; <4> - Integer shards = settings.getAsInt("index.number_of_shards", null); <5> - Integer replicas = settings.getAsInt("index.number_of_replicas", null); <6> -} --------------------------------------------------- -<1> Get settings for indices `company` and `employee` -<2> Iterate over results -<3> Index name -<4> Settings for the given index -<5> Number of shards for this index -<6> Number of replicas for this index diff --git a/docs/java-api/admin/indices/index.asciidoc b/docs/java-api/admin/indices/index.asciidoc deleted file mode 100644 index bbd365076c72e..0000000000000 --- a/docs/java-api/admin/indices/index.asciidoc +++ /dev/null @@ -1,21 +0,0 @@ -[[java-admin-indices]] -=== Indices Administration - -To access indices Java API, you need to call `indices()` method from an <>: - -[source,java] --------------------------------------------------- -IndicesAdminClient indicesAdminClient = client.admin().indices(); --------------------------------------------------- - -[NOTE] -In the rest of this guide, we will use `client.admin().indices()`. - -include::create-index.asciidoc[] - -include::put-mapping.asciidoc[] - -include::refresh.asciidoc[] - -include::get-settings.asciidoc[] -include::update-settings.asciidoc[] diff --git a/docs/java-api/admin/indices/put-mapping.asciidoc b/docs/java-api/admin/indices/put-mapping.asciidoc deleted file mode 100644 index d63a498d994d2..0000000000000 --- a/docs/java-api/admin/indices/put-mapping.asciidoc +++ /dev/null @@ -1,30 +0,0 @@ -[[java-admin-indices-put-mapping]] - -==== Put Mapping - -You can add mappings at index creation time: - -["source","java",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{client-tests}/IndicesDocumentationIT.java[index-with-mapping] --------------------------------------------------- -<1> <> called `twitter` -<2> Add a `_doc` type with a field called `message` that has the datatype `text`. - -There are several variants of the above `addMapping` method, some taking an -`XContentBuilder` or a `Map` with the mapping definition as arguments. Make sure -to check the javadocs to pick the simplest one for your use case. - -The PUT mapping API also allows for updating the mapping after index -creation. In this case you can provide the mapping as a String similar -to the REST API syntax: - -["source","java",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{client-tests}/IndicesDocumentationIT.java[putMapping-request-source] --------------------------------------------------- -<1> Puts a mapping on existing index called `twitter` -<2> Adds a new field `name` to the mapping -<3> The type can be also provided within the source - -:base-dir!: diff --git a/docs/java-api/admin/indices/refresh.asciidoc b/docs/java-api/admin/indices/refresh.asciidoc deleted file mode 100644 index 856c270daf368..0000000000000 --- a/docs/java-api/admin/indices/refresh.asciidoc +++ /dev/null @@ -1,19 +0,0 @@ -[[java-admin-indices-refresh]] -==== Refresh - -The refresh API allows to explicitly refresh one or more index: - -[source,java] --------------------------------------------------- -client.admin().indices().prepareRefresh().get(); <1> -client.admin().indices() - .prepareRefresh("twitter") <2> - .get(); -client.admin().indices() - .prepareRefresh("twitter", "company") <3> - .get(); --------------------------------------------------- -<1> Refresh all indices -<2> Refresh one index -<3> Refresh many indices - diff --git a/docs/java-api/admin/indices/update-settings.asciidoc b/docs/java-api/admin/indices/update-settings.asciidoc deleted file mode 100644 index 9c2cba2adf03b..0000000000000 --- a/docs/java-api/admin/indices/update-settings.asciidoc +++ /dev/null @@ -1,16 +0,0 @@ -[[java-admin-indices-update-settings]] -==== Update Indices Settings - -You can change index settings by calling: - -[source,java] --------------------------------------------------- -client.admin().indices().prepareUpdateSettings("twitter") <1> - .setSettings(Settings.builder() <2> - .put("index.number_of_replicas", 0) - ) - .get(); --------------------------------------------------- -<1> Index to update -<2> Settings - diff --git a/docs/java-api/aggregations/bucket.asciidoc b/docs/java-api/aggregations/bucket.asciidoc deleted file mode 100644 index fe2e0ea9be309..0000000000000 --- a/docs/java-api/aggregations/bucket.asciidoc +++ /dev/null @@ -1,33 +0,0 @@ -[[java-aggregations-bucket]] - -include::bucket/global-aggregation.asciidoc[] - -include::bucket/filter-aggregation.asciidoc[] - -include::bucket/filters-aggregation.asciidoc[] - -include::bucket/missing-aggregation.asciidoc[] - -include::bucket/nested-aggregation.asciidoc[] - -include::bucket/reverse-nested-aggregation.asciidoc[] - -include::bucket/children-aggregation.asciidoc[] - -include::bucket/terms-aggregation.asciidoc[] - -include::bucket/significantterms-aggregation.asciidoc[] - -include::bucket/range-aggregation.asciidoc[] - -include::bucket/daterange-aggregation.asciidoc[] - -include::bucket/iprange-aggregation.asciidoc[] - -include::bucket/histogram-aggregation.asciidoc[] - -include::bucket/datehistogram-aggregation.asciidoc[] - -include::bucket/geodistance-aggregation.asciidoc[] - -include::bucket/geohashgrid-aggregation.asciidoc[] diff --git a/docs/java-api/aggregations/bucket/children-aggregation.asciidoc b/docs/java-api/aggregations/bucket/children-aggregation.asciidoc deleted file mode 100644 index f6a23fdafe976..0000000000000 --- a/docs/java-api/aggregations/bucket/children-aggregation.asciidoc +++ /dev/null @@ -1,35 +0,0 @@ -[[java-aggs-bucket-children]] -==== Children Aggregation - -Here is how you can use -{ref}/search-aggregations-bucket-children-aggregation.html[Children Aggregation] -with Java API. - - -===== Prepare aggregation request - -Here is an example on how to create the aggregation request: - -[source,java] --------------------------------------------------- -AggregationBuilder aggregation = - AggregationBuilders - .children("agg", "reseller"); <1> --------------------------------------------------- -1. `"agg"` is the name of the aggregation and `"reseller"` is the child type - -===== Use aggregation response - -Import Aggregation definition classes: - -[source,java] --------------------------------------------------- -import org.elasticsearch.join.aggregations.Children; --------------------------------------------------- - -[source,java] --------------------------------------------------- -// sr is here your SearchResponse object -Children agg = sr.getAggregations().get("agg"); -agg.getDocCount(); // Doc count --------------------------------------------------- diff --git a/docs/java-api/aggregations/bucket/datehistogram-aggregation.asciidoc b/docs/java-api/aggregations/bucket/datehistogram-aggregation.asciidoc deleted file mode 100644 index 610262b046c21..0000000000000 --- a/docs/java-api/aggregations/bucket/datehistogram-aggregation.asciidoc +++ /dev/null @@ -1,73 +0,0 @@ -[[java-aggs-bucket-datehistogram]] -==== Date Histogram Aggregation - -Here is how you can use -{ref}/search-aggregations-bucket-datehistogram-aggregation.html[Date Histogram Aggregation] -with Java API. - - -===== Prepare aggregation request - -Here is an example on how to create the aggregation request: - -[source,java] --------------------------------------------------- -AggregationBuilder aggregation = - AggregationBuilders - .dateHistogram("agg") - .field("dateOfBirth") - .calendarInterval(DateHistogramInterval.YEAR); --------------------------------------------------- - -Or if you want to set an interval of 10 days: - -[source,java] --------------------------------------------------- -AggregationBuilder aggregation = - AggregationBuilders - .dateHistogram("agg") - .field("dateOfBirth") - .fixedInterval(DateHistogramInterval.days(10)); --------------------------------------------------- - - -===== Use aggregation response - -Import Aggregation definition classes: - -[source,java] --------------------------------------------------- -import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; --------------------------------------------------- - -[source,java] --------------------------------------------------- -// sr is here your SearchResponse object -Histogram agg = sr.getAggregations().get("agg"); - -// For each entry -for (Histogram.Bucket entry : agg.getBuckets()) { - DateTime key = (DateTime) entry.getKey(); // Key - String keyAsString = entry.getKeyAsString(); // Key as String - long docCount = entry.getDocCount(); // Doc count - - logger.info("key [{}], date [{}], doc_count [{}]", keyAsString, key.getYear(), docCount); -} --------------------------------------------------- - -This will basically produce for the first example: - -[source,text] --------------------------------------------------- -key [1942-01-01T00:00:00.000Z], date [1942], doc_count [1] -key [1945-01-01T00:00:00.000Z], date [1945], doc_count [1] -key [1946-01-01T00:00:00.000Z], date [1946], doc_count [1] -... -key [2005-01-01T00:00:00.000Z], date [2005], doc_count [1] -key [2007-01-01T00:00:00.000Z], date [2007], doc_count [2] -key [2008-01-01T00:00:00.000Z], date [2008], doc_count [3] --------------------------------------------------- - -===== Order - -Supports the same order functionality as the <>. diff --git a/docs/java-api/aggregations/bucket/daterange-aggregation.asciidoc b/docs/java-api/aggregations/bucket/daterange-aggregation.asciidoc deleted file mode 100644 index fa8f31e8cd0b7..0000000000000 --- a/docs/java-api/aggregations/bucket/daterange-aggregation.asciidoc +++ /dev/null @@ -1,59 +0,0 @@ -[[java-aggs-bucket-daterange]] -==== Date Range Aggregation - -Here is how you can use -{ref}/search-aggregations-bucket-daterange-aggregation.html[Date Range Aggregation] -with Java API. - - -===== Prepare aggregation request - -Here is an example on how to create the aggregation request: - -[source,java] --------------------------------------------------- -AggregationBuilder aggregation = - AggregationBuilders - .dateRange("agg") - .field("dateOfBirth") - .format("yyyy") - .addUnboundedTo("1950") // from -infinity to 1950 (excluded) - .addRange("1950", "1960") // from 1950 to 1960 (excluded) - .addUnboundedFrom("1960"); // from 1960 to +infinity --------------------------------------------------- - - -===== Use aggregation response - -Import Aggregation definition classes: - -[source,java] --------------------------------------------------- -import org.elasticsearch.search.aggregations.bucket.range.Range; --------------------------------------------------- - -[source,java] --------------------------------------------------- -// sr is here your SearchResponse object -Range agg = sr.getAggregations().get("agg"); - -// For each entry -for (Range.Bucket entry : agg.getBuckets()) { - String key = entry.getKeyAsString(); // Date range as key - DateTime fromAsDate = (DateTime) entry.getFrom(); // Date bucket from as a Date - DateTime toAsDate = (DateTime) entry.getTo(); // Date bucket to as a Date - long docCount = entry.getDocCount(); // Doc count - - logger.info("key [{}], from [{}], to [{}], doc_count [{}]", key, fromAsDate, toAsDate, docCount); -} --------------------------------------------------- - -This will basically produce: - -[source,text] --------------------------------------------------- -key [*-1950], from [null], to [1950-01-01T00:00:00.000Z], doc_count [8] -key [1950-1960], from [1950-01-01T00:00:00.000Z], to [1960-01-01T00:00:00.000Z], doc_count [5] -key [1960-*], from [1960-01-01T00:00:00.000Z], to [null], doc_count [37] --------------------------------------------------- - diff --git a/docs/java-api/aggregations/bucket/filter-aggregation.asciidoc b/docs/java-api/aggregations/bucket/filter-aggregation.asciidoc deleted file mode 100644 index 3ffb05202bbef..0000000000000 --- a/docs/java-api/aggregations/bucket/filter-aggregation.asciidoc +++ /dev/null @@ -1,34 +0,0 @@ -[[java-aggs-bucket-filter]] -==== Filter Aggregation - -Here is how you can use -{ref}/search-aggregations-bucket-filter-aggregation.html[Filter Aggregation] -with Java API. - - -===== Prepare aggregation request - -Here is an example on how to create the aggregation request: - -[source,java] --------------------------------------------------- -AggregationBuilders - .filter("agg", QueryBuilders.termQuery("gender", "male")); --------------------------------------------------- - - -===== Use aggregation response - -Import Aggregation definition classes: - -[source,java] --------------------------------------------------- -import org.elasticsearch.search.aggregations.bucket.filter.Filter; --------------------------------------------------- - -[source,java] --------------------------------------------------- -// sr is here your SearchResponse object -Filter agg = sr.getAggregations().get("agg"); -agg.getDocCount(); // Doc count --------------------------------------------------- diff --git a/docs/java-api/aggregations/bucket/filters-aggregation.asciidoc b/docs/java-api/aggregations/bucket/filters-aggregation.asciidoc deleted file mode 100644 index 0b782304dacc0..0000000000000 --- a/docs/java-api/aggregations/bucket/filters-aggregation.asciidoc +++ /dev/null @@ -1,51 +0,0 @@ -[[java-aggs-bucket-filters]] -==== Filters Aggregation - -Here is how you can use -{ref}/search-aggregations-bucket-filters-aggregation.html[Filters Aggregation] -with Java API. - - -===== Prepare aggregation request - -Here is an example on how to create the aggregation request: - -[source,java] --------------------------------------------------- -AggregationBuilder aggregation = - AggregationBuilders - .filters("agg", - new FiltersAggregator.KeyedFilter("men", QueryBuilders.termQuery("gender", "male")), - new FiltersAggregator.KeyedFilter("women", QueryBuilders.termQuery("gender", "female"))); --------------------------------------------------- - - -===== Use aggregation response - -Import Aggregation definition classes: - -[source,java] --------------------------------------------------- -import org.elasticsearch.search.aggregations.bucket.filters.Filters; --------------------------------------------------- - -[source,java] --------------------------------------------------- -// sr is here your SearchResponse object -Filters agg = sr.getAggregations().get("agg"); - -// For each entry -for (Filters.Bucket entry : agg.getBuckets()) { - String key = entry.getKeyAsString(); // bucket key - long docCount = entry.getDocCount(); // Doc count - logger.info("key [{}], doc_count [{}]", key, docCount); -} --------------------------------------------------- - -This will basically produce: - -[source,text] --------------------------------------------------- -key [men], doc_count [4982] -key [women], doc_count [5018] --------------------------------------------------- diff --git a/docs/java-api/aggregations/bucket/geodistance-aggregation.asciidoc b/docs/java-api/aggregations/bucket/geodistance-aggregation.asciidoc deleted file mode 100644 index 472c3ac59bf48..0000000000000 --- a/docs/java-api/aggregations/bucket/geodistance-aggregation.asciidoc +++ /dev/null @@ -1,58 +0,0 @@ -[[java-aggs-bucket-geodistance]] -==== Geo Distance Aggregation - -Here is how you can use -{ref}/search-aggregations-bucket-geodistance-aggregation.html[Geo Distance Aggregation] -with Java API. - - -===== Prepare aggregation request - -Here is an example on how to create the aggregation request: - -[source,java] --------------------------------------------------- -AggregationBuilder aggregation = - AggregationBuilders - .geoDistance("agg", new GeoPoint(48.84237171118314,2.33320027692004)) - .field("address.location") - .unit(DistanceUnit.KILOMETERS) - .addUnboundedTo(3.0) - .addRange(3.0, 10.0) - .addRange(10.0, 500.0); --------------------------------------------------- - - -===== Use aggregation response - -Import Aggregation definition classes: - -[source,java] --------------------------------------------------- -import org.elasticsearch.search.aggregations.bucket.range.Range; --------------------------------------------------- - -[source,java] --------------------------------------------------- -// sr is here your SearchResponse object -Range agg = sr.getAggregations().get("agg"); - -// For each entry -for (Range.Bucket entry : agg.getBuckets()) { - String key = entry.getKeyAsString(); // key as String - Number from = (Number) entry.getFrom(); // bucket from value - Number to = (Number) entry.getTo(); // bucket to value - long docCount = entry.getDocCount(); // Doc count - - logger.info("key [{}], from [{}], to [{}], doc_count [{}]", key, from, to, docCount); -} --------------------------------------------------- - -This will basically produce: - -[source,text] --------------------------------------------------- -key [*-3.0], from [0.0], to [3.0], doc_count [161] -key [3.0-10.0], from [3.0], to [10.0], doc_count [460] -key [10.0-500.0], from [10.0], to [500.0], doc_count [4925] --------------------------------------------------- diff --git a/docs/java-api/aggregations/bucket/geohashgrid-aggregation.asciidoc b/docs/java-api/aggregations/bucket/geohashgrid-aggregation.asciidoc deleted file mode 100644 index 19e3f03349397..0000000000000 --- a/docs/java-api/aggregations/bucket/geohashgrid-aggregation.asciidoc +++ /dev/null @@ -1,57 +0,0 @@ -[[java-aggs-bucket-geohashgrid]] -==== Geo Hash Grid Aggregation - -Here is how you can use -{ref}/search-aggregations-bucket-geohashgrid-aggregation.html[Geo Hash Grid Aggregation] -with Java API. - - -===== Prepare aggregation request - -Here is an example on how to create the aggregation request: - -[source,java] --------------------------------------------------- -AggregationBuilder aggregation = - AggregationBuilders - .geohashGrid("agg") - .field("address.location") - .precision(4); --------------------------------------------------- - - -===== Use aggregation response - -Import Aggregation definition classes: - -[source,java] --------------------------------------------------- -import org.elasticsearch.search.aggregations.bucket.geogrid.GeoHashGrid; --------------------------------------------------- - -[source,java] --------------------------------------------------- -// sr is here your SearchResponse object -GeoHashGrid agg = sr.getAggregations().get("agg"); - -// For each entry -for (GeoHashGrid.Bucket entry : agg.getBuckets()) { - String keyAsString = entry.getKeyAsString(); // key as String - GeoPoint key = (GeoPoint) entry.getKey(); // key as geo point - long docCount = entry.getDocCount(); // Doc count - - logger.info("key [{}], point {}, doc_count [{}]", keyAsString, key, docCount); -} --------------------------------------------------- - -This will basically produce: - -[source,text] --------------------------------------------------- -key [gbqu], point [47.197265625, -1.58203125], doc_count [1282] -key [gbvn], point [50.361328125, -4.04296875], doc_count [1248] -key [u1j0], point [50.712890625, 7.20703125], doc_count [1156] -key [u0j2], point [45.087890625, 7.55859375], doc_count [1138] -... --------------------------------------------------- - diff --git a/docs/java-api/aggregations/bucket/global-aggregation.asciidoc b/docs/java-api/aggregations/bucket/global-aggregation.asciidoc deleted file mode 100644 index e0a731159adf5..0000000000000 --- a/docs/java-api/aggregations/bucket/global-aggregation.asciidoc +++ /dev/null @@ -1,35 +0,0 @@ -[[java-aggs-bucket-global]] -==== Global Aggregation - -Here is how you can use -{ref}/search-aggregations-bucket-global-aggregation.html[Global Aggregation] -with Java API. - - -===== Prepare aggregation request - -Here is an example on how to create the aggregation request: - -[source,java] --------------------------------------------------- -AggregationBuilders - .global("agg") - .subAggregation(AggregationBuilders.terms("genders").field("gender")); --------------------------------------------------- - - -===== Use aggregation response - -Import Aggregation definition classes: - -[source,java] --------------------------------------------------- -import org.elasticsearch.search.aggregations.bucket.global.Global; --------------------------------------------------- - -[source,java] --------------------------------------------------- -// sr is here your SearchResponse object -Global agg = sr.getAggregations().get("agg"); -agg.getDocCount(); // Doc count --------------------------------------------------- diff --git a/docs/java-api/aggregations/bucket/histogram-aggregation.asciidoc b/docs/java-api/aggregations/bucket/histogram-aggregation.asciidoc deleted file mode 100644 index 59bb555401c5b..0000000000000 --- a/docs/java-api/aggregations/bucket/histogram-aggregation.asciidoc +++ /dev/null @@ -1,48 +0,0 @@ -[[java-aggs-bucket-histogram]] -==== Histogram Aggregation - -Here is how you can use -{ref}/search-aggregations-bucket-histogram-aggregation.html[Histogram Aggregation] -with Java API. - - -===== Prepare aggregation request - -Here is an example on how to create the aggregation request: - -[source,java] --------------------------------------------------- -AggregationBuilder aggregation = - AggregationBuilders - .histogram("agg") - .field("height") - .interval(1); --------------------------------------------------- - - -===== Use aggregation response - -Import Aggregation definition classes: - -[source,java] --------------------------------------------------- -import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; --------------------------------------------------- - -[source,java] --------------------------------------------------- -// sr is here your SearchResponse object -Histogram agg = sr.getAggregations().get("agg"); - -// For each entry -for (Histogram.Bucket entry : agg.getBuckets()) { - Number key = (Number) entry.getKey(); // Key - long docCount = entry.getDocCount(); // Doc count - - logger.info("key [{}], doc_count [{}]", key, docCount); -} --------------------------------------------------- - -===== Order - -Supports the same order functionality as the <>. diff --git a/docs/java-api/aggregations/bucket/iprange-aggregation.asciidoc b/docs/java-api/aggregations/bucket/iprange-aggregation.asciidoc deleted file mode 100644 index a2c07df1b26e7..0000000000000 --- a/docs/java-api/aggregations/bucket/iprange-aggregation.asciidoc +++ /dev/null @@ -1,79 +0,0 @@ -[[java-aggs-bucket-iprange]] -==== Ip Range Aggregation - -Here is how you can use -{ref}/search-aggregations-bucket-iprange-aggregation.html[Ip Range Aggregation] -with Java API. - - -===== Prepare aggregation request - -Here is an example on how to create the aggregation request: - -[source,java] --------------------------------------------------- -AggregationBuilder aggregation = - AggregationBuilders - .ipRange("agg") - .field("ip") - .addUnboundedTo("192.168.1.0") // from -infinity to 192.168.1.0 (excluded) - .addRange("192.168.1.0", "192.168.2.0") // from 192.168.1.0 to 192.168.2.0 (excluded) - .addUnboundedFrom("192.168.2.0"); // from 192.168.2.0 to +infinity --------------------------------------------------- - -Note that you could also use ip masks as ranges: - -[source,java] --------------------------------------------------- -AggregationBuilder aggregation = - AggregationBuilders - .ipRange("agg") - .field("ip") - .addMaskRange("192.168.0.0/32") - .addMaskRange("192.168.0.0/24") - .addMaskRange("192.168.0.0/16"); --------------------------------------------------- - -===== Use aggregation response - -Import Aggregation definition classes: - -[source,java] --------------------------------------------------- -import org.elasticsearch.search.aggregations.bucket.range.Range; --------------------------------------------------- - -[source,java] --------------------------------------------------- -// sr is here your SearchResponse object -Range agg = sr.getAggregations().get("agg"); - -// For each entry -for (Range.Bucket entry : agg.getBuckets()) { - String key = entry.getKeyAsString(); // Ip range as key - String fromAsString = entry.getFromAsString(); // Ip bucket from as a String - String toAsString = entry.getToAsString(); // Ip bucket to as a String - long docCount = entry.getDocCount(); // Doc count - - logger.info("key [{}], from [{}], to [{}], doc_count [{}]", key, fromAsString, toAsString, docCount); -} --------------------------------------------------- - -This will basically produce for the first example: - -[source,text] --------------------------------------------------- -key [*-192.168.1.0], from [null], to [192.168.1.0], doc_count [13] -key [192.168.1.0-192.168.2.0], from [192.168.1.0], to [192.168.2.0], doc_count [14] -key [192.168.2.0-*], from [192.168.2.0], to [null], doc_count [23] --------------------------------------------------- - -And for the second one (using Ip masks): - -[source,text] --------------------------------------------------- -key [192.168.0.0/32], from [192.168.0.0], to [192.168.0.1], doc_count [0] -key [192.168.0.0/24], from [192.168.0.0], to [192.168.1.0], doc_count [13] -key [192.168.0.0/16], from [192.168.0.0], to [192.169.0.0], doc_count [50] --------------------------------------------------- - diff --git a/docs/java-api/aggregations/bucket/missing-aggregation.asciidoc b/docs/java-api/aggregations/bucket/missing-aggregation.asciidoc deleted file mode 100644 index 31d21604dc57a..0000000000000 --- a/docs/java-api/aggregations/bucket/missing-aggregation.asciidoc +++ /dev/null @@ -1,34 +0,0 @@ -[[java-aggs-bucket-missing]] -==== Missing Aggregation - -Here is how you can use -{ref}/search-aggregations-bucket-missing-aggregation.html[Missing Aggregation] -with Java API. - - -===== Prepare aggregation request - -Here is an example on how to create the aggregation request: - -[source,java] --------------------------------------------------- -AggregationBuilders.missing("agg").field("gender"); --------------------------------------------------- - - -===== Use aggregation response - -Import Aggregation definition classes: - -[source,java] --------------------------------------------------- -import org.elasticsearch.search.aggregations.bucket.missing.Missing; --------------------------------------------------- - -[source,java] --------------------------------------------------- -// sr is here your SearchResponse object -Missing agg = sr.getAggregations().get("agg"); -agg.getDocCount(); // Doc count --------------------------------------------------- - diff --git a/docs/java-api/aggregations/bucket/nested-aggregation.asciidoc b/docs/java-api/aggregations/bucket/nested-aggregation.asciidoc deleted file mode 100644 index b1ebad7a63bfa..0000000000000 --- a/docs/java-api/aggregations/bucket/nested-aggregation.asciidoc +++ /dev/null @@ -1,34 +0,0 @@ -[[java-aggs-bucket-nested]] -==== Nested Aggregation - -Here is how you can use -{ref}/search-aggregations-bucket-nested-aggregation.html[Nested Aggregation] -with Java API. - - -===== Prepare aggregation request - -Here is an example on how to create the aggregation request: - -[source,java] --------------------------------------------------- -AggregationBuilders - .nested("agg", "resellers"); --------------------------------------------------- - - -===== Use aggregation response - -Import Aggregation definition classes: - -[source,java] --------------------------------------------------- -import org.elasticsearch.search.aggregations.bucket.nested.Nested; --------------------------------------------------- - -[source,java] --------------------------------------------------- -// sr is here your SearchResponse object -Nested agg = sr.getAggregations().get("agg"); -agg.getDocCount(); // Doc count --------------------------------------------------- diff --git a/docs/java-api/aggregations/bucket/range-aggregation.asciidoc b/docs/java-api/aggregations/bucket/range-aggregation.asciidoc deleted file mode 100644 index b30c856ebeada..0000000000000 --- a/docs/java-api/aggregations/bucket/range-aggregation.asciidoc +++ /dev/null @@ -1,58 +0,0 @@ -[[java-aggs-bucket-range]] -==== Range Aggregation - -Here is how you can use -{ref}/search-aggregations-bucket-range-aggregation.html[Range Aggregation] -with Java API. - - -===== Prepare aggregation request - -Here is an example on how to create the aggregation request: - -[source,java] --------------------------------------------------- -AggregationBuilder aggregation = - AggregationBuilders - .range("agg") - .field("height") - .addUnboundedTo(1.0f) // from -infinity to 1.0 (excluded) - .addRange(1.0f, 1.5f) // from 1.0 to 1.5 (excluded) - .addUnboundedFrom(1.5f); // from 1.5 to +infinity --------------------------------------------------- - - -===== Use aggregation response - -Import Aggregation definition classes: - -[source,java] --------------------------------------------------- -import org.elasticsearch.search.aggregations.bucket.range.Range; --------------------------------------------------- - -[source,java] --------------------------------------------------- -// sr is here your SearchResponse object -Range agg = sr.getAggregations().get("agg"); - -// For each entry -for (Range.Bucket entry : agg.getBuckets()) { - String key = entry.getKeyAsString(); // Range as key - Number from = (Number) entry.getFrom(); // Bucket from - Number to = (Number) entry.getTo(); // Bucket to - long docCount = entry.getDocCount(); // Doc count - - logger.info("key [{}], from [{}], to [{}], doc_count [{}]", key, from, to, docCount); -} --------------------------------------------------- - -This will basically produce for the first example: - -[source,text] --------------------------------------------------- -key [*-1.0], from [-Infinity], to [1.0], doc_count [9] -key [1.0-1.5], from [1.0], to [1.5], doc_count [21] -key [1.5-*], from [1.5], to [Infinity], doc_count [20] --------------------------------------------------- - diff --git a/docs/java-api/aggregations/bucket/reverse-nested-aggregation.asciidoc b/docs/java-api/aggregations/bucket/reverse-nested-aggregation.asciidoc deleted file mode 100644 index 635b0e8cf77ee..0000000000000 --- a/docs/java-api/aggregations/bucket/reverse-nested-aggregation.asciidoc +++ /dev/null @@ -1,50 +0,0 @@ -[[java-aggs-bucket-reverse-nested]] -==== Reverse Nested Aggregation - -Here is how you can use -{ref}/search-aggregations-bucket-reverse-nested-aggregation.html[Reverse Nested Aggregation] -with Java API. - - -===== Prepare aggregation request - -Here is an example on how to create the aggregation request: - -[source,java] --------------------------------------------------- -AggregationBuilder aggregation = - AggregationBuilders - .nested("agg", "resellers") - .subAggregation( - AggregationBuilders - .terms("name").field("resellers.name") - .subAggregation( - AggregationBuilders - .reverseNested("reseller_to_product") - ) - ); --------------------------------------------------- - - -===== Use aggregation response - -Import Aggregation definition classes: - -[source,java] --------------------------------------------------- -import org.elasticsearch.search.aggregations.bucket.nested.Nested; -import org.elasticsearch.search.aggregations.bucket.nested.ReverseNested; -import org.elasticsearch.search.aggregations.bucket.terms.Terms; --------------------------------------------------- - -[source,java] --------------------------------------------------- -// sr is here your SearchResponse object -Nested agg = sr.getAggregations().get("agg"); -Terms name = agg.getAggregations().get("name"); -for (Terms.Bucket bucket : name.getBuckets()) { - ReverseNested resellerToProduct = bucket.getAggregations().get("reseller_to_product"); - resellerToProduct.getDocCount(); // Doc count -} --------------------------------------------------- - diff --git a/docs/java-api/aggregations/bucket/significantterms-aggregation.asciidoc b/docs/java-api/aggregations/bucket/significantterms-aggregation.asciidoc deleted file mode 100644 index 4450c324c8209..0000000000000 --- a/docs/java-api/aggregations/bucket/significantterms-aggregation.asciidoc +++ /dev/null @@ -1,47 +0,0 @@ -[[java-aggs-bucket-significantterms]] -==== Significant Terms Aggregation - -Here is how you can use -{ref}/search-aggregations-bucket-significantterms-aggregation.html[Significant Terms Aggregation] -with Java API. - - -===== Prepare aggregation request - -Here is an example on how to create the aggregation request: - -[source,java] --------------------------------------------------- -AggregationBuilder aggregation = - AggregationBuilders - .significantTerms("significant_countries") - .field("address.country"); - -// Let say you search for men only -SearchResponse sr = client.prepareSearch() - .setQuery(QueryBuilders.termQuery("gender", "male")) - .addAggregation(aggregation) - .get(); --------------------------------------------------- - - -===== Use aggregation response - -Import Aggregation definition classes: - -[source,java] --------------------------------------------------- -import org.elasticsearch.search.aggregations.bucket.significant.SignificantTerms; --------------------------------------------------- - -[source,java] --------------------------------------------------- -// sr is here your SearchResponse object -SignificantTerms agg = sr.getAggregations().get("significant_countries"); - -// For each entry -for (SignificantTerms.Bucket entry : agg.getBuckets()) { - entry.getKey(); // Term - entry.getDocCount(); // Doc count -} --------------------------------------------------- diff --git a/docs/java-api/aggregations/bucket/terms-aggregation.asciidoc b/docs/java-api/aggregations/bucket/terms-aggregation.asciidoc deleted file mode 100644 index db584fd4cedd2..0000000000000 --- a/docs/java-api/aggregations/bucket/terms-aggregation.asciidoc +++ /dev/null @@ -1,97 +0,0 @@ -[[java-aggs-bucket-terms]] -==== Terms Aggregation - -Here is how you can use -{ref}/search-aggregations-bucket-terms-aggregation.html[Terms Aggregation] -with Java API. - - -===== Prepare aggregation request - -Here is an example on how to create the aggregation request: - -[source,java] --------------------------------------------------- -AggregationBuilders - .terms("genders") - .field("gender"); --------------------------------------------------- - - -===== Use aggregation response - -Import Aggregation definition classes: - -[source,java] --------------------------------------------------- -import org.elasticsearch.search.aggregations.bucket.terms.Terms; --------------------------------------------------- - -[source,java] --------------------------------------------------- -// sr is here your SearchResponse object -Terms genders = sr.getAggregations().get("genders"); - -// For each entry -for (Terms.Bucket entry : genders.getBuckets()) { - entry.getKey(); // Term - entry.getDocCount(); // Doc count -} --------------------------------------------------- - -===== Order - -Import bucket ordering strategy classes: - -[source,java] --------------------------------------------------- -import org.elasticsearch.search.aggregations.BucketOrder; --------------------------------------------------- - -Ordering the buckets by their `doc_count` in an ascending manner: - -[source,java] --------------------------------------------------- -AggregationBuilders - .terms("genders") - .field("gender") - .order(BucketOrder.count(true)) --------------------------------------------------- - -Ordering the buckets alphabetically by their terms in an ascending manner: - -[source,java] --------------------------------------------------- -AggregationBuilders - .terms("genders") - .field("gender") - .order(BucketOrder.key(true)) --------------------------------------------------- - -Ordering the buckets by single value metrics sub-aggregation (identified by the aggregation name): - -[source,java] --------------------------------------------------- -AggregationBuilders - .terms("genders") - .field("gender") - .order(BucketOrder.aggregation("avg_height", false)) - .subAggregation( - AggregationBuilders.avg("avg_height").field("height") - ) --------------------------------------------------- - -Ordering the buckets by multiple criteria: - -[source,java] --------------------------------------------------- -AggregationBuilders - .terms("genders") - .field("gender") - .order(BucketOrder.compound( // in order of priority: - BucketOrder.aggregation("avg_height", false), // sort by sub-aggregation first - BucketOrder.count(true))) // then bucket count as a tie-breaker - .subAggregation( - AggregationBuilders.avg("avg_height").field("height") - ) --------------------------------------------------- diff --git a/docs/java-api/aggregations/metrics.asciidoc b/docs/java-api/aggregations/metrics.asciidoc deleted file mode 100644 index c9afb4c39d484..0000000000000 --- a/docs/java-api/aggregations/metrics.asciidoc +++ /dev/null @@ -1,27 +0,0 @@ -[[java-aggregations-metrics]] - -include::metrics/min-aggregation.asciidoc[] - -include::metrics/max-aggregation.asciidoc[] - -include::metrics/sum-aggregation.asciidoc[] - -include::metrics/avg-aggregation.asciidoc[] - -include::metrics/stats-aggregation.asciidoc[] - -include::metrics/extendedstats-aggregation.asciidoc[] - -include::metrics/valuecount-aggregation.asciidoc[] - -include::metrics/percentile-aggregation.asciidoc[] - -include::metrics/percentile-rank-aggregation.asciidoc[] - -include::metrics/cardinality-aggregation.asciidoc[] - -include::metrics/geobounds-aggregation.asciidoc[] - -include::metrics/tophits-aggregation.asciidoc[] - -include::metrics/scripted-metric-aggregation.asciidoc[] diff --git a/docs/java-api/aggregations/metrics/avg-aggregation.asciidoc b/docs/java-api/aggregations/metrics/avg-aggregation.asciidoc deleted file mode 100644 index 511cbabf5c848..0000000000000 --- a/docs/java-api/aggregations/metrics/avg-aggregation.asciidoc +++ /dev/null @@ -1,37 +0,0 @@ -[[java-aggs-metrics-avg]] -==== Avg Aggregation - -Here is how you can use -{ref}/search-aggregations-metrics-avg-aggregation.html[Avg Aggregation] -with Java API. - - -===== Prepare aggregation request - -Here is an example on how to create the aggregation request: - -[source,java] --------------------------------------------------- -AvgAggregationBuilder aggregation = - AggregationBuilders - .avg("agg") - .field("height"); --------------------------------------------------- - - -===== Use aggregation response - -Import Aggregation definition classes: - -[source,java] --------------------------------------------------- -import org.elasticsearch.search.aggregations.metrics.avg.Avg; --------------------------------------------------- - -[source,java] --------------------------------------------------- -// sr is here your SearchResponse object -Avg agg = sr.getAggregations().get("agg"); -double value = agg.getValue(); --------------------------------------------------- - diff --git a/docs/java-api/aggregations/metrics/cardinality-aggregation.asciidoc b/docs/java-api/aggregations/metrics/cardinality-aggregation.asciidoc deleted file mode 100644 index 8a854e553f4a3..0000000000000 --- a/docs/java-api/aggregations/metrics/cardinality-aggregation.asciidoc +++ /dev/null @@ -1,38 +0,0 @@ -[[java-aggs-metrics-cardinality]] -==== Cardinality Aggregation - -Here is how you can use -{ref}/search-aggregations-metrics-cardinality-aggregation.html[Cardinality Aggregation] -with Java API. - - -===== Prepare aggregation request - -Here is an example on how to create the aggregation request: - -[source,java] --------------------------------------------------- -CardinalityAggregationBuilder aggregation = - AggregationBuilders - .cardinality("agg") - .field("tags"); --------------------------------------------------- - - -===== Use aggregation response - -Import Aggregation definition classes: - -[source,java] --------------------------------------------------- -import org.elasticsearch.search.aggregations.metrics.cardinality.Cardinality; --------------------------------------------------- - -[source,java] --------------------------------------------------- -// sr is here your SearchResponse object -Cardinality agg = sr.getAggregations().get("agg"); -long value = agg.getValue(); --------------------------------------------------- - - diff --git a/docs/java-api/aggregations/metrics/extendedstats-aggregation.asciidoc b/docs/java-api/aggregations/metrics/extendedstats-aggregation.asciidoc deleted file mode 100644 index 8f2f12ede6849..0000000000000 --- a/docs/java-api/aggregations/metrics/extendedstats-aggregation.asciidoc +++ /dev/null @@ -1,44 +0,0 @@ -[[java-aggs-metrics-extendedstats]] -==== Extended Stats Aggregation - -Here is how you can use -{ref}/search-aggregations-metrics-extendedstats-aggregation.html[Extended Stats Aggregation] -with Java API. - - -===== Prepare aggregation request - -Here is an example on how to create the aggregation request: - -[source,java] --------------------------------------------------- -ExtendedStatsAggregationBuilder aggregation = - AggregationBuilders - .extendedStats("agg") - .field("height"); --------------------------------------------------- - - -===== Use aggregation response - -Import Aggregation definition classes: - -[source,java] --------------------------------------------------- -import org.elasticsearch.search.aggregations.metrics.stats.extended.ExtendedStats; --------------------------------------------------- - -[source,java] --------------------------------------------------- -// sr is here your SearchResponse object -ExtendedStats agg = sr.getAggregations().get("agg"); -double min = agg.getMin(); -double max = agg.getMax(); -double avg = agg.getAvg(); -double sum = agg.getSum(); -long count = agg.getCount(); -double stdDeviation = agg.getStdDeviation(); -double sumOfSquares = agg.getSumOfSquares(); -double variance = agg.getVariance(); --------------------------------------------------- - diff --git a/docs/java-api/aggregations/metrics/geobounds-aggregation.asciidoc b/docs/java-api/aggregations/metrics/geobounds-aggregation.asciidoc deleted file mode 100644 index 571a61f12e7cc..0000000000000 --- a/docs/java-api/aggregations/metrics/geobounds-aggregation.asciidoc +++ /dev/null @@ -1,46 +0,0 @@ -[[java-aggs-metrics-geobounds]] -==== Geo Bounds Aggregation - -Here is how you can use -{ref}/search-aggregations-metrics-geobounds-aggregation.html[Geo Bounds Aggregation] -with Java API. - - -===== Prepare aggregation request - -Here is an example on how to create the aggregation request: - -[source,java] --------------------------------------------------- -GeoBoundsAggregationBuilder aggregation = - GeoBoundsAggregationBuilder - .geoBounds("agg") - .field("address.location") - .wrapLongitude(true); --------------------------------------------------- - - -===== Use aggregation response - -Import Aggregation definition classes: - -[source,java] --------------------------------------------------- -import org.elasticsearch.search.aggregations.metrics.geobounds.GeoBounds; --------------------------------------------------- - -[source,java] --------------------------------------------------- -// sr is here your SearchResponse object -GeoBounds agg = sr.getAggregations().get("agg"); -GeoPoint bottomRight = agg.bottomRight(); -GeoPoint topLeft = agg.topLeft(); -logger.info("bottomRight {}, topLeft {}", bottomRight, topLeft); --------------------------------------------------- - -This will basically produce: - -[source,text] --------------------------------------------------- -bottomRight [40.70500764381921, 13.952946866893775], topLeft [53.49603022435221, -4.190029308156676] --------------------------------------------------- diff --git a/docs/java-api/aggregations/metrics/max-aggregation.asciidoc b/docs/java-api/aggregations/metrics/max-aggregation.asciidoc deleted file mode 100644 index 9bd393698429b..0000000000000 --- a/docs/java-api/aggregations/metrics/max-aggregation.asciidoc +++ /dev/null @@ -1,37 +0,0 @@ -[[java-aggs-metrics-max]] -==== Max Aggregation - -Here is how you can use -{ref}/search-aggregations-metrics-max-aggregation.html[Max Aggregation] -with Java API. - - -===== Prepare aggregation request - -Here is an example on how to create the aggregation request: - -[source,java] --------------------------------------------------- -MaxAggregationBuilder aggregation = - AggregationBuilders - .max("agg") - .field("height"); --------------------------------------------------- - - -===== Use aggregation response - -Import Aggregation definition classes: - -[source,java] --------------------------------------------------- -import org.elasticsearch.search.aggregations.metrics.max.Max; --------------------------------------------------- - -[source,java] --------------------------------------------------- -// sr is here your SearchResponse object -Max agg = sr.getAggregations().get("agg"); -double value = agg.getValue(); --------------------------------------------------- - diff --git a/docs/java-api/aggregations/metrics/min-aggregation.asciidoc b/docs/java-api/aggregations/metrics/min-aggregation.asciidoc deleted file mode 100644 index 0205cae44d8f8..0000000000000 --- a/docs/java-api/aggregations/metrics/min-aggregation.asciidoc +++ /dev/null @@ -1,37 +0,0 @@ -[[java-aggs-metrics-min]] -==== Min Aggregation - -Here is how you can use -{ref}/search-aggregations-metrics-min-aggregation.html[Min Aggregation] -with Java API. - - -===== Prepare aggregation request - -Here is an example on how to create the aggregation request: - -[source,java] --------------------------------------------------- -MinAggregationBuilder aggregation = - AggregationBuilders - .min("agg") - .field("height"); --------------------------------------------------- - - -===== Use aggregation response - -Import Aggregation definition classes: - -[source,java] --------------------------------------------------- -import org.elasticsearch.search.aggregations.metrics.min.Min; --------------------------------------------------- - -[source,java] --------------------------------------------------- -// sr is here your SearchResponse object -Min agg = sr.getAggregations().get("agg"); -double value = agg.getValue(); --------------------------------------------------- - diff --git a/docs/java-api/aggregations/metrics/percentile-aggregation.asciidoc b/docs/java-api/aggregations/metrics/percentile-aggregation.asciidoc deleted file mode 100644 index ad54fbf5a46be..0000000000000 --- a/docs/java-api/aggregations/metrics/percentile-aggregation.asciidoc +++ /dev/null @@ -1,68 +0,0 @@ -[[java-aggs-metrics-percentile]] -==== Percentile Aggregation - -Here is how you can use -{ref}/search-aggregations-metrics-percentile-aggregation.html[Percentile Aggregation] -with Java API. - - -===== Prepare aggregation request - -Here is an example on how to create the aggregation request: - -[source,java] --------------------------------------------------- -PercentilesAggregationBuilder aggregation = - AggregationBuilders - .percentiles("agg") - .field("height"); --------------------------------------------------- - -You can provide your own percentiles instead of using defaults: - -[source,java] --------------------------------------------------- -PercentilesAggregationBuilder aggregation = - AggregationBuilders - .percentiles("agg") - .field("height") - .percentiles(1.0, 5.0, 10.0, 20.0, 30.0, 75.0, 95.0, 99.0); --------------------------------------------------- - -===== Use aggregation response - -Import Aggregation definition classes: - -[source,java] --------------------------------------------------- -import org.elasticsearch.search.aggregations.metrics.percentiles.Percentile; -import org.elasticsearch.search.aggregations.metrics.percentiles.Percentiles; --------------------------------------------------- - -[source,java] --------------------------------------------------- -// sr is here your SearchResponse object -Percentiles agg = sr.getAggregations().get("agg"); -// For each entry -for (Percentile entry : agg) { - double percent = entry.getPercent(); // Percent - double value = entry.getValue(); // Value - - logger.info("percent [{}], value [{}]", percent, value); -} --------------------------------------------------- - - -This will basically produce for the first example: - -[source,text] --------------------------------------------------- -percent [1.0], value [0.814338896154595] -percent [5.0], value [0.8761912455821302] -percent [25.0], value [1.173346540141847] -percent [50.0], value [1.5432023318692198] -percent [75.0], value [1.923915462033674] -percent [95.0], value [2.2273644908535335] -percent [99.0], value [2.284989339108279] --------------------------------------------------- - diff --git a/docs/java-api/aggregations/metrics/percentile-rank-aggregation.asciidoc b/docs/java-api/aggregations/metrics/percentile-rank-aggregation.asciidoc deleted file mode 100644 index a846d59f82029..0000000000000 --- a/docs/java-api/aggregations/metrics/percentile-rank-aggregation.asciidoc +++ /dev/null @@ -1,55 +0,0 @@ -[[java-aggs-metrics-percentile-rank]] -==== Percentile Ranks Aggregation - -Here is how you can use -{ref}/search-aggregations-metrics-percentile-rank-aggregation.html[Percentile Ranks Aggregation] -with Java API. - - -===== Prepare aggregation request - -Here is an example on how to create the aggregation request: - -[source,java] --------------------------------------------------- -PercentileRanksAggregationBuilder aggregation = - AggregationBuilders - .percentileRanks("agg") - .field("height") - .values(1.24, 1.91, 2.22); --------------------------------------------------- - - -===== Use aggregation response - -Import Aggregation definition classes: - -[source,java] --------------------------------------------------- -import org.elasticsearch.search.aggregations.metrics.percentiles.Percentile; -import org.elasticsearch.search.aggregations.metrics.percentiles.PercentileRanks; --------------------------------------------------- - -[source,java] --------------------------------------------------- -// sr is here your SearchResponse object -PercentileRanks agg = sr.getAggregations().get("agg"); -// For each entry -for (Percentile entry : agg) { - double percent = entry.getPercent(); // Percent - double value = entry.getValue(); // Value - - logger.info("percent [{}], value [{}]", percent, value); -} --------------------------------------------------- - - -This will basically produce: - -[source,text] --------------------------------------------------- -percent [29.664353095090945], value [1.24] -percent [73.9335313461868], value [1.91] -percent [94.40095147327283], value [2.22] --------------------------------------------------- - diff --git a/docs/java-api/aggregations/metrics/scripted-metric-aggregation.asciidoc b/docs/java-api/aggregations/metrics/scripted-metric-aggregation.asciidoc deleted file mode 100644 index 5b68fa7be451f..0000000000000 --- a/docs/java-api/aggregations/metrics/scripted-metric-aggregation.asciidoc +++ /dev/null @@ -1,100 +0,0 @@ -[[java-aggs-metrics-scripted-metric]] -==== Scripted Metric Aggregation - -Here is how you can use -{ref}/search-aggregations-metrics-scripted-metric-aggregation.html[Scripted Metric Aggregation] -with Java API. - -===== Prepare aggregation request - -Here is an example on how to create the aggregation request: - -[source,java] --------------------------------------------------- -ScriptedMetricAggregationBuilder aggregation = AggregationBuilders - .scriptedMetric("agg") - .initScript(new Script("state.heights = []")) - .mapScript(new Script("state.heights.add(doc.gender.value == 'male' ? doc.height.value : -1.0 * doc.height.value)")); --------------------------------------------------- - -You can also specify a `combine` script which will be executed on each shard: - -[source,java] --------------------------------------------------- -ScriptedMetricAggregationBuilder aggregation = AggregationBuilders - .scriptedMetric("agg") - .initScript(new Script("state.heights = []")) - .mapScript(new Script("state.heights.add(doc.gender.value == 'male' ? doc.height.value : -1.0 * doc.height.value)")) - .combineScript(new Script("double heights_sum = 0.0; for (t in state.heights) { heights_sum += t } return heights_sum")); --------------------------------------------------- - -You can also specify a `reduce` script which will be executed on the node which gets the request: - -[source,java] --------------------------------------------------- -ScriptedMetricAggregationBuilder aggregation = AggregationBuilders - .scriptedMetric("agg") - .initScript(new Script("state.heights = []")) - .mapScript(new Script("state.heights.add(doc.gender.value == 'male' ? doc.height.value : -1.0 * doc.height.value)")) - .combineScript(new Script("double heights_sum = 0.0; for (t in state.heights) { heights_sum += t } return heights_sum")) - .reduceScript(new Script("double heights_sum = 0.0; for (a in states) { heights_sum += a } return heights_sum")); --------------------------------------------------- - - -===== Use aggregation response - -Import Aggregation definition classes: - -[source,java] --------------------------------------------------- -import org.elasticsearch.search.aggregations.bucket.terms.Terms; -import org.elasticsearch.search.aggregations.metrics.tophits.TopHits; --------------------------------------------------- - -[source,java] --------------------------------------------------- -// sr is here your SearchResponse object -ScriptedMetric agg = sr.getAggregations().get("agg"); -Object scriptedResult = agg.aggregation(); -logger.info("scriptedResult [{}]", scriptedResult); --------------------------------------------------- - -Note that the result depends on the script you built. -For the first example, this will basically produce: - -[source,text] --------------------------------------------------- -scriptedResult object [ArrayList] -scriptedResult [ { -"heights" : [ 1.122218480146643, -1.8148918111233887, -1.7626731575142909, ... ] -}, { -"heights" : [ -0.8046067304119863, -2.0785486707864553, -1.9183567430207953, ... ] -}, { -"heights" : [ 2.092635728868694, 1.5697545960886536, 1.8826954461968808, ... ] -}, { -"heights" : [ -2.1863201099468403, 1.6328549117346856, -1.7078288405893842, ... ] -}, { -"heights" : [ 1.6043904836424177, -2.0736538674414025, 0.9898266674373053, ... ] -} ] --------------------------------------------------- - -The second example will produce: - -[source,text] --------------------------------------------------- -scriptedResult object [ArrayList] -scriptedResult [-41.279615707402876, - -60.88007362339038, - 38.823270659734256, - 14.840192739445632, - 11.300902755741326] --------------------------------------------------- - -The last example will produce: - -[source,text] --------------------------------------------------- -scriptedResult object [Double] -scriptedResult [2.171917696507009] --------------------------------------------------- - diff --git a/docs/java-api/aggregations/metrics/stats-aggregation.asciidoc b/docs/java-api/aggregations/metrics/stats-aggregation.asciidoc deleted file mode 100644 index 260d9c01cb944..0000000000000 --- a/docs/java-api/aggregations/metrics/stats-aggregation.asciidoc +++ /dev/null @@ -1,41 +0,0 @@ -[[java-aggs-metrics-stats]] -==== Stats Aggregation - -Here is how you can use -{ref}/search-aggregations-metrics-stats-aggregation.html[Stats Aggregation] -with Java API. - - -===== Prepare aggregation request - -Here is an example on how to create the aggregation request: - -[source,java] --------------------------------------------------- -StatsAggregationBuilder aggregation = - AggregationBuilders - .stats("agg") - .field("height"); --------------------------------------------------- - - -===== Use aggregation response - -Import Aggregation definition classes: - -[source,java] --------------------------------------------------- -import org.elasticsearch.search.aggregations.metrics.stats.Stats; --------------------------------------------------- - -[source,java] --------------------------------------------------- -// sr is here your SearchResponse object -Stats agg = sr.getAggregations().get("agg"); -double min = agg.getMin(); -double max = agg.getMax(); -double avg = agg.getAvg(); -double sum = agg.getSum(); -long count = agg.getCount(); --------------------------------------------------- - diff --git a/docs/java-api/aggregations/metrics/sum-aggregation.asciidoc b/docs/java-api/aggregations/metrics/sum-aggregation.asciidoc deleted file mode 100644 index 453616916d755..0000000000000 --- a/docs/java-api/aggregations/metrics/sum-aggregation.asciidoc +++ /dev/null @@ -1,37 +0,0 @@ -[[java-aggs-metrics-sum]] -==== Sum Aggregation - -Here is how you can use -{ref}/search-aggregations-metrics-sum-aggregation.html[Sum Aggregation] -with Java API. - - -===== Prepare aggregation request - -Here is an example on how to create the aggregation request: - -[source,java] --------------------------------------------------- -SumAggregationBuilder aggregation = - AggregationBuilders - .sum("agg") - .field("height"); --------------------------------------------------- - - -===== Use aggregation response - -Import Aggregation definition classes: - -[source,java] --------------------------------------------------- -import org.elasticsearch.search.aggregations.metrics.sum.Sum; --------------------------------------------------- - -[source,java] --------------------------------------------------- -// sr is here your SearchResponse object -Sum agg = sr.getAggregations().get("agg"); -double value = agg.getValue(); --------------------------------------------------- - diff --git a/docs/java-api/aggregations/metrics/tophits-aggregation.asciidoc b/docs/java-api/aggregations/metrics/tophits-aggregation.asciidoc deleted file mode 100644 index 2473b4b89d77b..0000000000000 --- a/docs/java-api/aggregations/metrics/tophits-aggregation.asciidoc +++ /dev/null @@ -1,79 +0,0 @@ -[[java-aggs-metrics-tophits]] -==== Top Hits Aggregation - -Here is how you can use -{ref}/search-aggregations-metrics-top-hits-aggregation.html[Top Hits Aggregation] -with Java API. - - -===== Prepare aggregation request - -Here is an example on how to create the aggregation request: - -[source,java] --------------------------------------------------- -AggregationBuilder aggregation = - AggregationBuilders - .terms("agg").field("gender") - .subAggregation( - AggregationBuilders.topHits("top") - ); --------------------------------------------------- - -You can use most of the options available for standard search such as `from`, `size`, `sort`, `highlight`, `explain`... - -[source,java] --------------------------------------------------- -AggregationBuilder aggregation = - AggregationBuilders - .terms("agg").field("gender") - .subAggregation( - AggregationBuilders.topHits("top") - .explain(true) - .size(1) - .from(10) - ); --------------------------------------------------- - -===== Use aggregation response - -Import Aggregation definition classes: - -[source,java] --------------------------------------------------- -import org.elasticsearch.search.aggregations.bucket.terms.Terms; -import org.elasticsearch.search.aggregations.metrics.tophits.TopHits; --------------------------------------------------- - -[source,java] --------------------------------------------------- -// sr is here your SearchResponse object -Terms agg = sr.getAggregations().get("agg"); - -// For each entry -for (Terms.Bucket entry : agg.getBuckets()) { - String key = entry.getKey(); // bucket key - long docCount = entry.getDocCount(); // Doc count - logger.info("key [{}], doc_count [{}]", key, docCount); - - // We ask for top_hits for each bucket - TopHits topHits = entry.getAggregations().get("top"); - for (SearchHit hit : topHits.getHits().getHits()) { - logger.info(" -> id [{}], _source [{}]", hit.getId(), hit.getSourceAsString()); - } -} --------------------------------------------------- - -This will basically produce for the first example: - -[source,text] --------------------------------------------------- -key [male], doc_count [5107] - -> id [AUnzSZze9k7PKXtq04x2], _source [{"gender":"male",...}] - -> id [AUnzSZzj9k7PKXtq04x4], _source [{"gender":"male",...}] - -> id [AUnzSZzl9k7PKXtq04x5], _source [{"gender":"male",...}] -key [female], doc_count [4893] - -> id [AUnzSZzM9k7PKXtq04xy], _source [{"gender":"female",...}] - -> id [AUnzSZzp9k7PKXtq04x8], _source [{"gender":"female",...}] - -> id [AUnzSZ0W9k7PKXtq04yS], _source [{"gender":"female",...}] --------------------------------------------------- diff --git a/docs/java-api/aggregations/metrics/valuecount-aggregation.asciidoc b/docs/java-api/aggregations/metrics/valuecount-aggregation.asciidoc deleted file mode 100644 index b180d22af33cd..0000000000000 --- a/docs/java-api/aggregations/metrics/valuecount-aggregation.asciidoc +++ /dev/null @@ -1,37 +0,0 @@ -[[java-aggs-metrics-valuecount]] -==== Value Count Aggregation - -Here is how you can use -{ref}/search-aggregations-metrics-valuecount-aggregation.html[Value Count Aggregation] -with Java API. - - -===== Prepare aggregation request - -Here is an example on how to create the aggregation request: - -[source,java] --------------------------------------------------- -ValueCountAggregationBuilder aggregation = - AggregationBuilders - .count("agg") - .field("height"); --------------------------------------------------- - - -===== Use aggregation response - -Import Aggregation definition classes: - -[source,java] --------------------------------------------------- -import org.elasticsearch.search.aggregations.metrics.valuecount.ValueCount; --------------------------------------------------- - -[source,java] --------------------------------------------------- -// sr is here your SearchResponse object -ValueCount agg = sr.getAggregations().get("agg"); -long value = agg.getValue(); --------------------------------------------------- - diff --git a/docs/java-api/aggs.asciidoc b/docs/java-api/aggs.asciidoc deleted file mode 100644 index c2e09b4901e87..0000000000000 --- a/docs/java-api/aggs.asciidoc +++ /dev/null @@ -1,63 +0,0 @@ -[[java-aggs]] -== Aggregations - -Elasticsearch provides a full Java API to play with aggregations. See the -{ref}/search-aggregations.html[Aggregations guide]. - -Use the factory for aggregation builders (`AggregationBuilders`) and add each aggregation -you want to compute when querying and add it to your search request: - -[source,java] --------------------------------------------------- -SearchResponse sr = node.client().prepareSearch() - .setQuery( /* your query */ ) - .addAggregation( /* add an aggregation */ ) - .execute().actionGet(); --------------------------------------------------- - -Note that you can add more than one aggregation. See -{ref}/search-search.html[Search Java API] for details. - -To build aggregation requests, use `AggregationBuilders` helpers. Just import them -in your class: - -[source,java] --------------------------------------------------- -import org.elasticsearch.search.aggregations.AggregationBuilders; --------------------------------------------------- - -=== Structuring aggregations - -As explained in the -{ref}/search-aggregations.html[Aggregations guide], you can define -sub aggregations inside an aggregation. - -An aggregation could be a metrics aggregation or a bucket aggregation. - -For example, here is a 3 levels aggregation composed of: - -* Terms aggregation (bucket) -* Date Histogram aggregation (bucket) -* Average aggregation (metric) - -[source,java] --------------------------------------------------- -SearchResponse sr = node.client().prepareSearch() - .addAggregation( - AggregationBuilders.terms("by_country").field("country") - .subAggregation(AggregationBuilders.dateHistogram("by_year") - .field("dateOfBirth") - .calendarInterval(DateHistogramInterval.YEAR) - .subAggregation(AggregationBuilders.avg("avg_children").field("children")) - ) - ) - .execute().actionGet(); --------------------------------------------------- - -=== Metrics aggregations - -include::aggregations/metrics.asciidoc[] - -=== Bucket aggregations - -include::aggregations/bucket.asciidoc[] diff --git a/docs/java-api/client.asciidoc b/docs/java-api/client.asciidoc deleted file mode 100644 index 811d7c398d940..0000000000000 --- a/docs/java-api/client.asciidoc +++ /dev/null @@ -1,110 +0,0 @@ -[[client]] -== Client - -You can use the *Java client* in multiple ways: - -* Perform standard <>, <>, - <> and <> operations on an - existing cluster -* Perform administrative tasks on a running cluster - -Obtaining an Elasticsearch `Client` is simple. The most common way to -get a client is by creating a <> -that connects to a cluster. - -[IMPORTANT] -============================== - -The client must have the same major version (e.g. `2.x`, or `5.x`) as the -nodes in the cluster. Clients may connect to clusters which have a different -minor version (e.g. `2.3.x`) but it is possible that new functionality may not -be supported. Ideally, the client should have the same version as the -cluster. - -============================== - -[[transport-client]] -=== Transport Client - -deprecated[7.0.0, The `TransportClient` is deprecated in favour of the {java-rest}/java-rest-high.html[Java High Level REST Client] and will be removed in Elasticsearch 8.0. The {java-rest}/java-rest-high-level-migration.html[migration guide] describes all the steps needed to migrate.] - -The `TransportClient` connects remotely to an Elasticsearch cluster -using the transport module. It does not join the cluster, but simply -gets one or more initial transport addresses and communicates with them -in round robin fashion on each action (though most actions will probably -be "two hop" operations). - -[source,java] --------------------------------------------------- -// on startup - -TransportClient client = new PreBuiltTransportClient(Settings.EMPTY) - .addTransportAddress(new TransportAddress(InetAddress.getByName("host1"), 9300)) - .addTransportAddress(new TransportAddress(InetAddress.getByName("host2"), 9300)); - -// on shutdown - -client.close(); --------------------------------------------------- - -Note that you have to set the cluster name if you use one different than -"elasticsearch": - -[source,java] --------------------------------------------------- -Settings settings = Settings.builder() - .put("cluster.name", "myClusterName").build(); -TransportClient client = new PreBuiltTransportClient(settings); -//Add transport addresses and do something with the client... --------------------------------------------------- - -The Transport client comes with a cluster sniffing feature which -allows it to dynamically add new hosts and remove old ones. -When sniffing is enabled, the transport client will connect to the nodes in its -internal node list, which is built via calls to `addTransportAddress`. -After this, the client will call the internal cluster state API on those nodes -to discover available data nodes. The internal node list of the client will -be replaced with those data nodes only. This list is refreshed every five seconds by default. -Note that the IP addresses the sniffer connects to are the ones declared as the 'publish' -address in those node's Elasticsearch config. - -Keep in mind that the list might possibly not include the original node it connected to -if that node is not a data node. If, for instance, you initially connect to a -master node, after sniffing, no further requests will go to that master node, -but rather to any data nodes instead. The reason the transport client excludes non-data -nodes is to avoid sending search traffic to master only nodes. - -In order to enable sniffing, set `client.transport.sniff` to `true`: - -[source,java] --------------------------------------------------- -Settings settings = Settings.builder() - .put("client.transport.sniff", true).build(); -TransportClient client = new PreBuiltTransportClient(settings); --------------------------------------------------- - -Other transport client level settings include: - -[cols="<,<",options="header",] -|======================================================================= -|Parameter |Description -|`client.transport.ignore_cluster_name` |Set to `true` to ignore cluster -name validation of connected nodes. (since 0.19.4) - -|`client.transport.ping_timeout` |The time to wait for a ping response -from a node. Defaults to `5s`. - -|`client.transport.nodes_sampler_interval` |How often to sample / ping -the nodes listed and connected. Defaults to `5s`. -|======================================================================= - - -[[client-connected-to-client-node]] -=== Connecting a Client to a Coordinating Only Node - -You can start locally a {ref}/modules-node.html#coordinating-only-node[Coordinating Only Node] -and then simply create a <> in your -application which connects to this Coordinating Only Node. - -This way, the coordinating only node will be able to load whatever plugin you -need (think about discovery plugins for example). diff --git a/docs/java-api/docs.asciidoc b/docs/java-api/docs.asciidoc deleted file mode 100644 index 181c5d8e0bd99..0000000000000 --- a/docs/java-api/docs.asciidoc +++ /dev/null @@ -1,36 +0,0 @@ -[[java-docs]] -== Document APIs - -This section describes the following CRUD APIs: - -.Single document APIs -* <> -* <> -* <> -* <> - -.Multi-document APIs -* <> -* <> -* <> -* <> -* <> - -NOTE: All CRUD APIs are single-index APIs. The `index` parameter accepts a single -index name, or an `alias` which points to a single index. - -include::docs/index_.asciidoc[] - -include::docs/get.asciidoc[] - -include::docs/delete.asciidoc[] - -include::docs/update.asciidoc[] - -include::docs/multi-get.asciidoc[] - -include::docs/bulk.asciidoc[] - -include::docs/update-by-query.asciidoc[] - -include::docs/reindex.asciidoc[] \ No newline at end of file diff --git a/docs/java-api/docs/bulk.asciidoc b/docs/java-api/docs/bulk.asciidoc deleted file mode 100644 index 1c2882d9c07e7..0000000000000 --- a/docs/java-api/docs/bulk.asciidoc +++ /dev/null @@ -1,190 +0,0 @@ -[[java-docs-bulk]] -=== Bulk API - -The bulk API allows one to index and delete several documents in a -single request. Here is a sample usage: - -[source,java] --------------------------------------------------- -import static org.elasticsearch.common.xcontent.XContentFactory.*; - -BulkRequestBuilder bulkRequest = client.prepareBulk(); - -// either use client#prepare, or use Requests# to directly build index/delete requests -bulkRequest.add(client.prepareIndex("twitter", "_doc", "1") - .setSource(jsonBuilder() - .startObject() - .field("user", "kimchy") - .field("postDate", new Date()) - .field("message", "trying out Elasticsearch") - .endObject() - ) - ); - -bulkRequest.add(client.prepareIndex("twitter", "_doc", "2") - .setSource(jsonBuilder() - .startObject() - .field("user", "kimchy") - .field("postDate", new Date()) - .field("message", "another post") - .endObject() - ) - ); - -BulkResponse bulkResponse = bulkRequest.get(); -if (bulkResponse.hasFailures()) { - // process failures by iterating through each bulk response item -} --------------------------------------------------- - -[[java-docs-bulk-processor]] -=== Using Bulk Processor - -The `BulkProcessor` class offers a simple interface to flush bulk operations automatically based on the number or size -of requests, or after a given period. - -To use it, first create a `BulkProcessor` instance: - -[source,java] --------------------------------------------------- -import org.elasticsearch.action.bulk.BackoffPolicy; -import org.elasticsearch.action.bulk.BulkProcessor; -import org.elasticsearch.common.unit.ByteSizeUnit; -import org.elasticsearch.common.unit.ByteSizeValue; -import org.elasticsearch.common.unit.TimeValue; - -BulkProcessor bulkProcessor = BulkProcessor.builder( - client, <1> - new BulkProcessor.Listener() { - @Override - public void beforeBulk(long executionId, - BulkRequest request) { ... } <2> - - @Override - public void afterBulk(long executionId, - BulkRequest request, - BulkResponse response) { ... } <3> - - @Override - public void afterBulk(long executionId, - BulkRequest request, - Throwable failure) { ... } <4> - }) - .setBulkActions(10000) <5> - .setBulkSize(new ByteSizeValue(5, ByteSizeUnit.MB)) <6> - .setFlushInterval(TimeValue.timeValueSeconds(5)) <7> - .setConcurrentRequests(1) <8> - .setBackoffPolicy( - BackoffPolicy.exponentialBackoff(TimeValue.timeValueMillis(100), 3)) <9> - .build(); --------------------------------------------------- -<1> Add your Elasticsearch client -<2> This method is called just before bulk is executed. You can for example see the numberOfActions with - `request.numberOfActions()` -<3> This method is called after bulk execution. You can for example check if there was some failing requests - with `response.hasFailures()` -<4> This method is called when the bulk failed and raised a `Throwable` -<5> We want to execute the bulk every 10 000 requests -<6> We want to flush the bulk every 5mb -<7> We want to flush the bulk every 5 seconds whatever the number of requests -<8> Set the number of concurrent requests. A value of 0 means that only a single request will be allowed to be - executed. A value of 1 means 1 concurrent request is allowed to be executed while accumulating new bulk requests. -<9> Set a custom backoff policy which will initially wait for 100ms, increase exponentially and retries up to three - times. A retry is attempted whenever one or more bulk item requests have failed with an `EsRejectedExecutionException` - which indicates that there were too little compute resources available for processing the request. To disable backoff, - pass `BackoffPolicy.noBackoff()`. - -By default, `BulkProcessor`: - -* sets bulkActions to `1000` -* sets bulkSize to `5mb` -* does not set flushInterval -* sets concurrentRequests to 1, which means an asynchronous execution of the flush operation. -* sets backoffPolicy to an exponential backoff with 8 retries and a start delay of 50ms. The total wait time is roughly 5.1 seconds. - -[[java-docs-bulk-processor-requests]] -==== Add requests - -Then you can simply add your requests to the `BulkProcessor`: - -[source,java] --------------------------------------------------- -bulkProcessor.add(new IndexRequest("twitter", "_doc", "1").source(/* your doc here */)); -bulkProcessor.add(new DeleteRequest("twitter", "_doc", "2")); --------------------------------------------------- - -[[java-docs-bulk-processor-close]] -==== Closing the Bulk Processor - -When all documents are loaded to the `BulkProcessor` it can be closed by using `awaitClose` or `close` methods: - -[source,java] --------------------------------------------------- -bulkProcessor.awaitClose(10, TimeUnit.MINUTES); --------------------------------------------------- - -or - -[source,java] --------------------------------------------------- -bulkProcessor.close(); --------------------------------------------------- - -Both methods flush any remaining documents and disable all other scheduled flushes if they were scheduled by setting -`flushInterval`. If concurrent requests were enabled the `awaitClose` method waits for up to the specified timeout for -all bulk requests to complete then returns `true`, if the specified waiting time elapses before all bulk requests complete, -`false` is returned. The `close` method doesn't wait for any remaining bulk requests to complete and exits immediately. - -[[java-docs-bulk-processor-tests]] -==== Using Bulk Processor in tests - -If you are running tests with Elasticsearch and are using the `BulkProcessor` to populate your dataset -you should better set the number of concurrent requests to `0` so the flush operation of the bulk will be executed -in a synchronous manner: - -[source,java] --------------------------------------------------- -BulkProcessor bulkProcessor = BulkProcessor.builder(client, new BulkProcessor.Listener() { /* Listener methods */ }) - .setBulkActions(10000) - .setConcurrentRequests(0) - .build(); - -// Add your requests -bulkProcessor.add(/* Your requests */); - -// Flush any remaining requests -bulkProcessor.flush(); - -// Or close the bulkProcessor if you don't need it anymore -bulkProcessor.close(); - -// Refresh your indices -client.admin().indices().prepareRefresh().get(); - -// Now you can start searching! -client.prepareSearch().get(); --------------------------------------------------- - - -[[java-docs-bulk-global-parameters]] -==== Global Parameters - -Global parameters can be specified on the BulkRequest as well as BulkProcessor, similar to the REST API. These global - parameters serve as defaults and can be overridden by local parameters specified on each sub request. Some parameters - have to be set before any sub request is added - index, type - and you have to specify them during BulkRequest or - BulkProcessor creation. Some are optional - pipeline, routing - and can be specified at any point before the bulk is sent. - -["source","java",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{hlrc-tests}/BulkProcessorIT.java[bulk-processor-mix-parameters] --------------------------------------------------- -<1> global parameters from the BulkRequest will be applied on a sub request -<2> local pipeline parameter on a sub request will override global parameters from BulkRequest - - -["source","java",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{hlrc-tests}/BulkRequestWithGlobalParametersIT.java[bulk-request-mix-pipeline] --------------------------------------------------- -<1> local pipeline parameter on a sub request will override global pipeline from the BulkRequest -<2> global parameter from the BulkRequest will be applied on a sub request diff --git a/docs/java-api/docs/delete.asciidoc b/docs/java-api/docs/delete.asciidoc deleted file mode 100644 index 004edc84b3d65..0000000000000 --- a/docs/java-api/docs/delete.asciidoc +++ /dev/null @@ -1,42 +0,0 @@ -[[java-docs-delete]] -=== Delete API - -The delete API allows one to delete a typed JSON document from a specific -index based on its id. The following example deletes the JSON document -from an index called twitter, under a type called `_doc`, with id valued -1: - -[source,java] --------------------------------------------------- -DeleteResponse response = client.prepareDelete("twitter", "_doc", "1").get(); --------------------------------------------------- - -For more information on the delete operation, check out the -{ref}/docs-delete.html[delete API] docs. - -[[java-docs-delete-by-query]] -=== Delete By Query API - -The delete by query API allows one to delete a given set of documents based on -the result of a query: - -["source","java",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{client-reindex-tests}/ReindexDocumentationIT.java[delete-by-query-sync] --------------------------------------------------- -<1> query -<2> index -<3> execute the operation -<4> number of deleted documents - -As it can be a long running operation, if you wish to do it asynchronously, you can call `execute` instead of `get` -and provide a listener like: - -["source","java",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{client-reindex-tests}/ReindexDocumentationIT.java[delete-by-query-async] --------------------------------------------------- -<1> query -<2> index -<3> listener -<4> number of deleted documents diff --git a/docs/java-api/docs/get.asciidoc b/docs/java-api/docs/get.asciidoc deleted file mode 100644 index ae03eb971004f..0000000000000 --- a/docs/java-api/docs/get.asciidoc +++ /dev/null @@ -1,14 +0,0 @@ -[[java-docs-get]] -=== Get API - -The get API allows to get a typed JSON document from the index based on -its id. The following example gets a JSON document from an index called -twitter, under a type called `_doc``, with id valued 1: - -[source,java] --------------------------------------------------- -GetResponse response = client.prepareGet("twitter", "_doc", "1").get(); --------------------------------------------------- - -For more information on the get operation, check out the REST -{ref}/docs-get.html[get] docs. diff --git a/docs/java-api/docs/index_.asciidoc b/docs/java-api/docs/index_.asciidoc deleted file mode 100644 index 80d187d5a77f3..0000000000000 --- a/docs/java-api/docs/index_.asciidoc +++ /dev/null @@ -1,167 +0,0 @@ -[[java-docs-index]] -=== Index API - -The index API allows one to index a typed JSON document into a specific -index and make it searchable. - - -[[java-docs-index-generate]] -==== Generate JSON document - -There are several different ways of generating a JSON document: - -* Manually (aka do it yourself) using native `byte[]` or as a `String` - -* Using a `Map` that will be automatically converted to its JSON -equivalent - -* Using a third party library to serialize your beans such as -https://github.com/FasterXML/jackson[Jackson] - -* Using built-in helpers XContentFactory.jsonBuilder() - -Internally, each type is converted to `byte[]` (so a String is converted -to a `byte[]`). Therefore, if the object is in this form already, then -use it. The `jsonBuilder` is highly optimized JSON generator that -directly constructs a `byte[]`. - - -[[java-docs-index-generate-diy]] -===== Do It Yourself - -Nothing really difficult here but note that you will have to encode -dates according to the -{ref}/mapping-date-format.html[Date Format]. - -[source,java] --------------------------------------------------- -String json = "{" + - "\"user\":\"kimchy\"," + - "\"postDate\":\"2013-01-30\"," + - "\"message\":\"trying out Elasticsearch\"" + - "}"; --------------------------------------------------- - - -[[java-docs-index-generate-using-map]] -===== Using Map - -Map is a key:values pair collection. It represents a JSON structure: - -[source,java] --------------------------------------------------- -Map json = new HashMap(); -json.put("user","kimchy"); -json.put("postDate",new Date()); -json.put("message","trying out Elasticsearch"); --------------------------------------------------- - - -[[java-docs-index-generate-beans]] -===== Serialize your beans - -You can use https://github.com/FasterXML/jackson[Jackson] to serialize -your beans to JSON. Please add http://search.maven.org/#search%7Cga%7C1%7Cjackson-databind[Jackson Databind] - to your project. Then you can use `ObjectMapper` to serialize your beans: - -[source,java] --------------------------------------------------- -import com.fasterxml.jackson.databind.*; - -// instance a json mapper -ObjectMapper mapper = new ObjectMapper(); // create once, reuse - -// generate json -byte[] json = mapper.writeValueAsBytes(yourbeaninstance); --------------------------------------------------- - - -[[java-docs-index-generate-helpers]] -===== Use Elasticsearch helpers - -Elasticsearch provides built-in helpers to generate JSON content. - -[source,java] --------------------------------------------------- -import static org.elasticsearch.common.xcontent.XContentFactory.*; - -XContentBuilder builder = jsonBuilder() - .startObject() - .field("user", "kimchy") - .field("postDate", new Date()) - .field("message", "trying out Elasticsearch") - .endObject() --------------------------------------------------- - -Note that you can also add arrays with `startArray(String)` and -`endArray()` methods. By the way, the `field` method + - accepts many object types. You can directly pass numbers, dates and even -other XContentBuilder objects. - -If you need to see the generated JSON content, you can use the -`Strings.toString()` method. - -[source,java] --------------------------------------------------- -import org.elasticsearch.common.Strings; - -String json = Strings.toString(builder); --------------------------------------------------- - - -[[java-docs-index-doc]] -==== Index document - -The following example indexes a JSON document into an index called -twitter, under a type called `_doc``, with id valued 1: - -[source,java] --------------------------------------------------- -import static org.elasticsearch.common.xcontent.XContentFactory.*; - -IndexResponse response = client.prepareIndex("twitter", "_doc", "1") - .setSource(jsonBuilder() - .startObject() - .field("user", "kimchy") - .field("postDate", new Date()) - .field("message", "trying out Elasticsearch") - .endObject() - ) - .get(); --------------------------------------------------- - -Note that you can also index your documents as JSON String and that you -don't have to give an ID: - -[source,java] --------------------------------------------------- -String json = "{" + - "\"user\":\"kimchy\"," + - "\"postDate\":\"2013-01-30\"," + - "\"message\":\"trying out Elasticsearch\"" + - "}"; - -IndexResponse response = client.prepareIndex("twitter", "_doc") -       .setSource(json, XContentType.JSON) - .get(); --------------------------------------------------- - -`IndexResponse` object will give you a report: - -[source,java] --------------------------------------------------- -// Index name -String _index = response.getIndex(); -// Type name -String _type = response.getType(); -// Document ID (generated or not) -String _id = response.getId(); -// Version (if it's the first time you index this document, you will get: 1) -long _version = response.getVersion(); -// status has stored current instance statement. -RestStatus status = response.status(); --------------------------------------------------- - -For more information on the index operation, check out the REST -{ref}/docs-index_.html[index] docs. - diff --git a/docs/java-api/docs/multi-get.asciidoc b/docs/java-api/docs/multi-get.asciidoc deleted file mode 100644 index 8ed2bede2927c..0000000000000 --- a/docs/java-api/docs/multi-get.asciidoc +++ /dev/null @@ -1,30 +0,0 @@ -[[java-docs-multi-get]] -=== Multi Get API - -The multi get API allows to get a list of documents based on their `index` and `id`: - -[source,java] --------------------------------------------------- -MultiGetResponse multiGetItemResponses = client.prepareMultiGet() - .add("twitter", "_doc", "1") <1> - .add("twitter", "_doc", "2", "3", "4") <2> - .add("another", "_doc", "foo") <3> - .get(); - -for (MultiGetItemResponse itemResponse : multiGetItemResponses) { <4> - GetResponse response = itemResponse.getResponse(); - if (response.isExists()) { <5> - String json = response.getSourceAsString(); <6> - } -} --------------------------------------------------- -<1> get by a single id -<2> or by a list of ids for the same index -<3> you can also get from another index -<4> iterate over the result set -<5> you can check if the document exists -<6> access to the `_source` field - -For more information on the multi get operation, check out the REST -{ref}/docs-multi-get.html[multi get] docs. - diff --git a/docs/java-api/docs/reindex.asciidoc b/docs/java-api/docs/reindex.asciidoc deleted file mode 100644 index 842e763f74d71..0000000000000 --- a/docs/java-api/docs/reindex.asciidoc +++ /dev/null @@ -1,11 +0,0 @@ -[[java-docs-reindex]] -=== Reindex API - -See {ref}/docs-reindex.html[reindex API]. - -["source","java",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{client-reindex-tests}/ReindexDocumentationIT.java[reindex1] --------------------------------------------------- -<1> Optionally a query can provided to filter what documents should be - re-indexed from the source to the target index. diff --git a/docs/java-api/docs/update-by-query.asciidoc b/docs/java-api/docs/update-by-query.asciidoc deleted file mode 100644 index ef58d3754276e..0000000000000 --- a/docs/java-api/docs/update-by-query.asciidoc +++ /dev/null @@ -1,166 +0,0 @@ -[[java-docs-update-by-query]] -=== Update By Query API - -The simplest usage of `updateByQuery` updates each -document in an index without changing the source. This usage enables -picking up a new property or another online mapping change. - -["source","java",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{client-reindex-tests}/ReindexDocumentationIT.java[update-by-query] --------------------------------------------------- - -Calls to the `updateByQuery` API start by getting a snapshot of the index, indexing -any documents found using the `internal` versioning. - -NOTE: Version conflicts happen when a document changes between the time of the -snapshot and the time the index request processes. - -When the versions match, `updateByQuery` updates the document -and increments the version number. - -All update and query failures cause `updateByQuery` to abort. These failures are -available from the `BulkByScrollResponse#getIndexingFailures` method. Any -successful updates remain and are not rolled back. While the first failure -causes the abort, the response contains all of the failures generated by the -failed bulk request. - -To prevent version conflicts from causing `updateByQuery` to abort, set -`abortOnVersionConflict(false)`. The first example does this because it is -trying to pick up an online mapping change and a version conflict means that -the conflicting document was updated between the start of the `updateByQuery` -and the time when it attempted to update the document. This is fine because -that update will have picked up the online mapping update. - -The `UpdateByQueryRequestBuilder` API supports filtering the updated documents, -limiting the total number of documents to update, and updating documents -with a script: - - -["source","java",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{client-reindex-tests}/ReindexDocumentationIT.java[update-by-query-filter] --------------------------------------------------- - -`UpdateByQueryRequestBuilder` also enables direct access to the query used -to select the documents. You can use this access to change the default scroll size or -otherwise modify the request for matching documents. - -["source","java",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{client-reindex-tests}/ReindexDocumentationIT.java[update-by-query-size] --------------------------------------------------- - -You can also combine `size` with sorting to limit the documents updated: - -["source","java",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{client-reindex-tests}/ReindexDocumentationIT.java[update-by-query-sort] --------------------------------------------------- - -In addition to changing the `_source` field for the document, you can use a -script to change the action, similar to the Update API: - -["source","java",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{client-reindex-tests}/ReindexDocumentationIT.java[update-by-query-script] --------------------------------------------------- - -As in the <>, you can set the value of `ctx.op` to change the -operation that executes: - -`noop`:: - -Set `ctx.op = "noop"` if your script doesn't make any -changes. The `updateByQuery` operation then omits that document from the updates. -This behavior increments the `noop` counter in the response body. - -`delete`:: - -Set `ctx.op = "delete"` if your script decides that the document must be -deleted. The deletion will be reported in the `deleted` counter in the -response body. - -Setting `ctx.op` to any other value generates an error. Setting any -other field in `ctx` generates an error. - -This API doesn't allow you to move the documents it touches, just modify their -source. This is intentional! We've made no provisions for removing the document -from its original location. - -You can also perform these operations on multiple indices at once, similar to the search API: - -["source","java",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{client-reindex-tests}/ReindexDocumentationIT.java[update-by-query-multi-index] --------------------------------------------------- - -If you provide a `routing` value then the process copies the routing value to the scroll query, -limiting the process to the shards that match that routing value: - -["source","java",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{client-reindex-tests}/ReindexDocumentationIT.java[update-by-query-routing] --------------------------------------------------- - -`updateByQuery` can also use the ingest node by -specifying a `pipeline` like this: - -["source","java",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{client-reindex-tests}/ReindexDocumentationIT.java[update-by-query-pipeline] --------------------------------------------------- - -[float] -[[java-docs-update-by-query-task-api]] -=== Works with the Task API - -You can fetch the status of all running update-by-query requests with the Task API: - -["source","java",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{client-reindex-tests}/ReindexDocumentationIT.java[update-by-query-list-tasks] --------------------------------------------------- - -With the `TaskId` shown above you can look up the task directly: - -// provide API Example -["source","java",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{client-reindex-tests}/ReindexDocumentationIT.java[update-by-query-get-task] --------------------------------------------------- - -[float] -[[java-docs-update-by-query-cancel-task-api]] -=== Works with the Cancel Task API - -Any Update By Query can be canceled using the Task Cancel API: - -["source","java",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{client-reindex-tests}/ReindexDocumentationIT.java[update-by-query-cancel-task] --------------------------------------------------- - -Use the `list tasks` API to find the value of `taskId`. - -Cancelling a request is typically a very fast process but can take up to a few seconds. -The task status API continues to list the task until the cancellation is complete. - -[float] -[[java-docs-update-by-query-rethrottle]] -=== Rethrottling - -Use the `_rethrottle` API to change the value of `requests_per_second` on a running update: - -["source","java",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{client-reindex-tests}/ReindexDocumentationIT.java[update-by-query-rethrottle] --------------------------------------------------- - -Use the `list tasks` API to find the value of `taskId`. - -As with the `updateByQuery` API, the value of `requests_per_second` -can be any positive float value to set the level of the throttle, or `Float.POSITIVE_INFINITY` to disable throttling. -A value of `requests_per_second` that speeds up the process takes -effect immediately. `requests_per_second` values that slow the query take effect -after completing the current batch in order to prevent scroll timeouts. diff --git a/docs/java-api/docs/update.asciidoc b/docs/java-api/docs/update.asciidoc deleted file mode 100644 index 0935c9f11eca4..0000000000000 --- a/docs/java-api/docs/update.asciidoc +++ /dev/null @@ -1,118 +0,0 @@ -[[java-docs-update]] -=== Update API - - -You can either create an `UpdateRequest` and send it to the client: - -[source,java] --------------------------------------------------- -UpdateRequest updateRequest = new UpdateRequest(); -updateRequest.index("index"); -updateRequest.type("_doc"); -updateRequest.id("1"); -updateRequest.doc(jsonBuilder() - .startObject() - .field("gender", "male") - .endObject()); -client.update(updateRequest).get(); --------------------------------------------------- - -Or you can use `prepareUpdate()` method: - -[source,java] --------------------------------------------------- -client.prepareUpdate("ttl", "doc", "1") - .setScript(new Script( - "ctx._source.gender = \"male\"", <1> - ScriptService.ScriptType.INLINE, null, null)) - .get(); - -client.prepareUpdate("ttl", "doc", "1") - .setDoc(jsonBuilder() <2> - .startObject() - .field("gender", "male") - .endObject()) - .get(); --------------------------------------------------- -<1> Your script. It could also be a locally stored script name. -In that case, you'll need to use `ScriptService.ScriptType.FILE` -<2> Document which will be merged to the existing one. - -Note that you can't provide both `script` and `doc`. - -[[java-docs-update-api-script]] -==== Update by script - -The update API allows to update a document based on a script provided: - -[source,java] --------------------------------------------------- -UpdateRequest updateRequest = new UpdateRequest("ttl", "doc", "1") - .script(new Script("ctx._source.gender = \"male\"")); -client.update(updateRequest).get(); --------------------------------------------------- - - -[[java-docs-update-api-merge-docs]] -==== Update by merging documents - -The update API also support passing a partial document, which will be merged into the existing document (simple -recursive merge, inner merging of objects, replacing core "keys/values" and arrays). For example: - -[source,java] --------------------------------------------------- -UpdateRequest updateRequest = new UpdateRequest("index", "type", "1") - .doc(jsonBuilder() - .startObject() - .field("gender", "male") - .endObject()); -client.update(updateRequest).get(); --------------------------------------------------- - - -[[java-docs-update-api-upsert]] -==== Upsert - -There is also support for `upsert`. If the document does not exist, the content of the `upsert` -element will be used to index the fresh doc: - -[source,java] --------------------------------------------------- -IndexRequest indexRequest = new IndexRequest("index", "type", "1") - .source(jsonBuilder() - .startObject() - .field("name", "Joe Smith") - .field("gender", "male") - .endObject()); -UpdateRequest updateRequest = new UpdateRequest("index", "type", "1") - .doc(jsonBuilder() - .startObject() - .field("gender", "male") - .endObject()) - .upsert(indexRequest); <1> -client.update(updateRequest).get(); --------------------------------------------------- -<1> If the document does not exist, the one in `indexRequest` will be added - -If the document `index/_doc/1` already exists, we will have after this operation a document like: - -[source,js] --------------------------------------------------- -{ - "name" : "Joe Dalton", - "gender": "male" <1> -} --------------------------------------------------- -// NOTCONSOLE -<1> This field is added by the update request - -If it does not exist, we will have a new document: - -[source,js] --------------------------------------------------- -{ - "name" : "Joe Smith", - "gender": "male" -} --------------------------------------------------- -// NOTCONSOLE diff --git a/docs/java-api/index.asciidoc b/docs/java-api/index.asciidoc deleted file mode 100644 index 4a7fd7482d26e..0000000000000 --- a/docs/java-api/index.asciidoc +++ /dev/null @@ -1,149 +0,0 @@ -= Java API - -include::../Versions.asciidoc[] - -[[java-api]] -[preface] -== Preface - -deprecated[7.0.0, The `TransportClient` is deprecated in favour of the {java-rest}/java-rest-high.html[Java High Level REST Client] and will be removed in Elasticsearch 8.0. The {java-rest}/java-rest-high-level-migration.html[migration guide] describes all the steps needed to migrate.] - -This section describes the Java API that Elasticsearch provides. All -Elasticsearch operations are executed using a -<> object. All -operations are completely asynchronous in nature (either accepts a -listener, or returns a future). - -Additionally, operations on a client may be accumulated and executed in -<>. - -Note, all the APIs are exposed through the -Java API (actually, the Java API is used internally to execute them). - -== Javadoc - -The javadoc for the transport client can be found at {transport-client-javadoc}/index.html. - -== Maven Repository - -Elasticsearch is hosted on -http://search.maven.org/#search%7Cga%7C1%7Cg%3A%22org.elasticsearch.client%22[Maven -Central]. - -For example, you can define the latest version in your `pom.xml` file: - -["source","xml",subs="attributes"] --------------------------------------------------- - - org.elasticsearch.client - transport - {version} - --------------------------------------------------- - -[[java-transport-usage-maven-lucene]] -=== Lucene Snapshot repository - -The very first releases of any major version (like a beta), might have been built on top of a Lucene Snapshot version. -In such a case you will be unable to resolve the Lucene dependencies of the client. - -For example, if you want to use the `6.0.0-beta1` version which depends on Lucene `7.0.0-snapshot-00142c9`, you must -define the following repository. - -For Maven: - -["source","xml",subs="attributes"] --------------------------------------------------- - - elastic-lucene-snapshots - Elastic Lucene Snapshots - https://s3.amazonaws.com/download.elasticsearch.org/lucenesnapshots/00142c9 - true - false - --------------------------------------------------- - -For Gradle: - -["source","groovy",subs="attributes"] --------------------------------------------------- -maven { - name "lucene-snapshots" - url 'https://s3.amazonaws.com/download.elasticsearch.org/lucenesnapshots/00142c9' -} --------------------------------------------------- - -=== Log4j 2 Logger - -You need to also include Log4j 2 dependencies: - -["source","xml",subs="attributes"] --------------------------------------------------- - - org.apache.logging.log4j - log4j-core - 2.11.1 - --------------------------------------------------- - -And also provide a Log4j 2 configuration file in your classpath. -For example, you can add in your `src/main/resources` project dir a `log4j2.properties` file like: - - -["source","properties",subs="attributes"] --------------------------------------------------- -appender.console.type = Console -appender.console.name = console -appender.console.layout.type = PatternLayout -appender.console.layout.pattern = [%d{ISO8601}][%-5p][%-25c] %marker%m%n - -rootLogger.level = info -rootLogger.appenderRef.console.ref = console --------------------------------------------------- - -=== Using another Logger - -If you want to use another logger than Log4j 2, you can use http://www.slf4j.org/[SLF4J] bridge to do that: - -["source","xml",subs="attributes"] --------------------------------------------------- - - org.apache.logging.log4j - log4j-to-slf4j - 2.11.1 - - - org.slf4j - slf4j-api - 1.7.24 - --------------------------------------------------- - -http://www.slf4j.org/manual.html[This page] lists implementations you can use. Pick your favorite logger -and add it as a dependency. As an example, we will use the `slf4j-simple` logger: - -["source","xml",subs="attributes"] --------------------------------------------------- - - org.slf4j - slf4j-simple - 1.7.21 - --------------------------------------------------- - -:client-tests: {docdir}/../../server/src/test/java/org/elasticsearch/client/documentation -:hlrc-tests: {docdir}/../../client/rest-high-level/src/test/java/org/elasticsearch/client - -:client-reindex-tests: {docdir}/../../modules/reindex/src/test/java/org/elasticsearch/client/documentation - -include::client.asciidoc[] - -include::docs.asciidoc[] - -include::search.asciidoc[] - -include::aggs.asciidoc[] - -include::query-dsl.asciidoc[] - -include::admin/index.asciidoc[] diff --git a/docs/java-api/query-dsl.asciidoc b/docs/java-api/query-dsl.asciidoc deleted file mode 100644 index f4823fa08ab51..0000000000000 --- a/docs/java-api/query-dsl.asciidoc +++ /dev/null @@ -1,40 +0,0 @@ -[[java-query-dsl]] -== Query DSL - -Elasticsearch provides a full Java query dsl in a similar manner to the -REST {ref}/query-dsl.html[Query DSL]. The factory for query -builders is `QueryBuilders`. Once your query is ready, you can use the -<>. - -To use `QueryBuilders` just import them in your class: - -[source,java] --------------------------------------------------- -import static org.elasticsearch.index.query.QueryBuilders.*; --------------------------------------------------- - -Note that you can easily print (aka debug) JSON generated queries using -`toString()` method on `QueryBuilder` object. - -The `QueryBuilder` can then be used with any API that accepts a query, -such as `count` and `search`. - -:query-dsl-test: {docdir}/../../client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/QueryDSLDocumentationTests.java - -include::query-dsl/match-all-query.asciidoc[] - -include::query-dsl/full-text-queries.asciidoc[] - -include::query-dsl/term-level-queries.asciidoc[] - -include::query-dsl/compound-queries.asciidoc[] - -include::query-dsl/joining-queries.asciidoc[] - -include::query-dsl/geo-queries.asciidoc[] - -include::query-dsl/special-queries.asciidoc[] - -include::query-dsl/span-queries.asciidoc[] - -:query-dsl-test!: diff --git a/docs/java-api/query-dsl/bool-query.asciidoc b/docs/java-api/query-dsl/bool-query.asciidoc deleted file mode 100644 index da9ca0ad0cc8c..0000000000000 --- a/docs/java-api/query-dsl/bool-query.asciidoc +++ /dev/null @@ -1,13 +0,0 @@ -[[java-query-dsl-bool-query]] -==== Bool Query - -See {ref}/query-dsl-bool-query.html[Bool Query] - -["source","java",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{query-dsl-test}[bool] --------------------------------------------------- -<1> must query -<2> must not query -<3> should query -<4> a query that must appear in the matching documents but doesn't contribute to scoring. diff --git a/docs/java-api/query-dsl/boosting-query.asciidoc b/docs/java-api/query-dsl/boosting-query.asciidoc deleted file mode 100644 index 2a3c4437d1f89..0000000000000 --- a/docs/java-api/query-dsl/boosting-query.asciidoc +++ /dev/null @@ -1,12 +0,0 @@ -[[java-query-dsl-boosting-query]] -==== Boosting Query - -See {ref}/query-dsl-boosting-query.html[Boosting Query] - -["source","java",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{query-dsl-test}[boosting] --------------------------------------------------- -<1> query that will promote documents -<2> query that will demote documents -<3> negative boost diff --git a/docs/java-api/query-dsl/common-terms-query.asciidoc b/docs/java-api/query-dsl/common-terms-query.asciidoc deleted file mode 100644 index 2c8dfc7a88cfe..0000000000000 --- a/docs/java-api/query-dsl/common-terms-query.asciidoc +++ /dev/null @@ -1,11 +0,0 @@ -[[java-query-dsl-common-terms-query]] -==== Common Terms Query - -See {ref}/query-dsl-common-terms-query.html[Common Terms Query] - -["source","java",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{query-dsl-test}[common_terms] --------------------------------------------------- -<1> field -<2> value diff --git a/docs/java-api/query-dsl/compound-queries.asciidoc b/docs/java-api/query-dsl/compound-queries.asciidoc deleted file mode 100644 index b93e3b694a5ef..0000000000000 --- a/docs/java-api/query-dsl/compound-queries.asciidoc +++ /dev/null @@ -1,45 +0,0 @@ -[[java-compound-queries]] -=== Compound queries - -Compound queries wrap other compound or leaf queries, either to combine their -results and scores, to change their behaviour, or to switch from query to -filter context. - -The queries in this group are: - -<>:: - -A query which wraps another query, but executes it in filter context. All -matching documents are given the same ``constant'' `_score`. - -<>:: - -The default query for combining multiple leaf or compound query clauses, as -`must`, `should`, `must_not`, or `filter` clauses. The `must` and `should` -clauses have their scores combined -- the more matching clauses, the better -- -while the `must_not` and `filter` clauses are executed in filter context. - -<>:: - -A query which accepts multiple queries, and returns any documents which match -any of the query clauses. While the `bool` query combines the scores from all -matching queries, the `dis_max` query uses the score of the single best- -matching query clause. - -<>:: - -Modify the scores returned by the main query with functions to take into -account factors like popularity, recency, distance, or custom algorithms -implemented with scripting. - -<>:: - -Return documents which match a `positive` query, but reduce the score of -documents which also match a `negative` query. - - -include::constant-score-query.asciidoc[] -include::bool-query.asciidoc[] -include::dis-max-query.asciidoc[] -include::function-score-query.asciidoc[] -include::boosting-query.asciidoc[] diff --git a/docs/java-api/query-dsl/constant-score-query.asciidoc b/docs/java-api/query-dsl/constant-score-query.asciidoc deleted file mode 100644 index 49c5adbee6a73..0000000000000 --- a/docs/java-api/query-dsl/constant-score-query.asciidoc +++ /dev/null @@ -1,11 +0,0 @@ -[[java-query-dsl-constant-score-query]] -==== Constant Score Query - -See {ref}/query-dsl-constant-score-query.html[Constant Score Query] - -["source","java",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{query-dsl-test}[constant_score] --------------------------------------------------- -<1> your query -<2> query score diff --git a/docs/java-api/query-dsl/dis-max-query.asciidoc b/docs/java-api/query-dsl/dis-max-query.asciidoc deleted file mode 100644 index 8c91bcb99011a..0000000000000 --- a/docs/java-api/query-dsl/dis-max-query.asciidoc +++ /dev/null @@ -1,13 +0,0 @@ -[[java-query-dsl-dis-max-query]] -==== Dis Max Query - -See {ref}/query-dsl-dis-max-query.html[Dis Max Query] - -["source","java",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{query-dsl-test}[dis_max] --------------------------------------------------- -<1> add your queries -<2> add your queries -<3> boost factor -<4> tie breaker diff --git a/docs/java-api/query-dsl/exists-query.asciidoc b/docs/java-api/query-dsl/exists-query.asciidoc deleted file mode 100644 index 6fa5ba6a6f257..0000000000000 --- a/docs/java-api/query-dsl/exists-query.asciidoc +++ /dev/null @@ -1,10 +0,0 @@ -[[java-query-dsl-exists-query]] -==== Exists Query - -See {ref}/query-dsl-exists-query.html[Exists Query]. - -["source","java",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{query-dsl-test}[exists] --------------------------------------------------- -<1> field diff --git a/docs/java-api/query-dsl/full-text-queries.asciidoc b/docs/java-api/query-dsl/full-text-queries.asciidoc deleted file mode 100644 index 27ce4bee1ba64..0000000000000 --- a/docs/java-api/query-dsl/full-text-queries.asciidoc +++ /dev/null @@ -1,44 +0,0 @@ -[[java-full-text-queries]] -=== Full text queries - -The high-level full text queries are usually used for running full text -queries on full text fields like the body of an email. They understand how the -field being queried is analyzed and will apply each field's -`analyzer` (or `search_analyzer`) to the query string before executing. - -The queries in this group are: - -<>:: - - The standard query for performing full text queries, including fuzzy matching - and phrase or proximity queries. - -<>:: - - The multi-field version of the `match` query. - -<>:: - - A more specialized query which gives more preference to uncommon words. - -<>:: - - Supports the compact Lucene query string syntax, - allowing you to specify AND|OR|NOT conditions and multi-field search - within a single query string. For expert users only. - -<>:: - - A simpler, more robust version of the `query_string` syntax suitable - for exposing directly to users. - -include::match-query.asciidoc[] - -include::multi-match-query.asciidoc[] - -include::common-terms-query.asciidoc[] - -include::query-string-query.asciidoc[] - -include::simple-query-string-query.asciidoc[] - diff --git a/docs/java-api/query-dsl/function-score-query.asciidoc b/docs/java-api/query-dsl/function-score-query.asciidoc deleted file mode 100644 index fcd5f2dc473f5..0000000000000 --- a/docs/java-api/query-dsl/function-score-query.asciidoc +++ /dev/null @@ -1,19 +0,0 @@ -[[java-query-dsl-function-score-query]] -==== Function Score Query - -See {ref}/query-dsl-function-score-query.html[Function Score Query]. - -To use `ScoreFunctionBuilders` just import them in your class: - -[source,java] --------------------------------------------------- -import static org.elasticsearch.index.query.functionscore.ScoreFunctionBuilders.*; --------------------------------------------------- - -["source","java",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{query-dsl-test}[function_score] --------------------------------------------------- -<1> Add a first function based on a query -<2> And randomize the score based on a given seed -<3> Add another function based on the age field diff --git a/docs/java-api/query-dsl/fuzzy-query.asciidoc b/docs/java-api/query-dsl/fuzzy-query.asciidoc deleted file mode 100644 index 4a7bde82cdfb7..0000000000000 --- a/docs/java-api/query-dsl/fuzzy-query.asciidoc +++ /dev/null @@ -1,11 +0,0 @@ -[[java-query-dsl-fuzzy-query]] -==== Fuzzy Query - -See {ref}/query-dsl-fuzzy-query.html[Fuzzy Query] - -["source","java",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{query-dsl-test}[fuzzy] --------------------------------------------------- -<1> field -<2> text diff --git a/docs/java-api/query-dsl/geo-bounding-box-query.asciidoc b/docs/java-api/query-dsl/geo-bounding-box-query.asciidoc deleted file mode 100644 index 4983a21213376..0000000000000 --- a/docs/java-api/query-dsl/geo-bounding-box-query.asciidoc +++ /dev/null @@ -1,12 +0,0 @@ -[[java-query-dsl-geo-bounding-box-query]] -==== Geo Bounding Box Query - -See {ref}/query-dsl-geo-bounding-box-query.html[Geo Bounding Box Query] - -["source","java",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{query-dsl-test}[geo_bounding_box] --------------------------------------------------- -<1> field -<2> bounding box top left point -<3> bounding box bottom right point diff --git a/docs/java-api/query-dsl/geo-distance-query.asciidoc b/docs/java-api/query-dsl/geo-distance-query.asciidoc deleted file mode 100644 index cc8c89ca61eea..0000000000000 --- a/docs/java-api/query-dsl/geo-distance-query.asciidoc +++ /dev/null @@ -1,12 +0,0 @@ -[[java-query-dsl-geo-distance-query]] -==== Geo Distance Query - -See {ref}/query-dsl-geo-distance-query.html[Geo Distance Query] - -["source","java",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{query-dsl-test}[geo_distance] --------------------------------------------------- -<1> field -<2> center point -<3> distance from center point diff --git a/docs/java-api/query-dsl/geo-polygon-query.asciidoc b/docs/java-api/query-dsl/geo-polygon-query.asciidoc deleted file mode 100644 index 7dbf49b8d1afd..0000000000000 --- a/docs/java-api/query-dsl/geo-polygon-query.asciidoc +++ /dev/null @@ -1,11 +0,0 @@ -[[java-query-dsl-geo-polygon-query]] -==== Geo Polygon Query - -See {ref}/query-dsl-geo-polygon-query.html[Geo Polygon Query] - -["source","java",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{query-dsl-test}[geo_polygon] --------------------------------------------------- -<1> add your polygon of points a document should fall within -<2> initialise the query with field and points diff --git a/docs/java-api/query-dsl/geo-queries.asciidoc b/docs/java-api/query-dsl/geo-queries.asciidoc deleted file mode 100644 index 10df4ff5e8716..0000000000000 --- a/docs/java-api/query-dsl/geo-queries.asciidoc +++ /dev/null @@ -1,34 +0,0 @@ -[[java-geo-queries]] -=== Geo queries - -Elasticsearch supports two types of geo data: -`geo_point` fields which support lat/lon pairs, and -`geo_shape` fields, which support points, lines, circles, polygons, multi-polygons etc. - -The queries in this group are: - -<> query:: - - Find document with geo-shapes which either intersect, are contained by, or - do not intersect with the specified geo-shape. - -<> query:: - - Finds documents with geo-points that fall into the specified rectangle. - -<> query:: - - Finds document with geo-points within the specified distance of a central - point. - -<> query:: - - Find documents with geo-points within the specified polygon. - -include::geo-shape-query.asciidoc[] - -include::geo-bounding-box-query.asciidoc[] - -include::geo-distance-query.asciidoc[] - -include::geo-polygon-query.asciidoc[] diff --git a/docs/java-api/query-dsl/geo-shape-query.asciidoc b/docs/java-api/query-dsl/geo-shape-query.asciidoc deleted file mode 100644 index c2cd4c14e3adc..0000000000000 --- a/docs/java-api/query-dsl/geo-shape-query.asciidoc +++ /dev/null @@ -1,56 +0,0 @@ -[[java-query-dsl-geo-shape-query]] -==== GeoShape Query - -See {ref}/query-dsl-geo-shape-query.html[Geo Shape Query] - -Note: the `geo_shape` type uses `Spatial4J` and `JTS`, both of which are -optional dependencies. Consequently you must add `Spatial4J` and `JTS` -to your classpath in order to use this type: - -[source,xml] ------------------------------------------------ - - org.locationtech.spatial4j - spatial4j - 0.7 <1> - - - - org.locationtech.jts - jts-core - 1.15.0 <2> - - - xerces - xercesImpl - - - ------------------------------------------------ -<1> check for updates in http://search.maven.org/#search%7Cga%7C1%7Cg%3A%22org.locationtech.spatial4j%22%20AND%20a%3A%22spatial4j%22[Maven Central] -<2> check for updates in http://search.maven.org/#search%7Cga%7C1%7Cg%3A%22org.locationtech.jts%22%20AND%20a%3A%22jts-core%22[Maven Central] - -[source,java] --------------------------------------------------- -// Import ShapeRelation and ShapeBuilder -import org.elasticsearch.common.geo.ShapeRelation; -import org.elasticsearch.common.geo.builders.ShapeBuilder; --------------------------------------------------- - -["source","java",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{query-dsl-test}[geo_shape] --------------------------------------------------- -<1> field -<2> shape -<3> relation can be `ShapeRelation.CONTAINS`, `ShapeRelation.WITHIN`, `ShapeRelation.INTERSECTS` or `ShapeRelation.DISJOINT` - -["source","java",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{query-dsl-test}[indexed_geo_shape] --------------------------------------------------- -<1> field -<2> The ID of the document that containing the pre-indexed shape. -<3> relation -<4> Name of the index where the pre-indexed shape is. Defaults to 'shapes'. -<5> The field specified as path containing the pre-indexed shape. Defaults to 'shape'. diff --git a/docs/java-api/query-dsl/has-child-query.asciidoc b/docs/java-api/query-dsl/has-child-query.asciidoc deleted file mode 100644 index f47f3af487dfe..0000000000000 --- a/docs/java-api/query-dsl/has-child-query.asciidoc +++ /dev/null @@ -1,23 +0,0 @@ -[[java-query-dsl-has-child-query]] -==== Has Child Query - -See {ref}/query-dsl-has-child-query.html[Has Child Query] - -When using the `has_child` query it is important to use the `PreBuiltTransportClient` instead of the regular client: - -[source,java] --------------------------------------------------- -Settings settings = Settings.builder().put("cluster.name", "elasticsearch").build(); -TransportClient client = new PreBuiltTransportClient(settings); -client.addTransportAddress(new TransportAddress(new InetSocketAddress(InetAddresses.forString("127.0.0.1"), 9300))); --------------------------------------------------- - -Otherwise the parent-join module doesn't get loaded and the `has_child` query can't be used from the transport client. - -["source","java",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{query-dsl-test}[has_child] --------------------------------------------------- -<1> child type to query against -<2> query -<3> score mode can be `ScoreMode.Avg`, `ScoreMode.Max`, `ScoreMode.Min`, `ScoreMode.None` or `ScoreMode.Total` diff --git a/docs/java-api/query-dsl/has-parent-query.asciidoc b/docs/java-api/query-dsl/has-parent-query.asciidoc deleted file mode 100644 index 6a83fe2b0698f..0000000000000 --- a/docs/java-api/query-dsl/has-parent-query.asciidoc +++ /dev/null @@ -1,23 +0,0 @@ -[[java-query-dsl-has-parent-query]] -==== Has Parent Query - -See {ref}/query-dsl-has-parent-query.html[Has Parent] - -When using the `has_parent` query it is important to use the `PreBuiltTransportClient` instead of the regular client: - -[source,java] --------------------------------------------------- -Settings settings = Settings.builder().put("cluster.name", "elasticsearch").build(); -TransportClient client = new PreBuiltTransportClient(settings); -client.addTransportAddress(new TransportAddress(new InetSocketAddress(InetAddresses.forString("127.0.0.1"), 9300))); --------------------------------------------------- - -Otherwise the parent-join module doesn't get loaded and the `has_parent` query can't be used from the transport client. - -["source","java",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{query-dsl-test}[has_parent] --------------------------------------------------- -<1> parent type to query against -<2> query -<3> whether the score from the parent hit should propagate to the child hit diff --git a/docs/java-api/query-dsl/ids-query.asciidoc b/docs/java-api/query-dsl/ids-query.asciidoc deleted file mode 100644 index ba12a5df38b0e..0000000000000 --- a/docs/java-api/query-dsl/ids-query.asciidoc +++ /dev/null @@ -1,10 +0,0 @@ -[[java-query-dsl-ids-query]] -==== Ids Query - - -See {ref}/query-dsl-ids-query.html[Ids Query] - -["source","java",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{query-dsl-test}[ids] --------------------------------------------------- diff --git a/docs/java-api/query-dsl/joining-queries.asciidoc b/docs/java-api/query-dsl/joining-queries.asciidoc deleted file mode 100644 index fcefef5f6245b..0000000000000 --- a/docs/java-api/query-dsl/joining-queries.asciidoc +++ /dev/null @@ -1,28 +0,0 @@ -[[java-joining-queries]] -=== Joining queries - -Performing full SQL-style joins in a distributed system like Elasticsearch is -prohibitively expensive. Instead, Elasticsearch offers two forms of join -which are designed to scale horizontally. - -<>:: - -Documents may contains fields of type `nested`. These -fields are used to index arrays of objects, where each object can be queried -(with the `nested` query) as an independent document. - -<> and <> queries:: - -A parent-child relationship can exist between two -document types within a single index. The `has_child` query returns parent -documents whose child documents match the specified query, while the -`has_parent` query returns child documents whose parent document matches the -specified query. - -include::nested-query.asciidoc[] - -include::has-child-query.asciidoc[] - -include::has-parent-query.asciidoc[] - - diff --git a/docs/java-api/query-dsl/match-all-query.asciidoc b/docs/java-api/query-dsl/match-all-query.asciidoc deleted file mode 100644 index 85d847528f5b8..0000000000000 --- a/docs/java-api/query-dsl/match-all-query.asciidoc +++ /dev/null @@ -1,9 +0,0 @@ -[[java-query-dsl-match-all-query]] -=== Match All Query - -See {ref}/query-dsl-match-all-query.html[Match All Query] - -["source","java",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{query-dsl-test}[match_all] --------------------------------------------------- diff --git a/docs/java-api/query-dsl/match-query.asciidoc b/docs/java-api/query-dsl/match-query.asciidoc deleted file mode 100644 index 6884deb5f1f24..0000000000000 --- a/docs/java-api/query-dsl/match-query.asciidoc +++ /dev/null @@ -1,11 +0,0 @@ -[[java-query-dsl-match-query]] -==== Match Query - -See {ref}/query-dsl-match-query.html[Match Query] - -["source","java",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{query-dsl-test}[match] --------------------------------------------------- -<1> field -<2> text diff --git a/docs/java-api/query-dsl/mlt-query.asciidoc b/docs/java-api/query-dsl/mlt-query.asciidoc deleted file mode 100644 index 11e5c7ef40482..0000000000000 --- a/docs/java-api/query-dsl/mlt-query.asciidoc +++ /dev/null @@ -1,13 +0,0 @@ -[[java-query-dsl-mlt-query]] -==== More Like This Query - -See {ref}/query-dsl-mlt-query.html[More Like This Query] - -["source","java",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{query-dsl-test}[more_like_this] --------------------------------------------------- -<1> fields -<2> text -<3> ignore threshold -<4> max num of Terms in generated queries diff --git a/docs/java-api/query-dsl/multi-match-query.asciidoc b/docs/java-api/query-dsl/multi-match-query.asciidoc deleted file mode 100644 index 86b384d44d3c0..0000000000000 --- a/docs/java-api/query-dsl/multi-match-query.asciidoc +++ /dev/null @@ -1,11 +0,0 @@ -[[java-query-dsl-multi-match-query]] -==== Multi Match Query - -See {ref}/query-dsl-multi-match-query.html[Multi Match Query] - -["source","java",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{query-dsl-test}[multi_match] --------------------------------------------------- -<1> text -<2> fields diff --git a/docs/java-api/query-dsl/nested-query.asciidoc b/docs/java-api/query-dsl/nested-query.asciidoc deleted file mode 100644 index 9b675ea72acfd..0000000000000 --- a/docs/java-api/query-dsl/nested-query.asciidoc +++ /dev/null @@ -1,12 +0,0 @@ -[[java-query-dsl-nested-query]] -==== Nested Query - -See {ref}/query-dsl-nested-query.html[Nested Query] - -["source","java",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{query-dsl-test}[nested] --------------------------------------------------- -<1> path to nested document -<2> your query. Any fields referenced inside the query must use the complete path (fully qualified). -<3> score mode could be `ScoreMode.Max`, `ScoreMode.Min`, `ScoreMode.Total`, `ScoreMode.Avg` or `ScoreMode.None` diff --git a/docs/java-api/query-dsl/percolate-query.asciidoc b/docs/java-api/query-dsl/percolate-query.asciidoc deleted file mode 100644 index 18cdd4a14e5a9..0000000000000 --- a/docs/java-api/query-dsl/percolate-query.asciidoc +++ /dev/null @@ -1,61 +0,0 @@ -[[java-query-percolate-query]] -==== Percolate Query - -See: - * {ref}/query-dsl-percolate-query.html[Percolate Query] - - -[source,java] --------------------------------------------------- -Settings settings = Settings.builder().put("cluster.name", "elasticsearch").build(); -TransportClient client = new PreBuiltTransportClient(settings); -client.addTransportAddress(new TransportAddress(new InetSocketAddress(InetAddresses.forString("127.0.0.1"), 9300))); --------------------------------------------------- - -Before the `percolate` query can be used an `percolator` mapping should be added and -a document containing a percolator query should be indexed: - -[source,java] --------------------------------------------------- -// create an index with a percolator field with the name 'query': -client.admin().indices().prepareCreate("myIndexName") - .addMapping("_doc", "query", "type=percolator", "content", "type=text") - .get(); - -//This is the query we're registering in the percolator -QueryBuilder qb = termQuery("content", "amazing"); - -//Index the query = register it in the percolator -client.prepareIndex("myIndexName", "_doc", "myDesignatedQueryName") - .setSource(jsonBuilder() - .startObject() - .field("query", qb) // Register the query - .endObject()) - .setRefreshPolicy(RefreshPolicy.IMMEDIATE) // Needed when the query shall be available immediately - .get(); --------------------------------------------------- - -This indexes the above term query under the name -*myDesignatedQueryName*. - -In order to check a document against the registered queries, use this -code: - -[source,java] --------------------------------------------------- -//Build a document to check against the percolator -XContentBuilder docBuilder = XContentFactory.jsonBuilder().startObject(); -docBuilder.field("content", "This is amazing!"); -docBuilder.endObject(); //End of the JSON root object - -PercolateQueryBuilder percolateQuery = new PercolateQueryBuilder("query", "_doc", BytesReference.bytes(docBuilder)); - -// Percolate, by executing the percolator query in the query dsl: -SearchResponse response = client().prepareSearch("myIndexName") - .setQuery(percolateQuery)) - .get(); -//Iterate over the results -for(SearchHit hit : response.getHits()) { - // Percolator queries as hit -} --------------------------------------------------- diff --git a/docs/java-api/query-dsl/prefix-query.asciidoc b/docs/java-api/query-dsl/prefix-query.asciidoc deleted file mode 100644 index eb15c4426f633..0000000000000 --- a/docs/java-api/query-dsl/prefix-query.asciidoc +++ /dev/null @@ -1,11 +0,0 @@ -[[java-query-dsl-prefix-query]] -==== Prefix Query - -See {ref}/query-dsl-prefix-query.html[Prefix Query] - -["source","java",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{query-dsl-test}[prefix] --------------------------------------------------- -<1> field -<2> prefix diff --git a/docs/java-api/query-dsl/query-string-query.asciidoc b/docs/java-api/query-dsl/query-string-query.asciidoc deleted file mode 100644 index 7d8bead2e340a..0000000000000 --- a/docs/java-api/query-dsl/query-string-query.asciidoc +++ /dev/null @@ -1,9 +0,0 @@ -[[java-query-dsl-query-string-query]] -==== Query String Query - -See {ref}/query-dsl-query-string-query.html[Query String Query] - -["source","java",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{query-dsl-test}[query_string] --------------------------------------------------- diff --git a/docs/java-api/query-dsl/range-query.asciidoc b/docs/java-api/query-dsl/range-query.asciidoc deleted file mode 100644 index 2d58fbd3a34ef..0000000000000 --- a/docs/java-api/query-dsl/range-query.asciidoc +++ /dev/null @@ -1,22 +0,0 @@ -[[java-query-dsl-range-query]] -==== Range Query - -See {ref}/query-dsl-range-query.html[Range Query] - -["source","java",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{query-dsl-test}[range] --------------------------------------------------- -<1> field -<2> from -<3> to -<4> include lower value means that `from` is `gt` when `false` or `gte` when `true` -<5> include upper value means that `to` is `lt` when `false` or `lte` when `true` - -["source","java",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{query-dsl-test}[range_simplified] --------------------------------------------------- -<1> field -<2> set `from` to 10 and `includeLower` to `true` -<3> set `to` to 20 and `includeUpper` to `false` diff --git a/docs/java-api/query-dsl/regexp-query.asciidoc b/docs/java-api/query-dsl/regexp-query.asciidoc deleted file mode 100644 index f9cd8cd72d9d5..0000000000000 --- a/docs/java-api/query-dsl/regexp-query.asciidoc +++ /dev/null @@ -1,11 +0,0 @@ -[[java-query-dsl-regexp-query]] -==== Regexp Query - -See {ref}/query-dsl-regexp-query.html[Regexp Query] - -["source","java",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{query-dsl-test}[regexp] --------------------------------------------------- -<1> field -<2> regexp diff --git a/docs/java-api/query-dsl/script-query.asciidoc b/docs/java-api/query-dsl/script-query.asciidoc deleted file mode 100644 index a8c60f1d8eb0d..0000000000000 --- a/docs/java-api/query-dsl/script-query.asciidoc +++ /dev/null @@ -1,29 +0,0 @@ -[[java-query-dsl-script-query]] -==== Script Query - -See {ref}/query-dsl-script-query.html[Script Query] - -["source","java",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{query-dsl-test}[script_inline] --------------------------------------------------- -<1> inlined script - - -If you have stored on each data node a script named `myscript.painless` with: - -[source,painless] --------------------------------------------------- -doc['num1'].value > params.param1 --------------------------------------------------- - -You can use it then with: - -["source","java",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{query-dsl-test}[script_file] --------------------------------------------------- -<1> Script type: either `ScriptType.FILE`, `ScriptType.INLINE` or `ScriptType.INDEXED` -<2> Scripting engine -<3> Script name -<4> Parameters as a `Map` diff --git a/docs/java-api/query-dsl/simple-query-string-query.asciidoc b/docs/java-api/query-dsl/simple-query-string-query.asciidoc deleted file mode 100644 index c3b32ecd1cbb2..0000000000000 --- a/docs/java-api/query-dsl/simple-query-string-query.asciidoc +++ /dev/null @@ -1,9 +0,0 @@ -[[java-query-dsl-simple-query-string-query]] -==== Simple Query String Query - -See {ref}/query-dsl-simple-query-string-query.html[Simple Query String Query] - -["source","java",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{query-dsl-test}[simple_query_string] --------------------------------------------------- diff --git a/docs/java-api/query-dsl/span-containing-query.asciidoc b/docs/java-api/query-dsl/span-containing-query.asciidoc deleted file mode 100644 index 173e26952c265..0000000000000 --- a/docs/java-api/query-dsl/span-containing-query.asciidoc +++ /dev/null @@ -1,11 +0,0 @@ -[[java-query-dsl-span-containing-query]] -==== Span Containing Query - -See {ref}/query-dsl-span-containing-query.html[Span Containing Query] - -["source","java",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{query-dsl-test}[span_containing] --------------------------------------------------- -<1> `big` part -<2> `little` part diff --git a/docs/java-api/query-dsl/span-first-query.asciidoc b/docs/java-api/query-dsl/span-first-query.asciidoc deleted file mode 100644 index d02c164754c53..0000000000000 --- a/docs/java-api/query-dsl/span-first-query.asciidoc +++ /dev/null @@ -1,11 +0,0 @@ -[[java-query-dsl-span-first-query]] -==== Span First Query - -See {ref}/query-dsl-span-first-query.html[Span First Query] - -["source","java",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{query-dsl-test}[span_first] --------------------------------------------------- -<1> query -<2> max end position diff --git a/docs/java-api/query-dsl/span-multi-term-query.asciidoc b/docs/java-api/query-dsl/span-multi-term-query.asciidoc deleted file mode 100644 index eea00f61fe7e1..0000000000000 --- a/docs/java-api/query-dsl/span-multi-term-query.asciidoc +++ /dev/null @@ -1,11 +0,0 @@ -[[java-query-dsl-span-multi-term-query]] -==== Span Multi Term Query - -See {ref}/query-dsl-span-multi-term-query.html[Span Multi Term Query] - -["source","java",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{query-dsl-test}[span_multi] --------------------------------------------------- -<1> Can be any builder extending the `MultiTermQueryBuilder` class. For example: `FuzzyQueryBuilder`, -`PrefixQueryBuilder`, `RangeQueryBuilder`, `RegexpQueryBuilder` or `WildcardQueryBuilder`. diff --git a/docs/java-api/query-dsl/span-near-query.asciidoc b/docs/java-api/query-dsl/span-near-query.asciidoc deleted file mode 100644 index 6f4661e34c9d1..0000000000000 --- a/docs/java-api/query-dsl/span-near-query.asciidoc +++ /dev/null @@ -1,12 +0,0 @@ -[[java-query-dsl-span-near-query]] -==== Span Near Query - -See {ref}/query-dsl-span-near-query.html[Span Near Query] - -["source","java",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{query-dsl-test}[span_near] --------------------------------------------------- -<1> span term queries -<2> slop factor: the maximum number of intervening unmatched positions -<3> whether matches are required to be in-order diff --git a/docs/java-api/query-dsl/span-not-query.asciidoc b/docs/java-api/query-dsl/span-not-query.asciidoc deleted file mode 100644 index 001c2ca025e6d..0000000000000 --- a/docs/java-api/query-dsl/span-not-query.asciidoc +++ /dev/null @@ -1,11 +0,0 @@ -[[java-query-dsl-span-not-query]] -==== Span Not Query - -See {ref}/query-dsl-span-not-query.html[Span Not Query] - -["source","java",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{query-dsl-test}[span_not] --------------------------------------------------- -<1> span query whose matches are filtered -<2> span query whose matches must not overlap those returned diff --git a/docs/java-api/query-dsl/span-or-query.asciidoc b/docs/java-api/query-dsl/span-or-query.asciidoc deleted file mode 100644 index 787628b59342f..0000000000000 --- a/docs/java-api/query-dsl/span-or-query.asciidoc +++ /dev/null @@ -1,10 +0,0 @@ -[[java-query-dsl-span-or-query]] -==== Span Or Query - -See {ref}/query-dsl-span-or-query.html[Span Or Query] - -["source","java",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{query-dsl-test}[span_or] --------------------------------------------------- -<1> span term queries diff --git a/docs/java-api/query-dsl/span-queries.asciidoc b/docs/java-api/query-dsl/span-queries.asciidoc deleted file mode 100644 index 0ccbe30638c6a..0000000000000 --- a/docs/java-api/query-dsl/span-queries.asciidoc +++ /dev/null @@ -1,65 +0,0 @@ -[[java-span-queries]] -=== Span queries - -Span queries are low-level positional queries which provide expert control -over the order and proximity of the specified terms. These are typically used -to implement very specific queries on legal documents or patents. - -Span queries cannot be mixed with non-span queries (with the exception of the `span_multi` query). - -The queries in this group are: - -<>:: - -The equivalent of the <> but for use with -other span queries. - -<>:: - -Wraps a <>, <>, -<>, <>, -<>, or <> query. - -<>:: - -Accepts another span query whose matches must appear within the first N -positions of the field. - -<>:: - -Accepts multiple span queries whose matches must be within the specified distance of each other, and possibly in the same order. - -<>:: - -Combines multiple span queries -- returns documents which match any of the -specified queries. - -<>:: - -Wraps another span query, and excludes any documents which match that query. - -<>:: - -Accepts a list of span queries, but only returns those spans which also match a second span query. - -<>:: - -The result from a single span query is returned as long is its span falls -within the spans returned by a list of other span queries. - - -include::span-term-query.asciidoc[] - -include::span-multi-term-query.asciidoc[] - -include::span-first-query.asciidoc[] - -include::span-near-query.asciidoc[] - -include::span-or-query.asciidoc[] - -include::span-not-query.asciidoc[] - -include::span-containing-query.asciidoc[] - -include::span-within-query.asciidoc[] diff --git a/docs/java-api/query-dsl/span-term-query.asciidoc b/docs/java-api/query-dsl/span-term-query.asciidoc deleted file mode 100644 index 2bdf9276515dc..0000000000000 --- a/docs/java-api/query-dsl/span-term-query.asciidoc +++ /dev/null @@ -1,11 +0,0 @@ -[[java-query-dsl-span-term-query]] -==== Span Term Query - -See {ref}/query-dsl-span-term-query.html[Span Term Query] - -["source","java",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{query-dsl-test}[span_term] --------------------------------------------------- -<1> field -<2> value diff --git a/docs/java-api/query-dsl/span-within-query.asciidoc b/docs/java-api/query-dsl/span-within-query.asciidoc deleted file mode 100644 index afa527c0b67fb..0000000000000 --- a/docs/java-api/query-dsl/span-within-query.asciidoc +++ /dev/null @@ -1,11 +0,0 @@ -[[java-query-dsl-span-within-query]] -==== Span Within Query - -See {ref}/query-dsl-span-within-query.html[Span Within Query] - -["source","java",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{query-dsl-test}[span_within] --------------------------------------------------- -<1> `big` part -<2> `little` part diff --git a/docs/java-api/query-dsl/special-queries.asciidoc b/docs/java-api/query-dsl/special-queries.asciidoc deleted file mode 100644 index bca3bde3b3f62..0000000000000 --- a/docs/java-api/query-dsl/special-queries.asciidoc +++ /dev/null @@ -1,31 +0,0 @@ -[[java-specialized-queries]] - -=== Specialized queries - -This group contains queries which do not fit into the other groups: - -<>:: - -This query finds documents which are similar to the specified text, document, -or collection of documents. - -<>:: - -This query allows a script to act as a filter. Also see the -<>. - -<>:: - -This query finds percolator queries based on documents. - -<>:: - -A query that accepts other queries as json or yaml string. - -include::mlt-query.asciidoc[] - -include::script-query.asciidoc[] - -include::percolate-query.asciidoc[] - -include::wrapper-query.asciidoc[] diff --git a/docs/java-api/query-dsl/term-level-queries.asciidoc b/docs/java-api/query-dsl/term-level-queries.asciidoc deleted file mode 100644 index 7d3649e372bbd..0000000000000 --- a/docs/java-api/query-dsl/term-level-queries.asciidoc +++ /dev/null @@ -1,77 +0,0 @@ -[[java-term-level-queries]] -=== Term level queries - -While the <> will analyze the query -string before executing, the _term-level queries_ operate on the exact terms -that are stored in the inverted index. - -These queries are usually used for structured data like numbers, dates, and -enums, rather than full text fields. Alternatively, they allow you to craft -low-level queries, foregoing the analysis process. - -The queries in this group are: - -<>:: - - Find documents which contain the exact term specified in the field - specified. - -<>:: - - Find documents which contain any of the exact terms specified in the field - specified. - -<>:: - - Find documents where the field specified contains values (dates, numbers, - or strings) in the range specified. - -<>:: - - Find documents where the field specified contains any non-null value. - -<>:: - - Find documents where the field specified contains terms which being with - the exact prefix specified. - -<>:: - - Find documents where the field specified contains terms which match the - pattern specified, where the pattern supports single character wildcards - (`?`) and multi-character wildcards (`*`) - -<>:: - - Find documents where the field specified contains terms which match the - regular expression specified. - -<>:: - - Find documents where the field specified contains terms which are fuzzily - similar to the specified term. Fuzziness is measured as a - http://en.wikipedia.org/wiki/Damerau%E2%80%93Levenshtein_distance[Levenshtein edit distance] - of 1 or 2. - -<>:: - - Find documents with the specified type and IDs. - - -include::term-query.asciidoc[] - -include::terms-query.asciidoc[] - -include::range-query.asciidoc[] - -include::exists-query.asciidoc[] - -include::prefix-query.asciidoc[] - -include::wildcard-query.asciidoc[] - -include::regexp-query.asciidoc[] - -include::fuzzy-query.asciidoc[] - -include::ids-query.asciidoc[] diff --git a/docs/java-api/query-dsl/term-query.asciidoc b/docs/java-api/query-dsl/term-query.asciidoc deleted file mode 100644 index 7c8549dbed403..0000000000000 --- a/docs/java-api/query-dsl/term-query.asciidoc +++ /dev/null @@ -1,11 +0,0 @@ -[[java-query-dsl-term-query]] -==== Term Query - -See {ref}/query-dsl-term-query.html[Term Query] - -["source","java",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{query-dsl-test}[term] --------------------------------------------------- -<1> field -<2> text diff --git a/docs/java-api/query-dsl/terms-query.asciidoc b/docs/java-api/query-dsl/terms-query.asciidoc deleted file mode 100644 index 587968ba18e77..0000000000000 --- a/docs/java-api/query-dsl/terms-query.asciidoc +++ /dev/null @@ -1,11 +0,0 @@ -[[java-query-dsl-terms-query]] -==== Terms Query - -See {ref}/query-dsl-terms-query.html[Terms Query] - -["source","java",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{query-dsl-test}[terms] --------------------------------------------------- -<1> field -<2> values diff --git a/docs/java-api/query-dsl/wildcard-query.asciidoc b/docs/java-api/query-dsl/wildcard-query.asciidoc deleted file mode 100644 index f9ace822aac9d..0000000000000 --- a/docs/java-api/query-dsl/wildcard-query.asciidoc +++ /dev/null @@ -1,11 +0,0 @@ -[[java-query-dsl-wildcard-query]] -==== Wildcard Query - -See {ref}/query-dsl-wildcard-query.html[Wildcard Query] - -["source","java",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{query-dsl-test}[wildcard] --------------------------------------------------- -<1> field -<2> wildcard expression diff --git a/docs/java-api/query-dsl/wrapper-query.asciidoc b/docs/java-api/query-dsl/wrapper-query.asciidoc deleted file mode 100644 index 3bdf3cc69d30a..0000000000000 --- a/docs/java-api/query-dsl/wrapper-query.asciidoc +++ /dev/null @@ -1,11 +0,0 @@ -[[java-query-dsl-wrapper-query]] -==== Wrapper Query - -See {ref}/query-dsl-wrapper-query.html[Wrapper Query] - -["source","java",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{query-dsl-test}[wrapper] --------------------------------------------------- - -<1> query defined as query builder diff --git a/docs/java-api/search.asciidoc b/docs/java-api/search.asciidoc deleted file mode 100644 index ecf8415f4dcbe..0000000000000 --- a/docs/java-api/search.asciidoc +++ /dev/null @@ -1,250 +0,0 @@ -[[java-search]] -== Search API - -The search API allows one to execute a search query and get back search hits -that match the query. It can be executed across one or more indices and -across one or more types. The query can be provided using the <>. -The body of the search request is built using the `SearchSourceBuilder`. Here is an example: - -[source,java] --------------------------------------------------- -import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.action.search.SearchType; -import org.elasticsearch.index.query.QueryBuilders.*; --------------------------------------------------- - -[source,java] --------------------------------------------------- -SearchResponse response = client.prepareSearch("index1", "index2") - .setSearchType(SearchType.DFS_QUERY_THEN_FETCH) - .setQuery(QueryBuilders.termQuery("multi", "test")) // Query - .setPostFilter(QueryBuilders.rangeQuery("age").from(12).to(18)) // Filter - .setFrom(0).setSize(60).setExplain(true) - .get(); --------------------------------------------------- - -Note that all parameters are optional. Here is the smallest search call -you can write: - -[source,java] --------------------------------------------------- -// MatchAll on the whole cluster with all default options -SearchResponse response = client.prepareSearch().get(); --------------------------------------------------- - -NOTE: Although the Java API defines the additional search types QUERY_AND_FETCH and - DFS_QUERY_AND_FETCH, these modes are internal optimizations and should not - be specified explicitly by users of the API. - -For more information on the search operation, check out the REST -{ref}/search.html[search] docs. - - -[[java-search-scrolling]] -=== Using scrolls in Java - -Read the {ref}/search-request-scroll.html[scroll documentation] -first! - -[source,java] --------------------------------------------------- -import static org.elasticsearch.index.query.QueryBuilders.*; - -QueryBuilder qb = termQuery("multi", "test"); - -SearchResponse scrollResp = client.prepareSearch(test) - .addSort(FieldSortBuilder.DOC_FIELD_NAME, SortOrder.ASC) - .setScroll(new TimeValue(60000)) - .setQuery(qb) - .setSize(100).get(); //max of 100 hits will be returned for each scroll -//Scroll until no hits are returned -do { - for (SearchHit hit : scrollResp.getHits().getHits()) { - //Handle the hit... - } - - scrollResp = client.prepareSearchScroll(scrollResp.getScrollId()).setScroll(new TimeValue(60000)).execute().actionGet(); -} while(scrollResp.getHits().getHits().length != 0); // Zero hits mark the end of the scroll and the while loop. --------------------------------------------------- - -[[java-search-msearch]] -=== MultiSearch API - -See {ref}/search-multi-search.html[MultiSearch API Query] -documentation - -[source,java] --------------------------------------------------- -SearchRequestBuilder srb1 = client - .prepareSearch().setQuery(QueryBuilders.queryStringQuery("elasticsearch")).setSize(1); -SearchRequestBuilder srb2 = client - .prepareSearch().setQuery(QueryBuilders.matchQuery("name", "kimchy")).setSize(1); - -MultiSearchResponse sr = client.prepareMultiSearch() - .add(srb1) - .add(srb2) - .get(); - -// You will get all individual responses from MultiSearchResponse#getResponses() -long nbHits = 0; -for (MultiSearchResponse.Item item : sr.getResponses()) { - SearchResponse response = item.getResponse(); - nbHits += response.getHits().getTotalHits().value; -} --------------------------------------------------- - - -[[java-search-aggs]] -=== Using Aggregations - -The following code shows how to add two aggregations within your search: - -[source,java] --------------------------------------------------- -SearchResponse sr = client.prepareSearch() - .setQuery(QueryBuilders.matchAllQuery()) - .addAggregation( - AggregationBuilders.terms("agg1").field("field") - ) - .addAggregation( - AggregationBuilders.dateHistogram("agg2") - .field("birth") - .calendarInterval(DateHistogramInterval.YEAR) - ) - .get(); - -// Get your facet results -Terms agg1 = sr.getAggregations().get("agg1"); -Histogram agg2 = sr.getAggregations().get("agg2"); --------------------------------------------------- - -See <> -documentation for details. - - -[[java-search-terminate-after]] -=== Terminate After - -The maximum number of documents to collect for each shard, upon reaching which the query execution will terminate early. -If set, you will be able to check if the operation terminated early by asking for `isTerminatedEarly()` in the -`SearchResponse` object: - -[source,java] --------------------------------------------------- -SearchResponse sr = client.prepareSearch(INDEX) - .setTerminateAfter(1000) <1> - .get(); - -if (sr.isTerminatedEarly()) { - // We finished early -} --------------------------------------------------- -<1> Finish after 1000 docs - -[[java-search-template]] -=== Search Template - -See {ref}/search-template.html[Search Template] documentation - -Define your template parameters as a `Map`: - -[source,java] --------------------------------------------------- -Map template_params = new HashMap<>(); -template_params.put("param_gender", "male"); --------------------------------------------------- - -You can use your stored search templates in `config/scripts`. -For example, if you have a file named `config/scripts/template_gender.mustache` containing: - -[source,js] --------------------------------------------------- -{ - "query" : { - "match" : { - "gender" : "{{param_gender}}" - } - } -} --------------------------------------------------- -// NOTCONSOLE - -Create your search template request: - -[source,java] --------------------------------------------------- -SearchResponse sr = new SearchTemplateRequestBuilder(client) - .setScript("template_gender") <1> - .setScriptType(ScriptService.ScriptType.FILE) <2> - .setScriptParams(template_params) <3> - .setRequest(new SearchRequest()) <4> - .get() <5> - .getResponse(); <6> --------------------------------------------------- -<1> template name -<2> template stored on disk in `gender_template.mustache` -<3> parameters -<4> set the execution context (ie. define the index name here) -<5> execute and get the template response -<6> get from the template response the search response itself - -You can also store your template in the cluster state: - -[source,java] --------------------------------------------------- -client.admin().cluster().preparePutStoredScript() - .setScriptLang("mustache") - .setId("template_gender") - .setSource(new BytesArray( - "{\n" + - " \"query\" : {\n" + - " \"match\" : {\n" + - " \"gender\" : \"{{param_gender}}\"\n" + - " }\n" + - " }\n" + - "}")).get(); --------------------------------------------------- - -To execute a stored templates, use `ScriptService.ScriptType.STORED`: - -[source,java] --------------------------------------------------- -SearchResponse sr = new SearchTemplateRequestBuilder(client) - .setScript("template_gender") <1> - .setScriptType(ScriptType.STORED) <2> - .setScriptParams(template_params) <3> - .setRequest(new SearchRequest()) <4> - .get() <5> - .getResponse(); <6> --------------------------------------------------- -<1> template name -<2> template stored in the cluster state -<3> parameters -<4> set the execution context (ie. define the index name here) -<5> execute and get the template response -<6> get from the template response the search response itself - -You can also execute inline templates: - -[source,java] --------------------------------------------------- -sr = new SearchTemplateRequestBuilder(client) - .setScript("{\n" + <1> - " \"query\" : {\n" + - " \"match\" : {\n" + - " \"gender\" : \"{{param_gender}}\"\n" + - " }\n" + - " }\n" + - "}") - .setScriptType(ScriptType.INLINE) <2> - .setScriptParams(template_params) <3> - .setRequest(new SearchRequest()) <4> - .get() <5> - .getResponse(); <6> --------------------------------------------------- -<1> template's body -<2> template is passed inline -<3> parameters -<4> set the execution context (ie. define the index name here) -<5> execute and get the template response -<6> get from the template response the search response itself diff --git a/docs/reference/modules/network.asciidoc b/docs/reference/modules/network.asciidoc index e60b56fe12d74..dc36d0965628e 100644 --- a/docs/reference/modules/network.asciidoc +++ b/docs/reference/modules/network.asciidoc @@ -173,11 +173,10 @@ settings, but may be further configured independently: TCP Transport:: -Used for communication between nodes in the cluster, by the Java -{javaclient}/transport-client.html[Transport client]. +Used for communication between nodes in the cluster. See the <> for more information. HTTP:: -Exposes the JSON-over-HTTP interface used by all clients other than the Java -clients. See the <> for more information. +Exposes the JSON-over-HTTP interface used by all clients. +See the <> for more information. diff --git a/docs/reference/modules/node.asciidoc b/docs/reference/modules/node.asciidoc index 031138dada3f1..49d05e289a3f6 100644 --- a/docs/reference/modules/node.asciidoc +++ b/docs/reference/modules/node.asciidoc @@ -8,9 +8,8 @@ then you have a cluster of one node. Every node in the cluster can handle <> and <> traffic by default. The transport layer -is used exclusively for communication between nodes and the -{javaclient}/transport-client.html[Java `TransportClient`]; the HTTP layer is -used only by external REST clients. +is used exclusively for communication between nodes; the HTTP layer is +used by REST clients. All nodes know about all the other nodes in the cluster and can forward client requests to the appropriate node. Besides that, each node serves one or more diff --git a/docs/reference/redirects.asciidoc b/docs/reference/redirects.asciidoc index 6f68d781f4856..077d4f15f84c7 100644 --- a/docs/reference/redirects.asciidoc +++ b/docs/reference/redirects.asciidoc @@ -82,15 +82,13 @@ The `_uid` field has been removed in favour of the <> or the -{javaclient}/index.html[Java API]. +interface over <>. [role="exclude",id="modules-thrift"] === Thrift The `thrift` transport is no longer supported. Instead use the REST -interface over <> or the -{javaclient}/index.html[Java API]. +interface over <>. // QUERY DSL @@ -610,4 +608,4 @@ The `TransportClient` is deprecated in favour of the {java-rest}/java-rest-high.html[Java High Level REST Client] and was removed in Elasticsearch 8.0. The {java-rest}/java-rest-high-level-migration.html[migration guide] describes all -the steps needed to migrate. \ No newline at end of file +the steps needed to migrate. From 8e5812c4b320faa312c7fc946fbb3c96ebb6d275 Mon Sep 17 00:00:00 2001 From: Tal Levy Date: Thu, 30 May 2019 15:33:53 -0700 Subject: [PATCH 207/224] remove v6.5.x and v6.6.x version constants (#42130) related to refactoring initiative #41164. --- .../upgrades/FullClusterRestartIT.java | 2 +- .../elasticsearch/upgrades/RecoveryIT.java | 2 +- .../main/java/org/elasticsearch/Version.java | 32 -------- .../ClusterFormationFailureHelper.java | 7 +- .../cluster/metadata/IndexMetaData.java | 4 +- .../org/elasticsearch/index/IndexService.java | 5 +- .../elasticsearch/index/IndexSettings.java | 2 +- .../index/mapper/BaseGeoShapeFieldMapper.java | 2 +- .../mapper/LegacyGeoShapeFieldMapper.java | 5 +- .../index/mapper/MapperService.java | 4 +- .../index/mapper/ParseContext.java | 17 +--- .../index/similarity/SimilarityService.java | 28 ++----- .../search/fetch/FetchPhase.java | 38 +++------ .../search/sort/FieldSortBuilder.java | 4 - .../search/sort/GeoDistanceSortBuilder.java | 5 -- .../search/sort/ScriptSortBuilder.java | 5 -- .../java/org/elasticsearch/VersionTests.java | 8 +- .../state/ClusterStateRequestTests.java | 6 +- .../ClusterFormationFailureHelperTests.java | 8 +- .../coordination/JoinTaskExecutorTests.java | 3 +- .../index/IndexSettingsTests.java | 15 +--- .../query/GeoShapeQueryBuilderTests.java | 2 +- .../xpack/ccr/action/ShardFollowTask.java | 2 +- .../action/AutoFollowCoordinatorTests.java | 11 +-- .../TransportResumeFollowActionTests.java | 11 --- .../xpack/core/ccr/AutoFollowMetadata.java | 2 +- .../xpack/core/ccr/AutoFollowStats.java | 15 +--- .../xpack/core/indexing/IndexerJobStats.java | 29 +++---- .../IndexLifecycleMetadata.java | 2 +- .../xpack/core/ml/action/OpenJobAction.java | 8 +- .../core/ml/action/StartDatafeedAction.java | 12 +-- .../core/ml/datafeed/DatafeedConfig.java | 23 +----- .../core/ml/datafeed/DatafeedUpdate.java | 10 +-- .../xpack/core/ml/job/config/JobUpdate.java | 10 +-- .../core/ml/job/results/AnomalyRecord.java | 9 +-- .../xpack/core/ml/job/results/Bucket.java | 9 --- .../rollup/action/StopRollupJobAction.java | 13 +-- .../action/token/CreateTokenRequest.java | 6 -- .../action/token/CreateTokenResponse.java | 9 +-- .../core/security/user/APMSystemUser.java | 2 - .../security/user/RemoteMonitoringUser.java | 3 - .../IndexLifecycleMetadataTests.java | 4 +- .../ml/MlConfigMigrationEligibilityCheck.java | 10 --- .../process/normalizer/NormalizerResult.java | 9 +-- ...lConfigMigrationEligibilityCheckTests.java | 81 ------------------- .../MlMigrationFullClusterRestartIT.java | 53 ------------ 46 files changed, 96 insertions(+), 451 deletions(-) diff --git a/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java b/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java index 190523a3bc7c6..eb7078b2b189c 100644 --- a/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java +++ b/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java @@ -917,7 +917,7 @@ public void testSoftDeletes() throws Exception { mappingsAndSettings.startObject("settings"); mappingsAndSettings.field("number_of_shards", 1); mappingsAndSettings.field("number_of_replicas", 1); - if (getOldClusterVersion().onOrAfter(Version.V_6_5_0) && randomBoolean()) { + if (randomBoolean()) { mappingsAndSettings.field("soft_deletes.enabled", true); } mappingsAndSettings.endObject(); diff --git a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RecoveryIT.java b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RecoveryIT.java index 49bd5bb3585b6..602a31288ef46 100644 --- a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RecoveryIT.java +++ b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RecoveryIT.java @@ -356,7 +356,7 @@ public void testRecoveryWithSoftDeletes() throws Exception { // before timing out .put(INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), "100ms") .put(SETTING_ALLOCATION_MAX_RETRY.getKey(), "0"); // fail faster - if (getNodeId(v -> v.onOrAfter(Version.V_6_5_0)) != null && randomBoolean()) { + if (randomBoolean()) { settings.put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true); } createIndex(index, settings.build()); diff --git a/server/src/main/java/org/elasticsearch/Version.java b/server/src/main/java/org/elasticsearch/Version.java index 844b963e9aa83..ec79c17a834f1 100644 --- a/server/src/main/java/org/elasticsearch/Version.java +++ b/server/src/main/java/org/elasticsearch/Version.java @@ -46,22 +46,6 @@ public class Version implements Comparable, ToXContentFragment { */ public static final int V_EMPTY_ID = 0; public static final Version V_EMPTY = new Version(V_EMPTY_ID, org.apache.lucene.util.Version.LATEST); - public static final int V_6_5_0_ID = 6050099; - public static final Version V_6_5_0 = new Version(V_6_5_0_ID, org.apache.lucene.util.Version.LUCENE_7_5_0); - public static final int V_6_5_1_ID = 6050199; - public static final Version V_6_5_1 = new Version(V_6_5_1_ID, org.apache.lucene.util.Version.LUCENE_7_5_0); - public static final int V_6_5_2_ID = 6050299; - public static final Version V_6_5_2 = new Version(V_6_5_2_ID, org.apache.lucene.util.Version.LUCENE_7_5_0); - public static final int V_6_5_3_ID = 6050399; - public static final Version V_6_5_3 = new Version(V_6_5_3_ID, org.apache.lucene.util.Version.LUCENE_7_5_0); - public static final int V_6_5_4_ID = 6050499; - public static final Version V_6_5_4 = new Version(V_6_5_4_ID, org.apache.lucene.util.Version.LUCENE_7_5_0); - public static final int V_6_6_0_ID = 6060099; - public static final Version V_6_6_0 = new Version(V_6_6_0_ID, org.apache.lucene.util.Version.LUCENE_7_6_0); - public static final int V_6_6_1_ID = 6060199; - public static final Version V_6_6_1 = new Version(V_6_6_1_ID, org.apache.lucene.util.Version.LUCENE_7_6_0); - public static final int V_6_6_2_ID = 6060299; - public static final Version V_6_6_2 = new Version(V_6_6_2_ID, org.apache.lucene.util.Version.LUCENE_7_6_0); public static final int V_6_8_0_ID = 6080099; public static final Version V_6_8_0 = new Version(V_6_8_0_ID, org.apache.lucene.util.Version.LUCENE_7_7_0); public static final int V_6_8_1_ID = 6080199; @@ -116,22 +100,6 @@ public static Version fromId(int id) { return V_6_8_1; case V_6_8_0_ID: return V_6_8_0; - case V_6_6_2_ID: - return V_6_6_2; - case V_6_6_1_ID: - return V_6_6_1; - case V_6_6_0_ID: - return V_6_6_0; - case V_6_5_4_ID: - return V_6_5_4; - case V_6_5_3_ID: - return V_6_5_3; - case V_6_5_2_ID: - return V_6_5_2; - case V_6_5_1_ID: - return V_6_5_1; - case V_6_5_0_ID: - return V_6_5_0; case V_EMPTY_ID: return V_EMPTY; default: diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/ClusterFormationFailureHelper.java b/server/src/main/java/org/elasticsearch/cluster/coordination/ClusterFormationFailureHelper.java index a707a9ae980b7..b5bae4bc3f0d4 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/ClusterFormationFailureHelper.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/ClusterFormationFailureHelper.java @@ -20,7 +20,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.coordination.CoordinationMetaData.VotingConfiguration; import org.elasticsearch.cluster.coordination.CoordinationState.VoteCollection; @@ -150,8 +149,6 @@ String getDescription() { if (clusterState.getLastAcceptedConfiguration().isEmpty()) { - // TODO handle the case that there is a 6.x node around here, when rolling upgrades are supported - final String bootstrappingDescription; if (INITIAL_MASTER_NODES_SETTING.get(Settings.EMPTY).equals(INITIAL_MASTER_NODES_SETTING.get(settings))) { @@ -164,8 +161,8 @@ String getDescription() { } return String.format(Locale.ROOT, - "master not discovered yet, this node has not previously joined a bootstrapped (v%d+) cluster, and %s: %s", - Version.V_6_6_0.major + 1, bootstrappingDescription, discoveryStateIgnoringQuorum); + "master not discovered yet, this node has not previously joined a bootstrapped cluster, and %s: %s", + bootstrappingDescription, discoveryStateIgnoringQuorum); } assert clusterState.getLastCommittedConfiguration().isEmpty() == false; diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java index 412e16e14a3a5..3598753f80d9c 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java @@ -1408,10 +1408,10 @@ public static IndexMetaData fromXContent(XContentParser parser) throws IOExcepti throw new IllegalArgumentException("Unexpected token " + token); } } - if (Assertions.ENABLED && Version.indexCreated(builder.settings).onOrAfter(Version.V_6_5_0)) { + if (Assertions.ENABLED) { assert mappingVersion : "mapping version should be present for indices created on or after 6.5.0"; } - if (Assertions.ENABLED && Version.indexCreated(builder.settings).onOrAfter(Version.V_6_5_0)) { + if (Assertions.ENABLED) { assert settingsVersion : "settings version should be present for indices created on or after 6.5.0"; } if (Assertions.ENABLED && Version.indexCreated(builder.settings).onOrAfter(Version.V_7_2_0)) { diff --git a/server/src/main/java/org/elasticsearch/index/IndexService.java b/server/src/main/java/org/elasticsearch/index/IndexService.java index a7751bc765552..c186a725c82c1 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexService.java +++ b/server/src/main/java/org/elasticsearch/index/IndexService.java @@ -26,7 +26,6 @@ import org.apache.lucene.store.Directory; import org.apache.lucene.util.Accountable; import org.elasticsearch.Assertions; -import org.elasticsearch.Version; import org.elasticsearch.client.Client; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.routing.ShardRouting; @@ -660,9 +659,7 @@ public IndexMetaData getMetaData() { public synchronized void updateMetaData(final IndexMetaData currentIndexMetaData, final IndexMetaData newIndexMetaData) { final boolean updateIndexMetaData = indexSettings.updateIndexMetaData(newIndexMetaData); - if (Assertions.ENABLED - && currentIndexMetaData != null - && currentIndexMetaData.getCreationVersion().onOrAfter(Version.V_6_5_0)) { + if (Assertions.ENABLED && currentIndexMetaData != null) { final long currentSettingsVersion = currentIndexMetaData.getSettingsVersion(); final long newSettingsVersion = newIndexMetaData.getSettingsVersion(); if (currentSettingsVersion == newSettingsVersion) { diff --git a/server/src/main/java/org/elasticsearch/index/IndexSettings.java b/server/src/main/java/org/elasticsearch/index/IndexSettings.java index d4cc38f0b959c..ebcf362b1c9e4 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexSettings.java +++ b/server/src/main/java/org/elasticsearch/index/IndexSettings.java @@ -458,7 +458,7 @@ public IndexSettings(final IndexMetaData indexMetaData, final Settings nodeSetti generationThresholdSize = scopedSettings.get(INDEX_TRANSLOG_GENERATION_THRESHOLD_SIZE_SETTING); mergeSchedulerConfig = new MergeSchedulerConfig(this); gcDeletesInMillis = scopedSettings.get(INDEX_GC_DELETES_SETTING).getMillis(); - softDeleteEnabled = version.onOrAfter(Version.V_6_5_0) && scopedSettings.get(INDEX_SOFT_DELETES_SETTING); + softDeleteEnabled = scopedSettings.get(INDEX_SOFT_DELETES_SETTING); softDeleteRetentionOperations = scopedSettings.get(INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING); retentionLeaseMillis = scopedSettings.get(INDEX_SOFT_DELETES_RETENTION_LEASE_PERIOD_SETTING).millis(); warmerEnabled = scopedSettings.get(INDEX_WARMER_ENABLED_SETTING); diff --git a/server/src/main/java/org/elasticsearch/index/mapper/BaseGeoShapeFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/BaseGeoShapeFieldMapper.java index 74892bf7d516c..20151f301d791 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/BaseGeoShapeFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/BaseGeoShapeFieldMapper.java @@ -190,7 +190,7 @@ public Mapper.Builder parse(String name, Map node, ParserContext } } final Builder builder; - if (parsedDeprecatedParams || parserContext.indexVersionCreated().before(Version.V_6_6_0)) { + if (parsedDeprecatedParams) { // Legacy index-based shape builder = new LegacyGeoShapeFieldMapper.Builder(name, deprecatedParameters); } else { diff --git a/server/src/main/java/org/elasticsearch/index/mapper/LegacyGeoShapeFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/LegacyGeoShapeFieldMapper.java index 9c8726a498a3c..c4996eab901dd 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/LegacyGeoShapeFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/LegacyGeoShapeFieldMapper.java @@ -208,8 +208,6 @@ private void setupFieldTypeDeprecatedParameters(BuilderContext context) { } if (deprecatedParameters.tree != null) { ft.setTree(deprecatedParameters.tree); - } else if (context.indexCreatedVersion().before(Version.V_6_6_0)) { - ft.setTree(DeprecatedParameters.PrefixTrees.GEOHASH); } if (deprecatedParameters.treeLevels != null) { ft.setTreeLevels(deprecatedParameters.treeLevels); @@ -527,8 +525,7 @@ protected void doXContentBody(XContentBuilder builder, boolean includeDefaults, super.doXContentBody(builder, includeDefaults, params); if (includeDefaults - || (fieldType().tree().equals(indexCreatedVersion.onOrAfter(Version.V_6_6_0) ? - DeprecatedParameters.Defaults.TREE : DeprecatedParameters.PrefixTrees.GEOHASH)) == false) { + || (fieldType().tree().equals(DeprecatedParameters.Defaults.TREE)) == false) { builder.field(DeprecatedParameters.Names.TREE.getPreferredName(), fieldType().tree()); } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java b/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java index 487a6ac4789e3..fc33dcafc63b6 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java @@ -260,9 +260,7 @@ private void assertMappingVersion( final IndexMetaData currentIndexMetaData, final IndexMetaData newIndexMetaData, final Map updatedEntries) { - if (Assertions.ENABLED - && currentIndexMetaData != null - && currentIndexMetaData.getCreationVersion().onOrAfter(Version.V_6_5_0)) { + if (Assertions.ENABLED && currentIndexMetaData != null) { if (currentIndexMetaData.getMappingVersion() == newIndexMetaData.getMappingVersion()) { // if the mapping version is unchanged, then there should not be any updates and all mappings should be the same assert updatedEntries.isEmpty() : updatedEntries; diff --git a/server/src/main/java/org/elasticsearch/index/mapper/ParseContext.java b/server/src/main/java/org/elasticsearch/index/mapper/ParseContext.java index 4cfd5be2afed3..b2c536a900e33 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/ParseContext.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/ParseContext.java @@ -24,7 +24,6 @@ import org.apache.lucene.document.Field; import org.apache.lucene.index.IndexableField; import org.apache.lucene.util.BytesRef; -import org.elasticsearch.Version; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.IndexSettings; @@ -458,18 +457,10 @@ public Iterable nonRootDocuments() { void postParse() { if (documents.size() > 1) { docsReversed = true; - if (indexSettings.getIndexVersionCreated().onOrAfter(Version.V_6_5_0)) { - /** - * For indices created on or after {@link Version#V_6_5_0} we preserve the order - * of the children while ensuring that parents appear after them. - */ - List newDocs = reorderParent(documents); - documents.clear(); - documents.addAll(newDocs); - } else { - // reverse the order of docs for nested docs support, parent should be last - Collections.reverse(documents); - } + // We preserve the order of the children while ensuring that parents appear after them. + List newDocs = reorderParent(documents); + documents.clear(); + documents.addAll(newDocs); } } diff --git a/server/src/main/java/org/elasticsearch/index/similarity/SimilarityService.java b/server/src/main/java/org/elasticsearch/index/similarity/SimilarityService.java index 57cbc961aacc0..3fe20a1f3b26e 100644 --- a/server/src/main/java/org/elasticsearch/index/similarity/SimilarityService.java +++ b/server/src/main/java/org/elasticsearch/index/similarity/SimilarityService.java @@ -212,9 +212,8 @@ private static void validateScoresArePositive(Version indexCreatedVersion, Simil for (int freq = 1; freq <= 10; ++freq) { float score = scorer.score(freq, norm); if (score < 0) { - fail(indexCreatedVersion, "Similarities should not return negative scores:\n" + - scorer.explain(Explanation.match(freq, "term freq"), norm)); - break; + throw new IllegalArgumentException("Similarities should not return negative scores:\n" + + scorer.explain(Explanation.match(freq, "term freq"), norm)); } } } @@ -230,10 +229,9 @@ private static void validateScoresDoNotDecreaseWithFreq(Version indexCreatedVers for (int freq = 1; freq <= 10; ++freq) { float score = scorer.score(freq, norm); if (score < previousScore) { - fail(indexCreatedVersion, "Similarity scores should not decrease when term frequency increases:\n" + - scorer.explain(Explanation.match(freq - 1, "term freq"), norm) + "\n" + - scorer.explain(Explanation.match(freq, "term freq"), norm)); - break; + throw new IllegalArgumentException("Similarity scores should not decrease when term frequency increases:\n" + + scorer.explain(Explanation.match(freq - 1, "term freq"), norm) + "\n" + + scorer.explain(Explanation.match(freq, "term freq"), norm)); } previousScore = score; } @@ -256,22 +254,12 @@ private static void validateScoresDoNotIncreaseWithNorm(Version indexCreatedVers } float score = scorer.score(1, norm); if (score > previousScore) { - fail(indexCreatedVersion, "Similarity scores should not increase when norm increases:\n" + - scorer.explain(Explanation.match(1, "term freq"), norm - 1) + "\n" + - scorer.explain(Explanation.match(1, "term freq"), norm)); - break; + throw new IllegalArgumentException("Similarity scores should not increase when norm increases:\n" + + scorer.explain(Explanation.match(1, "term freq"), norm - 1) + "\n" + + scorer.explain(Explanation.match(1, "term freq"), norm)); } previousScore = score; previousNorm = norm; } } - - private static void fail(Version indexCreatedVersion, String message) { - if (indexCreatedVersion.onOrAfter(Version.V_7_0_0)) { - throw new IllegalArgumentException(message); - } else if (indexCreatedVersion.onOrAfter(Version.V_6_5_0)) { - deprecationLogger.deprecated(message); - } - } - } diff --git a/server/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java b/server/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java index c23be0f4cb994..196e74e0bbabd 100644 --- a/server/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java +++ b/server/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java @@ -31,7 +31,6 @@ import org.apache.lucene.search.Weight; import org.apache.lucene.util.BitSet; import org.elasticsearch.ExceptionsHelper; -import org.elasticsearch.Version; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.document.DocumentField; @@ -383,32 +382,19 @@ private SearchHit.NestedIdentity getInternalNestedIdentity(SearchContext context BitSet parentBits = context.bitsetFilterCache().getBitSetProducer(parentFilter).getBitSet(subReaderContext); int offset = 0; - if (indexSettings.getIndexVersionCreated().onOrAfter(Version.V_6_5_0)) { - /** - * Starts from the previous parent and finds the offset of the - * nestedSubDocID within the nested children. Nested documents - * are indexed in the same order than in the source array so the offset - * of the nested child is the number of nested document with the same parent - * that appear before him. - */ - int previousParent = parentBits.prevSetBit(currentParent); - for (int docId = childIter.advance(previousParent + 1); docId < nestedSubDocId && docId != DocIdSetIterator.NO_MORE_DOCS; - docId = childIter.nextDoc()) { - offset++; - } - currentParent = nestedSubDocId; - } else { - /** - * Nested documents are in reverse order in this version so we start from the current nested document - * and find the number of documents with the same parent that appear after it. - */ - int nextParent = parentBits.nextSetBit(currentParent); - for (int docId = childIter.advance(currentParent + 1); docId < nextParent && docId != DocIdSetIterator.NO_MORE_DOCS; - docId = childIter.nextDoc()) { - offset++; - } - currentParent = nextParent; + /** + * Starts from the previous parent and finds the offset of the + * nestedSubDocID within the nested children. Nested documents + * are indexed in the same order than in the source array so the offset + * of the nested child is the number of nested document with the same parent + * that appear before him. + */ + int previousParent = parentBits.prevSetBit(currentParent); + for (int docId = childIter.advance(previousParent + 1); docId < nestedSubDocId && docId != DocIdSetIterator.NO_MORE_DOCS; + docId = childIter.nextDoc()) { + offset++; } + currentParent = nestedSubDocId; current = nestedObjectMapper = nestedParentObjectMapper; int currentPrefix = current == null ? 0 : current.name().length() + 1; nestedIdentity = new SearchHit.NestedIdentity(originalName.substring(currentPrefix), offset, nestedIdentity); diff --git a/server/src/main/java/org/elasticsearch/search/sort/FieldSortBuilder.java b/server/src/main/java/org/elasticsearch/search/sort/FieldSortBuilder.java index 8abd4b9f40d5c..6598d32bc2ca8 100644 --- a/server/src/main/java/org/elasticsearch/search/sort/FieldSortBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/sort/FieldSortBuilder.java @@ -397,10 +397,6 @@ public SortFieldAndFormat build(QueryShardContext context) throws IOException { Nested nested = null; if (isUnmapped == false) { if (nestedSort != null) { - if (context.indexVersionCreated().before(Version.V_6_5_0) && nestedSort.getMaxChildren() != Integer.MAX_VALUE) { - throw new QueryShardException(context, - "max_children is only supported on v6.5.0 or higher"); - } if (nestedSort.getNestedSort() != null && nestedSort.getMaxChildren() != Integer.MAX_VALUE) { throw new QueryShardException(context, "max_children is only supported on last level of nested sort"); diff --git a/server/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortBuilder.java b/server/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortBuilder.java index 116a54a71103b..73877b3faa309 100644 --- a/server/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortBuilder.java @@ -28,7 +28,6 @@ import org.apache.lucene.search.SortField; import org.apache.lucene.util.BitSet; import org.elasticsearch.ElasticsearchParseException; -import org.elasticsearch.Version; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.geo.GeoDistance; @@ -613,10 +612,6 @@ public SortFieldAndFormat build(QueryShardContext context) throws IOException { final Nested nested; if (nestedSort != null) { - if (context.indexVersionCreated().before(Version.V_6_5_0) && nestedSort.getMaxChildren() != Integer.MAX_VALUE) { - throw new QueryShardException(context, - "max_children is only supported on v6.5.0 or higher"); - } if (nestedSort.getNestedSort() != null && nestedSort.getMaxChildren() != Integer.MAX_VALUE) { throw new QueryShardException(context, "max_children is only supported on last level of nested sort"); diff --git a/server/src/main/java/org/elasticsearch/search/sort/ScriptSortBuilder.java b/server/src/main/java/org/elasticsearch/search/sort/ScriptSortBuilder.java index d858f0f7c6ac3..21de011e276b7 100644 --- a/server/src/main/java/org/elasticsearch/search/sort/ScriptSortBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/sort/ScriptSortBuilder.java @@ -26,7 +26,6 @@ import org.apache.lucene.search.SortField; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefBuilder; -import org.elasticsearch.Version; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -312,10 +311,6 @@ public SortFieldAndFormat build(QueryShardContext context) throws IOException { final Nested nested; if (nestedSort != null) { - if (context.indexVersionCreated().before(Version.V_6_5_0) && nestedSort.getMaxChildren() != Integer.MAX_VALUE) { - throw new QueryShardException(context, - "max_children is only supported on v6.5.0 or higher"); - } if (nestedSort.getNestedSort() != null && nestedSort.getMaxChildren() != Integer.MAX_VALUE) { throw new QueryShardException(context, "max_children is only supported on last level of nested sort"); diff --git a/server/src/test/java/org/elasticsearch/VersionTests.java b/server/src/test/java/org/elasticsearch/VersionTests.java index e5149b9bce515..eb57964af4dc4 100644 --- a/server/src/test/java/org/elasticsearch/VersionTests.java +++ b/server/src/test/java/org/elasticsearch/VersionTests.java @@ -181,7 +181,7 @@ public void testMinCompatVersion() { assertThat(Version.fromString("5.3.0").minimumCompatibilityVersion(), equalTo(major5x)); Version major56x = Version.fromString("5.6.0"); - assertThat(Version.V_6_5_0.minimumCompatibilityVersion(), equalTo(major56x)); + assertThat(Version.fromString("6.4.0").minimumCompatibilityVersion(), equalTo(major56x)); assertThat(Version.fromString("6.3.1").minimumCompatibilityVersion(), equalTo(major56x)); // from 7.0 on we are supporting the latest minor of the previous major... this might fail once we add a new version ie. 5.x is @@ -345,10 +345,10 @@ public static void assertUnknownVersion(Version version) { public void testIsCompatible() { assertTrue(isCompatible(Version.CURRENT, Version.CURRENT.minimumCompatibilityVersion())); - assertFalse(isCompatible(Version.V_6_6_0, Version.V_7_0_0)); - assertTrue(isCompatible(Version.V_6_8_0, Version.V_7_0_0)); + assertFalse(isCompatible(Version.fromString("6.7.0"), Version.fromString("7.0.0"))); + assertTrue(isCompatible(Version.fromString("6.8.0"), Version.fromString("7.0.0"))); assertFalse(isCompatible(Version.fromId(2000099), Version.V_7_0_0)); - assertFalse(isCompatible(Version.fromId(2000099), Version.V_6_5_0)); + assertFalse(isCompatible(Version.fromId(2000099), Version.fromString("6.5.0"))); final Version currentMajorVersion = Version.fromId(Version.CURRENT.major * 1000000 + 99); final Version currentOrNextMajorVersion; diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/state/ClusterStateRequestTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/state/ClusterStateRequestTests.java index f2214f4ba81fa..b219e208a6049 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/state/ClusterStateRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/state/ClusterStateRequestTests.java @@ -67,10 +67,8 @@ public void testSerialization() throws Exception { assertThat(deserializedCSRequest.blocks(), equalTo(clusterStateRequest.blocks())); assertThat(deserializedCSRequest.indices(), equalTo(clusterStateRequest.indices())); assertOptionsMatch(deserializedCSRequest.indicesOptions(), clusterStateRequest.indicesOptions()); - if (testVersion.onOrAfter(Version.V_6_6_0)) { - assertThat(deserializedCSRequest.waitForMetaDataVersion(), equalTo(clusterStateRequest.waitForMetaDataVersion())); - assertThat(deserializedCSRequest.waitForTimeout(), equalTo(clusterStateRequest.waitForTimeout())); - } + assertThat(deserializedCSRequest.waitForMetaDataVersion(), equalTo(clusterStateRequest.waitForMetaDataVersion())); + assertThat(deserializedCSRequest.waitForTimeout(), equalTo(clusterStateRequest.waitForTimeout())); } } diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/ClusterFormationFailureHelperTests.java b/server/src/test/java/org/elasticsearch/cluster/coordination/ClusterFormationFailureHelperTests.java index 16740b0761ff5..2f36d315a24e9 100644 --- a/server/src/test/java/org/elasticsearch/cluster/coordination/ClusterFormationFailureHelperTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/ClusterFormationFailureHelperTests.java @@ -163,28 +163,28 @@ public void testDescriptionBeforeBootstrapping() { .nodes(DiscoveryNodes.builder().add(localNode).localNodeId(localNode.getId())).build(); assertThat(new ClusterFormationState(Settings.EMPTY, clusterState, emptyList(), emptyList(), 1L).getDescription(), - is("master not discovered yet, this node has not previously joined a bootstrapped (v7+) cluster, and " + + is("master not discovered yet, this node has not previously joined a bootstrapped cluster, and " + "[cluster.initial_master_nodes] is empty on this node: have discovered []; " + "discovery will continue using [] from hosts providers and [" + localNode + "] from last-known cluster state; node term 1, last-accepted version 7 in term 4")); final TransportAddress otherAddress = buildNewFakeTransportAddress(); assertThat(new ClusterFormationState(Settings.EMPTY, clusterState, singletonList(otherAddress), emptyList(), 2L).getDescription(), - is("master not discovered yet, this node has not previously joined a bootstrapped (v7+) cluster, and " + + is("master not discovered yet, this node has not previously joined a bootstrapped cluster, and " + "[cluster.initial_master_nodes] is empty on this node: have discovered []; " + "discovery will continue using [" + otherAddress + "] from hosts providers and [" + localNode + "] from last-known cluster state; node term 2, last-accepted version 7 in term 4")); final DiscoveryNode otherNode = new DiscoveryNode("other", buildNewFakeTransportAddress(), Version.CURRENT); assertThat(new ClusterFormationState(Settings.EMPTY, clusterState, emptyList(), singletonList(otherNode), 3L).getDescription(), - is("master not discovered yet, this node has not previously joined a bootstrapped (v7+) cluster, and " + + is("master not discovered yet, this node has not previously joined a bootstrapped cluster, and " + "[cluster.initial_master_nodes] is empty on this node: have discovered [" + otherNode + "]; " + "discovery will continue using [] from hosts providers and [" + localNode + "] from last-known cluster state; node term 3, last-accepted version 7 in term 4")); assertThat(new ClusterFormationState(Settings.builder().putList(INITIAL_MASTER_NODES_SETTING.getKey(), "other").build(), clusterState, emptyList(), emptyList(), 4L).getDescription(), - is("master not discovered yet, this node has not previously joined a bootstrapped (v7+) cluster, and " + + is("master not discovered yet, this node has not previously joined a bootstrapped cluster, and " + "this node must discover master-eligible nodes [other] to bootstrap a cluster: have discovered []; " + "discovery will continue using [] from hosts providers and [" + localNode + "] from last-known cluster state; node term 4, last-accepted version 7 in term 4")); diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/JoinTaskExecutorTests.java b/server/src/test/java/org/elasticsearch/cluster/coordination/JoinTaskExecutorTests.java index f2bb3bd3cc03b..b8168ce3de4a7 100644 --- a/server/src/test/java/org/elasticsearch/cluster/coordination/JoinTaskExecutorTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/JoinTaskExecutorTests.java @@ -28,7 +28,6 @@ import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.VersionUtils; -import static org.elasticsearch.test.VersionUtils.getPreviousVersion; import static org.elasticsearch.test.VersionUtils.maxCompatibleVersion; import static org.elasticsearch.test.VersionUtils.randomCompatibleVersion; import static org.elasticsearch.test.VersionUtils.randomVersionBetween; @@ -76,7 +75,7 @@ public void testPreventJoinClusterWithUnsupportedNodeVersions() { final Version maxNodeVersion = nodes.getMaxNodeVersion(); final Version minNodeVersion = nodes.getMinNodeVersion(); - final Version tooLow = getPreviousVersion(maxNodeVersion.minimumCompatibilityVersion()); + final Version tooLow = Version.fromId(maxNodeVersion.minimumCompatibilityVersion().id - 100); expectThrows(IllegalStateException.class, () -> { if (randomBoolean()) { JoinTaskExecutor.ensureNodesCompatibility(tooLow, nodes); diff --git a/server/src/test/java/org/elasticsearch/index/IndexSettingsTests.java b/server/src/test/java/org/elasticsearch/index/IndexSettingsTests.java index b3e6557b187ae..31a92745a4245 100644 --- a/server/src/test/java/org/elasticsearch/index/IndexSettingsTests.java +++ b/server/src/test/java/org/elasticsearch/index/IndexSettingsTests.java @@ -564,17 +564,8 @@ public void testUpdateSoftDeletesFails() { public void testSoftDeletesDefaultSetting() { // enabled by default on 7.0+ or later - { - Version createdVersion = VersionUtils.randomVersionBetween(random(), Version.V_7_0_0, Version.CURRENT); - Settings settings = Settings.builder().put(IndexMetaData.SETTING_INDEX_VERSION_CREATED.getKey(), createdVersion).build(); - assertTrue(IndexSettings.INDEX_SOFT_DELETES_SETTING.get(settings)); - } - // disabled by default on the previous versions - { - Version prevVersion = VersionUtils.randomVersionBetween( - random(), Version.V_6_5_0, VersionUtils.getPreviousVersion(Version.V_7_0_0)); - Settings settings = Settings.builder().put(IndexMetaData.SETTING_INDEX_VERSION_CREATED.getKey(), prevVersion).build(); - assertFalse(IndexSettings.INDEX_SOFT_DELETES_SETTING.get(settings)); - } + Version createdVersion = VersionUtils.randomIndexCompatibleVersion(random()); + Settings settings = Settings.builder().put(IndexMetaData.SETTING_INDEX_VERSION_CREATED.getKey(), createdVersion).build(); + assertTrue(IndexSettings.INDEX_SOFT_DELETES_SETTING.get(settings)); } } diff --git a/server/src/test/java/org/elasticsearch/index/query/GeoShapeQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/GeoShapeQueryBuilderTests.java index 62cc7a43cd2c2..a5311ed157c62 100644 --- a/server/src/test/java/org/elasticsearch/index/query/GeoShapeQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/GeoShapeQueryBuilderTests.java @@ -72,7 +72,7 @@ protected String fieldName() { @Override protected Settings createTestIndexSettings() { // force the new shape impl - Version version = VersionUtils.randomVersionBetween(random(), Version.V_6_6_0, Version.CURRENT); + Version version = VersionUtils.randomIndexCompatibleVersion(random()); return Settings.builder() .put(super.createTestIndexSettings()) .put(IndexMetaData.SETTING_VERSION_CREATED, version) diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowTask.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowTask.java index 5d4564a2030c1..50b32679aee0c 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowTask.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowTask.java @@ -183,6 +183,6 @@ public String toString() { @Override public Version getMinimalSupportedVersion() { - return Version.V_6_5_0; + return Version.CURRENT.minimumCompatibilityVersion(); } } diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinatorTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinatorTests.java index 75e6a732c8210..7648d10aa0915 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinatorTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinatorTests.java @@ -813,8 +813,7 @@ public void testAutoFollowerSoftDeletesDisabled() { Client client = mock(Client.class); when(client.getRemoteClusterClient(anyString())).thenReturn(client); - ClusterState remoteState = randomBoolean() ? createRemoteClusterState("logs-20190101", false) : - createRemoteClusterState("logs-20190101", null); + ClusterState remoteState = createRemoteClusterState("logs-20190101", false); AutoFollowPattern autoFollowPattern = new AutoFollowPattern("remote", Collections.singletonList("logs-*"), null, null, null, null, null, null, null, null, null, null, null); @@ -953,13 +952,9 @@ void cleanFollowedRemoteIndices(ClusterState remoteClusterState, List pa assertThat(entries.get(0).getValue(), nullValue()); } - private static ClusterState createRemoteClusterState(String indexName, Boolean enableSoftDeletes) { + private static ClusterState createRemoteClusterState(String indexName, boolean enableSoftDeletes) { Settings.Builder indexSettings; - if (enableSoftDeletes != null) { - indexSettings = settings(Version.CURRENT).put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), enableSoftDeletes); - } else { - indexSettings = settings(Version.V_6_6_0); - } + indexSettings = settings(Version.CURRENT).put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), enableSoftDeletes); IndexMetaData indexMetaData = IndexMetaData.builder(indexName) .settings(indexSettings) diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportResumeFollowActionTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportResumeFollowActionTests.java index 44f8583bb9b5a..6a4e4d4b6cf0b 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportResumeFollowActionTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportResumeFollowActionTests.java @@ -16,7 +16,6 @@ import org.elasticsearch.index.MapperTestUtils; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.test.VersionUtils; import org.elasticsearch.xpack.ccr.Ccr; import org.elasticsearch.xpack.ccr.CcrSettings; import org.elasticsearch.xpack.ccr.IndexFollowingIT; @@ -79,16 +78,6 @@ public void testValidation() throws IOException { Exception e = expectThrows(IllegalArgumentException.class, () -> validate(request, leaderIMD, followIMD, UUIDs, null)); assertThat(e.getMessage(), equalTo("leader index [index1] does not have soft deletes enabled")); } - { - // should fail because leader index does not have soft deletes enabled (by default). - Version prevVersion = VersionUtils.randomVersionBetween( - random(), Version.V_6_5_0, VersionUtils.getPreviousVersion(Version.V_7_0_0)); - IndexMetaData leaderIMD = IndexMetaData.builder("index1").settings(settings(prevVersion)).numberOfShards(1) - .numberOfReplicas(0).setRoutingNumShards(1).putMapping("_doc", "{\"properties\": {}}").build(); - IndexMetaData followIMD = createIMD("index2", 5, Settings.EMPTY, customMetaData); - Exception e = expectThrows(IllegalArgumentException.class, () -> validate(request, leaderIMD, followIMD, UUIDs, null)); - assertThat(e.getMessage(), equalTo("leader index [index1] does not have soft deletes enabled")); - } { // should fail because the follower index does not have soft deletes enabled IndexMetaData leaderIMD = createIMD("index1", 5, Settings.EMPTY, null); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/AutoFollowMetadata.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/AutoFollowMetadata.java index a8758ed6c2d5a..b06ba584178c0 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/AutoFollowMetadata.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/AutoFollowMetadata.java @@ -120,7 +120,7 @@ public String getWriteableName() { @Override public Version getMinimalSupportedVersion() { - return Version.V_6_5_0.minimumCompatibilityVersion(); + return Version.CURRENT.minimumCompatibilityVersion(); } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/AutoFollowStats.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/AutoFollowStats.java index 6c605ede85e24..879c8d79cd548 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/AutoFollowStats.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/AutoFollowStats.java @@ -6,7 +6,6 @@ package org.elasticsearch.xpack.core.ccr; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.Version; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.io.stream.StreamInput; @@ -19,7 +18,6 @@ import java.io.IOException; import java.util.AbstractMap; -import java.util.Collections; import java.util.List; import java.util.Map; import java.util.NavigableMap; @@ -118,12 +116,8 @@ public AutoFollowStats(StreamInput in) throws IOException { numberOfSuccessfulFollowIndices = in.readVLong(); // note: the casts to the following Writeable.Reader instances are needed by some IDEs (e.g. Eclipse 4.8) as a compiler help recentAutoFollowErrors = new TreeMap<>(in.readMap((Writeable.Reader) StreamInput::readString, - (Writeable.Reader>) in1 -> new Tuple<>(in1.readZLong(), in1.readException()))); - if (in.getVersion().onOrAfter(Version.V_6_6_0)) { - autoFollowedClusters = new TreeMap<>(in.readMap(StreamInput::readString, AutoFollowedCluster::new)); - } else { - autoFollowedClusters = Collections.emptyNavigableMap(); - } + (Writeable.Reader>) in1 -> new Tuple<>(in1.readZLong(), in1.readException()))); + autoFollowedClusters = new TreeMap<>(in.readMap(StreamInput::readString, AutoFollowedCluster::new)); } @Override @@ -135,10 +129,7 @@ public void writeTo(StreamOutput out) throws IOException { out1.writeZLong(value.v1()); out1.writeException(value.v2()); }); - - if (out.getVersion().onOrAfter(Version.V_6_6_0)) { - out.writeMap(autoFollowedClusters, StreamOutput::writeString, (out1, value) -> value.writeTo(out1)); - } + out.writeMap(autoFollowedClusters, StreamOutput::writeString, (out1, value) -> value.writeTo(out1)); } public long getNumberOfFailedFollowIndices() { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexing/IndexerJobStats.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexing/IndexerJobStats.java index 2ef9d242d9ef4..72134b0553b98 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexing/IndexerJobStats.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexing/IndexerJobStats.java @@ -5,7 +5,6 @@ */ package org.elasticsearch.xpack.core.indexing; -import org.elasticsearch.Version; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -61,14 +60,12 @@ public IndexerJobStats(StreamInput in) throws IOException { this.numInputDocuments = in.readVLong(); this.numOuputDocuments = in.readVLong(); this.numInvocations = in.readVLong(); - if (in.getVersion().onOrAfter(Version.V_6_6_0)) { - this.indexTime = in.readVLong(); - this.searchTime = in.readVLong(); - this.indexTotal = in.readVLong(); - this.searchTotal = in.readVLong(); - this.indexFailures = in.readVLong(); - this.searchFailures = in.readVLong(); - } + this.indexTime = in.readVLong(); + this.searchTime = in.readVLong(); + this.indexTotal = in.readVLong(); + this.searchTotal = in.readVLong(); + this.indexFailures = in.readVLong(); + this.searchFailures = in.readVLong(); } public long getNumPages() { @@ -163,14 +160,12 @@ public void writeTo(StreamOutput out) throws IOException { out.writeVLong(numInputDocuments); out.writeVLong(numOuputDocuments); out.writeVLong(numInvocations); - if (out.getVersion().onOrAfter(Version.V_6_6_0)) { - out.writeVLong(indexTime); - out.writeVLong(searchTime); - out.writeVLong(indexTotal); - out.writeVLong(searchTotal); - out.writeVLong(indexFailures); - out.writeVLong(searchFailures); - } + out.writeVLong(indexTime); + out.writeVLong(searchTime); + out.writeVLong(indexTotal); + out.writeVLong(searchTotal); + out.writeVLong(indexFailures); + out.writeVLong(searchFailures); } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/IndexLifecycleMetadata.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/IndexLifecycleMetadata.java index c0cafa8e9079e..161033601ee3f 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/IndexLifecycleMetadata.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/IndexLifecycleMetadata.java @@ -106,7 +106,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws @Override public Version getMinimalSupportedVersion() { - return Version.V_6_6_0; + return Version.CURRENT.minimumCompatibilityVersion(); } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/OpenJobAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/OpenJobAction.java index 418ae16d3e632..4468baac6e5c7 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/OpenJobAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/OpenJobAction.java @@ -170,9 +170,7 @@ public JobParams(String jobId) { public JobParams(StreamInput in) throws IOException { jobId = in.readString(); timeout = TimeValue.timeValueMillis(in.readVLong()); - if (in.getVersion().onOrAfter(Version.V_6_6_0)) { - job = in.readOptionalWriteable(Job::new); - } + job = in.readOptionalWriteable(Job::new); } public String getJobId() { @@ -209,9 +207,7 @@ public String getWriteableName() { public void writeTo(StreamOutput out) throws IOException { out.writeString(jobId); out.writeVLong(timeout.millis()); - if (out.getVersion().onOrAfter(Version.V_6_6_0)) { - out.writeOptionalWriteable(job); - } + out.writeOptionalWriteable(job); } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/StartDatafeedAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/StartDatafeedAction.java index cdfdf06bf85ac..0ba35e6b521e2 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/StartDatafeedAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/StartDatafeedAction.java @@ -195,10 +195,8 @@ public DatafeedParams(StreamInput in) throws IOException { startTime = in.readVLong(); endTime = in.readOptionalLong(); timeout = TimeValue.timeValueMillis(in.readVLong()); - if (in.getVersion().onOrAfter(Version.V_6_6_0)) { - jobId = in.readOptionalString(); - datafeedIndices = in.readStringList(); - } + jobId = in.readOptionalString(); + datafeedIndices = in.readStringList(); } DatafeedParams() { @@ -272,10 +270,8 @@ public void writeTo(StreamOutput out) throws IOException { out.writeVLong(startTime); out.writeOptionalLong(endTime); out.writeVLong(timeout.millis()); - if (out.getVersion().onOrAfter(Version.V_6_6_0)) { - out.writeOptionalString(jobId); - out.writeStringCollection(datafeedIndices); - } + out.writeOptionalString(jobId); + out.writeStringCollection(datafeedIndices); } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedConfig.java index f08c4a9d7391d..6889f5199526d 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedConfig.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedConfig.java @@ -8,7 +8,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.Version; import org.elasticsearch.cluster.AbstractDiffable; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.Strings; @@ -204,12 +203,6 @@ public DatafeedConfig(StreamInput in) throws IOException { } else { this.indices = null; } - // This consumes the list of types if there was one. - if (in.getVersion().before(Version.V_7_0_0)) { - if (in.readBoolean()) { - in.readStringList(); - } - } // each of these writables are version aware this.queryProvider = QueryProvider.fromStream(in); // This reads a boolean from the stream, if true, it sends the stream to the `fromStream` method @@ -223,11 +216,7 @@ public DatafeedConfig(StreamInput in) throws IOException { this.scrollSize = in.readOptionalVInt(); this.chunkingConfig = in.readOptionalWriteable(ChunkingConfig::new); this.headers = Collections.unmodifiableMap(in.readMap(StreamInput::readString, StreamInput::readString)); - if (in.getVersion().onOrAfter(Version.V_6_6_0)) { - delayedDataCheckConfig = in.readOptionalWriteable(DelayedDataCheckConfig::new); - } else { - delayedDataCheckConfig = DelayedDataCheckConfig.defaultDelayedDataCheckConfig(); - } + delayedDataCheckConfig = in.readOptionalWriteable(DelayedDataCheckConfig::new); } /** @@ -408,12 +397,6 @@ public void writeTo(StreamOutput out) throws IOException { } else { out.writeBoolean(false); } - // Write the now removed types to prior versions. - // An empty list is expected - if (out.getVersion().before(Version.V_7_0_0)) { - out.writeBoolean(true); - out.writeStringCollection(Collections.emptyList()); - } // Each of these writables are version aware queryProvider.writeTo(out); // never null @@ -429,9 +412,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalVInt(scrollSize); out.writeOptionalWriteable(chunkingConfig); out.writeMap(headers, StreamOutput::writeString, StreamOutput::writeString); - if (out.getVersion().onOrAfter(Version.V_6_6_0)) { - out.writeOptionalWriteable(delayedDataCheckConfig); - } + out.writeOptionalWriteable(delayedDataCheckConfig); } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedUpdate.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedUpdate.java index ccbb516197217..c1005bb971a56 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedUpdate.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedUpdate.java @@ -134,11 +134,7 @@ public DatafeedUpdate(StreamInput in) throws IOException { } this.scrollSize = in.readOptionalVInt(); this.chunkingConfig = in.readOptionalWriteable(ChunkingConfig::new); - if (in.getVersion().onOrAfter(Version.V_6_6_0)) { - delayedDataCheckConfig = in.readOptionalWriteable(DelayedDataCheckConfig::new); - } else { - delayedDataCheckConfig = null; - } + delayedDataCheckConfig = in.readOptionalWriteable(DelayedDataCheckConfig::new); } /** @@ -181,9 +177,7 @@ public void writeTo(StreamOutput out) throws IOException { } out.writeOptionalVInt(scrollSize); out.writeOptionalWriteable(chunkingConfig); - if (out.getVersion().onOrAfter(Version.V_6_6_0)) { - out.writeOptionalWriteable(delayedDataCheckConfig); - } + out.writeOptionalWriteable(delayedDataCheckConfig); } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/JobUpdate.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/JobUpdate.java index b50b7d2fa5126..c1058e9c0639e 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/JobUpdate.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/JobUpdate.java @@ -132,11 +132,7 @@ public JobUpdate(StreamInput in) throws IOException { } else { jobVersion = null; } - if (in.getVersion().onOrAfter(Version.V_6_6_0)) { - clearJobFinishTime = in.readOptionalBoolean(); - } else { - clearJobFinishTime = null; - } + clearJobFinishTime = in.readOptionalBoolean(); if (in.getVersion().onOrAfter(Version.V_7_0_0) && in.readBoolean()) { modelSnapshotMinVersion = Version.readVersion(in); } else { @@ -172,9 +168,7 @@ public void writeTo(StreamOutput out) throws IOException { } else { out.writeBoolean(false); } - if (out.getVersion().onOrAfter(Version.V_6_6_0)) { - out.writeOptionalBoolean(clearJobFinishTime); - } + out.writeOptionalBoolean(clearJobFinishTime); if (out.getVersion().onOrAfter(Version.V_7_0_0)) { if (modelSnapshotMinVersion != null) { out.writeBoolean(true); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/AnomalyRecord.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/AnomalyRecord.java index 3c099e309243b..5026c387e0870 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/AnomalyRecord.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/AnomalyRecord.java @@ -18,7 +18,6 @@ import org.elasticsearch.xpack.core.ml.job.config.Job; import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; import org.elasticsearch.xpack.core.ml.utils.time.TimeUtils; -import org.elasticsearch.Version; import java.io.IOException; import java.util.ArrayList; @@ -160,9 +159,7 @@ public AnomalyRecord(StreamInput in) throws IOException { jobId = in.readString(); detectorIndex = in.readInt(); probability = in.readDouble(); - if (in.getVersion().onOrAfter(Version.V_6_5_0)) { - multiBucketImpact = in.readOptionalDouble(); - } + multiBucketImpact = in.readOptionalDouble(); byFieldName = in.readOptionalString(); byFieldValue = in.readOptionalString(); correlatedByFieldValue = in.readOptionalString(); @@ -197,9 +194,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeString(jobId); out.writeInt(detectorIndex); out.writeDouble(probability); - if (out.getVersion().onOrAfter(Version.V_6_5_0)) { - out.writeOptionalDouble(multiBucketImpact); - } + out.writeOptionalDouble(multiBucketImpact); out.writeOptionalString(byFieldName); out.writeOptionalString(byFieldValue); out.writeOptionalString(correlatedByFieldValue); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/Bucket.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/Bucket.java index 8e04e001ed6cd..e34bc648cadae 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/Bucket.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/Bucket.java @@ -5,7 +5,6 @@ */ package org.elasticsearch.xpack.core.ml.job.results; -import org.elasticsearch.Version; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -134,10 +133,6 @@ public Bucket(StreamInput in) throws IOException { isInterim = in.readBoolean(); bucketInfluencers = in.readList(BucketInfluencer::new); processingTimeMs = in.readLong(); - // bwc for perPartitionNormalization - if (in.getVersion().before(Version.V_6_5_0)) { - in.readList(Bucket::readOldPerPartitionNormalization); - } scheduledEvents = in.readStringList(); if (scheduledEvents.isEmpty()) { scheduledEvents = Collections.emptyList(); @@ -156,10 +151,6 @@ public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(isInterim); out.writeList(bucketInfluencers); out.writeLong(processingTimeMs); - // bwc for perPartitionNormalization - if (out.getVersion().before(Version.V_6_5_0)) { - out.writeList(Collections.emptyList()); - } out.writeStringCollection(scheduledEvents); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/StopRollupJobAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/StopRollupJobAction.java index 6fc079e0328c9..6b4a743ef2b3f 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/StopRollupJobAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/StopRollupJobAction.java @@ -5,7 +5,6 @@ */ package org.elasticsearch.xpack.core.rollup.action; -import org.elasticsearch.Version; import org.elasticsearch.action.Action; import org.elasticsearch.action.ActionRequestBuilder; import org.elasticsearch.action.ActionRequestValidationException; @@ -70,20 +69,16 @@ public Request() {} public Request(StreamInput in) throws IOException { super(in); id = in.readString(); - if (in.getVersion().onOrAfter(Version.V_6_6_0)) { - waitForCompletion = in.readBoolean(); - timeout = in.readTimeValue(); - } + waitForCompletion = in.readBoolean(); + timeout = in.readTimeValue(); } @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeString(id); - if (out.getVersion().onOrAfter(Version.V_6_6_0)) { - out.writeBoolean(waitForCompletion); - out.writeTimeValue(timeout); - } + out.writeBoolean(waitForCompletion); + out.writeTimeValue(timeout); } public String getId() { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/token/CreateTokenRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/token/CreateTokenRequest.java index 3fdfaab060542..edbfce3d17aa6 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/token/CreateTokenRequest.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/token/CreateTokenRequest.java @@ -5,7 +5,6 @@ */ package org.elasticsearch.xpack.core.security.action.token; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.common.Nullable; @@ -186,11 +185,6 @@ public String getRefreshToken() { @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - if (out.getVersion().before(Version.V_6_5_0) && GrantType.CLIENT_CREDENTIALS.getValue().equals(grantType)) { - throw new IllegalArgumentException("a request with the client_credentials grant_type cannot be sent to version [" + - out.getVersion() + "]"); - } - out.writeString(grantType); out.writeOptionalString(username); if (password == null) { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/token/CreateTokenResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/token/CreateTokenResponse.java index 93ddc56459677..f3b094b1fd141 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/token/CreateTokenResponse.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/token/CreateTokenResponse.java @@ -5,7 +5,6 @@ */ package org.elasticsearch.xpack.core.security.action.token; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -59,9 +58,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeString(tokenString); out.writeTimeValue(expiresIn); out.writeOptionalString(scope); - if (out.getVersion().onOrAfter(Version.V_6_5_0)) { - out.writeOptionalString(refreshToken); - } + out.writeOptionalString(refreshToken); } @Override @@ -70,9 +67,7 @@ public void readFrom(StreamInput in) throws IOException { tokenString = in.readString(); expiresIn = in.readTimeValue(); scope = in.readOptionalString(); - if (in.getVersion().onOrAfter(Version.V_6_5_0)) { - refreshToken = in.readOptionalString(); - } + refreshToken = in.readOptionalString(); } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/APMSystemUser.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/APMSystemUser.java index a63c3b0dc8ca1..e4fa627fb7ab4 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/APMSystemUser.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/APMSystemUser.java @@ -5,7 +5,6 @@ */ package org.elasticsearch.xpack.core.security.user; -import org.elasticsearch.Version; import org.elasticsearch.xpack.core.security.support.MetadataUtils; /** @@ -15,7 +14,6 @@ public class APMSystemUser extends User { public static final String NAME = UsernamesField.APM_NAME; public static final String ROLE_NAME = UsernamesField.APM_ROLE; - public static final Version DEFINED_SINCE = Version.V_6_5_0; public APMSystemUser(boolean enabled) { super(NAME, new String[]{ ROLE_NAME }, null, null, MetadataUtils.DEFAULT_RESERVED_METADATA, enabled); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/RemoteMonitoringUser.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/RemoteMonitoringUser.java index ad51c575d72d9..edfa9a8f5b8ca 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/RemoteMonitoringUser.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/RemoteMonitoringUser.java @@ -5,7 +5,6 @@ */ package org.elasticsearch.xpack.core.security.user; -import org.elasticsearch.Version; import org.elasticsearch.xpack.core.security.support.MetadataUtils; /** @@ -17,8 +16,6 @@ public class RemoteMonitoringUser extends User { public static final String COLLECTION_ROLE_NAME = UsernamesField.REMOTE_MONITORING_COLLECTION_ROLE; public static final String INDEXING_ROLE_NAME = UsernamesField.REMOTE_MONITORING_INDEXING_ROLE; - public static final Version DEFINED_SINCE = Version.V_6_5_0; - public RemoteMonitoringUser(boolean enabled) { super(NAME, new String[]{ COLLECTION_ROLE_NAME, INDEXING_ROLE_NAME }, null, null, MetadataUtils.DEFAULT_RESERVED_METADATA, enabled); } diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleMetadataTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleMetadataTests.java index 2444cbf99fd52..65f8399a6bd76 100644 --- a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleMetadataTests.java +++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleMetadataTests.java @@ -17,6 +17,7 @@ import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.test.AbstractDiffableSerializationTestCase; +import org.elasticsearch.test.VersionUtils; import org.elasticsearch.xpack.core.indexlifecycle.AllocateAction; import org.elasticsearch.xpack.core.indexlifecycle.DeleteAction; import org.elasticsearch.xpack.core.indexlifecycle.ForceMergeAction; @@ -137,7 +138,8 @@ protected Reader> diffReader() { } public void testMinimumSupportedVersion() { - assertEquals(Version.V_6_6_0, createTestInstance().getMinimalSupportedVersion()); + Version min = createTestInstance().getMinimalSupportedVersion(); + assertTrue(min.onOrBefore(VersionUtils.randomCompatibleVersion(random(), Version.CURRENT))); } public void testcontext() { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlConfigMigrationEligibilityCheck.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlConfigMigrationEligibilityCheck.java index daa143ec01977..1c1a28dc8943e 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlConfigMigrationEligibilityCheck.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlConfigMigrationEligibilityCheck.java @@ -5,7 +5,6 @@ */ package org.elasticsearch.xpack.ml; -import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.routing.IndexRoutingTable; import org.elasticsearch.cluster.service.ClusterService; @@ -23,8 +22,6 @@ */ public class MlConfigMigrationEligibilityCheck { - private static final Version MIN_NODE_VERSION = Version.V_6_6_0; - public static final Setting ENABLE_CONFIG_MIGRATION = Setting.boolSetting( "xpack.ml.enable_config_migration", true, Setting.Property.Dynamic, Setting.Property.NodeScope); @@ -43,7 +40,6 @@ private void setConfigMigrationEnabled(boolean configMigrationEnabled) { /** * Can migration start? Returns: * False if config migration is disabled via the setting {@link #ENABLE_CONFIG_MIGRATION} - * False if the min node version of the cluster is before {@link #MIN_NODE_VERSION} * False if the .ml-config index shards are not active * True otherwise * @param clusterState The cluster state @@ -53,12 +49,6 @@ public boolean canStartMigration(ClusterState clusterState) { if (isConfigMigrationEnabled == false) { return false; } - - Version minNodeVersion = clusterState.nodes().getMinNodeVersion(); - if (minNodeVersion.before(MIN_NODE_VERSION)) { - return false; - } - return mlConfigIndexIsAllocated(clusterState); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/normalizer/NormalizerResult.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/normalizer/NormalizerResult.java index 269792dbe7797..43f87f33ed7a4 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/normalizer/NormalizerResult.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/normalizer/NormalizerResult.java @@ -5,7 +5,6 @@ */ package org.elasticsearch.xpack.ml.job.process.normalizer; -import org.elasticsearch.Version; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -66,9 +65,7 @@ public NormalizerResult(StreamInput in) throws IOException { partitionFieldName = in.readOptionalString(); partitionFieldValue = in.readOptionalString(); personFieldName = in.readOptionalString(); - if (in.getVersion().onOrAfter(Version.V_6_5_0)) { - personFieldValue = in.readOptionalString(); - } + personFieldValue = in.readOptionalString(); functionName = in.readOptionalString(); valueFieldName = in.readOptionalString(); probability = in.readDouble(); @@ -81,9 +78,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalString(partitionFieldName); out.writeOptionalString(partitionFieldValue); out.writeOptionalString(personFieldName); - if (out.getVersion().onOrAfter(Version.V_6_5_0)) { - out.writeOptionalString(personFieldValue); - } + out.writeOptionalString(personFieldValue); out.writeOptionalString(functionName); out.writeOptionalString(valueFieldName); out.writeDouble(probability); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlConfigMigrationEligibilityCheckTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlConfigMigrationEligibilityCheckTests.java index 4d07a93e1019c..de8eb5bb2eb63 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlConfigMigrationEligibilityCheckTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlConfigMigrationEligibilityCheckTests.java @@ -10,8 +10,6 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaData; -import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.routing.IndexRoutingTable; import org.elasticsearch.cluster.routing.IndexShardRoutingTable; import org.elasticsearch.cluster.routing.RecoverySource; @@ -21,7 +19,6 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.index.Index; import org.elasticsearch.index.shard.ShardId; @@ -38,7 +35,6 @@ import org.elasticsearch.xpack.core.ml.job.persistence.AnomalyDetectorsIndex; import org.junit.Before; -import java.net.InetAddress; import java.util.Collections; import java.util.HashSet; @@ -70,50 +66,6 @@ public void testCanStartMigration_givenMigrationIsDisabled() { assertFalse(check.canStartMigration(clusterState)); } - public void testCanStartMigration_givenNodesNotUpToVersion() { - MetaData.Builder metaData = MetaData.builder(); - RoutingTable.Builder routingTable = RoutingTable.builder(); - addMlConfigIndex(metaData, routingTable); - - // mixed 6.5 and 6.6 nodes - ClusterState clusterState = ClusterState.builder(new ClusterName("_name")) - .nodes(DiscoveryNodes.builder() - .add(new DiscoveryNode("node_id1", new TransportAddress(InetAddress.getLoopbackAddress(), 9300), Version.V_6_5_0)) - .add(new DiscoveryNode("node_id2", new TransportAddress(InetAddress.getLoopbackAddress(), 9301), Version.V_6_6_0))) - .routingTable(routingTable.build()) - .metaData(metaData) - .build(); - - Settings settings = newSettings(true); - givenClusterSettings(settings); - - MlConfigMigrationEligibilityCheck check = new MlConfigMigrationEligibilityCheck(settings, clusterService); - - assertFalse(check.canStartMigration(clusterState)); - } - - public void testCanStartMigration_givenNodesNotUpToVersionAndMigrationIsEnabled() { - MetaData.Builder metaData = MetaData.builder(); - RoutingTable.Builder routingTable = RoutingTable.builder(); - addMlConfigIndex(metaData, routingTable); - - // mixed 6.5 and 6.6 nodes - ClusterState clusterState = ClusterState.builder(new ClusterName("_name")) - .nodes(DiscoveryNodes.builder() - .add(new DiscoveryNode("node_id1", new TransportAddress(InetAddress.getLoopbackAddress(), 9300), Version.V_6_6_0)) - .add(new DiscoveryNode("node_id2", new TransportAddress(InetAddress.getLoopbackAddress(), 9301), Version.V_6_6_0))) - .routingTable(routingTable.build()) - .metaData(metaData) - .build(); - - Settings settings = newSettings(true); - givenClusterSettings(settings); - - MlConfigMigrationEligibilityCheck check = new MlConfigMigrationEligibilityCheck(settings, clusterService); - - assertTrue(check.canStartMigration(clusterState)); - } - public void testCanStartMigration_givenMissingIndex() { Settings settings = newSettings(true); givenClusterSettings(settings); @@ -159,23 +111,6 @@ private void addMlConfigIndex(MetaData.Builder metaData, RoutingTable.Builder ro .addIndexShard(new IndexShardRoutingTable.Builder(shardId).addShard(shardRouting).build())); } - - public void testJobIsEligibleForMigration_givenNodesNotUpToVersion() { - // mixed 6.5 and 6.6 nodes - ClusterState clusterState = ClusterState.builder(new ClusterName("_name")) - .nodes(DiscoveryNodes.builder() - .add(new DiscoveryNode("node_id1", new TransportAddress(InetAddress.getLoopbackAddress(), 9300), Version.V_6_5_0)) - .add(new DiscoveryNode("node_id2", new TransportAddress(InetAddress.getLoopbackAddress(), 9301), Version.V_6_6_0))) - .build(); - - Settings settings = newSettings(true); - givenClusterSettings(settings); - - MlConfigMigrationEligibilityCheck check = new MlConfigMigrationEligibilityCheck(settings, clusterService); - - assertFalse(check.jobIsEligibleForMigration("pre-min-version", clusterState)); - } - public void testJobIsEligibleForMigration_givenJobNotInClusterState() { ClusterState clusterState = ClusterState.builder(new ClusterName("migratortests")).build(); @@ -306,22 +241,6 @@ public void testJobIsEligibleForMigration_givenOpenAndUnallocatedJob() { assertTrue(check.jobIsEligibleForMigration(openJob.getId(), clusterState)); } - public void testDatafeedIsEligibleForMigration_givenNodesNotUpToVersion() { - // mixed 6.5 and 6.6 nodes - ClusterState clusterState = ClusterState.builder(new ClusterName("_name")) - .nodes(DiscoveryNodes.builder() - .add(new DiscoveryNode("node_id1", new TransportAddress(InetAddress.getLoopbackAddress(), 9300), Version.V_6_5_0)) - .add(new DiscoveryNode("node_id2", new TransportAddress(InetAddress.getLoopbackAddress(), 9301), Version.V_6_6_0))) - .build(); - - Settings settings = newSettings(true); - givenClusterSettings(settings); - - MlConfigMigrationEligibilityCheck check = new MlConfigMigrationEligibilityCheck(settings, clusterService); - - assertFalse(check.datafeedIsEligibleForMigration("pre-min-version", clusterState)); - } - public void testDatafeedIsEligibleForMigration_givenDatafeedNotInClusterState() { ClusterState clusterState = ClusterState.builder(new ClusterName("migratortests")).build(); Settings settings = newSettings(true); diff --git a/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/MlMigrationFullClusterRestartIT.java b/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/MlMigrationFullClusterRestartIT.java index 3c849811f5620..14f5d66a0fff2 100644 --- a/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/MlMigrationFullClusterRestartIT.java +++ b/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/MlMigrationFullClusterRestartIT.java @@ -5,7 +5,6 @@ */ package org.elasticsearch.xpack.restart; -import org.elasticsearch.Version; import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; import org.elasticsearch.common.Strings; @@ -30,12 +29,10 @@ import java.io.IOException; import java.nio.charset.StandardCharsets; -import java.util.Arrays; import java.util.Base64; import java.util.Collections; import java.util.List; import java.util.Map; -import java.util.Optional; import java.util.concurrent.TimeUnit; import static org.hamcrest.Matchers.isEmptyOrNullString; @@ -128,10 +125,6 @@ private void oldClusterTests() throws IOException { } private void upgradedClusterTests() throws Exception { - // wait for the closed and open jobs and datafeed to be migrated - waitForMigration(Arrays.asList(OLD_CLUSTER_CLOSED_JOB_ID, OLD_CLUSTER_OPEN_JOB_ID), - Arrays.asList(OLD_CLUSTER_STOPPED_DATAFEED_ID, OLD_CLUSTER_STARTED_DATAFEED_ID)); - waitForJobToBeAssigned(OLD_CLUSTER_OPEN_JOB_ID); waitForDatafeedToBeAssigned(OLD_CLUSTER_STARTED_DATAFEED_ID); // The persistent task params for the job & datafeed left open @@ -181,40 +174,6 @@ private void waitForDatafeedToBeAssigned(String datafeedId) throws Exception { }, 30, TimeUnit.SECONDS); } - @SuppressWarnings("unchecked") - private void waitForMigration(List expectedMigratedJobs, List expectedMigratedDatafeeds) throws Exception { - - // After v6.6.0 jobs are created in the index so no migration will take place - if (getOldClusterVersion().onOrAfter(Version.V_6_6_0)) { - return; - } - - assertBusy(() -> { - // wait for the eligible configs to be moved from the clusterstate - Request getClusterState = new Request("GET", "/_cluster/state/metadata"); - Response response = client().performRequest(getClusterState); - Map responseMap = entityAsMap(response); - - List> jobs = - (List>) XContentMapValues.extractValue("metadata.ml.jobs", responseMap); - - if (jobs != null) { - for (String jobId : expectedMigratedJobs) { - assertJobNotPresent(jobId, jobs); - } - } - - List> datafeeds = - (List>) XContentMapValues.extractValue("metadata.ml.datafeeds", responseMap); - - if (datafeeds != null) { - for (String datafeedId : expectedMigratedDatafeeds) { - assertDatafeedNotPresent(datafeedId, datafeeds); - } - } - }, 30, TimeUnit.SECONDS); - } - @SuppressWarnings("unchecked") private void checkTaskParamsAreUpdated(String jobId, String datafeedId) throws Exception { Request getClusterState = new Request("GET", "/_cluster/state/metadata"); @@ -240,18 +199,6 @@ else if (id.equals(MlTasks.datafeedTaskId(datafeedId))) { } } - private void assertDatafeedNotPresent(String datafeedId, List> datafeeds) { - Optional config = datafeeds.stream().map(map -> map.get("datafeed_id")) - .filter(id -> id.equals(datafeedId)).findFirst(); - assertFalse(config.isPresent()); - } - - private void assertJobNotPresent(String jobId, List> jobs) { - Optional config = jobs.stream().map(map -> map.get("job_id")) - .filter(id -> id.equals(jobId)).findFirst(); - assertFalse(config.isPresent()); - } - private void addAggregations(DatafeedConfig.Builder dfBuilder) { TermsAggregationBuilder airline = AggregationBuilders.terms("airline"); MaxAggregationBuilder maxTime = AggregationBuilders.max("time").field("time").subAggregation(airline); From b0607ce3440013fbd63d565e569a271101ef73e5 Mon Sep 17 00:00:00 2001 From: Tim Vernum Date: Fri, 31 May 2019 14:27:04 +1000 Subject: [PATCH 208/224] Log the status of security on license change (#42488) Whether security is enabled/disabled is dependent on the combination of the node settings and the cluster license. This commit adds a license state listener that logs when the license change causes security to switch state (or to be initialised). This is primarily useful for diagnosing cluster formation issues. --- .../elasticsearch/test/MockLogAppender.java | 2 +- .../xpack/security/Security.java | 2 + .../support/SecurityStatusChangeListener.java | 45 +++++++ .../SecurityStatusChangeListenerTests.java | 115 ++++++++++++++++++ 4 files changed, 163 insertions(+), 1 deletion(-) create mode 100644 x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/SecurityStatusChangeListener.java create mode 100644 x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/support/SecurityStatusChangeListenerTests.java diff --git a/test/framework/src/main/java/org/elasticsearch/test/MockLogAppender.java b/test/framework/src/main/java/org/elasticsearch/test/MockLogAppender.java index c6a5d77faf5f3..e9c53ed896765 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/MockLogAppender.java +++ b/test/framework/src/main/java/org/elasticsearch/test/MockLogAppender.java @@ -117,7 +117,7 @@ public UnseenEventExpectation(String name, String logger, Level level, String me @Override public void assertMatched() { - assertThat("expected to see " + name + " but did not", saw, equalTo(false)); + assertThat("expected not to see " + name + " but did", saw, equalTo(false)); } } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java index c7ada6e79a9ac..52d40d86d53bc 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java @@ -226,6 +226,7 @@ import org.elasticsearch.xpack.security.rest.action.user.RestPutUserAction; import org.elasticsearch.xpack.security.rest.action.user.RestSetEnabledAction; import org.elasticsearch.xpack.security.support.SecurityIndexManager; +import org.elasticsearch.xpack.security.support.SecurityStatusChangeListener; import org.elasticsearch.xpack.security.transport.SecurityHttpSettings; import org.elasticsearch.xpack.security.transport.SecurityServerTransportInterceptor; import org.elasticsearch.xpack.security.transport.filter.IPFilter; @@ -446,6 +447,7 @@ Collection createComponents(Client client, ThreadPool threadPool, Cluste // to keep things simple, just invalidate all cached entries on license change. this happens so rarely that the impact should be // minimal getLicenseState().addListener(allRolesStore::invalidateAll); + getLicenseState().addListener(new SecurityStatusChangeListener(getLicenseState())); final AuthenticationFailureHandler failureHandler = createAuthenticationFailureHandler(realms); authcService.set(new AuthenticationService(settings, realms, auditTrailService, failureHandler, threadPool, diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/SecurityStatusChangeListener.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/SecurityStatusChangeListener.java new file mode 100644 index 0000000000000..ddc41561afabd --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/SecurityStatusChangeListener.java @@ -0,0 +1,45 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.security.support; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.license.LicenseStateListener; +import org.elasticsearch.license.XPackLicenseState; + +import java.util.Objects; + +/** + * A listener for license state changes that provides log messages when a license change + * causes security to switch between enable and disabled (or vice versa). + */ +public class SecurityStatusChangeListener implements LicenseStateListener { + + private final Logger logger; + private final XPackLicenseState licenseState; + private Boolean securityEnabled; + + public SecurityStatusChangeListener(XPackLicenseState licenseState) { + this.logger = LogManager.getLogger(getClass()); + this.licenseState = licenseState; + this.securityEnabled = null; + } + + /** + * This listener will not be registered if security has been explicitly disabled, so we only need to account for dynamic changes due + * to changes in the applied license. + */ + @Override + public synchronized void licenseStateChanged() { + final boolean newState = licenseState.isSecurityAvailable() && licenseState.isSecurityDisabledByLicenseDefaults() == false; + // old state might be null (undefined) so do Object comparison + if (Objects.equals(newState, securityEnabled) == false) { + logger.info("Active license is now [{}]; Security is {}", licenseState.getOperationMode(), newState ? "enabled" : "disabled"); + this.securityEnabled = newState; + } + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/support/SecurityStatusChangeListenerTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/support/SecurityStatusChangeListenerTests.java new file mode 100644 index 0000000000000..da18d5dc902d4 --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/support/SecurityStatusChangeListenerTests.java @@ -0,0 +1,115 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.security.support; + +import org.apache.logging.log4j.Level; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.license.License; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.MockLogAppender; +import org.junit.After; +import org.junit.Before; +import org.mockito.Mockito; + +import static org.mockito.Mockito.when; + +public class SecurityStatusChangeListenerTests extends ESTestCase { + + private XPackLicenseState licenseState; + private SecurityStatusChangeListener listener; + private MockLogAppender logAppender; + private Logger listenerLogger; + + @Before + public void setup() throws IllegalAccessException { + licenseState = Mockito.mock(XPackLicenseState.class); + when(licenseState.isSecurityAvailable()).thenReturn(true); + + listener = new SecurityStatusChangeListener(licenseState); + + logAppender = new MockLogAppender(); + logAppender.start(); + listenerLogger = LogManager.getLogger(listener.getClass()); + Loggers.addAppender(listenerLogger, logAppender); + } + + @After + public void cleanup() { + Loggers.removeAppender(listenerLogger, logAppender); + logAppender.stop(); + } + + public void testSecurityEnabledToDisabled() { + when(licenseState.isSecurityDisabledByLicenseDefaults()).thenReturn(false); + + when(licenseState.getOperationMode()).thenReturn(License.OperationMode.GOLD); + logAppender.addExpectation(new MockLogAppender.SeenEventExpectation( + "initial change", + listener.getClass().getName(), + Level.INFO, + "Active license is now [GOLD]; Security is enabled" + )); + listener.licenseStateChanged(); + + when(licenseState.getOperationMode()).thenReturn(License.OperationMode.PLATINUM); + logAppender.addExpectation(new MockLogAppender.UnseenEventExpectation( + "no-op change", + listener.getClass().getName(), + Level.INFO, + "Active license is now [PLATINUM]; Security is enabled" + )); + + when(licenseState.isSecurityDisabledByLicenseDefaults()).thenReturn(true); + when(licenseState.getOperationMode()).thenReturn(License.OperationMode.BASIC); + logAppender.addExpectation(new MockLogAppender.SeenEventExpectation( + "change to basic", + listener.getClass().getName(), + Level.INFO, + "Active license is now [BASIC]; Security is disabled" + )); + listener.licenseStateChanged(); + + logAppender.assertAllExpectationsMatched(); + } + + public void testSecurityDisabledToEnabled() { + when(licenseState.isSecurityDisabledByLicenseDefaults()).thenReturn(true); + + when(licenseState.getOperationMode()).thenReturn(License.OperationMode.TRIAL); + logAppender.addExpectation(new MockLogAppender.SeenEventExpectation( + "initial change", + listener.getClass().getName(), + Level.INFO, + "Active license is now [TRIAL]; Security is disabled" + )); + listener.licenseStateChanged(); + + when(licenseState.getOperationMode()).thenReturn(License.OperationMode.BASIC); + logAppender.addExpectation(new MockLogAppender.UnseenEventExpectation( + "no-op change", + listener.getClass().getName(), + Level.INFO, + "Active license is now [BASIC]; Security is disabled" + )); + + when(licenseState.isSecurityDisabledByLicenseDefaults()).thenReturn(false); + when(licenseState.getOperationMode()).thenReturn(License.OperationMode.PLATINUM); + logAppender.addExpectation(new MockLogAppender.SeenEventExpectation( + "change to platinum", + listener.getClass().getName(), + Level.INFO, + "Active license is now [PLATINUM]; Security is enabled" + )); + listener.licenseStateChanged(); + + logAppender.assertAllExpectationsMatched(); + } + +} From be8020ae9908f61ad4571bd22d5642d2ab8d8968 Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Thu, 30 May 2019 22:29:58 -0700 Subject: [PATCH 209/224] Remove leftover transport module docs (#42734) This commit removes docs for alternate transport implementations which were removed years ago. These were missed because they have redirects masking their existsence. --- docs/reference/modules/memcached.asciidoc | 69 ----------------------- docs/reference/modules/thrift.asciidoc | 25 -------- 2 files changed, 94 deletions(-) delete mode 100644 docs/reference/modules/memcached.asciidoc delete mode 100644 docs/reference/modules/thrift.asciidoc diff --git a/docs/reference/modules/memcached.asciidoc b/docs/reference/modules/memcached.asciidoc deleted file mode 100644 index 508d328671b26..0000000000000 --- a/docs/reference/modules/memcached.asciidoc +++ /dev/null @@ -1,69 +0,0 @@ -[[modules-memcached]] -== memcached - -The memcached module allows to expose *Elasticsearch* -APIs over the memcached protocol (as closely -as possible). - -It is provided as a plugin called `transport-memcached` and installing -is explained -https://github.com/elastic/elasticsearch-transport-memcached[here] -. Another option is to download the memcached plugin and placing it -under the `plugins` directory. - -The memcached protocol supports both the binary and the text protocol, -automatically detecting the correct one to use. - -[float] -=== Mapping REST to Memcached Protocol - -Memcached commands are mapped to REST and handled by the same generic -REST layer in Elasticsearch. Here is a list of the memcached commands -supported: - -[float] -==== GET - -The memcached `GET` command maps to a REST `GET`. The key used is the -URI (with parameters). The main downside is the fact that the memcached -`GET` does not allow body in the request (and `SET` does not allow to -return a result...). For this reason, most REST APIs (like search) allow -to accept the "source" as a URI parameter as well. - -[float] -==== SET - -The memcached `SET` command maps to a REST `POST`. The key used is the -URI (with parameters), and the body maps to the REST body. - -[float] -==== DELETE - -The memcached `DELETE` command maps to a REST `DELETE`. The key used is -the URI (with parameters). - -[float] -==== QUIT - -The memcached `QUIT` command is supported and disconnects the client. - -[float] -=== Settings - -The following are the settings the can be configured for memcached: - -[cols="<,<",options="header",] -|=============================================================== -|Setting |Description -|`memcached.port` |A bind port range. Defaults to `11211-11311`. -|=============================================================== - -It also uses the common -<>. - -[float] -=== Disable memcached - -The memcached module can be completely disabled and not started using by -setting `memcached.enabled` to `false`. By default it is enabled once it -is detected as a plugin. diff --git a/docs/reference/modules/thrift.asciidoc b/docs/reference/modules/thrift.asciidoc deleted file mode 100644 index 1ea3f81812693..0000000000000 --- a/docs/reference/modules/thrift.asciidoc +++ /dev/null @@ -1,25 +0,0 @@ -[[modules-thrift]] -== Thrift - -The https://thrift.apache.org/[thrift] transport module allows to expose the REST interface of -Elasticsearch using thrift. Thrift should provide better performance -over http. Since thrift provides both the wire protocol and the -transport, it should make using Elasticsearch more efficient (though it has limited -documentation). - -Using thrift requires installing the `transport-thrift` plugin, located -https://github.com/elastic/elasticsearch-transport-thrift[here]. - -The thrift -https://github.com/elastic/elasticsearch-transport-thrift/blob/master/elasticsearch.thrift[schema] -can be used to generate thrift clients. - -[cols="<,<",options="header",] -|======================================================================= -|Setting |Description -|`thrift.port` |The port to bind to. Defaults to 9500-9600 - -|`thrift.frame` |Defaults to `-1`, which means no framing. Set to a -higher value to specify the frame size (like `15mb`). -|======================================================================= - From 053e1543e9c3702cf3c423744ee9db999991bb6a Mon Sep 17 00:00:00 2001 From: Alan Woodward Date: Fri, 31 May 2019 11:23:36 +0100 Subject: [PATCH 210/224] Add option to ObjectParser to consume unknown fields (#42491) ObjectParser has two ways of dealing with unknown fields: ignore them entirely, or throw an error. Sometimes it can be useful instead to gather up these unknown fields and record them separately, for example as arbitrary entries in a map. This commit adds the ability to specify an unknown field consumer on an ObjectParser, called with the field name and parsed value of each unknown field encountered during parsing. The public API of ObjectParser is largely unchanged, with a single new constructor method and interface definition. --- .../common/xcontent/ObjectParser.java | 103 ++++++++++++++---- .../common/xcontent/ObjectParserTests.java | 39 +++++++ 2 files changed, 121 insertions(+), 21 deletions(-) diff --git a/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/ObjectParser.java b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/ObjectParser.java index ee5e3347f8d99..c80c5bdb0d09a 100644 --- a/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/ObjectParser.java +++ b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/ObjectParser.java @@ -78,14 +78,63 @@ public static BiConsumer> fromLi }; } + private interface UnknownFieldParser { + + void acceptUnknownField(String parserName, String field, XContentLocation location, XContentParser parser, + Value value, Context context) throws IOException; + } + + private static UnknownFieldParser ignoreUnknown() { + return (n, f, l, p, v, c) -> p.skipChildren(); + } + + private static UnknownFieldParser errorOnUnknown() { + return (n, f, l, p, v, c) -> { + throw new XContentParseException(l, "[" + n + "] unknown field [" + f + "], parser not found"); + }; + } + + /** + * Defines how to consume a parsed undefined field + */ + public interface UnknownFieldConsumer { + void accept(Value target, String field, Object value); + } + + private static UnknownFieldParser consumeUnknownField(UnknownFieldConsumer consumer) { + return (parserName, field, location, parser, value, context) -> { + XContentParser.Token t = parser.currentToken(); + switch (t) { + case VALUE_STRING: + consumer.accept(value, field, parser.text()); + break; + case VALUE_NUMBER: + consumer.accept(value, field, parser.numberValue()); + break; + case VALUE_BOOLEAN: + consumer.accept(value, field, parser.booleanValue()); + break; + case VALUE_NULL: + consumer.accept(value, field, null); + break; + case START_OBJECT: + consumer.accept(value, field, parser.map()); + break; + case START_ARRAY: + consumer.accept(value, field, parser.list()); + break; + default: + throw new XContentParseException(parser.getTokenLocation(), + "[" + parserName + "] cannot parse field [" + field + "] with value type [" + t + "]"); + } + }; + } + private final Map fieldParserMap = new HashMap<>(); private final String name; private final Supplier valueSupplier; - /** - * Should this parser ignore unknown fields? This should generally be set to true only when parsing responses from external systems, - * never when parsing requests from users. - */ - private final boolean ignoreUnknownFields; + + private final UnknownFieldParser unknownFieldParser; /** * Creates a new ObjectParser instance with a name. This name is used to reference the parser in exceptions and messages. @@ -95,25 +144,45 @@ public ObjectParser(String name) { } /** - * Creates a new ObjectParser instance which a name. + * Creates a new ObjectParser instance with a name. * @param name the parsers name, used to reference the parser in exceptions and messages. * @param valueSupplier a supplier that creates a new Value instance used when the parser is used as an inner object parser. */ public ObjectParser(String name, @Nullable Supplier valueSupplier) { - this(name, false, valueSupplier); + this(name, errorOnUnknown(), valueSupplier); } /** - * Creates a new ObjectParser instance which a name. + * Creates a new ObjectParser instance with a name. * @param name the parsers name, used to reference the parser in exceptions and messages. * @param ignoreUnknownFields Should this parser ignore unknown fields? This should generally be set to true only when parsing * responses from external systems, never when parsing requests from users. * @param valueSupplier a supplier that creates a new Value instance used when the parser is used as an inner object parser. */ public ObjectParser(String name, boolean ignoreUnknownFields, @Nullable Supplier valueSupplier) { + this(name, ignoreUnknownFields ? ignoreUnknown() : errorOnUnknown(), valueSupplier); + } + + /** + * Creates a new ObjectParser instance with a name. + * @param name the parsers name, used to reference the parser in exceptions and messages. + * @param unknownFieldConsumer how to consume parsed unknown fields + * @param valueSupplier a supplier that creates a new Value instance used when the parser is used as an inner object parser. + */ + public ObjectParser(String name, UnknownFieldConsumer unknownFieldConsumer, @Nullable Supplier valueSupplier) { + this(name, consumeUnknownField(unknownFieldConsumer), valueSupplier); + } + + /** + * Creates a new ObjectParser instance with a name. + * @param name the parsers name, used to reference the parser in exceptions and messages. + * @param unknownFieldParser how to parse unknown fields + * @param valueSupplier a supplier that creates a new Value instance used when the parser is used as an inner object parser. + */ + private ObjectParser(String name, UnknownFieldParser unknownFieldParser, @Nullable Supplier valueSupplier) { this.name = name; this.valueSupplier = valueSupplier; - this.ignoreUnknownFields = ignoreUnknownFields; + this.unknownFieldParser = unknownFieldParser; } /** @@ -152,17 +221,18 @@ public Value parse(XContentParser parser, Value value, Context context) throws I FieldParser fieldParser = null; String currentFieldName = null; + XContentLocation currentPosition = null; while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); - fieldParser = getParser(currentFieldName, parser); + currentPosition = parser.getTokenLocation(); + fieldParser = fieldParserMap.get(currentFieldName); } else { if (currentFieldName == null) { throw new XContentParseException(parser.getTokenLocation(), "[" + name + "] no field found"); } if (fieldParser == null) { - assert ignoreUnknownFields : "this should only be possible if configured to ignore known fields"; - parser.skipChildren(); // noop if parser points to a value, skips children if parser is start object or start array + unknownFieldParser.acceptUnknownField(name, currentFieldName, currentPosition, parser, value, context); } else { fieldParser.assertSupports(name, parser, currentFieldName); parseSub(parser, fieldParser, currentFieldName, value, context); @@ -363,15 +433,6 @@ private void parseSub(XContentParser parser, FieldParser fieldParser, String cur } } - private FieldParser getParser(String fieldName, XContentParser xContentParser) { - FieldParser parser = fieldParserMap.get(fieldName); - if (parser == null && false == ignoreUnknownFields) { - throw new XContentParseException(xContentParser.getTokenLocation(), - "[" + name + "] unknown field [" + fieldName + "], parser not found"); - } - return parser; - } - private class FieldParser { private final Parser parser; private final EnumSet supportedTokens; diff --git a/libs/x-content/src/test/java/org/elasticsearch/common/xcontent/ObjectParserTests.java b/libs/x-content/src/test/java/org/elasticsearch/common/xcontent/ObjectParserTests.java index e089b8a956ac8..6002c6bd35076 100644 --- a/libs/x-content/src/test/java/org/elasticsearch/common/xcontent/ObjectParserTests.java +++ b/libs/x-content/src/test/java/org/elasticsearch/common/xcontent/ObjectParserTests.java @@ -33,7 +33,9 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; +import java.util.HashMap; import java.util.List; +import java.util.Map; import java.util.concurrent.atomic.AtomicReference; import static org.hamcrest.Matchers.containsString; @@ -733,4 +735,41 @@ public void setFoo(int foo) { this.foo = foo; } } + + private static class ObjectWithArbitraryFields { + String name; + Map fields = new HashMap<>(); + void setField(String key, Object value) { + fields.put(key, value); + } + void setName(String name) { + this.name = name; + } + } + + public void testConsumeUnknownFields() throws IOException { + XContentParser parser = createParser(JsonXContent.jsonXContent, + "{\n" + + " \"test\" : \"foo\",\n" + + " \"test_number\" : 2,\n" + + " \"name\" : \"geoff\",\n" + + " \"test_boolean\" : true,\n" + + " \"test_null\" : null,\n" + + " \"test_array\": [1,2,3,4],\n" + + " \"test_nested\": { \"field\" : \"value\", \"field2\" : [ \"list1\", \"list2\" ] }\n" + + "}"); + ObjectParser op + = new ObjectParser<>("unknown", ObjectWithArbitraryFields::setField, ObjectWithArbitraryFields::new); + op.declareString(ObjectWithArbitraryFields::setName, new ParseField("name")); + + ObjectWithArbitraryFields o = op.parse(parser, null); + assertEquals("geoff", o.name); + assertEquals(6, o.fields.size()); + assertEquals("foo", o.fields.get("test")); + assertEquals(2, o.fields.get("test_number")); + assertEquals(true, o.fields.get("test_boolean")); + assertNull(o.fields.get("test_null")); + assertEquals(List.of(1, 2, 3, 4), o.fields.get("test_array")); + assertEquals(Map.of("field", "value", "field2", List.of("list1", "list2")), o.fields.get("test_nested")); + } } From a7947367d267f727ae74889705676a9b3bcf1c84 Mon Sep 17 00:00:00 2001 From: Alan Woodward Date: Fri, 31 May 2019 13:14:18 +0100 Subject: [PATCH 211/224] Return NO_INTERVALS rather than null from empty TokenStream (#42750) IntervalBuilder#analyzeText will currently return null if it is passed an empty TokenStream, which can lead to a confusing NullPointerException later on during querying. This commit changes the code to return NO_INTERVALS instead. Fixes #42587 --- .../java/org/elasticsearch/index/query/IntervalBuilder.java | 4 ++-- .../org/elasticsearch/index/query/IntervalBuilderTests.java | 6 ++++++ 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/index/query/IntervalBuilder.java b/server/src/main/java/org/elasticsearch/index/query/IntervalBuilder.java index 92b4fa664193c..5e1047684840f 100644 --- a/server/src/main/java/org/elasticsearch/index/query/IntervalBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/IntervalBuilder.java @@ -96,7 +96,7 @@ protected IntervalsSource analyzeText(CachingTokenFilter stream, int maxGaps, bo // formulate a single term, boolean, or phrase. if (numTokens == 0) { - return null; + return NO_INTERVALS; } else if (numTokens == 1) { // single term return analyzeTerm(stream); @@ -231,7 +231,7 @@ protected List analyzeGraph(TokenStream source) throws IOExcept return clauses; } - private static final IntervalsSource NO_INTERVALS = new IntervalsSource() { + static final IntervalsSource NO_INTERVALS = new IntervalsSource() { @Override public IntervalIterator intervals(String field, LeafReaderContext ctx) { diff --git a/server/src/test/java/org/elasticsearch/index/query/IntervalBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/IntervalBuilderTests.java index 15ec8af0af2c5..69464edb51332 100644 --- a/server/src/test/java/org/elasticsearch/index/query/IntervalBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/IntervalBuilderTests.java @@ -110,6 +110,12 @@ public void testPhraseWithStopword() throws IOException { } + public void testEmptyTokenStream() throws IOException { + CannedTokenStream ts = new CannedTokenStream(); + IntervalsSource source = BUILDER.analyzeText(new CachingTokenFilter(ts), 0, true); + assertSame(IntervalBuilder.NO_INTERVALS, source); + } + public void testSimpleSynonyms() throws IOException { CannedTokenStream ts = new CannedTokenStream( From 04cf4a1faa2a5be386d4c3dc49630ba88560ef03 Mon Sep 17 00:00:00 2001 From: Benjamin Trent Date: Fri, 31 May 2019 08:56:20 -0500 Subject: [PATCH 212/224] [ML] [Data Frame] nesting group_by fields like other aggs (#42718) --- .../dataframe/integration/DataFramePivotRestIT.java | 12 ++++++++---- .../transforms/pivot/AggregationResultUtils.java | 2 +- 2 files changed, 9 insertions(+), 5 deletions(-) diff --git a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFramePivotRestIT.java b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFramePivotRestIT.java index 3c661a0f4aca4..36f95e599ff73 100644 --- a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFramePivotRestIT.java +++ b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFramePivotRestIT.java @@ -251,10 +251,10 @@ public void testPreviewTransform() throws Exception { config += " \"pivot\": {" + " \"group_by\": {" - + " \"reviewer\": {\"terms\": { \"field\": \"user_id\" }}," + + " \"user.id\": {\"terms\": { \"field\": \"user_id\" }}," + " \"by_day\": {\"date_histogram\": {\"fixed_interval\": \"1d\",\"field\":\"timestamp\",\"format\":\"yyyy-MM-dd\"}}}," + " \"aggregations\": {" - + " \"avg_rating\": {" + + " \"user.avg_rating\": {" + " \"avg\": {" + " \"field\": \"stars\"" + " } } } }" @@ -265,10 +265,14 @@ public void testPreviewTransform() throws Exception { List> preview = (List>)previewDataframeResponse.get("preview"); // preview is limited to 100 assertThat(preview.size(), equalTo(100)); - Set expectedFields = new HashSet<>(Arrays.asList("reviewer", "by_day", "avg_rating")); + Set expectedTopLevelFields = new HashSet<>(Arrays.asList("user", "by_day")); + Set expectedNestedFields = new HashSet<>(Arrays.asList("id", "avg_rating")); preview.forEach(p -> { Set keys = p.keySet(); - assertThat(keys, equalTo(expectedFields)); + assertThat(keys, equalTo(expectedTopLevelFields)); + Map nestedObj = (Map)p.get("user"); + keys = nestedObj.keySet(); + assertThat(keys, equalTo(expectedNestedFields)); }); } diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/pivot/AggregationResultUtils.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/pivot/AggregationResultUtils.java index f8857591b2322..6201dd936ba19 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/pivot/AggregationResultUtils.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/pivot/AggregationResultUtils.java @@ -61,7 +61,7 @@ public static Stream> extractCompositeAggregationResults(Com groups.getGroups().keySet().forEach(destinationFieldName -> { Object value = bucket.getKey().get(destinationFieldName); idGen.add(destinationFieldName, value); - document.put(destinationFieldName, value); + updateDocument(document, destinationFieldName, value); }); List aggNames = aggregationBuilders.stream().map(AggregationBuilder::getName).collect(Collectors.toList()); From 0d6cbf92e5903ad97d1d5c823bb8ba6545581ef1 Mon Sep 17 00:00:00 2001 From: David Kyle Date: Fri, 31 May 2019 15:01:12 +0100 Subject: [PATCH 213/224] [ML Data Frame] Refactor stop logic (#42644) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Revert "invalid test" This reverts commit 9dd8b52c13c716918ff97e6527aaf43aefc4695d. * Testing * mend * Revert "[ML Data Frame] Mute Data Frame tests" This reverts commit 5d837fa312b0e41a77a65462667a2d92d1114567. * Call onStop and onAbort outside atomic update * Don’t update CS * Tidying up * Remove invalid test that asserted logic that has been removed * Add stopped event * Revert "Add stopped event" This reverts commit 02ba992f4818bebd838e1c7678bd2e1cc090bfab. * Adding check for STOPPED in saveState --- .../core/indexing/AsyncTwoPhaseIndexer.java | 29 ++++---- .../indexing/AsyncTwoPhaseIndexerTests.java | 19 ------ .../integration/DataFrameTransformIT.java | 1 - .../integration/DataFrameAuditorIT.java | 2 - .../DataFrameConfigurationIndexIT.java | 2 - .../DataFrameGetAndGetStatsIT.java | 2 - .../integration/DataFrameMetaDataIT.java | 2 - .../integration/DataFramePivotRestIT.java | 2 - .../DataFrameTaskFailedStateIT.java | 2 - .../integration/DataFrameUsageIT.java | 2 - .../transforms/DataFrameTransformTask.java | 66 ++++++++----------- .../test/data_frame/transforms_start_stop.yml | 45 +++++++++++++ 12 files changed, 87 insertions(+), 87 deletions(-) diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexer.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexer.java index 0c4477b6b700e..f9bbf890fe6ce 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexer.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexer.java @@ -90,28 +90,21 @@ public synchronized IndexerState start() { * Sets the internal state to {@link IndexerState#STOPPING} if an async job is * running in the background, {@link #onStop()} will be called when the background job * detects that the indexer is stopped. - * If there is no job running when this function is called - * the state is set to {@link IndexerState#STOPPED} and {@link #onStop()} called directly. + * If there is no job running when this function is called the returned + * state is {@link IndexerState#STOPPED} and {@link #onStop()} will not be called. * * @return The new state for the indexer (STOPPED, STOPPING or ABORTING if the job was already aborted). */ public synchronized IndexerState stop() { - AtomicBoolean wasStartedAndSetStopped = new AtomicBoolean(false); - IndexerState currentState = state.updateAndGet(previousState -> { + return state.updateAndGet(previousState -> { if (previousState == IndexerState.INDEXING) { return IndexerState.STOPPING; } else if (previousState == IndexerState.STARTED) { - wasStartedAndSetStopped.set(true); return IndexerState.STOPPED; } else { return previousState; } }); - - if (wasStartedAndSetStopped.get()) { - onStop(); - } - return currentState; } /** @@ -288,20 +281,22 @@ private void finishWithIndexingFailure(Exception exc) { } private IndexerState finishAndSetState() { - return state.updateAndGet(prev -> { + AtomicBoolean callOnStop = new AtomicBoolean(false); + AtomicBoolean callOnAbort = new AtomicBoolean(false); + IndexerState updatedState = state.updateAndGet(prev -> { switch (prev) { case INDEXING: // ready for another job return IndexerState.STARTED; case STOPPING: + callOnStop.set(true); // must be started again - onStop(); return IndexerState.STOPPED; case ABORTING: + callOnAbort.set(true); // abort and exit - onAbort(); return IndexerState.ABORTING; // This shouldn't matter, since onAbort() will kill the task first case STOPPED: @@ -316,6 +311,14 @@ private IndexerState finishAndSetState() { throw new IllegalStateException("Indexer job encountered an illegal state [" + prev + "]"); } }); + + if (callOnStop.get()) { + onStop(); + } else if (callOnAbort.get()) { + onAbort(); + } + + return updatedState; } private void onSearchResponse(SearchResponse searchResponse) { diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexerTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexerTests.java index fc86a9554880f..053e41d9b2a63 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexerTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexerTests.java @@ -268,25 +268,6 @@ public void testStateMachineBrokenSearch() throws InterruptedException { } } - public void testStop_AfterIndexerIsFinished() throws InterruptedException { - AtomicReference state = new AtomicReference<>(IndexerState.STOPPED); - final ExecutorService executor = Executors.newFixedThreadPool(1); - try { - CountDownLatch countDownLatch = new CountDownLatch(1); - MockIndexer indexer = new MockIndexer(executor, state, 2, countDownLatch, false); - indexer.start(); - assertTrue(indexer.maybeTriggerAsyncJob(System.currentTimeMillis())); - countDownLatch.countDown(); - assertTrue(awaitBusy(() -> isFinished.get())); - - indexer.stop(); - assertTrue(isStopped.get()); - assertThat(indexer.getState(), equalTo(IndexerState.STOPPED)); - } finally { - executor.shutdownNow(); - } - } - public void testStop_WhileIndexing() throws InterruptedException { AtomicReference state = new AtomicReference<>(IndexerState.STOPPED); final ExecutorService executor = Executors.newFixedThreadPool(1); diff --git a/x-pack/plugin/data-frame/qa/multi-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTransformIT.java b/x-pack/plugin/data-frame/qa/multi-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTransformIT.java index 1ec425c641693..69fb980871dce 100644 --- a/x-pack/plugin/data-frame/qa/multi-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTransformIT.java +++ b/x-pack/plugin/data-frame/qa/multi-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTransformIT.java @@ -30,7 +30,6 @@ public void cleanTransforms() throws IOException { cleanUp(); } - @AwaitsFix( bugUrl = "https://github.com/elastic/elasticsearch/issues/42344") public void testDataFrameTransformCrud() throws Exception { createReviewsIndex(); diff --git a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameAuditorIT.java b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameAuditorIT.java index 7dc79c1ae8fbe..9884c9bb6793b 100644 --- a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameAuditorIT.java +++ b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameAuditorIT.java @@ -6,7 +6,6 @@ package org.elasticsearch.xpack.dataframe.integration; -import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.client.Request; import org.elasticsearch.xpack.dataframe.persistence.DataFrameInternalIndex; import org.junit.Before; @@ -23,7 +22,6 @@ import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.is; -@LuceneTestCase.AwaitsFix( bugUrl = "https://github.com/elastic/elasticsearch/issues/42344") public class DataFrameAuditorIT extends DataFrameRestTestCase { private static final String TEST_USER_NAME = "df_admin_plus_data"; diff --git a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameConfigurationIndexIT.java b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameConfigurationIndexIT.java index d7e12cf2bee4d..681599331c8af 100644 --- a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameConfigurationIndexIT.java +++ b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameConfigurationIndexIT.java @@ -8,7 +8,6 @@ import org.apache.http.entity.ContentType; import org.apache.http.entity.StringEntity; -import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseException; @@ -23,7 +22,6 @@ import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; -@LuceneTestCase.AwaitsFix( bugUrl = "https://github.com/elastic/elasticsearch/issues/42344") public class DataFrameConfigurationIndexIT extends DataFrameRestTestCase { /** diff --git a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameGetAndGetStatsIT.java b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameGetAndGetStatsIT.java index 9bac6ca0b4049..d9927cd09ed8f 100644 --- a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameGetAndGetStatsIT.java +++ b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameGetAndGetStatsIT.java @@ -6,7 +6,6 @@ package org.elasticsearch.xpack.dataframe.integration; -import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.client.Request; import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.xpack.core.dataframe.DataFrameField; @@ -22,7 +21,6 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; -@LuceneTestCase.AwaitsFix( bugUrl = "https://github.com/elastic/elasticsearch/issues/42344") public class DataFrameGetAndGetStatsIT extends DataFrameRestTestCase { private static final String TEST_USER_NAME = "df_user"; diff --git a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameMetaDataIT.java b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameMetaDataIT.java index 5b95d1daead53..26a957ea055c2 100644 --- a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameMetaDataIT.java +++ b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameMetaDataIT.java @@ -6,7 +6,6 @@ package org.elasticsearch.xpack.dataframe.integration; -import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.Version; import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; @@ -16,7 +15,6 @@ import java.io.IOException; import java.util.Map; -@LuceneTestCase.AwaitsFix( bugUrl = "https://github.com/elastic/elasticsearch/issues/42344") public class DataFrameMetaDataIT extends DataFrameRestTestCase { private boolean indicesCreated = false; diff --git a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFramePivotRestIT.java b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFramePivotRestIT.java index 36f95e599ff73..933fcc6c8e5c4 100644 --- a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFramePivotRestIT.java +++ b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFramePivotRestIT.java @@ -6,7 +6,6 @@ package org.elasticsearch.xpack.dataframe.integration; -import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.client.Request; import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.junit.Before; @@ -22,7 +21,6 @@ import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; -@LuceneTestCase.AwaitsFix( bugUrl = "https://github.com/elastic/elasticsearch/issues/42344") public class DataFramePivotRestIT extends DataFrameRestTestCase { private static final String TEST_USER_NAME = "df_admin_plus_data"; diff --git a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTaskFailedStateIT.java b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTaskFailedStateIT.java index 7b63644dd34ad..96aeeda8755f4 100644 --- a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTaskFailedStateIT.java +++ b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTaskFailedStateIT.java @@ -6,7 +6,6 @@ package org.elasticsearch.xpack.dataframe.integration; -import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.client.ResponseException; import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.rest.RestStatus; @@ -20,7 +19,6 @@ import static org.hamcrest.CoreMatchers.nullValue; import static org.hamcrest.Matchers.equalTo; -@LuceneTestCase.AwaitsFix( bugUrl = "https://github.com/elastic/elasticsearch/issues/42344") public class DataFrameTaskFailedStateIT extends DataFrameRestTestCase { public void testDummy() { diff --git a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameUsageIT.java b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameUsageIT.java index f98fa6a271365..4f209c5a9f3f4 100644 --- a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameUsageIT.java +++ b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameUsageIT.java @@ -6,7 +6,6 @@ package org.elasticsearch.xpack.dataframe.integration; -import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; import org.elasticsearch.common.xcontent.support.XContentMapValues; @@ -23,7 +22,6 @@ import static org.elasticsearch.xpack.core.dataframe.DataFrameField.INDEX_DOC_TYPE; import static org.elasticsearch.xpack.dataframe.DataFrameFeatureSet.PROVIDED_STATS; -@LuceneTestCase.AwaitsFix( bugUrl = "https://github.com/elastic/elasticsearch/issues/42344") public class DataFrameUsageIT extends DataFrameRestTestCase { private boolean indicesCreated = false; diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformTask.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformTask.java index 13deab6748c94..575cd4c15bd67 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformTask.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformTask.java @@ -66,7 +66,7 @@ public class DataFrameTransformTask extends AllocatedPersistentTask implements S private final Map initialPosition; private final IndexerState initialIndexerState; - private final SetOnce indexer = new SetOnce<>(); + private final SetOnce indexer = new SetOnce<>(); private final AtomicReference taskState; private final AtomicReference stateReason; @@ -125,7 +125,7 @@ public Status getStatus() { return getState(); } - private DataFrameIndexer getIndexer() { + private ClientDataFrameIndexer getIndexer() { return indexer.get(); } @@ -236,7 +236,10 @@ public synchronized void stop() { return; } - getIndexer().stop(); + IndexerState state = getIndexer().stop(); + if (state == IndexerState.STOPPED) { + getIndexer().doSaveState(state, getIndexer().getPosition(), () -> getIndexer().onStop()); + } } @Override @@ -530,11 +533,17 @@ protected void doSaveState(IndexerState indexerState, Map positi next.run(); return; } + // If we are `STOPPED` on a `doSaveState` call, that indicates we transitioned to `STOPPED` from `STOPPING` + // OR we called `doSaveState` manually as the indexer was not actively running. + // Since we save the state to an index, we should make sure that our task state is in parity with the indexer state + if (indexerState.equals(IndexerState.STOPPED)) { + transformTask.setTaskStateStopped(); + } final DataFrameTransformState state = new DataFrameTransformState( transformTask.taskState.get(), indexerState, - getPosition(), + position, transformTask.currentCheckpoint.get(), transformTask.stateReason.get(), getProgress()); @@ -542,28 +551,18 @@ protected void doSaveState(IndexerState indexerState, Map positi // Persisting stats when we call `doSaveState` should be ok as we only call it on a state transition and // only every-so-often when doing the bulk indexing calls. See AsyncTwoPhaseIndexer#onBulkResponse for current periodicity - ActionListener> updateClusterStateListener = ActionListener.wrap( - task -> { - transformsConfigManager.putOrUpdateTransformStats( - new DataFrameTransformStateAndStats(transformId, state, getStats(), - DataFrameTransformCheckpointingInfo.EMPTY), // TODO should this be null - ActionListener.wrap( - r -> { - next.run(); - }, - statsExc -> { - logger.error("Updating stats of transform [" + transformConfig.getId() + "] failed", statsExc); - next.run(); - } - )); - }, - exc -> { - logger.error("Updating persistent state of transform [" + transformConfig.getId() + "] failed", exc); - next.run(); - } - ); - - transformTask.persistStateToClusterState(state, updateClusterStateListener); + transformsConfigManager.putOrUpdateTransformStats( + new DataFrameTransformStateAndStats(transformId, state, getStats(), + DataFrameTransformCheckpointingInfo.EMPTY), // TODO should this be null + ActionListener.wrap( + r -> { + next.run(); + }, + statsExc -> { + logger.error("Updating stats of transform [" + transformConfig.getId() + "] failed", statsExc); + next.run(); + } + )); } @Override @@ -602,20 +601,7 @@ protected void onFinish(ActionListener listener) { protected void onStop() { auditor.info(transformConfig.getId(), "Indexer has stopped"); logger.info("Data frame transform [{}] indexer has stopped", transformConfig.getId()); - - transformTask.setTaskStateStopped(); - transformsConfigManager.putOrUpdateTransformStats( - new DataFrameTransformStateAndStats(transformId, transformTask.getState(), getStats(), - DataFrameTransformCheckpointingInfo.EMPTY), // TODO should this be null - ActionListener.wrap( - r -> { - transformTask.shutdown(); - }, - statsExc -> { - transformTask.shutdown(); - logger.error("Updating saving stats of transform [" + transformConfig.getId() + "] failed", statsExc); - } - )); + transformTask.shutdown(); } @Override diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_start_stop.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_start_stop.yml index 31f80033e7bdb..2686c57fd06ac 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_start_stop.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_start_stop.yml @@ -190,8 +190,10 @@ teardown: - do: data_frame.stop_data_frame_transform: transform_id: "airline-transform-start-stop" + wait_for_completion: true - match: { acknowledged: true } + - do: data_frame.get_data_frame_transform_stats: transform_id: "airline-transform-start-later" @@ -209,3 +211,46 @@ teardown: - do: data_frame.delete_data_frame_transform: transform_id: "airline-transform-start-later" + +--- +"Test stop all": + - do: + data_frame.put_data_frame_transform: + transform_id: "airline-transform-stop-all" + body: > + { + "source": { "index": "airline-data" }, + "dest": { "index": "airline-data-start-later" }, + "pivot": { + "group_by": { "airline": {"terms": {"field": "airline"}}}, + "aggs": {"avg_response": {"avg": {"field": "responsetime"}}} + } + } + - do: + data_frame.start_data_frame_transform: + transform_id: "airline-transform-stop-all" + - match: { acknowledged: true } + + - do: + data_frame.start_data_frame_transform: + transform_id: "airline-transform-start-stop" + - match: { acknowledged: true } + + - do: + data_frame.stop_data_frame_transform: + transform_id: "_all" + wait_for_completion: true + - match: { acknowledged: true } + + - do: + data_frame.get_data_frame_transform_stats: + transform_id: "*" + - match: { count: 2 } + - match: { transforms.0.state.indexer_state: "stopped" } + - match: { transforms.0.state.task_state: "stopped" } + - match: { transforms.1.state.indexer_state: "stopped" } + - match: { transforms.1.state.task_state: "stopped" } + + - do: + data_frame.delete_data_frame_transform: + transform_id: "airline-transform-stop-all" From a5529c8df94909cd878bbe74f4c4ce755e5d190c Mon Sep 17 00:00:00 2001 From: Jay Modi Date: Fri, 31 May 2019 08:02:55 -0600 Subject: [PATCH 214/224] Re-enable token bwc tests (#42726) This commit re-enables token bwc tests that run as part of the rolling upgrade tests. These tests were muted while #42651 was being backported. --- .../upgrades/TokenBackwardsCompatibilityIT.java | 2 -- .../rest-api-spec/test/mixed_cluster/50_token_auth.yml | 6 ------ .../rest-api-spec/test/upgraded_cluster/50_token_auth.yml | 4 ---- 3 files changed, 12 deletions(-) diff --git a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/TokenBackwardsCompatibilityIT.java b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/TokenBackwardsCompatibilityIT.java index 4a0639050d522..69c515d80a3d2 100644 --- a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/TokenBackwardsCompatibilityIT.java +++ b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/TokenBackwardsCompatibilityIT.java @@ -7,7 +7,6 @@ import org.apache.http.HttpHeaders; import org.apache.http.HttpHost; -import org.apache.lucene.util.LuceneTestCase.AwaitsFix; import org.elasticsearch.Version; import org.elasticsearch.client.Request; import org.elasticsearch.client.RequestOptions; @@ -26,7 +25,6 @@ import java.util.List; import java.util.Map; -@AwaitsFix(bugUrl = "need to backport #42651") public class TokenBackwardsCompatibilityIT extends AbstractUpgradeTestCase { private Collection twoClients = null; diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/50_token_auth.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/50_token_auth.yml index a34128579f3f8..f426d9b2525b4 100644 --- a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/50_token_auth.yml +++ b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/50_token_auth.yml @@ -2,8 +2,6 @@ "Get the indexed token and use if to authenticate": - skip: features: headers - version: " - 7.99.99" - reason: "Need to backport PR #42651" - do: cluster.health: @@ -61,8 +59,6 @@ "Get the indexed refreshed access token and use if to authenticate": - skip: features: headers - version: " - 7.99.99" - reason: "Need to backport PR #42651" - do: get: @@ -115,8 +111,6 @@ "Get the indexed refresh token and use it to get another access token and authenticate": - skip: features: headers - version: " - 7.99.99" - reason: "Need to backport PR #42651" - do: get: diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/50_token_auth.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/50_token_auth.yml index 64897707c15d3..430f94c1064d6 100644 --- a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/50_token_auth.yml +++ b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/50_token_auth.yml @@ -2,8 +2,6 @@ "Get the indexed token and use if to authenticate": - skip: features: headers - version: " - 8.0.0" - reason: "Need to backport PR #42651" - do: cluster.health: @@ -51,8 +49,6 @@ "Get the indexed refresh token and use if to get another access token and authenticate": - skip: features: headers - version: " - 8.0.0" - reason: "Need to backport PR #42651" - do: get: From fddeb0cb466683f0782675686f4a0dd4dc936e03 Mon Sep 17 00:00:00 2001 From: David Roberts Date: Fri, 31 May 2019 15:40:53 +0100 Subject: [PATCH 215/224] [ML] Add Kibana application privilege to data frame admin/user roles (#42757) Data frame transforms are restricted by different roles to ML, but share the ML UI. To prevent the ML UI being hidden for users who only have the data frame admin or user role, it is necessary to add the ML Kibana application privilege to the backend data frame roles. --- .../authz/store/ReservedRolesStore.java | 12 ++++++++-- .../authz/store/ReservedRolesStoreTests.java | 24 +++++++++++++++++++ 2 files changed, 34 insertions(+), 2 deletions(-) diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStore.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStore.java index 49d4159f13968..ab06fc32e288f 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStore.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStore.java @@ -180,14 +180,22 @@ private static Map initializeReservedRoles() { RoleDescriptor.IndicesPrivileges.builder() .indices(".data-frame-notifications*") .privileges("view_index_metadata", "read").build() - }, null, null, null, MetadataUtils.DEFAULT_RESERVED_METADATA, null)) + }, + new RoleDescriptor.ApplicationResourcePrivileges[] { + RoleDescriptor.ApplicationResourcePrivileges.builder() + .application("kibana-*").resources("*").privileges("reserved_ml").build() + }, null, null, MetadataUtils.DEFAULT_RESERVED_METADATA, null)) .put("data_frame_transforms_user", new RoleDescriptor("data_frame_transforms_user", new String[] { "monitor_data_frame_transforms" }, new RoleDescriptor.IndicesPrivileges[]{ RoleDescriptor.IndicesPrivileges.builder() .indices(".data-frame-notifications*") .privileges("view_index_metadata", "read").build() - }, null, null, null, MetadataUtils.DEFAULT_RESERVED_METADATA, null)) + }, + new RoleDescriptor.ApplicationResourcePrivileges[] { + RoleDescriptor.ApplicationResourcePrivileges.builder() + .application("kibana-*").resources("*").privileges("reserved_ml").build() + }, null, null, MetadataUtils.DEFAULT_RESERVED_METADATA, null)) .put("watcher_admin", new RoleDescriptor("watcher_admin", new String[] { "manage_watcher" }, new RoleDescriptor.IndicesPrivileges[] { RoleDescriptor.IndicesPrivileges.builder().indices(Watch.INDEX, TriggeredWatchStoreField.INDEX_NAME, diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java index 78f9623f4fbb8..bf2c08a913821 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java @@ -1096,6 +1096,18 @@ public void testDataFrameTransformsAdminRole() { assertNoAccessAllowed(role, ".data-frame-internal-1"); // internal use only assertNoAccessAllowed(role, RestrictedIndicesNames.RESTRICTED_NAMES); + + final String kibanaApplicationWithRandomIndex = "kibana-" + randomFrom(randomAlphaOfLengthBetween(8, 24), ".kibana"); + assertThat(role.application().grants( + new ApplicationPrivilege(kibanaApplicationWithRandomIndex, "app-foo", "foo"), "*"), is(false)); + assertThat(role.application().grants( + new ApplicationPrivilege(kibanaApplicationWithRandomIndex, "app-reserved_ml", "reserved_ml"), "*"), is(true)); + + final String otherApplication = "logstash-" + randomAlphaOfLengthBetween(8, 24); + assertThat(role.application().grants( + new ApplicationPrivilege(otherApplication, "app-foo", "foo"), "*"), is(false)); + assertThat(role.application().grants( + new ApplicationPrivilege(otherApplication, "app-reserved_ml", "reserved_ml"), "*"), is(false)); } public void testDataFrameTransformsUserRole() { @@ -1120,6 +1132,18 @@ public void testDataFrameTransformsUserRole() { assertNoAccessAllowed(role, ".data-frame-internal-1"); assertNoAccessAllowed(role, RestrictedIndicesNames.RESTRICTED_NAMES); + + final String kibanaApplicationWithRandomIndex = "kibana-" + randomFrom(randomAlphaOfLengthBetween(8, 24), ".kibana"); + assertThat(role.application().grants( + new ApplicationPrivilege(kibanaApplicationWithRandomIndex, "app-foo", "foo"), "*"), is(false)); + assertThat(role.application().grants( + new ApplicationPrivilege(kibanaApplicationWithRandomIndex, "app-reserved_ml", "reserved_ml"), "*"), is(true)); + + final String otherApplication = "logstash-" + randomAlphaOfLengthBetween(8, 24); + assertThat(role.application().grants( + new ApplicationPrivilege(otherApplication, "app-foo", "foo"), "*"), is(false)); + assertThat(role.application().grants( + new ApplicationPrivilege(otherApplication, "app-reserved_ml", "reserved_ml"), "*"), is(false)); } public void testWatcherAdminRole() { From 931492d496634f49206d56935d838e0aac4112b3 Mon Sep 17 00:00:00 2001 From: James Rodewig Date: Fri, 31 May 2019 11:04:30 -0400 Subject: [PATCH 216/224] [DOCS] Remove unneeded `ifdef::asciidoctor[]` conditionals (#42758) Several `ifdef::asciidoctor` conditionals were added so that AsciiDoc and Asciidoctor doc builds rendered consistently. With https://github.com/elastic/docs/pull/827, Elasticsearch Reference documentation migrated completely to Asciidoctor. We no longer need to support AsciiDoc so we can remove these conditionals. Resolves #41722 --- .../settings/notification-settings.asciidoc | 6 ------ docs/reference/settings/ssl-settings.asciidoc | 15 --------------- 2 files changed, 21 deletions(-) diff --git a/docs/reference/settings/notification-settings.asciidoc b/docs/reference/settings/notification-settings.asciidoc index ac7160bd20aac..a2eb84bc2110e 100644 --- a/docs/reference/settings/notification-settings.asciidoc +++ b/docs/reference/settings/notification-settings.asciidoc @@ -37,14 +37,8 @@ required. For more information, see {xpack-ref}/encrypting-data.html[Encrypting sensitive data in {watcher}]. `xpack.watcher.history.cleaner_service.enabled`:: -ifdef::asciidoctor[] added:[6.3.0,Default changed to `true`.] deprecated:[7.0.0,Watcher history indices are now managed by the `watch-history-ilm-policy` ILM policy] -endif::[] -ifndef::asciidoctor[] -added[6.3.0,Default changed to `true`.] -deprecated[7.0.0,Watcher history indices are now managed by the `watch-history-ilm-policy` ILM policy] -endif::[] + Set to `true` (default) to enable the cleaner service. If this setting is `true`, the `xpack.monitoring.enabled` setting must also be set to `true` with diff --git a/docs/reference/settings/ssl-settings.asciidoc b/docs/reference/settings/ssl-settings.asciidoc index 6d8ffd90b6a37..a9c8576a8c4e9 100644 --- a/docs/reference/settings/ssl-settings.asciidoc +++ b/docs/reference/settings/ssl-settings.asciidoc @@ -38,13 +38,8 @@ endif::verifies[] Supported cipher suites can be found in Oracle's http://docs.oracle.com/javase/8/docs/technotes/guides/security/SunProviders.html[ Java Cryptography Architecture documentation]. Defaults to ``. -ifdef::asciidoctor[] [#{ssl-context}-tls-ssl-key-trusted-certificate-settings] ===== {component} TLS/SSL Key and Trusted Certificate Settings -endif::[] -ifndef::asciidoctor[] -===== anchor:{ssl-context}-tls-ssl-key-trusted-certificate-settings[] {component} TLS/SSL Key and Trusted Certificate Settings -endif::[] The following settings are used to specify a private key, certificate, and the trusted certificates that should be used when communicating over an SSL/TLS connection. @@ -110,13 +105,8 @@ Password to the truststore. +{ssl-prefix}.ssl.truststore.secure_password+ (<>):: Password to the truststore. -ifdef::asciidoctor[] [#{ssl-context}-pkcs12-files] ===== PKCS#12 Files -endif::[] -ifndef::asciidoctor[] -===== anchor:{ssl-context}-pkcs12-files[] PKCS#12 Files -endif::[] {es} can be configured to use PKCS#12 container files (`.p12` or `.pfx` files) that contain the private key, certificate and certificates that should be trusted. @@ -154,13 +144,8 @@ Password to the PKCS#12 file. +{ssl-prefix}.ssl.truststore.secure_password+ (<>):: Password to the PKCS#12 file. -ifdef::asciidoctor[] [#{ssl-context}-pkcs11-tokens] ===== PKCS#11 Tokens -endif::[] -ifndef::asciidoctor[] -===== anchor:{ssl-context}-pkcs11-tokens[] PKCS#11 Tokens -endif::[] {es} can be configured to use a PKCS#11 token that contains the private key, certificate and certificates that should be trusted. From 6dd4d2b7a6bd17232c0a93a5a48a4e23327dea81 Mon Sep 17 00:00:00 2001 From: Marios Trivyzas Date: Fri, 31 May 2019 17:06:06 +0200 Subject: [PATCH 217/224] Remove CommonTermsQuery and cutoff_frequency param (#42654) Remove `common` query and `cutoff_frequency` parameter of `match` and `multi_match` queries. Both have already been deprecated for the next 7.x version. Closes: #37096 --- .../QueryDSLDocumentationTests.java | 8 - .../high-level/query-builders.asciidoc | 1 - docs/reference/migration/migrate_8_0.asciidoc | 2 + .../migration/migrate_8_0/search.asciidoc | 14 +- .../query-dsl/common-terms-query.asciidoc | 306 ------------- .../query-dsl/full-text-queries.asciidoc | 6 - docs/reference/query-dsl/match-query.asciidoc | 47 -- .../query-dsl/multi-match-query.asciidoc | 14 +- docs/reference/sql/functions/search.asciidoc | 9 +- .../search.query/50_queries_with_synonyms.yml | 141 ------ .../percolator/QueryAnalyzer.java | 9 - .../percolator/CandidateQueryTests.java | 9 +- .../percolator/PercolatorQuerySearchIT.java | 14 +- .../percolator/QueryAnalyzerTests.java | 24 +- .../test/search/310_match_bool_prefix.yml | 16 - .../lucene/queries/BlendedTermQuery.java | 49 +- .../queries/ExtendedCommonTermsQuery.java | 80 ---- .../common/lucene/search/Queries.java | 3 - .../index/query/CommonTermsQueryBuilder.java | 417 ------------------ .../index/query/MatchQueryBuilder.java | 55 +-- .../index/query/MultiMatchQueryBuilder.java | 65 +-- .../index/query/QueryBuilders.java | 13 - .../index/search/MatchQuery.java | 43 +- .../index/search/MultiMatchQuery.java | 16 +- .../elasticsearch/search/SearchModule.java | 5 - .../CustomUnifiedHighlighterTests.java | 16 - .../query/CommonTermsQueryBuilderTests.java | 248 ----------- .../query/CommonTermsQueryParserTests.java | 48 -- .../index/query/MatchQueryBuilderTests.java | 13 - .../query/MultiMatchQueryBuilderTests.java | 2 - .../index/search/MultiMatchQueryTests.java | 10 +- .../search/SearchModuleTests.java | 2 +- .../highlight/HighlighterSearchIT.java | 55 --- .../search/query/MultiMatchQueryIT.java | 73 --- .../search/query/SearchQueryIT.java | 93 ---- .../validate/SimpleValidateQueryIT.java | 22 +- .../index/query/commonTerms-query1.json | 11 - .../index/query/commonTerms-query2.json | 11 - .../index/query/commonTerms-query3.json | 9 - .../qa/src/main/resources/docs/docs.csv-spec | 5 +- .../qa/src/main/resources/fulltext.csv-spec | 8 +- .../xpack/sql/querydsl/query/MatchQuery.java | 1 - .../sql/querydsl/query/MultiMatchQuery.java | 1 - 43 files changed, 74 insertions(+), 1920 deletions(-) delete mode 100644 docs/reference/query-dsl/common-terms-query.asciidoc delete mode 100644 server/src/main/java/org/apache/lucene/queries/ExtendedCommonTermsQuery.java delete mode 100644 server/src/main/java/org/elasticsearch/index/query/CommonTermsQueryBuilder.java delete mode 100644 server/src/test/java/org/elasticsearch/index/query/CommonTermsQueryBuilderTests.java delete mode 100644 server/src/test/java/org/elasticsearch/index/query/CommonTermsQueryParserTests.java delete mode 100644 server/src/test/resources/org/elasticsearch/index/query/commonTerms-query1.json delete mode 100644 server/src/test/resources/org/elasticsearch/index/query/commonTerms-query2.json delete mode 100644 server/src/test/resources/org/elasticsearch/index/query/commonTerms-query3.json diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/QueryDSLDocumentationTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/QueryDSLDocumentationTests.java index 51670b29de1b6..3530e63e47e1d 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/QueryDSLDocumentationTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/QueryDSLDocumentationTests.java @@ -42,7 +42,6 @@ import static java.util.Collections.singletonMap; import static org.elasticsearch.index.query.QueryBuilders.boolQuery; import static org.elasticsearch.index.query.QueryBuilders.boostingQuery; -import static org.elasticsearch.index.query.QueryBuilders.commonTermsQuery; import static org.elasticsearch.index.query.QueryBuilders.constantScoreQuery; import static org.elasticsearch.index.query.QueryBuilders.disMaxQuery; import static org.elasticsearch.index.query.QueryBuilders.existsQuery; @@ -106,13 +105,6 @@ public void testBoosting() { // end::boosting } - public void testCommonTerms() { - // tag::common_terms - commonTermsQuery("name", // <1> - "kimchy"); // <2> - // end::common_terms - } - public void testConstantScore() { // tag::constant_score constantScoreQuery( diff --git a/docs/java-rest/high-level/query-builders.asciidoc b/docs/java-rest/high-level/query-builders.asciidoc index 53d9b9af97d12..f845a8c32e602 100644 --- a/docs/java-rest/high-level/query-builders.asciidoc +++ b/docs/java-rest/high-level/query-builders.asciidoc @@ -23,7 +23,6 @@ This page lists all the available search queries with their corresponding `Query | {ref}/query-dsl-match-query-phrase.html[Match Phrase] | {query-ref}/MatchPhraseQueryBuilder.html[MatchPhraseQueryBuilder] | {query-ref}/QueryBuilders.html#matchPhraseQuery-java.lang.String-java.lang.Object-[QueryBuilders.matchPhraseQuery()] | {ref}/query-dsl-match-query-phrase-prefix.html[Match Phrase Prefix] | {query-ref}/MatchPhrasePrefixQueryBuilder.html[MatchPhrasePrefixQueryBuilder] | {query-ref}/QueryBuilders.html#matchPhrasePrefixQuery-java.lang.String-java.lang.Object-[QueryBuilders.matchPhrasePrefixQuery()] | {ref}/query-dsl-multi-match-query.html[Multi Match] | {query-ref}/MultiMatchQueryBuilder.html[MultiMatchQueryBuilder] | {query-ref}/QueryBuilders.html#multiMatchQuery-java.lang.Object-java.lang.String\…-[QueryBuilders.multiMatchQuery()] -| {ref}/query-dsl-common-terms-query.html[Common Terms] | {query-ref}/CommonTermsQueryBuilder.html[CommonTermsQueryBuilder] | {query-ref}/QueryBuilders.html#commonTermsQuery-java.lang.String-java.lang.Object-[QueryBuilders.commonTermsQuery()] | {ref}/query-dsl-query-string-query.html[Query String] | {query-ref}/QueryStringQueryBuilder.html[QueryStringQueryBuilder] | {query-ref}/QueryBuilders.html#queryStringQuery-java.lang.String-[QueryBuilders.queryStringQuery()] | {ref}/query-dsl-simple-query-string-query.html[Simple Query String] | {query-ref}/SimpleQueryStringBuilder.html[SimpleQueryStringBuilder] | {query-ref}/QueryBuilders.html#simpleQueryStringQuery-java.lang.String-[QueryBuilders.simpleQueryStringQuery()] |====== diff --git a/docs/reference/migration/migrate_8_0.asciidoc b/docs/reference/migration/migrate_8_0.asciidoc index b697abf9a9f25..d1a68ba8f7933 100644 --- a/docs/reference/migration/migrate_8_0.asciidoc +++ b/docs/reference/migration/migrate_8_0.asciidoc @@ -24,6 +24,7 @@ coming[8.0.0] * <> * <> * <> +* <> * <> //NOTE: The notable-breaking-changes tagged regions are re-used in the @@ -60,4 +61,5 @@ include::migrate_8_0/node.asciidoc[] include::migrate_8_0/transport.asciidoc[] include::migrate_8_0/http.asciidoc[] include::migrate_8_0/reindex.asciidoc[] +include::migrate_8_0/search.asciidoc[] include::migrate_8_0/settings.asciidoc[] diff --git a/docs/reference/migration/migrate_8_0/search.asciidoc b/docs/reference/migration/migrate_8_0/search.asciidoc index 82886d35bc6a5..6fba2970f593e 100644 --- a/docs/reference/migration/migrate_8_0/search.asciidoc +++ b/docs/reference/migration/migrate_8_0/search.asciidoc @@ -7,4 +7,16 @@ The `/{index}/{type}/_search`, `/{index}/{type}/_msearch`, `/{index}/{type}/_search/template` and `/{index}/{type}/_msearch/template` REST endpoints have been removed in favour of `/{index}/_search`, `/{index}/_msearch`, `/{index}/_search/template` and `/{index}/_msearch/template`, since indexes no longer contain types, these typed endpoints are obsolete.. -The `/{index}/{type}/_termvectors`, `/{index}/{type}/{id}/_termvectors` and `/{index}/{type}/_mtermvectors` REST endpoints have been removed in favour of `/{index}/_termvectors`, `/{index}/{id}/_termvectors` and `/{index}/_mtermvectors`, since indexes no longer contain types, these typed endpoints are obsolete.. \ No newline at end of file +The `/{index}/{type}/_termvectors`, `/{index}/{type}/{id}/_termvectors` and `/{index}/{type}/_mtermvectors` REST endpoints have been removed in favour of `/{index}/_termvectors`, `/{index}/{id}/_termvectors` and `/{index}/_mtermvectors`, since indexes no longer contain types, these typed endpoints are obsolete.. + +[float] +==== Removal of queries + +The `common` query was deprecated in 7.x and has been removed in 8.0. +The same functionality can be achieved by the `match` query if the total number of hits is not tracked. + +[float] +===== Removal of query parameters + +The `cutoff_frequency` parameter was deprecated in 7.x and has been removed in 8.0 from `match` and `multi_match` queries. +The same functionality can be achieved without any configuration provided that the total number of hits is not tracked. diff --git a/docs/reference/query-dsl/common-terms-query.asciidoc b/docs/reference/query-dsl/common-terms-query.asciidoc deleted file mode 100644 index f2d784eb0c4c9..0000000000000 --- a/docs/reference/query-dsl/common-terms-query.asciidoc +++ /dev/null @@ -1,306 +0,0 @@ -[[query-dsl-common-terms-query]] -=== Common Terms Query - -deprecated[7.3.0,"Use <> instead, which skips blocks of documents efficiently, without any configuration, provided that the total number of hits is not tracked."] - -The `common` terms query is a modern alternative to stopwords which -improves the precision and recall of search results (by taking stopwords -into account), without sacrificing performance. - -[float] -==== The problem - -Every term in a query has a cost. A search for `"The brown fox"` -requires three term queries, one for each of `"the"`, `"brown"` and -`"fox"`, all of which are executed against all documents in the index. -The query for `"the"` is likely to match many documents and thus has a -much smaller impact on relevance than the other two terms. - -Previously, the solution to this problem was to ignore terms with high -frequency. By treating `"the"` as a _stopword_, we reduce the index size -and reduce the number of term queries that need to be executed. - -The problem with this approach is that, while stopwords have a small -impact on relevance, they are still important. If we remove stopwords, -we lose precision, (eg we are unable to distinguish between `"happy"` -and `"not happy"`) and we lose recall (eg text like `"The The"` or -`"To be or not to be"` would simply not exist in the index). - -[float] -==== The solution - -The `common` terms query divides the query terms into two groups: more -important (ie _low frequency_ terms) and less important (ie _high -frequency_ terms which would previously have been stopwords). - -First it searches for documents which match the more important terms. -These are the terms which appear in fewer documents and have a greater -impact on relevance. - -Then, it executes a second query for the less important terms -- terms -which appear frequently and have a low impact on relevance. But instead -of calculating the relevance score for *all* matching documents, it only -calculates the `_score` for documents already matched by the first -query. In this way the high frequency terms can improve the relevance -calculation without paying the cost of poor performance. - -If a query consists only of high frequency terms, then a single query is -executed as an `AND` (conjunction) query, in other words all terms are -required. Even though each individual term will match many documents, -the combination of terms narrows down the resultset to only the most -relevant. The single query can also be executed as an `OR` with a -specific -<>, -in this case a high enough value should probably be used. - -Terms are allocated to the high or low frequency groups based on the -`cutoff_frequency`, which can be specified as an absolute frequency -(`>=1`) or as a relative frequency (`0.0 .. 1.0`). (Remember that document -frequencies are computed on a per shard level as explained in the blog post -{defguide}/relevance-is-broken.html[Relevance is broken].) - -Perhaps the most interesting property of this query is that it adapts to -domain specific stopwords automatically. For example, on a video hosting -site, common terms like `"clip"` or `"video"` will automatically behave -as stopwords without the need to maintain a manual list. - -[float] -==== Examples - -In this example, words that have a document frequency greater than 0.1% -(eg `"this"` and `"is"`) will be treated as _common terms_. - -[source,js] --------------------------------------------------- -GET /_search -{ - "query": { - "common": { - "body": { - "query": "this is bonsai cool", - "cutoff_frequency": 0.001 - } - } - } -} --------------------------------------------------- -// CONSOLE -// TEST[warning:Deprecated field [common] used, replaced by [[match] query which can efficiently skip blocks of documents if the total number of hits is not tracked]] - -The number of terms which should match can be controlled with the -<> -(`high_freq`, `low_freq`), `low_freq_operator` (default `"or"`) and -`high_freq_operator` (default `"or"`) parameters. - -For low frequency terms, set the `low_freq_operator` to `"and"` to make -all terms required: - -[source,js] --------------------------------------------------- -GET /_search -{ - "query": { - "common": { - "body": { - "query": "nelly the elephant as a cartoon", - "cutoff_frequency": 0.001, - "low_freq_operator": "and" - } - } - } -} --------------------------------------------------- -// CONSOLE -// TEST[warning:Deprecated field [common] used, replaced by [[match] query which can efficiently skip blocks of documents if the total number of hits is not tracked]] - -which is roughly equivalent to: - -[source,js] --------------------------------------------------- -GET /_search -{ - "query": { - "bool": { - "must": [ - { "term": { "body": "nelly"}}, - { "term": { "body": "elephant"}}, - { "term": { "body": "cartoon"}} - ], - "should": [ - { "term": { "body": "the"}}, - { "term": { "body": "as"}}, - { "term": { "body": "a"}} - ] - } - } -} --------------------------------------------------- -// CONSOLE - -Alternatively use -<> -to specify a minimum number or percentage of low frequency terms which -must be present, for instance: - -[source,js] --------------------------------------------------- -GET /_search -{ - "query": { - "common": { - "body": { - "query": "nelly the elephant as a cartoon", - "cutoff_frequency": 0.001, - "minimum_should_match": 2 - } - } - } -} --------------------------------------------------- -// CONSOLE -// TEST[warning:Deprecated field [common] used, replaced by [[match] query which can efficiently skip blocks of documents if the total number of hits is not tracked]] - -which is roughly equivalent to: - -[source,js] --------------------------------------------------- -GET /_search -{ - "query": { - "bool": { - "must": { - "bool": { - "should": [ - { "term": { "body": "nelly"}}, - { "term": { "body": "elephant"}}, - { "term": { "body": "cartoon"}} - ], - "minimum_should_match": 2 - } - }, - "should": [ - { "term": { "body": "the"}}, - { "term": { "body": "as"}}, - { "term": { "body": "a"}} - ] - } - } -} --------------------------------------------------- -// CONSOLE - -A different -<> -can be applied for low and high frequency terms with the additional -`low_freq` and `high_freq` parameters. Here is an example when providing -additional parameters (note the change in structure): - -[source,js] --------------------------------------------------- -GET /_search -{ - "query": { - "common": { - "body": { - "query": "nelly the elephant not as a cartoon", - "cutoff_frequency": 0.001, - "minimum_should_match": { - "low_freq" : 2, - "high_freq" : 3 - } - } - } - } -} --------------------------------------------------- -// CONSOLE -// TEST[warning:Deprecated field [common] used, replaced by [[match] query which can efficiently skip blocks of documents if the total number of hits is not tracked]] - -which is roughly equivalent to: - -[source,js] --------------------------------------------------- -GET /_search -{ - "query": { - "bool": { - "must": { - "bool": { - "should": [ - { "term": { "body": "nelly"}}, - { "term": { "body": "elephant"}}, - { "term": { "body": "cartoon"}} - ], - "minimum_should_match": 2 - } - }, - "should": { - "bool": { - "should": [ - { "term": { "body": "the"}}, - { "term": { "body": "not"}}, - { "term": { "body": "as"}}, - { "term": { "body": "a"}} - ], - "minimum_should_match": 3 - } - } - } - } -} --------------------------------------------------- -// CONSOLE - -In this case it means the high frequency terms have only an impact on -relevance when there are at least three of them. But the most -interesting use of the -<> -for high frequency terms is when there are only high frequency terms: - -[source,js] --------------------------------------------------- -GET /_search -{ - "query": { - "common": { - "body": { - "query": "how not to be", - "cutoff_frequency": 0.001, - "minimum_should_match": { - "low_freq" : 2, - "high_freq" : 3 - } - } - } - } -} --------------------------------------------------- -// CONSOLE -// TEST[warning:Deprecated field [common] used, replaced by [[match] query which can efficiently skip blocks of documents if the total number of hits is not tracked]] - -which is roughly equivalent to: - -[source,js] --------------------------------------------------- -GET /_search -{ - "query": { - "bool": { - "should": [ - { "term": { "body": "how"}}, - { "term": { "body": "not"}}, - { "term": { "body": "to"}}, - { "term": { "body": "be"}} - ], - "minimum_should_match": "3<50%" - } - } -} --------------------------------------------------- -// CONSOLE - -The high frequency generated query is then slightly less restrictive -than with an `AND`. - -The `common` terms query also supports `boost` and `analyzer` as -parameters. diff --git a/docs/reference/query-dsl/full-text-queries.asciidoc b/docs/reference/query-dsl/full-text-queries.asciidoc index 0af99b61f194f..8fc53bc7e9b8a 100644 --- a/docs/reference/query-dsl/full-text-queries.asciidoc +++ b/docs/reference/query-dsl/full-text-queries.asciidoc @@ -29,10 +29,6 @@ The queries in this group are: The multi-field version of the `match` query. -<>:: - - A more specialized query which gives more preference to uncommon words. - <>:: Supports the compact Lucene <>, @@ -59,8 +55,6 @@ include::match-bool-prefix-query.asciidoc[] include::multi-match-query.asciidoc[] -include::common-terms-query.asciidoc[] - include::query-string-query.asciidoc[] include::simple-query-string-query.asciidoc[] diff --git a/docs/reference/query-dsl/match-query.asciidoc b/docs/reference/query-dsl/match-query.asciidoc index 14fc155cfccae..4b998d82cda24 100644 --- a/docs/reference/query-dsl/match-query.asciidoc +++ b/docs/reference/query-dsl/match-query.asciidoc @@ -119,53 +119,6 @@ GET /_search -------------------------------------------------- // CONSOLE -[[query-dsl-match-query-cutoff]] -===== Cutoff frequency - -deprecated[7.3.0,"This option can be omitted as the <> can skip block of documents efficiently, without any configuration, provided that the total number of hits is not tracked."] - -The match query supports a `cutoff_frequency` that allows -specifying an absolute or relative document frequency where high -frequency terms are moved into an optional subquery and are only scored -if one of the low frequency (below the cutoff) terms in the case of an -`or` operator or all of the low frequency terms in the case of an `and` -operator match. - -This query allows handling `stopwords` dynamically at runtime, is domain -independent and doesn't require a stopword file. It prevents scoring / -iterating high frequency terms and only takes the terms into account if a -more significant / lower frequency term matches a document. Yet, if all -of the query terms are above the given `cutoff_frequency` the query is -automatically transformed into a pure conjunction (`and`) query to -ensure fast execution. - -The `cutoff_frequency` can either be relative to the total number of -documents if in the range `[0..1)` or absolute if greater or equal to -`1.0`. - -Here is an example showing a query composed of stopwords exclusively: - -[source,js] --------------------------------------------------- -GET /_search -{ - "query": { - "match" : { - "message" : { - "query" : "to be or not to be", - "cutoff_frequency" : 0.001 - } - } - } -} --------------------------------------------------- -// CONSOLE -// TEST[warning:Deprecated field [cutoff_frequency] used, replaced by [you can omit this option, the [match] query can skip block of documents efficiently if the total number of hits is not tracked]] - -IMPORTANT: The `cutoff_frequency` option operates on a per-shard-level. This means -that when trying it out on test indexes with low document numbers you -should follow the advice in {defguide}/relevance-is-broken.html[Relevance is broken]. - [[query-dsl-match-query-synonyms]] ===== Synonyms diff --git a/docs/reference/query-dsl/multi-match-query.asciidoc b/docs/reference/query-dsl/multi-match-query.asciidoc index 9f574ed814d3c..8382b6ac6fbb4 100644 --- a/docs/reference/query-dsl/multi-match-query.asciidoc +++ b/docs/reference/query-dsl/multi-match-query.asciidoc @@ -151,8 +151,8 @@ follows: Also, accepts `analyzer`, `boost`, `operator`, `minimum_should_match`, `fuzziness`, `lenient`, `prefix_length`, `max_expansions`, `rewrite`, `zero_terms_query`, - `cutoff_frequency`, `auto_generate_synonyms_phrase_query` and `fuzzy_transpositions`, - as explained in <>. +`auto_generate_synonyms_phrase_query` and `fuzzy_transpositions`, +as explained in <>. [IMPORTANT] [[operator-min]] @@ -247,9 +247,7 @@ The score from each `match` clause is added together, then divided by the number of `match` clauses. Also, accepts `analyzer`, `boost`, `operator`, `minimum_should_match`, -`fuzziness`, `lenient`, `prefix_length`, `max_expansions`, `rewrite`, `zero_terms_query` -and `cutoff_frequency`, as explained in <>, but -*see <>*. +`fuzziness`, `lenient`, `prefix_length`, `max_expansions`, `rewrite`, and `zero_terms_query`. [[type-phrase]] ==== `phrase` and `phrase_prefix` @@ -389,8 +387,7 @@ explanation: +blended("smith", fields: [first_name, last_name]) Also, accepts `analyzer`, `boost`, `operator`, `minimum_should_match`, -`lenient`, `zero_terms_query` and `cutoff_frequency`, as explained in -<>. +`lenient` and `zero_terms_query`. [[cross-field-analysis]] ===== `cross_field` and analysis @@ -554,5 +551,4 @@ explained in <> are supported. The construct term queries, but do not have an effect on the prefix query constructed from the final term. -The `slop` and `cutoff_frequency` parameters are not supported by this query -type. +The `slop` parameter is not supported by this query type. diff --git a/docs/reference/sql/functions/search.asciidoc b/docs/reference/sql/functions/search.asciidoc index 6990f6669d69c..079571a4e37c6 100644 --- a/docs/reference/sql/functions/search.asciidoc +++ b/docs/reference/sql/functions/search.asciidoc @@ -58,17 +58,12 @@ additional configuration parameters (separated by semicolon `;`) for either `mat include-tagged::{sql-specs}/docs/docs.csv-spec[optionalParamsForMatch] ---- -In the more advanced example above, the `cutoff_frequency` parameter allows specifying an absolute or relative document frequency where -high frequency terms are moved into an optional subquery and are only scored if one of the low frequency (below the cutoff) terms in the -case of an `or` operator or all of the low frequency terms in the case of an `and` operator match. More about this you can find in the -<> page. - NOTE: The allowed optional parameters for a single-field `MATCH()` variant (for the `match` {es} query) are: `analyzer`, `auto_generate_synonyms_phrase_query`, -`cutoff_frequency`, `lenient`, `fuzziness`, `fuzzy_transpositions`, `fuzzy_rewrite`, `minimum_should_match`, `operator`, +`lenient`, `fuzziness`, `fuzzy_transpositions`, `fuzzy_rewrite`, `minimum_should_match`, `operator`, `max_expansions`, `prefix_length`. NOTE: The allowed optional parameters for a multi-field `MATCH()` variant (for the `multi_match` {es} query) are: `analyzer`, `auto_generate_synonyms_phrase_query`, -`cutoff_frequency`, `lenient`, `fuzziness`, `fuzzy_transpositions`, `fuzzy_rewrite`, `minimum_should_match`, `operator`, +`lenient`, `fuzziness`, `fuzzy_transpositions`, `fuzzy_rewrite`, `minimum_should_match`, `operator`, `max_expansions`, `prefix_length`, `slop`, `tie_breaker`, `type`. diff --git a/modules/analysis-common/src/test/resources/rest-api-spec/test/search.query/50_queries_with_synonyms.yml b/modules/analysis-common/src/test/resources/rest-api-spec/test/search.query/50_queries_with_synonyms.yml index ce9cc74955729..dca56565e6954 100644 --- a/modules/analysis-common/src/test/resources/rest-api-spec/test/search.query/50_queries_with_synonyms.yml +++ b/modules/analysis-common/src/test/resources/rest-api-spec/test/search.query/50_queries_with_synonyms.yml @@ -1,8 +1,5 @@ --- "Test common terms query with stacked tokens": - - skip: - features: "warnings" - - do: indices.create: index: test @@ -50,135 +47,6 @@ refresh: true - do: - warnings: - - 'Deprecated field [common] used, replaced by [[match] query which can efficiently skip blocks of documents if the total number of hits is not tracked]' - search: - rest_total_hits_as_int: true - body: - query: - common: - field1: - query: the fast brown - cutoff_frequency: 3 - low_freq_operator: or - - match: { hits.total: 3 } - - match: { hits.hits.0._id: "1" } - - match: { hits.hits.1._id: "2" } - - match: { hits.hits.2._id: "3" } - - - do: - warnings: - - 'Deprecated field [common] used, replaced by [[match] query which can efficiently skip blocks of documents if the total number of hits is not tracked]' - search: - rest_total_hits_as_int: true - body: - query: - common: - field1: - query: the fast brown - cutoff_frequency: 3 - low_freq_operator: and - - match: { hits.total: 2 } - - match: { hits.hits.0._id: "1" } - - match: { hits.hits.1._id: "2" } - - - do: - warnings: - - 'Deprecated field [common] used, replaced by [[match] query which can efficiently skip blocks of documents if the total number of hits is not tracked]' - search: - rest_total_hits_as_int: true - body: - query: - common: - field1: - query: the fast brown - cutoff_frequency: 3 - - match: { hits.total: 3 } - - match: { hits.hits.0._id: "1" } - - match: { hits.hits.1._id: "2" } - - match: { hits.hits.2._id: "3" } - - - do: - warnings: - - 'Deprecated field [common] used, replaced by [[match] query which can efficiently skip blocks of documents if the total number of hits is not tracked]' - search: - rest_total_hits_as_int: true - body: - query: - common: - field1: - query: the fast huge fox - minimum_should_match: - low_freq: 3 - - match: { hits.total: 1 } - - match: { hits.hits.0._id: "2" } - - - do: - warnings: - - 'Deprecated field [common] used, replaced by [[match] query which can efficiently skip blocks of documents if the total number of hits is not tracked]' - search: - rest_total_hits_as_int: true - body: - query: - common: - field1: - query: the fast lazy fox brown - cutoff_frequency: 1 - minimum_should_match: - high_freq: 5 - - match: { hits.total: 2 } - - match: { hits.hits.0._id: "2" } - - match: { hits.hits.1._id: "1" } - - - do: - warnings: - - 'Deprecated field [common] used, replaced by [[match] query which can efficiently skip blocks of documents if the total number of hits is not tracked]' - search: - rest_total_hits_as_int: true - body: - query: - common: - field1: - query: the fast lazy fox brown - cutoff_frequency: 1 - minimum_should_match: - high_freq: 6 - - match: { hits.total: 1 } - - match: { hits.hits.0._id: "2" } - - - do: - warnings: - - 'Deprecated field [common] used, replaced by [[match] query which can efficiently skip blocks of documents if the total number of hits is not tracked]' - search: - rest_total_hits_as_int: true - body: - query: - common: - field1: - query: the fast lazy fox brown - cutoff_frequency: 1 - - match: { hits.total: 1 } - - match: { hits.hits.0._id: "2" } - - - do: - warnings: - - 'Deprecated field [common] used, replaced by [[match] query which can efficiently skip blocks of documents if the total number of hits is not tracked]' - search: - rest_total_hits_as_int: true - body: - query: - common: - field1: - query: the quick brown - cutoff_frequency: 3 - - match: { hits.total: 3 } - - match: { hits.hits.0._id: "1" } - - match: { hits.hits.1._id: "2" } - - match: { hits.hits.2._id: "3" } - - - do: - warnings: - - 'Deprecated field [cutoff_frequency] used, replaced by [you can omit this option, the [match] query can skip block of documents efficiently if the total number of hits is not tracked]' search: rest_total_hits_as_int: true body: @@ -186,15 +54,12 @@ match: field1: query: the fast brown - cutoff_frequency: 3 operator: and - match: { hits.total: 2 } - match: { hits.hits.0._id: "1" } - match: { hits.hits.1._id: "2" } - do: - warnings: - - 'Deprecated field [cutoff_frequency] used, replaced by [you can omit this option, the [match] query can skip block of documents efficiently if the total number of hits is not tracked]' search: rest_total_hits_as_int: true body: @@ -202,7 +67,6 @@ match: field1: query: the fast brown - cutoff_frequency: 3 operator: or - match: { hits.total: 3 } - match: { hits.hits.0._id: "1" } @@ -210,8 +74,6 @@ - match: { hits.hits.2._id: "3" } - do: - warnings: - - 'Deprecated field [cutoff_frequency] used, replaced by [you can omit this option, the [match] query can skip block of documents efficiently if the total number of hits is not tracked]' search: rest_total_hits_as_int: true body: @@ -219,7 +81,6 @@ match: field1: query: the fast brown - cutoff_frequency: 3 minimum_should_match: 3 - match: { hits.total: 2 } - match: { hits.hits.0._id: "1" } @@ -227,7 +88,6 @@ - do: warnings: - - 'Deprecated field [cutoff_frequency] used, replaced by [you can omit this option, the [multi_match] query can skip block of documents efficiently if the total number of hits is not tracked]' search: rest_total_hits_as_int: true body: @@ -235,7 +95,6 @@ multi_match: query: the fast brown fields: [ "field1", "field2" ] - cutoff_frequency: 3 operator: and - match: { hits.total: 3 } - match: { hits.hits.0._id: "3" } diff --git a/modules/percolator/src/main/java/org/elasticsearch/percolator/QueryAnalyzer.java b/modules/percolator/src/main/java/org/elasticsearch/percolator/QueryAnalyzer.java index c245e2cb3a20b..d3c4bdedde7d2 100644 --- a/modules/percolator/src/main/java/org/elasticsearch/percolator/QueryAnalyzer.java +++ b/modules/percolator/src/main/java/org/elasticsearch/percolator/QueryAnalyzer.java @@ -22,7 +22,6 @@ import org.apache.lucene.index.PrefixCodedTerms; import org.apache.lucene.index.Term; import org.apache.lucene.queries.BlendedTermQuery; -import org.apache.lucene.queries.CommonTermsQuery; import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BooleanClause.Occur; import org.apache.lucene.search.BooleanQuery; @@ -75,7 +74,6 @@ final class QueryAnalyzer { entry(BoostQuery.class, boostQuery()), entry(TermQuery.class, termQuery()), entry(TermInSetQuery.class, termInSetQuery()), - entry(CommonTermsQuery.class, commonTermsQuery()), entry(BlendedTermQuery.class, blendedTermQuery()), entry(PhraseQuery.class, phraseQuery()), entry(MultiPhraseQuery.class, multiPhraseQuery()), @@ -185,13 +183,6 @@ private static BiFunction synonymQuery() { }; } - private static BiFunction commonTermsQuery() { - return (query, version) -> { - Set terms = ((CommonTermsQuery) query).getTerms().stream().map(QueryExtraction::new).collect(toSet()); - return new Result(false, terms, Math.min(1, terms.size())); - }; - } - private static BiFunction blendedTermQuery() { return (query, version) -> { Set terms = ((BlendedTermQuery) query).getTerms().stream().map(QueryExtraction::new).collect(toSet()); diff --git a/modules/percolator/src/test/java/org/elasticsearch/percolator/CandidateQueryTests.java b/modules/percolator/src/test/java/org/elasticsearch/percolator/CandidateQueryTests.java index b191dd948c574..e487037afaea7 100644 --- a/modules/percolator/src/test/java/org/elasticsearch/percolator/CandidateQueryTests.java +++ b/modules/percolator/src/test/java/org/elasticsearch/percolator/CandidateQueryTests.java @@ -46,7 +46,6 @@ import org.apache.lucene.index.TermsEnum; import org.apache.lucene.index.memory.MemoryIndex; import org.apache.lucene.queries.BlendedTermQuery; -import org.apache.lucene.queries.CommonTermsQuery; import org.apache.lucene.search.BooleanClause.Occur; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.ConstantScoreQuery; @@ -84,8 +83,8 @@ import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.compress.CompressedXContent; -import org.elasticsearch.common.lucene.search.function.FunctionScoreQuery; import org.elasticsearch.common.geo.ShapeRelation; +import org.elasticsearch.common.lucene.search.function.FunctionScoreQuery; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.index.IndexService; @@ -530,12 +529,6 @@ public void testDuelIdBased() throws Exception { public void testDuelSpecificQueries() throws Exception { List documents = new ArrayList<>(); - CommonTermsQuery commonTermsQuery = new CommonTermsQuery(Occur.SHOULD, Occur.SHOULD, 128); - commonTermsQuery.add(new Term("field", "quick")); - commonTermsQuery.add(new Term("field", "brown")); - commonTermsQuery.add(new Term("field", "fox")); - addQuery(commonTermsQuery, documents); - BlendedTermQuery blendedTermQuery = BlendedTermQuery.dismaxBlendedQuery(new Term[]{new Term("field", "quick"), new Term("field", "brown"), new Term("field", "fox")}, 1.0f); addQuery(blendedTermQuery, documents); diff --git a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorQuerySearchIT.java b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorQuerySearchIT.java index ee31a81ae168a..57a6ca15ac593 100644 --- a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorQuerySearchIT.java +++ b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorQuerySearchIT.java @@ -44,7 +44,6 @@ import static org.elasticsearch.common.xcontent.XContentFactory.smileBuilder; import static org.elasticsearch.common.xcontent.XContentFactory.yamlBuilder; import static org.elasticsearch.index.query.QueryBuilders.boolQuery; -import static org.elasticsearch.index.query.QueryBuilders.commonTermsQuery; import static org.elasticsearch.index.query.QueryBuilders.geoBoundingBoxQuery; import static org.elasticsearch.index.query.QueryBuilders.geoDistanceQuery; import static org.elasticsearch.index.query.QueryBuilders.geoPolygonQuery; @@ -356,13 +355,10 @@ public void testPercolatorSpecificQueries() throws Exception { ); client().prepareIndex("test", "type", "1") - .setSource(jsonBuilder().startObject().field("query", commonTermsQuery("field1", "quick brown fox")).endObject()) - .get(); - client().prepareIndex("test", "type", "2") .setSource(jsonBuilder().startObject().field("query", multiMatchQuery("quick brown fox", "field1", "field2") .type(MultiMatchQueryBuilder.Type.CROSS_FIELDS)).endObject()) .get(); - client().prepareIndex("test", "type", "3") + client().prepareIndex("test", "type", "2") .setSource(jsonBuilder().startObject().field("query", spanNearQuery(spanTermQuery("field1", "quick"), 0) .addClause(spanTermQuery("field1", "brown")) @@ -372,7 +368,7 @@ public void testPercolatorSpecificQueries() throws Exception { .get(); client().admin().indices().prepareRefresh().get(); - client().prepareIndex("test", "type", "4") + client().prepareIndex("test", "type", "3") .setSource(jsonBuilder().startObject().field("query", spanNotQuery( spanNearQuery(spanTermQuery("field1", "quick"), 0) @@ -387,7 +383,7 @@ public void testPercolatorSpecificQueries() throws Exception { .get(); // doesn't match - client().prepareIndex("test", "type", "5") + client().prepareIndex("test", "type", "4") .setSource(jsonBuilder().startObject().field("query", spanNotQuery( spanNearQuery(spanTermQuery("field1", "quick"), 0) @@ -410,15 +406,13 @@ public void testPercolatorSpecificQueries() throws Exception { .setQuery(new PercolateQueryBuilder("query", source, XContentType.JSON)) .addSort("_id", SortOrder.ASC) .get(); - assertHitCount(response, 4); + assertHitCount(response, 3); assertThat(response.getHits().getAt(0).getId(), equalTo("1")); assertThat(response.getHits().getAt(0).getScore(), equalTo(Float.NaN)); assertThat(response.getHits().getAt(1).getId(), equalTo("2")); assertThat(response.getHits().getAt(1).getScore(), equalTo(Float.NaN)); assertThat(response.getHits().getAt(2).getId(), equalTo("3")); assertThat(response.getHits().getAt(2).getScore(), equalTo(Float.NaN)); - assertThat(response.getHits().getAt(3).getId(), equalTo("4")); - assertThat(response.getHits().getAt(3).getScore(), equalTo(Float.NaN)); } public void testPercolatorQueryWithHighlighting() throws Exception { diff --git a/modules/percolator/src/test/java/org/elasticsearch/percolator/QueryAnalyzerTests.java b/modules/percolator/src/test/java/org/elasticsearch/percolator/QueryAnalyzerTests.java index c07467187f05f..358e9176e19b5 100644 --- a/modules/percolator/src/test/java/org/elasticsearch/percolator/QueryAnalyzerTests.java +++ b/modules/percolator/src/test/java/org/elasticsearch/percolator/QueryAnalyzerTests.java @@ -28,8 +28,8 @@ import org.apache.lucene.document.SortedNumericDocValuesField; import org.apache.lucene.index.Term; import org.apache.lucene.queries.BlendedTermQuery; -import org.apache.lucene.queries.CommonTermsQuery; import org.apache.lucene.search.BooleanClause; +import org.apache.lucene.search.BooleanClause.Occur; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.BoostQuery; import org.apache.lucene.search.ConstantScoreQuery; @@ -44,7 +44,6 @@ import org.apache.lucene.search.TermInSetQuery; import org.apache.lucene.search.TermQuery; import org.apache.lucene.search.TermRangeQuery; -import org.apache.lucene.search.BooleanClause.Occur; import org.apache.lucene.search.join.QueryBitSetProducer; import org.apache.lucene.search.join.ScoreMode; import org.apache.lucene.search.spans.SpanFirstQuery; @@ -520,27 +519,10 @@ public void testExtractQueryMetadata_boostQuery() { assertThat(terms.get(0).bytes(), equalTo(termQuery1.getTerm().bytes())); } - public void testExtractQueryMetadata_commonTermsQuery() { - CommonTermsQuery commonTermsQuery = new CommonTermsQuery(BooleanClause.Occur.SHOULD, BooleanClause.Occur.SHOULD, 100); - commonTermsQuery.add(new Term("_field", "_term1")); - commonTermsQuery.add(new Term("_field", "_term2")); - Result result = analyze(commonTermsQuery, Version.CURRENT); - assertThat(result.verified, is(false)); - assertThat(result.minimumShouldMatch, equalTo(1)); - List terms = new ArrayList<>(result.extractions); - terms.sort(Comparator.comparing(qt -> qt.term)); - assertThat(terms.size(), equalTo(2)); - assertThat(result.minimumShouldMatch, equalTo(1)); - assertThat(terms.get(0).field(), equalTo("_field")); - assertThat(terms.get(0).text(), equalTo("_term1")); - assertThat(terms.get(1).field(), equalTo("_field")); - assertThat(terms.get(1).text(), equalTo("_term2")); - } - public void testExtractQueryMetadata_blendedTermQuery() { Term[] termsArr = new Term[]{new Term("_field", "_term1"), new Term("_field", "_term2")}; - BlendedTermQuery commonTermsQuery = BlendedTermQuery.dismaxBlendedQuery(termsArr, 1.0f); - Result result = analyze(commonTermsQuery, Version.CURRENT); + BlendedTermQuery blendedTermQuery = BlendedTermQuery.dismaxBlendedQuery(termsArr, 1.0f); + Result result = analyze(blendedTermQuery, Version.CURRENT); assertThat(result.verified, is(true)); assertThat(result.minimumShouldMatch, equalTo(1)); List terms = new ArrayList<>(result.extractions); diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/310_match_bool_prefix.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/310_match_bool_prefix.yml index aa6a5158b4795..f92b0ffda80e3 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search/310_match_bool_prefix.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/310_match_bool_prefix.yml @@ -345,19 +345,3 @@ setup: type: bool_prefix fields: [ "my_field1", "my_field2" ] slop: 1 - ---- -"multi_match multiple fields with cutoff_frequency throws exception": - - - do: - catch: /\[cutoff_frequency\] not allowed for type \[bool_prefix\]/ - search: - rest_total_hits_as_int: true - index: test - body: - query: - multi_match: - query: "brown" - type: bool_prefix - fields: [ "my_field1", "my_field2" ] - cutoff_frequency: 0.001 diff --git a/server/src/main/java/org/apache/lucene/queries/BlendedTermQuery.java b/server/src/main/java/org/apache/lucene/queries/BlendedTermQuery.java index f823f3a142690..5f00631ad6028 100644 --- a/server/src/main/java/org/apache/lucene/queries/BlendedTermQuery.java +++ b/server/src/main/java/org/apache/lucene/queries/BlendedTermQuery.java @@ -22,11 +22,8 @@ import org.apache.lucene.index.IndexReaderContext; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.Term; -import org.apache.lucene.index.TermStates; import org.apache.lucene.index.TermState; -import org.apache.lucene.search.BooleanClause; -import org.apache.lucene.search.BooleanClause.Occur; -import org.apache.lucene.search.BooleanQuery; +import org.apache.lucene.index.TermStates; import org.apache.lucene.search.BoostQuery; import org.apache.lucene.search.DisjunctionMaxQuery; import org.apache.lucene.search.Query; @@ -278,50 +275,6 @@ public int hashCode() { return Objects.hash(classHash(), Arrays.hashCode(equalsTerms())); } - /** - * @deprecated Since max_score optimization landed in 7.0, normal MultiMatchQuery - * will achieve the same result without any configuration. - */ - @Deprecated - public static BlendedTermQuery commonTermsBlendedQuery(Term[] terms, final float[] boosts, final float maxTermFrequency) { - return new BlendedTermQuery(terms, boosts) { - @Override - protected Query topLevelQuery(Term[] terms, TermStates[] ctx, int[] docFreqs, int maxDoc) { - BooleanQuery.Builder highBuilder = new BooleanQuery.Builder(); - BooleanQuery.Builder lowBuilder = new BooleanQuery.Builder(); - for (int i = 0; i < terms.length; i++) { - Query query = new TermQuery(terms[i], ctx[i]); - if (boosts != null && boosts[i] != 1f) { - query = new BoostQuery(query, boosts[i]); - } - if ((maxTermFrequency >= 1f && docFreqs[i] > maxTermFrequency) - || (docFreqs[i] > (int) Math.ceil(maxTermFrequency - * maxDoc))) { - highBuilder.add(query, BooleanClause.Occur.SHOULD); - } else { - lowBuilder.add(query, BooleanClause.Occur.SHOULD); - } - } - BooleanQuery high = highBuilder.build(); - BooleanQuery low = lowBuilder.build(); - if (low.clauses().isEmpty()) { - BooleanQuery.Builder queryBuilder = new BooleanQuery.Builder(); - for (BooleanClause booleanClause : high) { - queryBuilder.add(booleanClause.getQuery(), Occur.MUST); - } - return queryBuilder.build(); - } else if (high.clauses().isEmpty()) { - return low; - } else { - return new BooleanQuery.Builder() - .add(high, BooleanClause.Occur.SHOULD) - .add(low, BooleanClause.Occur.MUST) - .build(); - } - } - }; - } - public static BlendedTermQuery dismaxBlendedQuery(Term[] terms, final float tieBreakerMultiplier) { return dismaxBlendedQuery(terms, null, tieBreakerMultiplier); } diff --git a/server/src/main/java/org/apache/lucene/queries/ExtendedCommonTermsQuery.java b/server/src/main/java/org/apache/lucene/queries/ExtendedCommonTermsQuery.java deleted file mode 100644 index 2d70ed8b90a05..0000000000000 --- a/server/src/main/java/org/apache/lucene/queries/ExtendedCommonTermsQuery.java +++ /dev/null @@ -1,80 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.lucene.queries; - -import org.apache.lucene.search.BooleanClause.Occur; -import org.elasticsearch.common.lucene.search.Queries; - -/** - * Extended version of {@link CommonTermsQuery} that allows to pass in a - * {@code minimumNumberShouldMatch} specification that uses the actual num of high frequent terms - * to calculate the minimum matching terms. - * - * @deprecated Since max_optimization optimization landed in 7.0, normal MatchQuery - * will achieve the same result without any configuration. - */ -@Deprecated -public class ExtendedCommonTermsQuery extends CommonTermsQuery { - - public ExtendedCommonTermsQuery(Occur highFreqOccur, Occur lowFreqOccur, float maxTermFrequency) { - super(highFreqOccur, lowFreqOccur, maxTermFrequency); - } - - private String lowFreqMinNumShouldMatchSpec; - private String highFreqMinNumShouldMatchSpec; - - @Override - protected int calcLowFreqMinimumNumberShouldMatch(int numOptional) { - return calcMinimumNumberShouldMatch(lowFreqMinNumShouldMatchSpec, numOptional); - } - - protected int calcMinimumNumberShouldMatch(String spec, int numOptional) { - if (spec == null) { - return 0; - } - return Queries.calculateMinShouldMatch(numOptional, spec); - } - - @Override - protected int calcHighFreqMinimumNumberShouldMatch(int numOptional) { - return calcMinimumNumberShouldMatch(highFreqMinNumShouldMatchSpec, numOptional); - } - - public void setHighFreqMinimumNumberShouldMatch(String spec) { - this.highFreqMinNumShouldMatchSpec = spec; - } - - public String getHighFreqMinimumNumberShouldMatchSpec() { - return highFreqMinNumShouldMatchSpec; - } - - public void setLowFreqMinimumNumberShouldMatch(String spec) { - this.lowFreqMinNumShouldMatchSpec = spec; - } - - public String getLowFreqMinimumNumberShouldMatchSpec() { - return lowFreqMinNumShouldMatchSpec; - } - - public float getMaxTermFrequency() { - return this.maxTermFrequency; - } - -} diff --git a/server/src/main/java/org/elasticsearch/common/lucene/search/Queries.java b/server/src/main/java/org/elasticsearch/common/lucene/search/Queries.java index 96a0cafc35b11..2e004dfad2e55 100644 --- a/server/src/main/java/org/elasticsearch/common/lucene/search/Queries.java +++ b/server/src/main/java/org/elasticsearch/common/lucene/search/Queries.java @@ -20,7 +20,6 @@ package org.elasticsearch.common.lucene.search; import org.apache.lucene.index.Term; -import org.apache.lucene.queries.ExtendedCommonTermsQuery; import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BooleanClause.Occur; import org.apache.lucene.search.BooleanQuery; @@ -148,8 +147,6 @@ public static Query applyMinimumShouldMatch(BooleanQuery query, @Nullable String public static Query maybeApplyMinimumShouldMatch(Query query, @Nullable String minimumShouldMatch) { if (query instanceof BooleanQuery) { return applyMinimumShouldMatch((BooleanQuery) query, minimumShouldMatch); - } else if (query instanceof ExtendedCommonTermsQuery) { - ((ExtendedCommonTermsQuery)query).setLowFreqMinimumNumberShouldMatch(minimumShouldMatch); } return query; } diff --git a/server/src/main/java/org/elasticsearch/index/query/CommonTermsQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/CommonTermsQueryBuilder.java deleted file mode 100644 index 5b2853ac359c2..0000000000000 --- a/server/src/main/java/org/elasticsearch/index/query/CommonTermsQueryBuilder.java +++ /dev/null @@ -1,417 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.index.query; - -import org.apache.lucene.analysis.Analyzer; -import org.apache.lucene.analysis.TokenStream; -import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; -import org.apache.lucene.index.Term; -import org.apache.lucene.queries.ExtendedCommonTermsQuery; -import org.apache.lucene.search.BooleanClause.Occur; -import org.apache.lucene.search.Query; -import org.apache.lucene.util.BytesRefBuilder; -import org.elasticsearch.common.ParseField; -import org.elasticsearch.common.ParsingException; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.index.mapper.MappedFieldType; - -import java.io.IOException; -import java.util.Objects; - -/** - * CommonTermsQuery query is a query that executes high-frequency terms in a - * optional sub-query to prevent slow queries due to "common" terms like - * stopwords. This query basically builds 2 queries off the {@code #add(Term) - * added} terms where low-frequency terms are added to a required boolean clause - * and high-frequency terms are added to an optional boolean clause. The - * optional clause is only executed if the required "low-frequency' clause - * matches. - * - * @deprecated Since max_optimization optimization landed in 7.0, normal MatchQuery - * will achieve the same result without any configuration. - */ -@Deprecated -public class CommonTermsQueryBuilder extends AbstractQueryBuilder { - - public static final String COMMON_TERMS_QUERY_DEPRECATION_MSG = "[match] query which can efficiently " + - "skip blocks of documents if the total number of hits is not tracked"; - - public static final String NAME = "common"; - - public static final float DEFAULT_CUTOFF_FREQ = 0.01f; - public static final Operator DEFAULT_HIGH_FREQ_OCCUR = Operator.OR; - public static final Operator DEFAULT_LOW_FREQ_OCCUR = Operator.OR; - - private static final ParseField CUTOFF_FREQUENCY_FIELD = new ParseField("cutoff_frequency"); - private static final ParseField MINIMUM_SHOULD_MATCH_FIELD = new ParseField("minimum_should_match"); - private static final ParseField LOW_FREQ_OPERATOR_FIELD = new ParseField("low_freq_operator"); - private static final ParseField HIGH_FREQ_OPERATOR_FIELD = new ParseField("high_freq_operator"); - private static final ParseField DISABLE_COORD_FIELD = new ParseField("disable_coord") - .withAllDeprecated("disable_coord has been removed"); - private static final ParseField ANALYZER_FIELD = new ParseField("analyzer"); - private static final ParseField QUERY_FIELD = new ParseField("query"); - private static final ParseField HIGH_FREQ_FIELD = new ParseField("high_freq"); - private static final ParseField LOW_FREQ_FIELD = new ParseField("low_freq"); - - private final String fieldName; - - private final Object text; - - private Operator highFreqOperator = DEFAULT_HIGH_FREQ_OCCUR; - - private Operator lowFreqOperator = DEFAULT_LOW_FREQ_OCCUR; - - private String analyzer = null; - - private String lowFreqMinimumShouldMatch = null; - - private String highFreqMinimumShouldMatch = null; - - private float cutoffFrequency = DEFAULT_CUTOFF_FREQ; - - /** - * Constructs a new common terms query. - * @deprecated See {@link CommonTermsQueryBuilder} for more details. - */ - @Deprecated - public CommonTermsQueryBuilder(String fieldName, Object text) { - if (Strings.isEmpty(fieldName)) { - throw new IllegalArgumentException("field name is null or empty"); - } - if (text == null) { - throw new IllegalArgumentException("text cannot be null"); - } - this.fieldName = fieldName; - this.text = text; - } - - /** - * Read from a stream. - * @deprecated See {@link CommonTermsQueryBuilder} for more details. - */ - @Deprecated - public CommonTermsQueryBuilder(StreamInput in) throws IOException { - super(in); - fieldName = in.readString(); - text = in.readGenericValue(); - highFreqOperator = Operator.readFromStream(in); - lowFreqOperator = Operator.readFromStream(in); - analyzer = in.readOptionalString(); - lowFreqMinimumShouldMatch = in.readOptionalString(); - highFreqMinimumShouldMatch = in.readOptionalString(); - cutoffFrequency = in.readFloat(); - } - - @Override - protected void doWriteTo(StreamOutput out) throws IOException { - out.writeString(this.fieldName); - out.writeGenericValue(this.text); - highFreqOperator.writeTo(out); - lowFreqOperator.writeTo(out); - out.writeOptionalString(analyzer); - out.writeOptionalString(lowFreqMinimumShouldMatch); - out.writeOptionalString(highFreqMinimumShouldMatch); - out.writeFloat(cutoffFrequency); - } - - public String fieldName() { - return this.fieldName; - } - - public Object value() { - return this.text; - } - - /** - * Sets the operator to use for terms with a high document frequency - * (greater than or equal to {@link #cutoffFrequency(float)}. Defaults to - * {@code AND}. - */ - public CommonTermsQueryBuilder highFreqOperator(Operator operator) { - this.highFreqOperator = (operator == null) ? DEFAULT_HIGH_FREQ_OCCUR : operator; - return this; - } - - public Operator highFreqOperator() { - return highFreqOperator; - } - - /** - * Sets the operator to use for terms with a low document frequency (less - * than {@link #cutoffFrequency(float)}. Defaults to {@code AND}. - */ - public CommonTermsQueryBuilder lowFreqOperator(Operator operator) { - this.lowFreqOperator = (operator == null) ? DEFAULT_LOW_FREQ_OCCUR : operator; - return this; - } - - public Operator lowFreqOperator() { - return lowFreqOperator; - } - - /** - * Explicitly set the analyzer to use. Defaults to use explicit mapping - * config for the field, or, if not set, the default search analyzer. - */ - public CommonTermsQueryBuilder analyzer(String analyzer) { - this.analyzer = analyzer; - return this; - } - - public String analyzer() { - return this.analyzer; - } - - /** - * Sets the cutoff document frequency for high / low frequent terms. A value - * in [0..1] (or absolute number >=1) representing the maximum threshold of - * a terms document frequency to be considered a low frequency term. - * Defaults to - * {@code {@value #DEFAULT_CUTOFF_FREQ}} - */ - public CommonTermsQueryBuilder cutoffFrequency(float cutoffFrequency) { - this.cutoffFrequency = cutoffFrequency; - return this; - } - - public float cutoffFrequency() { - return this.cutoffFrequency; - } - - /** - * Sets the minimum number of high frequent query terms that need to match in order to - * produce a hit when there are no low frequent terms. - */ - public CommonTermsQueryBuilder highFreqMinimumShouldMatch(String highFreqMinimumShouldMatch) { - this.highFreqMinimumShouldMatch = highFreqMinimumShouldMatch; - return this; - } - - public String highFreqMinimumShouldMatch() { - return this.highFreqMinimumShouldMatch; - } - - /** - * Sets the minimum number of low frequent query terms that need to match in order to - * produce a hit. - */ - public CommonTermsQueryBuilder lowFreqMinimumShouldMatch(String lowFreqMinimumShouldMatch) { - this.lowFreqMinimumShouldMatch = lowFreqMinimumShouldMatch; - return this; - } - - public String lowFreqMinimumShouldMatch() { - return this.lowFreqMinimumShouldMatch; - } - - @Override - protected void doXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject(NAME); - builder.startObject(fieldName); - builder.field(QUERY_FIELD.getPreferredName(), text); - builder.field(HIGH_FREQ_OPERATOR_FIELD.getPreferredName(), highFreqOperator.toString()); - builder.field(LOW_FREQ_OPERATOR_FIELD.getPreferredName(), lowFreqOperator.toString()); - if (analyzer != null) { - builder.field(ANALYZER_FIELD.getPreferredName(), analyzer); - } - builder.field(CUTOFF_FREQUENCY_FIELD.getPreferredName(), cutoffFrequency); - if (lowFreqMinimumShouldMatch != null || highFreqMinimumShouldMatch != null) { - builder.startObject(MINIMUM_SHOULD_MATCH_FIELD.getPreferredName()); - if (lowFreqMinimumShouldMatch != null) { - builder.field(LOW_FREQ_FIELD.getPreferredName(), lowFreqMinimumShouldMatch); - } - if (highFreqMinimumShouldMatch != null) { - builder.field(HIGH_FREQ_FIELD.getPreferredName(), highFreqMinimumShouldMatch); - } - builder.endObject(); - } - printBoostAndQueryName(builder); - builder.endObject(); - builder.endObject(); - } - - public static CommonTermsQueryBuilder fromXContent(XContentParser parser) throws IOException { - String fieldName = null; - Object text = null; - float boost = AbstractQueryBuilder.DEFAULT_BOOST; - String analyzer = null; - String lowFreqMinimumShouldMatch = null; - String highFreqMinimumShouldMatch = null; - Operator highFreqOperator = CommonTermsQueryBuilder.DEFAULT_HIGH_FREQ_OCCUR; - Operator lowFreqOperator = CommonTermsQueryBuilder.DEFAULT_LOW_FREQ_OCCUR; - float cutoffFrequency = CommonTermsQueryBuilder.DEFAULT_CUTOFF_FREQ; - String queryName = null; - XContentParser.Token token; - String currentFieldName = null; - while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { - if (token == XContentParser.Token.FIELD_NAME) { - currentFieldName = parser.currentName(); - } else if (token == XContentParser.Token.START_OBJECT) { - throwParsingExceptionOnMultipleFields(NAME, parser.getTokenLocation(), fieldName, currentFieldName); - fieldName = currentFieldName; - while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { - if (token == XContentParser.Token.FIELD_NAME) { - currentFieldName = parser.currentName(); - } else if (token == XContentParser.Token.START_OBJECT) { - if (MINIMUM_SHOULD_MATCH_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { - String innerFieldName = null; - while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { - if (token == XContentParser.Token.FIELD_NAME) { - innerFieldName = parser.currentName(); - } else if (token.isValue()) { - if (LOW_FREQ_FIELD.match(innerFieldName, parser.getDeprecationHandler())) { - lowFreqMinimumShouldMatch = parser.text(); - } else if (HIGH_FREQ_FIELD.match(innerFieldName, parser.getDeprecationHandler())) { - highFreqMinimumShouldMatch = parser.text(); - } else { - throw new ParsingException(parser.getTokenLocation(), "[" + CommonTermsQueryBuilder.NAME + - "] query does not support [" + innerFieldName - + "] for [" + currentFieldName + "]"); - } - } else { - throw new ParsingException(parser.getTokenLocation(), "[" + CommonTermsQueryBuilder.NAME + - "] unexpected token type [" + token - + "] after [" + innerFieldName + "]"); - } - } - } else { - throw new ParsingException(parser.getTokenLocation(), "[" + CommonTermsQueryBuilder.NAME + - "] query does not support [" + currentFieldName + "]"); - } - } else if (token.isValue()) { - if (QUERY_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { - text = parser.objectText(); - } else if (ANALYZER_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { - analyzer = parser.text(); - } else if (DISABLE_COORD_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { - // ignore - } else if (AbstractQueryBuilder.BOOST_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { - boost = parser.floatValue(); - } else if (HIGH_FREQ_OPERATOR_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { - highFreqOperator = Operator.fromString(parser.text()); - } else if (LOW_FREQ_OPERATOR_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { - lowFreqOperator = Operator.fromString(parser.text()); - } else if (MINIMUM_SHOULD_MATCH_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { - lowFreqMinimumShouldMatch = parser.text(); - } else if (CUTOFF_FREQUENCY_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { - cutoffFrequency = parser.floatValue(); - } else if (AbstractQueryBuilder.NAME_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { - queryName = parser.text(); - } else { - throw new ParsingException(parser.getTokenLocation(), "[" + CommonTermsQueryBuilder.NAME + - "] query does not support [" + currentFieldName + "]"); - } - } - } - } else { - throwParsingExceptionOnMultipleFields(NAME, parser.getTokenLocation(), fieldName, parser.currentName()); - fieldName = parser.currentName(); - text = parser.objectText(); - } - } - - return new CommonTermsQueryBuilder(fieldName, text) - .lowFreqMinimumShouldMatch(lowFreqMinimumShouldMatch) - .highFreqMinimumShouldMatch(highFreqMinimumShouldMatch) - .analyzer(analyzer) - .highFreqOperator(highFreqOperator) - .lowFreqOperator(lowFreqOperator) - .cutoffFrequency(cutoffFrequency) - .boost(boost) - .queryName(queryName); - } - - @Override - public String getWriteableName() { - return NAME; - } - - @Override - protected Query doToQuery(QueryShardContext context) throws IOException { - String field; - MappedFieldType fieldType = context.fieldMapper(fieldName); - if (fieldType != null) { - field = fieldType.name(); - } else { - field = fieldName; - } - - Analyzer analyzerObj; - if (analyzer == null) { - if (fieldType != null) { - analyzerObj = context.getSearchAnalyzer(fieldType); - } else { - analyzerObj = context.getMapperService().searchAnalyzer(); - } - } else { - analyzerObj = context.getMapperService().getIndexAnalyzers().get(analyzer); - if (analyzerObj == null) { - throw new QueryShardException(context, "[common] analyzer [" + analyzer + "] not found"); - } - } - - Occur highFreqOccur = highFreqOperator.toBooleanClauseOccur(); - Occur lowFreqOccur = lowFreqOperator.toBooleanClauseOccur(); - - ExtendedCommonTermsQuery commonsQuery = new ExtendedCommonTermsQuery(highFreqOccur, lowFreqOccur, cutoffFrequency); - return parseQueryString(commonsQuery, text, field, analyzerObj, lowFreqMinimumShouldMatch, highFreqMinimumShouldMatch); - } - - private static Query parseQueryString(ExtendedCommonTermsQuery query, Object queryString, String field, Analyzer analyzer, - String lowFreqMinimumShouldMatch, String highFreqMinimumShouldMatch) throws IOException { - // Logic similar to QueryParser#getFieldQuery - try (TokenStream source = analyzer.tokenStream(field, queryString.toString())) { - source.reset(); - CharTermAttribute termAtt = source.addAttribute(CharTermAttribute.class); - BytesRefBuilder builder = new BytesRefBuilder(); - while (source.incrementToken()) { - // UTF-8 - builder.copyChars(termAtt); - query.add(new Term(field, builder.toBytesRef())); - } - } - - query.setLowFreqMinimumNumberShouldMatch(lowFreqMinimumShouldMatch); - query.setHighFreqMinimumNumberShouldMatch(highFreqMinimumShouldMatch); - return query; - } - - @Override - protected int doHashCode() { - return Objects.hash(fieldName, text, highFreqOperator, lowFreqOperator, analyzer, - lowFreqMinimumShouldMatch, highFreqMinimumShouldMatch, cutoffFrequency); - } - - @Override - protected boolean doEquals(CommonTermsQueryBuilder other) { - return Objects.equals(fieldName, other.fieldName) && - Objects.equals(text, other.text) && - Objects.equals(highFreqOperator, other.highFreqOperator) && - Objects.equals(lowFreqOperator, other.lowFreqOperator) && - Objects.equals(analyzer, other.analyzer) && - Objects.equals(lowFreqMinimumShouldMatch, other.lowFreqMinimumShouldMatch) && - Objects.equals(highFreqMinimumShouldMatch, other.highFreqMinimumShouldMatch) && - Objects.equals(cutoffFrequency, other.cutoffFrequency); - } -} diff --git a/server/src/main/java/org/elasticsearch/index/query/MatchQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/MatchQueryBuilder.java index fa93550759324..8d5b6e71927e2 100644 --- a/server/src/main/java/org/elasticsearch/index/query/MatchQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/MatchQueryBuilder.java @@ -21,6 +21,7 @@ import org.apache.lucene.search.FuzzyQuery; import org.apache.lucene.search.Query; +import org.elasticsearch.Version; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.io.stream.StreamInput; @@ -43,17 +44,7 @@ */ public class MatchQueryBuilder extends AbstractQueryBuilder { - private static final String CUTOFF_FREQUENCY_DEPRECATION_MSG = "you can omit this option, " + - "the [match] query can skip block of documents efficiently if the total number of hits is not tracked"; - public static final ParseField ZERO_TERMS_QUERY_FIELD = new ParseField("zero_terms_query"); - /** - * @deprecated Since max_optimization optimization landed in 7.0, normal MatchQuery - * will achieve the same result without any configuration. - */ - @Deprecated - public static final ParseField CUTOFF_FREQUENCY_FIELD = - new ParseField("cutoff_frequency").withAllDeprecated(CUTOFF_FREQUENCY_DEPRECATION_MSG); public static final ParseField LENIENT_FIELD = new ParseField("lenient"); public static final ParseField FUZZY_TRANSPOSITIONS_FIELD = new ParseField("fuzzy_transpositions"); public static final ParseField FUZZY_REWRITE_FIELD = new ParseField("fuzzy_rewrite"); @@ -95,8 +86,6 @@ public class MatchQueryBuilder extends AbstractQueryBuilder { private MatchQuery.ZeroTermsQuery zeroTermsQuery = MatchQuery.DEFAULT_ZERO_TERMS_QUERY; - private Float cutoffFrequency = null; - private boolean autoGenerateSynonymsPhraseQuery = true; /** @@ -131,7 +120,10 @@ public MatchQueryBuilder(StreamInput in) throws IOException { minimumShouldMatch = in.readOptionalString(); fuzzyRewrite = in.readOptionalString(); fuzziness = in.readOptionalWriteable(Fuzziness::new); - cutoffFrequency = in.readOptionalFloat(); + // cutoff_frequency has been removed + if (in.getVersion().before(Version.V_8_0_0)) { + in.readOptionalFloat(); + } autoGenerateSynonymsPhraseQuery = in.readBoolean(); } @@ -150,7 +142,10 @@ protected void doWriteTo(StreamOutput out) throws IOException { out.writeOptionalString(minimumShouldMatch); out.writeOptionalString(fuzzyRewrite); out.writeOptionalWriteable(fuzziness); - out.writeOptionalFloat(cutoffFrequency); + // cutoff_frequency has been removed + if (out.getVersion().before(Version.V_8_0_0)) { + out.writeOptionalFloat(null); + } out.writeBoolean(autoGenerateSynonymsPhraseQuery); } @@ -241,24 +236,6 @@ public int maxExpansions() { return this.maxExpansions; } - /** - * Set a cutoff value in [0..1] (or absolute number >=1) representing the - * maximum threshold of a terms document frequency to be considered a low - * frequency term. - * - * @deprecated see {@link MatchQueryBuilder#CUTOFF_FREQUENCY_FIELD} for more details - */ - @Deprecated - public MatchQueryBuilder cutoffFrequency(float cutoff) { - this.cutoffFrequency = cutoff; - return this; - } - - /** Gets the optional cutoff value, can be {@code null} if not set previously */ - public Float cutoffFrequency() { - return this.cutoffFrequency; - } - /** Sets optional minimumShouldMatch value to apply to the query */ public MatchQueryBuilder minimumShouldMatch(String minimumShouldMatch) { this.minimumShouldMatch = minimumShouldMatch; @@ -375,9 +352,6 @@ public void doXContent(XContentBuilder builder, Params params) throws IOExceptio builder.field(FUZZY_TRANSPOSITIONS_FIELD.getPreferredName(), fuzzyTranspositions); builder.field(LENIENT_FIELD.getPreferredName(), lenient); builder.field(ZERO_TERMS_QUERY_FIELD.getPreferredName(), zeroTermsQuery.toString()); - if (cutoffFrequency != null) { - builder.field(CUTOFF_FREQUENCY_FIELD.getPreferredName(), cutoffFrequency); - } builder.field(GENERATE_SYNONYMS_PHRASE_QUERY.getPreferredName(), autoGenerateSynonymsPhraseQuery); printBoostAndQueryName(builder); builder.endObject(); @@ -402,7 +376,6 @@ protected Query doToQuery(QueryShardContext context) throws IOException { matchQuery.setTranspositions(fuzzyTranspositions); matchQuery.setFuzzyRewriteMethod(QueryParsers.parseRewriteMethod(fuzzyRewrite, null, LoggingDeprecationHandler.INSTANCE)); matchQuery.setLenient(lenient); - matchQuery.setCommonTermsCutoff(cutoffFrequency); matchQuery.setZeroTermsQuery(zeroTermsQuery); matchQuery.setAutoGenerateSynonymsPhraseQuery(autoGenerateSynonymsPhraseQuery); @@ -424,7 +397,6 @@ protected boolean doEquals(MatchQueryBuilder other) { Objects.equals(lenient, other.lenient) && Objects.equals(fuzzyTranspositions, other.fuzzyTranspositions) && Objects.equals(zeroTermsQuery, other.zeroTermsQuery) && - Objects.equals(cutoffFrequency, other.cutoffFrequency) && Objects.equals(autoGenerateSynonymsPhraseQuery, other.autoGenerateSynonymsPhraseQuery); } @@ -432,7 +404,7 @@ protected boolean doEquals(MatchQueryBuilder other) { protected int doHashCode() { return Objects.hash(fieldName, value, operator, analyzer, fuzziness, prefixLength, maxExpansions, minimumShouldMatch, - fuzzyRewrite, lenient, fuzzyTranspositions, zeroTermsQuery, cutoffFrequency, autoGenerateSynonymsPhraseQuery); + fuzzyRewrite, lenient, fuzzyTranspositions, zeroTermsQuery, autoGenerateSynonymsPhraseQuery); } @Override @@ -453,7 +425,6 @@ public static MatchQueryBuilder fromXContent(XContentParser parser) throws IOExc boolean fuzzyTranspositions = FuzzyQuery.defaultTranspositions; String fuzzyRewrite = null; boolean lenient = MatchQuery.DEFAULT_LENIENCY; - Float cutOffFrequency = null; ZeroTermsQuery zeroTermsQuery = MatchQuery.DEFAULT_ZERO_TERMS_QUERY; boolean autoGenerateSynonymsPhraseQuery = true; String queryName = null; @@ -491,8 +462,6 @@ public static MatchQueryBuilder fromXContent(XContentParser parser) throws IOExc fuzzyTranspositions = parser.booleanValue(); } else if (LENIENT_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { lenient = parser.booleanValue(); - } else if (CUTOFF_FREQUENCY_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { - cutOffFrequency = parser.floatValue(); } else if (ZERO_TERMS_QUERY_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { String zeroTermsValue = parser.text(); if ("none".equalsIgnoreCase(zeroTermsValue)) { @@ -539,14 +508,10 @@ public static MatchQueryBuilder fromXContent(XContentParser parser) throws IOExc matchQuery.fuzzyTranspositions(fuzzyTranspositions); matchQuery.maxExpansions(maxExpansion); matchQuery.lenient(lenient); - if (cutOffFrequency != null) { - matchQuery.cutoffFrequency(cutOffFrequency); - } matchQuery.zeroTermsQuery(zeroTermsQuery); matchQuery.autoGenerateSynonymsPhraseQuery(autoGenerateSynonymsPhraseQuery); matchQuery.queryName(queryName); matchQuery.boost(boost); return matchQuery; } - } diff --git a/server/src/main/java/org/elasticsearch/index/query/MultiMatchQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/MultiMatchQueryBuilder.java index fb400a9d3fc75..07f7ae4b79398 100644 --- a/server/src/main/java/org/elasticsearch/index/query/MultiMatchQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/MultiMatchQueryBuilder.java @@ -22,6 +22,7 @@ import org.apache.lucene.search.FuzzyQuery; import org.apache.lucene.search.Query; import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.Version; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.Strings; @@ -51,9 +52,6 @@ */ public class MultiMatchQueryBuilder extends AbstractQueryBuilder { - private static final String CUTOFF_FREQUENCY_DEPRECATION_MSG = "you can omit this option, " + - "the [multi_match] query can skip block of documents efficiently if the total number of hits is not tracked"; - public static final String NAME = "multi_match"; public static final MultiMatchQueryBuilder.Type DEFAULT_TYPE = MultiMatchQueryBuilder.Type.BEST_FIELDS; @@ -67,8 +65,6 @@ public class MultiMatchQueryBuilder extends AbstractQueryBuilder blendedFields) { - return blendTerms(context, new BytesRef[] {value}, commonTermsCutoff, tieBreaker, lenient, blendedFields); + return blendTerms(context, new BytesRef[] {value}, tieBreaker, lenient, blendedFields); } - static Query blendTerms(QueryShardContext context, BytesRef[] values, Float commonTermsCutoff, float tieBreaker, + static Query blendTerms(QueryShardContext context, BytesRef[] values, float tieBreaker, boolean lenient, List blendedFields) { List queries = new ArrayList<>(); @@ -276,11 +276,7 @@ static Query blendTerms(QueryShardContext context, BytesRef[] values, Float comm if (i > 0) { terms = Arrays.copyOf(terms, i); blendedBoost = Arrays.copyOf(blendedBoost, i); - if (commonTermsCutoff != null) { - queries.add(BlendedTermQuery.commonTermsBlendedQuery(terms, blendedBoost, commonTermsCutoff)); - } else { - queries.add(BlendedTermQuery.dismaxBlendedQuery(terms, blendedBoost, tieBreaker)); - } + queries.add(BlendedTermQuery.dismaxBlendedQuery(terms, blendedBoost, tieBreaker)); } if (queries.size() == 1) { return queries.get(0); diff --git a/server/src/main/java/org/elasticsearch/search/SearchModule.java b/server/src/main/java/org/elasticsearch/search/SearchModule.java index b0447d5781dfc..3e36def038e44 100644 --- a/server/src/main/java/org/elasticsearch/search/SearchModule.java +++ b/server/src/main/java/org/elasticsearch/search/SearchModule.java @@ -21,7 +21,6 @@ import org.apache.lucene.search.BooleanQuery; import org.elasticsearch.common.NamedRegistry; -import org.elasticsearch.common.ParseField; import org.elasticsearch.common.geo.GeoShapeType; import org.elasticsearch.common.geo.ShapesAvailability; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; @@ -34,7 +33,6 @@ import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.query.BoolQueryBuilder; import org.elasticsearch.index.query.BoostingQueryBuilder; -import org.elasticsearch.index.query.CommonTermsQueryBuilder; import org.elasticsearch.index.query.ConstantScoreQueryBuilder; import org.elasticsearch.index.query.DisMaxQueryBuilder; import org.elasticsearch.index.query.DistanceFeatureQueryBuilder; @@ -272,7 +270,6 @@ import static java.util.Collections.unmodifiableMap; import static java.util.Objects.requireNonNull; -import static org.elasticsearch.index.query.CommonTermsQueryBuilder.COMMON_TERMS_QUERY_DEPRECATION_MSG; import static org.elasticsearch.index.query.SpanNearQueryBuilder.SpanGapQueryBuilder; /** @@ -769,8 +766,6 @@ private void registerQueryParsers(List plugins) { registerQuery(new QuerySpec<>(MoreLikeThisQueryBuilder.NAME, MoreLikeThisQueryBuilder::new, MoreLikeThisQueryBuilder::fromXContent)); registerQuery(new QuerySpec<>(WrapperQueryBuilder.NAME, WrapperQueryBuilder::new, WrapperQueryBuilder::fromXContent)); - registerQuery(new QuerySpec<>(new ParseField(CommonTermsQueryBuilder.NAME).withAllDeprecated(COMMON_TERMS_QUERY_DEPRECATION_MSG), - CommonTermsQueryBuilder::new, CommonTermsQueryBuilder::fromXContent)); registerQuery( new QuerySpec<>(SpanMultiTermQueryBuilder.NAME, SpanMultiTermQueryBuilder::new, SpanMultiTermQueryBuilder::fromXContent)); registerQuery(new QuerySpec<>(FunctionScoreQueryBuilder.NAME, FunctionScoreQueryBuilder::new, diff --git a/server/src/test/java/org/apache/lucene/search/uhighlight/CustomUnifiedHighlighterTests.java b/server/src/test/java/org/apache/lucene/search/uhighlight/CustomUnifiedHighlighterTests.java index 4e4b04d1ff19c..3c24dc2d42b82 100644 --- a/server/src/test/java/org/apache/lucene/search/uhighlight/CustomUnifiedHighlighterTests.java +++ b/server/src/test/java/org/apache/lucene/search/uhighlight/CustomUnifiedHighlighterTests.java @@ -30,7 +30,6 @@ import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.index.RandomIndexWriter; import org.apache.lucene.index.Term; -import org.apache.lucene.queries.CommonTermsQuery; import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.IndexSearcher; @@ -147,21 +146,6 @@ public void testMultiPhrasePrefixQuery() throws Exception { BreakIterator.getSentenceInstance(Locale.ROOT), 0, outputs); } - public void testCommonTermsQuery() throws Exception { - final String[] inputs = { - "The quick brown fox." - }; - final String[] outputs = { - "The quick brown fox." - }; - CommonTermsQuery query = new CommonTermsQuery(BooleanClause.Occur.SHOULD, BooleanClause.Occur.SHOULD, 128); - query.add(new Term("text", "quick")); - query.add(new Term("text", "brown")); - query.add(new Term("text", "fox")); - assertHighlightOneDoc("text", inputs, new StandardAnalyzer(), query, Locale.ROOT, - BreakIterator.getSentenceInstance(Locale.ROOT), 0, outputs); - } - public void testSentenceBoundedBreakIterator() throws Exception { final String[] inputs = { "The quick brown fox in a long sentence with another quick brown fox. " + diff --git a/server/src/test/java/org/elasticsearch/index/query/CommonTermsQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/CommonTermsQueryBuilderTests.java deleted file mode 100644 index d02b60c52d531..0000000000000 --- a/server/src/test/java/org/elasticsearch/index/query/CommonTermsQueryBuilderTests.java +++ /dev/null @@ -1,248 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.index.query; - -import org.apache.lucene.index.Term; -import org.apache.lucene.queries.ExtendedCommonTermsQuery; -import org.apache.lucene.search.Query; -import org.elasticsearch.common.ParsingException; -import org.elasticsearch.search.internal.SearchContext; -import org.elasticsearch.test.AbstractQueryTestCase; - -import java.io.IOException; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - -import static org.elasticsearch.index.query.QueryBuilders.commonTermsQuery; -import static org.elasticsearch.test.StreamsUtils.copyToStringFromClasspath; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.instanceOf; -import static org.hamcrest.Matchers.nullValue; - -public class CommonTermsQueryBuilderTests extends AbstractQueryTestCase { - - @Override - protected CommonTermsQueryBuilder doCreateTestQueryBuilder() { - int numberOfTerms = randomIntBetween(0, 10); - StringBuilder text = new StringBuilder(""); - for (int i = 0; i < numberOfTerms; i++) { - text.append(randomAlphaOfLengthBetween(1, 10)).append(" "); - } - - String fieldName = randomFrom(STRING_FIELD_NAME, - STRING_ALIAS_FIELD_NAME, - randomAlphaOfLengthBetween(1, 10)); - CommonTermsQueryBuilder query = new CommonTermsQueryBuilder(fieldName, text.toString()); - - if (randomBoolean()) { - query.cutoffFrequency(randomIntBetween(1, 10)); - } - - if (randomBoolean()) { - query.lowFreqOperator(randomFrom(Operator.values())); - } - - // number of low frequency terms that must match - if (randomBoolean()) { - query.lowFreqMinimumShouldMatch("" + randomIntBetween(1, 5)); - } - - if (randomBoolean()) { - query.highFreqOperator(randomFrom(Operator.values())); - } - - // number of high frequency terms that must match - if (randomBoolean()) { - query.highFreqMinimumShouldMatch("" + randomIntBetween(1, 5)); - } - - if (randomBoolean()) { - query.analyzer(randomAnalyzer()); - } - - return query; - } - - @Override - protected Map getAlternateVersions() { - Map alternateVersions = new HashMap<>(); - CommonTermsQueryBuilder commonTermsQuery = new CommonTermsQueryBuilder(randomAlphaOfLengthBetween(1, 10), - randomAlphaOfLengthBetween(1, 10)); - String contentString = "{\n" + - " \"common\" : {\n" + - " \"" + commonTermsQuery.fieldName() + "\" : \"" + commonTermsQuery.value() + "\"\n" + - " }\n" + - "}"; - alternateVersions.put(contentString, commonTermsQuery); - return alternateVersions; - } - - @Override - protected void doAssertLuceneQuery(CommonTermsQueryBuilder queryBuilder, Query query, SearchContext context) throws IOException { - assertThat(query, instanceOf(ExtendedCommonTermsQuery.class)); - ExtendedCommonTermsQuery extendedCommonTermsQuery = (ExtendedCommonTermsQuery) query; - - List terms = extendedCommonTermsQuery.getTerms(); - if (!terms.isEmpty()) { - String expectedFieldName = expectedFieldName(queryBuilder.fieldName()); - String actualFieldName = terms.iterator().next().field(); - assertThat(actualFieldName, equalTo(expectedFieldName)); - } - - assertThat(extendedCommonTermsQuery.getHighFreqMinimumNumberShouldMatchSpec(), equalTo(queryBuilder.highFreqMinimumShouldMatch())); - assertThat(extendedCommonTermsQuery.getLowFreqMinimumNumberShouldMatchSpec(), equalTo(queryBuilder.lowFreqMinimumShouldMatch())); - } - - @Override - public void testUnknownField() throws IOException { - super.testUnknownField(); - assertDeprecationWarning(); - } - - @Override - public void testUnknownObjectException() throws IOException { - super.testUnknownObjectException(); - assertDeprecationWarning(); - } - - @Override - public void testFromXContent() throws IOException { - super.testFromXContent(); - assertDeprecationWarning(); - } - - @Override - public void testValidOutput() throws IOException { - super.testValidOutput(); - assertDeprecationWarning(); - } - - public void testIllegalArguments() { - IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> new CommonTermsQueryBuilder(null, "text")); - assertEquals("field name is null or empty", e.getMessage()); - e = expectThrows(IllegalArgumentException.class, () -> new CommonTermsQueryBuilder("", "text")); - assertEquals("field name is null or empty", e.getMessage()); - e = expectThrows(IllegalArgumentException.class, () -> new CommonTermsQueryBuilder("fieldName", null)); - assertEquals("text cannot be null", e.getMessage()); - } - - public void testFromJson() throws IOException { - String query = - "{\n" + - " \"common\" : {\n" + - " \"body\" : {\n" + - " \"query\" : \"nelly the elephant not as a cartoon\",\n" + - " \"high_freq_operator\" : \"AND\",\n" + - " \"low_freq_operator\" : \"OR\",\n" + - " \"cutoff_frequency\" : 0.001,\n" + - " \"minimum_should_match\" : {\n" + - " \"low_freq\" : \"2\",\n" + - " \"high_freq\" : \"3\"\n" + - " },\n" + - " \"boost\" : 42.0\n" + - " }\n" + - " }\n" + - "}"; - - CommonTermsQueryBuilder queryBuilder = (CommonTermsQueryBuilder) parseQuery(query); - checkGeneratedJson(query, queryBuilder); - - assertEquals(query, 42, queryBuilder.boost, 0.00001); - assertEquals(query, 0.001, queryBuilder.cutoffFrequency(), 0.0001); - assertEquals(query, Operator.OR, queryBuilder.lowFreqOperator()); - assertEquals(query, Operator.AND, queryBuilder.highFreqOperator()); - assertEquals(query, "nelly the elephant not as a cartoon", queryBuilder.value()); - - assertDeprecationWarning(); - } - - public void testCommonTermsQuery1() throws IOException { - String query = copyToStringFromClasspath("/org/elasticsearch/index/query/commonTerms-query1.json"); - Query parsedQuery = parseQuery(query).toQuery(createShardContext()); - assertThat(parsedQuery, instanceOf(ExtendedCommonTermsQuery.class)); - ExtendedCommonTermsQuery ectQuery = (ExtendedCommonTermsQuery) parsedQuery; - assertThat(ectQuery.getHighFreqMinimumNumberShouldMatchSpec(), nullValue()); - assertThat(ectQuery.getLowFreqMinimumNumberShouldMatchSpec(), equalTo("2")); - - assertDeprecationWarning(); - } - - public void testCommonTermsQuery2() throws IOException { - String query = copyToStringFromClasspath("/org/elasticsearch/index/query/commonTerms-query2.json"); - Query parsedQuery = parseQuery(query).toQuery(createShardContext()); - assertThat(parsedQuery, instanceOf(ExtendedCommonTermsQuery.class)); - ExtendedCommonTermsQuery ectQuery = (ExtendedCommonTermsQuery) parsedQuery; - assertThat(ectQuery.getHighFreqMinimumNumberShouldMatchSpec(), equalTo("50%")); - assertThat(ectQuery.getLowFreqMinimumNumberShouldMatchSpec(), equalTo("5<20%")); - - assertDeprecationWarning(); - } - - public void testCommonTermsQuery3() throws IOException { - String query = copyToStringFromClasspath("/org/elasticsearch/index/query/commonTerms-query3.json"); - Query parsedQuery = parseQuery(query).toQuery(createShardContext()); - assertThat(parsedQuery, instanceOf(ExtendedCommonTermsQuery.class)); - ExtendedCommonTermsQuery ectQuery = (ExtendedCommonTermsQuery) parsedQuery; - assertThat(ectQuery.getHighFreqMinimumNumberShouldMatchSpec(), nullValue()); - assertThat(ectQuery.getLowFreqMinimumNumberShouldMatchSpec(), equalTo("2")); - - assertDeprecationWarning(); - } - - // see #11730 - public void testCommonTermsQuery4() throws IOException { - Query parsedQuery = parseQuery(commonTermsQuery("field", "text")).toQuery(createShardContext()); - assertThat(parsedQuery, instanceOf(ExtendedCommonTermsQuery.class)); - - assertDeprecationWarning(); - } - - public void testParseFailsWithMultipleFields() throws IOException { - String json = "{\n" + - " \"common\" : {\n" + - " \"message1\" : {\n" + - " \"query\" : \"nelly the elephant not as a cartoon\"\n" + - " },\n" + - " \"message2\" : {\n" + - " \"query\" : \"nelly the elephant not as a cartoon\"\n" + - " }\n" + - " }\n" + - "}"; - - ParsingException e = expectThrows(ParsingException.class, () -> parseQuery(json)); - assertEquals("[common] query doesn't support multiple fields, found [message1] and [message2]", e.getMessage()); - - String shortJson = "{\n" + - " \"common\" : {\n" + - " \"message1\" : \"nelly the elephant not as a cartoon\",\n" + - " \"message2\" : \"nelly the elephant not as a cartoon\"\n" + - " }\n" + - "}"; - e = expectThrows(ParsingException.class, () -> parseQuery(shortJson)); - assertEquals("[common] query doesn't support multiple fields, found [message1] and [message2]", e.getMessage()); - - assertDeprecationWarning(); - } - - private void assertDeprecationWarning() { - assertWarnings("Deprecated field [common] used, replaced by [" + CommonTermsQueryBuilder.COMMON_TERMS_QUERY_DEPRECATION_MSG + "]"); - } -} diff --git a/server/src/test/java/org/elasticsearch/index/query/CommonTermsQueryParserTests.java b/server/src/test/java/org/elasticsearch/index/query/CommonTermsQueryParserTests.java deleted file mode 100644 index f393683a10f7f..0000000000000 --- a/server/src/test/java/org/elasticsearch/index/query/CommonTermsQueryParserTests.java +++ /dev/null @@ -1,48 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.index.query; - -import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.test.ESSingleNodeTestCase; - -public class CommonTermsQueryParserTests extends ESSingleNodeTestCase { - public void testWhenParsedQueryIsNullNoNullPointerExceptionIsThrown() { - final String index = "test-index"; - final String type = "test-type"; - client() - .admin() - .indices() - .prepareCreate(index) - .addMapping(type, "name", "type=text,analyzer=stop") - .execute() - .actionGet(); - ensureGreen(); - - CommonTermsQueryBuilder commonTermsQueryBuilder = - new CommonTermsQueryBuilder("name", "the").queryName("query-name"); - - // the named query parses to null; we are testing this does not cause a NullPointerException - SearchResponse response = - client().prepareSearch(index).setQuery(commonTermsQueryBuilder).execute().actionGet(); - - assertNotNull(response); - assertEquals(response.getHits().getHits().length, 0); - } -} diff --git a/server/src/test/java/org/elasticsearch/index/query/MatchQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/MatchQueryBuilderTests.java index f79bbb86242d9..76ea5aa9dc6a0 100644 --- a/server/src/test/java/org/elasticsearch/index/query/MatchQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/MatchQueryBuilderTests.java @@ -23,7 +23,6 @@ import org.apache.lucene.analysis.CannedBinaryTokenStream; import org.apache.lucene.analysis.MockSynonymAnalyzer; import org.apache.lucene.index.Term; -import org.apache.lucene.queries.ExtendedCommonTermsQuery; import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.FuzzyQuery; @@ -181,18 +180,6 @@ protected void doAssertLuceneQuery(MatchQueryBuilder queryBuilder, Query query, } } - if (query instanceof ExtendedCommonTermsQuery) { - assertTrue(queryBuilder.cutoffFrequency() != null); - ExtendedCommonTermsQuery ectq = (ExtendedCommonTermsQuery) query; - List terms = ectq.getTerms(); - if (!terms.isEmpty()) { - Term term = terms.iterator().next(); - String expectedFieldName = expectedFieldName(queryBuilder.fieldName()); - assertThat(term.field(), equalTo(expectedFieldName)); - } - assertEquals(queryBuilder.cutoffFrequency(), ectq.getMaxTermFrequency(), Float.MIN_VALUE); - } - if (query instanceof FuzzyQuery) { assertTrue(queryBuilder.fuzziness() != null); FuzzyQuery fuzzyQuery = (FuzzyQuery) query; diff --git a/server/src/test/java/org/elasticsearch/index/query/MultiMatchQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/MultiMatchQueryBuilderTests.java index 970a4c3a37ecb..cd77a940a80a1 100644 --- a/server/src/test/java/org/elasticsearch/index/query/MultiMatchQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/MultiMatchQueryBuilderTests.java @@ -20,7 +20,6 @@ package org.elasticsearch.index.query; import org.apache.lucene.index.Term; -import org.apache.lucene.queries.ExtendedCommonTermsQuery; import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.BoostQuery; @@ -171,7 +170,6 @@ protected void doAssertLuceneQuery(MultiMatchQueryBuilder queryBuilder, Query qu instanceOf(FuzzyQuery.class), instanceOf(MultiPhrasePrefixQuery.class), instanceOf(MatchAllDocsQuery.class), - instanceOf(ExtendedCommonTermsQuery.class), instanceOf(MatchNoDocsQuery.class), instanceOf(PhraseQuery.class), instanceOf(PointRangeQuery.class), diff --git a/server/src/test/java/org/elasticsearch/index/search/MultiMatchQueryTests.java b/server/src/test/java/org/elasticsearch/index/search/MultiMatchQueryTests.java index 58baadd83573d..30438c49998ab 100644 --- a/server/src/test/java/org/elasticsearch/index/search/MultiMatchQueryTests.java +++ b/server/src/test/java/org/elasticsearch/index/search/MultiMatchQueryTests.java @@ -129,7 +129,7 @@ public void testBlendTerms() { Query expected = BlendedTermQuery.dismaxBlendedQuery(terms, boosts, 1.0f); Query actual = MultiMatchQuery.blendTerm( indexService.newQueryShardContext(randomInt(20), null, () -> { throw new UnsupportedOperationException(); }, null), - new BytesRef("baz"), null, 1f, false, Arrays.asList(new FieldAndBoost(ft1, 2), new FieldAndBoost(ft2, 3))); + new BytesRef("baz"), 1f, false, Arrays.asList(new FieldAndBoost(ft1, 2), new FieldAndBoost(ft2, 3))); assertEquals(expected, actual); } @@ -145,7 +145,7 @@ public void testBlendTermsWithFieldBoosts() { Query expected = BlendedTermQuery.dismaxBlendedQuery(terms, boosts, 1.0f); Query actual = MultiMatchQuery.blendTerm( indexService.newQueryShardContext(randomInt(20), null, () -> { throw new UnsupportedOperationException(); }, null), - new BytesRef("baz"), null, 1f, false, Arrays.asList(new FieldAndBoost(ft1, 2), new FieldAndBoost(ft2, 3))); + new BytesRef("baz"), 1f, false, Arrays.asList(new FieldAndBoost(ft1, 2), new FieldAndBoost(ft2, 3))); assertEquals(expected, actual); } @@ -167,7 +167,7 @@ public Query termQuery(Object value, QueryShardContext context) { ), 1f); Query actual = MultiMatchQuery.blendTerm( indexService.newQueryShardContext(randomInt(20), null, () -> { throw new UnsupportedOperationException(); }, null), - new BytesRef("baz"), null, 1f, true, Arrays.asList(new FieldAndBoost(ft1, 2), new FieldAndBoost(ft2, 3))); + new BytesRef("baz"), 1f, true, Arrays.asList(new FieldAndBoost(ft1, 2), new FieldAndBoost(ft2, 3))); assertEquals(expected, actual); } @@ -181,7 +181,7 @@ public Query termQuery(Object value, QueryShardContext context) { ft.setName("bar"); expectThrows(IllegalArgumentException.class, () -> MultiMatchQuery.blendTerm( indexService.newQueryShardContext(randomInt(20), null, () -> { throw new UnsupportedOperationException(); }, null), - new BytesRef("baz"), null, 1f, false, Arrays.asList(new FieldAndBoost(ft, 1)))); + new BytesRef("baz"), 1f, false, Arrays.asList(new FieldAndBoost(ft, 1)))); } public void testBlendNoTermQuery() { @@ -205,7 +205,7 @@ public Query termQuery(Object value, QueryShardContext context) { ), 1.0f); Query actual = MultiMatchQuery.blendTerm( indexService.newQueryShardContext(randomInt(20), null, () -> { throw new UnsupportedOperationException(); }, null), - new BytesRef("baz"), null, 1f, false, Arrays.asList(new FieldAndBoost(ft1, 2), new FieldAndBoost(ft2, 3))); + new BytesRef("baz"), 1f, false, Arrays.asList(new FieldAndBoost(ft1, 2), new FieldAndBoost(ft2, 3))); assertEquals(expected, actual); } diff --git a/server/src/test/java/org/elasticsearch/search/SearchModuleTests.java b/server/src/test/java/org/elasticsearch/search/SearchModuleTests.java index 2e019d1e2c432..512c80d5c0ef8 100644 --- a/server/src/test/java/org/elasticsearch/search/SearchModuleTests.java +++ b/server/src/test/java/org/elasticsearch/search/SearchModuleTests.java @@ -354,7 +354,7 @@ public List> getRescorers() { }; //add here deprecated queries to make sure we log a deprecation warnings when they are used - private static final String[] DEPRECATED_QUERIES = new String[] {"common"}; + private static final String[] DEPRECATED_QUERIES = new String[] {}; /** * Dummy test {@link AggregationBuilder} used to test registering aggregation builders. diff --git a/server/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlighterSearchIT.java b/server/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlighterSearchIT.java index f5e601fd97abd..855eb7286010c 100644 --- a/server/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlighterSearchIT.java +++ b/server/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlighterSearchIT.java @@ -19,7 +19,6 @@ package org.elasticsearch.search.fetch.subphase.highlight; import com.carrotsearch.randomizedtesting.generators.RandomPicks; - import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.MockAnalyzer; import org.apache.lucene.analysis.MockTokenizer; @@ -81,7 +80,6 @@ import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.index.query.QueryBuilders.boolQuery; import static org.elasticsearch.index.query.QueryBuilders.boostingQuery; -import static org.elasticsearch.index.query.QueryBuilders.commonTermsQuery; import static org.elasticsearch.index.query.QueryBuilders.constantScoreQuery; import static org.elasticsearch.index.query.QueryBuilders.existsQuery; import static org.elasticsearch.index.query.QueryBuilders.fuzzyQuery; @@ -1440,41 +1438,6 @@ public void testBoostingQueryTermVector() throws IOException { assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("The quick brown fox jumps over the lazy dog")); } - public void testCommonTermsQuery() { - createIndex("test"); - ensureGreen(); - - client().prepareIndex("test", "type1") - .setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy dog") - .get(); - refresh(); - - logger.info("--> highlighting and searching on field1"); - SearchSourceBuilder source = searchSource() - .query(commonTermsQuery("field2", "quick brown").cutoffFrequency(100)) - .highlighter(highlight().field("field2").order("score").preTags("").postTags("")); - - SearchResponse searchResponse = client().prepareSearch("test").setSource(source).get(); - assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("The quick brown fox jumps over the lazy dog")); - } - - public void testCommonTermsTermVector() throws IOException { - assertAcked(prepareCreate("test").addMapping("type1", type1TermVectorMapping())); - ensureGreen(); - - client().prepareIndex("test", "type1").setSource( - "field1", "this is a test", - "field2", "The quick brown fox jumps over the lazy dog").get(); - refresh(); - logger.info("--> highlighting and searching on field1"); - SearchSourceBuilder source = searchSource().query(commonTermsQuery("field2", "quick brown").cutoffFrequency(100)) - .highlighter(highlight().field("field2").order("score").preTags("").postTags("")); - - SearchResponse searchResponse = client().prepareSearch("test").setSource(source).get(); - - assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("The quick brown fox jumps over the lazy dog")); - } - public void testPlainHighlightDifferentFragmenter() throws Exception { assertAcked(prepareCreate("test") .addMapping("type1", "tags", "type=text")); @@ -2295,24 +2258,6 @@ public void testPostingsHighlighterBoostingQuery() throws IOException { equalTo("The quick brown fox jumps over the lazy dog! Second sentence.")); } - public void testPostingsHighlighterCommonTermsQuery() throws IOException { - assertAcked(prepareCreate("test").addMapping("type1", type1PostingsffsetsMapping())); - ensureGreen(); - - client().prepareIndex("test", "type1") - .setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy dog! Second sentence.").get(); - refresh(); - - logger.info("--> highlighting and searching on field1"); - SearchSourceBuilder source = searchSource().query(commonTermsQuery("field2", "quick brown").cutoffFrequency(100)) - .highlighter(highlight().field("field2").preTags("").postTags("")); - SearchResponse searchResponse = client().search(searchRequest("test").source(source)).actionGet(); - assertHitCount(searchResponse, 1L); - - assertHighlight(searchResponse, 0, "field2", 0, 1, - equalTo("The quick brown fox jumps over the lazy dog! Second sentence.")); - } - private static XContentBuilder type1PostingsffsetsMapping() throws IOException { return XContentFactory.jsonBuilder().startObject().startObject("type1") .startObject("properties") diff --git a/server/src/test/java/org/elasticsearch/search/query/MultiMatchQueryIT.java b/server/src/test/java/org/elasticsearch/search/query/MultiMatchQueryIT.java index 5fec898155487..415fa40ea9db8 100644 --- a/server/src/test/java/org/elasticsearch/search/query/MultiMatchQueryIT.java +++ b/server/src/test/java/org/elasticsearch/search/query/MultiMatchQueryIT.java @@ -19,7 +19,6 @@ package org.elasticsearch.search.query; import com.carrotsearch.randomizedtesting.generators.RandomPicks; - import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchResponse; @@ -72,8 +71,6 @@ import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; -import static org.hamcrest.Matchers.greaterThanOrEqualTo; -import static org.hamcrest.Matchers.lessThan; public class MultiMatchQueryIT extends ESIntegTestCase { @@ -303,66 +300,6 @@ public void testSingleField() throws NoSuchFieldException, IllegalAccessExceptio } - public void testCutoffFreq() throws ExecutionException, InterruptedException { - final long numDocs = client().prepareSearch("test").setSize(0) - .setQuery(matchAllQuery()).get().getHits().getTotalHits().value; - MatchQuery.Type type = MatchQuery.Type.BOOLEAN; - Float cutoffFrequency = randomBoolean() ? Math.min(1, numDocs * 1.f / between(10, 20)) : 1.f / between(10, 20); - SearchResponse searchResponse = client().prepareSearch("test") - .setQuery(randomizeType(multiMatchQuery("marvel hero captain america", "full_name", "first_name", "last_name", "category") - .operator(Operator.OR).cutoffFrequency(cutoffFrequency))).get(); - Set topNIds = Sets.newHashSet("theone", "theother"); - for (int i = 0; i < searchResponse.getHits().getHits().length; i++) { - topNIds.remove(searchResponse.getHits().getAt(i).getId()); - // very likely that we hit a random doc that has the same score so orders are random since - // the doc id is the tie-breaker - } - assertThat(topNIds, empty()); - assertThat(searchResponse.getHits().getHits()[0].getScore(), - greaterThanOrEqualTo(searchResponse.getHits().getHits()[1].getScore())); - - cutoffFrequency = randomBoolean() ? Math.min(1, numDocs * 1.f / between(10, 20)) : 1.f / between(10, 20); - searchResponse = client().prepareSearch("test") - .setQuery(randomizeType(multiMatchQuery("marvel hero captain america", "full_name", "first_name", "last_name", "category") - .operator(Operator.OR).cutoffFrequency(cutoffFrequency).type(type))).get(); - assertFirstHit(searchResponse, anyOf(hasId("theone"), hasId("theother"))); - assertThat(searchResponse.getHits().getHits()[0].getScore(), greaterThan(searchResponse.getHits().getHits()[1].getScore())); - long size = searchResponse.getHits().getTotalHits().value; - - searchResponse = client().prepareSearch("test") - .setQuery(randomizeType(multiMatchQuery("marvel hero captain america", "full_name", "first_name", "last_name", "category") - .operator(Operator.OR).type(type))).get(); - assertFirstHit(searchResponse, anyOf(hasId("theone"), hasId("theother"))); - assertThat("common terms expected to be a way smaller result set", size, lessThan(searchResponse.getHits().getTotalHits().value)); - - cutoffFrequency = randomBoolean() ? Math.min(1, numDocs * 1.f / between(10, 20)) : 1.f / between(10, 20); - searchResponse = client().prepareSearch("test") - .setQuery(randomizeType(multiMatchQuery("marvel hero", "full_name", "first_name", "last_name", "category") - .operator(Operator.OR).cutoffFrequency(cutoffFrequency).type(type))).get(); - assertFirstHit(searchResponse, hasId("theother")); - - - searchResponse = client().prepareSearch("test") - .setQuery(randomizeType(multiMatchQuery("captain america", "full_name", "first_name", "last_name", "category") - .operator(Operator.AND).cutoffFrequency(cutoffFrequency).type(type))).get(); - assertHitCount(searchResponse, 1L); - assertFirstHit(searchResponse, hasId("theone")); - - searchResponse = client().prepareSearch("test") - .setQuery(randomizeType(multiMatchQuery("captain america", "full_name", "first_name", "last_name", "category") - .operator(Operator.AND).cutoffFrequency(cutoffFrequency).type(type))).get(); - assertHitCount(searchResponse, 1L); - assertFirstHit(searchResponse, hasId("theone")); - - searchResponse = client().prepareSearch("test") - .setQuery(randomizeType(multiMatchQuery("marvel hero", "first_name", "last_name", "category") - .operator(Operator.AND).cutoffFrequency(cutoffFrequency) - .analyzer("category") - .type(MultiMatchQueryBuilder.Type.CROSS_FIELDS))).get(); - assertHitCount(searchResponse, 1L); - assertFirstHit(searchResponse, hasId("theother")); - } - public void testEquivalence() { final int numDocs = (int) client().prepareSearch("test").setSize(0) @@ -559,21 +496,11 @@ public void testCrossFieldMode() throws ExecutionException, InterruptedException .analyzer("category"))).get(); assertFirstHit(searchResponse, hasId("theone")); - searchResponse = client().prepareSearch("test") - .setQuery(randomizeType(multiMatchQuery("captain america marvel hero", "first_name", "last_name", "category") - .type(MultiMatchQueryBuilder.Type.CROSS_FIELDS) - .cutoffFrequency(0.1f) - .analyzer("category") - .operator(Operator.OR))).get(); - assertFirstHit(searchResponse, anyOf(hasId("theother"), hasId("theone"))); - long numResults = searchResponse.getHits().getTotalHits().value; - searchResponse = client().prepareSearch("test") .setQuery(randomizeType(multiMatchQuery("captain america marvel hero", "first_name", "last_name", "category") .type(MultiMatchQueryBuilder.Type.CROSS_FIELDS) .analyzer("category") .operator(Operator.OR))).get(); - assertThat(numResults, lessThan(searchResponse.getHits().getTotalHits().value)); assertFirstHit(searchResponse, hasId("theone")); diff --git a/server/src/test/java/org/elasticsearch/search/query/SearchQueryIT.java b/server/src/test/java/org/elasticsearch/search/query/SearchQueryIT.java index 2a7eb10313c51..afba40e2cb752 100644 --- a/server/src/test/java/org/elasticsearch/search/query/SearchQueryIT.java +++ b/server/src/test/java/org/elasticsearch/search/query/SearchQueryIT.java @@ -68,7 +68,6 @@ import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.index.query.QueryBuilders.boolQuery; -import static org.elasticsearch.index.query.QueryBuilders.commonTermsQuery; import static org.elasticsearch.index.query.QueryBuilders.constantScoreQuery; import static org.elasticsearch.index.query.QueryBuilders.existsQuery; import static org.elasticsearch.index.query.QueryBuilders.functionScoreQuery; @@ -101,7 +100,6 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHits; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSecondHit; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertThirdHit; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.hasId; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.hasScore; import static org.hamcrest.Matchers.closeTo; @@ -270,97 +268,6 @@ public void testAllDocsQueryString() throws InterruptedException, ExecutionExcep } } - public void testCommonTermsQuery() throws Exception { - - client().admin().indices().prepareCreate("test") - .addMapping("type1", "field1", "type=text,analyzer=whitespace") - .setSettings(Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 1)).get(); - indexRandom(true, client().prepareIndex("test", "type1", "3").setSource("field1", "quick lazy huge brown pidgin", "field2", - "the quick lazy huge brown fox jumps over the tree"), - client().prepareIndex("test", "type1", "1").setSource("field1", "the quick brown fox"), - client().prepareIndex("test", "type1", "2").setSource("field1", "the quick lazy huge brown fox jumps over the tree") ); - - - SearchResponse searchResponse = client().prepareSearch().setQuery(commonTermsQuery("field1", "the quick brown").cutoffFrequency(3) - .lowFreqOperator(Operator.OR)).get(); - assertHitCount(searchResponse, 3L); - assertFirstHit(searchResponse, hasId("1")); - assertSecondHit(searchResponse, hasId("2")); - assertThirdHit(searchResponse, hasId("3")); - - searchResponse = client().prepareSearch().setQuery(commonTermsQuery("field1", "the quick brown").cutoffFrequency(3) - .lowFreqOperator(Operator.AND)).get(); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(2L)); - assertFirstHit(searchResponse, hasId("1")); - assertSecondHit(searchResponse, hasId("2")); - - // Default - searchResponse = client().prepareSearch().setQuery(commonTermsQuery("field1", "the quick brown").cutoffFrequency(3)).get(); - assertHitCount(searchResponse, 3L); - assertFirstHit(searchResponse, hasId("1")); - assertSecondHit(searchResponse, hasId("2")); - assertThirdHit(searchResponse, hasId("3")); - - - searchResponse = client().prepareSearch().setQuery(commonTermsQuery("field1", "the huge fox").lowFreqMinimumShouldMatch("2")).get(); - assertHitCount(searchResponse, 1L); - assertFirstHit(searchResponse, hasId("2")); - - searchResponse = client().prepareSearch().setQuery(commonTermsQuery("field1", "the lazy fox brown").cutoffFrequency(1) - .highFreqMinimumShouldMatch("3")).get(); - assertHitCount(searchResponse, 2L); - assertFirstHit(searchResponse, hasId("2")); - assertSecondHit(searchResponse, hasId("1")); - - searchResponse = client().prepareSearch().setQuery(commonTermsQuery("field1", "the lazy fox brown").cutoffFrequency(1) - .highFreqMinimumShouldMatch("4")).get(); - assertHitCount(searchResponse, 1L); - assertFirstHit(searchResponse, hasId("2")); - - // Default - searchResponse = client().prepareSearch().setQuery(commonTermsQuery("field1", "the lazy fox brown").cutoffFrequency(1)).get(); - assertHitCount(searchResponse, 1L); - assertFirstHit(searchResponse, hasId("2")); - - searchResponse = client().prepareSearch().setQuery(commonTermsQuery("field1", "the quick brown").cutoffFrequency(3) - .analyzer("stop")).get(); - assertHitCount(searchResponse, 3L); - // stop drops "the" since its a stopword - assertFirstHit(searchResponse, hasId("1")); - assertSecondHit(searchResponse, hasId("3")); - assertThirdHit(searchResponse, hasId("2")); - - // try the same with match query - searchResponse = client().prepareSearch().setQuery(matchQuery("field1", "the quick brown").cutoffFrequency(3) - .operator(Operator.AND)).get(); - assertHitCount(searchResponse, 2L); - assertFirstHit(searchResponse, hasId("1")); - assertSecondHit(searchResponse, hasId("2")); - - searchResponse = client().prepareSearch().setQuery(matchQuery("field1", "the quick brown").cutoffFrequency(3) - .operator(Operator.OR)).get(); - assertHitCount(searchResponse, 3L); - assertFirstHit(searchResponse, hasId("1")); - assertSecondHit(searchResponse, hasId("2")); - assertThirdHit(searchResponse, hasId("3")); - - searchResponse = client().prepareSearch().setQuery(matchQuery("field1", "the quick brown").cutoffFrequency(3) - .operator(Operator.AND).analyzer("stop")).get(); - assertHitCount(searchResponse, 3L); - // stop drops "the" since its a stopword - assertFirstHit(searchResponse, hasId("1")); - assertSecondHit(searchResponse, hasId("3")); - assertThirdHit(searchResponse, hasId("2")); - - // try the same with multi match query - searchResponse = client().prepareSearch().setQuery(multiMatchQuery("the quick brown", "field1", "field2").cutoffFrequency(3) - .operator(Operator.AND)).get(); - assertHitCount(searchResponse, 3L); - assertFirstHit(searchResponse, hasId("3")); - assertSecondHit(searchResponse, hasId("1")); - assertThirdHit(searchResponse, hasId("2")); - } - public void testQueryStringAnalyzedWildcard() throws Exception { createIndex("test"); diff --git a/server/src/test/java/org/elasticsearch/validate/SimpleValidateQueryIT.java b/server/src/test/java/org/elasticsearch/validate/SimpleValidateQueryIT.java index 5f730ad138f96..7691b3346d72f 100644 --- a/server/src/test/java/org/elasticsearch/validate/SimpleValidateQueryIT.java +++ b/server/src/test/java/org/elasticsearch/validate/SimpleValidateQueryIT.java @@ -189,7 +189,7 @@ public void testExplainFilteredAlias() { assertThat(validateQueryResponse.getQueryExplanation().get(0).getExplanation(), containsString("field:value1")); } - public void testExplainWithRewriteValidateQuery() throws Exception { + public void testExplainWithRewriteValidateQuery() { client().admin().indices().prepareCreate("test") .addMapping("type1", "field", "type=text,analyzer=whitespace") .setSettings(Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 1)).get(); @@ -205,18 +205,6 @@ public void testExplainWithRewriteValidateQuery() throws Exception { assertExplanation(QueryBuilders.matchPhrasePrefixQuery("field", "ju"), containsString("field:jumps"), true); - // common terms queries - assertExplanation(QueryBuilders.commonTermsQuery("field", "huge brown pidgin").cutoffFrequency(1), - containsString("+field:pidgin field:huge field:brown"), true); - assertExplanation(QueryBuilders.commonTermsQuery("field", "the brown").analyzer("stop"), - containsString("field:brown"), true); - - // match queries with cutoff frequency - assertExplanation(QueryBuilders.matchQuery("field", "huge brown pidgin").cutoffFrequency(1), - containsString("+field:pidgin field:huge field:brown"), true); - assertExplanation(QueryBuilders.matchQuery("field", "the brown").analyzer("stop"), - containsString("field:brown"), true); - // fuzzy queries assertExplanation(QueryBuilders.fuzzyQuery("field", "the").fuzziness(Fuzziness.fromEdits(2)), containsString("field:the (field:tree)^0.3333333"), true); @@ -233,7 +221,7 @@ public void testExplainWithRewriteValidateQuery() throws Exception { containsString("field:huge field:pidgin"), true); } - public void testExplainWithRewriteValidateQueryAllShards() throws Exception { + public void testExplainWithRewriteValidateQueryAllShards() { client().admin().indices().prepareCreate("test") .addMapping("type1", "field", "type=text,analyzer=whitespace") .setSettings(Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 2).put("index.number_of_routing_shards", 2)).get(); @@ -262,7 +250,7 @@ public void testExplainWithRewriteValidateQueryAllShards() throws Exception { ), true, true); } - public void testIrrelevantPropertiesBeforeQuery() throws IOException { + public void testIrrelevantPropertiesBeforeQuery() { createIndex("test"); ensureGreen(); refresh(); @@ -271,7 +259,7 @@ public void testIrrelevantPropertiesBeforeQuery() throws IOException { new BytesArray("{\"foo\": \"bar\", \"query\": {\"term\" : { \"user\" : \"kimchy\" }}}"))).get().isValid(), equalTo(false)); } - public void testIrrelevantPropertiesAfterQuery() throws IOException { + public void testIrrelevantPropertiesAfterQuery() { createIndex("test"); ensureGreen(); refresh(); @@ -311,7 +299,7 @@ private static void assertExplanations(QueryBuilder queryBuilder, } } - public void testExplainTermsQueryWithLookup() throws Exception { + public void testExplainTermsQueryWithLookup() { client().admin().indices().prepareCreate("twitter") .addMapping("_doc", "user", "type=integer", "followers", "type=integer") .setSettings(Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 2).put("index.number_of_routing_shards", 2)).get(); diff --git a/server/src/test/resources/org/elasticsearch/index/query/commonTerms-query1.json b/server/src/test/resources/org/elasticsearch/index/query/commonTerms-query1.json deleted file mode 100644 index b2728dac09df4..0000000000000 --- a/server/src/test/resources/org/elasticsearch/index/query/commonTerms-query1.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "common" : { - "dogs" : { - "query" : "buck mia tom", - "cutoff_frequency" : 1, - "minimum_should_match" : { - "low_freq" : 2 - } - } - } -} diff --git a/server/src/test/resources/org/elasticsearch/index/query/commonTerms-query2.json b/server/src/test/resources/org/elasticsearch/index/query/commonTerms-query2.json deleted file mode 100644 index aeb281bb7592a..0000000000000 --- a/server/src/test/resources/org/elasticsearch/index/query/commonTerms-query2.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "common" : { - "dogs" : { - "query" : "buck mia tom", - "minimum_should_match" : { - "high_freq" : "50%", - "low_freq" : "5<20%" - } - } - } -} diff --git a/server/src/test/resources/org/elasticsearch/index/query/commonTerms-query3.json b/server/src/test/resources/org/elasticsearch/index/query/commonTerms-query3.json deleted file mode 100644 index f276209ffc7ed..0000000000000 --- a/server/src/test/resources/org/elasticsearch/index/query/commonTerms-query3.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "common" : { - "dogs" : { - "query" : "buck mia tom", - "cutoff_frequency" : 1, - "minimum_should_match" : 2 - } - } -} diff --git a/x-pack/plugin/sql/qa/src/main/resources/docs/docs.csv-spec b/x-pack/plugin/sql/qa/src/main/resources/docs/docs.csv-spec index 936c7eef88191..00f7632c9fc44 100644 --- a/x-pack/plugin/sql/qa/src/main/resources/docs/docs.csv-spec +++ b/x-pack/plugin/sql/qa/src/main/resources/docs/docs.csv-spec @@ -1001,12 +1001,13 @@ Frank Herbert |God Emperor of Dune|7.0029488 optionalParamsForMatch // tag::optionalParamsForMatch -SELECT author, name, SCORE() FROM library WHERE MATCH(name, 'to the star', 'operator=or;cutoff_frequency=0.2'); +SELECT author, name, SCORE() FROM library WHERE MATCH(name, 'to the star', 'operator=OR;fuzziness=AUTO:1,5;minimum_should_match=1') +ORDER BY SCORE() DESC LIMIT 2; author | name | SCORE() -----------------+------------------------------------+--------------- -Peter F. Hamilton|Pandora's Star |3.0997515 Douglas Adams |The Hitchhiker's Guide to the Galaxy|3.1756816 +Peter F. Hamilton|Pandora's Star |3.0997515 // end::optionalParamsForMatch ; diff --git a/x-pack/plugin/sql/qa/src/main/resources/fulltext.csv-spec b/x-pack/plugin/sql/qa/src/main/resources/fulltext.csv-spec index cb410080e77bd..6379f6bf26f1a 100644 --- a/x-pack/plugin/sql/qa/src/main/resources/fulltext.csv-spec +++ b/x-pack/plugin/sql/qa/src/main/resources/fulltext.csv-spec @@ -92,14 +92,14 @@ SELECT emp_no, first_name, gender, last_name FROM test_emp WHERE MATCH(first_nam ; matchQueryWithOptions -SELECT emp_no, first_name, gender, last_name FROM test_emp WHERE MATCH(first_name, 'Erez', 'lenient=true;cutoff_frequency=2;fuzzy_rewrite=scoring_boolean;minimum_should_match=1;operator=AND;max_expansions=30;prefix_length=1;analyzer=english;auto_generate_synonyms_phrase_query=true;fuzzy_transpositions=true'); +SELECT emp_no, first_name, gender, last_name FROM test_emp WHERE MATCH(first_name, 'Erez', 'lenient=true;fuzzy_rewrite=scoring_boolean;minimum_should_match=1;operator=AND;max_expansions=30;prefix_length=1;analyzer=english;auto_generate_synonyms_phrase_query=true;fuzzy_transpositions=true'); emp_no:i | first_name:s | gender:s | last_name:s 10076 |Erez |F |Ritzmann ; matchQueryWithOptionsInMultipleCommaSeparatedStrings -SELECT emp_no, first_name, gender, last_name FROM test_emp WHERE MATCH(first_name, 'Erez', 'lenient=true;cutoff_frequency=2','fuzzy_rewrite=scoring_boolean;minimum_should_match=1','operator=AND', 'max_expansions=30;prefix_length=1;analyzer=english;auto_generate_synonyms_phrase_query=true;fuzzy_transpositions=true'); +SELECT emp_no, first_name, gender, last_name FROM test_emp WHERE MATCH(first_name, 'Erez', 'lenient=true','fuzzy_rewrite=scoring_boolean;minimum_should_match=1','operator=AND', 'max_expansions=30;prefix_length=1;analyzer=english;auto_generate_synonyms_phrase_query=true;fuzzy_transpositions=true'); emp_no:i | first_name:s | gender:s | last_name:s 10076 |Erez |F |Ritzmann @@ -113,14 +113,14 @@ SELECT emp_no, first_name, gender, last_name FROM test_emp WHERE MATCH('first_na ; multiMatchQueryAllOptions -SELECT emp_no, first_name, gender, last_name FROM test_emp WHERE MATCH('first_name,last_name', 'Morton', 'slop=1;lenient=true;cutoff_frequency=2;tie_breaker=0.1;fuzzy_rewrite=scoring_boolean;minimum_should_match=1;operator=AND;max_expansions=30;prefix_length=1;analyzer=english;type=best_fields;auto_generate_synonyms_phrase_query=true;fuzzy_transpositions=true'); +SELECT emp_no, first_name, gender, last_name FROM test_emp WHERE MATCH('first_name,last_name', 'Morton', 'slop=1;lenient=true;tie_breaker=0.1;fuzzy_rewrite=scoring_boolean;minimum_should_match=1;operator=AND;max_expansions=30;prefix_length=1;analyzer=english;type=best_fields;auto_generate_synonyms_phrase_query=true;fuzzy_transpositions=true'); emp_no:i | first_name:s | gender:s | last_name:s 10095 |Hilari |M |Morton ; multiMatchQueryWithInMultipleCommaSeparatedStrings -SELECT emp_no, first_name, gender, last_name FROM test_emp WHERE MATCH('first_name,last_name', 'Morton', 'slop=1;lenient=true', 'cutoff_frequency=2','tie_breaker=0.1;fuzzy_rewrite=scoring_boolean','minimum_should_match=1;operator=AND;max_expansions=30;prefix_length=1;analyzer=english;type=best_fields;auto_generate_synonyms_phrase_query=true;fuzzy_transpositions=true'); +SELECT emp_no, first_name, gender, last_name FROM test_emp WHERE MATCH('first_name,last_name', 'Morton', 'slop=1;lenient=true', 'tie_breaker=0.1;fuzzy_rewrite=scoring_boolean','minimum_should_match=1;operator=AND;max_expansions=30;prefix_length=1;analyzer=english;type=best_fields;auto_generate_synonyms_phrase_query=true;fuzzy_transpositions=true'); emp_no:i | first_name:s | gender:s | last_name:s 10095 |Hilari |M |Morton diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/MatchQuery.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/MatchQuery.java index 58ca09da929f9..3fca7630bd5a6 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/MatchQuery.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/MatchQuery.java @@ -32,7 +32,6 @@ public class MatchQuery extends LeafQuery { BUILDER_APPLIERS = Map.ofEntries( entry("analyzer", MatchQueryBuilder::analyzer), entry("auto_generate_synonyms_phrase_query", (qb, s) -> qb.autoGenerateSynonymsPhraseQuery(Booleans.parseBoolean(s))), - entry("cutoff_frequency", (qb, s) -> qb.cutoffFrequency(Float.valueOf(s))), entry("fuzziness", (qb, s) -> qb.fuzziness(Fuzziness.fromString(s))), entry("fuzzy_transpositions", (qb, s) -> qb.fuzzyTranspositions(Booleans.parseBoolean(s))), entry("fuzzy_rewrite", MatchQueryBuilder::fuzzyRewrite), diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/MultiMatchQuery.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/MultiMatchQuery.java index f51f8275a898c..ab6190ad6eee1 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/MultiMatchQuery.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/MultiMatchQuery.java @@ -32,7 +32,6 @@ public class MultiMatchQuery extends LeafQuery { // appliers.put("zero_terms_query", (qb, s) -> qb.zeroTermsQuery(s)); entry("analyzer", MultiMatchQueryBuilder::analyzer), entry("auto_generate_synonyms_phrase_query", (qb, s) -> qb.autoGenerateSynonymsPhraseQuery(Booleans.parseBoolean(s))), - entry("cutoff_frequency", (qb, s) -> qb.cutoffFrequency(Float.valueOf(s))), entry("fuzziness", (qb, s) -> qb.fuzziness(Fuzziness.fromString(s))), entry("fuzzy_rewrite", MultiMatchQueryBuilder::fuzzyRewrite), entry("fuzzy_transpositions", (qb, s) -> qb.fuzzyTranspositions(Booleans.parseBoolean(s))), From 2eca4a614bd2fab06e5d9c60b4d3edfe951e73e2 Mon Sep 17 00:00:00 2001 From: Julie Tibshirani Date: Fri, 31 May 2019 08:53:59 -0700 Subject: [PATCH 218/224] Clarify that inner_hits must be used to access nested fields. (#42724) This PR updates the docs for `docvalue_fields` and `stored_fields` to clarify that nested fields must be accessed through `inner_hits`. It also tweaks the nested fields documentation to make this point more visible. Addresses #23766. --- docs/reference/mapping/types/nested.asciidoc | 31 ++++++++++--------- .../search/request/docvalue-fields.asciidoc | 4 +++ .../search/request/stored-fields.asciidoc | 5 +++ 3 files changed, 25 insertions(+), 15 deletions(-) diff --git a/docs/reference/mapping/types/nested.asciidoc b/docs/reference/mapping/types/nested.asciidoc index de0f3f2a5f1cd..63bb4591369e5 100644 --- a/docs/reference/mapping/types/nested.asciidoc +++ b/docs/reference/mapping/types/nested.asciidoc @@ -159,6 +159,22 @@ Nested documents can be: * sorted with <>. * retrieved and highlighted with <>. +[IMPORTANT] +============================================= + +Because nested documents are indexed as separate documents, they can only be +accessed within the scope of the `nested` query, the +`nested`/`reverse_nested` aggregations, or <>. + +For instance, if a string field within a nested document has +<> set to `offsets` to allow use of the postings +during the highlighting, these offsets will not be available during the main highlighting +phase. Instead, highlighting needs to be performed via +<>. The same consideration applies when loading +fields during a search through <> +or <>. + +============================================= [[nested-params]] ==== Parameters for `nested` fields @@ -178,21 +194,6 @@ The following parameters are accepted by `nested` fields: may be added to an existing nested object. -[IMPORTANT] -============================================= - -Because nested documents are indexed as separate documents, they can only be -accessed within the scope of the `nested` query, the -`nested`/`reverse_nested` aggregations, or <>. - -For instance, if a string field within a nested document has -<> set to `offsets` to allow use of the postings -during the highlighting, these offsets will not be available during the main highlighting -phase. Instead, highlighting needs to be performed via -<>. - -============================================= - [float] === Limits on `nested` mappings and objects diff --git a/docs/reference/search/request/docvalue-fields.asciidoc b/docs/reference/search/request/docvalue-fields.asciidoc index 6697b5bb3e383..784cc94015366 100644 --- a/docs/reference/search/request/docvalue-fields.asciidoc +++ b/docs/reference/search/request/docvalue-fields.asciidoc @@ -67,3 +67,7 @@ on their mappings: `long`, `double` and other numeric fields are formatted as numbers, `keyword` fields are formatted as strings, `date` fields are formatted with the configured `date` format, etc. +NOTE: On its own, `docvalue_fields` cannot be used to load fields in nested +objects -- if a field contains a nested object in its path, then no data will +be returned for that docvalue field. To access nested fields, `docvalue_fields` +must be used within an <> block. \ No newline at end of file diff --git a/docs/reference/search/request/stored-fields.asciidoc b/docs/reference/search/request/stored-fields.asciidoc index 195dc39f11e73..b55e0fce45757 100644 --- a/docs/reference/search/request/stored-fields.asciidoc +++ b/docs/reference/search/request/stored-fields.asciidoc @@ -49,6 +49,11 @@ Script fields can also be automatically detected and used as fields, so things like `_source.obj1.field1` can be used, though not recommended, as `obj1.field1` will work as well. +NOTE: On its own, `stored_fields` cannot be used to load fields in nested +objects -- if a field contains a nested object in its path, then no data will +be returned for that stored field. To access nested fields, `stored_fields` +must be used within an <> block. + ==== Disable stored fields entirely To disable the stored fields (and metadata fields) entirely use: `_none_`: From 3f15d57901394e660dbc9e3a5d84352ce4dea597 Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Fri, 31 May 2019 12:06:52 -0400 Subject: [PATCH 219/224] Remove locale-dependent string checking We were checking if an exception was caused by a specific reason "Not a directory". Alas, this reason is locale-dependent and can fail on systems that are not set to en_US.UTF-8. This commit addresses this by deriving what the locale-dependent error message would be and using that for comparison with the actual exception thrown. Closes #41689 --- .../org/elasticsearch/plugins/PluginsServiceTests.java | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/server/src/test/java/org/elasticsearch/plugins/PluginsServiceTests.java b/server/src/test/java/org/elasticsearch/plugins/PluginsServiceTests.java index b9459b926d372..deafe203ac8dc 100644 --- a/server/src/test/java/org/elasticsearch/plugins/PluginsServiceTests.java +++ b/server/src/test/java/org/elasticsearch/plugins/PluginsServiceTests.java @@ -171,7 +171,15 @@ public void testDesktopServicesStoreFiles() throws IOException { if (Constants.WINDOWS) { assertThat(e.getCause(), instanceOf(NoSuchFileException.class)); } else { - assertThat(e.getCause(), hasToString(containsString("Not a directory"))); + // force a "Not a directory" exception to be thrown so that we can extract the locale-dependent message + final String expected; + try (InputStream ignored = Files.newInputStream(desktopServicesStore.resolve("not-a-directory"))) { + throw new AssertionError(); + } catch (final FileSystemException inner) { + // locale-dependent translation of "Not a directory" + expected = inner.getReason(); + } + assertThat(e.getCause(), hasToString(containsString(expected))); } } } From d6ad87461302480404f7026273758fbdf3024422 Mon Sep 17 00:00:00 2001 From: James Rodewig Date: Fri, 31 May 2019 13:03:41 -0400 Subject: [PATCH 220/224] [DOCS] Remove unneeded options from `[source,sql]` code blocks (#42759) In AsciiDoc, `subs="attributes,callouts,macros"` options were required to render `include-tagged::` in a code block. With elastic/docs#827, Elasticsearch Reference documentation migrated from AsciiDoc to Asciidoctor. In Asciidoctor, the `subs="attributes,callouts,macros"` options are no longer needed to render `include-tagged::` in a code block. This commit removes those unneeded options. Resolves #41589 --- .../sql/functions/conditional.asciidoc | 42 +++++----- .../sql/functions/date-time.asciidoc | 80 +++++++++---------- .../reference/sql/functions/grouping.asciidoc | 10 +-- .../sql/functions/like-rlike.asciidoc | 4 +- .../sql/functions/operators.asciidoc | 36 ++++----- docs/reference/sql/functions/search.asciidoc | 16 ++-- docs/reference/sql/functions/string.asciidoc | 42 +++++----- docs/reference/sql/functions/system.asciidoc | 4 +- .../sql/functions/type-conversion.asciidoc | 10 +-- docs/reference/sql/language/indices.asciidoc | 12 +-- .../syntax/commands/describe-table.asciidoc | 2 +- .../language/syntax/commands/select.asciidoc | 58 +++++++------- .../syntax/commands/show-columns.asciidoc | 2 +- .../syntax/commands/show-functions.asciidoc | 10 +-- .../syntax/commands/show-tables.asciidoc | 12 +-- docs/reference/sql/security.asciidoc | 2 +- .../qa/src/main/resources/docs/docs.csv-spec | 4 +- 17 files changed, 173 insertions(+), 173 deletions(-) diff --git a/docs/reference/sql/functions/conditional.asciidoc b/docs/reference/sql/functions/conditional.asciidoc index d0b8e7d2ff3f1..0206115c182c1 100644 --- a/docs/reference/sql/functions/conditional.asciidoc +++ b/docs/reference/sql/functions/conditional.asciidoc @@ -33,17 +33,17 @@ If the condition’s result is true, the value of the result expression that fol the subsequent when clauses will be skipped and not processed. -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[case] ---- -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[caseReturnNull] ---- -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[caseWithElse] ---- @@ -70,12 +70,12 @@ CASE WHEN expression = value1 THEN result1 END ---- -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[caseWithOperand] ---- -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[caseWithOperandAndElse] ---- @@ -155,12 +155,12 @@ If all arguments are null, then it returns `null`. -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[coalesceReturnNonNull] ---- -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[coalesceReturnNull] ---- @@ -199,12 +199,12 @@ If all arguments are null, then it returns `null`. -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[greatestReturnNonNull] ---- -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[greatestReturnNull] ---- @@ -237,12 +237,12 @@ If all arguments are null, then it returns `null`. -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[ifNullReturnFirst] ---- -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[ifNullReturnSecond] ---- @@ -277,12 +277,12 @@ logic of programming languages. If the 3rd expression is not provided and the co `null` is returned. -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[iifWithDefaultValue] ---- -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[iifWithoutDefaultValue] ---- @@ -325,12 +325,12 @@ If all arguments are null, then it returns `null`. -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[isNullReturnFirst] ---- -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[isNullReturnSecond] ---- @@ -370,12 +370,12 @@ If all arguments are null, then it returns `null`. -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[leastReturnNonNull] ---- -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[leastReturnNull] ---- @@ -407,12 +407,12 @@ Returns `null` when the two input expressions are equal and if not, it returns the 1st expression. -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[nullIfReturnFirst] ---- -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[nullIfReturnNull] ---- @@ -446,12 +446,12 @@ If all arguments are null, then it returns `null`. -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[nvlReturnFirst] ---- -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[nvlReturnSecond] ---- diff --git a/docs/reference/sql/functions/date-time.asciidoc b/docs/reference/sql/functions/date-time.asciidoc index d9d5e7bcf14e5..45231393521c7 100644 --- a/docs/reference/sql/functions/date-time.asciidoc +++ b/docs/reference/sql/functions/date-time.asciidoc @@ -57,32 +57,32 @@ s|Description Basic arithmetic operators (`+`, `-`, etc) support date/time parameters as indicated below: -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs/docs.csv-spec[dtIntervalPlusInterval] -------------------------------------------------- -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs/docs.csv-spec[dtDateTimePlusInterval] -------------------------------------------------- -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs/docs.csv-spec[dtMinusInterval] -------------------------------------------------- -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs/docs.csv-spec[dtIntervalMinusInterval] -------------------------------------------------- -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs/docs.csv-spec[dtDateTimeMinusInterval] -------------------------------------------------- -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs/docs.csv-spec[dtIntervalMul] -------------------------------------------------- @@ -116,17 +116,17 @@ Unlike CURRENT_DATE, `CURDATE()` can only be used as a function with no argument This method always returns the same value for its every occurrence within the same query. -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs/docs.csv-spec[currentDate] -------------------------------------------------- -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs/docs.csv-spec[currentDateFunction] -------------------------------------------------- -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs/docs.csv-spec[curDateFunction] -------------------------------------------------- @@ -134,7 +134,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[curDateFunction] Typically, this function (as well as its twin <> function is used for relative date filtering: -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs/docs.csv-spec[filterToday] -------------------------------------------------- @@ -165,29 +165,29 @@ meaning a milliseconds precision current time will be returned. This method always returns the same value for its every occurrence within the same query. -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs/docs.csv-spec[currentTime] -------------------------------------------------- -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs/docs.csv-spec[currentTimeFunction] -------------------------------------------------- -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs/docs.csv-spec[curTimeFunction] -------------------------------------------------- -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs/docs.csv-spec[currentTimeFunctionPrecision] -------------------------------------------------- Typically, this function is used for relative date/time filtering: -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs/docs.csv-spec[filterCurrentTime] -------------------------------------------------- @@ -221,17 +221,17 @@ meaning a milliseconds precision current date/time will be returned. This method always returns the same value for its every occurrence within the same query. -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs/docs.csv-spec[curTs] -------------------------------------------------- -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs/docs.csv-spec[curTsFunction] -------------------------------------------------- -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs/docs.csv-spec[curTsFunctionPrecision] -------------------------------------------------- @@ -239,7 +239,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[curTsFunctionPrecision] Typically, this function (as well as its twin <> function is used for relative date/time filtering: -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs/docs.csv-spec[filterNow] -------------------------------------------------- @@ -267,7 +267,7 @@ DAY_OF_MONTH(datetime_exp) <1> Extract the day of the month from a date/datetime. -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs/docs.csv-spec[dayOfMonth] -------------------------------------------------- @@ -291,7 +291,7 @@ DAY_OF_WEEK(datetime_exp) <1> Extract the day of the week from a date/datetime. Sunday is `1`, Monday is `2`, etc. -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs/docs.csv-spec[dayOfWeek] -------------------------------------------------- @@ -315,7 +315,7 @@ DAY_OF_YEAR(datetime_exp) <1> Extract the day of the year from a date/datetime. -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs/docs.csv-spec[dayOfYear] -------------------------------------------------- @@ -339,7 +339,7 @@ DAY_NAME(datetime_exp) <1> Extract the day of the week from a date/datetime in text format (`Monday`, `Tuesday`...). -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs/docs.csv-spec[dayName] -------------------------------------------------- @@ -363,7 +363,7 @@ HOUR_OF_DAY(datetime_exp) <1> Extract the hour of the day from a date/datetime. -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs/docs.csv-spec[hourOfDay] -------------------------------------------------- @@ -388,7 +388,7 @@ ISO_DAY_OF_WEEK(datetime_exp) <1> Extract the day of the week from a date/datetime, following the https://en.wikipedia.org/wiki/ISO_week_date[ISO 8601 standard]. Monday is `1`, Tuesday is `2`, etc. -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs/docs.csv-spec[isoDayOfWeek] -------------------------------------------------- @@ -413,7 +413,7 @@ ISO_WEEK_OF_YEAR(datetime_exp) <1> Extract the week of the year from a date/datetime, following https://en.wikipedia.org/wiki/ISO_week_date[ISO 8601 standard]. The first week of a year is the first week with a majority (4 or more) of its days in January. -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs/docs.csv-spec[isoWeekOfYear] -------------------------------------------------- @@ -437,7 +437,7 @@ MINUTE_OF_DAY(datetime_exp) <1> Extract the minute of the day from a date/datetime. -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs/docs.csv-spec[minuteOfDay] -------------------------------------------------- @@ -461,7 +461,7 @@ MINUTE_OF_HOUR(datetime_exp) <1> Extract the minute of the hour from a date/datetime. -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs/docs.csv-spec[minuteOfHour] -------------------------------------------------- @@ -485,7 +485,7 @@ MONTH(datetime_exp) <1> Extract the month of the year from a date/datetime. -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs/docs.csv-spec[monthOfYear] -------------------------------------------------- @@ -509,7 +509,7 @@ MONTH_NAME(datetime_exp) <1> Extract the month from a date/datetime in text format (`January`, `February`...). -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs/docs.csv-spec[monthName] -------------------------------------------------- @@ -533,7 +533,7 @@ This function offers the same functionality as <> function is used for relative date/time filtering: -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs/docs.csv-spec[filterNow] -------------------------------------------------- @@ -565,7 +565,7 @@ SECOND_OF_MINUTE(datetime_exp) <1> Extract the second of the minute from a date/datetime. -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs/docs.csv-spec[secondOfMinute] -------------------------------------------------- @@ -589,7 +589,7 @@ QUARTER(datetime_exp) <1> Extract the year quarter the date/datetime falls in. -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs/docs.csv-spec[quarter] -------------------------------------------------- @@ -613,7 +613,7 @@ This function offers the same functionality as <> function is used for relative date filtering: -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs/docs.csv-spec[filterToday] -------------------------------------------------- @@ -645,7 +645,7 @@ WEEK_OF_YEAR(datetime_exp) <1> Extract the week of the year from a date/datetime. -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs/docs.csv-spec[weekOfYear] -------------------------------------------------- @@ -669,7 +669,7 @@ YEAR(datetime_exp) <1> Extract the year from a date/datetime. -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs/docs.csv-spec[year] -------------------------------------------------- @@ -697,14 +697,14 @@ EXTRACT( Extract fields from a date/datetime by specifying the name of a <>. The following -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs/docs.csv-spec[extractDayOfYear] -------------------------------------------------- is the equivalent to -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs/docs.csv-spec[dayOfYear] -------------------------------------------------- diff --git a/docs/reference/sql/functions/grouping.asciidoc b/docs/reference/sql/functions/grouping.asciidoc index 0a498a1aacef0..6f2f5a1b6e4c2 100644 --- a/docs/reference/sql/functions/grouping.asciidoc +++ b/docs/reference/sql/functions/grouping.asciidoc @@ -44,14 +44,14 @@ NOTE:: The histogram in SQL does *NOT* return empty buckets for missing interval `Histogram` can be applied on either numeric fields: -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[histogramNumeric] ---- or date/time fields: -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[histogramDateTime] ---- @@ -59,14 +59,14 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[histogramDateTime] Expressions inside the histogram are also supported as long as the return type is numeric: -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[histogramNumericExpression] ---- Do note that histograms (and grouping functions in general) allow custom expressions but cannot have any functions applied to them in the `GROUP BY`. In other words, the following statement is *NOT* allowed: -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[expressionOnHistogramNotAllowed] ---- @@ -75,7 +75,7 @@ as it requires two groupings (one for histogram followed by a second for applyin Instead one can rewrite the query to move the expression on the histogram _inside_ of it: -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[histogramDateTimeExpression] ---- diff --git a/docs/reference/sql/functions/like-rlike.asciidoc b/docs/reference/sql/functions/like-rlike.asciidoc index 73212bc113542..2d5ef0b62f93f 100644 --- a/docs/reference/sql/functions/like-rlike.asciidoc +++ b/docs/reference/sql/functions/like-rlike.asciidoc @@ -38,7 +38,7 @@ with the `LIKE` operator: The percent sign represents zero, one or multiple characters. The underscore represents a single number or character. These symbols can be used in combinations. -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[simpleLike] ---- @@ -75,7 +75,7 @@ and underscore (`_`); the pattern in this case is a regular expression which all For more details about the regular expressions syntax, https://docs.oracle.com/en/java/javase/11/docs/api/java.base/java/util/regex/Pattern.html[Java's Pattern class javadoc] is a good starting point. -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[simpleRLike] ---- diff --git a/docs/reference/sql/functions/operators.asciidoc b/docs/reference/sql/functions/operators.asciidoc index 4b7e8990290dd..02841c84b58e5 100644 --- a/docs/reference/sql/functions/operators.asciidoc +++ b/docs/reference/sql/functions/operators.asciidoc @@ -8,7 +8,7 @@ Boolean operator for comparing against one or multiple expressions. [[sql-operators-equality]] ==== `Equality (=)` -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/filter.sql-spec[whereFieldEquality] -------------------------------------------------- @@ -16,12 +16,12 @@ include-tagged::{sql-specs}/filter.sql-spec[whereFieldEquality] [[sql-operators-null-safe-equality]] ==== `Null safe Equality (<=>)` -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs/docs.csv-spec[nullEqualsCompareWithNull] -------------------------------------------------- -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs/docs.csv-spec[nullEqualsCompareTwoNulls] -------------------------------------------------- @@ -29,7 +29,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[nullEqualsCompareTwoNulls] [[sql-operators-inequality]] ==== `Inequality (<> or !=)` -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/filter.sql-spec[whereFieldNonEquality] -------------------------------------------------- @@ -37,7 +37,7 @@ include-tagged::{sql-specs}/filter.sql-spec[whereFieldNonEquality] [[sql-operators-comparison]] ==== `Comparison (<, <=, >, >=)` -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/filter.sql-spec[whereFieldLessThan] -------------------------------------------------- @@ -45,7 +45,7 @@ include-tagged::{sql-specs}/filter.sql-spec[whereFieldLessThan] [[sql-operators-between]] ==== `BETWEEN` -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/filter.sql-spec[whereBetween] -------------------------------------------------- @@ -53,7 +53,7 @@ include-tagged::{sql-specs}/filter.sql-spec[whereBetween] [[sql-operators-is-null]] ==== `IS NULL/IS NOT NULL` -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/filter.sql-spec[whereIsNotNullAndIsNull] -------------------------------------------------- @@ -61,7 +61,7 @@ include-tagged::{sql-specs}/filter.sql-spec[whereIsNotNullAndIsNull] [[sql-operators-in]] ==== `IN (, , ...)` -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/filter.sql-spec[whereWithInAndMultipleValues] -------------------------------------------------- @@ -74,7 +74,7 @@ Boolean operator for evaluating one or two expressions. [[sql-operators-and]] ==== `AND` -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/filter.sql-spec[whereFieldAndComparison] -------------------------------------------------- @@ -82,7 +82,7 @@ include-tagged::{sql-specs}/filter.sql-spec[whereFieldAndComparison] [[sql-operators-or]] ==== `OR` -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/filter.sql-spec[whereFieldOrComparison] -------------------------------------------------- @@ -90,7 +90,7 @@ include-tagged::{sql-specs}/filter.sql-spec[whereFieldOrComparison] [[sql-operators-not]] ==== `NOT` -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/filter.sql-spec[whereFieldEqualityNot] -------------------------------------------------- @@ -104,7 +104,7 @@ The result is a value of numeric type. [[sql-operators-plus]] ==== `Add (+)` -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/arithmetic.sql-spec[plus] -------------------------------------------------- @@ -112,7 +112,7 @@ include-tagged::{sql-specs}/arithmetic.sql-spec[plus] [[sql-operators-subtract]] ==== `Subtract (infix -)` -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/arithmetic.sql-spec[minus] -------------------------------------------------- @@ -120,7 +120,7 @@ include-tagged::{sql-specs}/arithmetic.sql-spec[minus] [[sql-operators-negate]] ==== `Negate (unary -)` -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/arithmetic.sql-spec[unaryMinus] -------------------------------------------------- @@ -128,7 +128,7 @@ include-tagged::{sql-specs}/arithmetic.sql-spec[unaryMinus] [[sql-operators-multiply]] ==== `Multiply (*)` -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/arithmetic.sql-spec[multiply] -------------------------------------------------- @@ -136,7 +136,7 @@ include-tagged::{sql-specs}/arithmetic.sql-spec[multiply] [[sql-operators-divide]] ==== `Divide (/)` -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/arithmetic.sql-spec[divide] -------------------------------------------------- @@ -144,7 +144,7 @@ include-tagged::{sql-specs}/arithmetic.sql-spec[divide] [[sql-operators-remainder]] ==== `Modulo or Remainder(%)` -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/arithmetic.sql-spec[mod] -------------------------------------------------- @@ -157,7 +157,7 @@ include-tagged::{sql-specs}/arithmetic.sql-spec[mod] `::` provides an alternative syntax to the <> function. -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs/docs.csv-spec[conversionStringToLongCastOperator] -------------------------------------------------- diff --git a/docs/reference/sql/functions/search.asciidoc b/docs/reference/sql/functions/search.asciidoc index 079571a4e37c6..34716e070434f 100644 --- a/docs/reference/sql/functions/search.asciidoc +++ b/docs/reference/sql/functions/search.asciidoc @@ -33,7 +33,7 @@ and <> {es} queries. The first parameter is the field or fields to match against. In case it receives one value only, {es-sql} will use a `match` query to perform the search: -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[simpleMatch] ---- @@ -41,7 +41,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[simpleMatch] However, it can also receive a list of fields and their corresponding optional `boost` value. In this case, {es-sql} will use a `multi_match` query to match the documents: -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[multiFieldsMatch] ---- @@ -53,7 +53,7 @@ the final score than the `author` field when searching for `frank dune` text in Both options above can be used in combination with the optional third parameter of the `MATCH()` predicate, where one can specify additional configuration parameters (separated by semicolon `;`) for either `match` or `multi_match` queries. For example: -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[optionalParamsForMatch] ---- @@ -90,14 +90,14 @@ Just like `MATCH`, `QUERY` is a full-text search predicate that gives the user c The first parameter is basically the input that will be passed as is to the `query_string` query, which means that anything that `query_string` accepts in its `query` field can be used here as well: -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[simpleQueryQuery] ---- A more advanced example, showing more of the features that `query_string` supports, of course possible with {es-sql}: -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[advancedQueryQuery] ---- @@ -108,7 +108,7 @@ regex and fuzziness queries for the `name` field. If one needs to customize various configuration options that `query_string` exposes, this can be done using the second _optional_ parameter. Multiple settings can be specified separated by a semicolon `;`: -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[optionalParameterQuery] ---- @@ -144,14 +144,14 @@ combined using the same rules as {es}'s Typically `SCORE` is used for ordering the results of a query based on their relevance: -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[orderByScore] ---- However, it is perfectly fine to return the score without sorting by it: -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[scoreWithMatch] ---- diff --git a/docs/reference/sql/functions/string.asciidoc b/docs/reference/sql/functions/string.asciidoc index 7acc358763512..a82ac66adce1c 100644 --- a/docs/reference/sql/functions/string.asciidoc +++ b/docs/reference/sql/functions/string.asciidoc @@ -24,7 +24,7 @@ ASCII(string_exp) <1> Returns the ASCII code value of the leftmost character of `string_exp` as an integer. -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs/docs.csv-spec[stringAscii] -------------------------------------------------- @@ -47,7 +47,7 @@ BIT_LENGTH(string_exp) <1> Returns the length in bits of the `string_exp` input expression. -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs/docs.csv-spec[stringBitLength] -------------------------------------------------- @@ -70,7 +70,7 @@ CHAR(code) <1> Returns the character that has the ASCII code value specified by the numeric input. The value should be between 0 and 255; otherwise, the return value is data source–dependent. -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs/docs.csv-spec[stringChar] -------------------------------------------------- @@ -93,7 +93,7 @@ CHAR_LENGTH(string_exp) <1> Returns the length in characters of the input, if the string expression is of a character data type; otherwise, returns the length in bytes of the string expression (the smallest integer not less than the number of bits divided by 8). -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs/docs.csv-spec[stringCharLength] -------------------------------------------------- @@ -119,7 +119,7 @@ CONCAT( Returns a character string that is the result of concatenating `string_exp1` to `string_exp2`. If one of the string is `NULL`, the other string will be returned. -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs/docs.csv-spec[stringConcat] -------------------------------------------------- @@ -149,7 +149,7 @@ INSERT( Returns a string where `length` characters have been deleted from `source`, beginning at `start`, and where `replacement` has been inserted into `source`, beginning at `start`. -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs/docs.csv-spec[stringInsert] -------------------------------------------------- @@ -172,7 +172,7 @@ LCASE(string_exp) <1> Returns a string equal to that in `string_exp`, with all uppercase characters converted to lowercase. -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs/docs.csv-spec[stringLCase] -------------------------------------------------- @@ -198,7 +198,7 @@ LEFT( Returns the leftmost count characters of `string_exp`. -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs/docs.csv-spec[stringLeft] -------------------------------------------------- @@ -221,7 +221,7 @@ LENGTH(string_exp) <1> Returns the number of characters in `string_exp`, excluding trailing blanks. -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs/docs.csv-spec[stringLength] -------------------------------------------------- @@ -250,12 +250,12 @@ LOCATE( Returns the starting position of the first occurrence of `pattern` within `source`. The search for the first occurrence of `pattern` begins with the first character position in `source` unless the optional argument, `start`, is specified. If `start` is specified, the search begins with the character position indicated by the value of `start`. The first character position in `source` is indicated by the value 1. If `pattern` is not found within `source`, the value 0 is returned. -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs/docs.csv-spec[stringLocateWoStart] -------------------------------------------------- -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs/docs.csv-spec[stringLocateWithStart] -------------------------------------------------- @@ -278,7 +278,7 @@ LTRIM(string_exp) <1> Returns the characters of `string_exp`, with leading blanks removed. -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs/docs.csv-spec[stringLTrim] -------------------------------------------------- @@ -301,7 +301,7 @@ OCTET_LENGTH(string_exp) <1> Returns the length in bytes of the `string_exp` input expression. -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs/docs.csv-spec[stringOctetLength] -------------------------------------------------- @@ -327,7 +327,7 @@ POSITION( Returns the position of the `string_exp1` in `string_exp2`. The result is an exact numeric. -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs/docs.csv-spec[stringPosition] -------------------------------------------------- @@ -353,7 +353,7 @@ REPEAT( Returns a character string composed of `string_exp` repeated `count` times. -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs/docs.csv-spec[stringRepeat] -------------------------------------------------- @@ -381,7 +381,7 @@ REPLACE( Search `source` for occurrences of `pattern`, and replace with `replacement`. -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs/docs.csv-spec[stringReplace] -------------------------------------------------- @@ -407,7 +407,7 @@ RIGHT( Returns the rightmost count characters of `string_exp`. -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs/docs.csv-spec[stringRight] -------------------------------------------------- @@ -430,7 +430,7 @@ RTRIM(string_exp) <1> Returns the characters of `string_exp` with trailing blanks removed. -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs/docs.csv-spec[stringRTrim] -------------------------------------------------- @@ -453,7 +453,7 @@ SPACE(count) <1> Returns a character string consisting of `count` spaces. -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs/docs.csv-spec[stringSpace] -------------------------------------------------- @@ -481,7 +481,7 @@ SUBSTRING( Returns a character string that is derived from `source`, beginning at the character position specified by `start` for `length` characters. -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs/docs.csv-spec[stringSubString] -------------------------------------------------- @@ -504,7 +504,7 @@ UCASE(string_exp) <1> Returns a string equal to that of the input, with all lowercase characters converted to uppercase. -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs/docs.csv-spec[stringUCase] -------------------------------------------------- diff --git a/docs/reference/sql/functions/system.asciidoc b/docs/reference/sql/functions/system.asciidoc index dfca7d526d3a6..b2d604728c165 100644 --- a/docs/reference/sql/functions/system.asciidoc +++ b/docs/reference/sql/functions/system.asciidoc @@ -24,7 +24,7 @@ Returns the name of the database being queried. In the case of Elasticsearch SQL is the name of the Elasticsearch cluster. This function should always return a non-null value. -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs/docs.csv-spec[database] -------------------------------------------------- @@ -46,7 +46,7 @@ USER() Returns the username of the authenticated user executing the query. This function can return `null` in case {stack-ov}/elasticsearch-security.html[Security] is disabled. -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs/docs.csv-spec[user] -------------------------------------------------- diff --git a/docs/reference/sql/functions/type-conversion.asciidoc b/docs/reference/sql/functions/type-conversion.asciidoc index 7f8488be40f64..c6c761305519e 100644 --- a/docs/reference/sql/functions/type-conversion.asciidoc +++ b/docs/reference/sql/functions/type-conversion.asciidoc @@ -25,17 +25,17 @@ Casts the result of the given expression to the target <> with slightly differen Moreover, apart from the standard <> it supports the corresponding https://docs.microsoft.com/en-us/sql/odbc/reference/appendixes/explicit-data-type-conversion-function?view=sql-server-2017[ODBC data types]. -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[conversionStringToIntConvertODBCDataType] ---- -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[conversionStringToIntConvertESDataType] ---- diff --git a/docs/reference/sql/language/indices.asciidoc b/docs/reference/sql/language/indices.asciidoc index 82c7f30fb041e..8f48177ce03a9 100644 --- a/docs/reference/sql/language/indices.asciidoc +++ b/docs/reference/sql/language/indices.asciidoc @@ -14,7 +14,7 @@ is supported _as long_ as it is quoted or escaped as a table identifier. For example: -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[showTablesEsMultiIndex] ---- @@ -28,7 +28,7 @@ The same kind of patterns can also be used to query multiple indices or tables. For example: -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[fromTablePatternQuoted] ---- @@ -44,7 +44,7 @@ or multiple `%` characters. Using `SHOW TABLES` command again: -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[showTablesLikeWildcard] ---- @@ -53,7 +53,7 @@ The pattern matches all tables that start with `emp`. This command supports _escaping_ as well, for example: -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[showTablesLikeEscape] ---- @@ -101,13 +101,13 @@ Set to `true` properties `index_include_frozen` in the <> or `index.in dedicated keyword:: Explicitly perform the inclusion through the dedicated `FROZEN` keyword in the `FROM` clause or `INCLUDE FROZEN` in the `SHOW` commands: -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[showTablesIncludeFrozen] ---- -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[fromTableIncludeFrozen] ---- diff --git a/docs/reference/sql/language/syntax/commands/describe-table.asciidoc b/docs/reference/sql/language/syntax/commands/describe-table.asciidoc index da02f1fa23817..9aad578da479e 100644 --- a/docs/reference/sql/language/syntax/commands/describe-table.asciidoc +++ b/docs/reference/sql/language/syntax/commands/describe-table.asciidoc @@ -30,7 +30,7 @@ DESC `DESC` and `DESCRIBE` are aliases to <>. -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[describeTable] ---- diff --git a/docs/reference/sql/language/syntax/commands/select.asciidoc b/docs/reference/sql/language/syntax/commands/select.asciidoc index 08ebe0ae96497..0a4922a3cff98 100644 --- a/docs/reference/sql/language/syntax/commands/select.asciidoc +++ b/docs/reference/sql/language/syntax/commands/select.asciidoc @@ -36,7 +36,7 @@ The general execution of `SELECT` is as follows: As with a table, every output column of a `SELECT` has a name which can be either specified per column through the `AS` keyword : -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[selectColumnAlias] ---- @@ -46,14 +46,14 @@ which is why it is recommended to specify it. assigned by {es-sql} if no name is given: -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[selectInline] ---- or if it's a simple column reference, use its name as the column name: -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[selectColumn] ---- @@ -63,7 +63,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[selectColumn] To select all the columns in the source, one can use `*`: -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[wildcardWithOrder] ---- @@ -89,14 +89,14 @@ Represents the name (optionally qualified) of an existing table, either a concre If the table name contains special SQL characters (such as `.`,`-`,`*`,etc...) use double quotes to escape them: -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[fromTableQuoted] ---- The name can be a <> pointing to multiple indices (likely requiring quoting as mentioned above) with the restriction that *all* resolved concrete tables have **exact mapping**. -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[fromTablePatternQuoted] ---- @@ -104,7 +104,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[fromTablePatternQuoted] `alias`:: A substitute name for the `FROM` item containing the alias. An alias is used for brevity or to eliminate ambiguity. When an alias is provided, it completely hides the actual name of the table and must be used in its place. -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[fromTableAlias] ---- @@ -125,7 +125,7 @@ where: Represents an expression that evaluates to a `boolean`. Only the rows that match the condition (to `true`) are returned. -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[basicWhere] ---- @@ -148,34 +148,34 @@ Represents an expression on which rows are being grouped _on_. It can be a colum A common, group by column name: -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[groupByColumn] ---- Grouping by output ordinal: -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[groupByOrdinal] ---- Grouping by alias: -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[groupByAlias] ---- And grouping by column expression (typically used along-side an alias): -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[groupByExpression] ---- Or a mixture of the above: -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[groupByMulti] ---- @@ -185,21 +185,21 @@ When a `GROUP BY` clause is used in a `SELECT`, _all_ output expressions must be To wit: -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[groupByAndAgg] ---- Expressions over aggregates used in output: -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[groupByAndAggExpression] ---- Multiple aggregates used: -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[groupByAndMultipleAggs] ---- @@ -216,14 +216,14 @@ As such, the query emits only a single row (as there is only a single group). A common example is counting the number of records: -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[groupByImplicitCount] ---- Of course, multiple aggregations can be applied: -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[groupByImplicitMultipleAggs] ---- @@ -249,14 +249,14 @@ Both `WHERE` and `HAVING` are used for filtering however there are several signi . `WHERE` works on individual *rows*, `HAVING` works on the *groups* created by ``GROUP BY`` . `WHERE` is evaluated *before* grouping, `HAVING` is evaluated *after* grouping -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[groupByHaving] ---- Further more, one can use multiple aggregate expressions inside `HAVING` even ones that are not used in the output (`SELECT`): -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[groupByHavingMultiple] ---- @@ -269,14 +269,14 @@ As such, the query emits only a single row (as there is only a single group) and In this example, `HAVING` matches: -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[groupByHavingImplicitMatch] ---- //However `HAVING` can also not match, in which case an empty result is returned: // -//["source","sql",subs="attributes,callouts,macros"] +//[source, sql] //---- //include-tagged::{sql-specs}/docs/docs.csv-spec[groupByHavingImplicitNoMatch] //---- @@ -304,7 +304,7 @@ IMPORTANT: When used along-side, `GROUP BY` expression can point _only_ to the c For example, the following query sorts by an arbitrary input field (`page_count`): -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[orderByBasic] ---- @@ -318,20 +318,20 @@ NOTE: With `GROUP BY`, make sure the ordering targets the resulting group - appl For example, to order groups simply indicate the grouping key: -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[orderByGroup] ---- Multiple keys can be specified of course: -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[groupByMulti] ---- Further more, it is possible to order groups based on aggregations of their values: -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[orderByAgg] ---- @@ -352,7 +352,7 @@ combined using the same rules as {es}'s To sort based on the `score`, use the special function `SCORE()`: -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[orderByScore] ---- @@ -360,7 +360,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[orderByScore] Note that you can return `SCORE()` by using a full-text search predicate in the `WHERE` clause. This is possible even if `SCORE()` is not used for sorting: -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[orderByScoreWithMatch] ---- @@ -387,7 +387,7 @@ ALL:: indicates there is no limit and thus all results are being returned. To return -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[limitBasic] ---- diff --git a/docs/reference/sql/language/syntax/commands/show-columns.asciidoc b/docs/reference/sql/language/syntax/commands/show-columns.asciidoc index b21c02358e526..9cb90af6b656f 100644 --- a/docs/reference/sql/language/syntax/commands/show-columns.asciidoc +++ b/docs/reference/sql/language/syntax/commands/show-columns.asciidoc @@ -21,7 +21,7 @@ patterns. List the columns in table and their data type (and other attributes). -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[showColumns] ---- diff --git a/docs/reference/sql/language/syntax/commands/show-functions.asciidoc b/docs/reference/sql/language/syntax/commands/show-functions.asciidoc index 47c000e81d9fd..8689788867c20 100644 --- a/docs/reference/sql/language/syntax/commands/show-functions.asciidoc +++ b/docs/reference/sql/language/syntax/commands/show-functions.asciidoc @@ -15,7 +15,7 @@ SHOW FUNCTIONS [LIKE pattern?]? <1> List all the SQL functions and their type. The `LIKE` clause can be used to restrict the list of names to the given pattern. -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[showFunctions] ---- @@ -23,25 +23,25 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[showFunctions] The list of functions returned can be customized based on the pattern. It can be an exact match: -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[showFunctionsLikeExact] ---- A wildcard for exactly one character: -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[showFunctionsLikeChar] ---- A wildcard matching zero or more characters: -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[showFunctionsLikeWildcard] ---- Or of course, a variation of the above: -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[showFunctionsWithPattern] ---- diff --git a/docs/reference/sql/language/syntax/commands/show-tables.asciidoc b/docs/reference/sql/language/syntax/commands/show-tables.asciidoc index 554819e24b178..d5a40337713d4 100644 --- a/docs/reference/sql/language/syntax/commands/show-tables.asciidoc +++ b/docs/reference/sql/language/syntax/commands/show-tables.asciidoc @@ -24,7 +24,7 @@ patterns. List the tables available to the current user and their type. -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[showTables] ---- @@ -32,7 +32,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[showTables] Match multiple indices by using {es} <> notation: -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[showTablesEsMultiIndex] ---- @@ -40,26 +40,26 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[showTablesEsMultiIndex] One can also use the `LIKE` clause to restrict the list of names to the given pattern. The pattern can be an exact match: -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[showTablesLikeExact] ---- Multiple chars: -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[showTablesLikeWildcard] ---- A single char: -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[showTablesLikeOneChar] ---- Or a mixture of single and multiple chars: -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[showTablesLikeMixed] ---- diff --git a/docs/reference/sql/security.asciidoc b/docs/reference/sql/security.asciidoc index ad946c33e2d98..cbf41b46997a4 100644 --- a/docs/reference/sql/security.asciidoc +++ b/docs/reference/sql/security.asciidoc @@ -33,7 +33,7 @@ the API require `cluster:monitor/main`. The following example configures a role that can run SQL in JDBC querying the `test` and `bort` indices: -["source","yaml",subs="attributes,callouts,macros"] +[source, yaml] -------------------------------------------------- include-tagged::{sql-tests}/security/roles.yml[cli_drivers] -------------------------------------------------- diff --git a/x-pack/plugin/sql/qa/src/main/resources/docs/docs.csv-spec b/x-pack/plugin/sql/qa/src/main/resources/docs/docs.csv-spec index 00f7632c9fc44..3cb2c3f45d4aa 100644 --- a/x-pack/plugin/sql/qa/src/main/resources/docs/docs.csv-spec +++ b/x-pack/plugin/sql/qa/src/main/resources/docs/docs.csv-spec @@ -643,9 +643,9 @@ M |57 ; groupByAndAggExpression -// tag::groupByAndAggExpression schema::g:s|salary:i -SELECT gender AS g, ROUND( (MIN(salary) / 100) ) AS salary FROM emp GROUP BY gender; +// tag::groupByAndAggExpression +SELECT gender AS g, ROUND((MIN(salary) / 100)) AS salary FROM emp GROUP BY gender; g | salary ---------------+--------------- From 70fb3d8c52138242eeaea35da1e0ae00ba0bcece Mon Sep 17 00:00:00 2001 From: Jake Landis Date: Fri, 31 May 2019 12:47:38 -0500 Subject: [PATCH 221/224] address SmokeTestWatcherWithSecurityIT#testSearchInputWithInsufficientPrivileges (#42764) This commit adds busy wait and increases the interval for SmokeTestWatcherWithSecurityIT#testSearchInputWithInsufficientPrivileges. Watcher will not allow the same watch to be executed concurrently. If it finds that case, it will update the watch history with a "not_executed_already_queued" status. Given a slow machine, and 1 second interval this is possible. To address this, this commit increases the interval so the watch can fire at most 2 times with a greater interval between the executions and adds a busy wait for the expected state. While this does not gaurntee a fix, it should greatly reduce the chances of this test erroring. --- .../smoketest/SmokeTestWatcherWithSecurityIT.java | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/x-pack/qa/smoke-test-watcher-with-security/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherWithSecurityIT.java b/x-pack/qa/smoke-test-watcher-with-security/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherWithSecurityIT.java index e184ef19596af..d2f688889a95f 100644 --- a/x-pack/qa/smoke-test-watcher-with-security/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherWithSecurityIT.java +++ b/x-pack/qa/smoke-test-watcher-with-security/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherWithSecurityIT.java @@ -164,7 +164,7 @@ public void testSearchInputWithInsufficientPrivileges() throws Exception { String indexName = "index_not_allowed_to_read"; try (XContentBuilder builder = jsonBuilder()) { builder.startObject(); - builder.startObject("trigger").startObject("schedule").field("interval", "1s").endObject().endObject(); + builder.startObject("trigger").startObject("schedule").field("interval", "4s").endObject().endObject(); builder.startObject("input").startObject("search").startObject("request") .startArray("indices").value(indexName).endArray() .startObject("body").startObject("query").startObject("match_all").endObject().endObject().endObject() @@ -180,8 +180,10 @@ public void testSearchInputWithInsufficientPrivileges() throws Exception { // check history, after watch has fired ObjectPath objectPath = getWatchHistoryEntry(watchId); - String state = objectPath.evaluate("hits.hits.0._source.state"); - assertThat(state, is("execution_not_needed")); + assertBusy(() -> { + String state = objectPath.evaluate("hits.hits.0._source.state"); + assertThat(state, is("execution_not_needed")); + }); boolean conditionMet = objectPath.evaluate("hits.hits.0._source.result.condition.met"); assertThat(conditionMet, is(false)); } From 56c1ed5bc4038e93b1ddf425403557955114d17c Mon Sep 17 00:00:00 2001 From: Jay Modi Date: Fri, 31 May 2019 12:17:44 -0600 Subject: [PATCH 222/224] Remove XPackClient from x-pack (#42729) This commit removes the XPackClient class from x-pack. This class is a relic of the TransportClient and simply a wrapper around it. Calls are replaced with direct usage of a client. Additionally, the XPackRestHandler class has been removed as it only served to provide the XPackClient to implementing rest handlers. --- .../collector/ccr/StatsCollectorTests.java | 8 +- .../license/GetBasicStatusRequestBuilder.java | 4 +- .../license/GetTrialStatusRequestBuilder.java | 4 +- .../license/LicensingClient.java | 8 +- .../license/PostStartBasicRequestBuilder.java | 4 +- .../license/PostStartTrialRequestBuilder.java | 4 +- .../license/RestDeleteLicenseAction.java | 12 +- .../license/RestGetBasicStatus.java | 12 +- .../license/RestGetLicenseAction.java | 14 +- .../license/RestGetTrialStatus.java | 12 +- .../license/RestPostStartBasicLicense.java | 12 +- .../license/RestPostStartTrialLicense.java | 14 +- .../license/RestPutLicenseAction.java | 15 +-- .../elasticsearch/xpack/core/XPackClient.java | 121 ------------------ .../xpack/core/rest/XPackRestHandler.java | 30 ----- .../rest/action/RestFreezeIndexAction.java | 11 +- .../core/rest/action/RestXPackInfoAction.java | 15 ++- .../rest/action/RestXPackUsageAction.java | 14 +- .../core/template/IndexTemplateRegistry.java | 3 +- .../index/engine/FrozenIndexTests.java | 69 +++++----- .../graph/rest/action/RestGraphAction.java | 18 +-- .../collector/ccr/StatsCollector.java | 12 +- .../collector/ml/JobStatsCollector.java | 10 +- .../exporter/local/LocalExporter.java | 4 +- .../rest/action/RestMonitoringBulkAction.java | 53 ++++---- .../collector/ml/JobStatsCollectorTests.java | 9 +- .../action/RestMonitoringBulkActionTests.java | 51 ++------ .../test/MonitoringIntegTestCase.java | 3 +- .../AbstractWatcherIntegrationTestCase.java | 3 +- 29 files changed, 181 insertions(+), 368 deletions(-) delete mode 100644 x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClient.java delete mode 100644 x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rest/XPackRestHandler.java diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/monitoring/collector/ccr/StatsCollectorTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/monitoring/collector/ccr/StatsCollectorTests.java index bb44fd59da5d2..2b85cd11e927f 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/monitoring/collector/ccr/StatsCollectorTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/monitoring/collector/ccr/StatsCollectorTests.java @@ -15,9 +15,8 @@ import org.elasticsearch.xpack.core.XPackSettings; import org.elasticsearch.xpack.core.ccr.AutoFollowStats; import org.elasticsearch.xpack.core.ccr.ShardFollowNodeTaskStatus; -import org.elasticsearch.xpack.core.ccr.action.FollowStatsAction; import org.elasticsearch.xpack.core.ccr.action.CcrStatsAction; -import org.elasticsearch.xpack.core.ccr.client.CcrClient; +import org.elasticsearch.xpack.core.ccr.action.FollowStatsAction; import org.elasticsearch.xpack.core.monitoring.MonitoredSystem; import org.elasticsearch.xpack.core.monitoring.exporter.MonitoringDoc; import org.elasticsearch.xpack.monitoring.BaseCollectorTestCase; @@ -33,6 +32,7 @@ import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; import static org.mockito.Matchers.any; +import static org.mockito.Matchers.eq; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; @@ -127,7 +127,7 @@ public void testDoCollect() throws Exception { whenClusterStateWithUUID(clusterUuid); final MonitoringDoc.Node node = randomMonitoringNode(random()); - final CcrClient client = mock(CcrClient.class); + final Client client = mock(Client.class); final ThreadContext threadContext = new ThreadContext(Settings.EMPTY); final List statuses = mockStatuses(); @@ -142,7 +142,7 @@ public void testDoCollect() throws Exception { final ActionFuture future = (ActionFuture) mock(ActionFuture.class); final CcrStatsAction.Response response = new CcrStatsAction.Response(autoFollowStats, statsResponse); - when(client.stats(any())).thenReturn(future); + when(client.execute(eq(CcrStatsAction.INSTANCE), any(CcrStatsAction.Request.class))).thenReturn(future); when(future.actionGet(timeout)).thenReturn(response); final StatsCollector collector = new StatsCollector(settings, clusterService, licenseState, client, threadContext); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/GetBasicStatusRequestBuilder.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/GetBasicStatusRequestBuilder.java index e56451c4a92ff..368bd4a9b735e 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/GetBasicStatusRequestBuilder.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/GetBasicStatusRequestBuilder.java @@ -10,7 +10,7 @@ class GetBasicStatusRequestBuilder extends ActionRequestBuilder { - GetBasicStatusRequestBuilder(ElasticsearchClient client, GetBasicStatusAction action) { - super(client, action, new GetBasicStatusRequest()); + GetBasicStatusRequestBuilder(ElasticsearchClient client) { + super(client, GetBasicStatusAction.INSTANCE, new GetBasicStatusRequest()); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/GetTrialStatusRequestBuilder.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/GetTrialStatusRequestBuilder.java index d220b476bc578..4102e98f45d68 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/GetTrialStatusRequestBuilder.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/GetTrialStatusRequestBuilder.java @@ -10,7 +10,7 @@ class GetTrialStatusRequestBuilder extends ActionRequestBuilder { - GetTrialStatusRequestBuilder(ElasticsearchClient client, GetTrialStatusAction action) { - super(client, action, new GetTrialStatusRequest()); + GetTrialStatusRequestBuilder(ElasticsearchClient client) { + super(client, GetTrialStatusAction.INSTANCE, new GetTrialStatusRequest()); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicensingClient.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicensingClient.java index 80d1fb68f0e2a..4dcfa94f6e4d2 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicensingClient.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicensingClient.java @@ -45,11 +45,11 @@ public void deleteLicense(DeleteLicenseRequest request, ActionListener listener) { @@ -61,10 +61,10 @@ public void postStartBasic(PostStartBasicRequest request, ActionListener { - PostStartBasicRequestBuilder(ElasticsearchClient client, PostStartBasicAction action) { - super(client, action, new PostStartBasicRequest()); + PostStartBasicRequestBuilder(ElasticsearchClient client) { + super(client, PostStartBasicAction.INSTANCE, new PostStartBasicRequest()); } public PostStartBasicRequestBuilder setAcknowledge(boolean acknowledge) { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/PostStartTrialRequestBuilder.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/PostStartTrialRequestBuilder.java index 8e12c879f997d..93a6207d519d1 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/PostStartTrialRequestBuilder.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/PostStartTrialRequestBuilder.java @@ -10,8 +10,8 @@ class PostStartTrialRequestBuilder extends ActionRequestBuilder { - PostStartTrialRequestBuilder(ElasticsearchClient client, PostStartTrialAction action) { - super(client, action, new PostStartTrialRequest()); + PostStartTrialRequestBuilder(ElasticsearchClient client) { + super(client, PostStartTrialAction.INSTANCE, new PostStartTrialRequest()); } public PostStartTrialRequestBuilder setAcknowledge(boolean acknowledge) { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RestDeleteLicenseAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RestDeleteLicenseAction.java index 5383726adc318..56c1898ac793f 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RestDeleteLicenseAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RestDeleteLicenseAction.java @@ -7,20 +7,20 @@ package org.elasticsearch.license; import org.apache.logging.log4j.LogManager; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.protocol.xpack.license.DeleteLicenseRequest; +import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.action.RestToXContentListener; -import org.elasticsearch.xpack.core.XPackClient; -import org.elasticsearch.xpack.core.rest.XPackRestHandler; import java.io.IOException; import static org.elasticsearch.rest.RestRequest.Method.DELETE; -public class RestDeleteLicenseAction extends XPackRestHandler { +public class RestDeleteLicenseAction extends BaseRestHandler { private static final DeprecationLogger deprecationLogger = new DeprecationLogger(LogManager.getLogger(RestDeleteLicenseAction.class)); @@ -29,7 +29,7 @@ public class RestDeleteLicenseAction extends XPackRestHandler { // TODO: remove deprecated endpoint in 8.0.0 controller.registerWithDeprecatedHandler( DELETE, "/_license", this, - DELETE, URI_BASE + "/license", deprecationLogger); + DELETE, "/_xpack/license", deprecationLogger); } @Override @@ -38,12 +38,12 @@ public String getName() { } @Override - public RestChannelConsumer doPrepareRequest(final RestRequest request, final XPackClient client) throws IOException { + public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { DeleteLicenseRequest deleteLicenseRequest = new DeleteLicenseRequest(); deleteLicenseRequest.timeout(request.paramAsTime("timeout", deleteLicenseRequest.timeout())); deleteLicenseRequest.masterNodeTimeout(request.paramAsTime("master_timeout", deleteLicenseRequest.masterNodeTimeout())); - return channel -> client.es().admin().cluster().execute(DeleteLicenseAction.INSTANCE, deleteLicenseRequest, + return channel -> client.admin().cluster().execute(DeleteLicenseAction.INSTANCE, deleteLicenseRequest, new RestToXContentListener<>(channel)); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RestGetBasicStatus.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RestGetBasicStatus.java index 0195b350b050c..3e61c3978bcbc 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RestGetBasicStatus.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RestGetBasicStatus.java @@ -7,17 +7,17 @@ package org.elasticsearch.license; import org.apache.logging.log4j.LogManager; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.action.RestToXContentListener; -import org.elasticsearch.xpack.core.XPackClient; -import org.elasticsearch.xpack.core.rest.XPackRestHandler; import static org.elasticsearch.rest.RestRequest.Method.GET; -public class RestGetBasicStatus extends XPackRestHandler { +public class RestGetBasicStatus extends BaseRestHandler { private static final DeprecationLogger deprecationLogger = new DeprecationLogger(LogManager.getLogger(RestGetBasicStatus.class)); @@ -26,12 +26,12 @@ public class RestGetBasicStatus extends XPackRestHandler { // TODO: remove deprecated endpoint in 8.0.0 controller.registerWithDeprecatedHandler( GET, "/_license/basic_status", this, - GET, URI_BASE + "/license/basic_status", deprecationLogger); + GET, "/_xpack/license/basic_status", deprecationLogger); } @Override - protected RestChannelConsumer doPrepareRequest(RestRequest request, XPackClient client) { - return channel -> client.licensing().prepareGetStartBasic().execute(new RestToXContentListener<>(channel)); + protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) { + return channel -> new GetBasicStatusRequestBuilder(client).execute(new RestToXContentListener<>(channel)); } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RestGetLicenseAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RestGetLicenseAction.java index 02809ae974cd7..4c1102208c840 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RestGetLicenseAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RestGetLicenseAction.java @@ -7,18 +7,18 @@ package org.elasticsearch.license; import org.apache.logging.log4j.LogManager; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.protocol.xpack.license.GetLicenseRequest; +import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.BytesRestResponse; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.RestResponse; import org.elasticsearch.rest.action.RestBuilderListener; -import org.elasticsearch.xpack.core.XPackClient; -import org.elasticsearch.xpack.core.rest.XPackRestHandler; import java.io.IOException; import java.util.HashMap; @@ -28,7 +28,7 @@ import static org.elasticsearch.rest.RestStatus.NOT_FOUND; import static org.elasticsearch.rest.RestStatus.OK; -public class RestGetLicenseAction extends XPackRestHandler { +public class RestGetLicenseAction extends BaseRestHandler { private static final DeprecationLogger deprecationLogger = new DeprecationLogger(LogManager.getLogger(RestGetLicenseAction.class)); @@ -37,7 +37,7 @@ public class RestGetLicenseAction extends XPackRestHandler { // TODO: remove deprecated endpoint in 8.0.0 controller.registerWithDeprecatedHandler( GET, "/_license", this, - GET, URI_BASE + "/license", deprecationLogger); + GET, "/_xpack/license", deprecationLogger); } @Override @@ -52,15 +52,15 @@ public String getName() { * The licenses are sorted by latest issue_date */ @Override - public RestChannelConsumer doPrepareRequest(final RestRequest request, final XPackClient client) throws IOException { + public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { final Map overrideParams = new HashMap<>(2); overrideParams.put(License.REST_VIEW_MODE, "true"); overrideParams.put(License.LICENSE_VERSION_MODE, String.valueOf(License.VERSION_CURRENT)); final ToXContent.Params params = new ToXContent.DelegatingMapParams(overrideParams, request); GetLicenseRequest getLicenseRequest = new GetLicenseRequest(); getLicenseRequest.local(request.paramAsBoolean("local", getLicenseRequest.local())); - return channel -> client.es().admin().cluster().execute(GetLicenseAction.INSTANCE, getLicenseRequest, - new RestBuilderListener(channel) { + return channel -> client.admin().cluster().execute(GetLicenseAction.INSTANCE, getLicenseRequest, + new RestBuilderListener<>(channel) { @Override public RestResponse buildResponse(GetLicenseResponse response, XContentBuilder builder) throws Exception { // Default to pretty printing, but allow ?pretty=false to disable diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RestGetTrialStatus.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RestGetTrialStatus.java index 20366328e5031..2ee79da977357 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RestGetTrialStatus.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RestGetTrialStatus.java @@ -7,17 +7,17 @@ package org.elasticsearch.license; import org.apache.logging.log4j.LogManager; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.action.RestToXContentListener; -import org.elasticsearch.xpack.core.XPackClient; -import org.elasticsearch.xpack.core.rest.XPackRestHandler; import static org.elasticsearch.rest.RestRequest.Method.GET; -public class RestGetTrialStatus extends XPackRestHandler { +public class RestGetTrialStatus extends BaseRestHandler { private static final DeprecationLogger deprecationLogger = new DeprecationLogger(LogManager.getLogger(RestGetTrialStatus.class)); @@ -26,12 +26,12 @@ public class RestGetTrialStatus extends XPackRestHandler { // TODO: remove deprecated endpoint in 8.0.0 controller.registerWithDeprecatedHandler( GET, "/_license/trial_status", this, - GET, URI_BASE + "/license/trial_status", deprecationLogger); + GET, "/_xpack/license/trial_status", deprecationLogger); } @Override - protected RestChannelConsumer doPrepareRequest(RestRequest request, XPackClient client) { - return channel -> client.licensing().prepareGetStartTrial().execute(new RestToXContentListener<>(channel)); + protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) { + return channel -> new GetTrialStatusRequestBuilder(client).execute(new RestToXContentListener<>(channel)); } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RestPostStartBasicLicense.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RestPostStartBasicLicense.java index 79e8849669c8f..77f09383b407d 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RestPostStartBasicLicense.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RestPostStartBasicLicense.java @@ -7,19 +7,19 @@ package org.elasticsearch.license; import org.apache.logging.log4j.LogManager; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.action.RestStatusToXContentListener; -import org.elasticsearch.xpack.core.XPackClient; -import org.elasticsearch.xpack.core.rest.XPackRestHandler; import java.io.IOException; import static org.elasticsearch.rest.RestRequest.Method.POST; -public class RestPostStartBasicLicense extends XPackRestHandler { +public class RestPostStartBasicLicense extends BaseRestHandler { private static final DeprecationLogger deprecationLogger = new DeprecationLogger(LogManager.getLogger(RestPostStartBasicLicense.class)); @@ -28,16 +28,16 @@ public class RestPostStartBasicLicense extends XPackRestHandler { // TODO: remove deprecated endpoint in 8.0.0 controller.registerWithDeprecatedHandler( POST, "/_license/start_basic", this, - POST, URI_BASE + "/license/start_basic", deprecationLogger); + POST, "/_xpack/license/start_basic", deprecationLogger); } @Override - protected RestChannelConsumer doPrepareRequest(RestRequest request, XPackClient client) throws IOException { + protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { PostStartBasicRequest startBasicRequest = new PostStartBasicRequest(); startBasicRequest.acknowledge(request.paramAsBoolean("acknowledge", false)); startBasicRequest.timeout(request.paramAsTime("timeout", startBasicRequest.timeout())); startBasicRequest.masterNodeTimeout(request.paramAsTime("master_timeout", startBasicRequest.masterNodeTimeout())); - return channel -> client.licensing().postStartBasic(startBasicRequest, new RestStatusToXContentListener<>(channel)); + return channel -> client.execute(PostStartBasicAction.INSTANCE, startBasicRequest, new RestStatusToXContentListener<>(channel)); } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RestPostStartTrialLicense.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RestPostStartTrialLicense.java index a263d0d82c26a..d2ce0776305da 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RestPostStartTrialLicense.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RestPostStartTrialLicense.java @@ -7,23 +7,23 @@ package org.elasticsearch.license; import org.apache.logging.log4j.LogManager; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.BytesRestResponse; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.RestResponse; import org.elasticsearch.rest.action.RestBuilderListener; -import org.elasticsearch.xpack.core.XPackClient; -import org.elasticsearch.xpack.core.rest.XPackRestHandler; import java.io.IOException; import java.util.Map; import static org.elasticsearch.rest.RestRequest.Method.POST; -public class RestPostStartTrialLicense extends XPackRestHandler { +public class RestPostStartTrialLicense extends BaseRestHandler { private static final DeprecationLogger deprecationLogger = new DeprecationLogger(LogManager.getLogger(RestPostStartTrialLicense.class)); @@ -32,16 +32,16 @@ public class RestPostStartTrialLicense extends XPackRestHandler { // TODO: remove deprecated endpoint in 8.0.0 controller.registerWithDeprecatedHandler( POST, "/_license/start_trial", this, - POST, URI_BASE + "/license/start_trial", deprecationLogger); + POST, "/_xpack/license/start_trial", deprecationLogger); } @Override - protected RestChannelConsumer doPrepareRequest(RestRequest request, XPackClient client) throws IOException { + protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { PostStartTrialRequest startTrialRequest = new PostStartTrialRequest(); startTrialRequest.setType(request.param("type", "trial")); startTrialRequest.acknowledge(request.paramAsBoolean("acknowledge", false)); - return channel -> client.licensing().postStartTrial(startTrialRequest, - new RestBuilderListener(channel) { + return channel -> client.execute(PostStartTrialAction.INSTANCE, startTrialRequest, + new RestBuilderListener<>(channel) { @Override public RestResponse buildResponse(PostStartTrialResponse response, XContentBuilder builder) throws Exception { PostStartTrialResponse.Status status = response.getStatus(); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RestPutLicenseAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RestPutLicenseAction.java index 986dacb687738..698ec440a6cbe 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RestPutLicenseAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RestPutLicenseAction.java @@ -7,20 +7,20 @@ package org.elasticsearch.license; import org.apache.logging.log4j.LogManager; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.action.RestToXContentListener; -import org.elasticsearch.xpack.core.XPackClient; -import org.elasticsearch.xpack.core.rest.XPackRestHandler; import java.io.IOException; import static org.elasticsearch.rest.RestRequest.Method.POST; import static org.elasticsearch.rest.RestRequest.Method.PUT; -public class RestPutLicenseAction extends XPackRestHandler { +public class RestPutLicenseAction extends BaseRestHandler { private static final DeprecationLogger deprecationLogger = new DeprecationLogger(LogManager.getLogger(RestPutLicenseAction.class)); @@ -30,11 +30,11 @@ public class RestPutLicenseAction extends XPackRestHandler { // TODO: remove deprecated endpoint in 8.0.0 controller.registerWithDeprecatedHandler( POST, "/_license", this, - POST, URI_BASE + "/license", deprecationLogger); + POST, "/_xpack/license", deprecationLogger); // TODO: remove deprecated endpoint in 8.0.0 controller.registerWithDeprecatedHandler( PUT, "/_license", this, - PUT, URI_BASE + "/license", deprecationLogger); + PUT, "/_xpack/license", deprecationLogger); } @Override @@ -43,7 +43,7 @@ public String getName() { } @Override - public RestChannelConsumer doPrepareRequest(final RestRequest request, final XPackClient client) throws IOException { + public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { if (request.hasContent() == false) { throw new IllegalArgumentException("The license must be provided in the request body"); } @@ -58,8 +58,7 @@ public RestChannelConsumer doPrepareRequest(final RestRequest request, final XPa "/_license/start_basic API to install a basic license that does not expire."); } - return channel -> client.es().admin().cluster().execute(PutLicenseAction.INSTANCE, putLicenseRequest, - new RestToXContentListener<>(channel)); + return channel -> client.execute(PutLicenseAction.INSTANCE, putLicenseRequest, new RestToXContentListener<>(channel)); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClient.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClient.java deleted file mode 100644 index acf97b63684ae..0000000000000 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClient.java +++ /dev/null @@ -1,121 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ -package org.elasticsearch.xpack.core; - -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.support.PlainActionFuture; -import org.elasticsearch.client.Client; -import org.elasticsearch.common.settings.SecureString; -import org.elasticsearch.license.LicensingClient; -import org.elasticsearch.protocol.xpack.XPackInfoRequest; -import org.elasticsearch.protocol.xpack.XPackInfoResponse; -import org.elasticsearch.xpack.core.action.TransportFreezeIndexAction.FreezeIndexAction; -import org.elasticsearch.xpack.core.action.TransportFreezeIndexAction.FreezeRequest; -import org.elasticsearch.xpack.core.action.TransportFreezeIndexAction.FreezeResponse; -import org.elasticsearch.xpack.core.action.XPackInfoAction; -import org.elasticsearch.xpack.core.action.XPackInfoRequestBuilder; -import org.elasticsearch.xpack.core.ccr.client.CcrClient; -import org.elasticsearch.xpack.core.indexlifecycle.client.ILMClient; -import org.elasticsearch.xpack.core.ml.client.MachineLearningClient; -import org.elasticsearch.xpack.core.monitoring.client.MonitoringClient; -import org.elasticsearch.xpack.core.watcher.client.WatcherClient; - -import java.util.Collections; -import java.util.Map; -import java.util.Objects; -import java.util.concurrent.ExecutionException; - -import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.BASIC_AUTH_HEADER; -import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue; - -public class XPackClient { - - private final Client client; - - private final CcrClient ccrClient; - private final LicensingClient licensingClient; - private final MonitoringClient monitoringClient; - private final WatcherClient watcherClient; - private final MachineLearningClient machineLearning; - private final ILMClient ilmClient; - - public XPackClient(Client client) { - this.client = Objects.requireNonNull(client, "client"); - this.ccrClient = new CcrClient(client); - this.licensingClient = new LicensingClient(client); - this.monitoringClient = new MonitoringClient(client); - this.watcherClient = new WatcherClient(client); - this.machineLearning = new MachineLearningClient(client); - this.ilmClient = new ILMClient(client); - } - - public Client es() { - return client; - } - - public CcrClient ccr() { - return ccrClient; - } - - public LicensingClient licensing() { - return licensingClient; - } - - public MonitoringClient monitoring() { - return monitoringClient; - } - - public WatcherClient watcher() { - return watcherClient; - } - - public MachineLearningClient machineLearning() { - return machineLearning; - } - - public ILMClient ilmClient() { - return ilmClient; - } - - public XPackClient withHeaders(Map headers) { - return new XPackClient(client.filterWithHeader(headers)); - } - - /** - * Returns a client that will call xpack APIs on behalf of the given user. - * - * @param username The username of the user - * @param passwd The password of the user. This char array can be cleared after calling this method. - */ - public XPackClient withAuth(String username, char[] passwd) { - return withHeaders(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue(username, new SecureString(passwd)))); - } - - public XPackInfoRequestBuilder prepareInfo() { - return new XPackInfoRequestBuilder(client); - } - - public void info(XPackInfoRequest request, ActionListener listener) { - client.execute(XPackInfoAction.INSTANCE, request, listener); - } - - /** - * Freezes or unfreeze one or more indices - */ - public void freeze(FreezeRequest request, ActionListener listener) { - client.execute(FreezeIndexAction.INSTANCE, request, listener); - } - - /** - * Freeze or unfreeze one or more indices - */ - public FreezeResponse freeze(FreezeRequest request) - throws ExecutionException, InterruptedException { - PlainActionFuture future = new PlainActionFuture<>(); - freeze(request, future); - return future.get(); - } -} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rest/XPackRestHandler.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rest/XPackRestHandler.java deleted file mode 100644 index 5ac0969624bc8..0000000000000 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rest/XPackRestHandler.java +++ /dev/null @@ -1,30 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ -package org.elasticsearch.xpack.core.rest; - -import org.elasticsearch.client.node.NodeClient; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.rest.BaseRestHandler; -import org.elasticsearch.rest.RestRequest; -import org.elasticsearch.xpack.core.XPackClient; - -import java.io.IOException; - -public abstract class XPackRestHandler extends BaseRestHandler { - - protected static String URI_BASE = "/_xpack"; - - public XPackRestHandler(Settings settings) { - super(settings); - } - - @Override - public final RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { - return doPrepareRequest(request, new XPackClient(client)); - } - - protected abstract RestChannelConsumer doPrepareRequest(RestRequest request, XPackClient client) throws IOException; -} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rest/action/RestFreezeIndexAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rest/action/RestFreezeIndexAction.java index 9604cdd8b3183..580acbae0dc87 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rest/action/RestFreezeIndexAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rest/action/RestFreezeIndexAction.java @@ -7,16 +7,17 @@ import org.elasticsearch.action.support.ActiveShardCount; import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.action.RestToXContentListener; -import org.elasticsearch.xpack.core.XPackClient; import org.elasticsearch.xpack.core.action.TransportFreezeIndexAction; -import org.elasticsearch.xpack.core.rest.XPackRestHandler; +import org.elasticsearch.xpack.core.action.TransportFreezeIndexAction.FreezeIndexAction; -public final class RestFreezeIndexAction extends XPackRestHandler { +public final class RestFreezeIndexAction extends BaseRestHandler { public RestFreezeIndexAction(Settings settings, RestController controller) { super(settings); controller.registerHandler(RestRequest.Method.POST, "/{index}/_freeze", this); @@ -24,7 +25,7 @@ public RestFreezeIndexAction(Settings settings, RestController controller) { } @Override - protected RestChannelConsumer doPrepareRequest(RestRequest request, XPackClient client) { + protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) { boolean freeze = request.path().endsWith("/_freeze"); TransportFreezeIndexAction.FreezeRequest freezeRequest = new TransportFreezeIndexAction.FreezeRequest(Strings.splitStringByCommaToArray(request.param("index"))); @@ -36,7 +37,7 @@ protected RestChannelConsumer doPrepareRequest(RestRequest request, XPackClient freezeRequest.waitForActiveShards(ActiveShardCount.parseString(waitForActiveShards)); } freezeRequest.setFreeze(freeze); - return channel -> client.freeze(freezeRequest, new RestToXContentListener<>(channel)); + return channel -> client.execute(FreezeIndexAction.INSTANCE, freezeRequest, new RestToXContentListener<>(channel)); } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rest/action/RestXPackInfoAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rest/action/RestXPackInfoAction.java index c057c04cc637d..2a41be1cb0cb1 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rest/action/RestXPackInfoAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rest/action/RestXPackInfoAction.java @@ -5,13 +5,14 @@ */ package org.elasticsearch.xpack.core.rest.action; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.protocol.xpack.XPackInfoRequest; +import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.action.RestToXContentListener; -import org.elasticsearch.xpack.core.XPackClient; -import org.elasticsearch.xpack.core.rest.XPackRestHandler; +import org.elasticsearch.xpack.core.action.XPackInfoRequestBuilder; import java.io.IOException; import java.util.EnumSet; @@ -19,11 +20,11 @@ import static org.elasticsearch.rest.RestRequest.Method.GET; import static org.elasticsearch.rest.RestRequest.Method.HEAD; -public class RestXPackInfoAction extends XPackRestHandler { +public class RestXPackInfoAction extends BaseRestHandler { public RestXPackInfoAction(Settings settings, RestController controller) { super(settings); - controller.registerHandler(HEAD, URI_BASE, this); - controller.registerHandler(GET, URI_BASE, this); + controller.registerHandler(HEAD, "/_xpack", this); + controller.registerHandler(GET, "/_xpack", this); } @Override @@ -32,7 +33,7 @@ public String getName() { } @Override - public RestChannelConsumer doPrepareRequest(RestRequest request, XPackClient client) throws IOException { + public RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { // we piggyback verbosity on "human" output boolean verbose = request.paramAsBoolean("human", true); @@ -40,7 +41,7 @@ public RestChannelConsumer doPrepareRequest(RestRequest request, XPackClient cli EnumSet categories = XPackInfoRequest.Category .toSet(request.paramAsStringArray("categories", new String[] { "_all" })); return channel -> - client.prepareInfo() + new XPackInfoRequestBuilder(client) .setVerbose(verbose) .setCategories(categories) .execute(new RestToXContentListener<>(channel)); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rest/action/RestXPackUsageAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rest/action/RestXPackUsageAction.java index 0f09f17dbb066..44b04c3548f7b 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rest/action/RestXPackUsageAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rest/action/RestXPackUsageAction.java @@ -6,29 +6,29 @@ package org.elasticsearch.xpack.core.rest.action; import org.elasticsearch.action.support.master.MasterNodeRequest; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.BytesRestResponse; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.RestResponse; import org.elasticsearch.rest.action.RestBuilderListener; -import org.elasticsearch.xpack.core.XPackClient; import org.elasticsearch.xpack.core.XPackFeatureSet; import org.elasticsearch.xpack.core.action.XPackUsageRequestBuilder; import org.elasticsearch.xpack.core.action.XPackUsageResponse; -import org.elasticsearch.xpack.core.rest.XPackRestHandler; import java.io.IOException; import static org.elasticsearch.rest.RestRequest.Method.GET; import static org.elasticsearch.rest.RestStatus.OK; -public class RestXPackUsageAction extends XPackRestHandler { +public class RestXPackUsageAction extends BaseRestHandler { public RestXPackUsageAction(Settings settings, RestController controller) { super(settings); - controller.registerHandler(GET, URI_BASE + "/usage", this); + controller.registerHandler(GET, "/_xpack/usage", this); } @Override @@ -37,11 +37,11 @@ public String getName() { } @Override - public RestChannelConsumer doPrepareRequest(RestRequest request, XPackClient client) throws IOException { + public RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { final TimeValue masterTimeout = request.paramAsTime("master_timeout", MasterNodeRequest.DEFAULT_MASTER_NODE_TIMEOUT); - return channel -> new XPackUsageRequestBuilder(client.es()) + return channel -> new XPackUsageRequestBuilder(client) .setMasterNodeTimeout(masterTimeout) - .execute(new RestBuilderListener(channel) { + .execute(new RestBuilderListener<>(channel) { @Override public RestResponse buildResponse(XPackUsageResponse response, XContentBuilder builder) throws Exception { builder.startObject(); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/template/IndexTemplateRegistry.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/template/IndexTemplateRegistry.java index d0a086bd649f0..1d57df3b54199 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/template/IndexTemplateRegistry.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/template/IndexTemplateRegistry.java @@ -24,7 +24,6 @@ import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.gateway.GatewayService; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.xpack.core.XPackClient; import org.elasticsearch.xpack.core.XPackSettings; import org.elasticsearch.xpack.core.indexlifecycle.IndexLifecycleMetadata; import org.elasticsearch.xpack.core.indexlifecycle.LifecyclePolicy; @@ -222,7 +221,7 @@ public void onFailure(Exception e) { creationCheck.set(false); onPutPolicyFailure(policy, e); } - }, (req, listener) -> new XPackClient(client).ilmClient().putLifecyclePolicy(req, listener)); + }, (req, listener) -> client.execute(PutLifecycleAction.INSTANCE, req, listener)); }); } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/index/engine/FrozenIndexTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/index/engine/FrozenIndexTests.java index 9231bad9a8dfe..39407ef735974 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/index/engine/FrozenIndexTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/index/engine/FrozenIndexTests.java @@ -5,6 +5,7 @@ */ package org.elasticsearch.index.engine; +import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse; @@ -23,6 +24,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.query.MatchAllQueryBuilder; import org.elasticsearch.index.query.QueryBuilders; @@ -37,9 +39,9 @@ import org.elasticsearch.search.internal.AliasFilter; import org.elasticsearch.search.internal.ShardSearchLocalRequest; import org.elasticsearch.test.ESSingleNodeTestCase; -import org.elasticsearch.xpack.core.XPackClient; import org.elasticsearch.xpack.core.XPackPlugin; import org.elasticsearch.xpack.core.action.TransportFreezeIndexAction; +import org.elasticsearch.xpack.core.action.TransportFreezeIndexAction.FreezeIndexAction; import org.hamcrest.Matchers; import java.io.IOException; @@ -69,8 +71,7 @@ public void testCloseFreezeAndOpen() throws ExecutionException, InterruptedExcep client().prepareIndex("index", "_doc", "1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); client().prepareIndex("index", "_doc", "2").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); client().prepareIndex("index", "_doc", "3").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); - XPackClient xPackClient = new XPackClient(client()); - assertAcked(xPackClient.freeze(new TransportFreezeIndexAction.FreezeRequest("index"))); + assertAcked(client().execute(FreezeIndexAction.INSTANCE, new TransportFreezeIndexAction.FreezeRequest("index")).actionGet()); expectThrows(ClusterBlockException.class, () -> client().prepareIndex("index", "_doc", "4").setSource("field", "value") .setRefreshPolicy(IMMEDIATE).get()); IndicesService indexServices = getInstanceFromNode(IndicesService.class); @@ -115,8 +116,7 @@ public void testSearchAndGetAPIsAreThrottled() throws InterruptedException, IOEx for (int i = 0; i < 10; i++) { client().prepareIndex("index", "_doc", "" + i).setSource("field", "foo bar baz").get(); } - XPackClient xPackClient = new XPackClient(client()); - assertAcked(xPackClient.freeze(new TransportFreezeIndexAction.FreezeRequest("index"))); + assertAcked(client().execute(FreezeIndexAction.INSTANCE, new TransportFreezeIndexAction.FreezeRequest("index")).actionGet()); int numRequests = randomIntBetween(20, 50); CountDownLatch latch = new CountDownLatch(numRequests); ActionListener listener = ActionListener.wrap(latch::countDown); @@ -159,8 +159,7 @@ public void testFreezeAndUnfreeze() throws InterruptedException, ExecutionExcept // sometimes close it assertAcked(client().admin().indices().prepareClose("index").get()); } - XPackClient xPackClient = new XPackClient(client()); - assertAcked(xPackClient.freeze(new TransportFreezeIndexAction.FreezeRequest("index"))); + assertAcked(client().execute(FreezeIndexAction.INSTANCE, new TransportFreezeIndexAction.FreezeRequest("index")).actionGet()); { IndicesService indexServices = getInstanceFromNode(IndicesService.class); Index index = resolveIndex("index"); @@ -169,7 +168,8 @@ public void testFreezeAndUnfreeze() throws InterruptedException, ExecutionExcept IndexShard shard = indexService.getShard(0); assertEquals(0, shard.refreshStats().getTotal()); } - assertAcked(xPackClient.freeze(new TransportFreezeIndexAction.FreezeRequest("index").setFreeze(false))); + assertAcked(client().execute(FreezeIndexAction.INSTANCE, + new TransportFreezeIndexAction.FreezeRequest("index").setFreeze(false)).actionGet()); { IndicesService indexServices = getInstanceFromNode(IndicesService.class); Index index = resolveIndex("index"); @@ -192,13 +192,12 @@ private void assertIndexFrozen(String idx) { public void testDoubleFreeze() throws ExecutionException, InterruptedException { createIndex("test-idx", Settings.builder().put("index.number_of_shards", 2).build()); - XPackClient xPackClient = new XPackClient(client()); - assertAcked(xPackClient.freeze(new TransportFreezeIndexAction.FreezeRequest("test-idx"))); - ExecutionException executionException = expectThrows(ExecutionException.class, - () -> xPackClient.freeze(new TransportFreezeIndexAction.FreezeRequest("test-idx") + assertAcked(client().execute(FreezeIndexAction.INSTANCE, new TransportFreezeIndexAction.FreezeRequest("test-idx")).actionGet()); + ResourceNotFoundException exception = expectThrows(ResourceNotFoundException.class, + () -> client().execute(FreezeIndexAction.INSTANCE, new TransportFreezeIndexAction.FreezeRequest("test-idx") .indicesOptions(new IndicesOptions(EnumSet.noneOf(IndicesOptions.Option.class), - EnumSet.of(IndicesOptions.WildcardStates.OPEN))))); - assertEquals("no index found to freeze", executionException.getCause().getMessage()); + EnumSet.of(IndicesOptions.WildcardStates.OPEN)))).actionGet()); + assertEquals("no index found to freeze", exception.getMessage()); } public void testUnfreezeClosedIndices() throws ExecutionException, InterruptedException { @@ -206,11 +205,10 @@ public void testUnfreezeClosedIndices() throws ExecutionException, InterruptedEx client().prepareIndex("idx", "_doc", "1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); createIndex("idx-closed", Settings.builder().put("index.number_of_shards", 1).build()); client().prepareIndex("idx-closed", "_doc", "1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); - XPackClient xPackClient = new XPackClient(client()); - assertAcked(xPackClient.freeze(new TransportFreezeIndexAction.FreezeRequest("idx"))); + assertAcked(client().execute(FreezeIndexAction.INSTANCE, new TransportFreezeIndexAction.FreezeRequest("idx")).actionGet()); assertAcked(client().admin().indices().prepareClose("idx-closed").get()); - assertAcked(xPackClient.freeze(new TransportFreezeIndexAction.FreezeRequest("idx*").setFreeze(false) - .indicesOptions(IndicesOptions.strictExpand()))); + assertAcked(client().execute(FreezeIndexAction.INSTANCE, new TransportFreezeIndexAction.FreezeRequest("idx*").setFreeze(false) + .indicesOptions(IndicesOptions.strictExpand())).actionGet()); ClusterStateResponse stateResponse = client().admin().cluster().prepareState().get(); assertEquals(IndexMetaData.State.CLOSE, stateResponse.getState().getMetaData().index("idx-closed").getState()); assertEquals(IndexMetaData.State.OPEN, stateResponse.getState().getMetaData().index("idx").getState()); @@ -222,8 +220,7 @@ public void testFreezePattern() throws ExecutionException, InterruptedException client().prepareIndex("test-idx", "_doc", "1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); createIndex("test-idx-1", Settings.builder().put("index.number_of_shards", 1).build()); client().prepareIndex("test-idx-1", "_doc", "1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); - XPackClient xPackClient = new XPackClient(client()); - assertAcked(xPackClient.freeze(new TransportFreezeIndexAction.FreezeRequest("test-idx"))); + assertAcked(client().execute(FreezeIndexAction.INSTANCE, new TransportFreezeIndexAction.FreezeRequest("test-idx")).actionGet()); assertIndexFrozen("test-idx"); IndicesStatsResponse index = client().admin().indices().prepareStats("test-idx").clear().setRefresh(true).get(); @@ -232,7 +229,7 @@ public void testFreezePattern() throws ExecutionException, InterruptedException index = client().admin().indices().prepareStats("test-idx").clear().setRefresh(true).get(); assertEquals(1, index.getTotal().refresh.getTotal()); - assertAcked(xPackClient.freeze(new TransportFreezeIndexAction.FreezeRequest("test*"))); + assertAcked(client().execute(FreezeIndexAction.INSTANCE, new TransportFreezeIndexAction.FreezeRequest("test*")).actionGet()); assertIndexFrozen("test-idx"); assertIndexFrozen("test-idx-1"); index = client().admin().indices().prepareStats("test-idx").clear().setRefresh(true).get(); @@ -269,8 +266,7 @@ public void testCanMatch() throws ExecutionException, InterruptedException, IOEx new AliasFilter(null, Strings.EMPTY_ARRAY), 1f, -1, null, null))); } - XPackClient xPackClient = new XPackClient(client()); - assertAcked(xPackClient.freeze(new TransportFreezeIndexAction.FreezeRequest("index"))); + assertAcked(client().execute(FreezeIndexAction.INSTANCE, new TransportFreezeIndexAction.FreezeRequest("index")).actionGet()); { IndicesService indexServices = getInstanceFromNode(IndicesService.class); @@ -301,8 +297,7 @@ public void testCanMatch() throws ExecutionException, InterruptedException, IOEx public void testWriteToFrozenIndex() throws ExecutionException, InterruptedException { createIndex("idx", Settings.builder().put("index.number_of_shards", 1).build()); client().prepareIndex("idx", "_doc", "1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); - XPackClient xPackClient = new XPackClient(client()); - assertAcked(xPackClient.freeze(new TransportFreezeIndexAction.FreezeRequest("idx"))); + assertAcked(client().execute(FreezeIndexAction.INSTANCE, new TransportFreezeIndexAction.FreezeRequest("idx")).actionGet()); assertIndexFrozen("idx"); expectThrows(ClusterBlockException.class, () -> client().prepareIndex("idx", "_doc", "2").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get()); @@ -312,9 +307,8 @@ public void testIgnoreUnavailable() throws ExecutionException, InterruptedExcept createIndex("idx", Settings.builder().put("index.number_of_shards", 1).build()); createIndex("idx-close", Settings.builder().put("index.number_of_shards", 1).build()); assertAcked(client().admin().indices().prepareClose("idx-close")); - XPackClient xPackClient = new XPackClient(client()); - assertAcked(xPackClient.freeze(new TransportFreezeIndexAction.FreezeRequest("idx*", "not_available") - .indicesOptions(IndicesOptions.fromParameters(null, "true", null, null, IndicesOptions.strictExpandOpen())))); + assertAcked(client().execute(FreezeIndexAction.INSTANCE, new TransportFreezeIndexAction.FreezeRequest("idx*", "not_available") + .indicesOptions(IndicesOptions.fromParameters(null, "true", null, null, IndicesOptions.strictExpandOpen()))).actionGet()); assertIndexFrozen("idx"); assertEquals(IndexMetaData.State.CLOSE, client().admin().cluster().prepareState().get().getState().metaData().index("idx-close").getState()); @@ -322,17 +316,17 @@ public void testIgnoreUnavailable() throws ExecutionException, InterruptedExcept public void testUnfreezeClosedIndex() throws ExecutionException, InterruptedException { createIndex("idx", Settings.builder().put("index.number_of_shards", 1).build()); - XPackClient xPackClient = new XPackClient(client()); - assertAcked(xPackClient.freeze(new TransportFreezeIndexAction.FreezeRequest("idx"))); + assertAcked(client().execute(FreezeIndexAction.INSTANCE, new TransportFreezeIndexAction.FreezeRequest("idx")).actionGet()); assertAcked(client().admin().indices().prepareClose("idx")); assertEquals(IndexMetaData.State.CLOSE, client().admin().cluster().prepareState().get().getState().metaData().index("idx").getState()); - expectThrows(ExecutionException.class, - () -> xPackClient.freeze(new TransportFreezeIndexAction.FreezeRequest("id*").setFreeze(false) + expectThrows(IndexNotFoundException.class, + () -> client().execute(FreezeIndexAction.INSTANCE, new TransportFreezeIndexAction.FreezeRequest("id*").setFreeze(false) .indicesOptions(new IndicesOptions(EnumSet.noneOf(IndicesOptions.Option.class), - EnumSet.of(IndicesOptions.WildcardStates.OPEN))))); + EnumSet.of(IndicesOptions.WildcardStates.OPEN)))).actionGet()); // we don't resolve to closed indices - assertAcked(xPackClient.freeze(new TransportFreezeIndexAction.FreezeRequest("idx").setFreeze(false))); + assertAcked(client().execute(FreezeIndexAction.INSTANCE, + new TransportFreezeIndexAction.FreezeRequest("idx").setFreeze(false)).actionGet()); assertEquals(IndexMetaData.State.OPEN, client().admin().cluster().prepareState().get().getState().metaData().index("idx").getState()); } @@ -345,8 +339,7 @@ public void testFreezeIndexIncreasesIndexSettingsVersion() throws ExecutionExcep final long settingsVersion = client().admin().cluster().prepareState().get() .getState().metaData().index(index).getSettingsVersion(); - XPackClient xPackClient = new XPackClient(client()); - assertAcked(xPackClient.freeze(new TransportFreezeIndexAction.FreezeRequest(index))); + assertAcked(client().execute(FreezeIndexAction.INSTANCE, new TransportFreezeIndexAction.FreezeRequest(index)).actionGet()); assertIndexFrozen(index); assertThat(client().admin().cluster().prepareState().get().getState().metaData().index(index).getSettingsVersion(), greaterThan(settingsVersion)); @@ -374,7 +367,7 @@ public void testFreezeEmptyIndexWithTranslogOps() throws Exception { assertThat(indexService.getShard(0).getGlobalCheckpoint(), greaterThanOrEqualTo(nbNoOps - 1L)); }); - assertAcked(new XPackClient(client()).freeze(new TransportFreezeIndexAction.FreezeRequest(indexName))); + assertAcked(client().execute(FreezeIndexAction.INSTANCE, new TransportFreezeIndexAction.FreezeRequest(indexName)).actionGet()); assertIndexFrozen(indexName); } @@ -390,7 +383,7 @@ public void testRecoveryState() throws ExecutionException, InterruptedException assertThat(indexResponse.status(), is(RestStatus.CREATED)); } - assertAcked(new XPackClient(client()).freeze(new TransportFreezeIndexAction.FreezeRequest(indexName))); + assertAcked(client().execute(FreezeIndexAction.INSTANCE, new TransportFreezeIndexAction.FreezeRequest(indexName)).actionGet()); assertIndexFrozen(indexName); final IndexMetaData indexMetaData = client().admin().cluster().prepareState().get().getState().metaData().index(indexName); diff --git a/x-pack/plugin/graph/src/main/java/org/elasticsearch/xpack/graph/rest/action/RestGraphAction.java b/x-pack/plugin/graph/src/main/java/org/elasticsearch/xpack/graph/rest/action/RestGraphAction.java index 130d6deed567f..80d4d638a0a09 100644 --- a/x-pack/plugin/graph/src/main/java/org/elasticsearch/xpack/graph/rest/action/RestGraphAction.java +++ b/x-pack/plugin/graph/src/main/java/org/elasticsearch/xpack/graph/rest/action/RestGraphAction.java @@ -9,6 +9,7 @@ import org.apache.logging.log4j.LogManager; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.Strings; import org.elasticsearch.common.logging.DeprecationLogger; @@ -19,11 +20,10 @@ import org.elasticsearch.protocol.xpack.graph.GraphExploreRequest.TermBoost; import org.elasticsearch.protocol.xpack.graph.Hop; import org.elasticsearch.protocol.xpack.graph.VertexRequest; +import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.action.RestToXContentListener; -import org.elasticsearch.xpack.core.XPackClient; -import org.elasticsearch.xpack.core.rest.XPackRestHandler; import java.io.IOException; import java.util.HashMap; @@ -38,7 +38,7 @@ /** * @see GraphExploreRequest */ -public class RestGraphAction extends XPackRestHandler { +public class RestGraphAction extends BaseRestHandler { private static final DeprecationLogger deprecationLogger = new DeprecationLogger(LogManager.getLogger(RestGraphAction.class)); public static final String TYPES_DEPRECATION_MESSAGE = "[types removal]" + @@ -68,19 +68,19 @@ public RestGraphAction(Settings settings, RestController controller) { // TODO: remove deprecated endpoint in 8.0.0 controller.registerWithDeprecatedHandler( GET, "/{index}/_graph/explore", this, - GET, "/{index}" + URI_BASE + "/graph/_explore", deprecationLogger); + GET, "/{index}/_xpack/graph/_explore", deprecationLogger); // TODO: remove deprecated endpoint in 8.0.0 controller.registerWithDeprecatedHandler( POST, "/{index}/_graph/explore", this, - POST, "/{index}" + URI_BASE + "/graph/_explore", deprecationLogger); + POST, "/{index}/_xpack/graph/_explore", deprecationLogger); // TODO: remove deprecated endpoint in 8.0.0 controller.registerWithDeprecatedHandler( GET, "/{index}/{type}/_graph/explore", this, - GET, "/{index}/{type}" + URI_BASE + "/graph/_explore", deprecationLogger); + GET, "/{index}/{type}/_xpack/graph/_explore", deprecationLogger); // TODO: remove deprecated endpoint in 8.0.0 controller.registerWithDeprecatedHandler( POST, "/{index}/{type}/_graph/explore", this, - POST, "/{index}/{type}" + URI_BASE + "/graph/_explore", deprecationLogger); + POST, "/{index}/{type}/_xpack/graph/_explore", deprecationLogger); } @Override @@ -89,7 +89,7 @@ public String getName() { } @Override - public RestChannelConsumer doPrepareRequest(final RestRequest request, final XPackClient client) throws IOException { + public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { GraphExploreRequest graphRequest = new GraphExploreRequest(Strings.splitStringByCommaToArray(request.param("index"))); graphRequest.indicesOptions(IndicesOptions.fromRequest(request, graphRequest.indicesOptions())); graphRequest.routing(request.param("routing")); @@ -117,7 +117,7 @@ public RestChannelConsumer doPrepareRequest(final RestRequest request, final XPa deprecationLogger.deprecatedAndMaybeLog("graph_with_types", TYPES_DEPRECATION_MESSAGE); graphRequest.types(Strings.splitStringByCommaToArray(request.param("type"))); } - return channel -> client.es().execute(INSTANCE, graphRequest, new RestToXContentListener<>(channel)); + return channel -> client.execute(INSTANCE, graphRequest, new RestToXContentListener<>(channel)); } private void parseHop(XContentParser parser, Hop currentHop, GraphExploreRequest graphRequest) throws IOException { diff --git a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/collector/ccr/StatsCollector.java b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/collector/ccr/StatsCollector.java index 6aab3114b7807..8f8072ef5b864 100644 --- a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/collector/ccr/StatsCollector.java +++ b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/collector/ccr/StatsCollector.java @@ -14,10 +14,8 @@ import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.license.XPackLicenseState; -import org.elasticsearch.xpack.core.XPackClient; import org.elasticsearch.xpack.core.XPackSettings; import org.elasticsearch.xpack.core.ccr.action.CcrStatsAction; -import org.elasticsearch.xpack.core.ccr.client.CcrClient; import org.elasticsearch.xpack.core.monitoring.exporter.MonitoringDoc; import org.elasticsearch.xpack.monitoring.collector.Collector; @@ -37,25 +35,25 @@ public final class StatsCollector extends Collector { private final Settings settings; private final ThreadContext threadContext; - private final CcrClient ccrClient; + private final Client client; public StatsCollector( final Settings settings, final ClusterService clusterService, final XPackLicenseState licenseState, final Client client) { - this(settings, clusterService, licenseState, new XPackClient(client).ccr(), client.threadPool().getThreadContext()); + this(settings, clusterService, licenseState, client, client.threadPool().getThreadContext()); } StatsCollector( final Settings settings, final ClusterService clusterService, final XPackLicenseState licenseState, - final CcrClient ccrClient, + final Client client, final ThreadContext threadContext) { super(TYPE, clusterService, CCR_STATS_TIMEOUT, licenseState); this.settings = settings; - this.ccrClient = ccrClient; + this.client = client; this.threadContext = threadContext; } @@ -79,7 +77,7 @@ protected Collection doCollect( final String clusterUuid = clusterUuid(clusterState); final CcrStatsAction.Request request = new CcrStatsAction.Request(); - final CcrStatsAction.Response response = ccrClient.stats(request).actionGet(getCollectionTimeout()); + final CcrStatsAction.Response response = client.execute(CcrStatsAction.INSTANCE, request).actionGet(getCollectionTimeout()); final AutoFollowStatsMonitoringDoc autoFollowStatsDoc = new AutoFollowStatsMonitoringDoc(clusterUuid, timestamp, interval, node, response.getAutoFollowStats()); diff --git a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/collector/ml/JobStatsCollector.java b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/collector/ml/JobStatsCollector.java index 855780d4836ae..be34af2850ec6 100644 --- a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/collector/ml/JobStatsCollector.java +++ b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/collector/ml/JobStatsCollector.java @@ -14,10 +14,8 @@ import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.license.XPackLicenseState; -import org.elasticsearch.xpack.core.XPackClient; import org.elasticsearch.xpack.core.XPackSettings; import org.elasticsearch.xpack.core.ml.action.GetJobsStatsAction; -import org.elasticsearch.xpack.core.ml.client.MachineLearningClient; import org.elasticsearch.xpack.core.monitoring.exporter.MonitoringDoc; import org.elasticsearch.xpack.monitoring.collector.Collector; @@ -43,15 +41,15 @@ public class JobStatsCollector extends Collector { private final Settings settings; private final ThreadContext threadContext; - private final MachineLearningClient client; + private final Client client; public JobStatsCollector(final Settings settings, final ClusterService clusterService, final XPackLicenseState licenseState, final Client client) { - this(settings, clusterService, licenseState, new XPackClient(client).machineLearning(), client.threadPool().getThreadContext()); + this(settings, clusterService, licenseState, client, client.threadPool().getThreadContext()); } JobStatsCollector(final Settings settings, final ClusterService clusterService, - final XPackLicenseState licenseState, final MachineLearningClient client, final ThreadContext threadContext) { + final XPackLicenseState licenseState, final Client client, final ThreadContext threadContext) { super(JobStatsMonitoringDoc.TYPE, clusterService, JOB_STATS_TIMEOUT, licenseState); this.settings = settings; this.client = client; @@ -74,7 +72,7 @@ protected List doCollect(final MonitoringDoc.Node node, // fetch details about all jobs try (ThreadContext.StoredContext ignore = threadContext.stashWithOrigin(MONITORING_ORIGIN)) { final GetJobsStatsAction.Response jobs = - client.getJobsStats(new GetJobsStatsAction.Request(MetaData.ALL)) + client.execute(GetJobsStatsAction.INSTANCE, new GetJobsStatsAction.Request(MetaData.ALL)) .actionGet(getCollectionTimeout()); final long timestamp = timestamp(); diff --git a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/local/LocalExporter.java b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/local/LocalExporter.java index 12f2f77a9d40c..8661c77345591 100644 --- a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/local/LocalExporter.java +++ b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/local/LocalExporter.java @@ -40,7 +40,6 @@ import org.elasticsearch.protocol.xpack.watcher.DeleteWatchRequest; import org.elasticsearch.protocol.xpack.watcher.PutWatchRequest; import org.elasticsearch.protocol.xpack.watcher.PutWatchResponse; -import org.elasticsearch.xpack.core.XPackClient; import org.elasticsearch.xpack.core.XPackSettings; import org.elasticsearch.xpack.core.monitoring.MonitoredSystem; import org.elasticsearch.xpack.core.monitoring.exporter.MonitoringTemplateUtils; @@ -431,8 +430,7 @@ private boolean hasValidVersion(final Object version, final long minimumVersion) */ private void getClusterAlertsInstallationAsyncActions(final boolean indexExists, final List asyncActions, final AtomicInteger pendingResponses) { - final XPackClient xpackClient = new XPackClient(client); - final WatcherClient watcher = xpackClient.watcher(); + final WatcherClient watcher = new WatcherClient(client); final boolean canAddWatches = licenseState.isMonitoringClusterAlertsAllowed(); for (final String watchId : ClusterAlertsUtil.WATCH_IDS) { diff --git a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/rest/action/RestMonitoringBulkAction.java b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/rest/action/RestMonitoringBulkAction.java index 0a1576393ba26..04faf82d8c1d1 100644 --- a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/rest/action/RestMonitoringBulkAction.java +++ b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/rest/action/RestMonitoringBulkAction.java @@ -8,21 +8,22 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.Strings; import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.BytesRestResponse; +import org.elasticsearch.rest.RestChannel; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.RestResponse; import org.elasticsearch.rest.action.RestBuilderListener; -import org.elasticsearch.xpack.core.XPackClient; import org.elasticsearch.xpack.core.monitoring.MonitoredSystem; import org.elasticsearch.xpack.core.monitoring.action.MonitoringBulkRequestBuilder; import org.elasticsearch.xpack.core.monitoring.action.MonitoringBulkResponse; import org.elasticsearch.xpack.core.monitoring.exporter.MonitoringTemplateUtils; -import org.elasticsearch.xpack.core.rest.XPackRestHandler; import java.io.IOException; import java.util.Arrays; @@ -34,7 +35,7 @@ import static org.elasticsearch.rest.RestRequest.Method.POST; import static org.elasticsearch.rest.RestRequest.Method.PUT; -public class RestMonitoringBulkAction extends XPackRestHandler { +public class RestMonitoringBulkAction extends BaseRestHandler { public static final String MONITORING_ID = "system_id"; public static final String MONITORING_VERSION = "system_api_version"; @@ -68,7 +69,7 @@ public String getName() { } @Override - public RestChannelConsumer doPrepareRequest(RestRequest request, XPackClient client) throws IOException { + public RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { final String id = request.param(MONITORING_ID); if (Strings.isEmpty(id)) { @@ -98,27 +99,9 @@ public RestChannelConsumer doPrepareRequest(RestRequest request, XPackClient cli final long timestamp = System.currentTimeMillis(); final long intervalMillis = parseTimeValue(intervalAsString, INTERVAL).getMillis(); - final MonitoringBulkRequestBuilder requestBuilder = client.monitoring().prepareMonitoringBulk(); + final MonitoringBulkRequestBuilder requestBuilder = new MonitoringBulkRequestBuilder(client); requestBuilder.add(system, request.content(), request.getXContentType(), timestamp, intervalMillis); - return channel -> requestBuilder.execute(new RestBuilderListener(channel) { - @Override - public RestResponse buildResponse(MonitoringBulkResponse response, XContentBuilder builder) throws Exception { - builder.startObject(); - { - builder.field("took", response.getTookInMillis()); - builder.field("ignored", response.isIgnored()); - - final MonitoringBulkResponse.Error error = response.getError(); - builder.field("errors", error != null); - - if (error != null) { - builder.field("error", response.getError()); - } - } - builder.endObject(); - return new BytesRestResponse(response.status(), builder); - } - }); + return channel -> requestBuilder.execute(getRestBuilderListener(channel)); } @Override @@ -138,4 +121,26 @@ private boolean isSupportedSystemVersion(final MonitoredSystem system, final Str final List monitoredSystem = supportedApiVersions.getOrDefault(system, emptyList()); return monitoredSystem.contains(version); } + + static RestBuilderListener getRestBuilderListener(RestChannel channel) { + return new RestBuilderListener<>(channel) { + @Override + public RestResponse buildResponse(MonitoringBulkResponse response, XContentBuilder builder) throws Exception { + builder.startObject(); + { + builder.field("took", response.getTookInMillis()); + builder.field("ignored", response.isIgnored()); + + final MonitoringBulkResponse.Error error = response.getError(); + builder.field("errors", error != null); + + if (error != null) { + builder.field("error", response.getError()); + } + } + builder.endObject(); + return new BytesRestResponse(response.status(), builder); + } + }; + } } diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/ml/JobStatsCollectorTests.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/ml/JobStatsCollectorTests.java index 0713e26f80667..61b485e77ae27 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/ml/JobStatsCollectorTests.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/ml/JobStatsCollectorTests.java @@ -6,16 +6,17 @@ package org.elasticsearch.xpack.monitoring.collector.ml; import org.elasticsearch.action.ActionFuture; +import org.elasticsearch.client.Client; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.xpack.core.XPackSettings; +import org.elasticsearch.xpack.core.action.util.QueryPage; +import org.elasticsearch.xpack.core.ml.action.GetJobsStatsAction; import org.elasticsearch.xpack.core.ml.action.GetJobsStatsAction.Request; import org.elasticsearch.xpack.core.ml.action.GetJobsStatsAction.Response; import org.elasticsearch.xpack.core.ml.action.GetJobsStatsAction.Response.JobStats; -import org.elasticsearch.xpack.core.action.util.QueryPage; -import org.elasticsearch.xpack.core.ml.client.MachineLearningClient; import org.elasticsearch.xpack.core.ml.job.config.Job; import org.elasticsearch.xpack.core.monitoring.MonitoredSystem; import org.elasticsearch.xpack.core.monitoring.exporter.MonitoringDoc; @@ -128,7 +129,7 @@ public void testDoCollect() throws Exception { whenClusterStateWithUUID(clusterUuid); final MonitoringDoc.Node node = randomMonitoringNode(random()); - final MachineLearningClient client = mock(MachineLearningClient.class); + final Client client = mock(Client.class); final ThreadContext threadContext = new ThreadContext(Settings.EMPTY); final TimeValue timeout = TimeValue.timeValueSeconds(randomIntBetween(1, 120)); @@ -143,7 +144,7 @@ public void testDoCollect() throws Exception { final ActionFuture future = (ActionFuture)mock(ActionFuture.class); final Response response = new Response(new QueryPage<>(jobStats, jobStats.size(), Job.RESULTS_FIELD)); - when(client.getJobsStats(eq(new Request(MetaData.ALL)))).thenReturn(future); + when(client.execute(eq(GetJobsStatsAction.INSTANCE), eq(new Request(MetaData.ALL)))).thenReturn(future); when(future.actionGet(timeout)).thenReturn(response); final long interval = randomNonNegativeLong(); diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/rest/action/RestMonitoringBulkActionTests.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/rest/action/RestMonitoringBulkActionTests.java index 7a4427c9f0fdc..fb79751f797e6 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/rest/action/RestMonitoringBulkActionTests.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/rest/action/RestMonitoringBulkActionTests.java @@ -6,8 +6,7 @@ package org.elasticsearch.xpack.monitoring.rest.action; import org.elasticsearch.ElasticsearchParseException; -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.CheckedConsumer; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesArray; @@ -25,22 +24,16 @@ import org.elasticsearch.rest.action.RestBuilderListener; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.rest.FakeRestRequest; -import org.elasticsearch.xpack.core.XPackClient; import org.elasticsearch.xpack.core.monitoring.MonitoredSystem; -import org.elasticsearch.xpack.core.monitoring.action.MonitoringBulkRequestBuilder; import org.elasticsearch.xpack.core.monitoring.action.MonitoringBulkResponse; -import org.elasticsearch.xpack.core.monitoring.client.MonitoringClient; import org.elasticsearch.xpack.core.monitoring.exporter.MonitoringTemplateUtils; import java.util.HashMap; import java.util.Map; -import java.util.concurrent.atomic.AtomicReference; import static org.elasticsearch.xpack.core.monitoring.exporter.MonitoringTemplateUtils.TEMPLATE_VERSION; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.is; -import static org.hamcrest.Matchers.not; -import static org.hamcrest.Matchers.nullValue; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -115,8 +108,7 @@ public void testUnknownSystemVersion() { public void testNoErrors() throws Exception { final MonitoringBulkResponse response = new MonitoringBulkResponse(randomLong(), false); - final FakeRestRequest request = createRestRequest(randomSystemId(), TEMPLATE_VERSION, "10s"); - final RestResponse restResponse = getRestBuilderListener(request).buildResponse(response); + final RestResponse restResponse = getRestBuilderListener().buildResponse(response); assertThat(restResponse.status(), is(RestStatus.OK)); assertThat(restResponse.content().utf8ToString(), @@ -125,8 +117,7 @@ public void testNoErrors() throws Exception { public void testNoErrorsButIgnored() throws Exception { final MonitoringBulkResponse response = new MonitoringBulkResponse(randomLong(), true); - final FakeRestRequest request = createRestRequest(randomSystemId(), TEMPLATE_VERSION, "10s"); - final RestResponse restResponse = getRestBuilderListener(request).buildResponse(response); + final RestResponse restResponse = getRestBuilderListener().buildResponse(response); assertThat(restResponse.status(), is(RestStatus.OK)); assertThat(restResponse.content().utf8ToString(), @@ -139,8 +130,7 @@ public void testWithErrors() throws Exception { final MonitoringBulkResponse response = new MonitoringBulkResponse(randomLong(), error); final String errorJson; - final FakeRestRequest request = createRestRequest(randomSystemId(), TEMPLATE_VERSION, "10s"); - final RestResponse restResponse = getRestBuilderListener(request).buildResponse(response); + final RestResponse restResponse = getRestBuilderListener().buildResponse(response); try (XContentBuilder builder = XContentBuilder.builder(XContentType.JSON.xContent())) { error.toXContent(builder, ToXContent.EMPTY_PARAMS); @@ -167,35 +157,18 @@ private static String randomSystemId() { } private void prepareRequest(final RestRequest restRequest) throws Exception { - getRestBuilderListener(restRequest); - } - - private RestBuilderListener getRestBuilderListener(final RestRequest restRequest) throws Exception { - final Client client = mock(Client.class); - final XPackClient xpackClient = mock(XPackClient.class); - final MonitoringClient monitoringClient = mock(MonitoringClient.class); - final AtomicReference> listenerReference = new AtomicReference<>(); - final MonitoringBulkRequestBuilder builder = new MonitoringBulkRequestBuilder(client){ - @SuppressWarnings("unchecked") - @Override - public void execute(ActionListener listener) { - listenerReference.set((RestBuilderListener)listener); - } - }; - when(monitoringClient.prepareMonitoringBulk()).thenReturn(builder); - when(xpackClient.monitoring()).thenReturn(monitoringClient); - - final CheckedConsumer consumer = action.doPrepareRequest(restRequest, xpackClient); - + final NodeClient client = mock(NodeClient.class); + final CheckedConsumer consumer = action.prepareRequest(restRequest, client); final RestChannel channel = mock(RestChannel.class); when(channel.newBuilder()).thenReturn(JsonXContent.contentBuilder()); - - // trigger/capture execution + // trigger execution consumer.accept(channel); + } - assertThat(listenerReference.get(), not(nullValue())); - - return listenerReference.get(); + private RestBuilderListener getRestBuilderListener() throws Exception { + final RestChannel channel = mock(RestChannel.class); + when(channel.newBuilder()).thenReturn(JsonXContent.contentBuilder()); + return RestMonitoringBulkAction.getRestBuilderListener(channel); } private static FakeRestRequest createRestRequest(final String systemId, final String systemApiVersion, final String interval) { diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/test/MonitoringIntegTestCase.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/test/MonitoringIntegTestCase.java index c350b9a374ab2..c3921b7e7199b 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/test/MonitoringIntegTestCase.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/test/MonitoringIntegTestCase.java @@ -19,7 +19,6 @@ import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.store.MockFSIndexStore; import org.elasticsearch.test.transport.MockTransportService; -import org.elasticsearch.xpack.core.XPackClient; import org.elasticsearch.xpack.core.monitoring.client.MonitoringClient; import org.elasticsearch.xpack.core.monitoring.exporter.MonitoringTemplateUtils; import org.elasticsearch.xpack.core.monitoring.test.MockPainlessScriptEngine; @@ -76,7 +75,7 @@ protected Collection> nodePlugins() { } protected MonitoringClient monitoringClient() { - return randomBoolean() ? new XPackClient(client()).monitoring() : new MonitoringClient(client()); + return new MonitoringClient(client()); } @Override diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/AbstractWatcherIntegrationTestCase.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/AbstractWatcherIntegrationTestCase.java index 5b11b444db3ca..5199aa6bf22f2 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/AbstractWatcherIntegrationTestCase.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/AbstractWatcherIntegrationTestCase.java @@ -41,7 +41,6 @@ import org.elasticsearch.test.disruption.ServiceDisruptionScheme; import org.elasticsearch.test.store.MockFSIndexStore; import org.elasticsearch.test.transport.MockTransportService; -import org.elasticsearch.xpack.core.XPackClient; import org.elasticsearch.xpack.core.XPackSettings; import org.elasticsearch.xpack.core.watcher.WatcherState; import org.elasticsearch.xpack.core.watcher.client.WatcherClient; @@ -328,7 +327,7 @@ protected WatchParser watchParser() { } protected WatcherClient watcherClient() { - return randomBoolean() ? new XPackClient(client()).watcher() : new WatcherClient(client()); + return new WatcherClient(client()); } private IndexNameExpressionResolver indexNameExpressionResolver() { From b84ba2ac811dc0a2e1812c89e5fd1bd881a165b6 Mon Sep 17 00:00:00 2001 From: Jay Modi Date: Fri, 31 May 2019 14:55:03 -0600 Subject: [PATCH 223/224] Remove MonitoringClient from x-pack (#42770) This commit removes the monitoring client from x-pack. This class is a relic of the TransportClient and was only used in a test. --- .../monitoring/client/MonitoringClient.java | 60 ------------------- .../local/LocalExporterIntegTests.java | 2 +- .../test/MonitoringIntegTestCase.java | 5 -- 3 files changed, 1 insertion(+), 66 deletions(-) delete mode 100644 x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/monitoring/client/MonitoringClient.java diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/monitoring/client/MonitoringClient.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/monitoring/client/MonitoringClient.java deleted file mode 100644 index 2dba6e6a4664f..0000000000000 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/monitoring/client/MonitoringClient.java +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ -package org.elasticsearch.xpack.core.monitoring.client; - -import org.elasticsearch.action.ActionFuture; -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.client.Client; -import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.xpack.core.monitoring.action.MonitoringBulkAction; -import org.elasticsearch.xpack.core.monitoring.action.MonitoringBulkRequest; -import org.elasticsearch.xpack.core.monitoring.action.MonitoringBulkRequestBuilder; -import org.elasticsearch.xpack.core.monitoring.action.MonitoringBulkResponse; - -import java.util.Map; - -public class MonitoringClient { - - private final Client client; - - @Inject - public MonitoringClient(Client client) { - this.client = client; - } - - - /** - * Creates a request builder that bulk index monitoring documents. - * - * @return The request builder - */ - public MonitoringBulkRequestBuilder prepareMonitoringBulk() { - return new MonitoringBulkRequestBuilder(client); - } - - /** - * Executes a bulk of index operations that concern monitoring documents. - * - * @param request The monitoring bulk request - * @param listener A listener to be notified with a result - */ - public void bulk(MonitoringBulkRequest request, ActionListener listener) { - client.execute(MonitoringBulkAction.INSTANCE, request, listener); - } - - /** - * Executes a bulk of index operations that concern monitoring documents. - * - * @param request The monitoring bulk request - */ - public ActionFuture bulk(MonitoringBulkRequest request) { - return client.execute(MonitoringBulkAction.INSTANCE, request); - } - - public MonitoringClient filterWithHeader(Map headers) { - return new MonitoringClient(client.filterWithHeader(headers)); - } -} diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/local/LocalExporterIntegTests.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/local/LocalExporterIntegTests.java index 5db71f72cf6ef..137721f84462b 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/local/LocalExporterIntegTests.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/local/LocalExporterIntegTests.java @@ -105,7 +105,7 @@ public void testExport() throws Exception { } assertBusy(() -> { - MonitoringBulkRequestBuilder bulk = monitoringClient().prepareMonitoringBulk(); + MonitoringBulkRequestBuilder bulk = new MonitoringBulkRequestBuilder(client()); monitoringDocs.forEach(bulk::add); assertEquals(RestStatus.OK, bulk.get().status()); refresh(); diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/test/MonitoringIntegTestCase.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/test/MonitoringIntegTestCase.java index c3921b7e7199b..6917fd5c5697e 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/test/MonitoringIntegTestCase.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/test/MonitoringIntegTestCase.java @@ -19,7 +19,6 @@ import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.store.MockFSIndexStore; import org.elasticsearch.test.transport.MockTransportService; -import org.elasticsearch.xpack.core.monitoring.client.MonitoringClient; import org.elasticsearch.xpack.core.monitoring.exporter.MonitoringTemplateUtils; import org.elasticsearch.xpack.core.monitoring.test.MockPainlessScriptEngine; import org.elasticsearch.xpack.monitoring.LocalStateMonitoring; @@ -74,10 +73,6 @@ protected Collection> nodePlugins() { MockIngestPlugin.class, CommonAnalysisPlugin.class); } - protected MonitoringClient monitoringClient() { - return new MonitoringClient(client()); - } - @Override protected Set excludeTemplates() { return new HashSet<>(monitoringTemplateNames()); From a3cf3024855fe5ab871d9c20fe4d39dedce70f1d Mon Sep 17 00:00:00 2001 From: Mark Vieira Date: Fri, 31 May 2019 16:34:16 -0700 Subject: [PATCH 224/224] Use an anonymous inner class instead of lambda for UP-TO-DATE support --- .../java/org/elasticsearch/gradle/JdkDownloadPlugin.java | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/JdkDownloadPlugin.java b/buildSrc/src/main/java/org/elasticsearch/gradle/JdkDownloadPlugin.java index a408b66ec817d..d4f0d9941dacf 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/JdkDownloadPlugin.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/JdkDownloadPlugin.java @@ -23,6 +23,7 @@ import org.gradle.api.NamedDomainObjectContainer; import org.gradle.api.Plugin; import org.gradle.api.Project; +import org.gradle.api.Task; import org.gradle.api.UnknownTaskException; import org.gradle.api.artifacts.Configuration; import org.gradle.api.artifacts.ConfigurationContainer; @@ -165,7 +166,12 @@ private static void setupRootJdkDownload(Project rootProject, String platform, S } String extractDir = rootProject.getBuildDir().toPath().resolve("jdks/openjdk-" + jdkVersion + "_" + platform).toString(); TaskProvider extractTask = rootProject.getTasks().register(extractTaskName, Copy.class, copyTask -> { - copyTask.doFirst(t -> rootProject.delete(extractDir)); + copyTask.doFirst(new Action() { + @Override + public void execute(Task t) { + rootProject.delete(extractDir); + } + }); copyTask.into(extractDir); copyTask.from(fileGetter, removeRootDir); });