diff --git a/build.gradle b/build.gradle index 22505ed69a66d..0785ed9d51b69 100644 --- a/build.gradle +++ b/build.gradle @@ -160,7 +160,7 @@ task verifyVersions { * after the backport of the backcompat code is complete. */ final boolean bwc_tests_enabled = false -final String bwc_tests_disabled_issue = "https://github.com/elastic/elasticsearch/pull/37951" /* place a PR link here when committing bwc changes */ +final String bwc_tests_disabled_issue = "https://github.com/elastic/elasticsearch/pull/37951,https://github.com/elastic/elasticsearch/pull/38180" /* place a PR link here when committing bwc changes */ if (bwc_tests_enabled == false) { if (bwc_tests_disabled_issue.isEmpty()) { throw new GradleException("bwc_tests_disabled_issue must be set when bwc_tests_enabled == false") diff --git a/docs/reference/mapping/types/geo-shape.asciidoc b/docs/reference/mapping/types/geo-shape.asciidoc index a740b8c3b41a0..a46b8a3f8a87c 100644 --- a/docs/reference/mapping/types/geo-shape.asciidoc +++ b/docs/reference/mapping/types/geo-shape.asciidoc @@ -21,7 +21,7 @@ type. |======================================================================= |Option |Description| Default -|`tree |deprecated[6.6, PrefixTrees no longer used] Name of the PrefixTree +|`tree` |deprecated[6.6, PrefixTrees no longer used] Name of the PrefixTree implementation to be used: `geohash` for GeohashPrefixTree and `quadtree` for QuadPrefixTree. Note: This parameter is only relevant for `term` and `recursive` strategies. @@ -127,6 +127,20 @@ the `tree` or `strategy` parameters according to the appropriate <>. Note that these parameters are now deprecated and will be removed in a future version. +*IMPORTANT NOTES* + +The following features are not yet supported with the new indexing approach: + +* `geo_shape` query with `MultiPoint` geometry types - Elasticsearch currently prevents searching + geo_shape fields with a MultiPoint geometry type to avoid a brute force linear search + over each individual point. 
For now, if this is absolutely needed, this can be achieved + using a `bool` query with each individual point. + +* `CONTAINS` relation query - when using the new default vector indexing strategy, `geo_shape` + queries with `relation` defined as `contains` are not yet supported. If this query relation + is an absolute necessity, it is recommended to set `strategy` to `quadtree` and use the + deprecated PrefixTree strategy indexing approach. + [[prefix-trees]] [float] ==== Prefix trees diff --git a/docs/reference/migration/migrate_7_0/settings.asciidoc b/docs/reference/migration/migrate_7_0/settings.asciidoc index 0b18c267748b5..2e5631b378652 100644 --- a/docs/reference/migration/migrate_7_0/settings.asciidoc +++ b/docs/reference/migration/migrate_7_0/settings.asciidoc @@ -138,11 +138,11 @@ used. TLS version 1.0 is now disabled by default as it suffers from https://www.owasp.org/index.php/Transport_Layer_Protection_Cheat_Sheet#Rule_-_Only_Support_Strong_Protocols[known security issues]. -The default protocols are now TLSv1.2 and TLSv1.1. +The default protocols are now TLSv1.3 (if supported), TLSv1.2 and TLSv1.1. 
You can enable TLS v1.0 by configuring the relevant `ssl.supported_protocols` setting to include `"TLSv1"`, for example: [source,yaml] -------------------------------------------------- -xpack.security.http.ssl.supported_protocols: [ "TLSv1.2", "TLSv1.1", "TLSv1" ] +xpack.security.http.ssl.supported_protocols: [ "TLSv1.3", "TLSv1.2", "TLSv1.1", "TLSv1" ] -------------------------------------------------- [float] diff --git a/docs/reference/ml/apis/find-file-structure.asciidoc b/docs/reference/ml/apis/find-file-structure.asciidoc index 9650efff16189..caed632bda0e5 100644 --- a/docs/reference/ml/apis/find-file-structure.asciidoc +++ b/docs/reference/ml/apis/find-file-structure.asciidoc @@ -606,11 +606,11 @@ If the request does not encounter errors, you receive the following result: }, "tpep_dropoff_datetime" : { "type" : "date", - "format" : "8yyyy-MM-dd HH:mm:ss" + "format" : "yyyy-MM-dd HH:mm:ss" }, "tpep_pickup_datetime" : { "type" : "date", - "format" : "8yyyy-MM-dd HH:mm:ss" + "format" : "yyyy-MM-dd HH:mm:ss" }, "trip_distance" : { "type" : "double" @@ -624,7 +624,7 @@ If the request does not encounter errors, you receive the following result: "field" : "tpep_pickup_datetime", "timezone" : "{{ beat.timezone }}", "formats" : [ - "8yyyy-MM-dd HH:mm:ss" + "yyyy-MM-dd HH:mm:ss" ] } } @@ -1398,7 +1398,7 @@ this: "field" : "timestamp", "timezone" : "{{ beat.timezone }}", "formats" : [ - "8yyyy-MM-dd'T'HH:mm:ss,SSS" + "yyyy-MM-dd'T'HH:mm:ss,SSS" ] } }, @@ -1558,7 +1558,7 @@ this: "field" : "timestamp", "timezone" : "{{ beat.timezone }}", "formats" : [ - "8yyyy-MM-dd'T'HH:mm:ss,SSS" + "yyyy-MM-dd'T'HH:mm:ss,SSS" ] } }, diff --git a/docs/reference/settings/security-settings.asciidoc b/docs/reference/settings/security-settings.asciidoc index 16ce60e986b93..393428373f8c0 100644 --- a/docs/reference/settings/security-settings.asciidoc +++ b/docs/reference/settings/security-settings.asciidoc @@ -480,7 +480,8 @@ and `full`. Defaults to `full`. 
See <> for an explanation of these values. `ssl.supported_protocols`:: -Supported protocols for TLS/SSL (with versions). Defaults to `TLSv1.2,TLSv1.1`. +Supported protocols for TLS/SSL (with versions). Defaults to `TLSv1.3,TLSv1.2,TLSv1.1` if +the JVM supports TLSv1.3, otherwise `TLSv1.2,TLSv1.1`. `ssl.cipher_suites`:: Specifies the cipher suites that should be supported when communicating with the LDAP server. @@ -724,7 +725,8 @@ and `full`. Defaults to `full`. See <> for an explanation of these values. `ssl.supported_protocols`:: -Supported protocols for TLS/SSL (with versions). Defaults to `TLSv1.2, TLSv1.1`. +Supported protocols for TLS/SSL (with versions). Defaults to `TLSv1.3,TLSv1.2,TLSv1.1` if +the JVM supports TLSv1.3, otherwise `TLSv1.2,TLSv1.1`. `ssl.cipher_suites`:: Specifies the cipher suites that should be supported when communicating with the Active Directory server. @@ -1132,7 +1134,8 @@ Defaults to `full`. See <> for a more detailed explanation of these values. `ssl.supported_protocols`:: -Specifies the supported protocols for TLS/SSL. +Specifies the supported protocols for TLS/SSL. Defaults to `TLSv1.3,TLSv1.2,TLSv1.1` if +the JVM supports TLSv1.3, otherwise `TLSv1.2,TLSv1.1`. `ssl.cipher_suites`:: Specifies the @@ -1206,7 +1209,8 @@ settings. For more information, see `ssl.supported_protocols`:: Supported protocols with versions. Valid protocols: `SSLv2Hello`, -`SSLv3`, `TLSv1`, `TLSv1.1`, `TLSv1.2`. Defaults to `TLSv1.2`, `TLSv1.1`. +`SSLv3`, `TLSv1`, `TLSv1.1`, `TLSv1.2`, `TLSv1.3`. Defaults to `TLSv1.3,TLSv1.2,TLSv1.1` if +the JVM supports TLSv1.3, otherwise `TLSv1.2,TLSv1.1`. 
+ -- NOTE: If `xpack.security.fips_mode.enabled` is `true`, you cannot use `SSLv2Hello` diff --git a/docs/reference/settings/ssl-settings.asciidoc b/docs/reference/settings/ssl-settings.asciidoc index a04f5581f2abd..a4422b8fb2d3c 100644 --- a/docs/reference/settings/ssl-settings.asciidoc +++ b/docs/reference/settings/ssl-settings.asciidoc @@ -11,7 +11,8 @@ endif::server[] +{ssl-prefix}.ssl.supported_protocols+:: Supported protocols with versions. Valid protocols: `SSLv2Hello`, -`SSLv3`, `TLSv1`, `TLSv1.1`, `TLSv1.2`. Defaults to `TLSv1.2`, `TLSv1.1`. +`SSLv3`, `TLSv1`, `TLSv1.1`, `TLSv1.2`, `TLSv1.3`. Defaults to `TLSv1.3,TLSv1.2,TLSv1.1` if +the JVM supports TLSv1.3, otherwise `TLSv1.2,TLSv1.1`. ifdef::server[] diff --git a/libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/SslConfiguration.java b/libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/SslConfiguration.java index 146ba916b6b07..68df7d248340d 100644 --- a/libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/SslConfiguration.java +++ b/libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/SslConfiguration.java @@ -24,11 +24,14 @@ import javax.net.ssl.X509ExtendedTrustManager; import java.nio.file.Path; import java.security.GeneralSecurityException; -import java.util.Arrays; +import java.security.NoSuchAlgorithmException; import java.util.Collection; import java.util.Collections; import java.util.HashSet; +import java.util.LinkedHashMap; import java.util.List; +import java.util.Map; +import java.util.Map.Entry; import java.util.Objects; import java.util.Set; @@ -40,6 +43,30 @@ */ public class SslConfiguration { + /** + * An ordered map of protocol algorithms to SSLContext algorithms. The map is ordered from most + * secure to least secure. The names in this map are taken from the + * + * Java Security Standard Algorithm Names Documentation for Java 11. 
+ */ + static final Map ORDERED_PROTOCOL_ALGORITHM_MAP; + static { + LinkedHashMap protocolAlgorithmMap = new LinkedHashMap<>(); + try { + SSLContext.getInstance("TLSv1.3"); + protocolAlgorithmMap.put("TLSv1.3", "TLSv1.3"); + } catch (NoSuchAlgorithmException e) { + // ignore since we support JVMs that do not support TLSv1.3 + } + protocolAlgorithmMap.put("TLSv1.2", "TLSv1.2"); + protocolAlgorithmMap.put("TLSv1.1", "TLSv1.1"); + protocolAlgorithmMap.put("TLSv1", "TLSv1"); + protocolAlgorithmMap.put("SSLv3", "SSLv3"); + protocolAlgorithmMap.put("SSLv2", "SSL"); + protocolAlgorithmMap.put("SSLv2Hello", "SSL"); + ORDERED_PROTOCOL_ALGORITHM_MAP = Collections.unmodifiableMap(protocolAlgorithmMap); + } + private final SslTrustConfig trustConfig; private final SslKeyConfig keyConfig; private final SslVerificationMode verificationMode; @@ -124,12 +151,13 @@ private String contextProtocol() { if (supportedProtocols.isEmpty()) { throw new SslConfigException("no SSL/TLS protocols have been configured"); } - for (String tryProtocol : Arrays.asList("TLSv1.2", "TLSv1.1", "TLSv1", "SSLv3")) { - if (supportedProtocols.contains(tryProtocol)) { - return tryProtocol; + for (Entry entry : ORDERED_PROTOCOL_ALGORITHM_MAP.entrySet()) { + if (supportedProtocols.contains(entry.getKey())) { + return entry.getValue(); } } - return "SSL"; + throw new SslConfigException("no supported SSL/TLS protocol was found in the configured supported protocols: " + + supportedProtocols); } @Override diff --git a/libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/SslConfigurationLoader.java b/libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/SslConfigurationLoader.java index efe87f7c30322..6e511565a9f53 100644 --- a/libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/SslConfigurationLoader.java +++ b/libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/SslConfigurationLoader.java @@ -26,12 +26,14 @@ import java.security.NoSuchAlgorithmException; import java.util.ArrayList; 
import java.util.Arrays; +import java.util.Collections; import java.util.List; import java.util.Objects; import java.util.function.Function; import java.util.stream.Collectors; import static org.elasticsearch.common.ssl.KeyStoreUtil.inferKeyStoreType; +import static org.elasticsearch.common.ssl.SslConfiguration.ORDERED_PROTOCOL_ALGORITHM_MAP; import static org.elasticsearch.common.ssl.SslConfigurationKeys.CERTIFICATE; import static org.elasticsearch.common.ssl.SslConfigurationKeys.CERTIFICATE_AUTHORITIES; import static org.elasticsearch.common.ssl.SslConfigurationKeys.CIPHERS; @@ -68,7 +70,9 @@ */ public abstract class SslConfigurationLoader { - static final List DEFAULT_PROTOCOLS = Arrays.asList("TLSv1.2", "TLSv1.1"); + static final List DEFAULT_PROTOCOLS = Collections.unmodifiableList( + ORDERED_PROTOCOL_ALGORITHM_MAP.containsKey("TLSv1.3") ? + Arrays.asList("TLSv1.3", "TLSv1.2", "TLSv1.1") : Arrays.asList("TLSv1.2", "TLSv1.1")); static final List DEFAULT_CIPHERS = loadDefaultCiphers(); private static final char[] EMPTY_PASSWORD = new char[0]; diff --git a/modules/parent-join/src/main/java/org/elasticsearch/join/query/HasChildQueryBuilder.java b/modules/parent-join/src/main/java/org/elasticsearch/join/query/HasChildQueryBuilder.java index 696c4a72bdba8..1c44daea4e982 100644 --- a/modules/parent-join/src/main/java/org/elasticsearch/join/query/HasChildQueryBuilder.java +++ b/modules/parent-join/src/main/java/org/elasticsearch/join/query/HasChildQueryBuilder.java @@ -460,9 +460,13 @@ protected QueryBuilder doRewrite(QueryRewriteContext queryShardContext) throws I @Override protected void extractInnerHitBuilders(Map innerHits) { if (innerHitBuilder != null) { + String name = innerHitBuilder.getName() != null ? 
innerHitBuilder.getName() : type; + if (innerHits.containsKey(name)) { + throw new IllegalArgumentException("[inner_hits] already contains an entry for key [" + name + "]"); + } + Map children = new HashMap<>(); InnerHitContextBuilder.extractInnerHits(query, children); - String name = innerHitBuilder.getName() != null ? innerHitBuilder.getName() : type; InnerHitContextBuilder innerHitContextBuilder = new ParentChildInnerHitContextBuilder(type, true, query, innerHitBuilder, children); innerHits.put(name, innerHitContextBuilder); diff --git a/modules/parent-join/src/main/java/org/elasticsearch/join/query/HasParentQueryBuilder.java b/modules/parent-join/src/main/java/org/elasticsearch/join/query/HasParentQueryBuilder.java index e98fdb9e9699d..30a2718aab054 100644 --- a/modules/parent-join/src/main/java/org/elasticsearch/join/query/HasParentQueryBuilder.java +++ b/modules/parent-join/src/main/java/org/elasticsearch/join/query/HasParentQueryBuilder.java @@ -285,9 +285,13 @@ protected QueryBuilder doRewrite(QueryRewriteContext queryShardContext) throws I @Override protected void extractInnerHitBuilders(Map innerHits) { if (innerHitBuilder != null) { + String name = innerHitBuilder.getName() != null ? innerHitBuilder.getName() : type; + if (innerHits.containsKey(name)) { + throw new IllegalArgumentException("[inner_hits] already contains an entry for key [" + name + "]"); + } + Map children = new HashMap<>(); InnerHitContextBuilder.extractInnerHits(query, children); - String name = innerHitBuilder.getName() != null ? 
innerHitBuilder.getName() : type; InnerHitContextBuilder innerHitContextBuilder = new ParentChildInnerHitContextBuilder(type, false, query, innerHitBuilder, children); innerHits.put(name, innerHitContextBuilder); diff --git a/modules/parent-join/src/test/java/org/elasticsearch/join/query/HasChildQueryBuilderTests.java b/modules/parent-join/src/test/java/org/elasticsearch/join/query/HasChildQueryBuilderTests.java index eea01d61386de..2a28e232b5eda 100644 --- a/modules/parent-join/src/test/java/org/elasticsearch/join/query/HasChildQueryBuilderTests.java +++ b/modules/parent-join/src/test/java/org/elasticsearch/join/query/HasChildQueryBuilderTests.java @@ -367,4 +367,12 @@ public void testIgnoreUnmappedWithRewrite() throws IOException { assertThat(query, notNullValue()); assertThat(query, instanceOf(MatchNoDocsQuery.class)); } + + public void testExtractInnerHitBuildersWithDuplicate() { + final HasChildQueryBuilder queryBuilder + = new HasChildQueryBuilder(CHILD_DOC, new WrapperQueryBuilder(new MatchAllQueryBuilder().toString()), ScoreMode.None); + queryBuilder.innerHit(new InnerHitBuilder("some_name")); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, + () -> InnerHitContextBuilder.extractInnerHits(queryBuilder, Collections.singletonMap("some_name", null))); + } } diff --git a/modules/parent-join/src/test/java/org/elasticsearch/join/query/HasParentQueryBuilderTests.java b/modules/parent-join/src/test/java/org/elasticsearch/join/query/HasParentQueryBuilderTests.java index 164405f653444..ea77ad80799ba 100644 --- a/modules/parent-join/src/test/java/org/elasticsearch/join/query/HasParentQueryBuilderTests.java +++ b/modules/parent-join/src/test/java/org/elasticsearch/join/query/HasParentQueryBuilderTests.java @@ -268,4 +268,12 @@ public void testIgnoreUnmappedWithRewrite() throws IOException { assertThat(query, notNullValue()); assertThat(query, instanceOf(MatchNoDocsQuery.class)); } + + public void testExtractInnerHitBuildersWithDuplicate() { + 
final HasParentQueryBuilder queryBuilder + = new HasParentQueryBuilder(CHILD_DOC, new WrapperQueryBuilder(new MatchAllQueryBuilder().toString()), false); + queryBuilder.innerHit(new InnerHitBuilder("some_name")); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, + () -> InnerHitContextBuilder.extractInnerHits(queryBuilder, Collections.singletonMap("some_name", null))); + } } diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexRestClientSslTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexRestClientSslTests.java index f71d124986699..87ab4b3241410 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexRestClientSslTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexRestClientSslTests.java @@ -143,6 +143,7 @@ public void testClientSucceedsWithCertificateAuthorities() throws IOException { } } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/38212") public void testClientSucceedsWithVerificationDisabled() throws IOException { assertFalse("Cannot disable verification in FIPS JVM", inFipsJvm()); final List threads = new ArrayList<>(); diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_mapping/20_mix_typeless_typeful.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_mapping/20_mix_typeless_typeful.yml index 13cb3321841cf..7c6136d273979 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_mapping/20_mix_typeless_typeful.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_mapping/20_mix_typeless_typeful.yml @@ -55,8 +55,8 @@ "PUT mapping with _doc on an index that has types": - skip: - version: " - 6.6.99" - reason: include_type_name is only supported as of 6.7 + version: "all" + reason: include_type_name is only supported as of 6.7 # AwaitsFix: https://github.com/elastic/elasticsearch/issues/38202 - do: diff --git 
a/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java b/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java index 602a7123d0014..64627ee4977ea 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java @@ -205,17 +205,14 @@ public SearchRequest(StreamInput in) throws IOException { localClusterAlias = in.readOptionalString(); if (localClusterAlias != null) { absoluteStartMillis = in.readVLong(); + finalReduce = in.readBoolean(); } else { absoluteStartMillis = DEFAULT_ABSOLUTE_START_MILLIS; + finalReduce = true; } } else { localClusterAlias = null; absoluteStartMillis = DEFAULT_ABSOLUTE_START_MILLIS; - } - //TODO move to the 6_7_0 branch once backported to 6.x - if (in.getVersion().onOrAfter(Version.V_7_0_0)) { - finalReduce = in.readBoolean(); - } else { finalReduce = true; } if (in.getVersion().onOrAfter(Version.V_7_0_0)) { @@ -245,12 +242,9 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalString(localClusterAlias); if (localClusterAlias != null) { out.writeVLong(absoluteStartMillis); + out.writeBoolean(finalReduce); } } - //TODO move to the 6_7_0 branch once backported to 6.x - if (out.getVersion().onOrAfter(Version.V_7_0_0)) { - out.writeBoolean(finalReduce); - } if (out.getVersion().onOrAfter(Version.V_7_0_0)) { out.writeBoolean(ccsMinimizeRoundtrips); } diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java b/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java index d73d33a0635c0..231f5555e8ff1 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java @@ -1131,6 +1131,21 @@ public Iterable getFoundPeers() { return peerFinder.getFoundPeers(); } + /** + * If there is any current committed publication, this method cancels 
it. + * This method is used exclusively by tests. + * @return true if publication was cancelled, false if there is no current committed publication. + */ + boolean cancelCommittedPublication() { + synchronized (mutex) { + if (currentPublication.isPresent() && currentPublication.get().isCommitted()) { + currentPublication.get().cancel("cancelCommittedPublication"); + return true; + } + return false; + } + } + class CoordinatorPublication extends Publication { private final PublishRequest publishRequest; diff --git a/server/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodeFilters.java b/server/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodeFilters.java index 6b15d1f24581d..aacda43864e51 100644 --- a/server/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodeFilters.java +++ b/server/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodeFilters.java @@ -147,16 +147,7 @@ public boolean match(DiscoveryNode node) { } } else if ("_host".equals(attr)) { for (String value : values) { - if (Regex.simpleMatch(value, node.getHostName())) { - if (opType == OpType.OR) { - return true; - } - } else { - if (opType == OpType.AND) { - return false; - } - } - if (Regex.simpleMatch(value, node.getHostAddress())) { + if (Regex.simpleMatch(value, node.getHostName()) || Regex.simpleMatch(value, node.getHostAddress())) { if (opType == OpType.OR) { return true; } diff --git a/server/src/main/java/org/elasticsearch/index/query/NestedQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/NestedQueryBuilder.java index 3c3856e208f04..ee8062308ac11 100644 --- a/server/src/main/java/org/elasticsearch/index/query/NestedQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/NestedQueryBuilder.java @@ -317,10 +317,14 @@ protected QueryBuilder doRewrite(QueryRewriteContext queryRewriteContext) throws @Override public void extractInnerHitBuilders(Map innerHits) { if (innerHitBuilder != null) { + String name = innerHitBuilder.getName() 
!= null ? innerHitBuilder.getName() : path; + if (innerHits.containsKey(name)) { + throw new IllegalArgumentException("[inner_hits] already contains an entry for key [" + name + "]"); + } + Map children = new HashMap<>(); InnerHitContextBuilder.extractInnerHits(query, children); InnerHitContextBuilder innerHitContextBuilder = new NestedInnerHitContextBuilder(path, query, innerHitBuilder, children); - String name = innerHitBuilder.getName() != null ? innerHitBuilder.getName() : path; innerHits.put(name, innerHitContextBuilder); } } diff --git a/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java b/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java index b8fa8c6f1a9c8..49fd26c070af1 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java +++ b/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java @@ -314,6 +314,12 @@ public ClusterState execute(ClusterState currentState) { currentIndexMetaData.getMappingVersion() + 1)); indexMdBuilder.settingsVersion(Math.max(snapshotIndexMetaData.getSettingsVersion(), currentIndexMetaData.getSettingsVersion() + 1)); + + for (int shard = 0; shard < snapshotIndexMetaData.getNumberOfShards(); shard++) { + indexMdBuilder.primaryTerm(shard, + Math.max(snapshotIndexMetaData.primaryTerm(shard), currentIndexMetaData.primaryTerm(shard))); + } + if (!request.includeAliases()) { // Remove all snapshot aliases if (!snapshotIndexMetaData.getAliases().isEmpty()) { diff --git a/server/src/test/java/org/elasticsearch/action/search/SearchRequestTests.java b/server/src/test/java/org/elasticsearch/action/search/SearchRequestTests.java index c139b75f45c42..df9725ce89bff 100644 --- a/server/src/test/java/org/elasticsearch/action/search/SearchRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/SearchRequestTests.java @@ -91,14 +91,10 @@ public void testRandomVersionSerialization() throws IOException { if (version.before(Version.V_6_7_0)) { 
assertNull(deserializedRequest.getLocalClusterAlias()); assertAbsoluteStartMillisIsCurrentTime(deserializedRequest); + assertTrue(deserializedRequest.isFinalReduce()); } else { assertEquals(searchRequest.getLocalClusterAlias(), deserializedRequest.getLocalClusterAlias()); assertEquals(searchRequest.getOrCreateAbsoluteStartMillis(), deserializedRequest.getOrCreateAbsoluteStartMillis()); - } - //TODO move to the 6_7_0 branch once backported to 6.x - if (version.before(Version.V_7_0_0)) { - assertTrue(deserializedRequest.isFinalReduce()); - } else { assertEquals(searchRequest.isFinalReduce(), deserializedRequest.isFinalReduce()); } } diff --git a/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionSingleNodeTests.java b/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionSingleNodeTests.java index ed14d11946f75..b0980481d38e2 100644 --- a/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionSingleNodeTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionSingleNodeTests.java @@ -171,6 +171,7 @@ public void testFinalReduce() { assertEquals(2, searchResponse.getHits().getTotalHits().value); Aggregations aggregations = searchResponse.getAggregations(); LongTerms longTerms = aggregations.get("terms"); + assertEquals(2, longTerms.getBuckets().size()); } } } diff --git a/server/src/test/java/org/elasticsearch/indices/state/RareClusterStateIT.java b/server/src/test/java/org/elasticsearch/cluster/coordination/RareClusterStateIT.java similarity index 73% rename from server/src/test/java/org/elasticsearch/indices/state/RareClusterStateIT.java rename to server/src/test/java/org/elasticsearch/cluster/coordination/RareClusterStateIT.java index d2f65d1168da8..49b4086372d21 100644 --- a/server/src/test/java/org/elasticsearch/indices/state/RareClusterStateIT.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/RareClusterStateIT.java @@ -17,12 +17,14 @@ * under the 
License. */ -package org.elasticsearch.indices.state; +package org.elasticsearch.cluster.coordination; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.Version; import org.elasticsearch.action.ActionFuture; -import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestBuilder; +import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.index.IndexResponse; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.cluster.ClusterState; @@ -40,7 +42,7 @@ import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.discovery.DiscoverySettings; +import org.elasticsearch.discovery.Discovery; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.mapper.DocumentMapper; @@ -51,10 +53,9 @@ import org.elasticsearch.test.disruption.BlockClusterStateProcessing; import org.elasticsearch.test.junit.annotations.TestLogging; -import java.util.Arrays; import java.util.List; import java.util.Map; -import java.util.concurrent.atomic.AtomicReference; +import java.util.concurrent.TimeUnit; import static java.util.Collections.emptyMap; import static java.util.Collections.emptySet; @@ -86,7 +87,7 @@ protected int numberOfReplicas() { return 0; } - public void testAssignmentWithJustAddedNodes() throws Exception { + public void testAssignmentWithJustAddedNodes() { internalCluster().startNode(); final String index = "index"; prepareCreate(index).setSettings(Settings.builder().put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) @@ -149,22 +150,20 @@ public void onFailure(String source, Exception e) { }); } + private ActionFuture executeAndCancelCommittedPublication( + ActionRequestBuilder req) throws Exception { + ActionFuture future = req.execute(); + 
assertBusy(() -> assertTrue(((Coordinator)internalCluster().getMasterNodeInstance(Discovery.class)).cancelCommittedPublication())); + return future; + } + public void testDeleteCreateInOneBulk() throws Exception { - internalCluster().startMasterOnlyNode(Settings.builder() - .put(TestZenDiscovery.USE_ZEN2.getKey(), false) // TODO: convert test to support Zen2 - .build()); - String dataNode = internalCluster().startDataOnlyNode(Settings.builder() - .put(TestZenDiscovery.USE_ZEN2.getKey(), false) // TODO: convert test to support Zen2 - .build()); + internalCluster().startMasterOnlyNode(); + String dataNode = internalCluster().startDataOnlyNode(); assertFalse(client().admin().cluster().prepareHealth().setWaitForNodes("2").get().isTimedOut()); prepareCreate("test").setSettings(Settings.builder().put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)).addMapping("type").get(); ensureGreen("test"); - // now that the cluster is stable, remove publishing timeout - assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder() - .put(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey(), "0") - .put(DiscoverySettings.COMMIT_TIMEOUT_SETTING.getKey(), "30s"))); - // block none master node. 
BlockClusterStateProcessing disruption = new BlockClusterStateProcessing(dataNode, random()); internalCluster().setDisruptionScheme(disruption); @@ -173,10 +172,14 @@ public void testDeleteCreateInOneBulk() throws Exception { refresh(); disruption.startDisrupting(); logger.info("--> delete index and recreate it"); - assertFalse(client().admin().indices().prepareDelete("test").setTimeout("200ms").get().isAcknowledged()); - assertFalse(prepareCreate("test").setTimeout("200ms").setSettings(Settings.builder().put(IndexMetaData - .SETTING_NUMBER_OF_REPLICAS, 0).put(IndexMetaData.SETTING_WAIT_FOR_ACTIVE_SHARDS.getKey(), "0")).get().isAcknowledged()); + executeAndCancelCommittedPublication(client().admin().indices().prepareDelete("test").setTimeout("0s")) + .get(10, TimeUnit.SECONDS); + executeAndCancelCommittedPublication(prepareCreate("test").setSettings(Settings.builder().put(IndexMetaData + .SETTING_NUMBER_OF_REPLICAS, 0).put(IndexMetaData.SETTING_WAIT_FOR_ACTIVE_SHARDS.getKey(), "0")).setTimeout("0s")) + .get(10, TimeUnit.SECONDS); + logger.info("--> letting cluster proceed"); + disruption.stopDisrupting(); ensureGreen(TimeValue.timeValueMinutes(30), "test"); // due to publish_timeout of 0, wait for data node to have cluster state fully applied @@ -196,12 +199,7 @@ public void testDelayedMappingPropagationOnPrimary() throws Exception { // but the change might not be on the node that performed the indexing // operation yet - Settings settings = Settings.builder() - .put(DiscoverySettings.COMMIT_TIMEOUT_SETTING.getKey(), "30s") // explicitly set so it won't default to publish timeout - .put(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey(), "0s") // don't wait post commit as we are blocking things by design - .put(TestZenDiscovery.USE_ZEN2.getKey(), false) // TODO: convert test to support Zen2 - .build(); - final List nodeNames = internalCluster().startNodes(2, settings); + final List nodeNames = internalCluster().startNodes(2); 
assertFalse(client().admin().cluster().prepareHealth().setWaitForNodes("2").get().isTimedOut()); final String master = internalCluster().getMasterName(); @@ -242,19 +240,10 @@ public void testDelayedMappingPropagationOnPrimary() throws Exception { disruption.startDisrupting(); // Add a new mapping... - final AtomicReference putMappingResponse = new AtomicReference<>(); - client().admin().indices().preparePutMapping("index").setType("type").setSource("field", "type=long").execute( - new ActionListener() { - @Override - public void onResponse(AcknowledgedResponse response) { - putMappingResponse.set(response); - } + ActionFuture putMappingResponse = + executeAndCancelCommittedPublication(client().admin().indices().preparePutMapping("index") + .setType("type").setSource("field", "type=long")); - @Override - public void onFailure(Exception e) { - putMappingResponse.set(e); - } - }); // ...and wait for mappings to be available on master assertBusy(() -> { ImmutableOpenMap indexMappings = client().admin().indices() @@ -273,36 +262,24 @@ public void onFailure(Exception e) { assertNotNull(fieldMapping); }); - final AtomicReference docIndexResponse = new AtomicReference<>(); - client().prepareIndex("index", "type", "1").setSource("field", 42).execute(new ActionListener() { - @Override - public void onResponse(IndexResponse response) { - docIndexResponse.set(response); - } - - @Override - public void onFailure(Exception e) { - docIndexResponse.set(e); - } - }); + // this request does not change the cluster state, because mapping is already created, + // we don't await and cancel committed publication + ActionFuture docIndexResponse = + client().prepareIndex("index", "type", "1").setSource("field", 42).execute(); // Wait a bit to make sure that the reason why we did not get a response // is that cluster state processing is blocked and not just that it takes // time to process the indexing request Thread.sleep(100); - assertThat(putMappingResponse.get(), equalTo(null)); - 
assertThat(docIndexResponse.get(), equalTo(null)); + assertFalse(putMappingResponse.isDone()); + assertFalse(docIndexResponse.isDone()); // Now make sure the indexing request finishes successfully disruption.stopDisrupting(); assertBusy(() -> { - assertThat(putMappingResponse.get(), instanceOf(AcknowledgedResponse.class)); - AcknowledgedResponse resp = (AcknowledgedResponse) putMappingResponse.get(); - assertTrue(resp.isAcknowledged()); - assertThat(docIndexResponse.get(), instanceOf(IndexResponse.class)); - IndexResponse docResp = (IndexResponse) docIndexResponse.get(); - assertEquals(Arrays.toString(docResp.getShardInfo().getFailures()), - 1, docResp.getShardInfo().getTotal()); + assertTrue(putMappingResponse.get(10, TimeUnit.SECONDS).isAcknowledged()); + assertThat(docIndexResponse.get(10, TimeUnit.SECONDS), instanceOf(IndexResponse.class)); + assertEquals(1, docIndexResponse.get(10, TimeUnit.SECONDS).getShardInfo().getTotal()); }); } @@ -312,12 +289,7 @@ public void testDelayedMappingPropagationOnReplica() throws Exception { // Here we want to test that everything goes well if the mappings that // are needed for a document are not available on the replica at the // time of indexing it - final List nodeNames = internalCluster().startNodes(2, - Settings.builder() - .put(DiscoverySettings.COMMIT_TIMEOUT_SETTING.getKey(), "30s") // explicitly set so it won't default to publish timeout - .put(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey(), "0s") // don't wait post commit as we are blocking things by design - .put(TestZenDiscovery.USE_ZEN2.getKey(), false) // TODO: convert test to support Zen2 - .build()); + final List nodeNames = internalCluster().startNodes(2); assertFalse(client().admin().cluster().prepareHealth().setWaitForNodes("2").get().isTimedOut()); final String master = internalCluster().getMasterName(); @@ -359,19 +331,10 @@ public void testDelayedMappingPropagationOnReplica() throws Exception { BlockClusterStateProcessing disruption = new 
BlockClusterStateProcessing(otherNode, random()); internalCluster().setDisruptionScheme(disruption); disruption.startDisrupting(); - final AtomicReference putMappingResponse = new AtomicReference<>(); - client().admin().indices().preparePutMapping("index").setType("type").setSource("field", "type=long").execute( - new ActionListener() { - @Override - public void onResponse(AcknowledgedResponse response) { - putMappingResponse.set(response); - } + final ActionFuture putMappingResponse = + executeAndCancelCommittedPublication(client().admin().indices().preparePutMapping("index") + .setType("type").setSource("field", "type=long")); - @Override - public void onFailure(Exception e) { - putMappingResponse.set(e); - } - }); final Index index = resolveIndex("index"); // Wait for mappings to be available on master assertBusy(() -> { @@ -384,25 +347,17 @@ public void onFailure(Exception e) { assertNotNull(mapper.mappers().getMapper("field")); }); - final AtomicReference docIndexResponse = new AtomicReference<>(); - client().prepareIndex("index", "type", "1").setSource("field", 42).execute(new ActionListener() { - @Override - public void onResponse(IndexResponse response) { - docIndexResponse.set(response); - } - - @Override - public void onFailure(Exception e) { - docIndexResponse.set(e); - } - }); + final ActionFuture docIndexResponse = client().prepareIndex("index", "type", "1").setSource("field", 42).execute(); assertBusy(() -> assertTrue(client().prepareGet("index", "type", "1").get().isExists())); // index another document, this time using dynamic mappings. // The ack timeout of 0 on dynamic mapping updates makes it possible for the document to be indexed on the primary, even // if the dynamic mapping update is not applied on the replica yet. 
- ActionFuture dynamicMappingsFut = client().prepareIndex("index", "type", "2").setSource("field2", 42).execute(); + // this request does not change the cluster state, because the mapping is dynamic, + // we need to await and cancel committed publication + ActionFuture dynamicMappingsFut = + executeAndCancelCommittedPublication(client().prepareIndex("index", "type", "2").setSource("field2", 42)); // ...and wait for second mapping to be available on master assertBusy(() -> { @@ -421,22 +376,18 @@ public void onFailure(Exception e) { // We wait on purpose to make sure that the document is not indexed because the shard operation is stalled // and not just because it takes time to replicate the indexing request to the replica Thread.sleep(100); - assertThat(putMappingResponse.get(), equalTo(null)); - assertThat(docIndexResponse.get(), equalTo(null)); + assertFalse(putMappingResponse.isDone()); + assertFalse(docIndexResponse.isDone()); // Now make sure the indexing request finishes successfully disruption.stopDisrupting(); assertBusy(() -> { - assertThat(putMappingResponse.get(), instanceOf(AcknowledgedResponse.class)); - AcknowledgedResponse resp = (AcknowledgedResponse) putMappingResponse.get(); - assertTrue(resp.isAcknowledged()); - assertThat(docIndexResponse.get(), instanceOf(IndexResponse.class)); - IndexResponse docResp = (IndexResponse) docIndexResponse.get(); - assertEquals(Arrays.toString(docResp.getShardInfo().getFailures()), - 2, docResp.getShardInfo().getTotal()); // both shards should have succeeded + assertTrue(putMappingResponse.get(10, TimeUnit.SECONDS).isAcknowledged()); + assertThat(docIndexResponse.get(10, TimeUnit.SECONDS), instanceOf(IndexResponse.class)); + assertEquals(2, docIndexResponse.get(10, TimeUnit.SECONDS).getShardInfo().getTotal()); // both shards should have succeeded }); - assertThat(dynamicMappingsFut.get().getResult(), equalTo(CREATED)); + assertThat(dynamicMappingsFut.get(10, TimeUnit.SECONDS).getResult(), equalTo(CREATED)); } } 
diff --git a/server/src/test/java/org/elasticsearch/cluster/node/DiscoveryNodeFiltersTests.java b/server/src/test/java/org/elasticsearch/cluster/node/DiscoveryNodeFiltersTests.java index d6e6d1691a042..b22518a2e52b2 100644 --- a/server/src/test/java/org/elasticsearch/cluster/node/DiscoveryNodeFiltersTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/node/DiscoveryNodeFiltersTests.java @@ -235,6 +235,26 @@ public void testIpPublishFilteringMatchingOr() { assertThat(filters.match(node), equalTo(true)); } + public void testHostNameFilteringMatchingAnd() { + Settings settings = shuffleSettings(Settings.builder() + .put("xxx._host", "A") + .build()); + DiscoveryNodeFilters filters = buildFromSettings(AND, "xxx.", settings); + + DiscoveryNode node = new DiscoveryNode("", "", "", "A", "192.1.1.54", localAddress, emptyMap(), emptySet(), null); + assertThat(filters.match(node), equalTo(true)); + } + + public void testHostAddressFilteringMatchingAnd() { + Settings settings = shuffleSettings(Settings.builder() + .put("xxx._host", "192.1.1.54") + .build()); + DiscoveryNodeFilters filters = buildFromSettings(AND, "xxx.", settings); + + DiscoveryNode node = new DiscoveryNode("", "", "", "A", "192.1.1.54", localAddress, emptyMap(), emptySet(), null); + assertThat(filters.match(node), equalTo(true)); + } + public void testIpPublishFilteringNotMatchingOr() { Settings settings = shuffleSettings(Settings.builder() .put("xxx.tag", "A") diff --git a/server/src/test/java/org/elasticsearch/index/query/NestedQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/NestedQueryBuilderTests.java index ac9ae8d0fa7fb..a3b6376a048f2 100644 --- a/server/src/test/java/org/elasticsearch/index/query/NestedQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/NestedQueryBuilderTests.java @@ -41,6 +41,7 @@ import org.hamcrest.Matchers; import java.io.IOException; +import java.util.Collections; import java.util.HashMap; import java.util.Map; @@ 
-354,4 +355,12 @@ public void testBuildIgnoreUnmappedNestQuery() throws Exception { nestedContextBuilder.build(searchContext, innerHitsContext); assertThat(innerHitsContext.getInnerHits().size(), Matchers.equalTo(0)); } + + public void testExtractInnerHitBuildersWithDuplicate() { + final NestedQueryBuilder queryBuilder + = new NestedQueryBuilder("path", new WrapperQueryBuilder(new MatchAllQueryBuilder().toString()), ScoreMode.None); + queryBuilder.innerHit(new InnerHitBuilder("some_name")); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, + () -> InnerHitContextBuilder.extractInnerHits(queryBuilder,Collections.singletonMap("some_name", null))); + } } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/NestedIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/NestedIT.java index d68c85ab652ae..14fa6a9f565ef 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/NestedIT.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/NestedIT.java @@ -21,10 +21,13 @@ import org.apache.lucene.search.join.ScoreMode; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchPhaseExecutionException; +import org.elasticsearch.action.search.SearchRequestBuilder; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.index.query.InnerHitBuilder; +import org.elasticsearch.rest.RestStatus; import org.elasticsearch.search.aggregations.Aggregator.SubAggCollectionMode; import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.bucket.filter.Filter; @@ -46,6 +49,7 @@ import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS; import static 
org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.elasticsearch.index.query.QueryBuilders.boolQuery; import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; import static org.elasticsearch.index.query.QueryBuilders.nestedQuery; import static org.elasticsearch.index.query.QueryBuilders.termQuery; @@ -57,6 +61,7 @@ import static org.elasticsearch.search.aggregations.AggregationBuilders.sum; import static org.elasticsearch.search.aggregations.AggregationBuilders.terms; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertFailures; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse; @@ -674,4 +679,46 @@ public void testFilterAggInsideNestedAgg() throws Exception { numStringParams = bucket.getAggregations().get("num_string_params"); assertThat(numStringParams.getDocCount(), equalTo(0L)); } + + public void testExtractInnerHitBuildersWithDuplicateHitName() throws Exception { + assertAcked( + prepareCreate("idxduplicatehitnames") + .setSettings(Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 1).put(SETTING_NUMBER_OF_REPLICAS, 0)) + .addMapping("product", "categories", "type=keyword", "name", "type=text", "property", "type=nested") + ); + ensureGreen("idxduplicatehitnames"); + + SearchRequestBuilder searchRequestBuilder = client() + .prepareSearch("idxduplicatehitnames") + .setQuery(boolQuery() + .should(nestedQuery("property", termQuery("property.id", 1D), ScoreMode.None).innerHit(new InnerHitBuilder("ih1"))) + .should(nestedQuery("property", termQuery("property.id", 1D), ScoreMode.None).innerHit(new 
InnerHitBuilder("ih2"))) + .should(nestedQuery("property", termQuery("property.id", 1D), ScoreMode.None).innerHit(new InnerHitBuilder("ih1")))); + + assertFailures( + searchRequestBuilder, + RestStatus.BAD_REQUEST, + containsString("[inner_hits] already contains an entry for key [ih1]")); + } + + public void testExtractInnerHitBuildersWithDuplicatePath() throws Exception { + assertAcked( + prepareCreate("idxnullhitnames") + .setSettings(Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 1).put(SETTING_NUMBER_OF_REPLICAS, 0)) + .addMapping("product", "categories", "type=keyword", "name", "type=text", "property", "type=nested") + ); + ensureGreen("idxnullhitnames"); + + SearchRequestBuilder searchRequestBuilder = client() + .prepareSearch("idxnullhitnames") + .setQuery(boolQuery() + .should(nestedQuery("property", termQuery("property.id", 1D), ScoreMode.None).innerHit(new InnerHitBuilder())) + .should(nestedQuery("property", termQuery("property.id", 1D), ScoreMode.None).innerHit(new InnerHitBuilder())) + .should(nestedQuery("property", termQuery("property.id", 1D), ScoreMode.None).innerHit(new InnerHitBuilder()))); + + assertFailures( + searchRequestBuilder, + RestStatus.BAD_REQUEST, + containsString("[inner_hits] already contains an entry for key [property]")); + } } diff --git a/server/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java b/server/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java index 1a1b886e0e373..d633493622dcd 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java +++ b/server/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java @@ -116,6 +116,7 @@ import java.util.function.Consumer; import java.util.function.Predicate; import java.util.stream.Collectors; +import java.util.stream.IntStream; import java.util.stream.Stream; import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS; @@ -3704,6 
+3705,48 @@ public void testAbortedSnapshotDuringInitDoesNotStart() throws Exception { } } + public void testRestoreIncreasesPrimaryTerms() { + final String indexName = randomAlphaOfLengthBetween(5, 10).toLowerCase(Locale.ROOT); + createIndex(indexName, Settings.builder() + .put(SETTING_NUMBER_OF_SHARDS, 2) + .put(SETTING_NUMBER_OF_REPLICAS, 0) + .build()); + ensureGreen(indexName); + + if (randomBoolean()) { + // open and close the index to increase the primary terms + for (int i = 0; i < randomInt(3); i++) { + assertAcked(client().admin().indices().prepareClose(indexName)); + assertAcked(client().admin().indices().prepareOpen(indexName)); + } + } + + final IndexMetaData indexMetaData = client().admin().cluster().prepareState().clear().setIndices(indexName) + .setMetaData(true).get().getState().metaData().index(indexName); + final int numPrimaries = getNumShards(indexName).numPrimaries; + final Map primaryTerms = IntStream.range(0, numPrimaries) + .boxed().collect(Collectors.toMap(shardId -> shardId, indexMetaData::primaryTerm)); + + assertAcked(client().admin().cluster().preparePutRepository("test-repo").setType("fs").setSettings(randomRepoSettings())); + final CreateSnapshotResponse createSnapshotResponse = client().admin().cluster().prepareCreateSnapshot("test-repo", "test-snap") + .setWaitForCompletion(true).setIndices(indexName).get(); + assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(numPrimaries)); + assertThat(createSnapshotResponse.getSnapshotInfo().failedShards(), equalTo(0)); + + assertAcked(client().admin().indices().prepareClose(indexName)); + + final RestoreSnapshotResponse restoreSnapshotResponse = client().admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap") + .setWaitForCompletion(true).get(); + assertThat(restoreSnapshotResponse.getRestoreInfo().successfulShards(), equalTo(numPrimaries)); + assertThat(restoreSnapshotResponse.getRestoreInfo().failedShards(), equalTo(0)); + + final IndexMetaData 
restoredIndexMetaData = client().admin().cluster().prepareState().clear().setIndices(indexName) + .setMetaData(true).get().getState().metaData().index(indexName); + for (int shardId = 0; shardId < numPrimaries; shardId++) { + assertThat(restoredIndexMetaData.primaryTerm(shardId), equalTo(primaryTerms.get(shardId) + 1)); + } + } + private RepositoryData getRepositoryData(Repository repository) throws InterruptedException { ThreadPool threadPool = internalCluster().getInstance(ThreadPool.class, internalCluster().getMasterName()); final SetOnce repositoryData = new SetOnce<>(); diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/CcrIntegTestCase.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/CcrIntegTestCase.java index 2dccc0e96b7a2..3a13027cb3511 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/CcrIntegTestCase.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/CcrIntegTestCase.java @@ -487,14 +487,15 @@ private Map> getDocIdAndSeqNos(InternalTestClus return docs; } - protected void atLeastDocsIndexed(Client client, String index, long numDocsReplicated) throws InterruptedException { + protected void atLeastDocsIndexed(Client client, String index, long numDocsReplicated) throws Exception { logger.info("waiting for at least [{}] documents to be indexed into index [{}]", numDocsReplicated, index); - awaitBusy(() -> { + assertBusy(() -> { refresh(client, index); SearchRequest request = new SearchRequest(index); request.source(new SearchSourceBuilder().size(0)); SearchResponse response = client.search(request).actionGet(); - return response.getHits().getTotalHits().value >= numDocsReplicated; + assertNotNull(response.getHits().getTotalHits()); + assertThat(response.getHits().getTotalHits().value, greaterThanOrEqualTo(numDocsReplicated)); }, 60, TimeUnit.SECONDS); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackSettings.java 
b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackSettings.java index 6a2a693d3b15e..dd18e3b319468 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackSettings.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackSettings.java @@ -6,6 +6,7 @@ package org.elasticsearch.xpack.core; +import org.apache.logging.log4j.LogManager; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.xpack.core.security.SecurityField; @@ -16,6 +17,7 @@ import javax.crypto.Cipher; import javax.crypto.SecretKeyFactory; +import javax.net.ssl.SSLContext; import java.security.NoSuchAlgorithmException; import java.util.ArrayList; @@ -154,7 +156,20 @@ private XPackSettings() { } }, Setting.Property.NodeScope); - public static final List DEFAULT_SUPPORTED_PROTOCOLS = Arrays.asList("TLSv1.2", "TLSv1.1"); + public static final List DEFAULT_SUPPORTED_PROTOCOLS; + + static { + boolean supportsTLSv13 = false; + try { + SSLContext.getInstance("TLSv1.3"); + supportsTLSv13 = true; + } catch (NoSuchAlgorithmException e) { + LogManager.getLogger(XPackSettings.class).debug("TLSv1.3 is not supported", e); + } + DEFAULT_SUPPORTED_PROTOCOLS = supportsTLSv13 ? 
+ Arrays.asList("TLSv1.3", "TLSv1.2", "TLSv1.1") : Arrays.asList("TLSv1.2", "TLSv1.1"); + } + public static final SSLClientAuth CLIENT_AUTH_DEFAULT = SSLClientAuth.REQUIRED; public static final SSLClientAuth HTTP_CLIENT_AUTH_DEFAULT = SSLClientAuth.NONE; public static final VerificationMode VERIFICATION_MODE_DEFAULT = VerificationMode.FULL; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStore.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStore.java index 2c30b5fe1affe..9cb25f6a221d0 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStore.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStore.java @@ -53,7 +53,7 @@ private static Map initializeReservedRoles() { null, null, MetadataUtils.DEFAULT_RESERVED_METADATA, null)) .put("monitoring_user", new RoleDescriptor("monitoring_user", - new String[] { "cluster:monitor/main" }, + new String[] { "cluster:monitor/main", "cluster:monitor/xpack/info" }, new RoleDescriptor.IndicesPrivileges[] { RoleDescriptor.IndicesPrivileges.builder() .indices(".monitoring-*").privileges("read", "read_cross_cluster").build() diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/SSLService.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/SSLService.java index e832de629359a..3611b6663a38f 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/SSLService.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/SSLService.java @@ -46,6 +46,7 @@ import java.util.Enumeration; import java.util.HashMap; import java.util.HashSet; +import java.util.LinkedHashMap; import java.util.LinkedList; import java.util.List; import java.util.Map; @@ -56,6 +57,8 @@ import java.util.function.Supplier; import java.util.stream.Collectors; 
+import static org.elasticsearch.xpack.core.XPackSettings.DEFAULT_SUPPORTED_PROTOCOLS; + /** * Provides access to {@link SSLEngine} and {@link SSLSocketFactory} objects based on a provided configuration. All * configurations loaded by this service must be configured on construction. @@ -63,6 +66,26 @@ public class SSLService { private static final Logger logger = LogManager.getLogger(SSLService.class); + /** + * An ordered map of protocol algorithms to SSLContext algorithms. The map is ordered from most + * secure to least secure. The names in this map are taken from the + * + * Java Security Standard Algorithm Names Documentation for Java 11. + */ + private static final Map ORDERED_PROTOCOL_ALGORITHM_MAP; + static { + LinkedHashMap protocolAlgorithmMap = new LinkedHashMap<>(); + if (DEFAULT_SUPPORTED_PROTOCOLS.contains("TLSv1.3")) { + protocolAlgorithmMap.put("TLSv1.3", "TLSv1.3"); + } + protocolAlgorithmMap.put("TLSv1.2", "TLSv1.2"); + protocolAlgorithmMap.put("TLSv1.1", "TLSv1.1"); + protocolAlgorithmMap.put("TLSv1", "TLSv1"); + protocolAlgorithmMap.put("SSLv3", "SSLv3"); + protocolAlgorithmMap.put("SSLv2", "SSL"); + protocolAlgorithmMap.put("SSLv2Hello", "SSL"); + ORDERED_PROTOCOL_ALGORITHM_MAP = Collections.unmodifiableMap(protocolAlgorithmMap); + } private final Settings settings; @@ -691,47 +714,19 @@ public SSLConfiguration getSSLConfiguration(String contextName) { /** * Maps the supported protocols to an appropriate ssl context algorithm. We make an attempt to use the "best" algorithm when * possible. The names in this method are taken from the - * JCA Standard Algorithm Name - * Documentation for Java 8. + * Java Security + * Standard Algorithm Names Documentation for Java 11. 
*/ private static String sslContextAlgorithm(List supportedProtocols) { if (supportedProtocols.isEmpty()) { - return "TLSv1.2"; - } - - String algorithm = "SSL"; - for (String supportedProtocol : supportedProtocols) { - switch (supportedProtocol) { - case "TLSv1.2": - return "TLSv1.2"; - case "TLSv1.1": - if ("TLSv1.2".equals(algorithm) == false) { - algorithm = "TLSv1.1"; - } - break; - case "TLSv1": - switch (algorithm) { - case "TLSv1.2": - case "TLSv1.1": - break; - default: - algorithm = "TLSv1"; - } - break; - case "SSLv3": - switch (algorithm) { - case "SSLv2": - case "SSL": - algorithm = "SSLv3"; - } - break; - case "SSLv2": - case "SSLv2Hello": - break; - default: - throw new IllegalArgumentException("found unexpected value in supported protocols: " + supportedProtocol); + throw new IllegalArgumentException("no SSL/TLS protocols have been configured"); + } + for (Entry entry : ORDERED_PROTOCOL_ALGORITHM_MAP.entrySet()) { + if (supportedProtocols.contains(entry.getKey())) { + return entry.getValue(); } } - return algorithm; + throw new IllegalArgumentException("no supported SSL/TLS protocol was found in the configured supported protocols: " + + supportedProtocols); } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/XPackSettingsTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/XPackSettingsTests.java index 7689ae4088f34..004b46897a48e 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/XPackSettingsTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/XPackSettingsTests.java @@ -9,9 +9,11 @@ import org.elasticsearch.test.ESTestCase; import javax.crypto.Cipher; import javax.crypto.SecretKeyFactory; +import javax.net.ssl.SSLContext; import java.security.NoSuchAlgorithmException; +import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.hasItem; import static org.hamcrest.Matchers.not; 
@@ -48,6 +50,16 @@ public void testPasswordHashingAlgorithmSettingValidation() { Settings.builder().put(XPackSettings.PASSWORD_HASHING_ALGORITHM.getKey(), bcryptAlgo).build())); } + public void testDefaultSupportedProtocolsWithTLSv13() throws Exception { + assumeTrue("current JVM does not support TLSv1.3", supportTLSv13()); + assertThat(XPackSettings.DEFAULT_SUPPORTED_PROTOCOLS, contains("TLSv1.3", "TLSv1.2", "TLSv1.1")); + } + + public void testDefaultSupportedProtocolsWithoutTLSv13() throws Exception { + assumeFalse("current JVM supports TLSv1.3", supportTLSv13()); + assertThat(XPackSettings.DEFAULT_SUPPORTED_PROTOCOLS, contains("TLSv1.2", "TLSv1.1")); + } + private boolean isSecretkeyFactoryAlgoAvailable(String algorithmId) { try { SecretKeyFactory.getInstance(algorithmId); @@ -56,4 +68,13 @@ private boolean isSecretkeyFactoryAlgoAvailable(String algorithmId) { return false; } } + + private boolean supportTLSv13() { + try { + SSLContext.getInstance("TLSv1.3"); + return true; + } catch (NoSuchAlgorithmException e) { + return false; + } + } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java index 35e2043acd809..f0da0c5775e1f 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java @@ -47,6 +47,7 @@ import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.transport.TransportRequest; +import org.elasticsearch.xpack.core.action.XPackInfoAction; import org.elasticsearch.xpack.core.ml.MlMetaIndex; import org.elasticsearch.xpack.core.ml.action.CloseJobAction; import org.elasticsearch.xpack.core.ml.action.DeleteCalendarAction; @@ -405,6 +406,7 @@ 
public void testMonitoringUserRole() { Role monitoringUserRole = Role.builder(roleDescriptor, null).build(); assertThat(monitoringUserRole.cluster().check(MainAction.NAME, request), is(true)); + assertThat(monitoringUserRole.cluster().check(XPackInfoAction.NAME, request), is(true)); assertThat(monitoringUserRole.cluster().check(ClusterHealthAction.NAME, request), is(false)); assertThat(monitoringUserRole.cluster().check(ClusterStateAction.NAME, request), is(false)); assertThat(monitoringUserRole.cluster().check(ClusterStatsAction.NAME, request), is(false)); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ssl/SSLConfigurationReloaderTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ssl/SSLConfigurationReloaderTests.java index 318d8e4150a1d..6857d8a0456e3 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ssl/SSLConfigurationReloaderTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ssl/SSLConfigurationReloaderTests.java @@ -26,7 +26,6 @@ import javax.net.ssl.SSLContext; import javax.net.ssl.SSLHandshakeException; - import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; @@ -263,7 +262,7 @@ public void testReloadingPEMTrustConfig() throws Exception { try (MockWebServer server = getSslServer(serverKeyPath, serverCertPath, "testnode")) { final Consumer trustMaterialPreChecks = (context) -> { try (CloseableHttpClient client = HttpClients.custom().setSSLContext(context).build()) { - privilegedConnect(() -> client.execute(new HttpGet("https://localhost:" + server.getPort())).close()); + privilegedConnect(() -> client.execute(new HttpGet("https://localhost:" + server.getPort())).close()); } catch (Exception e) { throw new RuntimeException("Exception connecting to the mock server", e); } @@ -480,7 +479,9 @@ private static MockWebServer getSslServer(Path keyStorePath, String keyStorePass try (InputStream is =
Files.newInputStream(keyStorePath)) { keyStore.load(is, keyStorePass.toCharArray()); } - final SSLContext sslContext = new SSLContextBuilder().loadKeyMaterial(keyStore, keyStorePass.toCharArray()) + final SSLContext sslContext = new SSLContextBuilder() + .loadKeyMaterial(keyStore, keyStorePass.toCharArray()) + .setProtocol("TLSv1.2") .build(); MockWebServer server = new MockWebServer(sslContext, false); server.enqueue(new MockResponse().setResponseCode(200).setBody("body")); @@ -494,7 +495,9 @@ private static MockWebServer getSslServer(Path keyPath, Path certPath, String pa keyStore.load(null, password.toCharArray()); keyStore.setKeyEntry("testnode_ec", PemUtils.readPrivateKey(keyPath, password::toCharArray), password.toCharArray(), CertParsingUtils.readCertificates(Collections.singletonList(certPath))); - final SSLContext sslContext = new SSLContextBuilder().loadKeyMaterial(keyStore, password.toCharArray()) + final SSLContext sslContext = new SSLContextBuilder() + .loadKeyMaterial(keyStore, password.toCharArray()) + .setProtocol("TLSv1.2") .build(); MockWebServer server = new MockWebServer(sslContext, false); server.enqueue(new MockResponse().setResponseCode(200).setBody("body")); @@ -509,7 +512,10 @@ private static CloseableHttpClient getSSLClient(Path trustStorePath, String trus try (InputStream is = Files.newInputStream(trustStorePath)) { trustStore.load(is, trustStorePass.toCharArray()); } - final SSLContext sslContext = new SSLContextBuilder().loadTrustMaterial(trustStore, null).build(); + final SSLContext sslContext = new SSLContextBuilder() + .loadTrustMaterial(trustStore, null) + .setProtocol("TLSv1.2") + .build(); return HttpClients.custom().setSSLContext(sslContext).build(); } @@ -526,7 +532,10 @@ private static CloseableHttpClient getSSLClient(List trustedCertificatePat for (Certificate cert : CertParsingUtils.readCertificates(trustedCertificatePaths)) { trustStore.setCertificateEntry(cert.toString(), cert); } - final SSLContext sslContext = new 
SSLContextBuilder().loadTrustMaterial(trustStore, null).build(); + final SSLContext sslContext = new SSLContextBuilder() + .loadTrustMaterial(trustStore, null) + .setProtocol("TLSv1.2") + .build(); return HttpClients.custom().setSSLContext(sslContext).build(); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportCloseJobAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportCloseJobAction.java index 1076533660273..1a8aea05c458b 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportCloseJobAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportCloseJobAction.java @@ -6,6 +6,7 @@ package org.elasticsearch.xpack.ml.action; import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionListenerResponseHandler; import org.elasticsearch.action.FailedNodeException; @@ -16,6 +17,7 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.AtomicArray; @@ -272,7 +274,12 @@ protected void taskOperation(CloseJobAction.Request request, TransportOpenJobAct threadPool.executor(MachineLearning.UTILITY_THREAD_POOL_NAME).execute(new AbstractRunnable() { @Override public void onFailure(Exception e) { - listener.onFailure(e); + if (e instanceof ResourceNotFoundException && Strings.isAllOrWildcard(new String[]{request.getJobId()})) { + jobTask.closeJob("close job (api)"); + listener.onResponse(new CloseJobAction.Response(true)); + } else { + listener.onFailure(e); + } } @Override @@ -332,7 +339,10 @@ public void 
onResponse(PersistentTasksCustomMetaData.PersistentTask task) { @Override public void onFailure(Exception e) { final int slot = counter.incrementAndGet(); - failures.set(slot - 1, e); + if ((e instanceof ResourceNotFoundException && + Strings.isAllOrWildcard(new String[]{request.getJobId()})) == false) { + failures.set(slot - 1, e); + } if (slot == numberOfJobs) { sendResponseOrFailure(request.getJobId(), listener, failures); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStopDatafeedAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStopDatafeedAction.java index 63c47996881c2..636138a855bce 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStopDatafeedAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStopDatafeedAction.java @@ -16,6 +16,7 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.AtomicArray; @@ -187,7 +188,10 @@ public void onResponse(PersistentTasksCustomMetaData.PersistentTask persisten @Override public void onFailure(Exception e) { final int slot = counter.incrementAndGet(); - failures.set(slot - 1, e); + if ((e instanceof ResourceNotFoundException && + Strings.isAllOrWildcard(new String[]{request.getDatafeedId()})) == false) { + failures.set(slot - 1, e); + } if (slot == startedDatafeeds.size()) { sendResponseOrFailure(request.getDatafeedId(), listener, failures); } @@ -215,7 +219,13 @@ protected void taskOperation(StopDatafeedAction.Request request, TransportStartD threadPool.executor(MachineLearning.UTILITY_THREAD_POOL_NAME).execute(new AbstractRunnable() { @Override public void 
onFailure(Exception e) { - listener.onFailure(e); + if ((e instanceof ResourceNotFoundException && + Strings.isAllOrWildcard(new String[]{request.getDatafeedId()}))) { + datafeedTask.stop("stop_datafeed (api)", request.getStopTimeout()); + listener.onResponse(new StopDatafeedAction.Response(true)); + } else { + listener.onFailure(e); + } } @Override diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/FileStructureUtils.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/FileStructureUtils.java index ba22b170ecea0..9172de9dedaa5 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/FileStructureUtils.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/FileStructureUtils.java @@ -353,7 +353,7 @@ public static Map makeIngestPipelineDefinition(String grokPatter if (needClientTimezone) { dateProcessorSettings.put("timezone", "{{ " + BEAT_TIMEZONE_FIELD + " }}"); } - dateProcessorSettings.put("formats", jodaBwcJavaTimestampFormatsForIngestPipeline(timestampFormats)); + dateProcessorSettings.put("formats", timestampFormats); processors.add(Collections.singletonMap("date", dateProcessorSettings)); } @@ -365,19 +365,4 @@ public static Map makeIngestPipelineDefinition(String grokPatter pipeline.put(Pipeline.PROCESSORS_KEY, processors); return pipeline; } - - // TODO: remove this method when Java time formats are the default - static List jodaBwcJavaTimestampFormatsForIngestPipeline(List javaTimestampFormats) { - return javaTimestampFormats.stream().map(format -> { - switch (format) { - case "ISO8601": - case "UNIX_MS": - case "UNIX": - case "TAI64N": - return format; - default: - return "8" + format; - } - }).collect(Collectors.toList()); - } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/TimestampFormatFinder.java 
b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/TimestampFormatFinder.java index 07dba7dcb2c64..c19a93a7be99e 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/TimestampFormatFinder.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/TimestampFormatFinder.java @@ -472,8 +472,7 @@ public Map getEsDateMappingTypeWithFormat() { case "UNIX": return Stream.of("epoch_second"); default: - // TODO: remove the "8" prefix when Java time formats are the default - return Stream.of("8" + format); + return Stream.of(format); } }).collect(Collectors.joining("||")); if (formats.isEmpty() == false) { diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/FileStructureUtilsTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/FileStructureUtilsTests.java index 8140d2fa6034f..264521e68fb51 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/FileStructureUtilsTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/FileStructureUtilsTests.java @@ -331,8 +331,7 @@ public void testGuessMappingsAndCalculateFieldStats() { assertEquals(Collections.singletonMap(FileStructureUtils.MAPPING_TYPE_SETTING, "keyword"), mappings.get("foo")); Map expectedTimeMapping = new HashMap<>(); expectedTimeMapping.put(FileStructureUtils.MAPPING_TYPE_SETTING, "date"); - // TODO: remove the "8" prefix when Java time formats are the default - expectedTimeMapping.put(FileStructureUtils.MAPPING_FORMAT_SETTING, "8" + "yyyy-MM-dd HH:mm:ss,SSS"); + expectedTimeMapping.put(FileStructureUtils.MAPPING_FORMAT_SETTING, "yyyy-MM-dd HH:mm:ss,SSS"); assertEquals(expectedTimeMapping, mappings.get("time")); assertEquals(Collections.singletonMap(FileStructureUtils.MAPPING_TYPE_SETTING, "long"), mappings.get("bar")); assertNull(mappings.get("nothing")); @@ -372,8 +371,7 @@ 
public void testMakeIngestPipelineDefinitionGivenStructuredWithTimestamp() { assertNotNull(dateProcessor); assertEquals(timestampField, dateProcessor.get("field")); assertEquals(needClientTimezone, dateProcessor.containsKey("timezone")); - // TODO: remove the call to jodaBwcJavaTimestampFormatsForIngestPipeline() when Java time formats are the default - assertEquals(FileStructureUtils.jodaBwcJavaTimestampFormatsForIngestPipeline(timestampFormats), dateProcessor.get("formats")); + assertEquals(timestampFormats, dateProcessor.get("formats")); // After removing the two expected fields there should be nothing left in the pipeline assertEquals(Collections.emptyMap(), pipeline); @@ -406,8 +404,7 @@ public void testMakeIngestPipelineDefinitionGivenSemiStructured() { assertNotNull(dateProcessor); assertEquals(timestampField, dateProcessor.get("field")); assertEquals(needClientTimezone, dateProcessor.containsKey("timezone")); - // TODO: remove the call to jodaBwcJavaTimestampFormatsForIngestPipeline() when Java time formats are the default - assertEquals(FileStructureUtils.jodaBwcJavaTimestampFormatsForIngestPipeline(timestampFormats), dateProcessor.get("formats")); + assertEquals(timestampFormats, dateProcessor.get("formats")); Map removeProcessor = (Map) processors.get(2).get("remove"); assertNotNull(removeProcessor);