From f3cde06a1dcbe57d1fb6641a2af8910ca4f8db6d Mon Sep 17 00:00:00 2001 From: Yuri Astrakhan Date: Thu, 31 Jan 2019 19:11:30 -0500 Subject: [PATCH 01/54] geotile_grid implementation (#37842) Implements the `geotile_grid` aggregation. This patch refactors the previous implementation (https://github.com/elastic/elasticsearch/pull/30240). This code uses the same base classes as the `geohash_grid` agg, but uses a different hashing algorithm so that buckets remain consistent across zoom levels. Each grid bucket is aligned to Web Mercator tiles. --- .../client/RestHighLevelClient.java | 3 + .../bucket/geotilegrid-aggregation.asciidoc | 185 ++++++++++++++++ .../search.aggregation/290_geotile_grid.yml | 65 ++++++ .../elasticsearch/search/SearchModule.java | 4 + .../aggregations/AggregationBuilders.java | 9 + .../GeoTileGridAggregationBuilder.java | 88 ++++++++ .../bucket/geogrid/GeoTileGridAggregator.java | 57 +++++ .../geogrid/GeoTileGridAggregatorFactory.java | 78 +++++++ .../bucket/geogrid/GeoTileUtils.java | 195 ++++++++++++++++ .../bucket/geogrid/InternalGeoHashGrid.java | 4 +- .../bucket/geogrid/InternalGeoTileGrid.java | 69 ++++++ .../geogrid/InternalGeoTileGridBucket.java | 55 +++++ .../bucket/geogrid/ParsedGeoTileGrid.java | 42 ++++ .../geogrid/ParsedGeoTileGridBucket.java | 42 ++++ .../aggregations/AggregationsTests.java | 2 + .../aggregations/bucket/ShardReduceIT.java | 16 ++ .../geogrid/GeoGridAggregatorTestCase.java | 8 + .../geogrid/GeoTileGridAggregatorTests.java | 50 +++++ .../geogrid/GeoTileGridParserTests.java | 73 ++++++ .../bucket/geogrid/GeoTileGridTests.java | 56 +++++ .../bucket/geogrid/GeoTileUtilsTests.java | 209 ++++++++++++++++++ .../test/InternalAggregationTestCase.java | 3 + 22 files changed, 1310 insertions(+), 3 deletions(-) create mode 100644 docs/reference/aggregations/bucket/geotilegrid-aggregation.asciidoc create mode 100644 rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/290_geotile_grid.yml create mode 100644 server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoTileGridAggregationBuilder.java create mode 100644 server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoTileGridAggregator.java create mode 100644 server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoTileGridAggregatorFactory.java create mode 100644 server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoTileUtils.java create mode 100644 server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/InternalGeoTileGrid.java create mode 100644 server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/InternalGeoTileGridBucket.java create mode 100644 server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/ParsedGeoTileGrid.java create mode 100644 server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/ParsedGeoTileGridBucket.java create mode 100644 server/src/test/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoTileGridAggregatorTests.java create mode 100644 server/src/test/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoTileGridParserTests.java create mode 100644 server/src/test/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoTileGridTests.java create mode 100644 server/src/test/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoTileUtilsTests.java diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java
b/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java index 2eebd0cc56c66..51ed51d1a696a 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java @@ -95,6 +95,8 @@ import org.elasticsearch.search.aggregations.bucket.filter.ParsedFilters; import org.elasticsearch.search.aggregations.bucket.geogrid.GeoHashGridAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.geogrid.ParsedGeoHashGrid; +import org.elasticsearch.search.aggregations.bucket.geogrid.ParsedGeoTileGrid; +import org.elasticsearch.search.aggregations.bucket.geogrid.GeoTileGridAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.global.GlobalAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.global.ParsedGlobal; import org.elasticsearch.search.aggregations.bucket.histogram.AutoDateHistogramAggregationBuilder; @@ -1760,6 +1762,7 @@ static List getDefaultNamedXContents() { map.put(FilterAggregationBuilder.NAME, (p, c) -> ParsedFilter.fromXContent(p, (String) c)); map.put(InternalSampler.PARSER_NAME, (p, c) -> ParsedSampler.fromXContent(p, (String) c)); map.put(GeoHashGridAggregationBuilder.NAME, (p, c) -> ParsedGeoHashGrid.fromXContent(p, (String) c)); + map.put(GeoTileGridAggregationBuilder.NAME, (p, c) -> ParsedGeoTileGrid.fromXContent(p, (String) c)); map.put(RangeAggregationBuilder.NAME, (p, c) -> ParsedRange.fromXContent(p, (String) c)); map.put(DateRangeAggregationBuilder.NAME, (p, c) -> ParsedDateRange.fromXContent(p, (String) c)); map.put(GeoDistanceAggregationBuilder.NAME, (p, c) -> ParsedGeoDistance.fromXContent(p, (String) c)); diff --git a/docs/reference/aggregations/bucket/geotilegrid-aggregation.asciidoc b/docs/reference/aggregations/bucket/geotilegrid-aggregation.asciidoc new file mode 100644 index 0000000000000..ac173ec2b002f --- /dev/null +++ b/docs/reference/aggregations/bucket/geotilegrid-aggregation.asciidoc @@ -0,0 +1,185 @@ +[[search-aggregations-bucket-geotilegrid-aggregation]] +=== GeoTile Grid Aggregation + +A multi-bucket aggregation that works on `geo_point` fields and groups points into +buckets that represent cells in a grid. The resulting grid can be sparse and only +contains cells that have matching data. Each cell corresponds to a +https://en.wikipedia.org/wiki/Tiled_web_map[map tile] as used by many online map +sites. Each cell is labeled using a "{zoom}/{x}/{y}" format, where zoom is equal +to the user-specified precision. + +* High precision keys have a larger range for x and y, and represent tiles that +cover only a small area. +* Low precision keys have a smaller range for x and y, and represent tiles that +each cover a large area. + +See https://wiki.openstreetmap.org/wiki/Zoom_levels[Zoom level documentation] +on how precision (zoom) correlates to size on the ground. Precision for this +aggregation can be between 0 and 29, inclusive. + +WARNING: The highest-precision geotile (zoom 29) produces cells that cover +less than 10cm by 10cm of land, and so high-precision requests can be very +costly in terms of RAM and result sizes. Please see the example below on how +to first filter the aggregation to a smaller geographic area before requesting +high levels of detail.
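+
+For intuition, the cell for a single point can be computed with standard
+Web Mercator tile math. The following stand-alone Java sketch mirrors the
+formula this aggregation uses internally; the `tileKey` helper is purely
+illustrative and is not part of any Elasticsearch API:
+
+[source,java]
+--------------------------------------------------
+// Illustrative sketch of the Web Mercator tile math behind each bucket key.
+static String tileKey(double lon, double lat, int zoom) {
+    final long tiles = 1L << zoom;                   // tiles per axis at this zoom
+    long x = (long) Math.floor((lon + 180) / 360 * tiles);
+    double latSin = Math.sin(Math.toRadians(lat));
+    long y = (long) Math.floor(
+        (0.5 - Math.log((1 + latSin) / (1 - latSin)) / (4 * Math.PI)) * tiles);
+    x = Math.max(0, Math.min(tiles - 1, x));         // clip edge values
+    y = Math.max(0, Math.min(tiles - 1, y));
+    return zoom + "/" + x + "/" + y;
+}
+--------------------------------------------------
+
+For example, `tileKey(2.336389, 48.861111, 8)` (the coordinates of the
+Musée du Louvre used in the examples below) returns `"8/129/88"`, which
+matches one of the bucket keys in the response shown below.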
+ +The specified field must be of type `geo_point` (which can only be set explicitly in the mappings) and it can also hold an array of `geo_point` +fields, in which case all points will be taken into account during aggregation. + + +==== Simple low-precision request + +[source,js] +-------------------------------------------------- +PUT /museums +{ + "mappings": { + "properties": { + "location": { + "type": "geo_point" + } + } + } +} + +POST /museums/_bulk?refresh +{"index":{"_id":1}} +{"location": "52.374081,4.912350", "name": "NEMO Science Museum"} +{"index":{"_id":2}} +{"location": "52.369219,4.901618", "name": "Museum Het Rembrandthuis"} +{"index":{"_id":3}} +{"location": "52.371667,4.914722", "name": "Nederlands Scheepvaartmuseum"} +{"index":{"_id":4}} +{"location": "51.222900,4.405200", "name": "Letterenhuis"} +{"index":{"_id":5}} +{"location": "48.861111,2.336389", "name": "Musée du Louvre"} +{"index":{"_id":6}} +{"location": "48.860000,2.327000", "name": "Musée d'Orsay"} + +POST /museums/_search?size=0 +{ + "aggregations" : { + "large-grid" : { + "geotile_grid" : { + "field" : "location", + "precision" : 8 + } + } + } +} +-------------------------------------------------- +// CONSOLE + +Response: + +[source,js] +-------------------------------------------------- +{ + ... + "aggregations": { + "large-grid": { + "buckets": [ + { + "key" : "8/131/84", + "doc_count" : 3 + }, + { + "key" : "8/129/88", + "doc_count" : 2 + }, + { + "key" : "8/131/85", + "doc_count" : 1 + } + ] + } + } +} +-------------------------------------------------- +// TESTRESPONSE[s/\.\.\./"took": $body.took,"_shards": $body._shards,"hits":$body.hits,"timed_out":false,/] + +==== High-precision requests + +When requesting detailed buckets (typically for displaying a "zoomed in" map) +a filter like <<query-dsl-geo-bounding-box-query,geo_bounding_box>> should be +applied to narrow the subject area, otherwise potentially millions of buckets +will be created and returned. + +[source,js] +-------------------------------------------------- +POST /museums/_search?size=0 +{ + "aggregations" : { + "zoomed-in" : { + "filter" : { + "geo_bounding_box" : { + "location" : { + "top_left" : "52.4, 4.9", + "bottom_right" : "52.3, 5.0" + } + } + }, + "aggregations":{ + "zoom1":{ + "geotile_grid" : { + "field": "location", + "precision": 22 + } + } + } + } + } +} +-------------------------------------------------- +// CONSOLE +// TEST[continued] + +[source,js] +-------------------------------------------------- +{ + ... + "aggregations" : { + "zoomed-in" : { + "doc_count" : 3, + "zoom1" : { + "buckets" : [ + { + "key" : "22/2154412/1378379", + "doc_count" : 1 + }, + { + "key" : "22/2154385/1378332", + "doc_count" : 1 + }, + { + "key" : "22/2154259/1378425", + "doc_count" : 1 + } + ] + } + } + } +} +-------------------------------------------------- +// TESTRESPONSE[s/\.\.\./"took": $body.took,"_shards": $body._shards,"hits":$body.hits,"timed_out":false,/] + + +==== Options + +[horizontal] +field:: Mandatory. The name of the field indexed with GeoPoints. + +precision:: Optional. The integer zoom of the key used to define + cells/buckets in the results. Defaults to 7. + Values outside of [0,29] will be rejected. + +size:: Optional. The maximum number of geotile buckets to return + (defaults to 10,000). When results are trimmed, buckets are + prioritised based on the volumes of documents they contain. + +shard_size:: Optional.
To allow for more accurate counting of the top cells + returned in the final result, the aggregation defaults to + returning `max(10,(size x number-of-shards))` buckets from each + shard. If this heuristic is undesirable, the number considered + from each shard can be overridden using this parameter. diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/290_geotile_grid.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/290_geotile_grid.yml new file mode 100644 index 0000000000000..2db498a0cacf0 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/290_geotile_grid.yml @@ -0,0 +1,65 @@ +setup: + - skip: + version: " - 6.99.99" + reason: "added in 7.0.0" + - do: + indices.create: + include_type_name: false + index: test_1 + body: + settings: + number_of_replicas: 0 + mappings: + properties: + location: + type: geo_point + +--- +"Basic test": + - do: + bulk: + refresh: true + body: + - index: + _index: test_1 + _id: 1 + - location: "52.374081,4.912350" + - index: + _index: test_1 + _id: 2 + - location: "52.369219,4.901618" + - index: + _index: test_1 + _id: 3 + - location: "52.371667,4.914722" + - index: + _index: test_1 + _id: 4 + - location: "51.222900,4.405200" + - index: + _index: test_1 + _id: 5 + - location: "48.861111,2.336389" + - index: + _index: test_1 + _id: 6 + - location: "48.860000,2.327000" + + - do: + search: + rest_total_hits_as_int: true + body: + aggregations: + grid: + geotile_grid: + field: location + precision: 8 + + + - match: { hits.total: 6 } + - match: { aggregations.grid.buckets.0.key: "8/131/84" } + - match: { aggregations.grid.buckets.0.doc_count: 3 } + - match: { aggregations.grid.buckets.1.key: "8/129/88" } + - match: { aggregations.grid.buckets.1.doc_count: 2 } + - match: { aggregations.grid.buckets.2.key: "8/131/85" } + - match: { aggregations.grid.buckets.2.doc_count: 1 } diff --git a/server/src/main/java/org/elasticsearch/search/SearchModule.java b/server/src/main/java/org/elasticsearch/search/SearchModule.java index 3d93effecc545..81c6273ec1a36 100644 --- a/server/src/main/java/org/elasticsearch/search/SearchModule.java +++ b/server/src/main/java/org/elasticsearch/search/SearchModule.java @@ -110,6 +110,8 @@ import org.elasticsearch.search.aggregations.bucket.filter.InternalFilters; import org.elasticsearch.search.aggregations.bucket.geogrid.GeoHashGridAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.geogrid.InternalGeoHashGrid; +import org.elasticsearch.search.aggregations.bucket.geogrid.GeoTileGridAggregationBuilder; +import org.elasticsearch.search.aggregations.bucket.geogrid.InternalGeoTileGrid; import org.elasticsearch.search.aggregations.bucket.global.GlobalAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.global.InternalGlobal; import org.elasticsearch.search.aggregations.bucket.histogram.AutoDateHistogramAggregationBuilder; @@ -422,6 +424,8 @@ private void registerAggregations(List plugins) { GeoDistanceAggregationBuilder::parse).addResultReader(InternalGeoDistance::new)); registerAggregation(new AggregationSpec(GeoHashGridAggregationBuilder.NAME, GeoHashGridAggregationBuilder::new, GeoHashGridAggregationBuilder::parse).addResultReader(InternalGeoHashGrid::new)); + registerAggregation(new AggregationSpec(GeoTileGridAggregationBuilder.NAME, GeoTileGridAggregationBuilder::new, + GeoTileGridAggregationBuilder::parse).addResultReader(InternalGeoTileGrid::new)); registerAggregation(new
AggregationSpec(NestedAggregationBuilder.NAME, NestedAggregationBuilder::new, NestedAggregationBuilder::parse).addResultReader(InternalNested::new)); registerAggregation(new AggregationSpec(ReverseNestedAggregationBuilder.NAME, ReverseNestedAggregationBuilder::new, diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/AggregationBuilders.java b/server/src/main/java/org/elasticsearch/search/aggregations/AggregationBuilders.java index fd56172325230..d78e42ba89603 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/AggregationBuilders.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/AggregationBuilders.java @@ -30,6 +30,8 @@ import org.elasticsearch.search.aggregations.bucket.filter.FiltersAggregator.KeyedFilter; import org.elasticsearch.search.aggregations.bucket.geogrid.InternalGeoHashGrid; import org.elasticsearch.search.aggregations.bucket.geogrid.GeoHashGridAggregationBuilder; +import org.elasticsearch.search.aggregations.bucket.geogrid.InternalGeoTileGrid; +import org.elasticsearch.search.aggregations.bucket.geogrid.GeoTileGridAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.global.Global; import org.elasticsearch.search.aggregations.bucket.global.GlobalAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramAggregationBuilder; @@ -250,6 +252,13 @@ public static GeoHashGridAggregationBuilder geohashGrid(String name) { return new GeoHashGridAggregationBuilder(name); } + /** + * Create a new {@link InternalGeoTileGrid} aggregation with the given name. + */ + public static GeoTileGridAggregationBuilder geotileGrid(String name) { + return new GeoTileGridAggregationBuilder(name); + } + /** * Create a new {@link SignificantTerms} aggregation with the given name. */ diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoTileGridAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoTileGridAggregationBuilder.java new file mode 100644 index 0000000000000..33efeeb5d38b6 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoTileGridAggregationBuilder.java @@ -0,0 +1,88 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.search.aggregations.bucket.geogrid; + +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.search.aggregations.AggregationBuilder; +import org.elasticsearch.search.aggregations.AggregatorFactories; +import org.elasticsearch.search.aggregations.AggregatorFactory; +import org.elasticsearch.search.aggregations.support.ValuesSource; +import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorFactory; +import org.elasticsearch.search.aggregations.support.ValuesSourceConfig; +import org.elasticsearch.search.internal.SearchContext; + +import java.io.IOException; +import java.util.Map; + +public class GeoTileGridAggregationBuilder extends GeoGridAggregationBuilder { + public static final String NAME = "geotile_grid"; + private static final int DEFAULT_PRECISION = 7; + private static final int DEFAULT_MAX_NUM_CELLS = 10000; + + private static final ObjectParser PARSER = createParser(NAME, GeoTileUtils::parsePrecision); + + public GeoTileGridAggregationBuilder(String name) { + super(name); + precision(DEFAULT_PRECISION); + size(DEFAULT_MAX_NUM_CELLS); + shardSize = -1; + } + + public GeoTileGridAggregationBuilder(StreamInput in) throws IOException { + super(in); + } + + @Override + public GeoGridAggregationBuilder precision(int precision) { + this.precision = GeoTileUtils.checkPrecisionRange(precision); + return this; + } + + @Override + protected ValuesSourceAggregatorFactory createFactory( + String name, ValuesSourceConfig config, int precision, int requiredSize, int shardSize, + SearchContext context, AggregatorFactory parent, AggregatorFactories.Builder subFactoriesBuilder, + Map metaData + ) throws IOException { + return new GeoTileGridAggregatorFactory(name, config, precision, requiredSize, shardSize, context, parent, + subFactoriesBuilder, metaData); + } + + private GeoTileGridAggregationBuilder(GeoTileGridAggregationBuilder clone, AggregatorFactories.Builder factoriesBuilder, + Map metaData) { + super(clone, factoriesBuilder, metaData); + } + + @Override + protected AggregationBuilder shallowCopy(AggregatorFactories.Builder factoriesBuilder, Map metaData) { + return new GeoTileGridAggregationBuilder(this, factoriesBuilder, metaData); + } + + public static GeoGridAggregationBuilder parse(String aggregationName, XContentParser parser) throws IOException { + return PARSER.parse(parser, new GeoTileGridAggregationBuilder(aggregationName), null); + } + + @Override + public String getType() { + return NAME; + } +} diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoTileGridAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoTileGridAggregator.java new file mode 100644 index 0000000000000..d2ff5ed82513c --- /dev/null +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoTileGridAggregator.java @@ -0,0 +1,57 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.aggregations.bucket.geogrid; + +import org.elasticsearch.search.aggregations.Aggregator; +import org.elasticsearch.search.aggregations.AggregatorFactories; +import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; +import org.elasticsearch.search.internal.SearchContext; + +import java.io.IOException; +import java.util.Collections; +import java.util.List; +import java.util.Map; + +/** + * Aggregates data expressed as geotile longs (for efficiency's sake) but formats results as geotile strings. + */ +public class GeoTileGridAggregator extends GeoGridAggregator { + + GeoTileGridAggregator(String name, AggregatorFactories factories, CellIdSource valuesSource, + int requiredSize, int shardSize, SearchContext aggregationContext, Aggregator parent, + List pipelineAggregators, Map metaData) throws IOException { + super(name, factories, valuesSource, requiredSize, shardSize, aggregationContext, parent, pipelineAggregators, metaData); + } + + @Override + InternalGeoTileGrid buildAggregation(String name, int requiredSize, List buckets, + List pipelineAggregators, Map metaData) { + return new InternalGeoTileGrid(name, requiredSize, buckets, pipelineAggregators, metaData); + } + + @Override + public InternalGeoTileGrid buildEmptyAggregation() { + return new InternalGeoTileGrid(name, requiredSize, Collections.emptyList(), pipelineAggregators(), metaData()); + } + + InternalGeoGridBucket newEmptyBucket() { + return new InternalGeoTileGridBucket(0, 0, null); + } +} diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoTileGridAggregatorFactory.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoTileGridAggregatorFactory.java new file mode 100644 index 0000000000000..87077a89d6c23 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoTileGridAggregatorFactory.java @@ -0,0 +1,78 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.search.aggregations.bucket.geogrid; + +import org.elasticsearch.search.aggregations.Aggregator; +import org.elasticsearch.search.aggregations.AggregatorFactories; +import org.elasticsearch.search.aggregations.AggregatorFactory; +import org.elasticsearch.search.aggregations.InternalAggregation; +import org.elasticsearch.search.aggregations.NonCollectingAggregator; +import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; +import org.elasticsearch.search.aggregations.support.ValuesSource; +import org.elasticsearch.search.aggregations.support.ValuesSource.GeoPoint; +import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorFactory; +import org.elasticsearch.search.aggregations.support.ValuesSourceConfig; +import org.elasticsearch.search.internal.SearchContext; + +import java.io.IOException; +import java.util.Collections; +import java.util.List; +import java.util.Map; + +public class GeoTileGridAggregatorFactory extends ValuesSourceAggregatorFactory { + + private final int precision; + private final int requiredSize; + private final int shardSize; + + GeoTileGridAggregatorFactory(String name, ValuesSourceConfig config, int precision, int requiredSize, + int shardSize, SearchContext context, AggregatorFactory parent, + AggregatorFactories.Builder subFactoriesBuilder, Map metaData + ) throws IOException { + super(name, config, context, parent, subFactoriesBuilder, metaData); + this.precision = precision; + this.requiredSize = requiredSize; + this.shardSize = shardSize; + } + + @Override + protected Aggregator createUnmapped(Aggregator parent, List pipelineAggregators, Map metaData) + throws IOException { + final InternalAggregation aggregation = new InternalGeoTileGrid(name, requiredSize, + Collections.emptyList(), pipelineAggregators, metaData); + return new NonCollectingAggregator(name, context, parent, pipelineAggregators, metaData) { + @Override + public InternalAggregation buildEmptyAggregation() { + return aggregation; + } + }; + } + + @Override + protected Aggregator doCreateInternal(final ValuesSource.GeoPoint valuesSource, Aggregator parent, boolean collectsFromSingleBucket, + List pipelineAggregators, Map metaData) throws IOException { + if (collectsFromSingleBucket == false) { + return asMultiBucketAggregator(this, context, parent); + } + CellIdSource cellIdSource = new CellIdSource(valuesSource, precision, GeoTileUtils::longEncode); + return new GeoTileGridAggregator(name, factories, cellIdSource, requiredSize, shardSize, context, parent, + pipelineAggregators, metaData); + } +} diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoTileUtils.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoTileUtils.java new file mode 100644 index 0000000000000..d85cf6b1a56ce --- /dev/null +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoTileUtils.java @@ -0,0 +1,195 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.search.aggregations.bucket.geogrid; + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.common.geo.GeoPoint; +import org.elasticsearch.common.xcontent.ObjectParser.ValueType; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.support.XContentMapValues; + +import java.io.IOException; +import java.util.Locale; + +import static org.elasticsearch.common.geo.GeoUtils.normalizeLat; +import static org.elasticsearch.common.geo.GeoUtils.normalizeLon; + +/** + * Implements geotile key hashing, same as used by many map tile implementations. + * The string key is formatted as "zoom/x/y". + * The hash value (long) contains all three of those values compacted into a single 64-bit value: + * bits 58..63 -- zoom (0..29) + * bits 29..57 -- X tile index (0..2^zoom) + * bits 0..28 -- Y tile index (0..2^zoom) + */ +final class GeoTileUtils { + + private GeoTileUtils() {} + + /** + * Largest zoom level (precision) to use. + * This value cannot be more than (64-6)/2 = 29, because the 6 most significant bits hold the zoom level itself. + * If the zoom were not stored inside the hash, it would be possible to use up to 32. + * Note that changing this value will make serialization binary-incompatible between versions. + * Another consideration is that the index optimizes lat/lng storage, losing some precision. + * E.g. the hash of lng=140.74779717298918D lat=45.61884022447444D is "18/233561/93659", but the indexed value is shown as "18/233561/93658" + */ + static final int MAX_ZOOM = 29; + + /** + * Bit position of the zoom value within the hash - zoom is stored in the most significant 6 bits of the long. + */ + private static final int ZOOM_SHIFT = MAX_ZOOM * 2; + + /** + * Bit mask to extract just the lowest 29 bits of a long + */ + private static final long X_Y_VALUE_MASK = (1L << MAX_ZOOM) - 1; + + /** + * Parse an integer precision (zoom level). The {@link ValueType#INT} allows it to be a number or a string. + * + * The precision is expressed as a zoom level between 0 and {@link #MAX_ZOOM} (inclusive). + * + * @param parser {@link XContentParser} to parse the value from + * @return int representing precision + */ + static int parsePrecision(XContentParser parser) throws IOException, ElasticsearchParseException { + final Object node = parser.currentToken().equals(XContentParser.Token.VALUE_NUMBER) + ? Integer.valueOf(parser.intValue()) + : parser.text(); + return XContentMapValues.nodeIntegerValue(node); + } + + /** + * Assert that the precision value is within the allowed range, returning it if so, or throwing otherwise. + */ + static int checkPrecisionRange(int precision) { + if (precision < 0 || precision > MAX_ZOOM) { + throw new IllegalArgumentException("Invalid geotile_grid precision of " + + precision + ". Must be between 0 and " + MAX_ZOOM + "."); + } + return precision; + } + + /** + * Encode lon/lat to the geotile-based long format. + * The resulting hash contains the tile X and Y indexes in two adjacent 29-bit fields (they are not interleaved). + * The precision (zoom) itself is encoded in the highest bits.
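+ * For example (illustrative values): longEncode(2.336389, 48.861111, 8) encodes the same cell as the string key
+ * "8/129/88", i.e. zoom=8 in the top 6 bits, then x=129 and y=88 in the two lower 29-bit fields.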
+ */ + static long longEncode(double longitude, double latitude, int precision) { + // Mathematics for this code was adapted from https://wiki.openstreetmap.org/wiki/Slippy_map_tilenames#Java + + // Number of tiles for the current zoom level along the X and Y axis + final long tiles = 1 << checkPrecisionRange(precision); + + long xTile = (long) Math.floor((normalizeLon(longitude) + 180) / 360 * tiles); + + double latSin = Math.sin(Math.toRadians(normalizeLat(latitude))); + long yTile = (long) Math.floor((0.5 - (Math.log((1 + latSin) / (1 - latSin)) / (4 * Math.PI))) * tiles); + + // Edge coordinates can produce out-of-range tile indexes and need to be clipped. + // For example, polar regions (above/below lat 85.05112878) get normalized. + if (xTile < 0) { + xTile = 0; + } + if (xTile >= tiles) { + xTile = tiles - 1; + } + if (yTile < 0) { + yTile = 0; + } + if (yTile >= tiles) { + yTile = tiles - 1; + } + + // The zoom value is placed in front of all the bits used for the geotile: + // e.g. when max zoom is 29, the largest index would use 58 bits (57th..0th), + // leaving the top 6 bits for the zoom. See the MAX_ZOOM comment above. + return ((long) precision << ZOOM_SHIFT) | (xTile << MAX_ZOOM) | yTile; + } + + /** + * Parse a geotile hash into zoom, x, y integers. + */ + private static int[] parseHash(long hash) { + final int zoom = (int) (hash >>> ZOOM_SHIFT); + final int xTile = (int) ((hash >>> MAX_ZOOM) & X_Y_VALUE_MASK); + final int yTile = (int) (hash & X_Y_VALUE_MASK); + return new int[]{zoom, xTile, yTile}; + } + + /** + * Encode a geotile-based long hash as a "zoom/x/y" string + */ + static String stringEncode(long hash) { + int[] res = parseHash(hash); + validateZXY(res[0], res[1], res[2]); + return "" + res[0] + "/" + res[1] + "/" + res[2]; + } + + /** + * Decode a long hash as a GeoPoint (center of the tile) + */ + static GeoPoint hashToGeoPoint(long hash) { + int[] res = parseHash(hash); + return zxyToGeoPoint(res[0], res[1], res[2]); + } + + /** + * Decode a string bucket key in "zoom/x/y" format to a GeoPoint (center of the tile) + */ + static GeoPoint keyToGeoPoint(String hashAsString) { + final String[] parts = hashAsString.split("/", 4); + if (parts.length != 3) { + throw new IllegalArgumentException("Invalid geotile_grid hash string of " + + hashAsString + ". Must be three integers in the form \"zoom/x/y\"."); + } + + try { + return zxyToGeoPoint(Integer.parseInt(parts[0]), Integer.parseInt(parts[1]), Integer.parseInt(parts[2])); + } catch (NumberFormatException e) { + throw new IllegalArgumentException("Invalid geotile_grid hash string of " + + hashAsString + ". Must be three integers in the form \"zoom/x/y\".", e); + } + } + + /** + * Validates the zoom, x, and y values, and returns the number of tiles per axis at that zoom. + */ + private static int validateZXY(int zoom, int xTile, int yTile) { + final int tiles = 1 << checkPrecisionRange(zoom); + if (xTile < 0 || yTile < 0 || xTile >= tiles || yTile >= tiles) { + throw new IllegalArgumentException(String.format( + Locale.ROOT, "Zoom/X/Y combination is not valid: %d/%d/%d", zoom, xTile, yTile)); + } + return tiles; + } + + /** + * Converts zoom/x/y integers into a GeoPoint.
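+ * Uses the inverse spherical Mercator projection: lat = atan(sinh(PI - 2*PI*(y+0.5)/2^zoom)) in degrees, and
+ * lon = 360*(x+0.5)/2^zoom - 180; the +0.5 selects the center of the tile rather than its corner.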
+ */ + private static GeoPoint zxyToGeoPoint(int zoom, int xTile, int yTile) { + final int tiles = validateZXY(zoom, xTile, yTile); + final double n = Math.PI - (2.0 * Math.PI * (yTile + 0.5)) / tiles; + final double lat = Math.toDegrees(Math.atan(Math.sinh(n))); + final double lon = ((xTile + 0.5) / tiles * 360.0) - 180; + return new GeoPoint(lat, lon); + } +} diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/InternalGeoHashGrid.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/InternalGeoHashGrid.java index 0c28788666249..7c874781d0c22 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/InternalGeoHashGrid.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/InternalGeoHashGrid.java @@ -33,8 +33,6 @@ */ public class InternalGeoHashGrid extends InternalGeoGrid { - private static final String NAME = "geohash_grid"; - InternalGeoHashGrid(String name, int requiredSize, List buckets, List pipelineAggregators, Map metaData) { super(name, requiredSize, buckets, pipelineAggregators, metaData); @@ -66,6 +64,6 @@ Reader getBucketReader() { @Override public String getWriteableName() { - return NAME; + return GeoHashGridAggregationBuilder.NAME; } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/InternalGeoTileGrid.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/InternalGeoTileGrid.java new file mode 100644 index 0000000000000..8a842b66dcfca --- /dev/null +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/InternalGeoTileGrid.java @@ -0,0 +1,69 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.search.aggregations.bucket.geogrid; + +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.search.aggregations.InternalAggregations; +import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; + +import java.io.IOException; +import java.util.List; +import java.util.Map; + +/** + * Represents a grid of cells where each cell's location is determined by a geotile. + * All geotiles in a grid are of the same precision and held internally as a single long + * for efficiency's sake.
+ */ +public class InternalGeoTileGrid extends InternalGeoGrid { + + InternalGeoTileGrid(String name, int requiredSize, List buckets, + List pipelineAggregators, Map metaData) { + super(name, requiredSize, buckets, pipelineAggregators, metaData); + } + + public InternalGeoTileGrid(StreamInput in) throws IOException { + super(in); + } + + @Override + public InternalGeoGrid create(List buckets) { + return new InternalGeoTileGrid(name, requiredSize, buckets, pipelineAggregators(), metaData); + } + + @Override + public InternalGeoGridBucket createBucket(InternalAggregations aggregations, InternalGeoGridBucket prototype) { + return new InternalGeoTileGridBucket(prototype.hashAsLong, prototype.docCount, aggregations); + } + + @Override + InternalGeoGrid create(String name, int requiredSize, List buckets, List list, Map metaData) { + return new InternalGeoTileGrid(name, requiredSize, buckets, list, metaData); + } + + @Override + Reader getBucketReader() { + return InternalGeoTileGridBucket::new; + } + + @Override + public String getWriteableName() { + return GeoTileGridAggregationBuilder.NAME; + } +} diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/InternalGeoTileGridBucket.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/InternalGeoTileGridBucket.java new file mode 100644 index 0000000000000..fb9afbaaca4f8 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/InternalGeoTileGridBucket.java @@ -0,0 +1,55 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.aggregations.bucket.geogrid; + +import org.elasticsearch.common.geo.GeoPoint; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.search.aggregations.InternalAggregations; + +import java.io.IOException; + +public class InternalGeoTileGridBucket extends InternalGeoGridBucket { + InternalGeoTileGridBucket(long hashAsLong, long docCount, InternalAggregations aggregations) { + super(hashAsLong, docCount, aggregations); + } + + /** + * Read from a stream. 
+ */ + public InternalGeoTileGridBucket(StreamInput in) throws IOException { + super(in); + } + + @Override + InternalGeoTileGridBucket buildBucket(InternalGeoGridBucket bucket, long hashAsLong, long docCount, + InternalAggregations aggregations) { + return new InternalGeoTileGridBucket(hashAsLong, docCount, aggregations); + } + + @Override + public String getKeyAsString() { + return GeoTileUtils.stringEncode(hashAsLong); + } + + @Override + public GeoPoint getKey() { + return GeoTileUtils.hashToGeoPoint(hashAsLong); + } +} diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/ParsedGeoTileGrid.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/ParsedGeoTileGrid.java new file mode 100644 index 0000000000000..e88c7ad305433 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/ParsedGeoTileGrid.java @@ -0,0 +1,42 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.aggregations.bucket.geogrid; + +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; + +public class ParsedGeoTileGrid extends ParsedGeoGrid { + + private static ObjectParser PARSER = createParser(ParsedGeoTileGrid::new, + ParsedGeoTileGridBucket::fromXContent, ParsedGeoTileGridBucket::fromXContent); + + public static ParsedGeoGrid fromXContent(XContentParser parser, String name) throws IOException { + ParsedGeoGrid aggregation = PARSER.parse(parser, null); + aggregation.setName(name); + return aggregation; + } + + @Override + public String getType() { + return GeoTileGridAggregationBuilder.NAME; + } +} diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/ParsedGeoTileGridBucket.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/ParsedGeoTileGridBucket.java new file mode 100644 index 0000000000000..d2d18b40e76d1 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/ParsedGeoTileGridBucket.java @@ -0,0 +1,42 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.aggregations.bucket.geogrid; + +import org.elasticsearch.common.geo.GeoPoint; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; + +public class ParsedGeoTileGridBucket extends ParsedGeoGridBucket { + + @Override + public GeoPoint getKey() { + return GeoTileUtils.keyToGeoPoint(hashAsString); + } + + @Override + public String getKeyAsString() { + return hashAsString; + } + + static ParsedGeoTileGridBucket fromXContent(XContentParser parser) throws IOException { + return parseXContent(parser, false, ParsedGeoTileGridBucket::new, (p, bucket) -> bucket.hashAsString = p.text()); + } +} diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/AggregationsTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/AggregationsTests.java index 874623132f36a..012171ec25a0b 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/AggregationsTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/AggregationsTests.java @@ -36,6 +36,7 @@ import org.elasticsearch.search.aggregations.bucket.filter.InternalFilterTests; import org.elasticsearch.search.aggregations.bucket.filter.InternalFiltersTests; import org.elasticsearch.search.aggregations.bucket.geogrid.GeoHashGridTests; +import org.elasticsearch.search.aggregations.bucket.geogrid.GeoTileGridTests; import org.elasticsearch.search.aggregations.bucket.global.InternalGlobalTests; import org.elasticsearch.search.aggregations.bucket.histogram.InternalAutoDateHistogramTests; import org.elasticsearch.search.aggregations.bucket.histogram.InternalDateHistogramTests; @@ -140,6 +141,7 @@ private static List> getAggsTests() { aggsTests.add(new InternalFilterTests()); aggsTests.add(new InternalSamplerTests()); aggsTests.add(new GeoHashGridTests()); + aggsTests.add(new GeoTileGridTests()); aggsTests.add(new InternalRangeTests()); aggsTests.add(new InternalDateRangeTests()); aggsTests.add(new InternalGeoDistanceTests()); diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/ShardReduceIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/ShardReduceIT.java index 664edba7db0d8..8cb42e352156b 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/ShardReduceIT.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/ShardReduceIT.java @@ -39,6 +39,7 @@ import static org.elasticsearch.search.aggregations.AggregationBuilders.dateRange; import static org.elasticsearch.search.aggregations.AggregationBuilders.filter; import static org.elasticsearch.search.aggregations.AggregationBuilders.geohashGrid; +import static org.elasticsearch.search.aggregations.AggregationBuilders.geotileGrid; import static org.elasticsearch.search.aggregations.AggregationBuilders.global; import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram; import static org.elasticsearch.search.aggregations.AggregationBuilders.ipRange; @@ -306,5 +307,20 @@ public void testGeoHashGrid() throws Exception { 
assertThat(histo.getBuckets().size(), equalTo(4)); } + public void testGeoTileGrid() throws Exception { + SearchResponse response = client().prepareSearch("idx") + .setQuery(QueryBuilders.matchAllQuery()) + .addAggregation(geotileGrid("grid").field("location") + .subAggregation(dateHistogram("histo").field("date").dateHistogramInterval(DateHistogramInterval.DAY) + .minDocCount(0))) + .get(); + + assertSearchResponse(response); + + GeoGrid grid = response.getAggregations().get("grid"); + Histogram histo = grid.getBuckets().iterator().next().getAggregations().get("histo"); + assertThat(histo.getBuckets().size(), equalTo(4)); + } + } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoGridAggregatorTestCase.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoGridAggregatorTestCase.java index 5965574bef6e8..047903bc86100 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoGridAggregatorTestCase.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoGridAggregatorTestCase.java @@ -19,6 +19,7 @@ package org.elasticsearch.search.aggregations.bucket.geogrid; import org.apache.lucene.document.LatLonDocValuesField; +import org.apache.lucene.geo.GeoEncodingUtils; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.RandomIndexWriter; @@ -89,6 +90,13 @@ public void testWithSeveralDocs() throws IOException { double lat = (180d * randomDouble()) - 90d; double lng = (360d * randomDouble()) - 180d; + // Precision-adjust longitude/latitude to avoid wrong bucket placement + // Internally, lat/lng get converted to 32-bit integers, losing some precision. + // This does not affect geohashing because geohash uses the same algorithm, + // but it does affect other bucketing algorithms, so we need to do the same steps here. + lng = GeoEncodingUtils.decodeLongitude(GeoEncodingUtils.encodeLongitude(lng)); + lat = GeoEncodingUtils.decodeLatitude(GeoEncodingUtils.encodeLatitude(lat)); + + points.add(new LatLonDocValuesField(FIELD_NAME, lat, lng)); String hash = hashAsString(lng, lat, precision); if (distinctHashesPerDoc.contains(hash) == false) { diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoTileGridAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoTileGridAggregatorTests.java new file mode 100644 index 0000000000000..6544344543e34 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoTileGridAggregatorTests.java @@ -0,0 +1,50 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License.
+ */ + +package org.elasticsearch.search.aggregations.bucket.geogrid; + +public class GeoTileGridAggregatorTests extends GeoGridAggregatorTestCase { + + @Override + protected int randomPrecision() { + return randomIntBetween(0, GeoTileUtils.MAX_ZOOM); + } + + @Override + protected String hashAsString(double lng, double lat, int precision) { + return GeoTileUtils.stringEncode(GeoTileUtils.longEncode(lng, lat, precision)); + } + + @Override + protected GeoGridAggregationBuilder createBuilder(String name) { + return new GeoTileGridAggregationBuilder(name); + } + + public void testPrecision() { + final GeoGridAggregationBuilder builder = createBuilder("_name"); + + expectThrows(IllegalArgumentException.class, () -> builder.precision(-1)); + expectThrows(IllegalArgumentException.class, () -> builder.precision(30)); + + int precision = randomIntBetween(0, 29); + builder.precision(precision); + assertEquals(precision, builder.precision()); + } + +} diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoTileGridParserTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoTileGridParserTests.java new file mode 100644 index 0000000000000..d3a9992af5305 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoTileGridParserTests.java @@ -0,0 +1,73 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.search.aggregations.bucket.geogrid; + +import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.common.xcontent.XContentParseException; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.test.ESTestCase; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.instanceOf; + +public class GeoTileGridParserTests extends ESTestCase { + public void testParseValidFromInts() throws Exception { + int precision = randomIntBetween(0, GeoTileUtils.MAX_ZOOM); + XContentParser stParser = createParser(JsonXContent.jsonXContent, + "{\"field\":\"my_loc\", \"precision\":" + precision + ", \"size\": 500, \"shard_size\": 550}"); + XContentParser.Token token = stParser.nextToken(); + assertSame(XContentParser.Token.START_OBJECT, token); + // can create a factory + assertNotNull(GeoTileGridAggregationBuilder.parse("geotile_grid", stParser)); + } + + public void testParseValidFromStrings() throws Exception { + int precision = randomIntBetween(0, GeoTileUtils.MAX_ZOOM); + XContentParser stParser = createParser(JsonXContent.jsonXContent, + "{\"field\":\"my_loc\", \"precision\":\"" + precision + "\", \"size\": \"500\", \"shard_size\": \"550\"}"); + XContentParser.Token token = stParser.nextToken(); + assertSame(XContentParser.Token.START_OBJECT, token); + // can create a factory + assertNotNull(GeoTileGridAggregationBuilder.parse("geotile_grid", stParser)); + } + + public void testParseErrorOnBooleanPrecision() throws Exception { + XContentParser stParser = createParser(JsonXContent.jsonXContent, "{\"field\":\"my_loc\", \"precision\":false}"); + XContentParser.Token token = stParser.nextToken(); + assertSame(XContentParser.Token.START_OBJECT, token); + XContentParseException e = expectThrows(XContentParseException.class, + () -> GeoTileGridAggregationBuilder.parse("geotile_grid", stParser)); + assertThat(ExceptionsHelper.detailedMessage(e), + containsString("[geotile_grid] precision doesn't support values of type: VALUE_BOOLEAN")); + } + + public void testParseErrorOnPrecisionOutOfRange() throws Exception { + XContentParser stParser = createParser(JsonXContent.jsonXContent, "{\"field\":\"my_loc\", \"precision\":\"30\"}"); + XContentParser.Token token = stParser.nextToken(); + assertSame(XContentParser.Token.START_OBJECT, token); + try { + GeoTileGridAggregationBuilder.parse("geotile_grid", stParser); + fail(); + } catch (XContentParseException ex) { + assertThat(ex.getCause(), instanceOf(IllegalArgumentException.class)); + assertEquals("Invalid geotile_grid precision of 30. Must be between 0 and 29.", ex.getCause().getMessage()); + } + } +} diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoTileGridTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoTileGridTests.java new file mode 100644 index 0000000000000..0a8aa8df56eec --- /dev/null +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoTileGridTests.java @@ -0,0 +1,56 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.search.aggregations.bucket.geogrid; + +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.search.aggregations.InternalAggregations; +import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; + +import java.util.List; +import java.util.Map; + +public class GeoTileGridTests extends GeoGridTestCase { + + @Override + protected InternalGeoTileGrid createInternalGeoGrid(String name, int size, List buckets, + List pipelineAggregators, Map metaData) { + return new InternalGeoTileGrid(name, size, buckets, pipelineAggregators, metaData); + } + + @Override + protected Writeable.Reader instanceReader() { + return InternalGeoTileGrid::new; + } + + @Override + protected InternalGeoTileGridBucket createInternalGeoGridBucket(Long key, long docCount, InternalAggregations aggregations) { + return new InternalGeoTileGridBucket(key, docCount, aggregations); + } + + @Override + protected long longEncode(double lng, double lat, int precision) { + return GeoTileUtils.longEncode(lng, lat, precision); + } + + @Override + protected int randomPrecision() { + // precision values below 8 can lead to parsing errors + return randomIntBetween(8, GeoTileUtils.MAX_ZOOM); + } +} diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoTileUtilsTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoTileUtilsTests.java new file mode 100644 index 0000000000000..e2881fd9b9145 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoTileUtilsTests.java @@ -0,0 +1,209 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */
+
+package org.elasticsearch.search.aggregations.bucket.geogrid;
+
+import org.elasticsearch.common.geo.GeoPoint;
+import org.elasticsearch.test.ESTestCase;
+
+import static org.elasticsearch.search.aggregations.bucket.geogrid.GeoTileUtils.MAX_ZOOM;
+import static org.elasticsearch.search.aggregations.bucket.geogrid.GeoTileUtils.checkPrecisionRange;
+import static org.elasticsearch.search.aggregations.bucket.geogrid.GeoTileUtils.hashToGeoPoint;
+import static org.elasticsearch.search.aggregations.bucket.geogrid.GeoTileUtils.keyToGeoPoint;
+import static org.elasticsearch.search.aggregations.bucket.geogrid.GeoTileUtils.longEncode;
+import static org.elasticsearch.search.aggregations.bucket.geogrid.GeoTileUtils.stringEncode;
+import static org.hamcrest.Matchers.closeTo;
+import static org.hamcrest.Matchers.containsString;
+
+public class GeoTileUtilsTests extends ESTestCase {
+
+    private static final double GEOTILE_TOLERANCE = 1E-5D;
+
+    /**
+     * Precision validation should throw an error if it's outside the valid range.
+     */
+    public void testCheckPrecisionRange() {
+        for (int i = 0; i <= 29; i++) {
+            assertEquals(i, checkPrecisionRange(i));
+        }
+        IllegalArgumentException ex = expectThrows(IllegalArgumentException.class, () -> checkPrecisionRange(-1));
+        assertThat(ex.getMessage(), containsString("Invalid geotile_grid precision of -1. Must be between 0 and 29."));
+        ex = expectThrows(IllegalArgumentException.class, () -> checkPrecisionRange(30));
+        assertThat(ex.getMessage(), containsString("Invalid geotile_grid precision of 30. Must be between 0 and 29."));
+    }
+
+    /**
+     * A few hardcoded lat/lng/zoom hashing expectations
+     */
+    public void testLongEncode() {
+        assertEquals(0x0000000000000000L, longEncode(0, 0, 0));
+        assertEquals(0x3C00095540001CA5L, longEncode(30, 70, 15));
+        assertEquals(0x77FFFF4580000000L, longEncode(179.999, 89.999, 29));
+        assertEquals(0x740000BA7FFFFFFFL, longEncode(-179.999, -89.999, 29));
+        assertEquals(0x0800000040000001L, longEncode(1, 1, 2));
+        assertEquals(0x0C00000060000000L, longEncode(-20, 100, 3));
+        assertEquals(0x71127D27C8ACA67AL, longEncode(13, -15, 28));
+        assertEquals(0x4C0077776003A9ACL, longEncode(-12, 15, 19));
+        assertEquals(0x140000024000000EL, longEncode(-328.231870,16.064082, 5));
+        assertEquals(0x6436F96B60000000L, longEncode(-590.769588,89.549167, 25));
+        assertEquals(0x6411BD6BA0A98359L, longEncode(999.787079,51.830093, 25));
+        assertEquals(0x751BD6BBCA983596L, longEncode(999.787079,51.830093, 29));
+        assertEquals(0x77CF880A20000000L, longEncode(-557.039740,-632.103969, 29));
+        assertEquals(0x7624FA4FA0000000L, longEncode(13,88, 29));
+        assertEquals(0x7624FA4FBFFFFFFFL, longEncode(13,-88, 29));
+        assertEquals(0x0400000020000000L, longEncode(13,89, 1));
+        assertEquals(0x0400000020000001L, longEncode(13,-89, 1));
+        assertEquals(0x0400000020000000L, longEncode(13,95, 1));
+        assertEquals(0x0400000020000001L, longEncode(13,-95, 1));
+
+        expectThrows(IllegalArgumentException.class, () -> longEncode(0, 0, -1));
+        expectThrows(IllegalArgumentException.class, () -> longEncode(-1, 0, MAX_ZOOM + 1));
+    }
+
+    private void assertGeoPointEquals(GeoPoint gp, final double longitude, final double latitude) {
+        assertThat(gp.lon(), closeTo(longitude, GEOTILE_TOLERANCE));
+        assertThat(gp.lat(), closeTo(latitude, GEOTILE_TOLERANCE));
+    }
+
+    public void testHashToGeoPoint() {
+        assertGeoPointEquals(keyToGeoPoint("0/0/0"), 0.0, 0.0);
+        assertGeoPointEquals(keyToGeoPoint("1/0/0"), -90.0, 66.51326044311186);
+        assertGeoPointEquals(keyToGeoPoint("1/1/0"),
90.0, 66.51326044311186); + assertGeoPointEquals(keyToGeoPoint("1/0/1"), -90.0, -66.51326044311186); + assertGeoPointEquals(keyToGeoPoint("1/1/1"), 90.0, -66.51326044311186); + assertGeoPointEquals(keyToGeoPoint("29/536870000/10"), 179.99938879162073, 85.05112817241982); + assertGeoPointEquals(keyToGeoPoint("29/10/536870000"), -179.99999295920134, -85.0510760525731); + + //noinspection ConstantConditions + expectThrows(NullPointerException.class, () -> keyToGeoPoint(null)); + expectThrows(IllegalArgumentException.class, () -> keyToGeoPoint("")); + expectThrows(IllegalArgumentException.class, () -> keyToGeoPoint("a")); + expectThrows(IllegalArgumentException.class, () -> keyToGeoPoint("0")); + expectThrows(IllegalArgumentException.class, () -> keyToGeoPoint("0/0")); + expectThrows(IllegalArgumentException.class, () -> keyToGeoPoint("0/0/0/0")); + expectThrows(IllegalArgumentException.class, () -> keyToGeoPoint("0/-1/-1")); + expectThrows(IllegalArgumentException.class, () -> keyToGeoPoint("0/-1/0")); + expectThrows(IllegalArgumentException.class, () -> keyToGeoPoint("0/0/-1")); + expectThrows(IllegalArgumentException.class, () -> keyToGeoPoint("a/0/0")); + expectThrows(IllegalArgumentException.class, () -> keyToGeoPoint("0/a/0")); + expectThrows(IllegalArgumentException.class, () -> keyToGeoPoint("0/0/a")); + expectThrows(IllegalArgumentException.class, () -> keyToGeoPoint("-1/0/0")); + expectThrows(IllegalArgumentException.class, () -> keyToGeoPoint((MAX_ZOOM + 1) + "/0/0")); + + for (int z = 0; z <= MAX_ZOOM; z++) { + final int zoom = z; + final int max_index = (int) Math.pow(2, zoom); + expectThrows(IllegalArgumentException.class, () -> keyToGeoPoint(zoom + "/0/" + max_index)); + expectThrows(IllegalArgumentException.class, () -> keyToGeoPoint(zoom + "/" + max_index + "/0")); + } + } + + /** + * Make sure that hash produces the expected key, and that the key could be converted to hash via a GeoPoint + */ + private void assertStrCodec(long hash, String key, int zoom) { + assertEquals(key, stringEncode(hash)); + final GeoPoint gp = keyToGeoPoint(key); + assertEquals(hash, longEncode(gp.lon(), gp.lat(), zoom)); + } + + /** + * A few hardcoded lat/lng/zoom hashing expectations + */ + public void testStringEncode() { + assertStrCodec(0x0000000000000000L, "0/0/0", 0); + assertStrCodec(0x3C00095540001CA5L, "15/19114/7333", 15); + assertStrCodec(0x77FFFF4580000000L, "29/536869420/0", 29); + assertStrCodec(0x740000BA7FFFFFFFL, "29/1491/536870911", 29); + assertStrCodec(0x0800000040000001L, "2/2/1", 2); + assertStrCodec(0x0C00000060000000L, "3/3/0", 3); + assertStrCodec(0x71127D27C8ACA67AL, "28/143911230/145532538", 28); + assertStrCodec(0x4C0077776003A9ACL, "19/244667/240044", 19); + assertStrCodec(0x140000024000000EL, "5/18/14", 5); + assertStrCodec(0x6436F96B60000000L, "25/28822363/0", 25); + assertStrCodec(0x6411BD6BA0A98359L, "25/9300829/11109209", 25); + assertStrCodec(0x751BD6BBCA983596L, "29/148813278/177747350", 29); + assertStrCodec(0x77CF880A20000000L, "29/511459409/0", 29); + assertStrCodec(0x7624FA4FA0000000L, "29/287822461/0", 29); + assertStrCodec(0x7624FA4FBFFFFFFFL, "29/287822461/536870911", 29); + assertStrCodec(0x0400000020000000L, "1/1/0", 1); + assertStrCodec(0x0400000020000001L, "1/1/1", 1); + + expectThrows(IllegalArgumentException.class, () -> stringEncode(-1L)); + expectThrows(IllegalArgumentException.class, () -> stringEncode(0x7800000000000000L)); // z=30 + expectThrows(IllegalArgumentException.class, () -> stringEncode(0x0000000000000001L)); // z=0,x=0,y=1 + 
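// One reading of the layout these fixtures imply (inferred from the hard-coded
+        // values in this test, not taken from GeoTileUtils itself): zoom sits in the
+        // top 6 bits of the hash and x/y occupy 29 bits each, roughly
+        //     long hash = ((long) zoom << 58) | ((long) x << 29) | (long) y;
+        // e.g. "2/2/1" -> (2L << 58) | (2L << 29) | 1L == 0x0800000040000001L, which
+        // matches the longEncode(1, 1, 2) fixture in testLongEncode().
+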
expectThrows(IllegalArgumentException.class, () -> stringEncode(0x0000000020000000L)); // z=0,x=1,y=0 + + for (int zoom = 0; zoom < 5; zoom++) { + int maxTile = 1 << zoom; + for (int x = 0; x < maxTile; x++) { + for (int y = 0; y < maxTile; y++) { + String expectedTileIndex = zoom + "/" + x + "/" + y; + GeoPoint point = keyToGeoPoint(expectedTileIndex); + String actualTileIndex = stringEncode(longEncode(point.lon(), point.lat(), zoom)); + assertEquals(expectedTileIndex, actualTileIndex); + } + } + } + } + + /** + * Ensure that for all points at all supported precision levels that the long encoding of a geotile + * is compatible with its String based counterpart + */ + public void testGeoTileAsLongRoutines() { + for (double lat = -90; lat <= 90; lat++) { + for (double lng = -180; lng <= 180; lng++) { + for (int p = 0; p <= 29; p++) { + long hash = longEncode(lng, lat, p); + if (p > 0) { + assertNotEquals(0, hash); + } + + // GeoPoint would be in the center of the bucket, thus must produce the same hash + GeoPoint point = hashToGeoPoint(hash); + long hashAsLong2 = longEncode(point.lon(), point.lat(), p); + assertEquals(hash, hashAsLong2); + + // Same point should be generated from the string key + assertEquals(point, keyToGeoPoint(stringEncode(hash))); + } + } + } + } + + /** + * Make sure the polar regions are handled properly. + * Mercator projection does not show anything above 85 or below -85, + * so ensure they are clipped correctly. + */ + public void testSingularityAtPoles() { + double minLat = -85.05112878; + double maxLat = 85.05112878; + double lon = randomIntBetween(-180, 180); + double lat = randomBoolean() + ? randomDoubleBetween(-90, minLat, true) + : randomDoubleBetween(maxLat, 90, true); + double clippedLat = Math.min(Math.max(lat, minLat), maxLat); + int zoom = randomIntBetween(0, MAX_ZOOM); + String tileIndex = stringEncode(longEncode(lon, lat, zoom)); + String clippedTileIndex = stringEncode(longEncode(lon, clippedLat, zoom)); + assertEquals(tileIndex, clippedTileIndex); + } +} diff --git a/test/framework/src/main/java/org/elasticsearch/test/InternalAggregationTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/InternalAggregationTestCase.java index fd560af806066..f9d72e38044fb 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/InternalAggregationTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/InternalAggregationTestCase.java @@ -51,6 +51,8 @@ import org.elasticsearch.search.aggregations.bucket.filter.ParsedFilters; import org.elasticsearch.search.aggregations.bucket.geogrid.GeoHashGridAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.geogrid.ParsedGeoHashGrid; +import org.elasticsearch.search.aggregations.bucket.geogrid.ParsedGeoTileGrid; +import org.elasticsearch.search.aggregations.bucket.geogrid.GeoTileGridAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.global.GlobalAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.global.ParsedGlobal; import org.elasticsearch.search.aggregations.bucket.histogram.AutoDateHistogramAggregationBuilder; @@ -212,6 +214,7 @@ public abstract class InternalAggregationTestCase map.put(FilterAggregationBuilder.NAME, (p, c) -> ParsedFilter.fromXContent(p, (String) c)); map.put(InternalSampler.PARSER_NAME, (p, c) -> ParsedSampler.fromXContent(p, (String) c)); map.put(GeoHashGridAggregationBuilder.NAME, (p, c) -> ParsedGeoHashGrid.fromXContent(p, (String) c)); + map.put(GeoTileGridAggregationBuilder.NAME, (p, c) -> 
ParsedGeoTileGrid.fromXContent(p, (String) c));
         map.put(RangeAggregationBuilder.NAME, (p, c) -> ParsedRange.fromXContent(p, (String) c));
         map.put(DateRangeAggregationBuilder.NAME, (p, c) -> ParsedDateRange.fromXContent(p, (String) c));
         map.put(GeoDistanceAggregationBuilder.NAME, (p, c) -> ParsedGeoDistance.fromXContent(p, (String) c));

From c67a9663afd4b9711ab4890395830da6659d1565 Mon Sep 17 00:00:00 2001
From: Lee Hinman
Date: Thu, 31 Jan 2019 17:13:19 -0700
Subject: [PATCH 02/54] Fix MasterServiceTests.testClusterStateUpdateLogging
 (#38116)

This changes the test not to use a `CountDownLatch`, instead adding an
assertion for the final logging message and waiting until the `MockAppender`
has seen it before proceeding.

Related to df2c06f6f30f7e23a6863a3f72fc3bdb7648885c
Resolves #23739
---
 .../cluster/service/MasterServiceTests.java | 26 ++++++++-----------
 1 file changed, 11 insertions(+), 15 deletions(-)

diff --git a/server/src/test/java/org/elasticsearch/cluster/service/MasterServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/service/MasterServiceTests.java
index aadd48a9bd50d..7ed3f45e505f9 100644
--- a/server/src/test/java/org/elasticsearch/cluster/service/MasterServiceTests.java
+++ b/server/src/test/java/org/elasticsearch/cluster/service/MasterServiceTests.java
@@ -328,11 +328,16 @@ public void testClusterStateUpdateLogging() throws Exception {
                 MasterService.class.getCanonicalName(),
                 Level.DEBUG,
                 "*processing [test3]: took [3s] done publishing updated cluster state (version: *, uuid: *)"));
+        mockAppender.addExpectation(
+            new MockLogAppender.SeenEventExpectation(
+                "test4",
+                MasterService.class.getCanonicalName(),
+                Level.DEBUG,
+                "*processing [test4]: took [0s] no change in cluster state"));

         Logger clusterLogger = LogManager.getLogger(MasterService.class);
         Loggers.addAppender(clusterLogger, mockAppender);
         try {
-            final CountDownLatch latch = new CountDownLatch(4);
             masterService.currentTimeOverride = System.nanoTime();
             masterService.submitStateUpdateTask("test1", new ClusterStateUpdateTask() {
                 @Override
@@ -342,9 +347,7 @@ public ClusterState execute(ClusterState currentState) throws Exception {
                 }

                 @Override
-                public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
-                    latch.countDown();
-                }
+                public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { }

                 @Override
                 public void onFailure(String source, Exception e) {
@@ -364,9 +367,7 @@ public void clusterStateProcessed(String source, ClusterState oldState, ClusterS
                 }

                 @Override
-                public void onFailure(String source, Exception e) {
-                    latch.countDown();
-                }
+                public void onFailure(String source, Exception e) { }
             });
             masterService.submitStateUpdateTask("test3", new ClusterStateUpdateTask() {
                 @Override
@@ -376,9 +377,7 @@ public ClusterState execute(ClusterState currentState) {
                 }

                 @Override
-                public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
-                    latch.countDown();
-                }
+                public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { }

                 @Override
                 public void onFailure(String source, Exception e) {
@@ -394,21 +393,18 @@ public ClusterState execute(ClusterState currentState) {
                 }

                 @Override
-                public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
-                    latch.countDown();
-                }
+                public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { }

                 @Override
                 public void onFailure(String source, Exception e) {
fail(); } }); - latch.await(); + assertBusy(mockAppender::assertAllExpectationsMatched); } finally { Loggers.removeAppender(clusterLogger, mockAppender); mockAppender.stop(); } - mockAppender.assertAllExpectationsMatched(); } public void testClusterStateBatchedUpdates() throws BrokenBarrierException, InterruptedException { From 291c4e7a0ce63f25a238efc34bbef8b224ac1ec9 Mon Sep 17 00:00:00 2001 From: Tim Brooks Date: Thu, 31 Jan 2019 18:02:24 -0700 Subject: [PATCH 03/54] Fix file reading in ccr restore service (#38117) Currently we use the raw byte array length when calling the IndexInput read call to determine how many bytes we want to read. However, due to how BigArrays works, the array length might be longer than the reference length. This commit fixes the issue and uses the BytesRef length when calling read. Additionally, it expands the index follow test to index many more documents. These documents should potentially lead to large enough segment files to trigger scenarios where this fix matters. --- .../repository/CcrRestoreSourceService.java | 3 +- .../elasticsearch/xpack/CcrIntegTestCase.java | 22 ------------ .../xpack/ccr/IndexFollowingIT.java | 35 +++++++++++++------ 3 files changed, 26 insertions(+), 34 deletions(-) diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/repository/CcrRestoreSourceService.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/repository/CcrRestoreSourceService.java index a72b2f21d71df..f093143112d3d 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/repository/CcrRestoreSourceService.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/repository/CcrRestoreSourceService.java @@ -235,8 +235,7 @@ private long readFileBytes(String fileName, BytesReference reference) throws IOE BytesRefIterator refIterator = reference.iterator(); BytesRef ref; while ((ref = refIterator.next()) != null) { - byte[] refBytes = ref.bytes; - indexInput.readBytes(refBytes, 0, refBytes.length); + indexInput.readBytes(ref.bytes, ref.offset, ref.length); } long offsetAfterRead = indexInput.getFilePointer(); diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/CcrIntegTestCase.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/CcrIntegTestCase.java index c4fdeb116ae86..2dccc0e96b7a2 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/CcrIntegTestCase.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/CcrIntegTestCase.java @@ -70,7 +70,6 @@ import org.elasticsearch.transport.nio.MockNioTransportPlugin; import org.elasticsearch.xpack.ccr.CcrSettings; import org.elasticsearch.xpack.ccr.LocalStateCcr; -import org.elasticsearch.xpack.ccr.index.engine.FollowingEngine; import org.elasticsearch.xpack.core.XPackSettings; import org.elasticsearch.xpack.core.ccr.AutoFollowMetadata; import org.elasticsearch.xpack.core.ccr.ShardFollowNodeTaskStatus; @@ -551,27 +550,6 @@ protected void assertMaxSeqNoOfUpdatesIsTransferred(Index leaderIndex, Index fol }); } - protected void assertTotalNumberOfOptimizedIndexing(Index followerIndex, int numberOfShards, long expectedTotal) throws Exception { - assertBusy(() -> { - long[] numOfOptimizedOps = new long[numberOfShards]; - for (int shardId = 0; shardId < numberOfShards; shardId++) { - for (String node : getFollowerCluster().nodesInclude(followerIndex.getName())) { - IndicesService indicesService = getFollowerCluster().getInstance(IndicesService.class, node); - IndexShard shard = indicesService.getShardOrNull(new 
ShardId(followerIndex, shardId)); - if (shard != null && shard.routingEntry().primary()) { - try { - FollowingEngine engine = ((FollowingEngine) IndexShardTestCase.getEngine(shard)); - numOfOptimizedOps[shardId] = engine.getNumberOfOptimizedIndexing(); - } catch (AlreadyClosedException e) { - throw new AssertionError(e); // causes assertBusy to retry - } - } - } - } - assertThat(Arrays.stream(numOfOptimizedOps).sum(), equalTo(expectedTotal)); - }); - } - static void removeCCRRelatedMetadataFromClusterState(ClusterService clusterService) throws Exception { CountDownLatch latch = new CountDownLatch(1); clusterService.submitStateUpdateTask("remove-ccr-related-metadata", new ClusterStateUpdateTask() { diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/IndexFollowingIT.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/IndexFollowingIT.java index 55fcb6ace89fd..74c44704e2e1c 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/IndexFollowingIT.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/IndexFollowingIT.java @@ -26,6 +26,7 @@ import org.elasticsearch.action.admin.indices.stats.ShardStats; import org.elasticsearch.action.bulk.BulkProcessor; import org.elasticsearch.action.bulk.BulkRequest; +import org.elasticsearch.action.bulk.BulkRequestBuilder; import org.elasticsearch.action.bulk.BulkResponse; import org.elasticsearch.action.get.GetResponse; import org.elasticsearch.action.index.IndexRequest; @@ -101,9 +102,30 @@ public void testFollowIndex() throws Exception { assertAcked(leaderClient().admin().indices().prepareCreate("index1").setSource(leaderIndexSettings, XContentType.JSON)); ensureLeaderYellow("index1"); - final int firstBatchNumDocs = randomIntBetween(2, 64); + final int firstBatchNumDocs; + // Sometimes we want to index a lot of documents to ensure that the recovery works with larger files + if (rarely()) { + firstBatchNumDocs = randomIntBetween(1800, 2000); + } else { + firstBatchNumDocs = randomIntBetween(10, 64); + } + final int flushPoint = (int) (firstBatchNumDocs * 0.75); + logger.info("Indexing [{}] docs as first batch", firstBatchNumDocs); - for (int i = 0; i < firstBatchNumDocs; i++) { + BulkRequestBuilder bulkRequestBuilder = leaderClient().prepareBulk(); + for (int i = 0; i < flushPoint; i++) { + final String source = String.format(Locale.ROOT, "{\"f\":%d}", i); + IndexRequest indexRequest = new IndexRequest("index1", "doc", Integer.toString(i)) + .source(source, XContentType.JSON) + .timeout(TimeValue.timeValueSeconds(1)); + bulkRequestBuilder.add(indexRequest); + } + bulkRequestBuilder.get(); + + leaderClient().admin().indices().prepareFlush("index1").setWaitIfOngoing(true).get(); + + // Index some docs after the flush that might be recovered in the normal index following operations + for (int i = flushPoint; i < firstBatchNumDocs; i++) { final String source = String.format(Locale.ROOT, "{\"f\":%d}", i); leaderClient().prepareIndex("index1", "doc", Integer.toString(i)).setSource(source, XContentType.JSON).get(); } @@ -147,7 +169,7 @@ public void testFollowIndex() throws Exception { for (int i = 0; i < firstBatchNumDocs; i++) { assertBusy(assertExpectedDocumentRunnable(i)); } - assertTotalNumberOfOptimizedIndexing(resolveFollowerIndex("index2"), numberOfPrimaryShards, firstBatchNumDocs); + pauseFollow("index2"); followerClient().execute(ResumeFollowAction.INSTANCE, followRequest.getFollowRequest()).get(); final int secondBatchNumDocs = randomIntBetween(2, 64); @@ -172,8 +194,6 @@ public 
void testFollowIndex() throws Exception { for (int i = firstBatchNumDocs; i < firstBatchNumDocs + secondBatchNumDocs; i++) { assertBusy(assertExpectedDocumentRunnable(i)); } - assertTotalNumberOfOptimizedIndexing(resolveFollowerIndex("index2"), numberOfPrimaryShards, - firstBatchNumDocs + secondBatchNumDocs); pauseFollow("index2"); assertMaxSeqNoOfUpdatesIsTransferred(resolveLeaderIndex("index1"), resolveFollowerIndex("index2"), numberOfPrimaryShards); } @@ -287,7 +307,6 @@ public void testFollowIndexWithoutWaitForComplete() throws Exception { for (int i = 0; i < firstBatchNumDocs; i++) { assertBusy(assertExpectedDocumentRunnable(i)); } - assertTotalNumberOfOptimizedIndexing(resolveFollowerIndex("index2"), numberOfPrimaryShards, firstBatchNumDocs); pauseFollow("index2"); } @@ -432,8 +451,6 @@ public void afterBulk(long executionId, BulkRequest request, Throwable failure) assertIndexFullyReplicatedToFollower("index1", "index2"); pauseFollow("index2"); leaderClient().admin().indices().prepareRefresh("index1").get(); - assertTotalNumberOfOptimizedIndexing(resolveFollowerIndex("index2"), numberOfShards, - leaderClient().prepareSearch("index1").get().getHits().getTotalHits().value); assertMaxSeqNoOfUpdatesIsTransferred(resolveLeaderIndex("index1"), resolveFollowerIndex("index2"), numberOfShards); } @@ -475,7 +492,6 @@ public void testFollowIndexWithNestedField() throws Exception { } pauseFollow("index2"); assertMaxSeqNoOfUpdatesIsTransferred(resolveLeaderIndex("index1"), resolveFollowerIndex("index2"), 1); - assertTotalNumberOfOptimizedIndexing(resolveFollowerIndex("index2"), 1, numDocs); } public void testUnfollowNonExistingIndex() { @@ -538,7 +554,6 @@ public void testFollowIndexMaxOperationSizeInBytes() throws Exception { } pauseFollow("index2"); assertMaxSeqNoOfUpdatesIsTransferred(resolveLeaderIndex("index1"), resolveFollowerIndex("index2"), 1); - assertTotalNumberOfOptimizedIndexing(resolveFollowerIndex("index2"), 1, numDocs); } public void testAttemptToChangeCcrFollowingIndexSetting() throws Exception { From a8ebe2a217ae74476b7e361228cbd66bca7cf8ab Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Thu, 31 Jan 2019 20:50:41 -0500 Subject: [PATCH 04/54] Fix random params in testSoftDeletesRetentionLock (#38114) Since #37992 the retainingSequenceNumber is initialized with 0 while the global checkpoint can be -1. 
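For illustration, the broken combination amounted to something like this
(a sketch of the failure mode, not code taken verbatim from the test):

    long retaining = 0;            // retaining sequence numbers now start at 0
    long globalCheckpoint = -1L;   // the global checkpoint may still be unassigned
    randomLongBetween(retaining, globalCheckpoint); // min > max, so the helper rejects the range

Clamping the upper bound with Math.max(globalCheckpoint.get(), 0L) keeps the
random range non-empty.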
Relates #37992 --- .../org/elasticsearch/index/engine/SoftDeletesPolicyTests.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/server/src/test/java/org/elasticsearch/index/engine/SoftDeletesPolicyTests.java b/server/src/test/java/org/elasticsearch/index/engine/SoftDeletesPolicyTests.java index acad319d94ccc..8a34b0d1b5207 100644 --- a/server/src/test/java/org/elasticsearch/index/engine/SoftDeletesPolicyTests.java +++ b/server/src/test/java/org/elasticsearch/index/engine/SoftDeletesPolicyTests.java @@ -74,7 +74,7 @@ public void testSoftDeletesRetentionLock() { // Advances the global checkpoint and the local checkpoint of a safe commit globalCheckpoint.addAndGet(between(0, 1000)); for (final AtomicLong retainingSequenceNumber : retainingSequenceNumbers) { - retainingSequenceNumber.set(randomLongBetween(retainingSequenceNumber.get(), globalCheckpoint.get())); + retainingSequenceNumber.set(randomLongBetween(retainingSequenceNumber.get(), Math.max(globalCheckpoint.get(), 0L))); } safeCommitCheckpoint = randomLongBetween(safeCommitCheckpoint, globalCheckpoint.get()); policy.setLocalCheckpointOfSafeCommit(safeCommitCheckpoint); From b8b843476d387f257a50c53db586abe855ea0fb9 Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Thu, 31 Jan 2019 21:01:41 -0500 Subject: [PATCH 05/54] Disable dynamic mapping in testSimpleGetFieldMappingsWithDefaults (#38045) Since #31140 we no longer require acking on the dynamic mapping of index requests. Thus, a returned mapping from a get mapping request does not necessarily contain the dynamic updates from the index request. This commit replaces the dynamic mapping update with a manual put mapping. Relates #31140 Closes #37928 --- .../indices/mapping/SimpleGetFieldMappingsIT.java | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/server/src/test/java/org/elasticsearch/indices/mapping/SimpleGetFieldMappingsIT.java b/server/src/test/java/org/elasticsearch/indices/mapping/SimpleGetFieldMappingsIT.java index 0e2e230053cd2..e344f15f3c55f 100644 --- a/server/src/test/java/org/elasticsearch/indices/mapping/SimpleGetFieldMappingsIT.java +++ b/server/src/test/java/org/elasticsearch/indices/mapping/SimpleGetFieldMappingsIT.java @@ -147,8 +147,7 @@ public void testGetFieldMappings() throws Exception { @SuppressWarnings("unchecked") public void testSimpleGetFieldMappingsWithDefaults() throws Exception { assertAcked(prepareCreate("test").addMapping("type", getMappingForType("type"))); - - client().prepareIndex("test", "type", "1").setSource("num", 1).get(); + client().admin().indices().preparePutMapping("test").setType("type").setSource("num", "type=long").get(); GetFieldMappingsResponse response = client().admin().indices().prepareGetFieldMappings() .setFields("num", "field1", "obj.subfield").includeDefaults(true).get(); From 0d56955d39e9e5e3e66aaf8ea63a13f10606bf7e Mon Sep 17 00:00:00 2001 From: Paul Sanwald Date: Thu, 31 Jan 2019 22:00:09 -0500 Subject: [PATCH 06/54] mute test, as this one is failing also per #35450 (#38132) --- .../src/test/java/org/elasticsearch/client/ClusterClientIT.java | 1 + 1 file changed, 1 insertion(+) diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ClusterClientIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ClusterClientIT.java index dc9f246c7b878..2044a5ac56c92 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/ClusterClientIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ClusterClientIT.java @@ -182,6 +182,7 @@ 
public void testClusterHealthYellowClusterLevel() throws IOException {
         assertThat(response.getIndices().size(), equalTo(0));
     }

+    @AwaitsFix(bugUrl="https://github.com/elastic/elasticsearch/issues/35450")
     public void testClusterHealthYellowIndicesLevel() throws IOException {
         createIndex("index", Settings.EMPTY);
         createIndex("index2", Settings.EMPTY);

From 0a604e3b2496777168bb541815a8526e463ee15f Mon Sep 17 00:00:00 2001
From: Armin Braun
Date: Fri, 1 Feb 2019 05:45:40 +0100
Subject: [PATCH 07/54] Fix Two Races that Lead to Stuck Snapshots (#37686)

* Fixes two broken spots:

1. Master failover while deleting a snapshot that has no shards will get stuck
if the new master finds the 0-shard snapshot in `INIT` when deleting
2. Aborted shards that were never seen in `INIT` state by the
`SnapshotShardsService` will not be notified as failed, leading to the
snapshot staying in `ABORTED` state and never getting deleted with one or more
shards stuck in `ABORTED` state

* Tried to make fixes as short as possible so we can backport to `6.x` with
the least amount of risk
* Significantly extended test infrastructure to reproduce the above two issues
* Two new test runs:
  1. Reproducing the effects of node disconnects/restarts in isolation
  2. Reproducing the effects of disconnects/restarts in parallel with shard
     relocations and deletes
* Relates #32265
* Closes #32348
---
 .../snapshots/SnapshotShardsService.java      |   56 +-
 .../snapshots/SnapshotsService.java           |    5 +-
 .../snapshots/SnapshotResiliencyTests.java    | 1037 +++++++++++++++++
 .../snapshots/SnapshotsServiceTests.java      |  619 ----------
 .../disruption/DisruptableMockTransport.java  |    7 -
 .../test/transport/MockTransport.java         |   10 +-
 6 files changed, 1096 insertions(+), 638 deletions(-)
 create mode 100644 server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java
 delete mode 100644 server/src/test/java/org/elasticsearch/snapshots/SnapshotsServiceTests.java

diff --git a/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java b/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java
index 6b7b506114361..132b269b196e0 100644
--- a/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java
+++ b/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java
@@ -67,6 +67,10 @@ import org.elasticsearch.repositories.IndexId;
 import org.elasticsearch.repositories.Repository;
 import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.EmptyTransportResponseHandler;
+import org.elasticsearch.transport.TransportException;
+import org.elasticsearch.transport.TransportRequestDeduplicator;
+import org.elasticsearch.transport.TransportResponse;
 import org.elasticsearch.transport.TransportService;

 import java.io.IOException;
@@ -85,7 +89,6 @@ import static java.util.Collections.emptyMap;
 import static java.util.Collections.unmodifiableMap;
 import static org.elasticsearch.cluster.SnapshotsInProgress.completed;
-import static org.elasticsearch.transport.EmptyTransportResponseHandler.INSTANCE_SAME;

 /**
  * This service runs on data and master nodes and controls currently snapshotted shards on these nodes. It is responsible for
@@ -112,6 +115,10 @@ public class SnapshotShardsService extends AbstractLifecycleComponent implements
     private volatile Map> shardSnapshots = emptyMap();

+    // A map of snapshots to the shardIds that we already reported to the master as failed
+    private final TransportRequestDeduplicator remoteFailedRequestDeduplicator =
+        new TransportRequestDeduplicator<>();
+
     private final SnapshotStateExecutor snapshotStateExecutor = new SnapshotStateExecutor();
     private final UpdateSnapshotStatusAction updateSnapshotStatusHandler;
@@ -272,12 +279,11 @@ private void processIndexShardSnapshots(ClusterChangedEvent event) {
                     // Abort all running shards for this snapshot
                     Map snapshotShards = shardSnapshots.get(entry.snapshot());
                     if (snapshotShards != null) {
-                        final String failure = "snapshot has been aborted";
                         for (ObjectObjectCursor shard : entry.shards()) {
-
                             final IndexShardSnapshotStatus snapshotStatus = snapshotShards.get(shard.key);
                             if (snapshotStatus != null) {
-                                final IndexShardSnapshotStatus.Copy lastSnapshotStatus = snapshotStatus.abortIfNotCompleted(failure);
+                                final IndexShardSnapshotStatus.Copy lastSnapshotStatus =
+                                    snapshotStatus.abortIfNotCompleted("snapshot has been aborted");
                                 final Stage stage = lastSnapshotStatus.getStage();
                                 if (stage == Stage.FINALIZE) {
                                     logger.debug("[{}] trying to cancel snapshot on shard [{}] that is finalizing, " +
@@ -295,6 +301,15 @@ private void processIndexShardSnapshots(ClusterChangedEvent event) {
                                 }
                             }
                         }
+                    } else {
+                        final Snapshot snapshot = entry.snapshot();
+                        for (ObjectObjectCursor curr : entry.shards()) {
+                            // due to CS batching we might have missed the INIT state and gone straight into ABORTED
+                            // notify master that abort has completed by moving to FAILED
+                            if (curr.value.state() == State.ABORTED) {
+                                notifyFailedSnapshotShard(snapshot, curr.key, localNodeId, curr.value.reason());
+                            }
+                        }
                     }
                 }
             }
@@ -515,12 +530,33 @@ void notifyFailedSnapshotShard(final Snapshot snapshot, final ShardId shardId, f

     /** Updates the shard snapshot status by sending a {@link UpdateIndexShardSnapshotStatusRequest} to the master node */
     void sendSnapshotShardUpdate(final Snapshot snapshot, final ShardId shardId, final ShardSnapshotStatus status) {
-        try {
-            UpdateIndexShardSnapshotStatusRequest request = new UpdateIndexShardSnapshotStatusRequest(snapshot, shardId, status);
-            transportService.sendRequest(transportService.getLocalNode(), UPDATE_SNAPSHOT_STATUS_ACTION_NAME, request, INSTANCE_SAME);
-        } catch (Exception e) {
-            logger.warn(() -> new ParameterizedMessage("[{}] [{}] failed to update snapshot state", snapshot, status), e);
-        }
+        remoteFailedRequestDeduplicator.executeOnce(
+            new UpdateIndexShardSnapshotStatusRequest(snapshot, shardId, status),
+            new ActionListener() {
+                @Override
+                public void onResponse(Void aVoid) {
+                    logger.trace("[{}] [{}] updated snapshot state", snapshot, status);
+                }
+
+                @Override
+                public void onFailure(Exception e) {
+                    logger.warn(
+                        () -> new ParameterizedMessage("[{}] [{}] failed to update snapshot state", snapshot, status), e);
+                }
+            },
+            (req, reqListener) -> transportService.sendRequest(transportService.getLocalNode(), UPDATE_SNAPSHOT_STATUS_ACTION_NAME, req,
+                new EmptyTransportResponseHandler(ThreadPool.Names.SAME) {
+                    @Override
+                    public void handleResponse(TransportResponse.Empty response) {
+                        reqListener.onResponse(null);
+                    }
+
+                    @Override
+                    public void handleException(TransportException exp) {
+                        reqListener.onFailure(exp);
+                    }
+                })
+        );
     }

     /**

diff --git
a/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java b/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java index af6d7055e533a..c5b478fa908a9 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java +++ b/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java @@ -1210,7 +1210,10 @@ public ClusterState execute(ClusterState currentState) throws Exception { if (state == State.INIT) { // snapshot is still initializing, mark it as aborted shards = snapshotEntry.shards(); - + assert shards.isEmpty(); + // No shards in this snapshot, we delete it right away since the SnapshotShardsService + // has no work to do. + endSnapshot(snapshotEntry); } else if (state == State.STARTED) { // snapshot is started - mark every non completed shard as aborted final ImmutableOpenMap.Builder shardsBuilder = ImmutableOpenMap.builder(); diff --git a/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java b/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java new file mode 100644 index 0000000000000..a54155db92da1 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java @@ -0,0 +1,1037 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.snapshots; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.Version; +import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryAction; +import org.elasticsearch.action.admin.cluster.repositories.put.TransportPutRepositoryAction; +import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteAction; +import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteRequest; +import org.elasticsearch.action.admin.cluster.reroute.TransportClusterRerouteAction; +import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotAction; +import org.elasticsearch.action.admin.cluster.snapshots.create.TransportCreateSnapshotAction; +import org.elasticsearch.action.admin.cluster.snapshots.delete.DeleteSnapshotAction; +import org.elasticsearch.action.admin.cluster.snapshots.delete.DeleteSnapshotRequest; +import org.elasticsearch.action.admin.cluster.snapshots.delete.TransportDeleteSnapshotAction; +import org.elasticsearch.action.admin.cluster.state.ClusterStateAction; +import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest; +import org.elasticsearch.action.admin.cluster.state.TransportClusterStateAction; +import org.elasticsearch.action.admin.indices.create.CreateIndexAction; +import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; +import org.elasticsearch.action.admin.indices.create.TransportCreateIndexAction; +import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresAction; +import org.elasticsearch.action.admin.indices.shards.TransportIndicesShardStoresAction; +import org.elasticsearch.action.resync.TransportResyncReplicationAction; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.ActiveShardCount; +import org.elasticsearch.action.support.TransportAction; +import org.elasticsearch.client.AdminClient; +import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.cluster.ClusterModule; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ESAllocationTestCase; +import org.elasticsearch.cluster.NodeConnectionsService; +import org.elasticsearch.cluster.SnapshotsInProgress; +import org.elasticsearch.cluster.action.index.NodeMappingRefreshAction; +import org.elasticsearch.cluster.action.shard.ShardStateAction; +import org.elasticsearch.cluster.coordination.ClusterBootstrapService; +import org.elasticsearch.cluster.coordination.CoordinationMetaData.VotingConfiguration; +import org.elasticsearch.cluster.coordination.CoordinationState; +import org.elasticsearch.cluster.coordination.Coordinator; +import org.elasticsearch.cluster.coordination.CoordinatorTests; +import org.elasticsearch.cluster.coordination.DeterministicTaskQueue; +import org.elasticsearch.cluster.coordination.InMemoryPersistedState; +import org.elasticsearch.cluster.coordination.MockSinglePrioritizingExecutor; +import org.elasticsearch.cluster.metadata.AliasValidator; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.metadata.MetaDataCreateIndexService; +import org.elasticsearch.cluster.metadata.MetaDataMappingService; +import org.elasticsearch.cluster.node.DiscoveryNode; +import 
org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.cluster.routing.RoutingService; +import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.routing.UnassignedInfo; +import org.elasticsearch.cluster.routing.allocation.AllocationService; +import org.elasticsearch.cluster.routing.allocation.command.AllocateEmptyPrimaryAllocationCommand; +import org.elasticsearch.cluster.service.ClusterApplierService; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.cluster.service.MasterService; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.network.NetworkModule; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.IndexScopedSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.common.util.PageCacheRecycler; +import org.elasticsearch.common.util.concurrent.AbstractRunnable; +import org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.env.Environment; +import org.elasticsearch.env.NodeEnvironment; +import org.elasticsearch.env.TestEnvironment; +import org.elasticsearch.gateway.MetaStateService; +import org.elasticsearch.gateway.TransportNodesListGatewayStartedShards; +import org.elasticsearch.index.analysis.AnalysisRegistry; +import org.elasticsearch.index.seqno.GlobalCheckpointSyncAction; +import org.elasticsearch.index.seqno.RetentionLeaseSyncAction; +import org.elasticsearch.index.shard.PrimaryReplicaSyncer; +import org.elasticsearch.indices.IndicesService; +import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; +import org.elasticsearch.indices.cluster.FakeThreadPoolMasterService; +import org.elasticsearch.indices.cluster.IndicesClusterStateService; +import org.elasticsearch.indices.flush.SyncedFlushService; +import org.elasticsearch.indices.mapper.MapperRegistry; +import org.elasticsearch.indices.recovery.PeerRecoverySourceService; +import org.elasticsearch.indices.recovery.PeerRecoveryTargetService; +import org.elasticsearch.indices.recovery.RecoverySettings; +import org.elasticsearch.plugins.MapperPlugin; +import org.elasticsearch.plugins.PluginsService; +import org.elasticsearch.repositories.RepositoriesService; +import org.elasticsearch.repositories.Repository; +import org.elasticsearch.repositories.fs.FsRepository; +import org.elasticsearch.script.ScriptService; +import org.elasticsearch.search.SearchService; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.disruption.DisruptableMockTransport; +import org.elasticsearch.test.disruption.NetworkDisruption; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportException; +import org.elasticsearch.transport.TransportInterceptor; +import org.elasticsearch.transport.TransportRequest; +import org.elasticsearch.transport.TransportRequestHandler; +import org.elasticsearch.transport.TransportService; +import org.junit.After; +import org.junit.Before; + +import java.io.IOException; +import java.nio.file.Path; +import java.util.Collection; +import java.util.Collections; +import java.util.Comparator; +import java.util.HashMap; +import java.util.HashSet; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; +import 
java.util.Optional; +import java.util.Set; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.function.Consumer; +import java.util.function.Supplier; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +import static java.util.Collections.emptyMap; +import static java.util.Collections.emptySet; +import static org.elasticsearch.env.Environment.PATH_HOME_SETTING; +import static org.elasticsearch.node.Node.NODE_NAME_SETTING; +import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.either; +import static org.hamcrest.Matchers.empty; +import static org.hamcrest.Matchers.hasSize; +import static org.mockito.Mockito.mock; + +public class SnapshotResiliencyTests extends ESTestCase { + + private DeterministicTaskQueue deterministicTaskQueue; + + private TestClusterNodes testClusterNodes; + + private Path tempDir; + + @Before + public void createServices() { + tempDir = createTempDir(); + deterministicTaskQueue = + new DeterministicTaskQueue(Settings.builder().put(NODE_NAME_SETTING.getKey(), "shared").build(), random()); + } + + @After + public void stopServices() { + testClusterNodes.nodes.values().forEach(TestClusterNode::stop); + } + + public void testSuccessfulSnapshot() { + setupTestCluster(randomFrom(1, 3, 5), randomIntBetween(2, 10)); + + String repoName = "repo"; + String snapshotName = "snapshot"; + final String index = "test"; + + final int shards = randomIntBetween(1, 10); + + TestClusterNode masterNode = + testClusterNodes.currentMaster(testClusterNodes.nodes.values().iterator().next().clusterService.state()); + final AtomicBoolean createdSnapshot = new AtomicBoolean(); + masterNode.client.admin().cluster().preparePutRepository(repoName) + .setType(FsRepository.TYPE).setSettings(Settings.builder().put("location", randomAlphaOfLength(10))) + .execute( + assertNoFailureListener( + () -> masterNode.client.admin().indices().create( + new CreateIndexRequest(index).waitForActiveShards(ActiveShardCount.ALL) + .settings(defaultIndexSettings(shards)), + assertNoFailureListener( + () -> masterNode.client.admin().cluster().prepareCreateSnapshot(repoName, snapshotName) + .execute(assertNoFailureListener(() -> createdSnapshot.set(true))))))); + + deterministicTaskQueue.runAllRunnableTasks(); + + assertTrue(createdSnapshot.get()); + SnapshotsInProgress finalSnapshotsInProgress = masterNode.clusterService.state().custom(SnapshotsInProgress.TYPE); + assertFalse(finalSnapshotsInProgress.entries().stream().anyMatch(entry -> entry.state().completed() == false)); + final Repository repository = masterNode.repositoriesService.repository(repoName); + Collection snapshotIds = repository.getRepositoryData().getSnapshotIds(); + assertThat(snapshotIds, hasSize(1)); + + final SnapshotInfo snapshotInfo = repository.getSnapshotInfo(snapshotIds.iterator().next()); + assertEquals(SnapshotState.SUCCESS, snapshotInfo.state()); + assertThat(snapshotInfo.indices(), containsInAnyOrder(index)); + assertEquals(shards, snapshotInfo.successfulShards()); + assertEquals(0, snapshotInfo.failedShards()); + } + + public void testSnapshotWithNodeDisconnects() { + final int dataNodes = randomIntBetween(2, 10); + setupTestCluster(randomFrom(1, 3, 5), dataNodes); + + String repoName = "repo"; + String snapshotName = "snapshot"; + final String index = "test"; + + final int shards = randomIntBetween(1, 10); + + TestClusterNode masterNode = + 
testClusterNodes.currentMaster(testClusterNodes.nodes.values().iterator().next().clusterService.state()); + final AtomicBoolean createdSnapshot = new AtomicBoolean(); + + final AdminClient masterAdminClient = masterNode.client.admin(); + masterNode.client.admin().cluster().preparePutRepository(repoName) + .setType(FsRepository.TYPE).setSettings(Settings.builder().put("location", randomAlphaOfLength(10))) + .execute( + assertNoFailureListener( + () -> masterNode.client.admin().indices().create( + + new CreateIndexRequest(index).waitForActiveShards(ActiveShardCount.ALL) + .settings(defaultIndexSettings(shards)), + assertNoFailureListener( + () -> { + for (int i = 0; i < randomIntBetween(0, dataNodes); ++i) { + scheduleNow(this::disconnectRandomDataNode); + } + if (randomBoolean()) { + scheduleNow(() -> testClusterNodes.clearNetworkDisruptions()); + } + masterAdminClient.cluster().prepareCreateSnapshot(repoName, snapshotName) + .execute(assertNoFailureListener(() -> { + for (int i = 0; i < randomIntBetween(0, dataNodes); ++i) { + scheduleNow(this::disconnectOrRestartDataNode); + } + final boolean disconnectedMaster = randomBoolean(); + if (disconnectedMaster) { + scheduleNow(this::disconnectOrRestartMasterNode); + } + if (disconnectedMaster || randomBoolean()) { + scheduleSoon(() -> testClusterNodes.clearNetworkDisruptions()); + } else if (randomBoolean()) { + scheduleNow(() -> testClusterNodes.clearNetworkDisruptions()); + } + createdSnapshot.set(true); + })); + })))); + + runUntil(() -> { + final Optional randomMaster = testClusterNodes.randomMasterNode(); + if (randomMaster.isPresent()) { + final SnapshotsInProgress snapshotsInProgress = randomMaster.get().clusterService.state().custom(SnapshotsInProgress.TYPE); + return snapshotsInProgress != null && snapshotsInProgress.entries().isEmpty(); + } + return false; + }, TimeUnit.MINUTES.toMillis(1L)); + + clearDisruptionsAndAwaitSync(); + + assertTrue(createdSnapshot.get()); + final TestClusterNode randomMaster = testClusterNodes.randomMasterNode() + .orElseThrow(() -> new AssertionError("expected to find at least one active master node")); + SnapshotsInProgress finalSnapshotsInProgress = randomMaster.clusterService.state().custom(SnapshotsInProgress.TYPE); + assertThat(finalSnapshotsInProgress.entries(), empty()); + final Repository repository = randomMaster.repositoriesService.repository(repoName); + Collection snapshotIds = repository.getRepositoryData().getSnapshotIds(); + assertThat(snapshotIds, hasSize(1)); + } + + public void testConcurrentSnapshotCreateAndDelete() { + setupTestCluster(randomFrom(1, 3, 5), randomIntBetween(2, 10)); + + String repoName = "repo"; + String snapshotName = "snapshot"; + final String index = "test"; + + final int shards = randomIntBetween(1, 10); + + TestClusterNode masterNode = + testClusterNodes.currentMaster(testClusterNodes.nodes.values().iterator().next().clusterService.state()); + final AtomicBoolean createdSnapshot = new AtomicBoolean(); + masterNode.client.admin().cluster().preparePutRepository(repoName) + .setType(FsRepository.TYPE).setSettings(Settings.builder().put("location", randomAlphaOfLength(10))) + .execute( + assertNoFailureListener( + () -> masterNode.client.admin().indices().create( + new CreateIndexRequest(index).waitForActiveShards(ActiveShardCount.ALL) + .settings(defaultIndexSettings(shards)), + assertNoFailureListener( + () -> masterNode.client.admin().cluster().prepareCreateSnapshot(repoName, snapshotName) + .execute(assertNoFailureListener( + () -> 
masterNode.client.admin().cluster().deleteSnapshot( + new DeleteSnapshotRequest(repoName, snapshotName), + assertNoFailureListener(() -> masterNode.client.admin().cluster() + .prepareCreateSnapshot(repoName, snapshotName).execute( + assertNoFailureListener(() -> createdSnapshot.set(true)) + ))))))))); + + deterministicTaskQueue.runAllRunnableTasks(); + + assertTrue(createdSnapshot.get()); + SnapshotsInProgress finalSnapshotsInProgress = masterNode.clusterService.state().custom(SnapshotsInProgress.TYPE); + assertFalse(finalSnapshotsInProgress.entries().stream().anyMatch(entry -> entry.state().completed() == false)); + final Repository repository = masterNode.repositoriesService.repository(repoName); + Collection snapshotIds = repository.getRepositoryData().getSnapshotIds(); + assertThat(snapshotIds, hasSize(1)); + + final SnapshotInfo snapshotInfo = repository.getSnapshotInfo(snapshotIds.iterator().next()); + assertEquals(SnapshotState.SUCCESS, snapshotInfo.state()); + assertThat(snapshotInfo.indices(), containsInAnyOrder(index)); + assertEquals(shards, snapshotInfo.successfulShards()); + assertEquals(0, snapshotInfo.failedShards()); + } + + /** + * Simulates concurrent restarts of data and master nodes as well as relocating a primary shard, while starting and subsequently + * deleting a snapshot. + */ + public void testSnapshotPrimaryRelocations() { + final int masterNodeCount = randomFrom(1, 3, 5); + setupTestCluster(masterNodeCount, randomIntBetween(2, 10)); + + String repoName = "repo"; + String snapshotName = "snapshot"; + final String index = "test"; + + final int shards = randomIntBetween(1, 10); + + final TestClusterNode masterNode = + testClusterNodes.currentMaster(testClusterNodes.nodes.values().iterator().next().clusterService.state()); + final AtomicBoolean createdSnapshot = new AtomicBoolean(); + final AdminClient masterAdminClient = masterNode.client.admin(); + masterAdminClient.cluster().preparePutRepository(repoName) + .setType(FsRepository.TYPE).setSettings(Settings.builder().put("location", randomAlphaOfLength(10))) + .execute( + assertNoFailureListener( + () -> masterAdminClient.indices().create( + new CreateIndexRequest(index).waitForActiveShards(ActiveShardCount.ALL) + .settings(defaultIndexSettings(shards)), + assertNoFailureListener( + () -> masterAdminClient.cluster().state(new ClusterStateRequest(), assertNoFailureListener( + clusterStateResponse -> { + final ShardRouting shardToRelocate = + clusterStateResponse.getState().routingTable().allShards(index).get(0); + final TestClusterNode currentPrimaryNode = + testClusterNodes.nodeById(shardToRelocate.currentNodeId()); + final TestClusterNode otherNode = + testClusterNodes.randomDataNodeSafe(currentPrimaryNode.node.getName()); + final Runnable maybeForceAllocate = new Runnable() { + @Override + public void run() { + masterAdminClient.cluster().state(new ClusterStateRequest(), assertNoFailureListener( + resp -> { + final ShardRouting shardRouting = resp.getState().routingTable() + .shardRoutingTable(shardToRelocate.shardId()).primaryShard(); + if (shardRouting.unassigned() + && shardRouting.unassignedInfo().getReason() == UnassignedInfo.Reason.NODE_LEFT) { + if (masterNodeCount > 1) { + scheduleNow(() -> testClusterNodes.stopNode(masterNode)); + } + testClusterNodes.randomDataNodeSafe().client.admin().cluster() + .prepareCreateSnapshot(repoName, snapshotName) + .execute(ActionListener.wrap(() -> { + testClusterNodes.randomDataNodeSafe().client.admin().cluster() + .deleteSnapshot( + new DeleteSnapshotRequest(repoName, 
snapshotName), noopListener()); + createdSnapshot.set(true); + })); + scheduleNow( + () -> testClusterNodes.randomMasterNodeSafe().client.admin().cluster().reroute( + new ClusterRerouteRequest().add( + new AllocateEmptyPrimaryAllocationCommand( + index, shardRouting.shardId().id(), otherNode.node.getName(), true) + ), noopListener())); + } else { + scheduleSoon(this); + } + } + )); + } + }; + scheduleNow(() -> testClusterNodes.stopNode(currentPrimaryNode)); + scheduleNow(maybeForceAllocate); + } + )))))); + + runUntil(() -> { + final Optional randomMaster = testClusterNodes.randomMasterNode(); + if (randomMaster.isPresent()) { + final SnapshotsInProgress snapshotsInProgress = + randomMaster.get().clusterService.state().custom(SnapshotsInProgress.TYPE); + return (snapshotsInProgress == null || snapshotsInProgress.entries().isEmpty()) && createdSnapshot.get(); + } + return false; + }, TimeUnit.MINUTES.toMillis(1L)); + + clearDisruptionsAndAwaitSync(); + + assertTrue(createdSnapshot.get()); + final SnapshotsInProgress finalSnapshotsInProgress = testClusterNodes.randomDataNodeSafe() + .clusterService.state().custom(SnapshotsInProgress.TYPE); + assertThat(finalSnapshotsInProgress.entries(), empty()); + final Repository repository = masterNode.repositoriesService.repository(repoName); + Collection snapshotIds = repository.getRepositoryData().getSnapshotIds(); + assertThat(snapshotIds, either(hasSize(1)).or(hasSize(0))); + } + + private void clearDisruptionsAndAwaitSync() { + testClusterNodes.clearNetworkDisruptions(); + runUntil(() -> { + final List versions = testClusterNodes.nodes.values().stream() + .map(n -> n.clusterService.state().version()).distinct().collect(Collectors.toList()); + return versions.size() == 1L; + }, TimeUnit.MINUTES.toMillis(1L)); + } + + private void disconnectOrRestartDataNode() { + if (randomBoolean()) { + disconnectRandomDataNode(); + } else { + testClusterNodes.randomDataNode().ifPresent(TestClusterNode::restart); + } + } + + private void disconnectOrRestartMasterNode() { + testClusterNodes.randomMasterNode().ifPresent(masterNode -> { + if (randomBoolean()) { + testClusterNodes.disconnectNode(masterNode); + } else { + masterNode.restart(); + } + }); + } + + private void disconnectRandomDataNode() { + testClusterNodes.randomDataNode().ifPresent(n -> testClusterNodes.disconnectNode(n)); + } + + private void startCluster() { + final ClusterState initialClusterState = + new ClusterState.Builder(ClusterName.DEFAULT).nodes(testClusterNodes.discoveryNodes()).build(); + testClusterNodes.nodes.values().forEach(testClusterNode -> testClusterNode.start(initialClusterState)); + + deterministicTaskQueue.advanceTime(); + deterministicTaskQueue.runAllRunnableTasks(); + + final VotingConfiguration votingConfiguration = new VotingConfiguration(testClusterNodes.nodes.values().stream().map(n -> n.node) + .filter(DiscoveryNode::isMasterNode).map(DiscoveryNode::getId).collect(Collectors.toSet())); + testClusterNodes.nodes.values().stream().filter(n -> n.node.isMasterNode()).forEach( + testClusterNode -> testClusterNode.coordinator.setInitialConfiguration(votingConfiguration)); + + runUntil( + () -> { + List masterNodeIds = testClusterNodes.nodes.values().stream() + .map(node -> node.clusterService.state().nodes().getMasterNodeId()) + .distinct().collect(Collectors.toList()); + return masterNodeIds.size() == 1 && masterNodeIds.contains(null) == false; + }, + TimeUnit.SECONDS.toMillis(30L) + ); + } + + private void runUntil(Supplier fulfilled, long timeout) { + final long start = 
deterministicTaskQueue.getCurrentTimeMillis(); + while (timeout > deterministicTaskQueue.getCurrentTimeMillis() - start) { + if (fulfilled.get()) { + return; + } + deterministicTaskQueue.runAllRunnableTasks(); + deterministicTaskQueue.advanceTime(); + } + fail("Condition wasn't fulfilled."); + } + + private void setupTestCluster(int masterNodes, int dataNodes) { + testClusterNodes = new TestClusterNodes(masterNodes, dataNodes); + startCluster(); + } + + private void scheduleSoon(Runnable runnable) { + deterministicTaskQueue.scheduleAt(deterministicTaskQueue.getCurrentTimeMillis() + randomLongBetween(0, 100L), runnable); + } + + private void scheduleNow(Runnable runnable) { + deterministicTaskQueue.scheduleNow(runnable); + } + + private static Settings defaultIndexSettings(int shards) { + // TODO: randomize replica count settings once recovery operations aren't blocking anymore + return Settings.builder() + .put(IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), shards) + .put(IndexMetaData.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0).build(); + } + + private static ActionListener assertNoFailureListener(Consumer consumer) { + return new ActionListener() { + @Override + public void onResponse(final T t) { + consumer.accept(t); + } + + @Override + public void onFailure(final Exception e) { + throw new AssertionError(e); + } + }; + } + + private static ActionListener assertNoFailureListener(Runnable r) { + return new ActionListener() { + @Override + public void onResponse(final T t) { + r.run(); + } + + @Override + public void onFailure(final Exception e) { + throw new AssertionError(e); + } + }; + } + + private static ActionListener noopListener() { + return new ActionListener() { + @Override + public void onResponse(final T t) { + } + + @Override + public void onFailure(final Exception e) { + } + }; + } + + /** + * Create a {@link Environment} with random path.home and path.repo + **/ + private Environment createEnvironment(String nodeName) { + return TestEnvironment.newEnvironment(Settings.builder() + .put(NODE_NAME_SETTING.getKey(), nodeName) + .put(PATH_HOME_SETTING.getKey(), tempDir.resolve(nodeName).toAbsolutePath()) + .put(Environment.PATH_REPO_SETTING.getKey(), tempDir.resolve("repo").toAbsolutePath()) + .putList(ClusterBootstrapService.INITIAL_MASTER_NODES_SETTING.getKey(), + ClusterBootstrapService.INITIAL_MASTER_NODES_SETTING.get(Settings.EMPTY)) + .build()); + } + + private static ClusterState stateForNode(ClusterState state, DiscoveryNode node) { + return ClusterState.builder(state).nodes(DiscoveryNodes.builder(state.nodes()).localNodeId(node.getId())).build(); + } + + private final class TestClusterNodes { + + // LinkedHashMap so we have deterministic ordering when iterating over the map in tests + private final Map nodes = new LinkedHashMap<>(); + + private DisconnectedNodes disruptedLinks = new DisconnectedNodes(); + + TestClusterNodes(int masterNodes, int dataNodes) { + for (int i = 0; i < masterNodes; ++i) { + nodes.computeIfAbsent("node" + i, nodeName -> { + try { + return newMasterNode(nodeName); + } catch (IOException e) { + throw new AssertionError(e); + } + }); + } + for (int i = 0; i < dataNodes; ++i) { + nodes.computeIfAbsent("data-node" + i, nodeName -> { + try { + return newDataNode(nodeName); + } catch (IOException e) { + throw new AssertionError(e); + } + }); + } + } + + public TestClusterNode nodeById(final String nodeId) { + return nodes.values().stream().filter(n -> n.node.getId().equals(nodeId)).findFirst() + .orElseThrow(() -> new AssertionError("Could 
not find node by id [" + nodeId + ']')); + } + + private TestClusterNode newMasterNode(String nodeName) throws IOException { + return newNode(nodeName, DiscoveryNode.Role.MASTER); + } + + private TestClusterNode newDataNode(String nodeName) throws IOException { + return newNode(nodeName, DiscoveryNode.Role.DATA); + } + + private TestClusterNode newNode(String nodeName, DiscoveryNode.Role role) throws IOException { + return new TestClusterNode( + new DiscoveryNode(nodeName, randomAlphaOfLength(10), buildNewFakeTransportAddress(), emptyMap(), + Collections.singleton(role), Version.CURRENT), this::getDisruption); + } + + public TestClusterNode randomMasterNodeSafe() { + return randomMasterNode().orElseThrow(() -> new AssertionError("Expected to find at least one connected master node")); + } + + public Optional randomMasterNode() { + // Select from sorted list of data-nodes here to not have deterministic behaviour + final List masterNodes = testClusterNodes.nodes.values().stream().filter(n -> n.node.isMasterNode()) + .sorted(Comparator.comparing(n -> n.node.getName())).collect(Collectors.toList()); + return masterNodes.isEmpty() ? Optional.empty() : Optional.of(randomFrom(masterNodes)); + } + + public void stopNode(TestClusterNode node) { + node.stop(); + nodes.remove(node.node.getName()); + } + + public TestClusterNode randomDataNodeSafe(String... excludedNames) { + return randomDataNode(excludedNames).orElseThrow(() -> new AssertionError("Could not find another data node.")); + } + + public Optional randomDataNode(String... excludedNames) { + // Select from sorted list of data-nodes here to not have deterministic behaviour + final List dataNodes = testClusterNodes.nodes.values().stream().filter(n -> n.node.isDataNode()) + .filter(n -> { + for (final String nodeName : excludedNames) { + if (n.node.getName().equals(nodeName)) { + return false; + } + } + return true; + }) + .sorted(Comparator.comparing(n -> n.node.getName())).collect(Collectors.toList()); + return dataNodes.isEmpty() ? Optional.empty() : Optional.ofNullable(randomFrom(dataNodes)); + } + + public void disconnectNode(TestClusterNode node) { + if (disruptedLinks.disconnected.contains(node.node.getName())) { + return; + } + testClusterNodes.nodes.values().forEach(n -> n.transportService.getConnectionManager().disconnectFromNode(node.node)); + disruptedLinks.disconnect(node.node.getName()); + } + + public void clearNetworkDisruptions() { + disruptedLinks.disconnected.forEach(nodeName -> { + if (testClusterNodes.nodes.containsKey(nodeName)) { + final DiscoveryNode node = testClusterNodes.nodes.get(nodeName).node; + testClusterNodes.nodes.values().forEach(n -> n.transportService.getConnectionManager().openConnection(node, null)); + } + }); + disruptedLinks.clear(); + } + + private NetworkDisruption.DisruptedLinks getDisruption() { + return disruptedLinks; + } + + /** + * Builds a {@link DiscoveryNodes} instance that holds the nodes in this test cluster. + * @return DiscoveryNodes + */ + public DiscoveryNodes discoveryNodes() { + DiscoveryNodes.Builder builder = DiscoveryNodes.builder(); + nodes.values().forEach(node -> builder.add(node.node)); + return builder.build(); + } + + /** + * Returns the {@link TestClusterNode} for the master node in the given {@link ClusterState}. 
+ * @param state ClusterState + * @return Master Node + */ + public TestClusterNode currentMaster(ClusterState state) { + TestClusterNode master = nodes.get(state.nodes().getMasterNode().getName()); + assertNotNull(master); + assertTrue(master.node.isMasterNode()); + return master; + } + } + + private final class TestClusterNode { + + private final Logger logger = LogManager.getLogger(TestClusterNode.class); + + private final NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(Stream.concat( + ClusterModule.getNamedWriteables().stream(), NetworkModule.getNamedWriteables().stream()).collect(Collectors.toList())); + + private final TransportService transportService; + + private final ClusterService clusterService; + + private final RepositoriesService repositoriesService; + + private final SnapshotsService snapshotsService; + + private final SnapshotShardsService snapshotShardsService; + + private final IndicesService indicesService; + + private final IndicesClusterStateService indicesClusterStateService; + + private final DiscoveryNode node; + + private final MasterService masterService; + + private final AllocationService allocationService; + + private final NodeClient client; + + private final NodeEnvironment nodeEnv; + + private final DisruptableMockTransport mockTransport; + + private final ThreadPool threadPool; + + private final Supplier disruption; + + private Coordinator coordinator; + + TestClusterNode(DiscoveryNode node, Supplier disruption) throws IOException { + this.disruption = disruption; + this.node = node; + final Environment environment = createEnvironment(node.getName()); + masterService = new FakeThreadPoolMasterService(node.getName(), "test", deterministicTaskQueue::scheduleNow); + final Settings settings = environment.settings(); + final ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + threadPool = deterministicTaskQueue.getThreadPool(); + clusterService = new ClusterService(settings, clusterSettings, masterService, + new ClusterApplierService(node.getName(), settings, clusterSettings, threadPool) { + @Override + protected PrioritizedEsThreadPoolExecutor createThreadPoolExecutor() { + return new MockSinglePrioritizingExecutor(node.getName(), deterministicTaskQueue); + } + }); + mockTransport = new DisruptableMockTransport(node, logger) { + @Override + protected ConnectionStatus getConnectionStatus(DiscoveryNode destination) { + return disruption.get().disrupt(node.getName(), destination.getName()) + ? 
ConnectionStatus.DISCONNECTED : ConnectionStatus.CONNECTED; + } + + @Override + protected Optional getDisruptableMockTransport(TransportAddress address) { + return testClusterNodes.nodes.values().stream().map(cn -> cn.mockTransport) + .filter(transport -> transport.getLocalNode().getAddress().equals(address)) + .findAny(); + } + + @Override + protected void execute(Runnable runnable) { + scheduleNow(CoordinatorTests.onNodeLog(getLocalNode(), runnable)); + } + + @Override + protected NamedWriteableRegistry writeableRegistry() { + return namedWriteableRegistry; + } + }; + transportService = mockTransport.createTransportService( + settings, deterministicTaskQueue.getThreadPool(runnable -> CoordinatorTests.onNodeLog(node, runnable)), + new TransportInterceptor() { + @Override + public TransportRequestHandler interceptHandler(String action, String executor, + boolean forceExecution, TransportRequestHandler actualHandler) { + // TODO: Remove this hack once recoveries are async and can be used in these tests + if (action.startsWith("internal:index/shard/recovery")) { + return (request, channel, task) -> scheduleSoon( + new AbstractRunnable() { + @Override + protected void doRun() throws Exception { + channel.sendResponse(new TransportException(new IOException("failed to recover shard"))); + } + + @Override + public void onFailure(final Exception e) { + throw new AssertionError(e); + } + }); + } else { + return actualHandler; + } + } + }, + a -> node, null, emptySet() + ); + final IndexNameExpressionResolver indexNameExpressionResolver = new IndexNameExpressionResolver(); + repositoriesService = new RepositoriesService( + settings, clusterService, transportService, + Collections.singletonMap(FsRepository.TYPE, metaData -> { + final Repository repository = new FsRepository(metaData, environment, xContentRegistry()) { + @Override + protected void assertSnapshotOrGenericThread() { + // eliminate thread name check as we create repo in the test thread + } + }; + repository.start(); + return repository; + } + ), + emptyMap(), + threadPool + ); + snapshotsService = + new SnapshotsService(settings, clusterService, indexNameExpressionResolver, repositoriesService, threadPool); + nodeEnv = new NodeEnvironment(settings, environment); + final NamedXContentRegistry namedXContentRegistry = new NamedXContentRegistry(Collections.emptyList()); + final ScriptService scriptService = new ScriptService(settings, emptyMap(), emptyMap()); + client = new NodeClient(settings, threadPool); + allocationService = ESAllocationTestCase.createAllocationService(settings); + final IndexScopedSettings indexScopedSettings = + new IndexScopedSettings(settings, IndexScopedSettings.BUILT_IN_INDEX_SETTINGS); + indicesService = new IndicesService( + settings, + mock(PluginsService.class), + nodeEnv, + namedXContentRegistry, + new AnalysisRegistry(environment, emptyMap(), emptyMap(), emptyMap(), emptyMap(), emptyMap(), + emptyMap(), emptyMap(), emptyMap(), emptyMap()), + indexNameExpressionResolver, + new MapperRegistry(emptyMap(), emptyMap(), MapperPlugin.NOOP_FIELD_FILTER), + namedWriteableRegistry, + threadPool, + indexScopedSettings, + new NoneCircuitBreakerService(), + new BigArrays(new PageCacheRecycler(settings), null, "test"), + scriptService, + client, + new MetaStateService(nodeEnv, namedXContentRegistry), + Collections.emptyList(), + emptyMap() + ); + final RecoverySettings recoverySettings = new RecoverySettings(settings, clusterSettings); + final ActionFilters actionFilters = new ActionFilters(emptySet()); + 
snapshotShardsService = new SnapshotShardsService( + settings, clusterService, snapshotsService, threadPool, + transportService, indicesService, actionFilters, indexNameExpressionResolver); + final ShardStateAction shardStateAction = new ShardStateAction( + clusterService, transportService, allocationService, + new RoutingService(clusterService, allocationService), + threadPool + ); + indicesClusterStateService = new IndicesClusterStateService( + settings, + indicesService, + clusterService, + threadPool, + new PeerRecoveryTargetService(threadPool, transportService, recoverySettings, clusterService), + shardStateAction, + new NodeMappingRefreshAction(transportService, new MetaDataMappingService(clusterService, indicesService)), + repositoriesService, + mock(SearchService.class), + new SyncedFlushService(indicesService, clusterService, transportService, indexNameExpressionResolver), + new PeerRecoverySourceService(transportService, indicesService, recoverySettings), + snapshotShardsService, + new PrimaryReplicaSyncer( + transportService, + new TransportResyncReplicationAction( + settings, + transportService, + clusterService, + indicesService, + threadPool, + shardStateAction, + actionFilters, + indexNameExpressionResolver)), + new GlobalCheckpointSyncAction( + settings, + transportService, + clusterService, + indicesService, + threadPool, + shardStateAction, + actionFilters, + indexNameExpressionResolver), + new RetentionLeaseSyncAction( + settings, + transportService, + clusterService, + indicesService, + threadPool, + shardStateAction, + actionFilters, + indexNameExpressionResolver)); + Map actions = new HashMap<>(); + actions.put(CreateIndexAction.INSTANCE, + new TransportCreateIndexAction( + transportService, clusterService, threadPool, + new MetaDataCreateIndexService(settings, clusterService, indicesService, + allocationService, new AliasValidator(), environment, indexScopedSettings, + threadPool, namedXContentRegistry, false), + actionFilters, indexNameExpressionResolver + )); + actions.put(PutRepositoryAction.INSTANCE, + new TransportPutRepositoryAction( + transportService, clusterService, repositoriesService, threadPool, + actionFilters, indexNameExpressionResolver + )); + actions.put(CreateSnapshotAction.INSTANCE, + new TransportCreateSnapshotAction( + transportService, clusterService, threadPool, + snapshotsService, actionFilters, indexNameExpressionResolver + )); + actions.put(ClusterRerouteAction.INSTANCE, + new TransportClusterRerouteAction(transportService, clusterService, threadPool, allocationService, + actionFilters, indexNameExpressionResolver)); + actions.put(ClusterStateAction.INSTANCE, + new TransportClusterStateAction(transportService, clusterService, threadPool, + actionFilters, indexNameExpressionResolver)); + actions.put(IndicesShardStoresAction.INSTANCE, + new TransportIndicesShardStoresAction( + transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, + new TransportNodesListGatewayStartedShards(settings, + threadPool, clusterService, transportService, actionFilters, nodeEnv, indicesService, namedXContentRegistry)) + ); + actions.put(DeleteSnapshotAction.INSTANCE, + new TransportDeleteSnapshotAction( + transportService, clusterService, threadPool, + snapshotsService, actionFilters, indexNameExpressionResolver + )); + client.initialize(actions, () -> clusterService.localNode().getId(), transportService.getRemoteClusterService()); + } + + public void restart() { + testClusterNodes.disconnectNode(this); + final ClusterState oldState 
= this.clusterService.state(); + stop(); + testClusterNodes.nodes.remove(node.getName()); + scheduleSoon(() -> { + try { + final TestClusterNode restartedNode = new TestClusterNode( + new DiscoveryNode(node.getName(), node.getId(), node.getAddress(), emptyMap(), + node.getRoles(), Version.CURRENT), disruption); + testClusterNodes.nodes.put(node.getName(), restartedNode); + restartedNode.start(oldState); + } catch (IOException e) { + throw new AssertionError(e); + } + }); + } + + public void stop() { + testClusterNodes.disconnectNode(this); + indicesService.close(); + clusterService.close(); + indicesClusterStateService.close(); + if (coordinator != null) { + coordinator.close(); + } + nodeEnv.close(); + } + + public void start(ClusterState initialState) { + transportService.start(); + transportService.acceptIncomingRequests(); + snapshotsService.start(); + snapshotShardsService.start(); + final CoordinationState.PersistedState persistedState = + new InMemoryPersistedState(initialState.term(), stateForNode(initialState, node)); + coordinator = new Coordinator(node.getName(), clusterService.getSettings(), + clusterService.getClusterSettings(), transportService, namedWriteableRegistry, + allocationService, masterService, () -> persistedState, + hostsResolver -> testClusterNodes.nodes.values().stream().filter(n -> n.node.isMasterNode()) + .map(n -> n.node.getAddress()).collect(Collectors.toList()), + clusterService.getClusterApplierService(), Collections.emptyList(), random()); + masterService.setClusterStatePublisher(coordinator); + coordinator.start(); + masterService.start(); + clusterService.getClusterApplierService().setNodeConnectionsService( + new NodeConnectionsService(clusterService.getSettings(), threadPool, transportService) { + @Override + public void connectToNodes(DiscoveryNodes discoveryNodes) { + // override this method as it does blocking calls + boolean callSuper = true; + for (final DiscoveryNode node : discoveryNodes) { + try { + transportService.connectToNode(node); + } catch (Exception e) { + callSuper = false; + } + } + if (callSuper) { + super.connectToNodes(discoveryNodes); + } + } + }); + clusterService.getClusterApplierService().start(); + indicesService.start(); + indicesClusterStateService.start(); + coordinator.startInitialJoin(); + } + } + + private final class DisconnectedNodes extends NetworkDisruption.DisruptedLinks { + + /** + * Node names that are disconnected from all other nodes. + */ + private final Set disconnected = new HashSet<>(); + + @Override + public boolean disrupt(String node1, String node2) { + if (node1.equals(node2)) { + return false; + } + // Check if both nodes are still part of the cluster + if (testClusterNodes.nodes.containsKey(node1) == false + || testClusterNodes.nodes.containsKey(node2) == false) { + return true; + } + return disconnected.contains(node1) || disconnected.contains(node2); + } + + public void disconnect(String node) { + disconnected.add(node); + } + + public void clear() { + disconnected.clear(); + } + } +} diff --git a/server/src/test/java/org/elasticsearch/snapshots/SnapshotsServiceTests.java b/server/src/test/java/org/elasticsearch/snapshots/SnapshotsServiceTests.java deleted file mode 100644 index 8b750939238cb..0000000000000 --- a/server/src/test/java/org/elasticsearch/snapshots/SnapshotsServiceTests.java +++ /dev/null @@ -1,619 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. 
See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.snapshots; - -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.elasticsearch.Version; -import org.elasticsearch.action.Action; -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryAction; -import org.elasticsearch.action.admin.cluster.repositories.put.TransportPutRepositoryAction; -import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotAction; -import org.elasticsearch.action.admin.cluster.snapshots.create.TransportCreateSnapshotAction; -import org.elasticsearch.action.admin.cluster.snapshots.delete.DeleteSnapshotAction; -import org.elasticsearch.action.admin.cluster.snapshots.delete.DeleteSnapshotRequest; -import org.elasticsearch.action.admin.cluster.snapshots.delete.TransportDeleteSnapshotAction; -import org.elasticsearch.action.admin.indices.create.CreateIndexAction; -import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; -import org.elasticsearch.action.admin.indices.create.TransportCreateIndexAction; -import org.elasticsearch.action.resync.TransportResyncReplicationAction; -import org.elasticsearch.action.support.ActionFilters; -import org.elasticsearch.action.support.ActiveShardCount; -import org.elasticsearch.action.support.TransportAction; -import org.elasticsearch.client.node.NodeClient; -import org.elasticsearch.cluster.ClusterModule; -import org.elasticsearch.cluster.ClusterName; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.ESAllocationTestCase; -import org.elasticsearch.cluster.NodeConnectionsService; -import org.elasticsearch.cluster.SnapshotsInProgress; -import org.elasticsearch.cluster.action.index.NodeMappingRefreshAction; -import org.elasticsearch.cluster.action.shard.ShardStateAction; -import org.elasticsearch.cluster.coordination.CoordinationMetaData.VotingConfiguration; -import org.elasticsearch.cluster.coordination.CoordinationState; -import org.elasticsearch.cluster.coordination.Coordinator; -import org.elasticsearch.cluster.coordination.CoordinatorTests; -import org.elasticsearch.cluster.coordination.DeterministicTaskQueue; -import org.elasticsearch.cluster.coordination.InMemoryPersistedState; -import org.elasticsearch.cluster.coordination.MockSinglePrioritizingExecutor; -import org.elasticsearch.cluster.coordination.ClusterBootstrapService; -import org.elasticsearch.cluster.metadata.AliasValidator; -import org.elasticsearch.cluster.metadata.IndexMetaData; -import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; -import org.elasticsearch.cluster.metadata.MetaDataCreateIndexService; -import org.elasticsearch.cluster.metadata.MetaDataMappingService; -import org.elasticsearch.cluster.node.DiscoveryNode; -import 
org.elasticsearch.cluster.node.DiscoveryNodes; -import org.elasticsearch.cluster.routing.RoutingService; -import org.elasticsearch.cluster.routing.allocation.AllocationService; -import org.elasticsearch.cluster.service.ClusterApplierService; -import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.cluster.service.MasterService; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.common.settings.ClusterSettings; -import org.elasticsearch.common.settings.IndexScopedSettings; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.TransportAddress; -import org.elasticsearch.common.util.BigArrays; -import org.elasticsearch.common.util.PageCacheRecycler; -import org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor; -import org.elasticsearch.common.xcontent.NamedXContentRegistry; -import org.elasticsearch.env.Environment; -import org.elasticsearch.env.NodeEnvironment; -import org.elasticsearch.env.TestEnvironment; -import org.elasticsearch.gateway.MetaStateService; -import org.elasticsearch.index.analysis.AnalysisRegistry; -import org.elasticsearch.index.seqno.GlobalCheckpointSyncAction; -import org.elasticsearch.index.seqno.RetentionLeaseSyncAction; -import org.elasticsearch.index.shard.PrimaryReplicaSyncer; -import org.elasticsearch.indices.IndicesService; -import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; -import org.elasticsearch.indices.cluster.FakeThreadPoolMasterService; -import org.elasticsearch.indices.cluster.IndicesClusterStateService; -import org.elasticsearch.indices.flush.SyncedFlushService; -import org.elasticsearch.indices.mapper.MapperRegistry; -import org.elasticsearch.indices.recovery.PeerRecoverySourceService; -import org.elasticsearch.indices.recovery.PeerRecoveryTargetService; -import org.elasticsearch.indices.recovery.RecoverySettings; -import org.elasticsearch.plugins.MapperPlugin; -import org.elasticsearch.plugins.PluginsService; -import org.elasticsearch.repositories.RepositoriesService; -import org.elasticsearch.repositories.Repository; -import org.elasticsearch.repositories.fs.FsRepository; -import org.elasticsearch.script.ScriptService; -import org.elasticsearch.search.SearchService; -import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.test.disruption.DisruptableMockTransport; -import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.TransportService; -import org.junit.After; -import org.junit.Before; - -import java.io.IOException; -import java.io.UncheckedIOException; -import java.nio.file.Path; -import java.util.Collection; -import java.util.Collections; -import java.util.HashMap; -import java.util.LinkedHashMap; -import java.util.List; -import java.util.Map; -import java.util.Optional; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.function.Supplier; -import java.util.stream.Collectors; - -import static java.util.Collections.emptyMap; -import static java.util.Collections.emptySet; -import static org.elasticsearch.env.Environment.PATH_HOME_SETTING; -import static org.elasticsearch.node.Node.NODE_NAME_SETTING; -import static org.elasticsearch.transport.TransportService.NOOP_TRANSPORT_INTERCEPTOR; -import static org.hamcrest.Matchers.containsInAnyOrder; -import static org.hamcrest.Matchers.hasSize; -import static org.mockito.Mockito.mock; - -public class SnapshotsServiceTests extends ESTestCase { - - private 
DeterministicTaskQueue deterministicTaskQueue; - - private TestClusterNodes testClusterNodes; - - private Path tempDir; - - @Before - public void createServices() { - tempDir = createTempDir(); - deterministicTaskQueue = - new DeterministicTaskQueue(Settings.builder().put(NODE_NAME_SETTING.getKey(), "shared").build(), random()); - } - - @After - public void stopServices() { - testClusterNodes.nodes.values().forEach( - n -> { - n.indicesService.close(); - n.clusterService.close(); - n.indicesClusterStateService.close(); - n.nodeEnv.close(); - n.coordinator.close(); - } - ); - } - - public void testSuccessfulSnapshot() { - setupTestCluster(randomFrom(1, 3, 5), randomIntBetween(2, 10)); - - String repoName = "repo"; - String snapshotName = "snapshot"; - final String index = "test"; - - final int shards = randomIntBetween(1, 10); - - TestClusterNode masterNode = - testClusterNodes.currentMaster(testClusterNodes.nodes.values().iterator().next().clusterService.state()); - final AtomicBoolean createdSnapshot = new AtomicBoolean(); - masterNode.client.admin().cluster().preparePutRepository(repoName) - .setType(FsRepository.TYPE).setSettings(Settings.builder().put("location", randomAlphaOfLength(10))) - .execute( - assertNoFailureListener( - () -> masterNode.client.admin().indices().create( - new CreateIndexRequest(index).waitForActiveShards(ActiveShardCount.ALL).settings( - Settings.builder() - .put(IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), shards) - .put(IndexMetaData.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0)), - assertNoFailureListener( - () -> masterNode.client.admin().cluster().prepareCreateSnapshot(repoName, snapshotName) - .execute(assertNoFailureListener(() -> createdSnapshot.set(true))))))); - - deterministicTaskQueue.runAllRunnableTasks(); - - assertTrue(createdSnapshot.get()); - SnapshotsInProgress finalSnapshotsInProgress = masterNode.clusterService.state().custom(SnapshotsInProgress.TYPE); - assertFalse(finalSnapshotsInProgress.entries().stream().anyMatch(entry -> entry.state().completed() == false)); - final Repository repository = masterNode.repositoriesService.repository(repoName); - Collection snapshotIds = repository.getRepositoryData().getSnapshotIds(); - assertThat(snapshotIds, hasSize(1)); - - final SnapshotInfo snapshotInfo = repository.getSnapshotInfo(snapshotIds.iterator().next()); - assertEquals(SnapshotState.SUCCESS, snapshotInfo.state()); - assertThat(snapshotInfo.indices(), containsInAnyOrder(index)); - assertEquals(shards, snapshotInfo.successfulShards()); - assertEquals(0, snapshotInfo.failedShards()); - } - - public void testConcurrentSnapshotCreateAndDelete() { - setupTestCluster(randomFrom(1, 3, 5), randomIntBetween(2, 10)); - - String repoName = "repo"; - String snapshotName = "snapshot"; - final String index = "test"; - - final int shards = randomIntBetween(1, 10); - - TestClusterNode masterNode = - testClusterNodes.currentMaster(testClusterNodes.nodes.values().iterator().next().clusterService.state()); - final AtomicBoolean createdSnapshot = new AtomicBoolean(); - masterNode.client.admin().cluster().preparePutRepository(repoName) - .setType(FsRepository.TYPE).setSettings(Settings.builder().put("location", randomAlphaOfLength(10))) - .execute( - assertNoFailureListener( - () -> masterNode.client.admin().indices().create( - new CreateIndexRequest(index).waitForActiveShards(ActiveShardCount.ALL).settings( - Settings.builder() - .put(IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), shards) - 
.put(IndexMetaData.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0)), - assertNoFailureListener( - () -> masterNode.client.admin().cluster().prepareCreateSnapshot(repoName, snapshotName) - .execute(assertNoFailureListener( - () -> masterNode.client.admin().cluster().deleteSnapshot( - new DeleteSnapshotRequest(repoName, snapshotName), - assertNoFailureListener(() -> masterNode.client.admin().cluster() - .prepareCreateSnapshot(repoName, snapshotName).execute( - assertNoFailureListener(() -> createdSnapshot.set(true)) - ))))))))); - - deterministicTaskQueue.runAllRunnableTasks(); - - assertTrue(createdSnapshot.get()); - SnapshotsInProgress finalSnapshotsInProgress = masterNode.clusterService.state().custom(SnapshotsInProgress.TYPE); - assertFalse(finalSnapshotsInProgress.entries().stream().anyMatch(entry -> entry.state().completed() == false)); - final Repository repository = masterNode.repositoriesService.repository(repoName); - Collection snapshotIds = repository.getRepositoryData().getSnapshotIds(); - assertThat(snapshotIds, hasSize(1)); - - final SnapshotInfo snapshotInfo = repository.getSnapshotInfo(snapshotIds.iterator().next()); - assertEquals(SnapshotState.SUCCESS, snapshotInfo.state()); - assertThat(snapshotInfo.indices(), containsInAnyOrder(index)); - assertEquals(shards, snapshotInfo.successfulShards()); - assertEquals(0, snapshotInfo.failedShards()); - } - - private void startCluster() { - final ClusterState initialClusterState = - new ClusterState.Builder(ClusterName.DEFAULT).nodes(testClusterNodes.randomDiscoveryNodes()).build(); - testClusterNodes.nodes.values().forEach(testClusterNode -> testClusterNode.start(initialClusterState)); - - deterministicTaskQueue.advanceTime(); - deterministicTaskQueue.runAllRunnableTasks(); - - final VotingConfiguration votingConfiguration = new VotingConfiguration(testClusterNodes.nodes.values().stream().map(n -> n.node) - .filter(DiscoveryNode::isMasterNode).map(DiscoveryNode::getId).collect(Collectors.toSet())); - testClusterNodes.nodes.values().stream().filter(n -> n.node.isMasterNode()).forEach( - testClusterNode -> testClusterNode.coordinator.setInitialConfiguration(votingConfiguration)); - - runUntil( - () -> { - List masterNodeIds = testClusterNodes.nodes.values().stream() - .map(node -> node.clusterService.state().nodes().getMasterNodeId()) - .distinct().collect(Collectors.toList()); - return masterNodeIds.size() == 1 && masterNodeIds.contains(null) == false; - }, - TimeUnit.SECONDS.toMillis(30L) - ); - } - - private void runUntil(Supplier fulfilled, long timeout) { - final long start = deterministicTaskQueue.getCurrentTimeMillis(); - while (timeout > deterministicTaskQueue.getCurrentTimeMillis() - start) { - deterministicTaskQueue.runAllRunnableTasks(); - if (fulfilled.get()) { - return; - } - deterministicTaskQueue.advanceTime(); - } - fail("Condition wasn't fulfilled."); - } - - private void setupTestCluster(int masterNodes, int dataNodes) { - testClusterNodes = new TestClusterNodes(masterNodes, dataNodes); - startCluster(); - } - - private static ActionListener assertNoFailureListener(Runnable r) { - return new ActionListener() { - @Override - public void onResponse(final T t) { - r.run(); - } - - @Override - public void onFailure(final Exception e) { - throw new AssertionError(e); - } - }; - } - - /** - * Create a {@link Environment} with random path.home and path.repo - **/ - private Environment createEnvironment(String nodeName) { - return TestEnvironment.newEnvironment(Settings.builder() - .put(NODE_NAME_SETTING.getKey(), nodeName) - 
.put(PATH_HOME_SETTING.getKey(), tempDir.resolve(nodeName).toAbsolutePath()) - .put(Environment.PATH_REPO_SETTING.getKey(), tempDir.resolve("repo").toAbsolutePath()) - .putList(ClusterBootstrapService.INITIAL_MASTER_NODES_SETTING.getKey(), - ClusterBootstrapService.INITIAL_MASTER_NODES_SETTING.get(Settings.EMPTY)) - .build()); - } - - private TestClusterNode newMasterNode(String nodeName) throws IOException { - return newNode(nodeName, DiscoveryNode.Role.MASTER); - } - - private TestClusterNode newDataNode(String nodeName) throws IOException { - return newNode(nodeName, DiscoveryNode.Role.DATA); - } - - private TestClusterNode newNode(String nodeName, DiscoveryNode.Role role) throws IOException { - return new TestClusterNode( - new DiscoveryNode(nodeName, randomAlphaOfLength(10), buildNewFakeTransportAddress(), emptyMap(), - Collections.singleton(role), Version.CURRENT) - ); - } - - private static ClusterState stateForNode(ClusterState state, DiscoveryNode node) { - return ClusterState.builder(state).nodes(DiscoveryNodes.builder(state.nodes()).localNodeId(node.getId())).build(); - } - - private final class TestClusterNodes { - - // LinkedHashMap so we have deterministic ordering when iterating over the map in tests - private final Map nodes = new LinkedHashMap<>(); - - TestClusterNodes(int masterNodes, int dataNodes) { - for (int i = 0; i < masterNodes; ++i) { - nodes.computeIfAbsent("node" + i, nodeName -> { - try { - return SnapshotsServiceTests.this.newMasterNode(nodeName); - } catch (IOException e) { - throw new UncheckedIOException(e); - } - }); - } - for (int i = 0; i < dataNodes; ++i) { - nodes.computeIfAbsent("data-node" + i, nodeName -> { - try { - return SnapshotsServiceTests.this.newDataNode(nodeName); - } catch (IOException e) { - throw new UncheckedIOException(e); - } - }); - } - } - - /** - * Builds a {@link DiscoveryNodes} instance that has one master eligible node set as its master - * by random. - * @return DiscoveryNodes with set master node - */ - public DiscoveryNodes randomDiscoveryNodes() { - DiscoveryNodes.Builder builder = DiscoveryNodes.builder(); - nodes.values().forEach(node -> builder.add(node.node)); - return builder.build(); - } - - /** - * Returns the {@link TestClusterNode} for the master node in the given {@link ClusterState}. 
- * @param state ClusterState - * @return Master Node - */ - public TestClusterNode currentMaster(ClusterState state) { - TestClusterNode master = nodes.get(state.nodes().getMasterNode().getName()); - assertNotNull(master); - assertTrue(master.node.isMasterNode()); - return master; - } - } - - private final class TestClusterNode { - - private final Logger logger = LogManager.getLogger(TestClusterNode.class); - - private final NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(ClusterModule.getNamedWriteables()); - - private final TransportService transportService; - - private final ClusterService clusterService; - - private final RepositoriesService repositoriesService; - - private final SnapshotsService snapshotsService; - - private final SnapshotShardsService snapshotShardsService; - - private final IndicesService indicesService; - - private final IndicesClusterStateService indicesClusterStateService; - - private final DiscoveryNode node; - - private final MasterService masterService; - - private final AllocationService allocationService; - - private final NodeClient client; - - private final NodeEnvironment nodeEnv; - - private final DisruptableMockTransport mockTransport; - - private final ThreadPool threadPool; - - private Coordinator coordinator; - - TestClusterNode(DiscoveryNode node) throws IOException { - this.node = node; - final Environment environment = createEnvironment(node.getName()); - masterService = new FakeThreadPoolMasterService(node.getName(), "test", deterministicTaskQueue::scheduleNow); - final Settings settings = environment.settings(); - final ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); - threadPool = deterministicTaskQueue.getThreadPool(); - clusterService = new ClusterService(settings, clusterSettings, masterService, - new ClusterApplierService(node.getName(), settings, clusterSettings, threadPool) { - @Override - protected PrioritizedEsThreadPoolExecutor createThreadPoolExecutor() { - return new MockSinglePrioritizingExecutor(node.getName(), deterministicTaskQueue); - } - }); - mockTransport = new DisruptableMockTransport(node, logger) { - @Override - protected ConnectionStatus getConnectionStatus(DiscoveryNode destination) { - return ConnectionStatus.CONNECTED; - } - - @Override - protected Optional getDisruptableMockTransport(TransportAddress address) { - return testClusterNodes.nodes.values().stream().map(cn -> cn.mockTransport) - .filter(transport -> transport.getLocalNode().getAddress().equals(address)) - .findAny(); - } - - @Override - protected void execute(Runnable runnable) { - deterministicTaskQueue.scheduleNow(CoordinatorTests.onNodeLog(getLocalNode(), runnable)); - } - }; - transportService = mockTransport.createTransportService( - settings, deterministicTaskQueue.getThreadPool(runnable -> CoordinatorTests.onNodeLog(node, runnable)), - NOOP_TRANSPORT_INTERCEPTOR, - a -> node, null, emptySet() - ); - final IndexNameExpressionResolver indexNameExpressionResolver = new IndexNameExpressionResolver(); - repositoriesService = new RepositoriesService( - settings, clusterService, transportService, - Collections.singletonMap(FsRepository.TYPE, metaData -> { - final Repository repository = new FsRepository(metaData, environment, xContentRegistry()) { - @Override - protected void assertSnapshotOrGenericThread() { - // eliminate thread name check as we create repo in the test thread - } - }; - repository.start(); - return repository; - } - ), - emptyMap(), - threadPool - ); - 
snapshotsService = - new SnapshotsService(settings, clusterService, indexNameExpressionResolver, repositoriesService, threadPool); - nodeEnv = new NodeEnvironment(settings, environment); - final NamedXContentRegistry namedXContentRegistry = new NamedXContentRegistry(Collections.emptyList()); - final ScriptService scriptService = new ScriptService(settings, emptyMap(), emptyMap()); - client = new NodeClient(settings, threadPool); - allocationService = ESAllocationTestCase.createAllocationService(settings); - final IndexScopedSettings indexScopedSettings = - new IndexScopedSettings(settings, IndexScopedSettings.BUILT_IN_INDEX_SETTINGS); - indicesService = new IndicesService( - settings, - mock(PluginsService.class), - nodeEnv, - namedXContentRegistry, - new AnalysisRegistry(environment, emptyMap(), emptyMap(), emptyMap(), emptyMap(), emptyMap(), - emptyMap(), emptyMap(), emptyMap(), emptyMap()), - indexNameExpressionResolver, - new MapperRegistry(emptyMap(), emptyMap(), MapperPlugin.NOOP_FIELD_FILTER), - namedWriteableRegistry, - threadPool, - indexScopedSettings, - new NoneCircuitBreakerService(), - new BigArrays(new PageCacheRecycler(settings), null, "test"), - scriptService, - client, - new MetaStateService(nodeEnv, namedXContentRegistry), - Collections.emptyList(), - emptyMap() - ); - final RecoverySettings recoverySettings = new RecoverySettings(settings, clusterSettings); - final ActionFilters actionFilters = new ActionFilters(emptySet()); - snapshotShardsService = new SnapshotShardsService( - settings, clusterService, snapshotsService, threadPool, - transportService, indicesService, actionFilters, indexNameExpressionResolver); - final ShardStateAction shardStateAction = new ShardStateAction( - clusterService, transportService, allocationService, - new RoutingService(clusterService, allocationService), - deterministicTaskQueue.getThreadPool() - ); - indicesClusterStateService = new IndicesClusterStateService( - settings, - indicesService, - clusterService, - threadPool, - new PeerRecoveryTargetService( - deterministicTaskQueue.getThreadPool(), transportService, recoverySettings, clusterService), - shardStateAction, - new NodeMappingRefreshAction(transportService, new MetaDataMappingService(clusterService, indicesService)), - repositoriesService, - mock(SearchService.class), - new SyncedFlushService(indicesService, clusterService, transportService, indexNameExpressionResolver), - new PeerRecoverySourceService(transportService, indicesService, recoverySettings), - snapshotShardsService, - new PrimaryReplicaSyncer( - transportService, - new TransportResyncReplicationAction( - settings, - transportService, - clusterService, - indicesService, - threadPool, - shardStateAction, - actionFilters, - indexNameExpressionResolver)), - new GlobalCheckpointSyncAction( - settings, - transportService, - clusterService, - indicesService, - threadPool, - shardStateAction, - actionFilters, - indexNameExpressionResolver), - new RetentionLeaseSyncAction( - settings, - transportService, - clusterService, - indicesService, - threadPool, - shardStateAction, - actionFilters, - indexNameExpressionResolver)); - Map actions = new HashMap<>(); - actions.put(CreateIndexAction.INSTANCE, - new TransportCreateIndexAction( - transportService, clusterService, threadPool, - new MetaDataCreateIndexService(settings, clusterService, indicesService, - allocationService, new AliasValidator(), environment, indexScopedSettings, - threadPool, namedXContentRegistry, false), - actionFilters, indexNameExpressionResolver - )); - 
actions.put(PutRepositoryAction.INSTANCE, - new TransportPutRepositoryAction( - transportService, clusterService, repositoriesService, threadPool, - actionFilters, indexNameExpressionResolver - )); - actions.put(CreateSnapshotAction.INSTANCE, - new TransportCreateSnapshotAction( - transportService, clusterService, threadPool, - snapshotsService, actionFilters, indexNameExpressionResolver - )); - actions.put(DeleteSnapshotAction.INSTANCE, - new TransportDeleteSnapshotAction( - transportService, clusterService, threadPool, - snapshotsService, actionFilters, indexNameExpressionResolver - )); - client.initialize(actions, () -> clusterService.localNode().getId(), transportService.getRemoteClusterService()); - } - - public void start(ClusterState initialState) { - transportService.start(); - transportService.acceptIncomingRequests(); - snapshotsService.start(); - snapshotShardsService.start(); - final CoordinationState.PersistedState persistedState = - new InMemoryPersistedState(0L, stateForNode(initialState, node)); - coordinator = new Coordinator(node.getName(), clusterService.getSettings(), - clusterService.getClusterSettings(), transportService, namedWriteableRegistry, - allocationService, masterService, () -> persistedState, - hostsResolver -> testClusterNodes.nodes.values().stream().filter(n -> n.node.isMasterNode()) - .map(n -> n.node.getAddress()).collect(Collectors.toList()), - clusterService.getClusterApplierService(), Collections.emptyList(), random()); - masterService.setClusterStatePublisher(coordinator); - coordinator.start(); - masterService.start(); - clusterService.getClusterApplierService().setNodeConnectionsService( - new NodeConnectionsService(clusterService.getSettings(), threadPool, transportService) { - @Override - public void connectToNodes(DiscoveryNodes discoveryNodes) { - // override this method as it does blocking calls - for (final DiscoveryNode node : discoveryNodes) { - transportService.connectToNode(node); - } - super.connectToNodes(discoveryNodes); - } - }); - clusterService.getClusterApplierService().start(); - indicesService.start(); - indicesClusterStateService.start(); - coordinator.startInitialJoin(); - } - } -} diff --git a/test/framework/src/main/java/org/elasticsearch/test/disruption/DisruptableMockTransport.java b/test/framework/src/main/java/org/elasticsearch/test/disruption/DisruptableMockTransport.java index 2a1101c6d7986..d750a8256b8bc 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/disruption/DisruptableMockTransport.java +++ b/test/framework/src/main/java/org/elasticsearch/test/disruption/DisruptableMockTransport.java @@ -21,10 +21,8 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.cluster.ClusterModule; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.lease.Releasable; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; @@ -72,7 +70,6 @@ protected final void execute(String action, Runnable runnable) { if (action.equals(HANDSHAKE_ACTION_NAME)) { runnable.run(); } else { - execute(runnable); } } @@ -254,10 +251,6 @@ public String toString() { } } - private NamedWriteableRegistry writeableRegistry() { - return new NamedWriteableRegistry(ClusterModule.getNamedWriteables()); - } - public enum ConnectionStatus 
{ CONNECTED, DISCONNECTED, // network requests to or from this node throw a ConnectTransportException diff --git a/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransport.java b/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransport.java index ddfcc29c750ce..a6dbd1561936e 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransport.java +++ b/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransport.java @@ -20,6 +20,7 @@ package org.elasticsearch.test.transport; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.cluster.ClusterModule; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Randomness; @@ -29,6 +30,8 @@ import org.elasticsearch.common.component.LifecycleComponent; import org.elasticsearch.common.component.LifecycleListener; import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.lease.Releasable; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; @@ -96,7 +99,8 @@ public void handleResponse(final long reque final Response deliveredResponse; try (BytesStreamOutput output = new BytesStreamOutput()) { response.writeTo(output); - deliveredResponse = transportResponseHandler.read(output.bytes().streamInput()); + deliveredResponse = transportResponseHandler.read( + new NamedWriteableAwareStreamInput(output.bytes().streamInput(), writeableRegistry())); } catch (IOException | UnsupportedOperationException e) { throw new AssertionError("failed to serialize/deserialize response " + response, e); } @@ -275,4 +279,8 @@ public boolean removeMessageListener(TransportMessageListener listener) { } return false; } + + protected NamedWriteableRegistry writeableRegistry() { + return new NamedWriteableRegistry(ClusterModule.getNamedWriteables()); + } } From 6fcbd07420f1f779a58fa5d49b834964896a2ac1 Mon Sep 17 00:00:00 2001 From: Tim Vernum Date: Fri, 1 Feb 2019 17:59:13 +1100 Subject: [PATCH 08/54] Remove heuristics that enable security on trial licenses (#38075) In 6.3 trial licenses were changed to default to security disabled, and we added some heuristics to detect when security should automatically be enabled if `xpack.security.enabled` was not set. This change removes those heuristics, and requires that security be explicitly enabled (via the `xpack.security.enabled` setting) for trial licenses.
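For illustration only (this sketch is not part of the change itself), the new rule can be stated in test form. It relies on the `XPackLicenseState(Settings)` constructor, the `isAuthAllowed()` check, and the `XPackSettings` constants that appear in the diff below, plus the fact that a fresh license state starts in trial mode; the import location of `XPackSettings` is assumed.

// Hypothetical sketch, mirroring the updated unit tests below.
// import org.elasticsearch.common.settings.Settings;
// import org.elasticsearch.license.XPackLicenseState;
// import org.elasticsearch.xpack.core.XPackSettings;        // package assumed
Settings explicit = Settings.builder()
    .put(XPackSettings.SECURITY_ENABLED.getKey(), true)      // explicit opt-in is now required
    .build();
assert new XPackLicenseState(explicit).isAuthAllowed();      // trial + explicit setting -> security on

Settings tlsOnly = Settings.builder()
    .put(XPackSettings.TRANSPORT_SSL_ENABLED.getKey(), true) // formerly triggered the heuristic
    .build();
assert new XPackLicenseState(tlsOnly).isAuthAllowed() == false; // TLS alone no longer enables security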
Relates: #38009 --- .../migration/migrate_7_0/settings.asciidoc | 16 ++++++ .../license/XPackLicenseState.java | 44 +++++------------ .../license/XPackLicenseStateTests.java | 49 +++++-------------- 3 files changed, 40 insertions(+), 69 deletions(-) diff --git a/docs/reference/migration/migrate_7_0/settings.asciidoc b/docs/reference/migration/migrate_7_0/settings.asciidoc index c6874856011ce..0b18c267748b5 100644 --- a/docs/reference/migration/migrate_7_0/settings.asciidoc +++ b/docs/reference/migration/migrate_7_0/settings.asciidoc @@ -145,6 +145,22 @@ You can enable TLS v1.0 by configuring the relevant `ssl.supported_protocols` se xpack.security.http.ssl.supported_protocols: [ "TLSv1.2", "TLSv1.1", "TLSv1" ] -------------------------------------------------- +[float] +[[trial-explicit-security]] +==== Security on Trial Licenses + +On trial licenses, `xpack.security.enabled` defaults to `false`. + +In prior versions, a trial license would automatically enable security if either + +* `xpack.security.transport.enabled` was `true`; _or_ +* the trial license was generated on a version of X-Pack from 6.2 or earlier. + +This behaviour has now been removed, so security is only enabled if: + +* `xpack.security.enabled` is `true`; _or_ +* `xpack.security.enabled` is not set, and a gold or platinum license is installed. + [float] [[watcher-notifications-account-settings]] ==== Watcher notifications account settings diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/XPackLicenseState.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/XPackLicenseState.java index 0b9640839202b..84dc4c9a5887b 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/XPackLicenseState.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/XPackLicenseState.java @@ -5,7 +5,6 @@ */ package org.elasticsearch.license; -import org.apache.logging.log4j.LogManager; import org.elasticsearch.Version; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Strings; @@ -271,17 +270,11 @@ private static class Status { private final boolean isSecurityExplicitlyEnabled; private Status status = new Status(OperationMode.TRIAL, true); - private boolean isSecurityEnabledByTrialVersion; public XPackLicenseState(Settings settings) { this.listeners = new CopyOnWriteArrayList<>(); this.isSecurityEnabled = XPackSettings.SECURITY_ENABLED.get(settings); - // 6.0+ requires TLS for production licenses, so if TLS is enabled and security is enabled - // we can interpret this as an explicit enabling of security if the security enabled - // setting is not explicitly set - this.isSecurityExplicitlyEnabled = isSecurityEnabled && - (settings.hasValue(XPackSettings.SECURITY_ENABLED.getKey()) || XPackSettings.TRANSPORT_SSL_ENABLED.get(settings)); - this.isSecurityEnabledByTrialVersion = false; + this.isSecurityExplicitlyEnabled = isSecurityEnabled && settings.hasValue(XPackSettings.SECURITY_ENABLED.getKey()); } private XPackLicenseState(XPackLicenseState xPackLicenseState) { @@ -289,7 +282,6 @@ private XPackLicenseState(XPackLicenseState xPackLicenseState) { this.isSecurityEnabled = xPackLicenseState.isSecurityEnabled; this.isSecurityExplicitlyEnabled = xPackLicenseState.isSecurityExplicitlyEnabled; this.status = xPackLicenseState.status; - this.isSecurityEnabledByTrialVersion = xPackLicenseState.isSecurityEnabledByTrialVersion; } /** @@ -304,16 +296,6 @@ private XPackLicenseState(XPackLicenseState xPackLicenseState) { void update(OperationMode mode, boolean active,
@Nullable Version mostRecentTrialVersion) { synchronized (this) { status = new Status(mode, active); - if (isSecurityEnabled == true && isSecurityExplicitlyEnabled == false && mode == OperationMode.TRIAL - && isSecurityEnabledByTrialVersion == false) { - // Before 6.3, Trial licenses would default having security enabled. - // If this license was generated before that version, then treat it as if security is explicitly enabled - if (mostRecentTrialVersion == null || mostRecentTrialVersion.before(Version.V_6_3_0)) { - LogManager.getLogger(getClass()).info("Automatically enabling security for older trial license ({})", - mostRecentTrialVersion == null ? "[pre 6.1.0]" : mostRecentTrialVersion.toString()); - isSecurityEnabledByTrialVersion = true; - } - } } listeners.forEach(LicenseStateListener::licenseStateChanged); } @@ -345,7 +327,7 @@ public synchronized boolean isActive() { public synchronized boolean isAuthAllowed() { OperationMode mode = status.mode; final boolean isSecurityCurrentlyEnabled = - isSecurityEnabled(mode, isSecurityExplicitlyEnabled, isSecurityEnabledByTrialVersion, isSecurityEnabled); + isSecurityEnabled(mode, isSecurityExplicitlyEnabled, isSecurityEnabled); return isSecurityCurrentlyEnabled && (mode == OperationMode.STANDARD || mode == OperationMode.GOLD || mode == OperationMode.PLATINUM || mode == OperationMode.TRIAL); } @@ -356,7 +338,7 @@ public synchronized boolean isAuthAllowed() { public synchronized boolean isIpFilteringAllowed() { OperationMode mode = status.mode; final boolean isSecurityCurrentlyEnabled = - isSecurityEnabled(mode, isSecurityExplicitlyEnabled, isSecurityEnabledByTrialVersion, isSecurityEnabled); + isSecurityEnabled(mode, isSecurityExplicitlyEnabled, isSecurityEnabled); return isSecurityCurrentlyEnabled && (mode == OperationMode.GOLD || mode == OperationMode.PLATINUM || mode == OperationMode.TRIAL); } @@ -366,7 +348,7 @@ public synchronized boolean isIpFilteringAllowed() { public synchronized boolean isAuditingAllowed() { OperationMode mode = status.mode; final boolean isSecurityCurrentlyEnabled = - isSecurityEnabled(mode, isSecurityExplicitlyEnabled, isSecurityEnabledByTrialVersion, isSecurityEnabled); + isSecurityEnabled(mode, isSecurityExplicitlyEnabled, isSecurityEnabled); return isSecurityCurrentlyEnabled && (mode == OperationMode.GOLD || mode == OperationMode.PLATINUM || mode == OperationMode.TRIAL); } @@ -395,7 +377,7 @@ public synchronized boolean isStatsAndHealthAllowed() { public synchronized boolean isDocumentAndFieldLevelSecurityAllowed() { OperationMode mode = status.mode; final boolean isSecurityCurrentlyEnabled = - isSecurityEnabled(mode, isSecurityExplicitlyEnabled, isSecurityEnabledByTrialVersion, isSecurityEnabled); + isSecurityEnabled(mode, isSecurityExplicitlyEnabled, isSecurityEnabled); return isSecurityCurrentlyEnabled && (mode == OperationMode.TRIAL || mode == OperationMode.PLATINUM); } @@ -412,7 +394,7 @@ public enum AllowedRealmType { */ public synchronized AllowedRealmType allowedRealmType() { final boolean isSecurityCurrentlyEnabled = - isSecurityEnabled(status.mode, isSecurityExplicitlyEnabled, isSecurityEnabledByTrialVersion, isSecurityEnabled); + isSecurityEnabled(status.mode, isSecurityExplicitlyEnabled, isSecurityEnabled); if (isSecurityCurrentlyEnabled) { switch (status.mode) { case PLATINUM: @@ -435,7 +417,7 @@ public synchronized AllowedRealmType allowedRealmType() { */ public synchronized boolean isCustomRoleProvidersAllowed() { final boolean isSecurityCurrentlyEnabled = - isSecurityEnabled(status.mode, 
isSecurityExplicitlyEnabled, isSecurityEnabledByTrialVersion, isSecurityEnabled); + isSecurityEnabled(status.mode, isSecurityExplicitlyEnabled, isSecurityEnabled); return isSecurityCurrentlyEnabled && (status.mode == OperationMode.PLATINUM || status.mode == OperationMode.TRIAL) && status.active; } @@ -446,7 +428,7 @@ public synchronized boolean isCustomRoleProvidersAllowed() { */ public synchronized boolean isAuthorizationRealmAllowed() { final boolean isSecurityCurrentlyEnabled = - isSecurityEnabled(status.mode, isSecurityExplicitlyEnabled, isSecurityEnabledByTrialVersion, isSecurityEnabled); + isSecurityEnabled(status.mode, isSecurityExplicitlyEnabled, isSecurityEnabled); return isSecurityCurrentlyEnabled && (status.mode == OperationMode.PLATINUM || status.mode == OperationMode.TRIAL) && status.active; } @@ -676,19 +658,17 @@ public synchronized boolean isSecurityAvailable() { * @return true if security has been disabled by a trial license which is the case of the * default distribution post 6.3.0. The conditions necessary for this are: *
<ul>
- *             <li>A trial license generated in 6.3.0+</li>
+ *             <li>A trial license</li>
 *             <li>xpack.security.enabled not specified as a setting</li>
 *         </ul>
*/ public synchronized boolean isSecurityDisabledByTrialLicense() { - return status.mode == OperationMode.TRIAL && isSecurityEnabled - && isSecurityExplicitlyEnabled == false - && isSecurityEnabledByTrialVersion == false; + return status.mode == OperationMode.TRIAL && isSecurityEnabled && isSecurityExplicitlyEnabled == false; } private static boolean isSecurityEnabled(final OperationMode mode, final boolean isSecurityExplicitlyEnabled, - final boolean isSecurityEnabledByTrialVersion, final boolean isSecurityEnabled) { - return mode == OperationMode.TRIAL ? (isSecurityExplicitlyEnabled || isSecurityEnabledByTrialVersion) : isSecurityEnabled; + final boolean isSecurityEnabled) { + return mode == OperationMode.TRIAL ? isSecurityExplicitlyEnabled : isSecurityEnabled; } /** diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/XPackLicenseStateTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/XPackLicenseStateTests.java index 76b735dc78a38..bbd5d950c8b9b 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/XPackLicenseStateTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/XPackLicenseStateTests.java @@ -81,24 +81,15 @@ public void testSecurityDefaults() { assertThat(licenseState.allowedRealmType(), is(XPackLicenseState.AllowedRealmType.ALL)); assertThat(licenseState.isCustomRoleProvidersAllowed(), is(true)); + licenseState = new XPackLicenseState(Settings.EMPTY); + assertSecurityNotAllowed(licenseState); + } + + public void testTransportSslDoesNotAutomaticallyEnableSecurityOnTrialLicense() { + final XPackLicenseState licenseState; licenseState = new XPackLicenseState(Settings.builder().put(XPackSettings.TRANSPORT_SSL_ENABLED.getKey(), true).build()); - assertThat(licenseState.isAuthAllowed(), is(true)); - assertThat(licenseState.isIpFilteringAllowed(), is(true)); - assertThat(licenseState.isAuditingAllowed(), is(true)); - assertThat(licenseState.isStatsAndHealthAllowed(), is(true)); - assertThat(licenseState.isDocumentAndFieldLevelSecurityAllowed(), is(true)); - assertThat(licenseState.allowedRealmType(), is(XPackLicenseState.AllowedRealmType.ALL)); - assertThat(licenseState.isCustomRoleProvidersAllowed(), is(true)); - - licenseState = new XPackLicenseState(Settings.EMPTY); - assertThat(licenseState.isAuthAllowed(), is(false)); - assertThat(licenseState.isIpFilteringAllowed(), is(false)); - assertThat(licenseState.isAuditingAllowed(), is(false)); - assertThat(licenseState.isStatsAndHealthAllowed(), is(true)); - assertThat(licenseState.isDocumentAndFieldLevelSecurityAllowed(), is(false)); - assertThat(licenseState.allowedRealmType(), is(XPackLicenseState.AllowedRealmType.NONE)); - assertThat(licenseState.isCustomRoleProvidersAllowed(), is(false)); + assertSecurityNotAllowed(licenseState); } public void testSecurityBasic() { @@ -106,13 +97,7 @@ public void testSecurityBasic() { Settings.builder().put(XPackSettings.SECURITY_ENABLED.getKey(), true).build())); licenseState.update(BASIC, true, null); - assertThat(licenseState.isAuthAllowed(), is(false)); - assertThat(licenseState.isIpFilteringAllowed(), is(false)); - assertThat(licenseState.isAuditingAllowed(), is(false)); - assertThat(licenseState.isStatsAndHealthAllowed(), is(true)); - assertThat(licenseState.isDocumentAndFieldLevelSecurityAllowed(), is(false)); - assertThat(licenseState.allowedRealmType(), is(XPackLicenseState.AllowedRealmType.NONE)); - assertThat(licenseState.isCustomRoleProvidersAllowed(), is(false)); + assertSecurityNotAllowed(licenseState); } 
public void testSecurityBasicExpired() { @@ -218,6 +203,10 @@ public void testNewTrialDefaultsSecurityOff() { licenseState.update(TRIAL, true, VersionUtils.randomVersionBetween(random(), Version.V_6_3_0, Version.CURRENT)); assertThat(licenseState.isSecurityDisabledByTrialLicense(), is(true)); + assertSecurityNotAllowed(licenseState); + } + + private void assertSecurityNotAllowed(XPackLicenseState licenseState) { assertThat(licenseState.isAuthAllowed(), is(false)); assertThat(licenseState.isIpFilteringAllowed(), is(false)); assertThat(licenseState.isAuditingAllowed(), is(false)); @@ -227,20 +216,6 @@ public void testNewTrialDefaultsSecurityOff() { assertThat(licenseState.isStatsAndHealthAllowed(), is(true)); assertThat(licenseState.isDocumentAndFieldLevelSecurityAllowed(), is(false)); assertThat(licenseState.allowedRealmType(), is(XPackLicenseState.AllowedRealmType.NONE)); assertThat(licenseState.isCustomRoleProvidersAllowed(), is(false)); } - public void testOldTrialDefaultsSecurityOn() { - XPackLicenseState licenseState = new XPackLicenseState(Settings.EMPTY); - licenseState.update(TRIAL, true, rarely() ? null : VersionUtils.randomVersionBetween(random(), Version.V_6_0_0, Version.V_6_2_4)); - - assertThat(licenseState.isSecurityDisabledByTrialLicense(), is(false)); - assertThat(licenseState.isAuthAllowed(), is(true)); - assertThat(licenseState.isIpFilteringAllowed(), is(true)); - assertThat(licenseState.isAuditingAllowed(), is(true)); - assertThat(licenseState.isStatsAndHealthAllowed(), is(true)); - assertThat(licenseState.isDocumentAndFieldLevelSecurityAllowed(), is(true)); - assertThat(licenseState.allowedRealmType(), is(XPackLicenseState.AllowedRealmType.ALL)); - assertThat(licenseState.isCustomRoleProvidersAllowed(), is(true)); - } - public void testSecurityAckBasicToNotGoldOrStandard() { OperationMode toMode = randomFrom(OperationMode.values(), mode -> mode != GOLD && mode != STANDARD); assertAckMesssages(XPackField.SECURITY, BASIC, toMode, 0); From 275857857099f711276e4399f276cf1e95912613 Mon Sep 17 00:00:00 2001 From: Przemyslaw Gomulka Date: Fri, 1 Feb 2019 08:12:12 +0100 Subject: [PATCH 09/54] Trim the JSON source in indexing slow logs (#38081) A '{' as the first character of a log line causes problems for beats when parsing plaintext logs. This can happen if the submitted document has an additional '\n' at the beginning and we are not reformatting. Trimming the source part of a SlowLog entry solves that and keeps the logs readable.
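For illustration (not part of the patch): a minimal standalone sketch of the behaviour this change targets, with a hypothetical class name and plain String.trim() standing in for the actual Strings.cleanTruncate(...).trim() call.

    public class SlowLogTrimSketch {
        public static void main(String[] args) {
            // the same source string the new test below feeds into the slow log
            String source = "\n\n{ \"fieldName\": 123 } \n ";
            // without the fix: the '{' lands at the start of a fresh physical line
            String untrimmed = ", source[" + source + "]";
            // with the fix: trimming keeps the whole source on one line
            String trimmed = ", source[" + source.trim() + "]";
            System.out.println(untrimmed.split("\n").length); // 4
            System.out.println(trimmed.split("\n").length);   // 1
        }
    }

The single-line output is exactly what the new testReformatIsFalseAndSourceIsTrim test asserts.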
closes #38080 --- .../elasticsearch/index/IndexingSlowLog.java | 2 +- .../index/IndexingSlowLogTests.java | 17 +++++++++++++++++ 2 files changed, 18 insertions(+), 1 deletion(-) diff --git a/server/src/main/java/org/elasticsearch/index/IndexingSlowLog.java b/server/src/main/java/org/elasticsearch/index/IndexingSlowLog.java index e69cfd2c7af0b..b4b471e220a77 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexingSlowLog.java +++ b/server/src/main/java/org/elasticsearch/index/IndexingSlowLog.java @@ -194,7 +194,7 @@ public String toString() { } try { String source = XContentHelper.convertToJson(doc.source(), reformat, doc.getXContentType()); - sb.append(", source[").append(Strings.cleanTruncate(source, maxSourceCharsToLog)).append("]"); + sb.append(", source[").append(Strings.cleanTruncate(source, maxSourceCharsToLog).trim()).append("]"); } catch (IOException e) { sb.append(", source[_failed_to_convert_[").append(e.getMessage()).append("]]"); /* diff --git a/server/src/test/java/org/elasticsearch/index/IndexingSlowLogTests.java b/server/src/test/java/org/elasticsearch/index/IndexingSlowLogTests.java index 6bb799ac9ebb0..72a1cb4a87d7f 100644 --- a/server/src/test/java/org/elasticsearch/index/IndexingSlowLogTests.java +++ b/server/src/test/java/org/elasticsearch/index/IndexingSlowLogTests.java @@ -38,6 +38,7 @@ import java.io.UncheckedIOException; import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasToString; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.not; @@ -127,6 +128,22 @@ public void testReformatSetting() { assertTrue(log.isReformat()); } + public void testReformatIsFalseAndSourceIsTrim() { + String json = "\n\n{ \"fieldName\": 123 } \n "; + BytesReference source = new BytesArray(json); + ParsedDocument pd = new ParsedDocument(new NumericDocValuesField("version", 1), + SeqNoFieldMapper.SequenceIDFields.emptySeqID(), "id", + "test", null, null, source, XContentType.JSON, null); + Index index = new Index("foo", "123"); + // Turning off reformatting so the document is in logs as provided + SlowLogParsedDocumentPrinter p = new SlowLogParsedDocumentPrinter(index, pd, 10, false, 1000); + String logLine = p.toString(); + + //expect the new lines and white characters to be trimmed + assertThat(logLine, containsString("source[{")); + assertThat(logLine.split("\n").length, equalTo(1)); + } + public void testLevelSetting() { SlowLogLevel level = randomFrom(SlowLogLevel.values()); IndexMetaData metaData = newIndexMeta("index", Settings.builder() From d83c748417da334010d0659ed2f5b2c9052fb7d7 Mon Sep 17 00:00:00 2001 From: Adrien Grand Date: Fri, 1 Feb 2019 08:35:29 +0100 Subject: [PATCH 10/54] Fix test bug in DynamicMappingsIT. 
(#37906) Closes #37898 --- .../elasticsearch/index/mapper/DynamicMappingIT.java | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/server/src/test/java/org/elasticsearch/index/mapper/DynamicMappingIT.java b/server/src/test/java/org/elasticsearch/index/mapper/DynamicMappingIT.java index 5ec63681fe690..e1f235c19c662 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/DynamicMappingIT.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/DynamicMappingIT.java @@ -26,6 +26,7 @@ import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.InternalSettingsPlugin; +import org.hamcrest.Matchers; import java.io.IOException; import java.util.Collection; @@ -41,7 +42,6 @@ protected Collection> nodePlugins() { return Collections.singleton(InternalSettingsPlugin.class); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/37898") public void testConflictingDynamicMappings() { // we don't use indexRandom because the order of requests is important here createIndex("index"); @@ -50,7 +50,15 @@ public void testConflictingDynamicMappings() { client().prepareIndex("index", "type", "2").setSource("foo", "bar").get(); fail("Indexing request should have failed!"); } catch (MapperParsingException e) { - // expected + // general case, the parsing code complains that it can't parse "bar" as a "long" + assertThat(e.getMessage(), + Matchers.containsString("failed to parse field [foo] of type [long]")); + } catch (IllegalArgumentException e) { + // rare case: the node that processes the index request doesn't have the mappings + // yet and sends a mapping update to the master node to map "bar" as "text". This + // fails as it had been already mapped as a long by the previous index request. 
+ assertThat(e.getMessage(), + Matchers.containsString("mapper [foo] of different type, current_type [long], merged_type [text]")); } } From 859e2f5bc8739e3a3afa8e2f94c78906b5f5a4d0 Mon Sep 17 00:00:00 2001 From: Yannick Welsch Date: Fri, 1 Feb 2019 08:57:26 +0100 Subject: [PATCH 11/54] Adapt timeouts in UpdateMappingIntegrationIT Relates to #37263 and possibly #36916 --- .../mapping/UpdateMappingIntegrationIT.java | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/server/src/test/java/org/elasticsearch/indices/mapping/UpdateMappingIntegrationIT.java b/server/src/test/java/org/elasticsearch/indices/mapping/UpdateMappingIntegrationIT.java index 7ac2ff659dfa9..fab952b658144 100644 --- a/server/src/test/java/org/elasticsearch/indices/mapping/UpdateMappingIntegrationIT.java +++ b/server/src/test/java/org/elasticsearch/indices/mapping/UpdateMappingIntegrationIT.java @@ -25,10 +25,12 @@ import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.action.index.MappingUpdatedAction; import org.elasticsearch.cluster.metadata.MappingMetaData; import org.elasticsearch.common.Priority; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.index.mapper.MapperService; @@ -72,15 +74,19 @@ public void testDynamicUpdates() throws Exception { .put(MapperService.INDEX_MAPPING_TOTAL_FIELDS_LIMIT_SETTING.getKey(), Long.MAX_VALUE) ).execute().actionGet(); client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet(); + client().admin().cluster().prepareUpdateSettings().setTransientSettings( + Settings.builder().put(MappingUpdatedAction.INDICES_MAPPING_DYNAMIC_TIMEOUT_SETTING.getKey(), TimeValue.timeValueMinutes(5))) + .get(); - int recCount = randomIntBetween(200, 600); + int recCount = randomIntBetween(20, 200); List indexRequests = new ArrayList<>(); for (int rec = 0; rec < recCount; rec++) { String type = "type"; String fieldName = "field_" + type + "_" + rec; - indexRequests.add(client().prepareIndex("test", type, Integer.toString(rec)).setSource(fieldName, "some_value")); + indexRequests.add(client().prepareIndex("test", type, Integer.toString(rec)) + .setTimeout(TimeValue.timeValueMinutes(5)).setSource(fieldName, "some_value")); } - indexRandom(true, indexRequests); + indexRandom(true, false, indexRequests); logger.info("checking all the documents are there"); RefreshResponse refreshResponse = client().admin().indices().prepareRefresh().execute().actionGet(); @@ -95,6 +101,9 @@ public void testDynamicUpdates() throws Exception { String fieldName = "field_" + type + "_" + rec; assertConcreteMappingsOnAll("test", type, fieldName); } + + client().admin().cluster().prepareUpdateSettings().setTransientSettings( + Settings.builder().putNull(MappingUpdatedAction.INDICES_MAPPING_DYNAMIC_TIMEOUT_SETTING.getKey())).get(); } public void testUpdateMappingWithoutType() { @@ -224,7 +233,7 @@ public void testUpdateMappingConcurrently() throws Throwable { JsonXContent.contentBuilder().startObject().startObject(typeName) .startObject("properties").startObject(fieldName).field("type", "text").endObject().endObject() .endObject().endObject() - ).get(); + 
).setMasterNodeTimeout(TimeValue.timeValueMinutes(5)).get(); assertThat(response.isAcknowledged(), equalTo(true)); GetMappingsResponse getMappingResponse = client2.admin().indices().prepareGetMappings(indexName).get(); From c02cd3e2fdc703ade351f43f5e733279b324e9fb Mon Sep 17 00:00:00 2001 From: Alexander Reelsen Date: Fri, 1 Feb 2019 09:03:48 +0100 Subject: [PATCH 12/54] Fix java time epoch date formatters (#37829) The self-written epoch date formatters were not able to properly format an Instant to a string due to a misconfiguration. This fix also removes a runtime behaviour that until now existed under Java 8 regarding the names of the aggregation buckets, which are now the same as they were before and the same as under Java 11. --- .../elasticsearch/common/time/EpochTime.java | 51 +++++-------------- .../common/time/DateFormattersTests.java | 20 ++++++++ .../aggregations/bucket/DateRangeIT.java | 28 +++------- 3 files changed, 40 insertions(+), 59 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/common/time/EpochTime.java b/server/src/main/java/org/elasticsearch/common/time/EpochTime.java index c824a7c7e7c35..22b29bd0edf45 100644 --- a/server/src/main/java/org/elasticsearch/common/time/EpochTime.java +++ b/server/src/main/java/org/elasticsearch/common/time/EpochTime.java @@ -19,8 +19,6 @@ package org.elasticsearch.common.time; -import org.elasticsearch.bootstrap.JavaVersion; - import java.time.format.DateTimeFormatter; import java.time.format.DateTimeFormatterBuilder; import java.time.format.ResolverStyle; @@ -72,7 +70,7 @@ public TemporalAccessor resolve(Map fieldValues, private static final EpochField NANOS_OF_SECOND = new EpochField(ChronoUnit.NANOS, ChronoUnit.SECONDS, ValueRange.of(0, 999_999_999)) { @Override public boolean isSupportedBy(TemporalAccessor temporal) { - return temporal.isSupported(ChronoField.NANO_OF_SECOND) && temporal.getLong(ChronoField.NANO_OF_SECOND) != 0; + return temporal.isSupported(ChronoField.NANO_OF_SECOND); } @Override public long getFrom(TemporalAccessor temporal) { @@ -117,32 +115,30 @@ public boolean isSupportedBy(TemporalAccessor temporal) { } @Override public long getFrom(TemporalAccessor temporal) { - return temporal.getLong(ChronoField.NANO_OF_SECOND); + return temporal.getLong(ChronoField.NANO_OF_SECOND) % 1_000_000; } }; // this supports seconds without any fraction private static final DateTimeFormatter SECONDS_FORMATTER1 = new DateTimeFormatterBuilder() .appendValue(SECONDS, 1, 19, SignStyle.NORMAL) + .optionalStart() // optional is used so isSupported will be called when printing + .appendFraction(NANOS_OF_SECOND, 0, 9, true) + .optionalEnd() .toFormatter(Locale.ROOT); // this supports seconds ending in dot private static final DateTimeFormatter SECONDS_FORMATTER2 = new DateTimeFormatterBuilder() - .append(SECONDS_FORMATTER1) + .appendValue(SECONDS, 1, 19, SignStyle.NORMAL) .appendLiteral('.') .toFormatter(Locale.ROOT); - // this supports seconds with a fraction and is also used for printing - private static final DateTimeFormatter SECONDS_FORMATTER3 = new DateTimeFormatterBuilder() - .append(SECONDS_FORMATTER1) - .optionalStart() // optional is used so isSupported will be called when printing - .appendFraction(NANOS_OF_SECOND, 1, 9, true) - .optionalEnd() - .toFormatter(Locale.ROOT); - // this supports milliseconds without any fraction private static final DateTimeFormatter MILLISECONDS_FORMATTER1 = new DateTimeFormatterBuilder() .appendValue(MILLIS, 1, 19, SignStyle.NORMAL) + .optionalStart() + .appendFraction(NANOS_OF_MILLI, 0, 6, true) +
.optionalEnd() .toFormatter(Locale.ROOT); // this supports milliseconds ending in dot @@ -151,32 +147,13 @@ public long getFrom(TemporalAccessor temporal) { .appendLiteral('.') .toFormatter(Locale.ROOT); - // this supports milliseconds with a fraction and is also used for printing - private static final DateTimeFormatter MILLISECONDS_FORMATTER3 = new DateTimeFormatterBuilder() - .append(MILLISECONDS_FORMATTER1) - .optionalStart() // optional is used so isSupported will be called when printing - .appendFraction(NANOS_OF_MILLI, 1, 6, true) - .optionalEnd() - .toFormatter(Locale.ROOT); - - static final DateFormatter SECONDS_FORMATTER = new JavaDateFormatter("epoch_second", SECONDS_FORMATTER3, + static final DateFormatter SECONDS_FORMATTER = new JavaDateFormatter("epoch_second", SECONDS_FORMATTER1, builder -> builder.parseDefaulting(ChronoField.NANO_OF_SECOND, 999_999_999L), - SECONDS_FORMATTER1, SECONDS_FORMATTER2, SECONDS_FORMATTER3); - - static final DateFormatter MILLIS_FORMATTER = getEpochMillisFormatter(); + SECONDS_FORMATTER1, SECONDS_FORMATTER2); - private static DateFormatter getEpochMillisFormatter() { - // the third formatter fails under java 8 as a printer, so fall back to this one - final DateTimeFormatter printer; - if (JavaVersion.current().getVersion().get(0) == 8) { - printer = MILLISECONDS_FORMATTER1; - } else { - printer = MILLISECONDS_FORMATTER3; - } - return new JavaDateFormatter("epoch_millis", printer, - builder -> builder.parseDefaulting(EpochTime.NANOS_OF_MILLI, 999_999L), - MILLISECONDS_FORMATTER1, MILLISECONDS_FORMATTER2, MILLISECONDS_FORMATTER3); - } + static final DateFormatter MILLIS_FORMATTER = new JavaDateFormatter("epoch_millis", MILLISECONDS_FORMATTER1, + builder -> builder.parseDefaulting(EpochTime.NANOS_OF_MILLI, 999_999L), + MILLISECONDS_FORMATTER1, MILLISECONDS_FORMATTER2); private abstract static class EpochField implements TemporalField { diff --git a/server/src/test/java/org/elasticsearch/common/time/DateFormattersTests.java b/server/src/test/java/org/elasticsearch/common/time/DateFormattersTests.java index 90a9a76e6a4f9..7b535f9d4c9d6 100644 --- a/server/src/test/java/org/elasticsearch/common/time/DateFormattersTests.java +++ b/server/src/test/java/org/elasticsearch/common/time/DateFormattersTests.java @@ -147,6 +147,26 @@ public void testSupportBackwardsJava8Format() { assertThat(formatter, instanceOf(JavaDateFormatter.class)); } + public void testEpochFormatting() { + long seconds = randomLongBetween(0, 130L * 365 * 86400); // from 1970 epoch till around 2100 + long nanos = randomLongBetween(0, 999_999_999L); + Instant instant = Instant.ofEpochSecond(seconds, nanos); + + DateFormatter millisFormatter = DateFormatter.forPattern("epoch_millis"); + String millis = millisFormatter.format(instant); + Instant millisInstant = Instant.from(millisFormatter.parse(millis)); + assertThat(millisInstant.toEpochMilli(), is(instant.toEpochMilli())); + assertThat(millisFormatter.format(Instant.ofEpochSecond(42, 0)), is("42000")); + assertThat(millisFormatter.format(Instant.ofEpochSecond(42, 123456789L)), is("42123.456789")); + + DateFormatter secondsFormatter = DateFormatter.forPattern("epoch_second"); + String formattedSeconds = secondsFormatter.format(instant); + Instant secondsInstant = Instant.from(secondsFormatter.parse(formattedSeconds)); + assertThat(secondsInstant.getEpochSecond(), is(instant.getEpochSecond())); + + assertThat(secondsFormatter.format(Instant.ofEpochSecond(42, 0)), is("42")); + } + public void testParsingStrictNanoDates() { DateFormatter 
formatter = DateFormatters.forPattern("strict_date_optional_time_nanos"); formatter.format(formatter.parse("2016-01-01T00:00:00.000")); diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/DateRangeIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/DateRangeIT.java index f50c0bfd072b1..ae6e4cc984fbf 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/DateRangeIT.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/DateRangeIT.java @@ -22,7 +22,6 @@ import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchPhaseExecutionException; import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.bootstrap.JavaVersion; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.script.Script; @@ -996,39 +995,24 @@ public void testRangeWithFormatNumericValue() throws Exception { .addAggregation(dateRange("date_range").field("date").addRange(1000, 3000).addRange(3000, 4000)).get(); assertThat(searchResponse.getHits().getTotalHits().value, equalTo(3L)); List buckets = checkBuckets(searchResponse.getAggregations().get("date_range"), "date_range", 2); - if (JavaVersion.current().getVersion().get(0) == 8) { - assertBucket(buckets.get(0), 2L, "1000.0-3000.0", 1000000L, 3000000L); - assertBucket(buckets.get(1), 1L, "3000.0-4000.0", 3000000L, 4000000L); - } else { - assertBucket(buckets.get(0), 2L, "1000-3000", 1000000L, 3000000L); - assertBucket(buckets.get(1), 1L, "3000-4000", 3000000L, 4000000L); - } + assertBucket(buckets.get(0), 2L, "1000-3000", 1000000L, 3000000L); + assertBucket(buckets.get(1), 1L, "3000-4000", 3000000L, 4000000L); // using no format should also work when and to/from are string values searchResponse = client().prepareSearch(indexName).setSize(0) .addAggregation(dateRange("date_range").field("date").addRange("1000", "3000").addRange("3000", "4000")).get(); assertThat(searchResponse.getHits().getTotalHits().value, equalTo(3L)); buckets = checkBuckets(searchResponse.getAggregations().get("date_range"), "date_range", 2); - if (JavaVersion.current().getVersion().get(0) == 8) { - assertBucket(buckets.get(0), 2L, "1000.0-3000.0", 1000000L, 3000000L); - assertBucket(buckets.get(1), 1L, "3000.0-4000.0", 3000000L, 4000000L); - } else { - assertBucket(buckets.get(0), 2L, "1000-3000", 1000000L, 3000000L); - assertBucket(buckets.get(1), 1L, "3000-4000", 3000000L, 4000000L); - } + assertBucket(buckets.get(0), 2L, "1000-3000", 1000000L, 3000000L); + assertBucket(buckets.get(1), 1L, "3000-4000", 3000000L, 4000000L); // also e-notation should work, fractional parts should be truncated searchResponse = client().prepareSearch(indexName).setSize(0) .addAggregation(dateRange("date_range").field("date").addRange(1.0e3, 3000.8123).addRange(3000.8123, 4.0e3)).get(); assertThat(searchResponse.getHits().getTotalHits().value, equalTo(3L)); buckets = checkBuckets(searchResponse.getAggregations().get("date_range"), "date_range", 2); - if (JavaVersion.current().getVersion().get(0) == 8) { - assertBucket(buckets.get(0), 2L, "1000.0-3000.0", 1000000L, 3000000L); - assertBucket(buckets.get(1), 1L, "3000.0-4000.0", 3000000L, 4000000L); - } else { - assertBucket(buckets.get(0), 2L, "1000-3000", 1000000L, 3000000L); - assertBucket(buckets.get(1), 1L, "3000-4000", 3000000L, 4000000L); - } + assertBucket(buckets.get(0), 2L, "1000-3000", 1000000L, 3000000L); + assertBucket(buckets.get(1), 1L, 
"3000-4000", 3000000L, 4000000L); // using different format should work when to/from is compatible with // format in aggregation From 23f00e367673bec2637deb52d95e9c6bee3d4963 Mon Sep 17 00:00:00 2001 From: David Turner Date: Fri, 1 Feb 2019 08:10:49 +0000 Subject: [PATCH 13/54] Relax fault detector in some disruption tests (#38101) Today we use `AbstractDisruptionTestCase` to test the behaviour of things like master elections in the presence of cluster disruptions. These tests have rather enthusiastic fault detection settings, detecting a fault if a single ping fails, with a one-second timeout. Furthermore there are some tests that assert the identity of the master remains unchanged during some disruption, and these assertions fail rather often thanks to the overly sensitive fault detector. However in a number of these tests the fault detector need not be this sensitive. This commit moves some such tests into their own test suite and uses more sensible fault-detection settings to avoid the kind of master instability that is causing CI failures. Closes #37699 --- .../discovery/MasterDisruptionIT.java | 71 -------- .../discovery/StableMasterDisruptionIT.java | 170 ++++++++++++++++++ 2 files changed, 170 insertions(+), 71 deletions(-) create mode 100644 server/src/test/java/org/elasticsearch/discovery/StableMasterDisruptionIT.java diff --git a/server/src/test/java/org/elasticsearch/discovery/MasterDisruptionIT.java b/server/src/test/java/org/elasticsearch/discovery/MasterDisruptionIT.java index 718904eecb5bb..fc9450e982636 100644 --- a/server/src/test/java/org/elasticsearch/discovery/MasterDisruptionIT.java +++ b/server/src/test/java/org/elasticsearch/discovery/MasterDisruptionIT.java @@ -30,7 +30,6 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Priority; -import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; @@ -71,76 +70,6 @@ @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, transportClientRatio = 0) public class MasterDisruptionIT extends AbstractDisruptionTestCase { - /** - * Test that no split brain occurs under partial network partition. See https://github.com/elastic/elasticsearch/issues/2488 - */ - public void testFailWithMinimumMasterNodesConfigured() throws Exception { - List nodes = startCluster(3); - - // Figure out what is the elected master node - final String masterNode = internalCluster().getMasterName(); - logger.info("---> legit elected master node={}", masterNode); - - // Pick a node that isn't the elected master. - Set nonMasters = new HashSet<>(nodes); - nonMasters.remove(masterNode); - final String unluckyNode = randomFrom(nonMasters.toArray(Strings.EMPTY_ARRAY)); - - - // Simulate a network issue between the unlucky node and elected master node in both directions. - - NetworkDisruption networkDisconnect = new NetworkDisruption( - new NetworkDisruption.TwoPartitions(masterNode, unluckyNode), - new NetworkDisruption.NetworkDisconnect()); - setDisruptionScheme(networkDisconnect); - networkDisconnect.startDisrupting(); - - // Wait until elected master has removed that the unlucky node... - ensureStableCluster(2, masterNode); - - // The unlucky node must report *no* master node, since it can't connect to master and in fact it should - // continuously ping until network failures have been resolved. 
However - // It may a take a bit before the node detects it has been cut off from the elected master - assertNoMaster(unluckyNode); - - networkDisconnect.stopDisrupting(); - - // Wait until the master node sees all 3 nodes again. - ensureStableCluster(3); - - // The elected master shouldn't have changed, since the unlucky node never could have elected himself as - // master since m_m_n of 2 could never be satisfied. - assertMaster(masterNode, nodes); - } - - /** - * Verify that nodes fault detection works after master (re) election - */ - public void testNodesFDAfterMasterReelection() throws Exception { - startCluster(4); - - logger.info("--> stopping current master"); - internalCluster().stopCurrentMasterNode(); - - ensureStableCluster(3); - - String master = internalCluster().getMasterName(); - String nonMaster = null; - for (String node : internalCluster().getNodeNames()) { - if (!node.equals(master)) { - nonMaster = node; - } - } - - logger.info("--> isolating [{}]", nonMaster); - NetworkDisruption.TwoPartitions partitions = isolateNode(nonMaster); - NetworkDisruption networkDisruption = addRandomDisruptionType(partitions); - networkDisruption.startDisrupting(); - - logger.info("--> waiting for master to remove it"); - ensureStableCluster(2, master); - } - /** * Tests that emulates a frozen elected master node that unfreezes and pushes his cluster state to other nodes * that already are following another elected master node. These nodes should reject this cluster state and prevent diff --git a/server/src/test/java/org/elasticsearch/discovery/StableMasterDisruptionIT.java b/server/src/test/java/org/elasticsearch/discovery/StableMasterDisruptionIT.java new file mode 100644 index 0000000000000..b5177b1ce3e47 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/discovery/StableMasterDisruptionIT.java @@ -0,0 +1,170 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.discovery; + +import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest; +import org.elasticsearch.cluster.coordination.FollowersChecker; +import org.elasticsearch.cluster.coordination.LeaderChecker; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.set.Sets; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.test.disruption.NetworkDisruption; +import org.elasticsearch.test.disruption.NetworkDisruption.NetworkDisconnect; +import org.elasticsearch.test.disruption.NetworkDisruption.NetworkUnresponsive; +import org.elasticsearch.test.junit.annotations.TestLogging; +import org.elasticsearch.test.transport.MockTransportService.TestPlugin; + +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.HashSet; +import java.util.List; +import java.util.Set; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +import static java.util.Collections.singleton; +import static org.hamcrest.Matchers.equalTo; + +/** + * Tests relating to the loss of the master, but which work with the default fault detection settings which are rather lenient and will + * not detect a master failure too quickly. + */ +@TestLogging("_root:DEBUG,org.elasticsearch.cluster.service:TRACE") +@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, transportClientRatio = 0) +public class StableMasterDisruptionIT extends ESIntegTestCase { + + @Override + protected Collection> nodePlugins() { + return Collections.singletonList(TestPlugin.class); + } + + /** + * Test that no split brain occurs under partial network partition. See https://github.com/elastic/elasticsearch/issues/2488 + */ + public void testFailWithMinimumMasterNodesConfigured() throws Exception { + List nodes = internalCluster().startNodes(3); + ensureStableCluster(3); + + // Figure out what is the elected master node + final String masterNode = internalCluster().getMasterName(); + logger.info("---> legit elected master node={}", masterNode); + + // Pick a node that isn't the elected master. + Set nonMasters = new HashSet<>(nodes); + nonMasters.remove(masterNode); + final String unluckyNode = randomFrom(nonMasters.toArray(Strings.EMPTY_ARRAY)); + + // Simulate a network issue between the unlucky node and elected master node in both directions. + + NetworkDisruption networkDisconnect = new NetworkDisruption( + new NetworkDisruption.TwoPartitions(masterNode, unluckyNode), + new NetworkDisruption.NetworkDisconnect()); + setDisruptionScheme(networkDisconnect); + networkDisconnect.startDisrupting(); + + // Wait until elected master has removed that the unlucky node... + ensureStableCluster(2, masterNode); + + // The unlucky node must report *no* master node, since it can't connect to master and in fact it should + // continuously ping until network failures have been resolved. However + // It may a take a bit before the node detects it has been cut off from the elected master + assertBusy(() -> assertNull(client(unluckyNode).admin().cluster().state( + new ClusterStateRequest().local(true)).get().getState().nodes().getMasterNode())); + + networkDisconnect.stopDisrupting(); + + // Wait until the master node sees all 3 nodes again. 
+ ensureStableCluster(3); + + // The elected master shouldn't have changed, since the unlucky node never could have elected itself as master + assertThat(internalCluster().getMasterName(), equalTo(masterNode)); + } + + /** + * Verify that nodes fault detection works after master (re) election + */ + public void testFollowerCheckerDetectsUnresponsiveNodeAfterMasterReelection() throws Exception { + internalCluster().startNodes(4, + Settings.builder() + .put(LeaderChecker.LEADER_CHECK_TIMEOUT_SETTING.getKey(), "1s") + .put(LeaderChecker.LEADER_CHECK_RETRY_COUNT_SETTING.getKey(), "10") + .put(FollowersChecker.FOLLOWER_CHECK_TIMEOUT_SETTING.getKey(), "1s") + .put(FollowersChecker.FOLLOWER_CHECK_RETRY_COUNT_SETTING.getKey(), 1).build()); + ensureStableCluster(4); + + logger.info("--> stopping current master"); + internalCluster().stopCurrentMasterNode(); + + ensureStableCluster(3); + + final String master = internalCluster().getMasterName(); + final List nonMasters = Arrays.stream(internalCluster().getNodeNames()).filter(n -> master.equals(n) == false) + .collect(Collectors.toList()); + final String isolatedNode = randomFrom(nonMasters); + final String otherNode = nonMasters.get(nonMasters.get(0).equals(isolatedNode) ? 1 : 0); + + logger.info("--> isolating [{}]", isolatedNode); + + final NetworkDisruption networkDisruption = new NetworkDisruption(new NetworkDisruption.TwoPartitions( + singleton(isolatedNode), Sets.newHashSet(master, otherNode)), new NetworkUnresponsive()); + setDisruptionScheme(networkDisruption); + networkDisruption.startDisrupting(); + + logger.info("--> waiting for master to remove it"); + ensureStableCluster(2, master); + + networkDisruption.stopDisrupting(); + ensureStableCluster(3); + } + + /** + * Verify that nodes fault detection works after master (re) election + */ + public void testFollowerCheckerDetectsDisconnectedNodeAfterMasterReelection() throws Exception { + internalCluster().startNodes(4); + ensureStableCluster(4); + + logger.info("--> stopping current master"); + internalCluster().stopCurrentMasterNode(); + + ensureStableCluster(3); + + final String master = internalCluster().getMasterName(); + final List nonMasters = Arrays.stream(internalCluster().getNodeNames()).filter(n -> master.equals(n) == false) + .collect(Collectors.toList()); + final String isolatedNode = randomFrom(nonMasters); + final String otherNode = nonMasters.get(nonMasters.get(0).equals(isolatedNode) ? 1 : 0); + + logger.info("--> isolating [{}]", isolatedNode); + + final NetworkDisruption networkDisruption = new NetworkDisruption(new NetworkDisruption.TwoPartitions( + singleton(isolatedNode), Stream.of(master, otherNode).collect(Collectors.toSet())), new NetworkDisconnect()); + setDisruptionScheme(networkDisruption); + networkDisruption.startDisrupting(); + + logger.info("--> waiting for master to remove it"); + ensureStableCluster(2, master); + + networkDisruption.stopDisrupting(); + ensureStableCluster(3); + } +} From b7308aa03cdf87d12a0ff781b2bbb7b6222fcdad Mon Sep 17 00:00:00 2001 From: Jim Ferenczi Date: Fri, 1 Feb 2019 09:35:46 +0100 Subject: [PATCH 14/54] Don't load global ordinals with the `map` execution_hint (#37833) The terms aggregator loads the global ordinals to retrieve the cardinality of the field to aggregate on. This information is then used to select the strategy to use for the aggregation (breadth_first or depth_first). However this should be avoided if the execution_hint is explicitly set to map since this mode doesn't really need the global ordinals. 
Since we still need the cardinality of the field this change picks the maximum cardinality in the segments as an estimation of the total cardinality to select the strategy to use (breadth_first or depth_first). This estimation is only used if the execution hint is set to map, otherwise the global ordinals are still used to retrieve the accurate cardinality. Closes #37705 --- .../test/search.aggregation/20_terms.yml | 71 +++++++++++++++++++ .../bucket/terms/TermsAggregatorFactory.java | 21 ++++-- 2 files changed, 87 insertions(+), 5 deletions(-) diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/20_terms.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/20_terms.yml index d442672bf8bed..88e0ecff29608 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/20_terms.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/20_terms.yml @@ -700,3 +700,74 @@ setup: - is_false: aggregations.str_terms.buckets.1.key_as_string - match: { aggregations.str_terms.buckets.1.doc_count: 2 } + +--- +"Global ordinals are not loaded with the map execution hint": + + - skip: + version: " - 6.99.99" + reason: bug fixed in 7.0 + + - do: + index: + refresh: true + index: test_1 + id: 1 + routing: 1 + body: { "str": "abc" } + + - do: + index: + refresh: true + index: test_1 + id: 2 + routing: 1 + body: { "str": "abc" } + + - do: + index: + refresh: true + index: test_1 + id: 3 + routing: 1 + body: { "str": "bcd" } + + - do: + indices.refresh: {} + + - do: + search: + index: test_1 + body: { "size" : 0, "aggs" : { "str_terms" : { "terms" : { "field" : "str", "execution_hint" : "map" } } } } + + - match: { hits.total.value: 3} + - length: { aggregations.str_terms.buckets: 2 } + + - do: + indices.stats: + index: test_1 + metric: fielddata + fielddata_fields: str + + - match: { indices.test_1.total.fielddata.memory_size_in_bytes: 0} + + - do: + search: + index: test_1 + body: { "size" : 0, "aggs" : { "str_terms" : { "terms" : { "field" : "str", "execution_hint" : "global_ordinals" } } } } + + - match: { hits.total.value: 3} + - length: { aggregations.str_terms.buckets: 2 } + + - do: + indices.stats: + index: test_1 + metric: fielddata + fielddata_fields: str + + - gt: { indices.test_1.total.fielddata.memory_size_in_bytes: 0} + + + + + diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorFactory.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorFactory.java index 1ff0efd3e8307..346da32763bd8 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorFactory.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorFactory.java @@ -20,6 +20,7 @@ package org.elasticsearch.search.aggregations.bucket.terms; import org.apache.logging.log4j.LogManager; +import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.IndexSearcher; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.logging.DeprecationLogger; @@ -133,7 +134,7 @@ protected Aggregator doCreateInternal(ValuesSource valuesSource, Aggregator pare if (valuesSource instanceof ValuesSource.Bytes.WithOrdinals == false) { execution = ExecutionMode.MAP; } - final long maxOrd = getMaxOrd(valuesSource, context.searcher()); + final long maxOrd = getMaxOrd(context.searcher(), valuesSource, execution); if (execution == null) { execution = 
ExecutionMode.GLOBAL_ORDINALS; } @@ -207,13 +208,23 @@ static SubAggCollectionMode subAggCollectionMode(int expectedSize, long maxOrd) } /** - * Get the maximum global ordinal value for the provided {@link ValuesSource} or -1 + * Get the maximum ordinal value for the provided {@link ValuesSource} or -1 * if the values source is not an instance of {@link ValuesSource.Bytes.WithOrdinals}. */ - static long getMaxOrd(ValuesSource source, IndexSearcher searcher) throws IOException { + static long getMaxOrd(IndexSearcher searcher, ValuesSource source, ExecutionMode executionMode) throws IOException { if (source instanceof ValuesSource.Bytes.WithOrdinals) { ValuesSource.Bytes.WithOrdinals valueSourceWithOrdinals = (ValuesSource.Bytes.WithOrdinals) source; - return valueSourceWithOrdinals.globalMaxOrd(searcher); + if (executionMode == ExecutionMode.MAP) { + // global ordinals are not requested so we don't load them + // and return the biggest cardinality per segment instead. + long maxOrd = -1; + for (LeafReaderContext leaf : searcher.getIndexReader().leaves()) { + maxOrd = Math.max(maxOrd, valueSourceWithOrdinals.ordinalsValues(leaf).getValueCount()); + } + return maxOrd; + } else { + return valueSourceWithOrdinals.globalMaxOrd(searcher); + } } else { return -1; } @@ -258,7 +269,7 @@ Aggregator create(String name, List pipelineAggregators, Map metaData) throws IOException { - final long maxOrd = getMaxOrd(valuesSource, context.searcher()); + final long maxOrd = getMaxOrd(context.searcher(), valuesSource, ExecutionMode.GLOBAL_ORDINALS); assert maxOrd != -1; final double ratio = maxOrd / ((double) context.searcher().getIndexReader().numDocs()); From d417997aca4a6e91e6bc0f0c8141af0e29b7255a Mon Sep 17 00:00:00 2001 From: Alpar Torok Date: Fri, 1 Feb 2019 10:47:54 +0200 Subject: [PATCH 15/54] Fix eclipse config for ssl-config (#38096) --- libs/ssl-config/src/main/eclipse.build.gradle | 2 ++ libs/ssl-config/src/test/eclipse.build.gradle | 5 +++++ 2 files changed, 7 insertions(+) create mode 100644 libs/ssl-config/src/main/eclipse.build.gradle create mode 100644 libs/ssl-config/src/test/eclipse.build.gradle diff --git a/libs/ssl-config/src/main/eclipse.build.gradle b/libs/ssl-config/src/main/eclipse.build.gradle new file mode 100644 index 0000000000000..58b2d7077120a --- /dev/null +++ b/libs/ssl-config/src/main/eclipse.build.gradle @@ -0,0 +1,2 @@ +// this is just shell gradle file for eclipse to have separate projects for geo src and tests +apply from: '../../build.gradle' diff --git a/libs/ssl-config/src/test/eclipse.build.gradle b/libs/ssl-config/src/test/eclipse.build.gradle new file mode 100644 index 0000000000000..f8265e3dfed08 --- /dev/null +++ b/libs/ssl-config/src/test/eclipse.build.gradle @@ -0,0 +1,5 @@ +// this is just shell gradle file for eclipse to have separate projects for geo src and tests +apply from: '../../build.gradle' +dependencies { + testCompile project(':libs:elasticsearch-ssl-config') +} From 6c5a7387af086381da34db6103fc711dd8350b14 Mon Sep 17 00:00:00 2001 From: Alexander Reelsen Date: Fri, 1 Feb 2019 10:15:18 +0100 Subject: [PATCH 16/54] Replace joda time in ingest-common module (#38088) This commit fully replaces any remaining Joda-Time classes with java.time implementations.
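For illustration (not part of the patch): a minimal standalone sketch of the joda-to-java-time mapping applied throughout these files, with a hypothetical class name; the joda calls shown in the comments mirror the code being removed.

    import java.time.Instant;
    import java.time.ZoneId;
    import java.time.ZoneOffset;
    import java.time.ZonedDateTime;

    public class JodaToJavaTimeSketch {
        public static void main(String[] args) {
            ZoneId zone = ZoneOffset.ofHours(2);
            // joda: new DateTime(Long.parseLong("1000500"), timezone) -- unix millis
            ZonedDateTime unixMs = Instant.ofEpochMilli(Long.parseLong("1000500")).atZone(zone);
            // joda: new DateTime((long) (Double.parseDouble("1000.5") * 1000), timezone) -- unix seconds
            ZonedDateTime unix = Instant.ofEpochMilli((long) (Double.parseDouble("1000.5") * 1000.0)).atZone(zone);
            System.out.println(unixMs.toInstant().toEpochMilli()); // 1000500
            System.out.println(unix.toInstant().toEpochMilli());   // 1000500
        }
    }

ZonedDateTime carries its zone along with the instant, which is why the patch can substitute it for joda's DateTime without changing any parsed epoch values.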
Relates #27330 --- .../ingest/common/DateFormat.java | 28 ++++------- .../ingest/common/DateIndexNameProcessor.java | 50 ++++++++++--------- .../ingest/common/DateProcessor.java | 20 ++++---- .../ingest/common/DateFormatTests.java | 31 ++++++------ .../common/DateIndexNameFactoryTests.java | 6 +-- .../common/DateIndexNameProcessorTests.java | 35 ++++++------- .../common/DateProcessorFactoryTests.java | 8 +-- .../ingest/common/DateProcessorTests.java | 6 +-- 8 files changed, 90 insertions(+), 94 deletions(-) diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/DateFormat.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/DateFormat.java index 8629f5f1fa321..05aa75944d2f9 100644 --- a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/DateFormat.java +++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/DateFormat.java @@ -21,10 +21,6 @@ import org.elasticsearch.common.time.DateFormatter; import org.elasticsearch.common.time.DateFormatters; -import org.elasticsearch.common.time.DateUtils; -import org.joda.time.DateTime; -import org.joda.time.DateTimeZone; -import org.joda.time.format.ISODateTimeFormat; import java.time.Instant; import java.time.LocalDate; @@ -48,26 +44,26 @@ enum DateFormat { Iso8601 { @Override - Function getFunction(String format, DateTimeZone timezone, Locale locale) { - return ISODateTimeFormat.dateTimeParser().withZone(timezone)::parseDateTime; + Function getFunction(String format, ZoneId timezone, Locale locale) { + return (date) -> DateFormatters.from(DateFormatter.forPattern("strict_date_time").parse(date)).withZoneSameInstant(timezone); } }, Unix { @Override - Function getFunction(String format, DateTimeZone timezone, Locale locale) { - return (date) -> new DateTime((long)(Double.parseDouble(date) * 1000), timezone); + Function getFunction(String format, ZoneId timezone, Locale locale) { + return date -> Instant.ofEpochMilli((long) (Double.parseDouble(date) * 1000.0)).atZone(timezone); } }, UnixMs { @Override - Function getFunction(String format, DateTimeZone timezone, Locale locale) { - return (date) -> new DateTime(Long.parseLong(date), timezone); + Function getFunction(String format, ZoneId timezone, Locale locale) { + return date -> Instant.ofEpochMilli(Long.parseLong(date)).atZone(timezone); } }, Tai64n { @Override - Function getFunction(String format, DateTimeZone timezone, Locale locale) { - return (date) -> new DateTime(parseMillis(date), timezone); + Function getFunction(String format, ZoneId timezone, Locale locale) { + return date -> Instant.ofEpochMilli(parseMillis(date)).atZone(timezone); } private long parseMillis(String date) { @@ -85,13 +81,12 @@ private long parseMillis(String date) { Arrays.asList(NANO_OF_SECOND, SECOND_OF_DAY, MINUTE_OF_DAY, HOUR_OF_DAY, DAY_OF_MONTH, MONTH_OF_YEAR); @Override - Function getFunction(String format, DateTimeZone timezone, Locale locale) { + Function getFunction(String format, ZoneId zoneId, Locale locale) { // support the 6.x BWC compatible way of parsing java 8 dates if (format.startsWith("8")) { format = format.substring(1); } - ZoneId zoneId = DateUtils.dateTimeZoneToZoneId(timezone); int year = LocalDate.now(ZoneOffset.UTC).getYear(); DateFormatter formatter = DateFormatter.forPattern(format) .withLocale(locale) @@ -111,13 +106,12 @@ Function getFunction(String format, DateTimeZone timezone, Loc accessor = newTime.withZoneSameLocal(zoneId); } - long millis = DateFormatters.from(accessor).toInstant().toEpochMilli(); - return 
new DateTime(millis, timezone); + return DateFormatters.from(accessor); }; } }; - abstract Function getFunction(String format, DateTimeZone timezone, Locale locale); + abstract Function getFunction(String format, ZoneId timezone, Locale locale); static DateFormat fromString(String format) { switch (format) { diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/DateIndexNameProcessor.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/DateIndexNameProcessor.java index ca429375f792e..e8e79c3d869ce 100644 --- a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/DateIndexNameProcessor.java +++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/DateIndexNameProcessor.java @@ -19,25 +19,25 @@ package org.elasticsearch.ingest.common; -import java.util.ArrayList; -import java.util.Collections; -import java.util.IllformedLocaleException; -import java.util.List; -import java.util.Locale; -import java.util.Map; -import java.util.function.Function; - import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.common.time.DateFormatter; import org.elasticsearch.ingest.AbstractProcessor; import org.elasticsearch.ingest.ConfigurationUtils; import org.elasticsearch.ingest.IngestDocument; import org.elasticsearch.ingest.Processor; import org.elasticsearch.script.ScriptService; import org.elasticsearch.script.TemplateScript; -import org.joda.time.DateTime; -import org.joda.time.DateTimeZone; -import org.joda.time.format.DateTimeFormat; -import org.joda.time.format.DateTimeFormatter; + +import java.time.ZoneId; +import java.time.ZoneOffset; +import java.time.ZonedDateTime; +import java.util.ArrayList; +import java.util.Collections; +import java.util.IllformedLocaleException; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.function.Function; public final class DateIndexNameProcessor extends AbstractProcessor { @@ -47,10 +47,10 @@ public final class DateIndexNameProcessor extends AbstractProcessor { private final TemplateScript.Factory indexNamePrefixTemplate; private final TemplateScript.Factory dateRoundingTemplate; private final TemplateScript.Factory indexNameFormatTemplate; - private final DateTimeZone timezone; - private final List> dateFormats; + private final ZoneId timezone; + private final List> dateFormats; - DateIndexNameProcessor(String tag, String field, List> dateFormats, DateTimeZone timezone, + DateIndexNameProcessor(String tag, String field, List> dateFormats, ZoneId timezone, TemplateScript.Factory indexNamePrefixTemplate, TemplateScript.Factory dateRoundingTemplate, TemplateScript.Factory indexNameFormatTemplate) { super(tag); @@ -72,9 +72,9 @@ public IngestDocument execute(IngestDocument ingestDocument) throws Exception { date = obj.toString(); } - DateTime dateTime = null; + ZonedDateTime dateTime = null; Exception lastException = null; - for (Function dateParser : dateFormats) { + for (Function dateParser : dateFormats) { try { dateTime = dateParser.apply(date); } catch (Exception e) { @@ -90,13 +90,15 @@ public IngestDocument execute(IngestDocument ingestDocument) throws Exception { String indexNameFormat = ingestDocument.renderTemplate(indexNameFormatTemplate); String dateRounding = ingestDocument.renderTemplate(dateRoundingTemplate); - DateTimeFormatter formatter = DateTimeFormat.forPattern(indexNameFormat); + DateFormatter formatter = DateFormatter.forPattern(indexNameFormat); + // use UTC instead of Z is string representation of UTC, so behaviour 
is the same between 6.x and 7 + String zone = timezone.equals(ZoneOffset.UTC) ? "UTC" : timezone.getId(); StringBuilder builder = new StringBuilder() .append('<') .append(indexNamePrefix) .append('{') - .append(formatter.print(dateTime)).append("||/").append(dateRounding) - .append('{').append(indexNameFormat).append('|').append(timezone).append('}') + .append(formatter.format(dateTime)).append("||/").append(dateRounding) + .append('{').append(indexNameFormat).append('|').append(zone).append('}') .append('}') .append('>'); String dynamicIndexName = builder.toString(); @@ -125,11 +127,11 @@ TemplateScript.Factory getIndexNameFormatTemplate() { return indexNameFormatTemplate; } - DateTimeZone getTimezone() { + ZoneId getTimezone() { return timezone; } - List> getDateFormats() { + List> getDateFormats() { return dateFormats; } @@ -146,7 +148,7 @@ public DateIndexNameProcessor create(Map registry, St Map config) throws Exception { String localeString = ConfigurationUtils.readOptionalStringProperty(TYPE, tag, config, "locale"); String timezoneString = ConfigurationUtils.readOptionalStringProperty(TYPE, tag, config, "timezone"); - DateTimeZone timezone = timezoneString == null ? DateTimeZone.UTC : DateTimeZone.forID(timezoneString); + ZoneId timezone = timezoneString == null ? ZoneOffset.UTC : ZoneId.of(timezoneString); Locale locale = Locale.ENGLISH; if (localeString != null) { try { @@ -159,7 +161,7 @@ public DateIndexNameProcessor create(Map registry, St if (dateFormatStrings == null) { dateFormatStrings = Collections.singletonList("yyyy-MM-dd'T'HH:mm:ss.SSSXX"); } - List> dateFormats = new ArrayList<>(dateFormatStrings.size()); + List> dateFormats = new ArrayList<>(dateFormatStrings.size()); for (String format : dateFormatStrings) { DateFormat dateFormat = DateFormat.fromString(format); dateFormats.add(dateFormat.getFunction(format, timezone, locale)); diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/DateProcessor.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/DateProcessor.java index dd6e6006eeb6d..e7ad1356977e0 100644 --- a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/DateProcessor.java +++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/DateProcessor.java @@ -21,6 +21,7 @@ import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.time.DateFormatter; import org.elasticsearch.common.util.LocaleUtils; import org.elasticsearch.ingest.AbstractProcessor; import org.elasticsearch.ingest.ConfigurationUtils; @@ -28,10 +29,10 @@ import org.elasticsearch.ingest.Processor; import org.elasticsearch.script.ScriptService; import org.elasticsearch.script.TemplateScript; -import org.joda.time.DateTime; -import org.joda.time.DateTimeZone; -import org.joda.time.format.ISODateTimeFormat; +import java.time.ZoneId; +import java.time.ZoneOffset; +import java.time.ZonedDateTime; import java.util.ArrayList; import java.util.List; import java.util.Locale; @@ -42,13 +43,14 @@ public final class DateProcessor extends AbstractProcessor { public static final String TYPE = "date"; static final String DEFAULT_TARGET_FIELD = "@timestamp"; + public static final DateFormatter FORMATTER = DateFormatter.forPattern("yyyy-MM-dd'T'HH:mm:ss.SSSXXX"); private final TemplateScript.Factory timezone; private final TemplateScript.Factory locale; private final String field; private final String targetField; private final List formats; - private final List, Function>> 
dateParsers; + private final List, Function>> dateParsers; DateProcessor(String tag, @Nullable TemplateScript.Factory timezone, @Nullable TemplateScript.Factory locale, String field, List formats, String targetField) { @@ -65,8 +67,8 @@ public final class DateProcessor extends AbstractProcessor { } } - private DateTimeZone newDateTimeZone(Map params) { - return timezone == null ? DateTimeZone.UTC : DateTimeZone.forID(timezone.newInstance(params).execute()); + private ZoneId newDateTimeZone(Map params) { + return timezone == null ? ZoneOffset.UTC : ZoneId.of(timezone.newInstance(params).execute()); } private Locale newLocale(Map params) { @@ -82,9 +84,9 @@ public IngestDocument execute(IngestDocument ingestDocument) { value = obj.toString(); } - DateTime dateTime = null; + ZonedDateTime dateTime = null; Exception lastException = null; - for (Function, Function> dateParser : dateParsers) { + for (Function, Function> dateParser : dateParsers) { try { dateTime = dateParser.apply(ingestDocument.getSourceAndMetadata()).apply(value); } catch (Exception e) { @@ -97,7 +99,7 @@ public IngestDocument execute(IngestDocument ingestDocument) { throw new IllegalArgumentException("unable to parse date [" + value + "]", lastException); } - ingestDocument.setFieldValue(targetField, ISODateTimeFormat.dateTime().print(dateTime)); + ingestDocument.setFieldValue(targetField, FORMATTER.format(dateTime)); return ingestDocument; } diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DateFormatTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DateFormatTests.java index 32874aa6a5776..136c9f7f69a0a 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DateFormatTests.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DateFormatTests.java @@ -21,10 +21,7 @@ import org.elasticsearch.common.time.DateUtils; import org.elasticsearch.test.ESTestCase; -import org.joda.time.DateTime; -import org.joda.time.DateTimeZone; -import java.time.Instant; import java.time.ZoneId; import java.time.ZoneOffset; import java.time.ZonedDateTime; @@ -38,9 +35,9 @@ public class DateFormatTests extends ESTestCase { public void testParseJava() { - Function javaFunction = DateFormat.Java.getFunction("MMM dd HH:mm:ss Z", - DateTimeZone.forOffsetHours(-8), Locale.ENGLISH); - assertThat(Instant.ofEpochMilli(javaFunction.apply("Nov 24 01:29:01 -0800").getMillis()) + Function javaFunction = DateFormat.Java.getFunction("MMM dd HH:mm:ss Z", + ZoneOffset.ofHours(-8), Locale.ENGLISH); + assertThat(javaFunction.apply("Nov 24 01:29:01 -0800").toInstant() .atZone(ZoneId.of("GMT-8")) .format(DateTimeFormatter.ofPattern("MM dd HH:mm:ss", Locale.ENGLISH)), equalTo("11 24 01:29:01")); @@ -48,33 +45,35 @@ public void testParseJava() { public void testParseJavaDefaultYear() { String format = randomFrom("8dd/MM", "dd/MM"); - DateTimeZone timezone = DateUtils.zoneIdToDateTimeZone(ZoneId.of("Europe/Amsterdam")); - Function javaFunction = DateFormat.Java.getFunction(format, timezone, Locale.ENGLISH); + ZoneId timezone = DateUtils.of("Europe/Amsterdam"); + Function javaFunction = DateFormat.Java.getFunction(format, timezone, Locale.ENGLISH); int year = ZonedDateTime.now(ZoneOffset.UTC).getYear(); - DateTime dateTime = javaFunction.apply("12/06"); + ZonedDateTime dateTime = javaFunction.apply("12/06"); assertThat(dateTime.getYear(), is(year)); - assertThat(dateTime.toString(), is(year + "-06-12T00:00:00.000+02:00")); } public void testParseUnixMs() { - 
assertThat(DateFormat.UnixMs.getFunction(null, DateTimeZone.UTC, null).apply("1000500").getMillis(), equalTo(1000500L)); + assertThat(DateFormat.UnixMs.getFunction(null, ZoneOffset.UTC, null).apply("1000500").toInstant().toEpochMilli(), + equalTo(1000500L)); } public void testParseUnix() { - assertThat(DateFormat.Unix.getFunction(null, DateTimeZone.UTC, null).apply("1000.5").getMillis(), equalTo(1000500L)); + assertThat(DateFormat.Unix.getFunction(null, ZoneOffset.UTC, null).apply("1000.5").toInstant().toEpochMilli(), + equalTo(1000500L)); } public void testParseUnixWithMsPrecision() { - assertThat(DateFormat.Unix.getFunction(null, DateTimeZone.UTC, null).apply("1495718015").getMillis(), equalTo(1495718015000L)); + assertThat(DateFormat.Unix.getFunction(null, ZoneOffset.UTC, null).apply("1495718015").toInstant().toEpochMilli(), + equalTo(1495718015000L)); } public void testParseISO8601() { - assertThat(DateFormat.Iso8601.getFunction(null, DateTimeZone.UTC, null).apply("2001-01-01T00:00:00-0800").getMillis(), + assertThat(DateFormat.Iso8601.getFunction(null, ZoneOffset.UTC, null).apply("2001-01-01T00:00:00-0800").toInstant().toEpochMilli(), equalTo(978336000000L)); } public void testParseISO8601Failure() { - Function function = DateFormat.Iso8601.getFunction(null, DateTimeZone.UTC, null); + Function function = DateFormat.Iso8601.getFunction(null, ZoneOffset.UTC, null); try { function.apply("2001-01-0:00-0800"); fail("parse should have failed"); @@ -86,7 +85,7 @@ public void testParseISO8601Failure() { public void testTAI64NParse() { String input = "4000000050d506482dbdf024"; String expected = "2012-12-22T03:00:46.767+02:00"; - assertThat(DateFormat.Tai64n.getFunction(null, DateTimeZone.forOffsetHours(2), null) + assertThat(DateFormat.Tai64n.getFunction(null, ZoneOffset.ofHours(2), null) .apply((randomBoolean() ? 
"@" : "") + input).toString(), equalTo(expected)); } diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DateIndexNameFactoryTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DateIndexNameFactoryTests.java index 2735cf55776b0..67027f6ab6784 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DateIndexNameFactoryTests.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DateIndexNameFactoryTests.java @@ -23,8 +23,8 @@ import org.elasticsearch.ingest.TestTemplateService; import org.elasticsearch.test.ESTestCase; import org.hamcrest.Matchers; -import org.joda.time.DateTimeZone; +import java.time.ZoneOffset; import java.util.Arrays; import java.util.Collections; import java.util.HashMap; @@ -44,7 +44,7 @@ public void testDefaults() throws Exception { assertThat(processor.getIndexNamePrefixTemplate().newInstance(Collections.emptyMap()).execute(), Matchers.equalTo("")); assertThat(processor.getDateRoundingTemplate().newInstance(Collections.emptyMap()).execute(), Matchers.equalTo("y")); assertThat(processor.getIndexNameFormatTemplate().newInstance(Collections.emptyMap()).execute(), Matchers.equalTo("yyyy-MM-dd")); - assertThat(processor.getTimezone(), Matchers.equalTo(DateTimeZone.UTC)); + assertThat(processor.getTimezone(), Matchers.equalTo(ZoneOffset.UTC)); } public void testSpecifyOptionalSettings() throws Exception { @@ -74,7 +74,7 @@ public void testSpecifyOptionalSettings() throws Exception { config.put("timezone", "+02:00"); processor = factory.create(null, null, config); - assertThat(processor.getTimezone(), Matchers.equalTo(DateTimeZone.forOffsetHours(2))); + assertThat(processor.getTimezone(), Matchers.equalTo(ZoneOffset.ofHours(2))); config = new HashMap<>(); config.put("field", "_field"); diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DateIndexNameProcessorTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DateIndexNameProcessorTests.java index 63d3e0416cd2c..3d891ffb81f4f 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DateIndexNameProcessorTests.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DateIndexNameProcessorTests.java @@ -18,13 +18,14 @@ */ package org.elasticsearch.ingest.common; +import org.elasticsearch.common.time.DateFormatter; import org.elasticsearch.ingest.IngestDocument; import org.elasticsearch.ingest.TestTemplateService; import org.elasticsearch.test.ESTestCase; -import org.joda.time.DateTime; -import org.joda.time.DateTimeZone; -import org.joda.time.format.DateTimeFormat; +import java.time.ZoneId; +import java.time.ZoneOffset; +import java.time.ZonedDateTime; import java.util.Collections; import java.util.List; import java.util.Locale; @@ -35,9 +36,9 @@ public class DateIndexNameProcessorTests extends ESTestCase { public void testJavaPattern() throws Exception { - Function function = DateFormat.Java.getFunction("yyyy-MM-dd'T'HH:mm:ss.SSSXX", DateTimeZone.UTC, Locale.ROOT); + Function function = DateFormat.Java.getFunction("yyyy-MM-dd'T'HH:mm:ss.SSSXX", ZoneOffset.UTC, Locale.ROOT); DateIndexNameProcessor processor = createProcessor("_field", Collections.singletonList(function), - DateTimeZone.UTC, "events-", "y", "yyyyMMdd"); + ZoneOffset.UTC, "events-", "y", "yyyyMMdd"); IngestDocument document = new IngestDocument("_index", "_type", "_id", null, null, null, Collections.singletonMap("_field", "2016-04-25T12:24:20.101Z")); 
processor.execute(document); @@ -45,9 +46,9 @@ public void testJavaPattern() throws Exception { } public void testTAI64N()throws Exception { - Function function = DateFormat.Tai64n.getFunction(null, DateTimeZone.UTC, null); + Function function = DateFormat.Tai64n.getFunction(null, ZoneOffset.UTC, null); DateIndexNameProcessor dateProcessor = createProcessor("_field", Collections.singletonList(function), - DateTimeZone.UTC, "events-", "m", "yyyyMMdd"); + ZoneOffset.UTC, "events-", "m", "yyyyMMdd"); IngestDocument document = new IngestDocument("_index", "_type", "_id", null, null, null, Collections.singletonMap("_field", (randomBoolean() ? "@" : "") + "4000000050d506482dbdf024")); dateProcessor.execute(document); @@ -55,9 +56,9 @@ public void testTAI64N()throws Exception { } public void testUnixMs()throws Exception { - Function function = DateFormat.UnixMs.getFunction(null, DateTimeZone.UTC, null); + Function function = DateFormat.UnixMs.getFunction(null, ZoneOffset.UTC, null); DateIndexNameProcessor dateProcessor = createProcessor("_field", Collections.singletonList(function), - DateTimeZone.UTC, "events-", "m", "yyyyMMdd"); + ZoneOffset.UTC, "events-", "m", "yyyyMMdd"); IngestDocument document = new IngestDocument("_index", "_type", "_id", null, null, null, Collections.singletonMap("_field", "1000500")); dateProcessor.execute(document); @@ -70,9 +71,9 @@ public void testUnixMs()throws Exception { } public void testUnix()throws Exception { - Function function = DateFormat.Unix.getFunction(null, DateTimeZone.UTC, null); + Function function = DateFormat.Unix.getFunction(null, ZoneOffset.UTC, null); DateIndexNameProcessor dateProcessor = createProcessor("_field", Collections.singletonList(function), - DateTimeZone.UTC, "events-", "m", "yyyyMMdd"); + ZoneOffset.UTC, "events-", "m", "yyyyMMdd"); IngestDocument document = new IngestDocument("_index", "_type", "_id", null, null, null, Collections.singletonMap("_field", "1000.5")); dateProcessor.execute(document); @@ -84,10 +85,10 @@ public void testTemplatedFields() throws Exception { String dateRounding = randomFrom("y", "M", "w", "d", "h", "m", "s"); String indexNameFormat = randomFrom("yyyy-MM-dd'T'HH:mm:ss.SSSZZ", "yyyyMMdd", "MM/dd/yyyy"); String date = Integer.toString(randomInt()); - Function dateTimeFunction = DateFormat.Unix.getFunction(null, DateTimeZone.UTC, null); + Function dateTimeFunction = DateFormat.Unix.getFunction(null, ZoneOffset.UTC, null); DateIndexNameProcessor dateProcessor = createProcessor("_field", - Collections.singletonList(dateTimeFunction), DateTimeZone.UTC, indexNamePrefix, + Collections.singletonList(dateTimeFunction), ZoneOffset.UTC, indexNamePrefix, dateRounding, indexNameFormat); IngestDocument document = new IngestDocument("_index", "_type", "_id", null, null, null, @@ -95,12 +96,12 @@ public void testTemplatedFields() throws Exception { dateProcessor.execute(document); assertThat(document.getSourceAndMetadata().get("_index"), - equalTo("<"+indexNamePrefix+"{"+DateTimeFormat.forPattern(indexNameFormat) - .print(dateTimeFunction.apply(date))+"||/"+dateRounding+"{"+indexNameFormat+"|UTC}}>")); + equalTo("<"+indexNamePrefix+"{" + DateFormatter.forPattern(indexNameFormat) + .format(dateTimeFunction.apply(date))+"||/"+dateRounding+"{"+indexNameFormat+"|UTC}}>")); } - private DateIndexNameProcessor createProcessor(String field, List> dateFormats, - DateTimeZone timezone, String indexNamePrefix, String dateRounding, + private DateIndexNameProcessor createProcessor(String field, List> dateFormats, + ZoneId timezone, 
String indexNamePrefix, String dateRounding, String indexNameFormat) { return new DateIndexNameProcessor(randomAlphaOfLength(10), field, dateFormats, timezone, new TestTemplateService.MockTemplateScript.Factory(indexNamePrefix), diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DateProcessorFactoryTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DateProcessorFactoryTests.java index 2cf11f6d215d0..7d227b222696f 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DateProcessorFactoryTests.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DateProcessorFactoryTests.java @@ -22,9 +22,9 @@ import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.ingest.TestTemplateService; import org.elasticsearch.test.ESTestCase; -import org.joda.time.DateTimeZone; import org.junit.Before; +import java.time.ZoneId; import java.util.Arrays; import java.util.Collections; import java.util.HashMap; @@ -105,10 +105,10 @@ public void testParseTimezone() throws Exception { config.put("field", sourceField); config.put("formats", Collections.singletonList("dd/MM/yyyyy")); - DateTimeZone timezone = randomDateTimeZone(); - config.put("timezone", timezone.getID()); + ZoneId timezone = randomZone(); + config.put("timezone", timezone.getId()); DateProcessor processor = factory.create(null, null, config); - assertThat(processor.getTimezone().newInstance(Collections.emptyMap()).execute(), equalTo(timezone.getID())); + assertThat(processor.getTimezone().newInstance(Collections.emptyMap()).execute(), equalTo(timezone.getId())); } public void testParseMatchFormats() throws Exception { diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DateProcessorTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DateProcessorTests.java index 6157e3e9e50f9..c9ab07d82bbcc 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DateProcessorTests.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DateProcessorTests.java @@ -45,9 +45,7 @@ private TemplateScript.Factory templatize(Locale locale) { } private TemplateScript.Factory templatize(ZoneId timezone) { - // prevent writing "UTC" as string, as joda time does not parse it - String id = timezone.equals(ZoneOffset.UTC) ? "UTC" : timezone.getId(); - return new TestTemplateService.MockTemplateScript.Factory(id); + return new TestTemplateService.MockTemplateScript.Factory(timezone.getId()); } public void testJavaPattern() { @@ -186,7 +184,7 @@ public void testInvalidTimezone() { IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> processor.execute(RandomDocumentPicks.randomIngestDocument(random(), document))); assertThat(e.getMessage(), equalTo("unable to parse date [2010]")); - assertThat(e.getCause().getMessage(), equalTo("The datetime zone id 'invalid_timezone' is not recognised")); + assertThat(e.getCause().getMessage(), equalTo("Unknown time-zone ID: invalid_timezone")); } public void testInvalidLocale() { From 0e6a7c20a1730156756fad33d5f3297958a39db8 Mon Sep 17 00:00:00 2001 From: Tanguy Leroux Date: Fri, 1 Feb 2019 11:16:54 +0100 Subject: [PATCH 17/54] Fix FullClusterRestartIT.testHistoryUUIDIsAdded (#38098) This test failed once because the index wasn't fully ready (i.e., its engine was not yet opened). This commit changes the test so that it waits for the index to be green before checking the history UUID.
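For context, "waiting for green" is a cluster-health call that blocks until every shard of the index is allocated. The test itself uses the framework helper ensureGreenLongWait(index); a rough sketch of the same wait over the low-level REST client, assuming the standard _cluster/health parameters (the timeout value is illustrative, not taken from the patch):

    // Sketch only; the actual test calls the framework helper ensureGreenLongWait(index).
    Request health = new Request("GET", "/_cluster/health/" + index);
    health.addParameter("wait_for_status", "green"); // block until all shards are allocated
    health.addParameter("timeout", "70s");           // illustrative timeout
    client().performRequest(health);

Only after such a wait do the shard-level stats reliably report every shard copy, which is what the new assertions below rely on.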
Closes #34452 --- .../org/elasticsearch/upgrades/FullClusterRestartIT.java | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java b/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java index 1b2503ccb99d5..f997255959256 100644 --- a/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java +++ b/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java @@ -68,6 +68,7 @@ import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.startsWith; @@ -970,10 +971,14 @@ public void testHistoryUUIDIsAdded() throws Exception { createIndex.setJsonEntity(Strings.toString(mappingsAndSettings)); client().performRequest(createIndex); } else { + ensureGreenLongWait(index); + Request statsRequest = new Request("GET", index + "/_stats"); statsRequest.addParameter("level", "shards"); Response response = client().performRequest(statsRequest); List shardStats = ObjectPath.createFromResponse(response).evaluate("indices." + index + ".shards.0"); + assertThat(shardStats, notNullValue()); + assertThat("Expected stats for 2 shards", shardStats, hasSize(2)); String globalHistoryUUID = null; for (Object shard : shardStats) { final String nodeId = ObjectPath.evaluate(shard, "routing.node"); From bfd618cf8324ef3d1f67f304286bf468eb84ad8e Mon Sep 17 00:00:00 2001 From: Andrey Ershov Date: Fri, 1 Feb 2019 11:34:31 +0100 Subject: [PATCH 18/54] Universal cluster bootstrap method for tests with autoMinMasterNodes=false (#38038) Currently, a few tests use autoMinMasterNodes=false and hence override addExtraClusterBootstrapSettings; mostly this is 10-30 lines of code copy-pasted from class to class. This PR introduces `InternalTestCluster.setBootstrapMasterNodeIndex`, which is suitable for all of these classes, so the copy-pasted code can be removed. Removing code is always a good thing!
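To make the change concrete, here is a minimal before/after sketch (illustrative only; the names are taken from the diffs below, e.g. MinimumMasterNodesIT):

    // Before: each test class overrode addExtraClusterBootstrapSettings and hand-built
    // cluster.initial_master_nodes for one of the node settings (10-30 lines per class).
    // After: one call telling the test cluster which master-eligible node, counting
    // from 0, should bootstrap the cluster once it starts.
    public void testTwoNodesNoMasterBlock() throws Exception {
        internalCluster().setBootstrapMasterNodeIndex(1); // bootstrap once the second node is up
        List<String> nodes = internalCluster().startNodes(2);
        // ... rest of the test is unchanged ...
    }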
--- .../rest/discovery/Zen2RestApiIT.java | 25 +----- .../admin/indices/exists/IndicesExistsIT.java | 15 +--- .../cluster/MinimumMasterNodesIT.java | 34 +------- .../cluster/SpecificMasterNodesIT.java | 19 +---- .../coordination/UnsafeBootstrapMasterIT.java | 47 +--------- .../gateway/RecoverAfterNodesIT.java | 22 +---- .../elasticsearch/test/ESIntegTestCase.java | 20 +---- .../test/InternalTestCluster.java | 85 +++++++++++++++++-- .../test/NodeConfigurationSource.java | 5 -- .../test/test/InternalTestClusterTests.java | 46 ++-------- 10 files changed, 105 insertions(+), 213 deletions(-) diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/rest/discovery/Zen2RestApiIT.java b/modules/transport-netty4/src/test/java/org/elasticsearch/rest/discovery/Zen2RestApiIT.java index 88afa57e83e23..f26b02696e7e5 100644 --- a/modules/transport-netty4/src/test/java/org/elasticsearch/rest/discovery/Zen2RestApiIT.java +++ b/modules/transport-netty4/src/test/java/org/elasticsearch/rest/discovery/Zen2RestApiIT.java @@ -28,7 +28,6 @@ import org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseException; import org.elasticsearch.client.RestClient; -import org.elasticsearch.cluster.coordination.ClusterBootstrapService; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.routing.UnassignedInfo; import org.elasticsearch.common.Priority; @@ -41,10 +40,8 @@ import org.hamcrest.Matchers; import java.io.IOException; -import java.util.ArrayList; import java.util.Collections; import java.util.List; -import java.util.stream.Collectors; import static org.hamcrest.core.Is.is; @@ -59,30 +56,13 @@ protected Settings nodeSettings(int nodeOrdinal) { return Settings.builder().put(super.nodeSettings(nodeOrdinal)).put(TestZenDiscovery.USE_ZEN2.getKey(), true).build(); } - @Override - protected List addExtraClusterBootstrapSettings(List allNodesSettings) { - final Settings firstNodeSettings = allNodesSettings.get(0); - final List otherNodesSettings = allNodesSettings.subList(1, allNodesSettings.size()); - final List masterNodeNames = allNodesSettings.stream() - .filter(org.elasticsearch.node.Node.NODE_MASTER_SETTING::get) - .map(org.elasticsearch.node.Node.NODE_NAME_SETTING::get) - .collect(Collectors.toList()); - final List updatedSettings = new ArrayList<>(); - - updatedSettings.add(Settings.builder().put(firstNodeSettings) - .putList(ClusterBootstrapService.INITIAL_MASTER_NODES_SETTING.getKey(), masterNodeNames) - .build()); - updatedSettings.addAll(otherNodesSettings); - - return updatedSettings; - } - @Override protected boolean addMockHttpTransport() { return false; // enable http } public void testRollingRestartOfTwoNodeCluster() throws Exception { + internalCluster().setBootstrapMasterNodeIndex(1); final List nodes = internalCluster().startNodes(2); createIndex("test", Settings.builder() @@ -142,6 +122,7 @@ public Settings onNodeStopped(String nodeName) throws IOException { } public void testClearVotingTombstonesNotWaitingForRemoval() throws Exception { + internalCluster().setBootstrapMasterNodeIndex(2); List nodes = internalCluster().startNodes(3); RestClient restClient = getRestClient(); Response response = restClient.performRequest(new Request("POST", "/_cluster/voting_config_exclusions/" + nodes.get(2))); @@ -154,6 +135,7 @@ public void testClearVotingTombstonesNotWaitingForRemoval() throws Exception { } public void testClearVotingTombstonesWaitingForRemoval() throws Exception { + internalCluster().setBootstrapMasterNodeIndex(2); List nodes = 
internalCluster().startNodes(3); RestClient restClient = getRestClient(); String nodeToWithdraw = nodes.get(randomIntBetween(0, 2)); @@ -167,6 +149,7 @@ public void testClearVotingTombstonesWaitingForRemoval() throws Exception { } public void testFailsOnUnknownNode() throws Exception { + internalCluster().setBootstrapMasterNodeIndex(2); internalCluster().startNodes(3); RestClient restClient = getRestClient(); try { diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/exists/IndicesExistsIT.java b/server/src/test/java/org/elasticsearch/action/admin/indices/exists/IndicesExistsIT.java index 33c0d22473c65..7cfc2ea1f280d 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/exists/IndicesExistsIT.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/exists/IndicesExistsIT.java @@ -19,7 +19,6 @@ package org.elasticsearch.action.admin.indices.exists; -import org.elasticsearch.cluster.coordination.ClusterBootstrapService; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.discovery.MasterNotDiscoveredException; @@ -29,26 +28,14 @@ import org.elasticsearch.test.InternalTestCluster; import java.io.IOException; -import java.util.List; -import java.util.stream.Collectors; - -import static org.elasticsearch.node.Node.NODE_MASTER_SETTING; -import static org.elasticsearch.node.Node.NODE_NAME_SETTING; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertThrows; @ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, numClientNodes = 0, transportClientRatio = 0.0, autoMinMasterNodes = false) public class IndicesExistsIT extends ESIntegTestCase { - @Override - protected List addExtraClusterBootstrapSettings(List allNodesSettings) { - final List masterNodeNames - = allNodesSettings.stream().filter(NODE_MASTER_SETTING::get).map(NODE_NAME_SETTING::get).collect(Collectors.toList()); - return allNodesSettings.stream().map(s -> Settings.builder().put(s) - .putList(ClusterBootstrapService.INITIAL_MASTER_NODES_SETTING.getKey(), masterNodeNames).build()).collect(Collectors.toList()); - } - public void testIndexExistsWithBlocksInPlace() throws IOException { + internalCluster().setBootstrapMasterNodeIndex(0); Settings settings = Settings.builder() .put(GatewayService.RECOVER_AFTER_NODES_SETTING.getKey(), 99).build(); String node = internalCluster().startNode(settings); diff --git a/server/src/test/java/org/elasticsearch/cluster/MinimumMasterNodesIT.java b/server/src/test/java/org/elasticsearch/cluster/MinimumMasterNodesIT.java index 26b8ae88d266d..4b23cd223a9dd 100644 --- a/server/src/test/java/org/elasticsearch/cluster/MinimumMasterNodesIT.java +++ b/server/src/test/java/org/elasticsearch/cluster/MinimumMasterNodesIT.java @@ -25,7 +25,6 @@ import org.elasticsearch.action.admin.cluster.configuration.ClearVotingConfigExclusionsRequest; import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; import org.elasticsearch.client.Client; -import org.elasticsearch.cluster.coordination.ClusterBootstrapService; import org.elasticsearch.cluster.coordination.FailedToCommitClusterStateException; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.node.DiscoveryNode; @@ -35,7 +34,6 @@ import org.elasticsearch.discovery.DiscoverySettings; import org.elasticsearch.discovery.zen.ZenDiscovery; import org.elasticsearch.index.query.QueryBuilders; -import org.elasticsearch.node.Node; import org.elasticsearch.plugins.Plugin; 
import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESIntegTestCase.ClusterScope; @@ -46,12 +44,10 @@ import org.elasticsearch.test.junit.annotations.TestLogging; import org.elasticsearch.test.transport.MockTransportService; -import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.HashSet; -import java.util.List; import java.util.Set; import java.util.concurrent.CountDownLatch; import java.util.concurrent.atomic.AtomicReference; @@ -68,8 +64,6 @@ @TestLogging("_root:DEBUG,org.elasticsearch.cluster.service:TRACE,org.elasticsearch.discovery.zen:TRACE") public class MinimumMasterNodesIT extends ESIntegTestCase { - private int bootstrapNodeId; - @Override protected Collection> nodePlugins() { final HashSet> classes = new HashSet<>(super.nodePlugins()); @@ -77,28 +71,8 @@ protected Collection> nodePlugins() { return classes; } - @Override - protected List addExtraClusterBootstrapSettings(List allNodesSettings) { - if (internalCluster().size() + allNodesSettings.size() == bootstrapNodeId) { - List nodeNames = new ArrayList<>(); - Collections.addAll(nodeNames, internalCluster().getNodeNames()); - allNodesSettings.forEach(settings -> nodeNames.add(Node.NODE_NAME_SETTING.get(settings))); - - List otherNodesSettings = allNodesSettings.subList(0, allNodesSettings.size() - 1); - Settings lastNodeSettings = allNodesSettings.get(allNodesSettings.size()-1); - List newSettings = new ArrayList<>(); - newSettings.addAll(otherNodesSettings); - newSettings.add(Settings.builder().put(lastNodeSettings) - .putList(ClusterBootstrapService.INITIAL_MASTER_NODES_SETTING.getKey(), nodeNames) - .build()); - return newSettings; - } - return allNodesSettings; - } - public void testTwoNodesNoMasterBlock() throws Exception { - //bootstrap cluster once second node is started - bootstrapNodeId = 2; + internalCluster().setBootstrapMasterNodeIndex(1); Settings settings = Settings.builder() .put(ZenDiscovery.PING_TIMEOUT_SETTING.getKey(), "200ms") @@ -231,8 +205,7 @@ public void testTwoNodesNoMasterBlock() throws Exception { } public void testThreeNodesNoMasterBlock() throws Exception { - //bootstrap cluster once 3rd node is started - bootstrapNodeId = 3; + internalCluster().setBootstrapMasterNodeIndex(2); Settings settings = Settings.builder() .put(ZenDiscovery.PING_TIMEOUT_SETTING.getKey(), "1s") @@ -307,8 +280,7 @@ public void testThreeNodesNoMasterBlock() throws Exception { } public void testCannotCommitStateThreeNodes() throws Exception { - //bootstrap cluster once 3rd node is started - bootstrapNodeId = 3; + internalCluster().setBootstrapMasterNodeIndex(2); Settings settings = Settings.builder() .put(ZenDiscovery.PING_TIMEOUT_SETTING.getKey(), "200ms") diff --git a/server/src/test/java/org/elasticsearch/cluster/SpecificMasterNodesIT.java b/server/src/test/java/org/elasticsearch/cluster/SpecificMasterNodesIT.java index 8758e169b5124..071c8a0195531 100644 --- a/server/src/test/java/org/elasticsearch/cluster/SpecificMasterNodesIT.java +++ b/server/src/test/java/org/elasticsearch/cluster/SpecificMasterNodesIT.java @@ -22,7 +22,6 @@ import org.apache.lucene.search.join.ScoreMode; import org.elasticsearch.action.admin.cluster.configuration.AddVotingConfigExclusionsAction; import org.elasticsearch.action.admin.cluster.configuration.AddVotingConfigExclusionsRequest; -import org.elasticsearch.cluster.coordination.ClusterBootstrapService; import org.elasticsearch.common.settings.Settings; import 
org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.discovery.MasterNotDiscoveredException; @@ -35,8 +34,6 @@ import org.elasticsearch.test.junit.annotations.TestLogging; import java.io.IOException; -import java.util.Collections; -import java.util.List; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.hamcrest.Matchers.equalTo; @@ -46,20 +43,8 @@ @TestLogging("_root:DEBUG,org.elasticsearch.action.admin.cluster.state:TRACE") public class SpecificMasterNodesIT extends ESIntegTestCase { - @Override - protected List addExtraClusterBootstrapSettings(List allNodesSettings) { - // if it's the first master in the cluster bootstrap the cluster with this node name - Settings settings = allNodesSettings.get(0); - if (internalCluster().numMasterNodes() == 0 && settings.getAsBoolean(Node.NODE_MASTER_SETTING.getKey(), false)) { - return Collections.singletonList(Settings.builder() - .put(settings) - .put(ClusterBootstrapService.INITIAL_MASTER_NODES_SETTING.getKey(), settings.get(Node.NODE_NAME_SETTING.getKey())) - .build()); - } - return allNodesSettings; - } - public void testSimpleOnlyMasterNodeElection() throws IOException { + internalCluster().setBootstrapMasterNodeIndex(0); logger.info("--> start data node / non master node"); internalCluster().startNode(Settings.builder().put(Node.NODE_DATA_SETTING.getKey(), true) .put(Node.NODE_MASTER_SETTING.getKey(), false) @@ -100,6 +85,7 @@ public void testSimpleOnlyMasterNodeElection() throws IOException { } public void testElectOnlyBetweenMasterNodes() throws Exception { + internalCluster().setBootstrapMasterNodeIndex(0); logger.info("--> start data node / non master node"); internalCluster().startNode(Settings.builder().put(Node.NODE_DATA_SETTING.getKey(), true) .put(Node.NODE_MASTER_SETTING.getKey(), false).put("discovery.initial_state_timeout", "1s")); @@ -146,6 +132,7 @@ public void testElectOnlyBetweenMasterNodes() throws Exception { } public void testAliasFilterValidation() { + internalCluster().setBootstrapMasterNodeIndex(0); logger.info("--> start master node / non data"); internalCluster().startNode(Settings.builder() .put(Node.NODE_DATA_SETTING.getKey(), false).put(Node.NODE_MASTER_SETTING.getKey(), true)); diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/UnsafeBootstrapMasterIT.java b/server/src/test/java/org/elasticsearch/cluster/coordination/UnsafeBootstrapMasterIT.java index 334d392b1793d..be983ff8b5f32 100644 --- a/server/src/test/java/org/elasticsearch/cluster/coordination/UnsafeBootstrapMasterIT.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/UnsafeBootstrapMasterIT.java @@ -36,11 +36,8 @@ import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.InternalTestCluster; import org.elasticsearch.test.junit.annotations.TestLogging; -import org.junit.Before; import java.io.IOException; -import java.util.ArrayList; -import java.util.Collections; import java.util.List; import java.util.Locale; @@ -50,42 +47,6 @@ @TestLogging("_root:DEBUG,org.elasticsearch.cluster.service:TRACE,org.elasticsearch.discovery.zen:TRACE") public class UnsafeBootstrapMasterIT extends ESIntegTestCase { - private int bootstrapNodeId; - - @Before - public void resetBootstrapNodeId() { - bootstrapNodeId = -1; - } - - /** - * Performs cluster bootstrap when node with id bootstrapNodeId is started. - * Any node of the batch could be selected as bootstrap target. 
- */ - @Override - protected List addExtraClusterBootstrapSettings(List allNodesSettings) { - if (internalCluster().size() + allNodesSettings.size() == bootstrapNodeId) { - List nodeNames = new ArrayList<>(); - Collections.addAll(nodeNames, internalCluster().getNodeNames()); - allNodesSettings.forEach(settings -> nodeNames.add(Node.NODE_NAME_SETTING.get(settings))); - - List newSettings = new ArrayList<>(); - int bootstrapIndex = randomInt(allNodesSettings.size() - 1); - for (int i = 0; i < allNodesSettings.size(); i++) { - Settings nodeSettings = allNodesSettings.get(i); - if (i == bootstrapIndex) { - newSettings.add(Settings.builder().put(nodeSettings) - .putList(ClusterBootstrapService.INITIAL_MASTER_NODES_SETTING.getKey(), nodeNames) - .build()); - } else { - newSettings.add(nodeSettings); - } - } - - return newSettings; - } - return allNodesSettings; - } - private MockTerminal executeCommand(Environment environment, boolean abort) throws Exception { final UnsafeBootstrapMasterCommand command = new UnsafeBootstrapMasterCommand(); final MockTerminal terminal = new MockTerminal(); @@ -169,7 +130,7 @@ public void testNotBootstrappedCluster() throws Exception { } public void testNoManifestFile() throws IOException { - bootstrapNodeId = 1; + internalCluster().setBootstrapMasterNodeIndex(0); internalCluster().startNode(); ensureStableCluster(1); NodeEnvironment nodeEnvironment = internalCluster().getMasterNodeInstance(NodeEnvironment.class); @@ -181,7 +142,7 @@ public void testNoManifestFile() throws IOException { } public void testNoMetaData() throws IOException { - bootstrapNodeId = 1; + internalCluster().setBootstrapMasterNodeIndex(0); internalCluster().startNode(); ensureStableCluster(1); NodeEnvironment nodeEnvironment = internalCluster().getMasterNodeInstance(NodeEnvironment.class); @@ -194,7 +155,7 @@ public void testNoMetaData() throws IOException { } public void testAbortedByUser() throws IOException { - bootstrapNodeId = 1; + internalCluster().setBootstrapMasterNodeIndex(0); internalCluster().startNode(); ensureStableCluster(1); internalCluster().stopRandomDataNode(); @@ -204,7 +165,7 @@ public void testAbortedByUser() throws IOException { } public void test3MasterNodes2Failed() throws Exception { - bootstrapNodeId = 3; + internalCluster().setBootstrapMasterNodeIndex(2); List masterNodes = internalCluster().startMasterOnlyNodes(3, Settings.EMPTY); String dataNode = internalCluster().startDataOnlyNode(); diff --git a/server/src/test/java/org/elasticsearch/gateway/RecoverAfterNodesIT.java b/server/src/test/java/org/elasticsearch/gateway/RecoverAfterNodesIT.java index e6fc2ed975fbb..86976d553fa2a 100644 --- a/server/src/test/java/org/elasticsearch/gateway/RecoverAfterNodesIT.java +++ b/server/src/test/java/org/elasticsearch/gateway/RecoverAfterNodesIT.java @@ -22,7 +22,6 @@ import org.elasticsearch.client.Client; import org.elasticsearch.cluster.block.ClusterBlock; import org.elasticsearch.cluster.block.ClusterBlockLevel; -import org.elasticsearch.cluster.coordination.ClusterBootstrapService; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.node.Node; @@ -30,8 +29,6 @@ import org.elasticsearch.test.ESIntegTestCase.ClusterScope; import org.elasticsearch.test.ESIntegTestCase.Scope; -import java.util.ArrayList; -import java.util.List; import java.util.Set; import static org.hamcrest.Matchers.equalTo; @@ -41,22 +38,6 @@ public class RecoverAfterNodesIT extends ESIntegTestCase { private static final TimeValue 
BLOCK_WAIT_TIMEOUT = TimeValue.timeValueSeconds(10); - @Override - protected List addExtraClusterBootstrapSettings(List allNodesSettings) { - if (internalCluster().numDataAndMasterNodes() == 0) { - final Settings firstNodeSettings = allNodesSettings.get(0); - final List otherNodesSettings = allNodesSettings.subList(1, allNodesSettings.size()); - - final List updatedSettings = new ArrayList<>(); - updatedSettings.add(Settings.builder().put(firstNodeSettings) - .putList(ClusterBootstrapService.INITIAL_MASTER_NODES_SETTING.getKey(), - Node.NODE_NAME_SETTING.get(firstNodeSettings)).build()); - updatedSettings.addAll(otherNodesSettings); - - return updatedSettings; - } - return super.addExtraClusterBootstrapSettings(allNodesSettings); - } public Set waitForNoBlocksOnNode(TimeValue timeout, Client nodeClient) { long start = System.currentTimeMillis(); @@ -75,6 +56,7 @@ public Client startNode(Settings.Builder settings) { } public void testRecoverAfterNodes() throws Exception { + internalCluster().setBootstrapMasterNodeIndex(0); logger.info("--> start node (1)"); Client clientNode1 = startNode(Settings.builder().put("gateway.recover_after_nodes", 3)); assertThat(clientNode1.admin().cluster().prepareState().setLocal(true).execute().actionGet() @@ -100,6 +82,7 @@ public void testRecoverAfterNodes() throws Exception { } public void testRecoverAfterMasterNodes() throws Exception { + internalCluster().setBootstrapMasterNodeIndex(0); logger.info("--> start master_node (1)"); Client master1 = startNode(Settings.builder() .put("gateway.recover_after_master_nodes", 2).put(Node.NODE_DATA_SETTING.getKey(), false) @@ -145,6 +128,7 @@ public void testRecoverAfterMasterNodes() throws Exception { } public void testRecoverAfterDataNodes() throws Exception { + internalCluster().setBootstrapMasterNodeIndex(0); logger.info("--> start master_node (1)"); Client master1 = startNode(Settings.builder() .put("gateway.recover_after_data_nodes", 2) diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java index 45f8682dc5e61..0dfdd2505235a 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java @@ -1942,11 +1942,6 @@ public Settings nodeSettings(int nodeOrdinal) { .put(ESIntegTestCase.this.nodeSettings(nodeOrdinal)).build(); } - @Override - public List addExtraClusterBootstrapSettings(List allNodesSettings) { - return ESIntegTestCase.this.addExtraClusterBootstrapSettings(allNodesSettings); - } - @Override public Path nodeConfigPath(int nodeOrdinal) { return ESIntegTestCase.this.nodeConfigPath(nodeOrdinal); @@ -1975,18 +1970,6 @@ public Collection> transportClientPlugins() { }; } - /** - * This method is called before starting a collection of nodes. - * At this point the test has a holistic view on all nodes settings and might perform settings adjustments as needed. - * For instance, the test could retrieve master node names and fill in - * {@link org.elasticsearch.cluster.coordination.ClusterBootstrapService#INITIAL_MASTER_NODES_SETTING} setting. - * - * @param allNodesSettings list of node settings before update - * @return list of node settings after update - */ - protected List addExtraClusterBootstrapSettings(List allNodesSettings) { - return allNodesSettings; - } /** * Iff this returns true mock transport implementations are used for the test runs. Otherwise not mock transport impls are used. 
@@ -2214,6 +2197,9 @@ public final void cleanUpCluster() throws Exception { // Deleting indices is going to clear search contexts implicitly so we // need to check that there are no more in-flight search contexts before // we remove indices + if (isInternalCluster()) { + internalCluster().setBootstrapMasterNodeIndex(-1); + } super.ensureAllSearchContextsReleased(); if (runTestScopeLifecycle()) { printTestMessage("cleaning up after"); diff --git a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java index 9313d9389d49c..5e75a50bef4d9 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java +++ b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java @@ -43,6 +43,7 @@ import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.action.index.MappingUpdatedAction; +import org.elasticsearch.cluster.coordination.ClusterBootstrapService; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNode.Role; @@ -243,6 +244,8 @@ public final class InternalTestCluster extends TestCluster { // If set to true only the first node in the cluster will be made a unicast node private boolean hostsListContainsOnlyFirstNode; + private int bootstrapMasterNodeIndex = -1; + public InternalTestCluster( final long clusterSeed, final Path baseDir, @@ -400,6 +403,22 @@ public InternalTestCluster( EsExecutors.daemonThreadFactory("test_" + clusterName), new ThreadContext(Settings.EMPTY)); } + public int getBootstrapMasterNodeIndex() { + return bootstrapMasterNodeIndex; + } + + /** + * Sets {@link #bootstrapMasterNodeIndex} to the given value, see {@link #bootstrapMasterNodeWithSpecifiedIndex(List)} + * for the description of how this field is used. + * It's only possible to change {@link #bootstrapMasterNodeIndex} value if autoManageMinMasterNodes is false. 
+ */ + public void setBootstrapMasterNodeIndex(int bootstrapMasterNodeIndex) { + if (autoManageMinMasterNodes && bootstrapMasterNodeIndex != -1) { + throw new AssertionError("bootstrapMasterNodeIndex should be -1 if autoManageMinMasterNodes is true"); + } + this.bootstrapMasterNodeIndex = bootstrapMasterNodeIndex; + } + @Override public String getClusterName() { return clusterName; @@ -1146,7 +1165,7 @@ private synchronized void reset(boolean wipeData) throws IOException { settings.add(getNodeSettings(i, sharedNodesSeeds[i], extraSettings.build(), defaultMinMasterNodes)); } - int bootstrapNodeIndex = -1; + int autoBootstrapMasterNodeIndex = -1; final List masterNodeNames = settings.stream() .filter(Node.NODE_MASTER_SETTING::get) .map(Node.NODE_NAME_SETTING::get) @@ -1154,17 +1173,17 @@ private synchronized void reset(boolean wipeData) throws IOException { if (prevNodeCount == 0 && autoManageMinMasterNodes) { if (numSharedDedicatedMasterNodes > 0) { - bootstrapNodeIndex = RandomNumbers.randomIntBetween(random, 0, numSharedDedicatedMasterNodes - 1); + autoBootstrapMasterNodeIndex = RandomNumbers.randomIntBetween(random, 0, numSharedDedicatedMasterNodes - 1); } else if (numSharedDataNodes > 0) { - bootstrapNodeIndex = RandomNumbers.randomIntBetween(random, 0, numSharedDataNodes - 1); + autoBootstrapMasterNodeIndex = RandomNumbers.randomIntBetween(random, 0, numSharedDataNodes - 1); } } - final List updatedSettings = nodeConfigurationSource.addExtraClusterBootstrapSettings(settings); + final List updatedSettings = bootstrapMasterNodeWithSpecifiedIndex(settings); for (int i = 0; i < numSharedDedicatedMasterNodes + numSharedDataNodes + numSharedCoordOnlyNodes; i++) { Settings nodeSettings = updatedSettings.get(i); - if (i == bootstrapNodeIndex) { + if (i == autoBootstrapMasterNodeIndex) { nodeSettings = Settings.builder().putList(INITIAL_MASTER_NODES_SETTING.getKey(), masterNodeNames).put(nodeSettings).build(); } final NodeAndClient nodeAndClient = buildNode(i, nodeSettings, true, onTransportServiceStarted); @@ -1944,6 +1963,54 @@ public synchronized Set nodesInclude(String index) { return Collections.emptySet(); } + /** + * Performs cluster bootstrap when node with index {@link #bootstrapMasterNodeIndex} is started + * with the names of all existing and new master-eligible nodes. + * Indexing starts from 0. + * If {@link #bootstrapMasterNodeIndex} is -1 (default), this method does nothing. 
+ */ + private List bootstrapMasterNodeWithSpecifiedIndex(List allNodesSettings) { + if (getBootstrapMasterNodeIndex() == -1) { // fast-path + return allNodesSettings; + } + + int currentNodeId = numMasterNodes() - 1; + List newSettings = new ArrayList<>(); + + for (Settings settings : allNodesSettings) { + if (Node.NODE_MASTER_SETTING.get(settings) == false) { + newSettings.add(settings); + } else { + currentNodeId++; + if (currentNodeId != bootstrapMasterNodeIndex) { + newSettings.add(settings); + } else { + List nodeNames = new ArrayList<>(); + + for (Settings nodeSettings : getDataOrMasterNodeInstances(Settings.class)) { + if (Node.NODE_MASTER_SETTING.get(nodeSettings)) { + nodeNames.add(Node.NODE_NAME_SETTING.get(nodeSettings)); + } + } + + for (Settings nodeSettings : allNodesSettings) { + if (Node.NODE_MASTER_SETTING.get(nodeSettings)) { + nodeNames.add(Node.NODE_NAME_SETTING.get(nodeSettings)); + } + } + + newSettings.add(Settings.builder().put(settings) + .putList(ClusterBootstrapService.INITIAL_MASTER_NODES_SETTING.getKey(), nodeNames) + .build()); + + setBootstrapMasterNodeIndex(-1); + } + } + } + + return newSettings; + } + /** * Starts a node with default settings and returns its name. */ @@ -1992,7 +2059,7 @@ public synchronized List startNodes(Settings... extraSettings) { } final List nodes = new ArrayList<>(); final int prevMasterCount = getMasterNodesCount(); - int bootstrapMasterNodeIndex = + int autoBootstrapMasterNodeIndex = prevMasterCount == 0 && autoManageMinMasterNodes && newMasterCount > 0 && Arrays.stream(extraSettings) .allMatch(s -> Node.NODE_MASTER_SETTING.get(s) == false || TestZenDiscovery.USE_ZEN2.get(s) == true) ? RandomNumbers.randomIntBetween(random, 0, newMasterCount - 1) : -1; @@ -2010,16 +2077,16 @@ public synchronized List startNodes(Settings... 
extraSettings) { .map(Node.NODE_NAME_SETTING::get) .collect(Collectors.toList()); - final List updatedSettings = nodeConfigurationSource.addExtraClusterBootstrapSettings(settings); + final List updatedSettings = bootstrapMasterNodeWithSpecifiedIndex(settings); for (int i = 0; i < numOfNodes; i++) { final Settings nodeSettings = updatedSettings.get(i); final Builder builder = Settings.builder(); if (Node.NODE_MASTER_SETTING.get(nodeSettings)) { - if (bootstrapMasterNodeIndex == 0) { + if (autoBootstrapMasterNodeIndex == 0) { builder.putList(INITIAL_MASTER_NODES_SETTING.getKey(), initialMasterNodes); } - bootstrapMasterNodeIndex -= 1; + autoBootstrapMasterNodeIndex -= 1; } final NodeAndClient nodeAndClient = diff --git a/test/framework/src/main/java/org/elasticsearch/test/NodeConfigurationSource.java b/test/framework/src/main/java/org/elasticsearch/test/NodeConfigurationSource.java index 5ed21d64c6890..60c69bbd6c652 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/NodeConfigurationSource.java +++ b/test/framework/src/main/java/org/elasticsearch/test/NodeConfigurationSource.java @@ -24,7 +24,6 @@ import java.nio.file.Path; import java.util.Collection; import java.util.Collections; -import java.util.List; public abstract class NodeConfigurationSource { @@ -52,10 +51,6 @@ public Settings transportClientSettings() { public abstract Path nodeConfigPath(int nodeOrdinal); - public List addExtraClusterBootstrapSettings(List allNodesSettings) { - return allNodesSettings; - } - /** Returns plugins that should be loaded on the node */ public Collection> nodePlugins() { return Collections.emptyList(); diff --git a/test/framework/src/test/java/org/elasticsearch/test/test/InternalTestClusterTests.java b/test/framework/src/test/java/org/elasticsearch/test/test/InternalTestClusterTests.java index ca2fe8c753e44..b48c9b9ddcf18 100644 --- a/test/framework/src/test/java/org/elasticsearch/test/test/InternalTestClusterTests.java +++ b/test/framework/src/test/java/org/elasticsearch/test/test/InternalTestClusterTests.java @@ -56,15 +56,11 @@ import java.util.Set; import java.util.function.Function; import java.util.stream.Collectors; -import java.util.stream.IntStream; -import static org.elasticsearch.cluster.coordination.ClusterBootstrapService.INITIAL_MASTER_NODES_SETTING; import static org.elasticsearch.cluster.node.DiscoveryNode.Role.DATA; import static org.elasticsearch.cluster.node.DiscoveryNode.Role.INGEST; import static org.elasticsearch.cluster.node.DiscoveryNode.Role.MASTER; import static org.elasticsearch.discovery.DiscoveryModule.DISCOVERY_HOSTS_PROVIDER_SETTING; -import static org.elasticsearch.node.Node.NODE_MASTER_SETTING; -import static org.elasticsearch.node.Node.NODE_NAME_SETTING; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertFileExists; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertFileNotExists; import static org.hamcrest.Matchers.equalTo; @@ -144,21 +140,23 @@ public void testBeforeTest() throws Exception { final boolean masterNodes; final int minNumDataNodes; final int maxNumDataNodes; + final int bootstrapMasterNodeIndex; if (autoManageMinMasterNodes) { masterNodes = randomBoolean(); minNumDataNodes = randomIntBetween(0, 3); maxNumDataNodes = randomIntBetween(minNumDataNodes, 4); + bootstrapMasterNodeIndex = -1; } else { // if we manage min master nodes, we need to lock down the number of nodes minNumDataNodes = randomIntBetween(0, 4); maxNumDataNodes = minNumDataNodes; masterNodes = false; + bootstrapMasterNodeIndex 
= maxNumDataNodes == 0 ? -1 : randomIntBetween(0, maxNumDataNodes - 1); } final int numClientNodes = randomIntBetween(0, 2); final String clusterName1 = "shared1"; final String clusterName2 = "shared2"; String transportClient = getTestTransportType(); - final long bootstrapNodeSelectionSeed = randomLong(); NodeConfigurationSource nodeConfigurationSource = new NodeConfigurationSource() { @Override public Settings nodeSettings(int nodeOrdinal) { @@ -176,14 +174,6 @@ public Settings nodeSettings(int nodeOrdinal) { return settings.build(); } - @Override - public List addExtraClusterBootstrapSettings(List allNodesSettings) { - if (autoManageMinMasterNodes) { - return allNodesSettings; - } - return addBootstrapConfiguration(new Random(bootstrapNodeSelectionSeed), allNodesSettings); - } - @Override public Path nodeConfigPath(int nodeOrdinal) { return null; @@ -202,9 +192,12 @@ public Settings transportClientSettings() { InternalTestCluster cluster0 = new InternalTestCluster(clusterSeed, baseDir, masterNodes, autoManageMinMasterNodes, minNumDataNodes, maxNumDataNodes, clusterName1, nodeConfigurationSource, numClientNodes, nodePrefix, mockPlugins(), Function.identity()); + cluster0.setBootstrapMasterNodeIndex(bootstrapMasterNodeIndex); + InternalTestCluster cluster1 = new InternalTestCluster(clusterSeed, baseDir, masterNodes, autoManageMinMasterNodes, minNumDataNodes, maxNumDataNodes, clusterName2, nodeConfigurationSource, numClientNodes, nodePrefix, mockPlugins(), Function.identity()); + cluster1.setBootstrapMasterNodeIndex(bootstrapMasterNodeIndex); assertClusters(cluster0, cluster1, false); long seed = randomLong(); @@ -231,19 +224,6 @@ public Settings transportClientSettings() { } } - private static List addBootstrapConfiguration(Random random, List allNodesSettings) { - final List updatedSettings = new ArrayList<>(allNodesSettings); - final int bootstrapIndex = randomFrom(random, IntStream.range(0, updatedSettings.size()) - .filter(i -> NODE_MASTER_SETTING.get(allNodesSettings.get(i))).boxed().collect(Collectors.toList())); - final Settings settings = updatedSettings.get(bootstrapIndex); - assertFalse(INITIAL_MASTER_NODES_SETTING.exists(settings)); - assertTrue(NODE_MASTER_SETTING.get(settings)); - updatedSettings.set(bootstrapIndex, - Settings.builder().put(settings).putList(INITIAL_MASTER_NODES_SETTING.getKey(), allNodesSettings.stream() - .filter(NODE_MASTER_SETTING::get).map(NODE_NAME_SETTING::get).collect(Collectors.toList())).build()); - return updatedSettings; - } - public void testDataFolderAssignmentAndCleaning() throws IOException, InterruptedException { long clusterSeed = randomLong(); boolean masterNodes = randomBoolean(); @@ -353,8 +333,6 @@ public void testDifferentRolesMaintainPathOnRestart() throws Exception { InternalTestCluster cluster = new InternalTestCluster(randomLong(), baseDir, false, false, 0, 0, "test", new NodeConfigurationSource() { - private boolean bootstrapConfigurationSet; - @Override public Settings nodeSettings(int nodeOrdinal) { return Settings.builder() @@ -369,16 +347,6 @@ public Settings nodeSettings(int nodeOrdinal) { .build(); } - @Override - public List addExtraClusterBootstrapSettings(List allNodesSettings) { - if (bootstrapConfigurationSet || allNodesSettings.stream().noneMatch(NODE_MASTER_SETTING::get)) { - return allNodesSettings; - } - - bootstrapConfigurationSet = true; - return addBootstrapConfiguration(random(), allNodesSettings); - } - @Override public Path nodeConfigPath(int nodeOrdinal) { return null; @@ -399,6 +367,8 @@ public Settings 
transportClientSettings() { roles.add(role); } + cluster.setBootstrapMasterNodeIndex(randomIntBetween(0, (int) roles.stream().filter(role -> role.equals(MASTER)).count() - 1)); + try { Map> pathsPerRole = new HashMap<>(); for (int i = 0; i < numNodes; i++) { From 57b1d245e8e9e766cb708088dd78fbc691557dfc Mon Sep 17 00:00:00 2001 From: Jim Ferenczi Date: Fri, 1 Feb 2019 11:41:17 +0100 Subject: [PATCH 19/54] Remove AtomicFieldData#getLegacyFieldValues (#38087) This function is unused now that we format the docvalue fields with the default formatter on the field (#30831). --- .../index/fielddata/AtomicFieldData.java | 7 ---- .../fielddata/plain/AtomicLongFieldData.java | 33 ------------------- 2 files changed, 40 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/index/fielddata/AtomicFieldData.java b/server/src/main/java/org/elasticsearch/index/fielddata/AtomicFieldData.java index c4f310073c488..20c9c3c70c10c 100644 --- a/server/src/main/java/org/elasticsearch/index/fielddata/AtomicFieldData.java +++ b/server/src/main/java/org/elasticsearch/index/fielddata/AtomicFieldData.java @@ -32,13 +32,6 @@ public interface AtomicFieldData extends Accountable, Releasable { */ ScriptDocValues getScriptValues(); - /** - * Returns field values for use by returned hits. - */ - default ScriptDocValues getLegacyFieldValues() { - return getScriptValues(); - } - /** * Return a String representation of the values. */ diff --git a/server/src/main/java/org/elasticsearch/index/fielddata/plain/AtomicLongFieldData.java b/server/src/main/java/org/elasticsearch/index/fielddata/plain/AtomicLongFieldData.java index 66b25c21c8051..9e0f3ab073619 100644 --- a/server/src/main/java/org/elasticsearch/index/fielddata/plain/AtomicLongFieldData.java +++ b/server/src/main/java/org/elasticsearch/index/fielddata/plain/AtomicLongFieldData.java @@ -25,11 +25,6 @@ import org.elasticsearch.index.fielddata.ScriptDocValues; import org.elasticsearch.index.fielddata.SortedBinaryDocValues; import org.elasticsearch.index.fielddata.SortedNumericDoubleValues; -import org.elasticsearch.script.JodaCompatibleZonedDateTime; -import org.joda.time.DateTime; -import org.joda.time.DateTimeZone; - -import java.io.IOException; /** * Specialization of {@link AtomicNumericFieldData} for integers. @@ -52,34 +47,6 @@ public long ramBytesUsed() { return ramBytesUsed; } - @Override - public final ScriptDocValues getLegacyFieldValues() { - switch (numericType) { - case DATE: - final ScriptDocValues.Dates realDV = new ScriptDocValues.Dates(getLongValues()); - return new ScriptDocValues() { - - @Override - public int size() { - return realDV.size(); - } - - @Override - public DateTime get(int index) { - JodaCompatibleZonedDateTime dt = realDV.get(index); - return new DateTime(dt.toInstant().toEpochMilli(), DateTimeZone.UTC); - } - - @Override - public void setNextDocId(int docId) throws IOException { - realDV.setNextDocId(docId); - } - }; - default: - return getScriptValues(); - } - } - @Override public final ScriptDocValues getScriptValues() { switch (numericType) { From 6fa93ca49387d5f3ae3208ed64623a0c319d570e Mon Sep 17 00:00:00 2001 From: Jim Ferenczi Date: Fri, 1 Feb 2019 11:41:40 +0100 Subject: [PATCH 20/54] Forbid negative field boosts in analyzed queries (#37930) This change forbids negative field boosts in the `query_string`, `simple_query_string` and `multi_match` queries. Negative boosts are not allowed in Lucene 8 (scores must be positive).
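As a quick illustration of the new behavior (a sketch using the Java query builders touched by this patch, not an excerpt from it):

    // Deboosting with a value between 0 and 1 remains allowed.
    QueryBuilders.queryStringQuery("quick fox").field("title", 0.5f);

    // A negative per-field boost now throws IllegalArgumentException
    // ("negative [boost] are not allowed ..."), via the new checkNegativeBoost check.
    QueryBuilders.queryStringQuery("quick fox").field("title", -2.0f);

The same validation applies to multi_match and simple_query_string field boosts.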
The backport of this change to 6x will turn the error into a deprecation warning in order to raise awareness of this breaking change in 7.0. Closes #33309 --- .../reference/migration/migrate_7_0/search.asciidoc | 4 ++-- .../index/query/AbstractQueryBuilder.java | 13 +++++++++---- .../index/query/MultiMatchQueryBuilder.java | 9 ++++++++- .../index/query/QueryStringQueryBuilder.java | 9 ++++++++- .../index/query/SimpleQueryStringBuilder.java | 5 +++++ .../elasticsearch/index/search/MultiMatchQuery.java | 3 +-- .../index/query/MultiMatchQueryBuilderTests.java | 9 +++++++++ .../index/query/QueryStringQueryBuilderTests.java | 10 ++++++++++ .../index/query/SimpleQueryStringBuilderTests.java | 10 ++++++++++ 9 files changed, 62 insertions(+), 10 deletions(-) diff --git a/docs/reference/migration/migrate_7_0/search.asciidoc b/docs/reference/migration/migrate_7_0/search.asciidoc index 0f3dcf9771c3d..afe96fd8851a9 100644 --- a/docs/reference/migration/migrate_7_0/search.asciidoc +++ b/docs/reference/migration/migrate_7_0/search.asciidoc @@ -159,8 +159,8 @@ instead which is a more appropriate value for a scenario where scores are not av [float] ==== Negative boosts are not allowed -Setting a negative `boost` in a query, deprecated in 6x, are not allowed in this version. -To deboost a specific query you can use a `boost` comprise between 0 and 1. +Setting a negative `boost` for a query or a field, deprecated in 6x, is not allowed in this version. +To deboost a specific query or field you can use a `boost` between 0 and 1. [float] ==== Negative scores are not allowed in Function Score Query diff --git a/server/src/main/java/org/elasticsearch/index/query/AbstractQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/AbstractQueryBuilder.java index cca1ca0fcc0d1..def32f0c75059 100644 --- a/server/src/main/java/org/elasticsearch/index/query/AbstractQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/AbstractQueryBuilder.java @@ -64,6 +64,7 @@ protected AbstractQueryBuilder() { protected AbstractQueryBuilder(StreamInput in) throws IOException { boost = in.readFloat(); + checkNegativeBoost(boost); queryName = in.readOptionalString(); } @@ -139,6 +140,13 @@ public final float boost() { return this.boost; } + protected final void checkNegativeBoost(float boost) { + if (Float.compare(boost, 0f) < 0) { + throw new IllegalArgumentException("negative [boost] are not allowed in [" + toString() + "], " + + "use a value between 0 and 1 to deboost"); + } + } + /** * Sets the boost for this query. Documents matching this query will (in addition to the normal * weightings) have their score multiplied by the boost provided.
@@ -146,10 +154,7 @@ public final float boost() { @SuppressWarnings("unchecked") @Override public final QB boost(float boost) { - if (Float.compare(boost, 0f) < 0) { - throw new IllegalArgumentException("negative [boost] are not allowed in [" + toString() + "], " + - "use a value between 0 and 1 to deboost"); - } + checkNegativeBoost(boost); this.boost = boost; return (QB) this; } diff --git a/server/src/main/java/org/elasticsearch/index/query/MultiMatchQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/MultiMatchQueryBuilder.java index d8476d791d7ec..9f2c85106de08 100644 --- a/server/src/main/java/org/elasticsearch/index/query/MultiMatchQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/MultiMatchQueryBuilder.java @@ -211,7 +211,10 @@ public MultiMatchQueryBuilder(StreamInput in) throws IOException { int size = in.readVInt(); fieldsBoosts = new TreeMap<>(); for (int i = 0; i < size; i++) { - fieldsBoosts.put(in.readString(), in.readFloat()); + String field = in.readString(); + float boost = in.readFloat(); + checkNegativeBoost(boost); + fieldsBoosts.put(field, boost); } type = Type.readFromStream(in); operator = Operator.readFromStream(in); @@ -295,6 +298,7 @@ public MultiMatchQueryBuilder field(String field, float boost) { if (Strings.isEmpty(field)) { throw new IllegalArgumentException("supplied field is null or empty."); } + checkNegativeBoost(boost); this.fieldsBoosts.put(field, boost); return this; } @@ -303,6 +307,9 @@ public MultiMatchQueryBuilder field(String field, float boost) { * Add several fields to run the query against with a specific boost. */ public MultiMatchQueryBuilder fields(Map fields) { + for (float fieldBoost : fields.values()) { + checkNegativeBoost(fieldBoost); + } this.fieldsBoosts.putAll(fields); return this; } diff --git a/server/src/main/java/org/elasticsearch/index/query/QueryStringQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/QueryStringQueryBuilder.java index 363384030a2ac..3f8a0acc91695 100644 --- a/server/src/main/java/org/elasticsearch/index/query/QueryStringQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/QueryStringQueryBuilder.java @@ -169,7 +169,10 @@ public QueryStringQueryBuilder(StreamInput in) throws IOException { defaultField = in.readOptionalString(); int size = in.readVInt(); for (int i = 0; i < size; i++) { - fieldsAndWeights.put(in.readString(), in.readFloat()); + String field = in.readString(); + Float weight = in.readFloat(); + checkNegativeBoost(weight); + fieldsAndWeights.put(field, weight); } defaultOperator = Operator.readFromStream(in); analyzer = in.readOptionalString(); @@ -264,6 +267,7 @@ public QueryStringQueryBuilder field(String field) { * Adds a field to run the query string against with a specific boost. */ public QueryStringQueryBuilder field(String field, float boost) { + checkNegativeBoost(boost); this.fieldsAndWeights.put(field, boost); return this; } @@ -272,6 +276,9 @@ public QueryStringQueryBuilder field(String field, float boost) { * Add several fields to run the query against with a specific boost. 
*/ public QueryStringQueryBuilder fields(Map fields) { + for (float fieldBoost : fields.values()) { + checkNegativeBoost(fieldBoost); + } this.fieldsAndWeights.putAll(fields); return this; } diff --git a/server/src/main/java/org/elasticsearch/index/query/SimpleQueryStringBuilder.java b/server/src/main/java/org/elasticsearch/index/query/SimpleQueryStringBuilder.java index 466341e3cc8b4..2b2045266455b 100644 --- a/server/src/main/java/org/elasticsearch/index/query/SimpleQueryStringBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/SimpleQueryStringBuilder.java @@ -161,6 +161,7 @@ public SimpleQueryStringBuilder(StreamInput in) throws IOException { for (int i = 0; i < size; i++) { String field = in.readString(); Float weight = in.readFloat(); + checkNegativeBoost(weight); fields.put(field, weight); } fieldsAndWeights.putAll(fields); @@ -223,6 +224,7 @@ public SimpleQueryStringBuilder field(String field, float boost) { if (Strings.isEmpty(field)) { throw new IllegalArgumentException("supplied field is null or empty"); } + checkNegativeBoost(boost); this.fieldsAndWeights.put(field, boost); return this; } @@ -230,6 +232,9 @@ public SimpleQueryStringBuilder field(String field, float boost) { /** Add several fields to run the query against with a specific boost. */ public SimpleQueryStringBuilder fields(Map fields) { Objects.requireNonNull(fields, "fields cannot be null"); + for (float fieldBoost : fields.values()) { + checkNegativeBoost(fieldBoost); + } this.fieldsAndWeights.putAll(fields); return this; } diff --git a/server/src/main/java/org/elasticsearch/index/search/MultiMatchQuery.java b/server/src/main/java/org/elasticsearch/index/search/MultiMatchQuery.java index 7eefaadaadde2..88fd5293392b5 100644 --- a/server/src/main/java/org/elasticsearch/index/search/MultiMatchQuery.java +++ b/server/src/main/java/org/elasticsearch/index/search/MultiMatchQuery.java @@ -97,11 +97,10 @@ private List buildFieldQueries(MultiMatchQueryBuilder.Type type, Map new MultiMatchQueryBuilder("the quick fox") + .field(STRING_FIELD_NAME, -1.0f) + .field(STRING_FIELD_NAME_2) + .toQuery(createShardContext())); + assertThat(exc.getMessage(), containsString("negative [boost]")); + } + private static IndexMetaData newIndexMeta(String name, Settings oldIndexSettings, Settings indexSettings) { Settings build = Settings.builder().put(oldIndexSettings) .put(indexSettings) diff --git a/server/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java index 6f72277007dd5..7181c1de1fb41 100644 --- a/server/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java @@ -63,6 +63,7 @@ import org.elasticsearch.index.search.QueryStringQueryParser; import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.test.AbstractQueryTestCase; +import org.hamcrest.CoreMatchers; import org.hamcrest.Matchers; import java.io.IOException; @@ -1471,6 +1472,15 @@ public void testAnalyzedPrefix() throws Exception { assertEquals(expected, query); } + public void testNegativeFieldBoost() { + IllegalArgumentException exc = expectThrows(IllegalArgumentException.class, + () -> new QueryStringQueryBuilder("the quick fox") + .field(STRING_FIELD_NAME, -1.0f) + .field(STRING_FIELD_NAME_2) + .toQuery(createShardContext())); + assertThat(exc.getMessage(), CoreMatchers.containsString("negative [boost]")); + } + 
private static IndexMetaData newIndexMeta(String name, Settings oldIndexSettings, Settings indexSettings) { Settings build = Settings.builder().put(oldIndexSettings) .put(indexSettings) diff --git a/server/src/test/java/org/elasticsearch/index/query/SimpleQueryStringBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/SimpleQueryStringBuilderTests.java index bbc89ddf750aa..3242f343379aa 100644 --- a/server/src/test/java/org/elasticsearch/index/query/SimpleQueryStringBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/SimpleQueryStringBuilderTests.java @@ -57,6 +57,7 @@ import java.util.Map; import java.util.Set; +import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.Matchers.anyOf; import static org.hamcrest.Matchers.either; import static org.hamcrest.Matchers.equalTo; @@ -718,6 +719,15 @@ public void testUnmappedFieldNoTokenWithAndOperator() throws IOException { assertEquals(expected, query); } + public void testNegativeFieldBoost() { + IllegalArgumentException exc = expectThrows(IllegalArgumentException.class, + () -> new SimpleQueryStringBuilder("the quick fox") + .field(STRING_FIELD_NAME, -1.0f) + .field(STRING_FIELD_NAME_2) + .toQuery(createShardContext())); + assertThat(exc.getMessage(), containsString("negative [boost]")); + } + private static IndexMetaData newIndexMeta(String name, Settings oldIndexSettings, Settings indexSettings) { Settings build = Settings.builder().put(oldIndexSettings) .put(indexSettings) From e18cac3659a82b60884564ec78d7c36b6eb0817a Mon Sep 17 00:00:00 2001 From: Luca Cavanna Date: Fri, 1 Feb 2019 12:11:42 +0100 Subject: [PATCH 21/54] Add finalReduce flag to SearchRequest (#38104) With #37000 we made sure that final reduction is automatically disabled whenever a localClusterAlias is provided with a SearchRequest. While working on #37838, we found a scenario where we do need to set a localClusterAlias yet we would like to perform a final reduction in the remote cluster: when searching on a single remote cluster. Relates to #32125 This commit adds support for a separate finalReduce flag to SearchRequest and makes use of it in TransportSearchAction in case we are searching against a single remote cluster. This also makes sure that num_reduce_phases is correct when searching against a single remote cluster: it makes little sense to return `num_reduce_phases` set to `2`, which looks especially weird in case the search was performed against a single remote shard. We should perform one reduction phase only in this case and `num_reduce_phases` should reflect that.
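The resulting request construction can be sketched as follows (package-private API taken from this diff; `original`, `indices`, `clusterAlias` and `startMillis` are placeholder variables):

    // searching a single remote cluster with no local indices: forward the request
    // and let the remote cluster perform the final reduction itself
    SearchRequest singleRemote = SearchRequest.withLocalReduction(original, indices, clusterAlias, startMillis, true);

    // more than one cluster involved: each cluster performs a non-final reduce and
    // the coordinating node merges the per-cluster responses
    SearchRequest perCluster = SearchRequest.withLocalReduction(original, indices, clusterAlias, startMillis, false);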
* line length --- .../modules/cross-cluster-search.asciidoc | 1 - .../test/multi_cluster/10_basic.yml | 9 ++ .../test/multi_cluster/40_scroll.yml | 2 + .../action/search/SearchPhaseController.java | 6 +- .../action/search/SearchRequest.java | 50 ++++++++--- .../action/search/SearchResponse.java | 12 +++ .../action/search/SearchResponseMerger.java | 6 +- .../action/search/TransportSearchAction.java | 87 ++++++++++++++----- .../search/builder/SearchSourceBuilder.java | 14 +-- .../search/SearchPhaseControllerTests.java | 21 +++-- .../action/search/SearchRequestTests.java | 24 +++-- .../TransportSearchActionSingleNodeTests.java | 67 ++++++++++++-- .../search/TransportSearchActionTests.java | 7 +- 13 files changed, 229 insertions(+), 77 deletions(-) diff --git a/docs/reference/modules/cross-cluster-search.asciidoc b/docs/reference/modules/cross-cluster-search.asciidoc index b59f74198c3e8..186c8e8ee3837 100644 --- a/docs/reference/modules/cross-cluster-search.asciidoc +++ b/docs/reference/modules/cross-cluster-search.asciidoc @@ -65,7 +65,6 @@ GET /cluster_one:twitter/_search { "took": 150, "timed_out": false, - "num_reduce_phases": 2, "_shards": { "total": 1, "successful": 1, diff --git a/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/10_basic.yml b/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/10_basic.yml index 4499a60bfe24a..fa4ca0588940c 100644 --- a/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/10_basic.yml +++ b/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/10_basic.yml @@ -36,6 +36,7 @@ terms: field: f1.keyword + - match: { num_reduce_phases: 3 } - match: {_clusters.total: 2} - match: {_clusters.successful: 2} - match: {_clusters.skipped: 0} @@ -63,6 +64,7 @@ terms: field: f1.keyword + - match: { num_reduce_phases: 3 } - match: {_clusters.total: 2} - match: {_clusters.successful: 2} - match: {_clusters.skipped: 0} @@ -83,6 +85,7 @@ terms: field: f1.keyword + - is_false: num_reduce_phases - match: {_clusters.total: 1} - match: {_clusters.successful: 1} - match: {_clusters.skipped: 0} @@ -103,6 +106,7 @@ terms: field: f1.keyword + - is_false: num_reduce_phases - is_false: _clusters - match: { _shards.total: 2 } - match: { hits.total: 5} @@ -133,6 +137,7 @@ rest_total_hits_as_int: true index: test_remote_cluster:test_index + - is_false: num_reduce_phases - match: {_clusters.total: 1} - match: {_clusters.successful: 1} - match: {_clusters.skipped: 0} @@ -162,6 +167,7 @@ rest_total_hits_as_int: true index: "*:test_index" + - match: { num_reduce_phases: 3 } - match: {_clusters.total: 2} - match: {_clusters.successful: 2} - match: {_clusters.skipped: 0} @@ -176,6 +182,7 @@ rest_total_hits_as_int: true index: my_remote_cluster:aliased_test_index + - is_false: num_reduce_phases - match: {_clusters.total: 1} - match: {_clusters.successful: 1} - match: {_clusters.skipped: 0} @@ -192,6 +199,7 @@ rest_total_hits_as_int: true index: my_remote_cluster:aliased_test_index,my_remote_cluster:field_caps_index_1 + - is_false: num_reduce_phases - match: {_clusters.total: 1} - match: {_clusters.successful: 1} - match: {_clusters.skipped: 0} @@ -208,6 +216,7 @@ rest_total_hits_as_int: true index: "my_remote_cluster:single_doc_index" + - is_false: num_reduce_phases - match: {_clusters.total: 1} - match: {_clusters.successful: 1} - match: {_clusters.skipped: 0} diff --git a/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/40_scroll.yml 
b/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/40_scroll.yml index 6a7fe3c5356c0..ea404702db529 100644 --- a/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/40_scroll.yml +++ b/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/40_scroll.yml @@ -12,6 +12,7 @@ query: match_all: {} + - is_false: num_reduce_phases - match: {_clusters.total: 1} - match: {_clusters.successful: 1} - match: {_clusters.skipped: 0} @@ -28,6 +29,7 @@ rest_total_hits_as_int: true body: { "scroll_id": "$scroll_id", "scroll": "1m"} + - is_false: num_reduce_phases - is_false: _clusters - match: {hits.total: 6 } - length: {hits.hits: 2 } diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java b/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java index 67f33398bba68..e5c5b17414b96 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java @@ -714,20 +714,18 @@ InitialSearchPhase.ArraySearchPhaseResults newSearchPhaseResu final boolean hasAggs = source != null && source.aggregations() != null; final boolean hasTopDocs = source == null || source.size() != 0; final int trackTotalHitsUpTo = resolveTrackTotalHits(request); - final boolean finalReduce = request.getLocalClusterAlias() == null; - if (isScrollRequest == false && (hasAggs || hasTopDocs)) { // no incremental reduce if scroll is used - we only hit a single shard or sometimes more... if (request.getBatchedReduceSize() < numShards) { // only use this if there are aggs and if there are more shards than we should reduce at once return new QueryPhaseResultConsumer(this, numShards, request.getBatchedReduceSize(), hasTopDocs, hasAggs, - trackTotalHitsUpTo, finalReduce); + trackTotalHitsUpTo, request.isFinalReduce()); } } return new InitialSearchPhase.ArraySearchPhaseResults(numShards) { @Override ReducedQueryPhase reduce() { - return reducedQueryPhase(results.asList(), isScrollRequest, trackTotalHitsUpTo, finalReduce); + return reducedQueryPhase(results.asList(), isScrollRequest, trackTotalHitsUpTo, request.isFinalReduce()); } }; } diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java b/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java index 55122b6806fd2..602a7123d0014 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java @@ -67,6 +67,7 @@ public final class SearchRequest extends ActionRequest implements IndicesRequest private final String localClusterAlias; private final long absoluteStartMillis; + private final boolean finalReduce; private SearchType searchType = SearchType.DEFAULT; @@ -102,13 +103,15 @@ public final class SearchRequest extends ActionRequest implements IndicesRequest public SearchRequest() { this.localClusterAlias = null; this.absoluteStartMillis = DEFAULT_ABSOLUTE_START_MILLIS; + this.finalReduce = true; } /** * Constructs a new search request from the provided search request */ public SearchRequest(SearchRequest searchRequest) { - this(searchRequest, searchRequest.indices, searchRequest.localClusterAlias, searchRequest.absoluteStartMillis); + this(searchRequest, searchRequest.indices, searchRequest.localClusterAlias, + searchRequest.absoluteStartMillis, searchRequest.finalReduce); } /** @@ -132,25 +135,30 @@ public 
SearchRequest(String[] indices, SearchSourceBuilder source) { } /** - * Creates a new search request by providing the search request to copy all fields from, the indices to search against, - * the alias of the cluster where it will be executed, as well as the start time in milliseconds from the epoch time. - * Used when a {@link SearchRequest} is created and executed as part of a cross-cluster search request performing local reduction - * on each cluster. The coordinating CCS node provides the original search request, the indices to search against as well as the - * alias to prefix index names with in the returned search results, and the absolute start time to be used on the remote clusters - * to ensure that the same value is used. + * Creates a new search request by providing the search request to copy all fields from, the indices to search against, the alias of + * the cluster where it will be executed, as well as the start time in milliseconds from the epoch time and whether the reduction + * should be final or not. Used when a {@link SearchRequest} is created and executed as part of a cross-cluster search request + * performing reduction on each cluster in order to minimize network round-trips between the coordinating node and the remote clusters. + * + * @param originalSearchRequest the original search request + * @param indices the indices to search against + * @param localClusterAlias the alias to prefix index names with in the returned search results + * @param absoluteStartMillis the absolute start time to be used on the remote clusters to ensure that the same value is used + * @param finalReduce whether the reduction should be final or not */ static SearchRequest withLocalReduction(SearchRequest originalSearchRequest, String[] indices, - String localClusterAlias, long absoluteStartMillis) { + String localClusterAlias, long absoluteStartMillis, boolean finalReduce) { Objects.requireNonNull(originalSearchRequest, "search request must not be null"); validateIndices(indices); Objects.requireNonNull(localClusterAlias, "cluster alias must not be null"); if (absoluteStartMillis < 0) { throw new IllegalArgumentException("absoluteStartMillis must not be negative but was [" + absoluteStartMillis + "]"); } - return new SearchRequest(originalSearchRequest, indices, localClusterAlias, absoluteStartMillis); + return new SearchRequest(originalSearchRequest, indices, localClusterAlias, absoluteStartMillis, finalReduce); } - private SearchRequest(SearchRequest searchRequest, String[] indices, String localClusterAlias, long absoluteStartMillis) { + private SearchRequest(SearchRequest searchRequest, String[] indices, String localClusterAlias, long absoluteStartMillis, + boolean finalReduce) { this.allowPartialSearchResults = searchRequest.allowPartialSearchResults; this.batchedReduceSize = searchRequest.batchedReduceSize; this.ccsMinimizeRoundtrips = searchRequest.ccsMinimizeRoundtrips; @@ -167,6 +175,7 @@ private SearchRequest(SearchRequest searchRequest, String[] indices, String loca this.types = searchRequest.types; this.localClusterAlias = localClusterAlias; this.absoluteStartMillis = absoluteStartMillis; + this.finalReduce = finalReduce; } /** @@ -203,6 +212,12 @@ public SearchRequest(StreamInput in) throws IOException { localClusterAlias = null; absoluteStartMillis = DEFAULT_ABSOLUTE_START_MILLIS; } + //TODO move to the 6_7_0 branch once backported to 6.x + if (in.getVersion().onOrAfter(Version.V_7_0_0)) { + finalReduce = in.readBoolean(); + } else { + finalReduce = true; + } if 
(in.getVersion().onOrAfter(Version.V_7_0_0)) { ccsMinimizeRoundtrips = in.readBoolean(); } @@ -232,6 +247,10 @@ public void writeTo(StreamOutput out) throws IOException { out.writeVLong(absoluteStartMillis); } } + //TODO move to the 6_7_0 branch once backported to 6.x + if (out.getVersion().onOrAfter(Version.V_7_0_0)) { + out.writeBoolean(finalReduce); + } if (out.getVersion().onOrAfter(Version.V_7_0_0)) { out.writeBoolean(ccsMinimizeRoundtrips); } @@ -277,11 +296,18 @@ String getLocalClusterAlias() { return localClusterAlias; } + /** + * Returns whether the reduction phase that will be performed needs to be final or not. + */ + boolean isFinalReduce() { + return finalReduce; + } + /** * Returns the current time in milliseconds from the time epoch, to be used for the execution of this search request. Used to * ensure that the same value, determined by the coordinating node, is used on all nodes involved in the execution of the search - * request. When created through {@link #withLocalReduction(SearchRequest, String[], String, long)}, this method returns the provided - * current time, otherwise it will return {@link System#currentTimeMillis()}. + * request. When created through {@link #withLocalReduction(SearchRequest, String[], String, long, boolean)}, this method returns + * the provided current time, otherwise it will return {@link System#currentTimeMillis()}. * */ long getOrCreateAbsoluteStartMillis() { diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchResponse.java b/server/src/main/java/org/elasticsearch/action/search/SearchResponse.java index dd0d4de07d6f4..6ae5e1a553eb6 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchResponse.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchResponse.java @@ -19,6 +19,7 @@ package org.elasticsearch.action.search; +import org.apache.lucene.search.TotalHits; import org.elasticsearch.Version; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.common.Nullable; @@ -35,8 +36,10 @@ import org.elasticsearch.common.xcontent.XContentParser.Token; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.rest.action.RestActions; +import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.SearchHits; import org.elasticsearch.search.aggregations.Aggregations; +import org.elasticsearch.search.aggregations.InternalAggregations; import org.elasticsearch.search.internal.InternalSearchResponse; import org.elasticsearch.search.profile.ProfileShardResult; import org.elasticsearch.search.profile.SearchProfileShardResults; @@ -47,6 +50,7 @@ import java.util.List; import java.util.Map; import java.util.Objects; +import java.util.function.Supplier; import static org.elasticsearch.action.search.ShardSearchFailure.readShardSearchFailure; import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken; @@ -497,4 +501,12 @@ public String toString() { return "Clusters{total=" + total + ", successful=" + successful + ", skipped=" + skipped + '}'; } } + + static SearchResponse empty(Supplier tookInMillisSupplier, Clusters clusters) { + SearchHits searchHits = new SearchHits(new SearchHit[0], new TotalHits(0L, TotalHits.Relation.EQUAL_TO), Float.NaN); + InternalSearchResponse internalSearchResponse = new InternalSearchResponse(searchHits, + InternalAggregations.EMPTY, null, null, false, null, 0); + return new SearchResponse(internalSearchResponse, null, 0, 0, 0, tookInMillisSupplier.get(), + ShardSearchFailure.EMPTY_ARRAY, clusters); + } } diff 
--git a/server/src/main/java/org/elasticsearch/action/search/SearchResponseMerger.java b/server/src/main/java/org/elasticsearch/action/search/SearchResponseMerger.java index 567040246c50f..3b28ca19477ab 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchResponseMerger.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchResponseMerger.java @@ -115,11 +115,7 @@ SearchResponse getMergedResponse(Clusters clusters) { //if the search is only across remote clusters, none of them are available, and all of them have skip_unavailable set to true, //we end up calling merge without anything to merge, we just return an empty search response if (searchResponses.size() == 0) { - SearchHits searchHits = new SearchHits(new SearchHit[0], new TotalHits(0L, TotalHits.Relation.EQUAL_TO), Float.NaN); - InternalSearchResponse internalSearchResponse = new InternalSearchResponse(searchHits, - InternalAggregations.EMPTY, null, null, false, null, 0); - return new SearchResponse(internalSearchResponse, null, 0, 0, 0, searchTimeProvider.buildTookInMillis(), - ShardSearchFailure.EMPTY_ARRAY, clusters); + return SearchResponse.empty(searchTimeProvider::buildTookInMillis, clusters); } int totalShards = 0; int skippedShards = 0; diff --git a/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java b/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java index 48ae3f1249522..519f2c88e0e58 100644 --- a/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java @@ -48,9 +48,13 @@ import org.elasticsearch.search.SearchPhaseResult; import org.elasticsearch.search.SearchService; import org.elasticsearch.search.aggregations.InternalAggregation; +import org.elasticsearch.search.aggregations.InternalAggregations; import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.search.internal.AliasFilter; +import org.elasticsearch.search.internal.InternalSearchResponse; import org.elasticsearch.search.internal.SearchContext; +import org.elasticsearch.search.profile.ProfileShardResult; +import org.elasticsearch.search.profile.SearchProfileShardResults; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.RemoteClusterAware; @@ -253,30 +257,66 @@ static void ccsRemoteReduce(SearchRequest searchRequest, OriginalIndices localIn SearchTimeProvider timeProvider, Function reduceContext, RemoteClusterService remoteClusterService, ThreadPool threadPool, ActionListener listener, BiConsumer> localSearchConsumer) { - SearchResponseMerger searchResponseMerger = createSearchResponseMerger(searchRequest.source(), timeProvider, reduceContext); - AtomicInteger skippedClusters = new AtomicInteger(0); - final AtomicReference exceptions = new AtomicReference<>(); - int totalClusters = remoteIndices.size() + (localIndices == null ? 
0 : 1); - final CountDown countDown = new CountDown(totalClusters); - for (Map.Entry entry : remoteIndices.entrySet()) { + + if (localIndices == null && remoteIndices.size() == 1) { + //if we are searching against a single remote cluster, we simply forward the original search request to such cluster + //and we directly perform final reduction in the remote cluster + Map.Entry entry = remoteIndices.entrySet().iterator().next(); String clusterAlias = entry.getKey(); boolean skipUnavailable = remoteClusterService.isSkipUnavailable(clusterAlias); OriginalIndices indices = entry.getValue(); SearchRequest ccsSearchRequest = SearchRequest.withLocalReduction(searchRequest, indices.indices(), - clusterAlias, timeProvider.getAbsoluteStartMillis()); - ActionListener ccsListener = createCCSListener(clusterAlias, skipUnavailable, countDown, - skippedClusters, exceptions, searchResponseMerger, totalClusters, listener); + clusterAlias, timeProvider.getAbsoluteStartMillis(), true); Client remoteClusterClient = remoteClusterService.getRemoteClusterClient(threadPool, clusterAlias); - remoteClusterClient.search(ccsSearchRequest, ccsListener); - } - if (localIndices != null) { - ActionListener ccsListener = createCCSListener(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY, - false, countDown, skippedClusters, exceptions, searchResponseMerger, totalClusters, listener); - //here we provide the empty string a cluster alias, which means no prefix in index name, - //but the coord node will perform non final reduce as it's not null. - SearchRequest ccsLocalSearchRequest = SearchRequest.withLocalReduction(searchRequest, localIndices.indices(), - RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY, timeProvider.getAbsoluteStartMillis()); - localSearchConsumer.accept(ccsLocalSearchRequest, ccsListener); + remoteClusterClient.search(ccsSearchRequest, new ActionListener() { + @Override + public void onResponse(SearchResponse searchResponse) { + Map profileResults = searchResponse.getProfileResults(); + SearchProfileShardResults profile = profileResults == null || profileResults.isEmpty() + ? null : new SearchProfileShardResults(profileResults); + InternalSearchResponse internalSearchResponse = new InternalSearchResponse(searchResponse.getHits(), + (InternalAggregations) searchResponse.getAggregations(), searchResponse.getSuggest(), profile, + searchResponse.isTimedOut(), searchResponse.isTerminatedEarly(), searchResponse.getNumReducePhases()); + listener.onResponse(new SearchResponse(internalSearchResponse, searchResponse.getScrollId(), + searchResponse.getTotalShards(), searchResponse.getSuccessfulShards(), searchResponse.getSkippedShards(), + timeProvider.buildTookInMillis(), searchResponse.getShardFailures(), new SearchResponse.Clusters(1, 1, 0))); + } + + @Override + public void onFailure(Exception e) { + if (skipUnavailable) { + listener.onResponse(SearchResponse.empty(timeProvider::buildTookInMillis, new SearchResponse.Clusters(1, 0, 1))); + } else { + listener.onFailure(wrapRemoteClusterFailure(clusterAlias, e)); + } + } + }); + } else { + SearchResponseMerger searchResponseMerger = createSearchResponseMerger(searchRequest.source(), timeProvider, reduceContext); + AtomicInteger skippedClusters = new AtomicInteger(0); + final AtomicReference exceptions = new AtomicReference<>(); + int totalClusters = remoteIndices.size() + (localIndices == null ? 
0 : 1); + final CountDown countDown = new CountDown(totalClusters); + for (Map.Entry entry : remoteIndices.entrySet()) { + String clusterAlias = entry.getKey(); + boolean skipUnavailable = remoteClusterService.isSkipUnavailable(clusterAlias); + OriginalIndices indices = entry.getValue(); + SearchRequest ccsSearchRequest = SearchRequest.withLocalReduction(searchRequest, indices.indices(), + clusterAlias, timeProvider.getAbsoluteStartMillis(), false); + ActionListener ccsListener = createCCSListener(clusterAlias, skipUnavailable, countDown, + skippedClusters, exceptions, searchResponseMerger, totalClusters, listener); + Client remoteClusterClient = remoteClusterService.getRemoteClusterClient(threadPool, clusterAlias); + remoteClusterClient.search(ccsSearchRequest, ccsListener); + } + if (localIndices != null) { + ActionListener ccsListener = createCCSListener(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY, + false, countDown, skippedClusters, exceptions, searchResponseMerger, totalClusters, listener); + //here we provide the empty string a cluster alias, which means no prefix in index name, + //but the coord node will perform non final reduce as it's not null. + SearchRequest ccsLocalSearchRequest = SearchRequest.withLocalReduction(searchRequest, localIndices.indices(), + RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY, timeProvider.getAbsoluteStartMillis(), false); + localSearchConsumer.accept(ccsLocalSearchRequest, ccsListener); + } } } @@ -297,9 +337,6 @@ static SearchResponseMerger createSearchResponseMerger(SearchSourceBuilder sourc //here we modify the original source so we can re-use it by setting it to each outgoing search request source.from(0); source.size(from + size); - //TODO when searching only against a remote cluster, we could ask directly for the final number of results and let - //the remote cluster do a final reduction, yet that is not possible as we are providing a localClusterAlias which - //will automatically make the reduction non final } return new SearchResponseMerger(from, size, trackTotalHitsUpTo, timeProvider, reduceContextFunction); } @@ -604,7 +641,7 @@ public final void onFailure(Exception e) { } else { Exception exception = e; if (RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY.equals(clusterAlias) == false) { - exception = new RemoteTransportException("error while communicating with remote cluster [" + clusterAlias + "]", e); + exception = wrapRemoteClusterFailure(clusterAlias, e); } if (exceptions.compareAndSet(null, exception) == false) { exceptions.accumulateAndGet(exception, (previous, current) -> { @@ -636,4 +673,8 @@ private void maybeFinish() { abstract FinalResponse createFinalResponse(); } + + private static RemoteTransportException wrapRemoteClusterFailure(String clusterAlias, Exception e) { + return new RemoteTransportException("error while communicating with remote cluster [" + clusterAlias + "]", e); + } } diff --git a/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java b/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java index f5c99fc513759..7085e5ba5868c 100644 --- a/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java @@ -641,23 +641,23 @@ public SearchSourceBuilder collapse(CollapseBuilder collapse) { * Add an aggregation to perform as part of the search. 
*/ public SearchSourceBuilder aggregation(AggregationBuilder aggregation) { - if (aggregations == null) { + if (aggregations == null) { aggregations = AggregatorFactories.builder(); - } + } aggregations.addAggregator(aggregation); - return this; + return this; } /** * Add an aggregation to perform as part of the search. */ public SearchSourceBuilder aggregation(PipelineAggregationBuilder aggregation) { - if (aggregations == null) { + if (aggregations == null) { aggregations = AggregatorFactories.builder(); - } - aggregations.addPipelineAggregator(aggregation); - return this; } + aggregations.addPipelineAggregator(aggregation); + return this; + } /** * Gets the bytes representing the aggregation builders for this request. diff --git a/server/src/test/java/org/elasticsearch/action/search/SearchPhaseControllerTests.java b/server/src/test/java/org/elasticsearch/action/search/SearchPhaseControllerTests.java index e9cde3f7aadea..9107b75db1798 100644 --- a/server/src/test/java/org/elasticsearch/action/search/SearchPhaseControllerTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/SearchPhaseControllerTests.java @@ -30,6 +30,7 @@ import org.apache.lucene.search.grouping.CollapseTopFieldDocs; import org.apache.lucene.util.BytesRef; import org.elasticsearch.action.OriginalIndices; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.lucene.search.TopDocsAndMaxScore; import org.elasticsearch.common.text.Text; @@ -313,9 +314,14 @@ private static AtomicArray generateFetchResults(int nShards, return fetchResults; } + private static SearchRequest randomSearchRequest() { + return randomBoolean() ? new SearchRequest() : SearchRequest.withLocalReduction(new SearchRequest(), + Strings.EMPTY_ARRAY, "remote", 0, randomBoolean()); + } + public void testConsumer() { int bufferSize = randomIntBetween(2, 3); - SearchRequest request = randomBoolean() ? new SearchRequest() : new SearchRequest("remote"); + SearchRequest request = randomSearchRequest(); request.source(new SearchSourceBuilder().aggregation(AggregationBuilders.avg("foo"))); request.setBatchedReduceSize(bufferSize); InitialSearchPhase.ArraySearchPhaseResults consumer = searchPhaseController.newSearchPhaseResults(request, 3); @@ -377,7 +383,7 @@ public void testConsumerConcurrently() throws InterruptedException { int expectedNumResults = randomIntBetween(1, 100); int bufferSize = randomIntBetween(2, 200); - SearchRequest request = randomBoolean() ? new SearchRequest() : new SearchRequest("remote"); + SearchRequest request = randomSearchRequest(); request.source(new SearchSourceBuilder().aggregation(AggregationBuilders.avg("foo"))); request.setBatchedReduceSize(bufferSize); InitialSearchPhase.ArraySearchPhaseResults consumer = @@ -424,7 +430,7 @@ public void testConsumerConcurrently() throws InterruptedException { public void testConsumerOnlyAggs() { int expectedNumResults = randomIntBetween(1, 100); int bufferSize = randomIntBetween(2, 200); - SearchRequest request = randomBoolean() ? 
new SearchRequest() : new SearchRequest("remote"); + SearchRequest request = randomSearchRequest(); request.source(new SearchSourceBuilder().aggregation(AggregationBuilders.avg("foo")).size(0)); request.setBatchedReduceSize(bufferSize); InitialSearchPhase.ArraySearchPhaseResults consumer = @@ -460,7 +466,7 @@ public void testConsumerOnlyAggs() { public void testConsumerOnlyHits() { int expectedNumResults = randomIntBetween(1, 100); int bufferSize = randomIntBetween(2, 200); - SearchRequest request = randomBoolean() ? new SearchRequest() : new SearchRequest("remote"); + SearchRequest request = randomSearchRequest(); if (randomBoolean()) { request.source(new SearchSourceBuilder().size(randomIntBetween(1, 10))); } @@ -493,8 +499,7 @@ public void testConsumerOnlyHits() { private void assertFinalReduction(SearchRequest searchRequest) { assertThat(reductions.size(), greaterThanOrEqualTo(1)); - //the last reduction step was the final one only if no cluster alias was provided with the search request - assertEquals(searchRequest.getLocalClusterAlias() == null, reductions.get(reductions.size() - 1)); + assertEquals(searchRequest.isFinalReduce(), reductions.get(reductions.size() - 1)); } public void testNewSearchPhaseResults() { @@ -568,7 +573,7 @@ public void testReduceTopNWithFromOffset() { public void testConsumerSortByField() { int expectedNumResults = randomIntBetween(1, 100); int bufferSize = randomIntBetween(2, 200); - SearchRequest request = randomBoolean() ? new SearchRequest() : new SearchRequest("remote"); + SearchRequest request = randomSearchRequest(); int size = randomIntBetween(1, 10); request.setBatchedReduceSize(bufferSize); InitialSearchPhase.ArraySearchPhaseResults consumer = @@ -604,7 +609,7 @@ public void testConsumerSortByField() { public void testConsumerFieldCollapsing() { int expectedNumResults = randomIntBetween(30, 100); int bufferSize = randomIntBetween(2, 200); - SearchRequest request = randomBoolean() ? new SearchRequest() : new SearchRequest("remote"); + SearchRequest request = randomSearchRequest(); int size = randomIntBetween(5, 10); request.setBatchedReduceSize(bufferSize); InitialSearchPhase.ArraySearchPhaseResults consumer = diff --git a/server/src/test/java/org/elasticsearch/action/search/SearchRequestTests.java b/server/src/test/java/org/elasticsearch/action/search/SearchRequestTests.java index 1d2d59c60e2ae..c139b75f45c42 100644 --- a/server/src/test/java/org/elasticsearch/action/search/SearchRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/SearchRequestTests.java @@ -54,17 +54,20 @@ protected SearchRequest createSearchRequest() throws IOException { } //clusterAlias and absoluteStartMillis do not have public getters/setters hence we randomize them only in this test specifically. 
return SearchRequest.withLocalReduction(request, request.indices(), - randomAlphaOfLengthBetween(5, 10), randomNonNegativeLong()); + randomAlphaOfLengthBetween(5, 10), randomNonNegativeLong(), randomBoolean()); } public void testWithLocalReduction() { - expectThrows(NullPointerException.class, () -> SearchRequest.withLocalReduction(null, Strings.EMPTY_ARRAY, "", 0)); + expectThrows(NullPointerException.class, () -> SearchRequest.withLocalReduction(null, Strings.EMPTY_ARRAY, "", 0, randomBoolean())); SearchRequest request = new SearchRequest(); - expectThrows(NullPointerException.class, () -> SearchRequest.withLocalReduction(request, null, "", 0)); - expectThrows(NullPointerException.class, () -> SearchRequest.withLocalReduction(request, new String[]{null}, "", 0)); - expectThrows(NullPointerException.class, () -> SearchRequest.withLocalReduction(request, Strings.EMPTY_ARRAY, null, 0)); - expectThrows(IllegalArgumentException.class, () -> SearchRequest.withLocalReduction(request, Strings.EMPTY_ARRAY, "", -1)); - SearchRequest searchRequest = SearchRequest.withLocalReduction(request, Strings.EMPTY_ARRAY, "", 0); + expectThrows(NullPointerException.class, () -> SearchRequest.withLocalReduction(request, null, "", 0, randomBoolean())); + expectThrows(NullPointerException.class, () -> SearchRequest.withLocalReduction(request, + new String[]{null}, "", 0, randomBoolean())); + expectThrows(NullPointerException.class, () -> SearchRequest.withLocalReduction(request, + Strings.EMPTY_ARRAY, null, 0, randomBoolean())); + expectThrows(IllegalArgumentException.class, () -> SearchRequest.withLocalReduction(request, + Strings.EMPTY_ARRAY, "", -1, randomBoolean())); + SearchRequest searchRequest = SearchRequest.withLocalReduction(request, Strings.EMPTY_ARRAY, "", 0, randomBoolean()); assertNull(searchRequest.validate()); } @@ -92,6 +95,12 @@ public void testRandomVersionSerialization() throws IOException { assertEquals(searchRequest.getLocalClusterAlias(), deserializedRequest.getLocalClusterAlias()); assertEquals(searchRequest.getOrCreateAbsoluteStartMillis(), deserializedRequest.getOrCreateAbsoluteStartMillis()); } + //TODO move to the 6_7_0 branch once backported to 6.x + if (version.before(Version.V_7_0_0)) { + assertTrue(deserializedRequest.isFinalReduce()); + } else { + assertEquals(searchRequest.isFinalReduce(), deserializedRequest.isFinalReduce()); + } } public void testReadFromPre6_7_0() throws IOException { @@ -103,6 +112,7 @@ public void testReadFromPre6_7_0() throws IOException { assertNull(searchRequest.getLocalClusterAlias()); assertAbsoluteStartMillisIsCurrentTime(searchRequest); assertTrue(searchRequest.isCcsMinimizeRoundtrips()); + assertTrue(searchRequest.isFinalReduce()); } } diff --git a/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionSingleNodeTests.java b/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionSingleNodeTests.java index 8fd75c5fd673d..ed14d11946f75 100644 --- a/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionSingleNodeTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionSingleNodeTests.java @@ -27,13 +27,17 @@ import org.elasticsearch.index.query.RangeQueryBuilder; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.search.SearchHit; +import org.elasticsearch.search.aggregations.Aggregations; +import org.elasticsearch.search.aggregations.bucket.terms.LongTerms; +import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder; 
+import org.elasticsearch.search.aggregations.support.ValueType; import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.test.ESSingleNodeTestCase; public class TransportSearchActionSingleNodeTests extends ESSingleNodeTestCase { public void testLocalClusterAlias() { - long nowInMillis = System.currentTimeMillis(); + long nowInMillis = randomLongBetween(0, Long.MAX_VALUE); IndexRequest indexRequest = new IndexRequest("test"); indexRequest.id("1"); indexRequest.source("field", "value"); @@ -42,7 +46,8 @@ public void testLocalClusterAlias() { assertEquals(RestStatus.CREATED, indexResponse.status()); { - SearchRequest searchRequest = SearchRequest.withLocalReduction(new SearchRequest(), Strings.EMPTY_ARRAY, "local", nowInMillis); + SearchRequest searchRequest = SearchRequest.withLocalReduction(new SearchRequest(), Strings.EMPTY_ARRAY, + "local", nowInMillis, randomBoolean()); SearchResponse searchResponse = client().search(searchRequest).actionGet(); assertEquals(1, searchResponse.getHits().getTotalHits().value); SearchHit[] hits = searchResponse.getHits().getHits(); @@ -53,7 +58,8 @@ public void testLocalClusterAlias() { assertEquals("1", hit.getId()); } { - SearchRequest searchRequest = SearchRequest.withLocalReduction(new SearchRequest(), Strings.EMPTY_ARRAY, "", nowInMillis); + SearchRequest searchRequest = SearchRequest.withLocalReduction(new SearchRequest(), Strings.EMPTY_ARRAY, + "", nowInMillis, randomBoolean()); SearchResponse searchResponse = client().search(searchRequest).actionGet(); assertEquals(1, searchResponse.getHits().getTotalHits().value); SearchHit[] hits = searchResponse.getHits().getHits(); @@ -94,19 +100,22 @@ public void testAbsoluteStartMillis() { assertEquals(0, searchResponse.getTotalShards()); } { - SearchRequest searchRequest = SearchRequest.withLocalReduction(new SearchRequest(), Strings.EMPTY_ARRAY, "", 0); + SearchRequest searchRequest = SearchRequest.withLocalReduction(new SearchRequest(), + Strings.EMPTY_ARRAY, "", 0, randomBoolean()); SearchResponse searchResponse = client().search(searchRequest).actionGet(); assertEquals(2, searchResponse.getHits().getTotalHits().value); } { - SearchRequest searchRequest = SearchRequest.withLocalReduction(new SearchRequest(), Strings.EMPTY_ARRAY, "", 0); + SearchRequest searchRequest = SearchRequest.withLocalReduction(new SearchRequest(), + Strings.EMPTY_ARRAY, "", 0, randomBoolean()); searchRequest.indices(""); SearchResponse searchResponse = client().search(searchRequest).actionGet(); assertEquals(1, searchResponse.getHits().getTotalHits().value); assertEquals("test-1970.01.01", searchResponse.getHits().getHits()[0].getIndex()); } { - SearchRequest searchRequest = SearchRequest.withLocalReduction(new SearchRequest(), Strings.EMPTY_ARRAY, "", 0); + SearchRequest searchRequest = SearchRequest.withLocalReduction(new SearchRequest(), + Strings.EMPTY_ARRAY, "", 0, randomBoolean()); SearchSourceBuilder sourceBuilder = new SearchSourceBuilder(); RangeQueryBuilder rangeQuery = new RangeQueryBuilder("date"); rangeQuery.gte("1970-01-01"); @@ -118,4 +127,50 @@ public void testAbsoluteStartMillis() { assertEquals("test-1970.01.01", searchResponse.getHits().getHits()[0].getIndex()); } } + + public void testFinalReduce() { + long nowInMillis = randomLongBetween(0, Long.MAX_VALUE); + { + IndexRequest indexRequest = new IndexRequest("test"); + indexRequest.id("1"); + indexRequest.source("price", 10); + IndexResponse indexResponse = client().index(indexRequest).actionGet(); + assertEquals(RestStatus.CREATED, 
indexResponse.status()); + } + { + IndexRequest indexRequest = new IndexRequest("test"); + indexRequest.id("2"); + indexRequest.source("price", 100); + IndexResponse indexResponse = client().index(indexRequest).actionGet(); + assertEquals(RestStatus.CREATED, indexResponse.status()); + } + client().admin().indices().prepareRefresh("test").get(); + + SearchRequest originalRequest = new SearchRequest(); + SearchSourceBuilder source = new SearchSourceBuilder(); + source.size(0); + originalRequest.source(source); + TermsAggregationBuilder terms = new TermsAggregationBuilder("terms", ValueType.NUMERIC); + terms.field("price"); + terms.size(1); + source.aggregation(terms); + + { + SearchRequest searchRequest = randomBoolean() ? originalRequest : SearchRequest.withLocalReduction(originalRequest, + Strings.EMPTY_ARRAY, "remote", nowInMillis, true); + SearchResponse searchResponse = client().search(searchRequest).actionGet(); + assertEquals(2, searchResponse.getHits().getTotalHits().value); + Aggregations aggregations = searchResponse.getAggregations(); + LongTerms longTerms = aggregations.get("terms"); + assertEquals(1, longTerms.getBuckets().size()); + } + { + SearchRequest searchRequest = SearchRequest.withLocalReduction(originalRequest, + Strings.EMPTY_ARRAY, "remote", nowInMillis, false); + SearchResponse searchResponse = client().search(searchRequest).actionGet(); + assertEquals(2, searchResponse.getHits().getTotalHits().value); + Aggregations aggregations = searchResponse.getAggregations(); + LongTerms longTerms = aggregations.get("terms"); + } + } } diff --git a/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionTests.java b/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionTests.java index 8a5859e200eac..9a9524d0ff57e 100644 --- a/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionTests.java @@ -402,7 +402,7 @@ public void testCCSRemoteReduceMergeFails() throws Exception { } public void testCCSRemoteReduce() throws Exception { - int numClusters = randomIntBetween(2, 10); + int numClusters = randomIntBetween(1, 10); DiscoveryNode[] nodes = new DiscoveryNode[numClusters]; Map remoteIndicesByCluster = new HashMap<>(); Settings.Builder builder = Settings.builder(); @@ -440,7 +440,7 @@ public void testCCSRemoteReduce() throws Exception { assertEquals(0, searchResponse.getClusters().getSkipped()); assertEquals(totalClusters, searchResponse.getClusters().getTotal()); assertEquals(totalClusters, searchResponse.getClusters().getSuccessful()); - assertEquals(totalClusters + 1, searchResponse.getNumReducePhases()); + assertEquals(totalClusters == 1 ? 
1 : totalClusters + 1, searchResponse.getNumReducePhases()); } { SearchRequest searchRequest = new SearchRequest(); @@ -510,7 +510,6 @@ public void onNodeDisconnected(DiscoveryNode node) { awaitLatch(latch, 5, TimeUnit.SECONDS); assertNotNull(failure.get()); assertThat(failure.get(), instanceOf(RemoteTransportException.class)); - RemoteTransportException remoteTransportException = (RemoteTransportException) failure.get(); assertThat(failure.get().getMessage(), containsString("error while communicating with remote cluster [")); assertThat(failure.get().getCause(), instanceOf(NodeDisconnectedException.class)); } @@ -583,7 +582,7 @@ public void onNodeDisconnected(DiscoveryNode node) { assertEquals(0, searchResponse.getClusters().getSkipped()); assertEquals(totalClusters, searchResponse.getClusters().getTotal()); assertEquals(totalClusters, searchResponse.getClusters().getSuccessful()); - assertEquals(totalClusters + 1, searchResponse.getNumReducePhases()); + assertEquals(totalClusters == 1 ? 1 : totalClusters + 1, searchResponse.getNumReducePhases()); } assertEquals(0, service.getConnectionManager().size()); } finally { From ce469cfda57dcdfac8e21a62d1057df7b360534d Mon Sep 17 00:00:00 2001 From: Yannick Welsch Date: Fri, 1 Feb 2019 12:51:38 +0100 Subject: [PATCH 22/54] Fix testCorruptedIndex (#38161) Folks at the Lucene project do not seem to be interested in classifying corruptions and distinguishing them from file-system exceptions (see https://issues.apache.org/jira/browse/LUCENE-8525), so we'll just cop out as well. Closes #34322 --- .../elasticsearch/index/translog/TruncateTranslogAction.java | 2 ++ .../index/shard/RemoveCorruptedShardDataCommandTests.java | 3 ++- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/server/src/main/java/org/elasticsearch/index/translog/TruncateTranslogAction.java b/server/src/main/java/org/elasticsearch/index/translog/TruncateTranslogAction.java index 133055f29178b..e99128fd3e0a2 100644 --- a/server/src/main/java/org/elasticsearch/index/translog/TruncateTranslogAction.java +++ b/server/src/main/java/org/elasticsearch/index/translog/TruncateTranslogAction.java @@ -71,6 +71,8 @@ public Tuple getCleanStatus commits = DirectoryReader.listCommits(indexDirectory); } catch (IndexNotFoundException infe) { throw new ElasticsearchException("unable to find a valid shard at [" + indexPath + "]", infe); + } catch (IOException e) { + throw new ElasticsearchException("unable to list commits at [" + indexPath + "]", e); } // Retrieve the generation and UUID from the existing data diff --git a/server/src/test/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommandTests.java b/server/src/test/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommandTests.java index 02e26604dcd25..1c3c3b28773cf 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommandTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommandTests.java @@ -56,6 +56,7 @@ import java.util.regex.Pattern; import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.either; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.is; @@ -174,7 +175,7 @@ public void testCorruptedIndex() throws Exception { fail(); } catch (ElasticsearchException e) { if (corruptSegments) { - assertThat(e.getMessage(), is("Index is unrecoverable")); + assertThat(e.getMessage(), either(is("Index is 
unrecoverable")).or(startsWith("unable to list commits"))); } else { assertThat(e.getMessage(), containsString("aborted by user")); } From 029e4b6278c91670a509bd9ce35f0081e51513cd Mon Sep 17 00:00:00 2001 From: Tanguy Leroux Date: Fri, 1 Feb 2019 12:58:46 +0100 Subject: [PATCH 23/54] Clear send behavior rule in CloseWhileRelocatingShardsIT (#38159) The current CloseWhileRelocatingShardsIT test adds some "send behavior" rule to a target node's mocked transport service in order to detect when shard relocating are started. These rules are never cleared and prevent the test to complete normally after the rebalance is re-enabled again. This commit changes the test so that rules are cleared and most verifications are done before the rebalance is reenabled again. Closes #38090 --- .../state/CloseWhileRelocatingShardsIT.java | 43 ++++++++++--------- 1 file changed, 23 insertions(+), 20 deletions(-) diff --git a/server/src/test/java/org/elasticsearch/indices/state/CloseWhileRelocatingShardsIT.java b/server/src/test/java/org/elasticsearch/indices/state/CloseWhileRelocatingShardsIT.java index 555bf24335413..99c50a839abc6 100644 --- a/server/src/test/java/org/elasticsearch/indices/state/CloseWhileRelocatingShardsIT.java +++ b/server/src/test/java/org/elasticsearch/indices/state/CloseWhileRelocatingShardsIT.java @@ -119,6 +119,8 @@ public void testCloseWhileRelocatingShards() throws Exception { final String targetNode = internalCluster().startDataOnlyNode(); ensureClusterSizeConsistency(); // wait for the master to finish processing join. + final MockTransportService targetTransportService = + (MockTransportService) internalCluster().getInstance(TransportService.class, targetNode); final Set acknowledgedCloses = ConcurrentCollections.newConcurrentSet(); try { @@ -146,8 +148,7 @@ public void testCloseWhileRelocatingShards() throws Exception { } final DiscoveryNode sourceNode = clusterService.state().nodes().resolveNode(primary.currentNodeId()); - ((MockTransportService) internalCluster().getInstance(TransportService.class, targetNode)) - .addSendBehavior(internalCluster().getInstance(TransportService.class, sourceNode.getName()), + targetTransportService.addSendBehavior(internalCluster().getInstance(TransportService.class, sourceNode.getName()), (connection, requestId, action, request, options) -> { if (PeerRecoverySourceService.Actions.START_RECOVERY.equals(action)) { logger.debug("blocking recovery of shard {}", ((StartRecoveryRequest) request).shardId()); @@ -210,28 +211,30 @@ public void testCloseWhileRelocatingShards() throws Exception { } } } - } finally { - assertAcked(client().admin().cluster().prepareUpdateSettings() - .setTransientSettings(Settings.builder() - .putNull(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey()))); - } - for (String index : indices) { - if (acknowledgedCloses.contains(index)) { - assertIndexIsClosed(index); - } else { - assertIndexIsOpened(index); + for (String index : indices) { + if (acknowledgedCloses.contains(index)) { + assertIndexIsClosed(index); + } else { + assertIndexIsOpened(index); + } } - } - assertThat("Consider that the test failed if no indices were successfully closed", acknowledgedCloses.size(), greaterThan(0)); - assertAcked(client().admin().indices().prepareOpen("index-*")); - ensureGreen(indices); + targetTransportService.clearAllRules(); + + assertThat("Consider that the test failed if no indices were successfully closed", acknowledgedCloses.size(), greaterThan(0)); + 
assertAcked(client().admin().indices().prepareOpen("index-*")); + ensureGreen(indices); - for (String index : acknowledgedCloses) { - long docsCount = client().prepareSearch(index).setSize(0).get().getHits().getTotalHits().value; - assertEquals("Expected " + docsPerIndex.get(index) + " docs in index " + index + " but got " + docsCount - + " (close acknowledged=" + acknowledgedCloses.contains(index) + ")", (long) docsPerIndex.get(index), docsCount); + for (String index : acknowledgedCloses) { + long docsCount = client().prepareSearch(index).setSize(0).get().getHits().getTotalHits().value; + assertEquals("Expected " + docsPerIndex.get(index) + " docs in index " + index + " but got " + docsCount + + " (close acknowledged=" + acknowledgedCloses.contains(index) + ")", (long) docsPerIndex.get(index), docsCount); + } + } finally { + assertAcked(client().admin().cluster().prepareUpdateSettings() + .setTransientSettings(Settings.builder() + .putNull(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey()))); } } } From 2229e7231e1ddb139b52cc9d3cd770aeebe64474 Mon Sep 17 00:00:00 2001 From: Adrien Grand Date: Fri, 1 Feb 2019 13:55:51 +0100 Subject: [PATCH 24/54] Enable bw tests for #37871 and #38032. (#38167) Mixed-version cluster tests had initially been disabled since they wouldn't work until the functionality was backported. --- .../test/indices.create/20_mix_typeless_typeful.yml | 10 +++++----- .../indices.put_mapping/20_mix_typeless_typeful.yml | 4 ++-- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.create/20_mix_typeless_typeful.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.create/20_mix_typeless_typeful.yml index bb9157fe684f8..50a5239e70675 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.create/20_mix_typeless_typeful.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.create/20_mix_typeless_typeful.yml @@ -2,8 +2,8 @@ "Create a typeless index while there is a typed template": - skip: - version: " - 6.99.99" - reason: needs change to be backported to 6.7 + version: " - 6.6.99" + reason: Merging typeless/typed mappings/templates was added in 6.7 - do: indices.put_template: @@ -41,8 +41,8 @@ "Create a typed index while there is a typeless template": - skip: - version: " - 6.99.99" - reason: needs change to be backported to 6.7 + version: " - 6.6.99" + reason: Merging typeless/typed mappings/templates was added in 6.7 - do: indices.put_template: @@ -81,7 +81,7 @@ - skip: version: " - 6.99.99" - reason: needs change to be backported to 6.7 + reason: include_type_name only supported as of 6.7 - do: indices.put_template: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_mapping/20_mix_typeless_typeful.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_mapping/20_mix_typeless_typeful.yml index d964a382137f8..13cb3321841cf 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_mapping/20_mix_typeless_typeful.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_mapping/20_mix_typeless_typeful.yml @@ -55,8 +55,8 @@ "PUT mapping with _doc on an index that has types": - skip: - version: " - 6.99.99" - reason: Backport first + version: " - 6.6.99" + reason: include_type_name is only supported as of 6.7 - do: From 979e5576e5fcabec7cde63cca845dcd43b06584d Mon Sep 17 00:00:00 2001 From: Alexander Reelsen Date: Fri, 1 Feb 2019 14:48:37 +0100 Subject: [PATCH
25/54] Add tests for fractional epoch parsing (#38162) Fractional epoch parsing is supported; the tests we previously used covered edge cases that did not make sense. This adds tests to properly check for this. --- .../common/time/DateFormattersTests.java | 31 ++++++++++++++----- 1 file changed, 23 insertions(+), 8 deletions(-) diff --git a/server/src/test/java/org/elasticsearch/common/time/DateFormattersTests.java b/server/src/test/java/org/elasticsearch/common/time/DateFormattersTests.java index 7b535f9d4c9d6..e573a2ede6bdb 100644 --- a/server/src/test/java/org/elasticsearch/common/time/DateFormattersTests.java +++ b/server/src/test/java/org/elasticsearch/common/time/DateFormattersTests.java @@ -54,6 +54,11 @@ public void testEpochMillisParser() { assertThat(instant.getEpochSecond(), is(0L)); assertThat(instant.getNano(), is(0)); } + { + Instant instant = Instant.from(formatter.parse("123.123456")); + assertThat(instant.getEpochSecond(), is(0L)); + assertThat(instant.getNano(), is(123123456)); + } } public void testInvalidEpochMilliParser() { @@ -68,17 +73,27 @@ public void testInvalidEpochMilliParser() { // this is not in the duelling tests, because the epoch second parser in joda time drops the milliseconds after the comma // but is able to parse the rest // as this feature is supported it also makes sense to make it exact - public void testEpochSecondParser() { + public void testEpochSecondParserWithFraction() { DateFormatter formatter = DateFormatters.forPattern("epoch_second"); - IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> formatter.parse("1234.1234567890")); + TemporalAccessor accessor = formatter.parse("1234.1"); + Instant instant = DateFormatters.from(accessor).toInstant(); + assertThat(instant.getEpochSecond(), is(1234L)); + assertThat(DateFormatters.from(accessor).toInstant().getNano(), is(100_000_000)); + + accessor = formatter.parse("1234"); + instant = DateFormatters.from(accessor).toInstant(); + assertThat(instant.getEpochSecond(), is(1234L)); + assertThat(instant.getNano(), is(0)); + + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> formatter.parse("abc")); + assertThat(e.getMessage(), is("failed to parse date field [abc] with format [epoch_second]")); + + e = expectThrows(IllegalArgumentException.class, () -> formatter.parse("1234.abc")); + assertThat(e.getMessage(), is("failed to parse date field [1234.abc] with format [epoch_second]")); + + e = expectThrows(IllegalArgumentException.class, () -> formatter.parse("1234.1234567890")); assertThat(e.getMessage(), is("failed to parse date field [1234.1234567890] with format [epoch_second]")); - e = expectThrows(IllegalArgumentException .class, () -> formatter.parse("1234.123456789013221")); - assertThat(e.getMessage(), containsString("[1234.123456789013221]")); - e = expectThrows(IllegalArgumentException .class, () -> formatter.parse("abc")); - assertThat(e.getMessage(), containsString("[abc]")); - e = expectThrows(IllegalArgumentException .class, () -> formatter.parse("1234.abc")); - assertThat(e.getMessage(), containsString("[1234.abc]")); } public void testEpochMilliParsersWithDifferentFormatters() { From bda591453cc4cfc076eb2bccc4b96877f27f393c Mon Sep 17 00:00:00 2001 From: Andrey Ershov Date: Fri, 1 Feb 2019 14:53:55 +0100 Subject: [PATCH 26/54] Add elasticsearch-node detach-cluster command (#37979) This commit adds the second part of the `elasticsearch-node` tool: the `detach-cluster` command, in addition to the `unsafe-bootstrap` command.
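In operator terms, the recovery sequence this enables can be sketched as follows (illustrative only; the subcommand names come from this change, and the detailed algorithm is spelled out below):

    # 1) stop all nodes in the cluster
    # 2) on the surviving master-eligible node with the highest (term, version) pair:
    bin/elasticsearch-node unsafe-bootstrap
    # 3) on each of the remaining surviving nodes:
    bin/elasticsearch-node detach-cluster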
From bda591453cc4cfc076eb2bccc4b96877f27f393c Mon Sep 17 00:00:00 2001 From: Andrey Ershov Date: Fri, 1 Feb 2019 14:53:55 +0100 Subject: [PATCH 26/54] Add elasticsearch-node detach-cluster command (#37979) This commit adds the second part of the `elasticsearch-node` tool - the `detach-cluster` command, in addition to the existing `unsafe-bootstrap` command. Also, this commit changes the semantics of `unsafe-bootstrap`: `unsafe-bootstrap` now changes the clusterUUID. The algorithm for running the `elasticsearch-node` tool is therefore the following: 1) Stop all nodes in the cluster. 2) Pick the master-eligible node with the highest (term, version) pair and run the `unsafe-bootstrap` command on it. If no master-eligible nodes survived, skip this step. 3) Run the `detach-cluster` command on the remaining surviving nodes (see the invocation sketch below). The `detach-cluster` command makes the following changes to the node metadata: 1) Sets clusterUUID committed to false. 2) Sets currentTerm and term to 0. 3) Removes voting tombstones and sets the voting configurations to the special constant MUST_JOIN_ELECTED_MASTER, which prevents initial cluster bootstrap. The `ElasticsearchNodeCommand` abstract base class is introduced because `UnsafeBootstrapMasterCommand` and `DetachClusterCommand` have a lot in common. Also, this commit adds an "ordinal" parameter to both commands, because it is impossible to write integration tests otherwise. For the MUST_JOIN_ELECTED_MASTER case, special handling is introduced in `ClusterFormationFailureHelper`. Tests for both commands reside in `ElasticsearchNodeCommandIT` (renamed from `UnsafeBootstrapMasterIT`). --- .../ClusterFormationFailureHelper.java | 6 + .../coordination/CoordinationMetaData.java | 2 + .../coordination/DetachClusterCommand.java | 85 ++++ .../ElasticsearchNodeCommand.java | 151 +++++++ .../cluster/coordination/NodeToolCli.java | 1 + .../UnsafeBootstrapMasterCommand.java | 101 +---- .../ClusterFormationFailureHelperTests.java | 36 ++ .../coordination/CoordinatorTests.java | 10 +- .../ElasticsearchNodeCommandIT.java | 418 ++++++++++++++++++ .../coordination/UnsafeBootstrapMasterIT.java | 209 --------- .../discovery/ClusterDisruptionIT.java | 23 - .../gateway/GatewayIndexStateIT.java | 53 --- 12 files changed, 720 insertions(+), 375 deletions(-) create mode 100644 server/src/main/java/org/elasticsearch/cluster/coordination/DetachClusterCommand.java create mode 100644 server/src/main/java/org/elasticsearch/cluster/coordination/ElasticsearchNodeCommand.java create mode 100644 server/src/test/java/org/elasticsearch/cluster/coordination/ElasticsearchNodeCommandIT.java delete mode 100644 server/src/test/java/org/elasticsearch/cluster/coordination/UnsafeBootstrapMasterIT.java diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/ClusterFormationFailureHelper.java b/server/src/main/java/org/elasticsearch/cluster/coordination/ClusterFormationFailureHelper.java index cc58628b53893..67d2103ce672d 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/ClusterFormationFailureHelper.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/ClusterFormationFailureHelper.java @@ -167,6 +167,12 @@ String getDescription() { assert clusterState.getLastCommittedConfiguration().isEmpty() == false; + if (clusterState.getLastCommittedConfiguration().equals(VotingConfiguration.MUST_JOIN_ELECTED_MASTER)) { + return String.format(Locale.ROOT, + "master not discovered yet and this node was detached from its previous cluster, have discovered %s; %s", + foundPeers, discoveryWillContinueDescription); + } + final String quorumDescription; if (clusterState.getLastAcceptedConfiguration().equals(clusterState.getLastCommittedConfiguration())) { quorumDescription = describeQuorum(clusterState.getLastAcceptedConfiguration());
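(For reference: the recovery steps described in the commit message can be driven from test code roughly as follows. The sketch mirrors the invocation pattern of `ElasticsearchNodeCommandIT` further below; it has to sit in the `org.elasticsearch.cluster.coordination` package because `getParser()` is package-private, and building the `Environment` for the stopped node is left out here - the IT uses `TestEnvironment.newEnvironment(...)`.)

package org.elasticsearch.cluster.coordination;

import joptsimple.OptionSet;
import org.elasticsearch.cli.MockTerminal;
import org.elasticsearch.env.Environment;

class NodeToolInvocationSketch {

    // Detach a stopped node from its old cluster (step 3 of the procedure above).
    // The environment must point at the stopped node's data paths.
    static void detach(Environment environment, int nodeOrdinal) throws Exception {
        DetachClusterCommand command = new DetachClusterCommand();
        MockTerminal terminal = new MockTerminal();
        terminal.addTextInput("y"); // answer the "Do you want to proceed?" confirmation
        OptionSet options = command.getParser().parse("-ordinal", Integer.toString(nodeOrdinal));
        command.execute(terminal, options, environment);
    }

    // Step 2, unsafe-bootstrap, is driven the same way with new UnsafeBootstrapMasterCommand().
}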
diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/CoordinationMetaData.java b/server/src/main/java/org/elasticsearch/cluster/coordination/CoordinationMetaData.java index 01ef85b656d1e..b63cb07feff99 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/CoordinationMetaData.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/CoordinationMetaData.java @@ -325,6 +325,8 @@ public String toString() { public static class VotingConfiguration implements Writeable, ToXContentFragment { public static final VotingConfiguration EMPTY_CONFIG = new VotingConfiguration(Collections.emptySet()); + public static final VotingConfiguration MUST_JOIN_ELECTED_MASTER = new VotingConfiguration(Collections.singleton( + "_must_join_elected_master_")); private final Set nodeIds; diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/DetachClusterCommand.java b/server/src/main/java/org/elasticsearch/cluster/coordination/DetachClusterCommand.java new file mode 100644 index 0000000000000..6bd41ccf37f0c --- /dev/null +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/DetachClusterCommand.java @@ -0,0 +1,85 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License.
+ */ +package org.elasticsearch.cluster.coordination; + +import joptsimple.OptionSet; +import org.elasticsearch.cli.Terminal; +import org.elasticsearch.cluster.metadata.Manifest; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.env.Environment; + +import java.io.IOException; +import java.nio.file.Path; + +public class DetachClusterCommand extends ElasticsearchNodeCommand { + + static final String NODE_DETACHED_MSG = "Node was successfully detached from the cluster"; + static final String CONFIRMATION_MSG = + "-------------------------------------------------------------------------------\n" + + "\n" + + "You should run this tool only if you have permanently lost all\n" + + "your master-eligible nodes, and you cannot restore the cluster\n" + + "from a snapshot, or you have already run `elasticsearch-node unsafe-bootstrap`\n" + + "on a master-eligible node that formed a cluster with this node.\n" + + "This tool can cause arbitrary data loss and its use should be your last resort.\n" + + "Do you want to proceed?\n"; + + public DetachClusterCommand() { + super("Detaches this node from its cluster, allowing it to unsafely join a new cluster"); + } + + @Override + protected void execute(Terminal terminal, OptionSet options, Environment env) throws Exception { + super.execute(terminal, options, env); + + processNodePathsWithLock(terminal, options, env); + + terminal.println(NODE_DETACHED_MSG); + } + + @Override + protected void processNodePaths(Terminal terminal, Path[] dataPaths) throws IOException { + final Tuple manifestMetaDataTuple = loadMetaData(terminal, dataPaths); + final Manifest manifest = manifestMetaDataTuple.v1(); + final MetaData metaData = manifestMetaDataTuple.v2(); + + confirm(terminal, CONFIRMATION_MSG); + + writeNewMetaData(terminal, manifest, updateCurrentTerm(), metaData, updateMetaData(metaData), dataPaths); + } + + // package-private for tests + static MetaData updateMetaData(MetaData oldMetaData) { + final CoordinationMetaData coordinationMetaData = CoordinationMetaData.builder() + .lastAcceptedConfiguration(CoordinationMetaData.VotingConfiguration.MUST_JOIN_ELECTED_MASTER) + .lastCommittedConfiguration(CoordinationMetaData.VotingConfiguration.MUST_JOIN_ELECTED_MASTER) + .term(0) + .build(); + return MetaData.builder(oldMetaData) + .coordinationMetaData(coordinationMetaData) + .clusterUUIDCommitted(false) + .build(); + } + + //package-private for tests + static long updateCurrentTerm() { + return 0; + } +} diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/ElasticsearchNodeCommand.java b/server/src/main/java/org/elasticsearch/cluster/coordination/ElasticsearchNodeCommand.java new file mode 100644 index 0000000000000..9ef75879e9275 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/ElasticsearchNodeCommand.java @@ -0,0 +1,151 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.cluster.coordination; + +import joptsimple.OptionParser; +import joptsimple.OptionSet; +import joptsimple.OptionSpec; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.lucene.store.LockObtainFailedException; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.cli.EnvironmentAwareCommand; +import org.elasticsearch.cli.Terminal; +import org.elasticsearch.cluster.ClusterModule; +import org.elasticsearch.cluster.metadata.Manifest; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.env.Environment; +import org.elasticsearch.env.NodeEnvironment; + +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.Arrays; +import java.util.Objects; + +public abstract class ElasticsearchNodeCommand extends EnvironmentAwareCommand { + private static final Logger logger = LogManager.getLogger(ElasticsearchNodeCommand.class); + protected final NamedXContentRegistry namedXContentRegistry; + static final String STOP_WARNING_MSG = + "--------------------------------------------------------------------------\n" + + "\n" + + " WARNING: Elasticsearch MUST be stopped before running this tool." 
+ + "\n"; + static final String FAILED_TO_OBTAIN_NODE_LOCK_MSG = "failed to lock node's directory, is Elasticsearch still running?"; + static final String NO_NODE_FOLDER_FOUND_MSG = "no node folder is found in data folder(s), node has not been started yet?"; + static final String NO_MANIFEST_FILE_FOUND_MSG = "no manifest file is found, do you run pre 7.0 Elasticsearch?"; + static final String GLOBAL_GENERATION_MISSING_MSG = "no metadata is referenced from the manifest file, cluster has never been " + + "bootstrapped?"; + static final String NO_GLOBAL_METADATA_MSG = "failed to find global metadata, metadata corrupted?"; + static final String WRITE_METADATA_EXCEPTION_MSG = "exception occurred when writing new metadata to disk"; + static final String ABORTED_BY_USER_MSG = "aborted by user"; + final OptionSpec nodeOrdinalOption; + + public ElasticsearchNodeCommand(String description) { + super(description); + nodeOrdinalOption = parser.accepts("ordinal", "Optional node ordinal, 0 if not specified") + .withRequiredArg().ofType(Integer.class); + namedXContentRegistry = new NamedXContentRegistry(ClusterModule.getNamedXWriteables()); + } + + protected void processNodePathsWithLock(Terminal terminal, OptionSet options, Environment env) throws IOException { + terminal.println(Terminal.Verbosity.VERBOSE, "Obtaining lock for node"); + Integer nodeOrdinal = nodeOrdinalOption.value(options); + if (nodeOrdinal == null) { + nodeOrdinal = 0; + } + try (NodeEnvironment.NodeLock lock = new NodeEnvironment.NodeLock(nodeOrdinal, logger, env, Files::exists)) { + final Path[] dataPaths = + Arrays.stream(lock.getNodePaths()).filter(Objects::nonNull).map(p -> p.path).toArray(Path[]::new); + if (dataPaths.length == 0) { + throw new ElasticsearchException(NO_NODE_FOLDER_FOUND_MSG); + } + processNodePaths(terminal, dataPaths); + } catch (LockObtainFailedException ex) { + throw new ElasticsearchException( + FAILED_TO_OBTAIN_NODE_LOCK_MSG + " [" + ex.getMessage() + "]"); + } + } + + protected Tuple loadMetaData(Terminal terminal, Path[] dataPaths) throws IOException { + terminal.println(Terminal.Verbosity.VERBOSE, "Loading manifest file"); + final Manifest manifest = Manifest.FORMAT.loadLatestState(logger, namedXContentRegistry, dataPaths); + + if (manifest == null) { + throw new ElasticsearchException(NO_MANIFEST_FILE_FOUND_MSG); + } + if (manifest.isGlobalGenerationMissing()) { + throw new ElasticsearchException(GLOBAL_GENERATION_MISSING_MSG); + } + terminal.println(Terminal.Verbosity.VERBOSE, "Loading global metadata file"); + final MetaData metaData = MetaData.FORMAT.loadGeneration(logger, namedXContentRegistry, manifest.getGlobalGeneration(), + dataPaths); + if (metaData == null) { + throw new ElasticsearchException(NO_GLOBAL_METADATA_MSG + " [generation = " + manifest.getGlobalGeneration() + "]"); + } + + return Tuple.tuple(manifest, metaData); + } + + protected void confirm(Terminal terminal, String msg) { + terminal.println(msg); + String text = terminal.readText("Confirm [y/N] "); + if (text.equalsIgnoreCase("y") == false) { + throw new ElasticsearchException(ABORTED_BY_USER_MSG); + } + } + + @Override + protected void execute(Terminal terminal, OptionSet options, Environment env) throws Exception { + terminal.println(STOP_WARNING_MSG); + } + + protected abstract void processNodePaths(Terminal terminal, Path[] dataPaths) throws IOException; + + + protected void writeNewMetaData(Terminal terminal, Manifest oldManifest, long newCurrentTerm, + MetaData oldMetaData, MetaData newMetaData, Path[] dataPaths) { + try 
{ + terminal.println(Terminal.Verbosity.VERBOSE, + "[clusterUUID = " + oldMetaData.clusterUUID() + ", committed = " + oldMetaData.clusterUUIDCommitted() + "] => " + + "[clusterUUID = " + newMetaData.clusterUUID() + ", committed = " + newMetaData.clusterUUIDCommitted() + "]"); + terminal.println(Terminal.Verbosity.VERBOSE, "New coordination metadata is " + newMetaData.coordinationMetaData()); + terminal.println(Terminal.Verbosity.VERBOSE, "Writing new global metadata to disk"); + long newGeneration = MetaData.FORMAT.write(newMetaData, dataPaths); + Manifest newManifest = new Manifest(newCurrentTerm, oldManifest.getClusterStateVersion(), newGeneration, + oldManifest.getIndexGenerations()); + terminal.println(Terminal.Verbosity.VERBOSE, "New manifest is " + newManifest); + terminal.println(Terminal.Verbosity.VERBOSE, "Writing new manifest file to disk"); + Manifest.FORMAT.writeAndCleanup(newManifest, dataPaths); + terminal.println(Terminal.Verbosity.VERBOSE, "Cleaning up old metadata"); + MetaData.FORMAT.cleanupOldFiles(newGeneration, dataPaths); + } catch (Exception e) { + terminal.println(Terminal.Verbosity.VERBOSE, "Cleaning up new metadata"); + MetaData.FORMAT.cleanupOldFiles(oldManifest.getGlobalGeneration(), dataPaths); + throw new ElasticsearchException(WRITE_METADATA_EXCEPTION_MSG, e); + } + } + + //package-private for testing + OptionParser getParser() { + return parser; + } +} diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/NodeToolCli.java b/server/src/main/java/org/elasticsearch/cluster/coordination/NodeToolCli.java index d8fb77433faef..e2a94f1140b92 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/NodeToolCli.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/NodeToolCli.java @@ -35,6 +35,7 @@ public NodeToolCli() { super("A CLI tool to unsafely recover a cluster after the permanent loss of too many master-eligible nodes", ()->{}); CommandLoggingConfigurator.configureLoggingWithoutConfig(); subcommands.put("unsafe-bootstrap", new UnsafeBootstrapMasterCommand()); + subcommands.put("detach-cluster", new DetachClusterCommand()); } public static void main(String[] args) throws Exception { diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/UnsafeBootstrapMasterCommand.java b/server/src/main/java/org/elasticsearch/cluster/coordination/UnsafeBootstrapMasterCommand.java index 9db750c2a1f08..72afe8ec70428 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/UnsafeBootstrapMasterCommand.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/UnsafeBootstrapMasterCommand.java @@ -21,40 +21,27 @@ import joptsimple.OptionSet; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.apache.lucene.store.LockObtainFailedException; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.cli.EnvironmentAwareCommand; import org.elasticsearch.cli.Terminal; -import org.elasticsearch.cluster.ClusterModule; import org.elasticsearch.cluster.metadata.Manifest; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.env.Environment; -import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.env.NodeMetaData; import 
org.elasticsearch.node.Node; import java.io.IOException; -import java.nio.file.Files; import java.nio.file.Path; -import java.util.Arrays; import java.util.Collections; import java.util.Locale; -import java.util.Objects; -public class UnsafeBootstrapMasterCommand extends EnvironmentAwareCommand { +public class UnsafeBootstrapMasterCommand extends ElasticsearchNodeCommand { private static final Logger logger = LogManager.getLogger(UnsafeBootstrapMasterCommand.class); - private final NamedXContentRegistry namedXContentRegistry; - static final String STOP_WARNING_MSG = - "--------------------------------------------------------------------------\n" + - "\n" + - " WARNING: Elasticsearch MUST be stopped before running this tool." + - "\n"; static final String CLUSTER_STATE_TERM_VERSION_MSG_FORMAT = "Current node cluster state (term, version) pair is (%s, %s)"; static final String CONFIRMATION_MSG = @@ -62,35 +49,29 @@ public class UnsafeBootstrapMasterCommand extends EnvironmentAwareCommand { "\n" + "You should run this tool only if you have permanently lost half\n" + "or more of the master-eligible nodes, and you cannot restore the cluster\n" + - "from a snapshot. This tool can result in arbitrary data loss and\n" + - "should be the last resort.\n" + + "from a snapshot. This tool can cause arbitrary data loss and its use " + + "should be your last resort.\n" + "If you have multiple survived master eligible nodes, consider running\n" + "this tool on the node with the highest cluster state (term, version) pair.\n" + "Do you want to proceed?\n"; - static final String ABORTED_BY_USER_MSG = "aborted by user"; static final String NOT_MASTER_NODE_MSG = "unsafe-bootstrap tool can only be run on master eligible node"; - static final String FAILED_TO_OBTAIN_NODE_LOCK_MSG = "failed to lock node's directory, is Elasticsearch still running?"; - static final String NO_NODE_FOLDER_FOUND_MSG = "no node folder is found in data folder(s), node has not been started yet?"; + static final String NO_NODE_METADATA_FOUND_MSG = "no node meta data is found, node has not been started yet?"; - static final String NO_MANIFEST_FILE_FOUND_MSG = "no manifest file is found, do you run pre 7.0 Elasticsearch?"; - static final String GLOBAL_GENERATION_MISSING_MSG = "no metadata is referenced from the manifest file, cluster has never been " + - "bootstrapped?"; - static final String NO_GLOBAL_METADATA_MSG = "failed to find global metadata, metadata corrupted?"; + static final String EMPTY_LAST_COMMITTED_VOTING_CONFIG_MSG = "last committed voting configuration is empty, cluster has never been bootstrapped?"; - static final String WRITE_METADATA_EXCEPTION_MSG = "exception occurred when writing new metadata to disk"; + static final String MASTER_NODE_BOOTSTRAPPED_MSG = "Master node was successfully bootstrapped"; static final Setting UNSAFE_BOOTSTRAP = ClusterService.USER_DEFINED_META_DATA.getConcreteSetting("cluster.metadata.unsafe-bootstrap"); UnsafeBootstrapMasterCommand() { super("Forces the successful election of the current node after the permanent loss of the half or more master-eligible nodes"); - namedXContentRegistry = new NamedXContentRegistry(ClusterModule.getNamedXWriteables()); } @Override protected void execute(Terminal terminal, OptionSet options, Environment env) throws Exception { - terminal.println(STOP_WARNING_MSG); + super.execute(terminal, options, env); Settings settings = env.settings(); terminal.println(Terminal.Verbosity.VERBOSE, "Checking node.master setting"); @@ -98,27 +79,13 @@ protected void
execute(Terminal terminal, OptionSet options, Environment env) th if (master == false) { throw new ElasticsearchException(NOT_MASTER_NODE_MSG); } - final int nodeOrdinal = 0; - - terminal.println(Terminal.Verbosity.VERBOSE, "Obtaining lock for node"); - try (NodeEnvironment.NodeLock lock = new NodeEnvironment.NodeLock(nodeOrdinal, logger, env, Files::exists)) { - processNodePaths(logger, terminal, lock.getNodePaths()); - } catch (LockObtainFailedException ex) { - throw new ElasticsearchException( - FAILED_TO_OBTAIN_NODE_LOCK_MSG + " [" + ex.getMessage() + "]"); - } + processNodePathsWithLock(terminal, options, env); terminal.println(MASTER_NODE_BOOTSTRAPPED_MSG); } - private void processNodePaths(Logger logger, Terminal terminal, NodeEnvironment.NodePath[] nodePaths) throws IOException { - final Path[] dataPaths = - Arrays.stream(nodePaths).filter(Objects::nonNull).map(p -> p.path).toArray(Path[]::new); - if (dataPaths.length == 0) { - throw new ElasticsearchException(NO_NODE_FOLDER_FOUND_MSG); - } - + protected void processNodePaths(Terminal terminal, Path[] dataPaths) throws IOException { terminal.println(Terminal.Verbosity.VERBOSE, "Loading node metadata"); final NodeMetaData nodeMetaData = NodeMetaData.FORMAT.loadLatestState(logger, namedXContentRegistry, dataPaths); if (nodeMetaData == null) { @@ -127,21 +94,10 @@ private void processNodePaths(Logger logger, Terminal terminal, NodeEnvironment. String nodeId = nodeMetaData.nodeId(); terminal.println(Terminal.Verbosity.VERBOSE, "Current nodeId is " + nodeId); - terminal.println(Terminal.Verbosity.VERBOSE, "Loading manifest file"); - final Manifest manifest = Manifest.FORMAT.loadLatestState(logger, namedXContentRegistry, dataPaths); - if (manifest == null) { - throw new ElasticsearchException(NO_MANIFEST_FILE_FOUND_MSG); - } - if (manifest.isGlobalGenerationMissing()) { - throw new ElasticsearchException(GLOBAL_GENERATION_MISSING_MSG); - } - terminal.println(Terminal.Verbosity.VERBOSE, "Loading global metadata file"); - final MetaData metaData = MetaData.FORMAT.loadGeneration(logger, namedXContentRegistry, manifest.getGlobalGeneration(), - dataPaths); - if (metaData == null) { - throw new ElasticsearchException(NO_GLOBAL_METADATA_MSG + " [generation = " + manifest.getGlobalGeneration() + "]"); - } + final Tuple manifestMetaDataTuple = loadMetaData(terminal, dataPaths); + final Manifest manifest = manifestMetaDataTuple.v1(); + final MetaData metaData = manifestMetaDataTuple.v2(); final CoordinationMetaData coordinationMetaData = metaData.coordinationMetaData(); if (coordinationMetaData == null || coordinationMetaData.getLastCommittedConfiguration() == null || @@ -151,45 +107,26 @@ private void processNodePaths(Logger logger, Terminal terminal, NodeEnvironment. 
terminal.println(String.format(Locale.ROOT, CLUSTER_STATE_TERM_VERSION_MSG_FORMAT, coordinationMetaData.term(), metaData.version())); - terminal.println(CONFIRMATION_MSG); - String text = terminal.readText("Confirm [y/N] "); - if (text.equalsIgnoreCase("y") == false) { - throw new ElasticsearchException(ABORTED_BY_USER_MSG); - } + confirm(terminal, CONFIRMATION_MSG); CoordinationMetaData newCoordinationMetaData = CoordinationMetaData.builder(coordinationMetaData) .clearVotingConfigExclusions() .lastAcceptedConfiguration(new CoordinationMetaData.VotingConfiguration(Collections.singleton(nodeId))) .lastCommittedConfiguration(new CoordinationMetaData.VotingConfiguration(Collections.singleton(nodeId))) .build(); - terminal.println(Terminal.Verbosity.VERBOSE, "New coordination metadata is constructed " + newCoordinationMetaData); + Settings persistentSettings = Settings.builder() .put(metaData.persistentSettings()) .put(UNSAFE_BOOTSTRAP.getKey(), true) .build(); MetaData newMetaData = MetaData.builder(metaData) + .clusterUUID(MetaData.UNKNOWN_CLUSTER_UUID) + .generateClusterUuidIfNeeded() + .clusterUUIDCommitted(true) .persistentSettings(persistentSettings) .coordinationMetaData(newCoordinationMetaData) .build(); - writeNewMetaData(terminal, manifest, newMetaData, dataPaths); - } - private void writeNewMetaData(Terminal terminal, Manifest manifest, MetaData newMetaData, Path[] dataPaths) { - try { - terminal.println(Terminal.Verbosity.VERBOSE, "Writing new global metadata to disk"); - long newGeneration = MetaData.FORMAT.write(newMetaData, dataPaths); - long newCurrentTerm = manifest.getCurrentTerm() + 1; - terminal.println(Terminal.Verbosity.VERBOSE, "Incrementing currentTerm. New value is " + newCurrentTerm); - Manifest newManifest = new Manifest(newCurrentTerm, manifest.getClusterStateVersion(), newGeneration, - manifest.getIndexGenerations()); - terminal.println(Terminal.Verbosity.VERBOSE, "Writing new manifest file to disk"); - Manifest.FORMAT.writeAndCleanup(newManifest, dataPaths); - terminal.println(Terminal.Verbosity.VERBOSE, "Cleaning up old metadata"); - MetaData.FORMAT.cleanupOldFiles(newGeneration, dataPaths); - } catch (Exception e) { - terminal.println(Terminal.Verbosity.VERBOSE, "Cleaning up new metadata"); - MetaData.FORMAT.cleanupOldFiles(manifest.getGlobalGeneration(), dataPaths); - throw new ElasticsearchException(WRITE_METADATA_EXCEPTION_MSG, e); - } + writeNewMetaData(terminal, manifest, manifest.getCurrentTerm(), metaData, newMetaData, dataPaths); } } diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/ClusterFormationFailureHelperTests.java b/server/src/test/java/org/elasticsearch/cluster/coordination/ClusterFormationFailureHelperTests.java index cf8e1737a7708..6e90aed5f74bf 100644 --- a/server/src/test/java/org/elasticsearch/cluster/coordination/ClusterFormationFailureHelperTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/ClusterFormationFailureHelperTests.java @@ -201,6 +201,42 @@ private static ClusterState state(DiscoveryNode localNode, String[] acceptedConf .lastCommittedConfiguration(config(committedConfig)).build())).build(); } + + public void testDescriptionAfterDetachCluster() { + final DiscoveryNode localNode = new DiscoveryNode("local", buildNewFakeTransportAddress(), Version.CURRENT); + + final ClusterState clusterState = state(localNode, + VotingConfiguration.MUST_JOIN_ELECTED_MASTER.getNodeIds().toArray(new String[0])); + + assertThat(new ClusterFormationState(Settings.EMPTY, clusterState, emptyList(), 
emptyList(), 0L).getDescription(), + is("master not discovered yet and this node was detached from its previous cluster, " + + "have discovered []; " + + "discovery will continue using [] from hosts providers and [" + localNode + + "] from last-known cluster state; node term 0, last-accepted version 0 in term 0")); + + final TransportAddress otherAddress = buildNewFakeTransportAddress(); + assertThat(new ClusterFormationState(Settings.EMPTY, clusterState, singletonList(otherAddress), emptyList(), 0L).getDescription(), + is("master not discovered yet and this node was detached from its previous cluster, " + + "have discovered []; " + + "discovery will continue using [" + otherAddress + "] from hosts providers and [" + localNode + + "] from last-known cluster state; node term 0, last-accepted version 0 in term 0")); + + final DiscoveryNode otherNode = new DiscoveryNode("otherNode", buildNewFakeTransportAddress(), Version.CURRENT); + assertThat(new ClusterFormationState(Settings.EMPTY, clusterState, emptyList(), singletonList(otherNode), 0L).getDescription(), + is("master not discovered yet and this node was detached from its previous cluster, " + + "have discovered [" + otherNode + "]; " + + "discovery will continue using [] from hosts providers and [" + localNode + + "] from last-known cluster state; node term 0, last-accepted version 0 in term 0")); + + final DiscoveryNode yetAnotherNode = new DiscoveryNode("yetAnotherNode", buildNewFakeTransportAddress(), Version.CURRENT); + assertThat(new ClusterFormationState(Settings.EMPTY, clusterState, emptyList(), singletonList(yetAnotherNode), 0L).getDescription(), + is("master not discovered yet and this node was detached from its previous cluster, " + + "have discovered [" + yetAnotherNode + "]; " + + "discovery will continue using [] from hosts providers and [" + localNode + + "] from last-known cluster state; node term 0, last-accepted version 0 in term 0")); + + } + public void testDescriptionAfterBootstrapping() { final DiscoveryNode localNode = new DiscoveryNode("local", buildNewFakeTransportAddress(), Version.CURRENT); diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorTests.java b/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorTests.java index c3028de1801da..93c89cfafabd5 100644 --- a/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorTests.java @@ -1048,15 +1048,9 @@ public void testCannotJoinClusterWithDifferentUUID() throws IllegalAccessExcepti } assertTrue(newNode.getLastAppliedClusterState().version() == 0); - // reset clusterUUIDCommitted (and node / cluster state term) to let node join again - // TODO: use elasticsearch-node detach-cluster tool once it's implemented final ClusterNode detachedNode = newNode.restartedNode( - metaData -> MetaData.builder(metaData) - .clusterUUIDCommitted(false) - .coordinationMetaData(CoordinationMetaData.builder(metaData.coordinationMetaData()) - .term(0L).build()) - .build(), - term -> 0L); + metaData -> DetachClusterCommand.updateMetaData(metaData), + term -> DetachClusterCommand.updateCurrentTerm()); cluster1.clusterNodes.replaceAll(cn -> cn == newNode ? 
detachedNode : cn); cluster1.stabilise(); } diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/ElasticsearchNodeCommandIT.java b/server/src/test/java/org/elasticsearch/cluster/coordination/ElasticsearchNodeCommandIT.java new file mode 100644 index 0000000000000..ae8eba050020a --- /dev/null +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/ElasticsearchNodeCommandIT.java @@ -0,0 +1,418 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.cluster.coordination; + +import joptsimple.OptionSet; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; +import org.elasticsearch.cli.MockTerminal; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.Manifest; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.discovery.DiscoverySettings; +import org.elasticsearch.env.Environment; +import org.elasticsearch.env.NodeEnvironment; +import org.elasticsearch.env.NodeMetaData; +import org.elasticsearch.env.TestEnvironment; +import org.elasticsearch.node.Node; +import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.test.InternalTestCluster; +import org.elasticsearch.test.junit.annotations.TestLogging; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.Locale; + +import static org.elasticsearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; +import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; +import static org.elasticsearch.indices.recovery.RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; + +@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, autoMinMasterNodes = false) +@TestLogging("_root:DEBUG,org.elasticsearch.cluster.service:TRACE,org.elasticsearch.discovery.zen:TRACE") +public class ElasticsearchNodeCommandIT extends ESIntegTestCase { + + private MockTerminal executeCommand(ElasticsearchNodeCommand command, Environment environment, int nodeOrdinal, boolean abort) + throws Exception { + final MockTerminal terminal = new MockTerminal(); + final OptionSet options = command.getParser().parse("-ordinal", Integer.toString(nodeOrdinal)); + final String input; + + if (abort) { + input = randomValueOtherThanMany(c -> c.equalsIgnoreCase("y"), () -> randomAlphaOfLength(1)); + } else { + input = randomBoolean() ? 
"y" : "Y"; + } + + terminal.addTextInput(input); + + try { + command.execute(terminal, options, environment); + } finally { + assertThat(terminal.getOutput(), containsString(ElasticsearchNodeCommand.STOP_WARNING_MSG)); + } + + return terminal; + } + + private MockTerminal unsafeBootstrap(Environment environment, int nodeOrdinal, boolean abort) throws Exception { + final MockTerminal terminal = executeCommand(new UnsafeBootstrapMasterCommand(), environment, nodeOrdinal, abort); + assertThat(terminal.getOutput(), containsString(UnsafeBootstrapMasterCommand.CONFIRMATION_MSG)); + assertThat(terminal.getOutput(), containsString(UnsafeBootstrapMasterCommand.MASTER_NODE_BOOTSTRAPPED_MSG)); + return terminal; + } + + private MockTerminal detachCluster(Environment environment, int nodeOrdinal, boolean abort) throws Exception { + final MockTerminal terminal = executeCommand(new DetachClusterCommand(), environment, nodeOrdinal, abort); + assertThat(terminal.getOutput(), containsString(DetachClusterCommand.CONFIRMATION_MSG)); + assertThat(terminal.getOutput(), containsString(DetachClusterCommand.NODE_DETACHED_MSG)); + return terminal; + } + + private MockTerminal unsafeBootstrap(Environment environment) throws Exception { + return unsafeBootstrap(environment, 0, false); + } + + private MockTerminal detachCluster(Environment environment) throws Exception { + return detachCluster(environment, 0, false); + } + + private void expectThrows(ThrowingRunnable runnable, String message) { + ElasticsearchException ex = expectThrows(ElasticsearchException.class, runnable); + assertThat(ex.getMessage(), containsString(message)); + } + + public void testBootstrapNotMasterEligible() { + final Environment environment = TestEnvironment.newEnvironment(Settings.builder() + .put(internalCluster().getDefaultSettings()) + .put(Node.NODE_MASTER_SETTING.getKey(), false) + .build()); + expectThrows(() -> unsafeBootstrap(environment), UnsafeBootstrapMasterCommand.NOT_MASTER_NODE_MSG); + } + + public void testBootstrapNoDataFolder() { + final Environment environment = TestEnvironment.newEnvironment(internalCluster().getDefaultSettings()); + expectThrows(() -> unsafeBootstrap(environment), ElasticsearchNodeCommand.NO_NODE_FOLDER_FOUND_MSG); + } + + public void testDetachNoDataFolder() { + final Environment environment = TestEnvironment.newEnvironment(internalCluster().getDefaultSettings()); + expectThrows(() -> detachCluster(environment), ElasticsearchNodeCommand.NO_NODE_FOLDER_FOUND_MSG); + } + + public void testBootstrapNodeLocked() throws IOException { + Settings envSettings = buildEnvSettings(Settings.EMPTY); + Environment environment = TestEnvironment.newEnvironment(envSettings); + try (NodeEnvironment ignored = new NodeEnvironment(envSettings, environment)) { + expectThrows(() -> unsafeBootstrap(environment), ElasticsearchNodeCommand.FAILED_TO_OBTAIN_NODE_LOCK_MSG); + } + } + + public void testDetachNodeLocked() throws IOException { + Settings envSettings = buildEnvSettings(Settings.EMPTY); + Environment environment = TestEnvironment.newEnvironment(envSettings); + try (NodeEnvironment ignored = new NodeEnvironment(envSettings, environment)) { + expectThrows(() -> detachCluster(environment), ElasticsearchNodeCommand.FAILED_TO_OBTAIN_NODE_LOCK_MSG); + } + } + + public void testBootstrapNoNodeMetaData() throws IOException { + Settings envSettings = buildEnvSettings(Settings.EMPTY); + Environment environment = TestEnvironment.newEnvironment(envSettings); + try (NodeEnvironment nodeEnvironment = new NodeEnvironment(envSettings, 
environment)) { + NodeMetaData.FORMAT.cleanupOldFiles(-1, nodeEnvironment.nodeDataPaths()); + } + + expectThrows(() -> unsafeBootstrap(environment), UnsafeBootstrapMasterCommand.NO_NODE_METADATA_FOUND_MSG); + } + + public void testBootstrapNotBootstrappedCluster() throws Exception { + internalCluster().startNode( + Settings.builder() + .put(DiscoverySettings.INITIAL_STATE_TIMEOUT_SETTING.getKey(), "0s") // to ensure quick node startup + .build()); + assertBusy(() -> { + ClusterState state = client().admin().cluster().prepareState().setLocal(true) + .execute().actionGet().getState(); + assertTrue(state.blocks().hasGlobalBlockWithId(DiscoverySettings.NO_MASTER_BLOCK_ID)); + }); + + internalCluster().stopRandomDataNode(); + + Environment environment = TestEnvironment.newEnvironment(internalCluster().getDefaultSettings()); + expectThrows(() -> unsafeBootstrap(environment), ElasticsearchNodeCommand.GLOBAL_GENERATION_MISSING_MSG); + } + + public void testDetachNotBootstrappedCluster() throws Exception { + internalCluster().startNode( + Settings.builder() + .put(DiscoverySettings.INITIAL_STATE_TIMEOUT_SETTING.getKey(), "0s") // to ensure quick node startup + .build()); + assertBusy(() -> { + ClusterState state = client().admin().cluster().prepareState().setLocal(true) + .execute().actionGet().getState(); + assertTrue(state.blocks().hasGlobalBlockWithId(DiscoverySettings.NO_MASTER_BLOCK_ID)); + }); + + internalCluster().stopRandomDataNode(); + + Environment environment = TestEnvironment.newEnvironment(internalCluster().getDefaultSettings()); + expectThrows(() -> detachCluster(environment), ElasticsearchNodeCommand.GLOBAL_GENERATION_MISSING_MSG); + } + + public void testBootstrapNoManifestFile() throws IOException { + internalCluster().setBootstrapMasterNodeIndex(0); + internalCluster().startNode(); + ensureStableCluster(1); + NodeEnvironment nodeEnvironment = internalCluster().getMasterNodeInstance(NodeEnvironment.class); + internalCluster().stopRandomDataNode(); + Environment environment = TestEnvironment.newEnvironment(internalCluster().getDefaultSettings()); + Manifest.FORMAT.cleanupOldFiles(-1, nodeEnvironment.nodeDataPaths()); + + expectThrows(() -> unsafeBootstrap(environment), ElasticsearchNodeCommand.NO_MANIFEST_FILE_FOUND_MSG); + } + + public void testDetachNoManifestFile() throws IOException { + internalCluster().setBootstrapMasterNodeIndex(0); + internalCluster().startNode(); + ensureStableCluster(1); + NodeEnvironment nodeEnvironment = internalCluster().getMasterNodeInstance(NodeEnvironment.class); + internalCluster().stopRandomDataNode(); + Environment environment = TestEnvironment.newEnvironment(internalCluster().getDefaultSettings()); + Manifest.FORMAT.cleanupOldFiles(-1, nodeEnvironment.nodeDataPaths()); + + expectThrows(() -> detachCluster(environment), ElasticsearchNodeCommand.NO_MANIFEST_FILE_FOUND_MSG); + } + + public void testBootstrapNoMetaData() throws IOException { + internalCluster().setBootstrapMasterNodeIndex(0); + internalCluster().startNode(); + ensureStableCluster(1); + NodeEnvironment nodeEnvironment = internalCluster().getMasterNodeInstance(NodeEnvironment.class); + internalCluster().stopRandomDataNode(); + + Environment environment = TestEnvironment.newEnvironment(internalCluster().getDefaultSettings()); + MetaData.FORMAT.cleanupOldFiles(-1, nodeEnvironment.nodeDataPaths()); + + expectThrows(() -> unsafeBootstrap(environment), ElasticsearchNodeCommand.NO_GLOBAL_METADATA_MSG); + } + + public void testDetachNoMetaData() throws IOException { + 
internalCluster().setBootstrapMasterNodeIndex(0); + internalCluster().startNode(); + ensureStableCluster(1); + NodeEnvironment nodeEnvironment = internalCluster().getMasterNodeInstance(NodeEnvironment.class); + internalCluster().stopRandomDataNode(); + + Environment environment = TestEnvironment.newEnvironment(internalCluster().getDefaultSettings()); + MetaData.FORMAT.cleanupOldFiles(-1, nodeEnvironment.nodeDataPaths()); + + expectThrows(() -> detachCluster(environment), ElasticsearchNodeCommand.NO_GLOBAL_METADATA_MSG); + } + + public void testBootstrapAbortedByUser() throws IOException { + internalCluster().setBootstrapMasterNodeIndex(0); + internalCluster().startNode(); + ensureStableCluster(1); + internalCluster().stopRandomDataNode(); + + Environment environment = TestEnvironment.newEnvironment(internalCluster().getDefaultSettings()); + expectThrows(() -> unsafeBootstrap(environment, 0, true), ElasticsearchNodeCommand.ABORTED_BY_USER_MSG); + } + + public void testDetachAbortedByUser() throws IOException { + internalCluster().setBootstrapMasterNodeIndex(0); + internalCluster().startNode(); + ensureStableCluster(1); + internalCluster().stopRandomDataNode(); + + Environment environment = TestEnvironment.newEnvironment(internalCluster().getDefaultSettings()); + expectThrows(() -> detachCluster(environment, 0, true), ElasticsearchNodeCommand.ABORTED_BY_USER_MSG); + } + + public void test3MasterNodes2Failed() throws Exception { + internalCluster().setBootstrapMasterNodeIndex(2); + List masterNodes = new ArrayList<>(); + + logger.info("--> start 1st master-eligible node"); + masterNodes.add(internalCluster().startMasterOnlyNode(Settings.builder() + .put(DiscoverySettings.INITIAL_STATE_TIMEOUT_SETTING.getKey(), "0s") + .build())); // node ordinal 0 + + logger.info("--> start one data-only node"); + String dataNode = internalCluster().startDataOnlyNode(Settings.builder() + .put(DiscoverySettings.INITIAL_STATE_TIMEOUT_SETTING.getKey(), "0s") + .build()); // node ordinal 1 + + logger.info("--> start 2nd and 3rd master-eligible nodes and bootstrap"); + masterNodes.addAll(internalCluster().startMasterOnlyNodes(2)); // node ordinals 2 and 3 + + logger.info("--> create index test"); + createIndex("test"); + + logger.info("--> stop 2nd and 3d master eligible node"); + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(masterNodes.get(1))); + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(masterNodes.get(2))); + + logger.info("--> ensure NO_MASTER_BLOCK on data-only node"); + assertBusy(() -> { + ClusterState state = internalCluster().client(dataNode).admin().cluster().prepareState().setLocal(true) + .execute().actionGet().getState(); + assertTrue(state.blocks().hasGlobalBlockWithId(DiscoverySettings.NO_MASTER_BLOCK_ID)); + }); + + logger.info("--> try to unsafely bootstrap 1st master-eligible node, while node lock is held"); + final Environment environment = TestEnvironment.newEnvironment(internalCluster().getDefaultSettings()); + expectThrows(() -> unsafeBootstrap(environment), UnsafeBootstrapMasterCommand.FAILED_TO_OBTAIN_NODE_LOCK_MSG); + + logger.info("--> stop 1st master-eligible node and data-only node"); + NodeEnvironment nodeEnvironment = internalCluster().getMasterNodeInstance(NodeEnvironment.class); + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(masterNodes.get(0))); + internalCluster().stopRandomDataNode(); + + logger.info("--> unsafely-bootstrap 1st master-eligible node"); + MockTerminal terminal = unsafeBootstrap(environment); + MetaData 
metaData = MetaData.FORMAT.loadLatestState(logger, xContentRegistry(), nodeEnvironment.nodeDataPaths()); + assertThat(terminal.getOutput(), containsString( + String.format(Locale.ROOT, UnsafeBootstrapMasterCommand.CLUSTER_STATE_TERM_VERSION_MSG_FORMAT, + metaData.coordinationMetaData().term(), metaData.version()))); + + logger.info("--> start 1st master-eligible node"); + internalCluster().startMasterOnlyNode(); + + logger.info("--> detach-cluster on data-only node"); + detachCluster(environment, 1, false); + + logger.info("--> start data-only node"); + String dataNode2 = internalCluster().startDataOnlyNode(); + + logger.info("--> ensure there is no NO_MASTER_BLOCK and unsafe-bootstrap is reflected in cluster state"); + assertBusy(() -> { + ClusterState state = internalCluster().client(dataNode2).admin().cluster().prepareState().setLocal(true) + .execute().actionGet().getState(); + assertFalse(state.blocks().hasGlobalBlockWithId(DiscoverySettings.NO_MASTER_BLOCK_ID)); + assertTrue(state.metaData().persistentSettings().getAsBoolean(UnsafeBootstrapMasterCommand.UNSAFE_BOOTSTRAP.getKey(), false)); + }); + + logger.info("--> ensure index test is green"); + ensureGreen("test"); + + logger.info("--> detach-cluster on 2nd and 3rd master-eligible nodes"); + detachCluster(environment, 2, false); + detachCluster(environment, 3, false); + + logger.info("--> start 2nd and 3rd master-eligible nodes and ensure 4 nodes stable cluster"); + internalCluster().startMasterOnlyNodes(2); + ensureStableCluster(4); + } + + public void testAllMasterEligibleNodesFailedDanglingIndexImport() throws Exception { + internalCluster().setBootstrapMasterNodeIndex(0); + + logger.info("--> start mixed data and master-eligible node and bootstrap cluster"); + String masterNode = internalCluster().startNode(); // node ordinal 0 + + logger.info("--> start data-only node and ensure 2 nodes stable cluster"); + String dataNode = internalCluster().startDataOnlyNode(); // node ordinal 1 + ensureStableCluster(2); + + logger.info("--> index 1 doc and ensure index is green"); + client().prepareIndex("test", "type1", "1").setSource("field1", "value1").setRefreshPolicy(IMMEDIATE).get(); + ensureGreen("test"); + + logger.info("--> verify 1 doc in the index"); + assertHitCount(client().prepareSearch().setQuery(matchAllQuery()).get(), 1L); + assertThat(client().prepareGet("test", "type1", "1").execute().actionGet().isExists(), equalTo(true)); + + logger.info("--> stop data-only node and detach it from the old cluster"); + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(dataNode)); + final Environment environment = TestEnvironment.newEnvironment(internalCluster().getDefaultSettings()); + detachCluster(environment, 1, false); + + logger.info("--> stop master-eligible node, clear its data and start it again - new cluster should form"); + internalCluster().restartNode(masterNode, new InternalTestCluster.RestartCallback(){ + @Override + public boolean clearData(String nodeName) { + return true; + } + }); + + logger.info("--> start data-only node and ensure 2 nodes stable cluster"); + internalCluster().startDataOnlyNode(); + ensureStableCluster(2); + + logger.info("--> verify that the dangling index exists and has green status"); + assertBusy(() -> { + assertThat(client().admin().indices().prepareExists("test").execute().actionGet().isExists(), equalTo(true)); + }); + ensureGreen("test"); + + logger.info("--> verify the doc is there"); + assertThat(client().prepareGet("test", "type1", "1").execute().actionGet().isExists(),
equalTo(true)); + } + + public void testNoInitialBootstrapAfterDetach() throws Exception { + internalCluster().setBootstrapMasterNodeIndex(0); + internalCluster().startMasterOnlyNode(); + internalCluster().stopCurrentMasterNode(); + + final Environment environment = TestEnvironment.newEnvironment(internalCluster().getDefaultSettings()); + detachCluster(environment); + + String node = internalCluster().startMasterOnlyNode(Settings.builder() + // give the cluster 2 seconds to elect the master (it should not) + .put(DiscoverySettings.INITIAL_STATE_TIMEOUT_SETTING.getKey(), "2s") + .build()); + + ClusterState state = internalCluster().client().admin().cluster().prepareState().setLocal(true) + .execute().actionGet().getState(); + assertTrue(state.blocks().hasGlobalBlockWithId(DiscoverySettings.NO_MASTER_BLOCK_ID)); + + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(node)); + } + + public void testCanRunUnsafeBootstrapAfterErroneousDetachWithoutLoosingMetaData() throws Exception { + internalCluster().setBootstrapMasterNodeIndex(0); + internalCluster().startMasterOnlyNode(); + ClusterUpdateSettingsRequest req = new ClusterUpdateSettingsRequest().persistentSettings( + Settings.builder().put(INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey(), "1234kb")); + internalCluster().client().admin().cluster().updateSettings(req).get(); + + ClusterState state = internalCluster().client().admin().cluster().prepareState().execute().actionGet().getState(); + assertThat(state.metaData().persistentSettings().get(INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey()), + equalTo("1234kb")); + + internalCluster().stopCurrentMasterNode(); + + final Environment environment = TestEnvironment.newEnvironment(internalCluster().getDefaultSettings()); + detachCluster(environment); + unsafeBootstrap(environment); + + internalCluster().startMasterOnlyNode(); + ensureStableCluster(1); + + state = internalCluster().client().admin().cluster().prepareState().execute().actionGet().getState(); + assertThat(state.metaData().settings().get(INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey()), + equalTo("1234kb")); + } +} diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/UnsafeBootstrapMasterIT.java b/server/src/test/java/org/elasticsearch/cluster/coordination/UnsafeBootstrapMasterIT.java deleted file mode 100644 index be983ff8b5f32..0000000000000 --- a/server/src/test/java/org/elasticsearch/cluster/coordination/UnsafeBootstrapMasterIT.java +++ /dev/null @@ -1,209 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package org.elasticsearch.cluster.coordination; - -import joptsimple.OptionParser; -import joptsimple.OptionSet; -import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.cli.MockTerminal; -import org.elasticsearch.client.Client; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.metadata.Manifest; -import org.elasticsearch.cluster.metadata.MetaData; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.discovery.DiscoverySettings; -import org.elasticsearch.env.Environment; -import org.elasticsearch.env.NodeEnvironment; -import org.elasticsearch.env.NodeMetaData; -import org.elasticsearch.env.TestEnvironment; -import org.elasticsearch.node.Node; -import org.elasticsearch.test.ESIntegTestCase; -import org.elasticsearch.test.InternalTestCluster; -import org.elasticsearch.test.junit.annotations.TestLogging; - -import java.io.IOException; -import java.util.List; -import java.util.Locale; - -import static org.hamcrest.Matchers.containsString; - -@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, autoMinMasterNodes = false) -@TestLogging("_root:DEBUG,org.elasticsearch.cluster.service:TRACE,org.elasticsearch.discovery.zen:TRACE") -public class UnsafeBootstrapMasterIT extends ESIntegTestCase { - - private MockTerminal executeCommand(Environment environment, boolean abort) throws Exception { - final UnsafeBootstrapMasterCommand command = new UnsafeBootstrapMasterCommand(); - final MockTerminal terminal = new MockTerminal(); - final OptionParser parser = new OptionParser(); - final OptionSet options = parser.parse(); - final String input; - - if (abort) { - input = randomValueOtherThanMany(c -> c.equalsIgnoreCase("y"), () -> randomAlphaOfLength(1)); - } else { - input = randomBoolean() ? 
"y" : "Y"; - } - - terminal.addTextInput(input); - - try { - command.execute(terminal, options, environment); - assertThat(terminal.getOutput(), containsString(UnsafeBootstrapMasterCommand.MASTER_NODE_BOOTSTRAPPED_MSG)); - } finally { - assertThat(terminal.getOutput(), containsString(UnsafeBootstrapMasterCommand.STOP_WARNING_MSG)); - } - - return terminal; - } - - private MockTerminal executeCommand(Environment environment) throws Exception { - return executeCommand(environment, false); - } - - private void expectThrows(ThrowingRunnable runnable, String message) { - ElasticsearchException ex = expectThrows(ElasticsearchException.class, runnable); - assertThat(ex.getMessage(), containsString(message)); - } - - public void testNotMasterEligible() { - final Environment environment = TestEnvironment.newEnvironment(Settings.builder() - .put(internalCluster().getDefaultSettings()) - .put(Node.NODE_MASTER_SETTING.getKey(), false) - .build()); - expectThrows(() -> executeCommand(environment), UnsafeBootstrapMasterCommand.NOT_MASTER_NODE_MSG); - } - - public void testNoDataFolder() { - final Environment environment = TestEnvironment.newEnvironment(internalCluster().getDefaultSettings()); - expectThrows(() -> executeCommand(environment), UnsafeBootstrapMasterCommand.NO_NODE_FOLDER_FOUND_MSG); - } - - public void testNodeLocked() throws IOException { - Settings envSettings = buildEnvSettings(Settings.EMPTY); - Environment environment = TestEnvironment.newEnvironment(envSettings); - try (NodeEnvironment ignored = new NodeEnvironment(envSettings, environment)) { - expectThrows(() -> executeCommand(environment), UnsafeBootstrapMasterCommand.FAILED_TO_OBTAIN_NODE_LOCK_MSG); - } - } - - public void testNoNodeMetaData() throws IOException { - Settings envSettings = buildEnvSettings(Settings.EMPTY); - Environment environment = TestEnvironment.newEnvironment(envSettings); - try (NodeEnvironment nodeEnvironment = new NodeEnvironment(envSettings, environment)) { - NodeMetaData.FORMAT.cleanupOldFiles(-1, nodeEnvironment.nodeDataPaths()); - } - - expectThrows(() -> executeCommand(environment), UnsafeBootstrapMasterCommand.NO_NODE_METADATA_FOUND_MSG); - } - - public void testNotBootstrappedCluster() throws Exception { - internalCluster().startNode( - Settings.builder() - .put(DiscoverySettings.INITIAL_STATE_TIMEOUT_SETTING.getKey(), "0s") // to ensure quick node startup - .build()); - assertBusy(() -> { - ClusterState state = client().admin().cluster().prepareState().setLocal(true) - .execute().actionGet().getState(); - assertTrue(state.blocks().hasGlobalBlockWithId(DiscoverySettings.NO_MASTER_BLOCK_ID)); - }); - - internalCluster().stopRandomDataNode(); - - Environment environment = TestEnvironment.newEnvironment(internalCluster().getDefaultSettings()); - expectThrows(() -> executeCommand(environment), UnsafeBootstrapMasterCommand.GLOBAL_GENERATION_MISSING_MSG); - } - - public void testNoManifestFile() throws IOException { - internalCluster().setBootstrapMasterNodeIndex(0); - internalCluster().startNode(); - ensureStableCluster(1); - NodeEnvironment nodeEnvironment = internalCluster().getMasterNodeInstance(NodeEnvironment.class); - internalCluster().stopRandomDataNode(); - Environment environment = TestEnvironment.newEnvironment(internalCluster().getDefaultSettings()); - Manifest.FORMAT.cleanupOldFiles(-1, nodeEnvironment.nodeDataPaths()); - - expectThrows(() -> executeCommand(environment), UnsafeBootstrapMasterCommand.NO_MANIFEST_FILE_FOUND_MSG); - } - - public void testNoMetaData() throws IOException { - 
internalCluster().setBootstrapMasterNodeIndex(0); - internalCluster().startNode(); - ensureStableCluster(1); - NodeEnvironment nodeEnvironment = internalCluster().getMasterNodeInstance(NodeEnvironment.class); - internalCluster().stopRandomDataNode(); - - Environment environment = TestEnvironment.newEnvironment(internalCluster().getDefaultSettings()); - MetaData.FORMAT.cleanupOldFiles(-1, nodeEnvironment.nodeDataPaths()); - - expectThrows(() -> executeCommand(environment), UnsafeBootstrapMasterCommand.NO_GLOBAL_METADATA_MSG); - } - - public void testAbortedByUser() throws IOException { - internalCluster().setBootstrapMasterNodeIndex(0); - internalCluster().startNode(); - ensureStableCluster(1); - internalCluster().stopRandomDataNode(); - - Environment environment = TestEnvironment.newEnvironment(internalCluster().getDefaultSettings()); - expectThrows(() -> executeCommand(environment, true), UnsafeBootstrapMasterCommand.ABORTED_BY_USER_MSG); - } - - public void test3MasterNodes2Failed() throws Exception { - internalCluster().setBootstrapMasterNodeIndex(2); - List masterNodes = internalCluster().startMasterOnlyNodes(3, Settings.EMPTY); - - String dataNode = internalCluster().startDataOnlyNode(); - createIndex("test"); - - Client dataNodeClient = internalCluster().client(dataNode); - - internalCluster().stopRandomNode(InternalTestCluster.nameFilter(masterNodes.get(1))); - internalCluster().stopRandomNode(InternalTestCluster.nameFilter(masterNodes.get(2))); - - assertBusy(() -> { - ClusterState state = dataNodeClient.admin().cluster().prepareState().setLocal(true) - .execute().actionGet().getState(); - assertTrue(state.blocks().hasGlobalBlockWithId(DiscoverySettings.NO_MASTER_BLOCK_ID)); - }); - - final Environment environment = TestEnvironment.newEnvironment(internalCluster().getDefaultSettings()); - expectThrows(() -> executeCommand(environment), UnsafeBootstrapMasterCommand.FAILED_TO_OBTAIN_NODE_LOCK_MSG); - - NodeEnvironment nodeEnvironment = internalCluster().getMasterNodeInstance(NodeEnvironment.class); - internalCluster().stopRandomNode(InternalTestCluster.nameFilter(masterNodes.get(0))); - - MockTerminal terminal = executeCommand(environment); - - MetaData metaData = MetaData.FORMAT.loadLatestState(logger, xContentRegistry(), nodeEnvironment.nodeDataPaths()); - assertThat(terminal.getOutput(), containsString( - String.format(Locale.ROOT, UnsafeBootstrapMasterCommand.CLUSTER_STATE_TERM_VERSION_MSG_FORMAT, - metaData.coordinationMetaData().term(), metaData.version()))); - - internalCluster().startMasterOnlyNode(); - - assertBusy(() -> { - ClusterState state = dataNodeClient.admin().cluster().prepareState().setLocal(true) - .execute().actionGet().getState(); - assertFalse(state.blocks().hasGlobalBlockWithId(DiscoverySettings.NO_MASTER_BLOCK_ID)); - assertTrue(state.metaData().persistentSettings().getAsBoolean(UnsafeBootstrapMasterCommand.UNSAFE_BOOTSTRAP.getKey(), false)); - }); - - ensureGreen("test"); - } -} diff --git a/server/src/test/java/org/elasticsearch/discovery/ClusterDisruptionIT.java b/server/src/test/java/org/elasticsearch/discovery/ClusterDisruptionIT.java index 0a9016c20111b..78b6b81189c2b 100644 --- a/server/src/test/java/org/elasticsearch/discovery/ClusterDisruptionIT.java +++ b/server/src/test/java/org/elasticsearch/discovery/ClusterDisruptionIT.java @@ -39,7 +39,6 @@ import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.InternalTestCluster; -import 
org.elasticsearch.test.discovery.TestZenDiscovery; import org.elasticsearch.test.disruption.NetworkDisruption; import org.elasticsearch.test.disruption.NetworkDisruption.Bridge; import org.elasticsearch.test.disruption.NetworkDisruption.NetworkDisconnect; @@ -63,7 +62,6 @@ import static org.elasticsearch.action.DocWriteResponse.Result.CREATED; import static org.elasticsearch.action.DocWriteResponse.Result.UPDATED; -import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; @@ -358,27 +356,6 @@ public void onFailure(Exception e) { } } - public void testIndexImportedFromDataOnlyNodesIfMasterLostDataFolder() throws Exception { - // test for https://github.com/elastic/elasticsearch/issues/8823 - Settings zen1Settings = Settings.builder().put(TestZenDiscovery.USE_ZEN2.getKey(), false).build(); // TODO: needs adaptions for Zen2 - String masterNode = internalCluster().startMasterOnlyNode(zen1Settings); - internalCluster().startDataOnlyNode(zen1Settings); - ensureStableCluster(2); - assertAcked(prepareCreate("index").setSettings(Settings.builder().put("index.number_of_replicas", 0))); - index("index", "_doc", "1", jsonBuilder().startObject().field("text", "some text").endObject()); - ensureGreen(); - - internalCluster().restartNode(masterNode, new InternalTestCluster.RestartCallback() { - @Override - public boolean clearData(String nodeName) { - return true; - } - }); - - ensureGreen("index"); - assertTrue(client().prepareGet("index", "_doc", "1").get().isExists()); - } - public void testCannotJoinIfMasterLostDataFolder() throws Exception { String masterNode = internalCluster().startMasterOnlyNode(); String dataNode = internalCluster().startDataOnlyNode(); diff --git a/server/src/test/java/org/elasticsearch/gateway/GatewayIndexStateIT.java b/server/src/test/java/org/elasticsearch/gateway/GatewayIndexStateIT.java index 0cddb929472b7..e13f252c52064 100644 --- a/server/src/test/java/org/elasticsearch/gateway/GatewayIndexStateIT.java +++ b/server/src/test/java/org/elasticsearch/gateway/GatewayIndexStateIT.java @@ -37,7 +37,6 @@ import org.elasticsearch.cluster.routing.ShardRoutingState; import org.elasticsearch.common.Priority; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.discovery.zen.ElectMasterService; @@ -49,7 +48,6 @@ import org.elasticsearch.test.ESIntegTestCase.ClusterScope; import org.elasticsearch.test.ESIntegTestCase.Scope; import org.elasticsearch.test.InternalTestCluster.RestartCallback; -import org.elasticsearch.test.discovery.TestZenDiscovery; import java.io.IOException; import java.util.List; @@ -275,57 +273,6 @@ public void testTwoNodesSingleDoc() throws Exception { } } - public void testDanglingIndices() throws Exception { - /*TODO This test test does not work with Zen2, because once master node looses its cluster state during restart - it will start with term = 1, which is the same as the term data node has. Data node won't accept cluster state from master - after the restart, because the term is the same, but version of the cluster state is greater on the data node. - Consider adding term to JoinRequest, so that master node can bump its term if its current term is less than JoinRequest#term. 
- */ - logger.info("--> starting two nodes"); - - final String node_1 = internalCluster().startNodes(2, - Settings.builder().put(TestZenDiscovery.USE_ZEN2.getKey(), false).build()).get(0); - - logger.info("--> indexing a simple document"); - client().prepareIndex("test", "type1", "1").setSource("field1", "value1").setRefreshPolicy(IMMEDIATE).get(); - - logger.info("--> waiting for green status"); - ensureGreen(); - - logger.info("--> verify 1 doc in the index"); - for (int i = 0; i < 10; i++) { - assertHitCount(client().prepareSearch().setQuery(matchAllQuery()).get(), 1L); - } - assertThat(client().prepareGet("test", "type1", "1").execute().actionGet().isExists(), equalTo(true)); - - logger.info("--> restarting the nodes"); - internalCluster().fullRestart(new RestartCallback() { - @Override - public boolean clearData(String nodeName) { - return node_1.equals(nodeName); - } - }); - - logger.info("--> waiting for green status"); - ensureGreen(); - - // spin a bit waiting for the index to exists - long time = System.currentTimeMillis(); - while ((System.currentTimeMillis() - time) < TimeValue.timeValueSeconds(10).millis()) { - if (client().admin().indices().prepareExists("test").execute().actionGet().isExists()) { - break; - } - } - - logger.info("--> verify that the dangling index exists"); - assertThat(client().admin().indices().prepareExists("test").execute().actionGet().isExists(), equalTo(true)); - logger.info("--> waiting for green status"); - ensureGreen(); - - logger.info("--> verify the doc is there"); - assertThat(client().prepareGet("test", "type1", "1").execute().actionGet().isExists(), equalTo(true)); - } - /** * This test ensures that when an index deletion takes place while a node is offline, when that * node rejoins the cluster, it deletes the index locally instead of importing it as a dangling index. From 19dccf8f3e75aa26fb0b451d48939e56daf7cc92 Mon Sep 17 00:00:00 2001 From: Marios Trivyzas Date: Fri, 1 Feb 2019 16:13:51 +0200 Subject: [PATCH 27/54] SQL: [Docs] Add limitation for aggregate functions on scalars (#38186) Currently aggregate functions can operate only directly on fields. They cannot be used on top of scalar functions as painless scripting is currently not supported. --- docs/reference/sql/limitations.asciidoc | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/docs/reference/sql/limitations.asciidoc b/docs/reference/sql/limitations.asciidoc index 39b7c191131ff..8104580e2998c 100644 --- a/docs/reference/sql/limitations.asciidoc +++ b/docs/reference/sql/limitations.asciidoc @@ -70,6 +70,12 @@ When doing aggregations (`GROUP BY`) {es-sql} relies on {es}'s `composite` aggre But this type of aggregation does come with a limitation: sorting can only be applied on the key used for the aggregation's buckets. This means that queries like `SELECT * FROM test GROUP BY age ORDER BY COUNT(*)` are not possible. +[float] +=== Using aggregation functions on top of scalar functions + +Aggregation functions like <>, <>, etc. can only be used +directly on fields, and so queries like `SELECT MAX(abs(age)) FROM test` are not possible. + [float] === Using a sub-select @@ -92,7 +98,7 @@ But, if the sub-select would include a `GROUP BY` or `HAVING` or the enclosing ` FROM (SELECT ...) WHERE [simple_condition]`, this is currently **un-supported**. [float] -=== Use <>/<> aggregation functions in `HAVING` clause +=== Using <>/<> aggregation functions in `HAVING` clause Using `FIRST` and `LAST` in the `HAVING` clause is not supported. 
The same applies to <> and <> when their target column From c1270e97b004a7fbeeb02d3265029222f6ac0ab9 Mon Sep 17 00:00:00 2001 From: Andrey Ershov Date: Fri, 1 Feb 2019 15:24:08 +0100 Subject: [PATCH 28/54] Zen2ify testMasterFailoverDuringIndexingWithMappingChanges (#38178) In Zen2 cluster bootstrap is required and some parameters are called differently in Zen2. --- .../action/support/master/IndexingMasterFailoverIT.java | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/server/src/test/java/org/elasticsearch/action/support/master/IndexingMasterFailoverIT.java b/server/src/test/java/org/elasticsearch/action/support/master/IndexingMasterFailoverIT.java index 461c92d69f444..6b51836b4381d 100644 --- a/server/src/test/java/org/elasticsearch/action/support/master/IndexingMasterFailoverIT.java +++ b/server/src/test/java/org/elasticsearch/action/support/master/IndexingMasterFailoverIT.java @@ -23,7 +23,6 @@ import org.elasticsearch.action.index.IndexResponse; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.discovery.DiscoverySettings; -import org.elasticsearch.discovery.zen.ElectMasterService; import org.elasticsearch.discovery.zen.FaultDetection; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; @@ -72,12 +71,12 @@ public void testMasterFailoverDuringIndexingWithMappingChanges() throws Throwabl final Settings sharedSettings = Settings.builder() .put(FaultDetection.PING_TIMEOUT_SETTING.getKey(), "1s") // for hitting simulated network failures quickly .put(FaultDetection.PING_RETRIES_SETTING.getKey(), "1") // for hitting simulated network failures quickly - .put(TestZenDiscovery.USE_ZEN2.getKey(), false) - .put("discovery.zen.join_timeout", "10s") // still long to induce failures but to long so test won't time out + .put("cluster.join.timeout", "10s") // still long to induce failures but not too long so test won't time out .put(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey(), "1s") // <-- for hitting simulated network failures quickly - .put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), 2) .build(); + internalCluster().setBootstrapMasterNodeIndex(2); + internalCluster().startMasterOnlyNodes(3, sharedSettings); String dataNode = internalCluster().startDataOnlyNode(sharedSettings); From 2e475d63f7ca5d2354c1f603539763adae85f49e Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Fri, 1 Feb 2019 09:30:03 -0500 Subject: [PATCH 29/54] Do not set timeout for IndexRequests in GatewayIndexStateIT (#38147) CI might not be fast enough to publish a dynamic mapping update within 100ms. 
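As a rough sketch of the flaky pattern this patch removes, using the test's own index and field names (the one-minute figure is the stock IndexRequest default timeout, assumed here rather than shown in the diff):

[source,java]
--------------------------------------------------
// Flaky on a slow CI: the hard-coded 100ms timeout can expire while the master
// is still publishing the dynamic mapping update triggered by the request.
client().prepareIndex("test", "type1").setSource("field1", "value1")
    .setTimeout("100ms").execute().actionGet();

// Robust: with no explicit timeout the request waits up to the default
// (one minute) for the mapping update to be published.
client().prepareIndex("test", "type1").setSource("field1", "value1")
    .execute().actionGet();
--------------------------------------------------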
--- .../java/org/elasticsearch/gateway/GatewayIndexStateIT.java | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/server/src/test/java/org/elasticsearch/gateway/GatewayIndexStateIT.java b/server/src/test/java/org/elasticsearch/gateway/GatewayIndexStateIT.java index e13f252c52064..ebdae985a39c7 100644 --- a/server/src/test/java/org/elasticsearch/gateway/GatewayIndexStateIT.java +++ b/server/src/test/java/org/elasticsearch/gateway/GatewayIndexStateIT.java @@ -124,7 +124,7 @@ public void testSimpleOpenClose() throws Exception { logger.info("--> trying to index into a closed index ..."); try { - client().prepareIndex("test", "type1", "1").setSource("field1", "value1").setTimeout("1s").execute().actionGet(); + client().prepareIndex("test", "type1", "1").setSource("field1", "value1").execute().actionGet(); fail(); } catch (IndexClosedException e) { // all is well @@ -168,7 +168,7 @@ public void testSimpleOpenClose() throws Exception { logger.info("--> trying to index into a closed index ..."); try { - client().prepareIndex("test", "type1", "1").setSource("field1", "value1").setTimeout("1s").execute().actionGet(); + client().prepareIndex("test", "type1", "1").setSource("field1", "value1").execute().actionGet(); fail(); } catch (IndexClosedException e) { // all is well @@ -229,7 +229,7 @@ public void testJustMasterNodeAndJustDataNode() throws Exception { logger.info("--> create an index"); client().admin().indices().prepareCreate("test").execute().actionGet(); - client().prepareIndex("test", "type1").setSource("field1", "value1").setTimeout("100ms").execute().actionGet(); + client().prepareIndex("test", "type1").setSource("field1", "value1").execute().actionGet(); } public void testTwoNodesSingleDoc() throws Exception { From 66e4fb4fb6e1b4cd1224409cb2f0541bcfb1900f Mon Sep 17 00:00:00 2001 From: Jim Ferenczi Date: Fri, 1 Feb 2019 15:32:19 +0100 Subject: [PATCH 30/54] Do not compute cardinality if the `terms` execution mode does not use `global_ordinals` (#38169) In #38158 we ensured that global ordinals are not loaded when another execution hint is explicitly set on the source. This change is a follow up that addresses a comment https://github.com/elastic/elasticsearch/pull/38158/files/dd6043c1c019974fe1c58810384b89e30cd8b89b#r252984782 added after the merge. 
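A minimal request-side sketch, with a hypothetical keyword field: when a non-ordinals execution hint is set explicitly, the factory below now leaves maxOrd at -1 and never touches global ordinals.

[source,java]
--------------------------------------------------
// "map" forces ExecutionMode.MAP, so the GLOBAL_ORDINALS-only cardinality
// computation is skipped and global ordinals are never loaded.
TermsAggregationBuilder terms = AggregationBuilders.terms("by_genre")
    .field("genre")           // hypothetical keyword field
    .executionHint("map");
--------------------------------------------------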
--- .../bucket/terms/TermsAggregatorFactory.java | 21 +++++-------------- 1 file changed, 5 insertions(+), 16 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorFactory.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorFactory.java index 346da32763bd8..877a8e59bc2d3 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorFactory.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorFactory.java @@ -20,7 +20,6 @@ package org.elasticsearch.search.aggregations.bucket.terms; import org.apache.logging.log4j.LogManager; -import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.IndexSearcher; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.logging.DeprecationLogger; @@ -134,7 +133,7 @@ protected Aggregator doCreateInternal(ValuesSource valuesSource, Aggregator pare if (valuesSource instanceof ValuesSource.Bytes.WithOrdinals == false) { execution = ExecutionMode.MAP; } - final long maxOrd = getMaxOrd(context.searcher(), valuesSource, execution); + final long maxOrd = execution == ExecutionMode.GLOBAL_ORDINALS ? getMaxOrd(valuesSource, context.searcher()) : -1; if (execution == null) { execution = ExecutionMode.GLOBAL_ORDINALS; } @@ -208,23 +207,13 @@ static SubAggCollectionMode subAggCollectionMode(int expectedSize, long maxOrd) } /** - * Get the maximum ordinal value for the provided {@link ValuesSource} or -1 + * Get the maximum global ordinal value for the provided {@link ValuesSource} or -1 * if the values source is not an instance of {@link ValuesSource.Bytes.WithOrdinals}. */ - static long getMaxOrd(IndexSearcher searcher, ValuesSource source, ExecutionMode executionMode) throws IOException { + static long getMaxOrd(ValuesSource source, IndexSearcher searcher) throws IOException { if (source instanceof ValuesSource.Bytes.WithOrdinals) { ValuesSource.Bytes.WithOrdinals valueSourceWithOrdinals = (ValuesSource.Bytes.WithOrdinals) source; - if (executionMode == ExecutionMode.MAP) { - // global ordinals are not requested so we don't load them - // and return the biggest cardinality per segment instead. - long maxOrd = -1; - for (LeafReaderContext leaf : searcher.getIndexReader().leaves()) { - maxOrd = Math.max(maxOrd, valueSourceWithOrdinals.ordinalsValues(leaf).getValueCount()); - } - return maxOrd; - } else { - return valueSourceWithOrdinals.globalMaxOrd(searcher); - } + return valueSourceWithOrdinals.globalMaxOrd(searcher); } else { return -1; } @@ -269,7 +258,7 @@ Aggregator create(String name, List pipelineAggregators, Map metaData) throws IOException { - final long maxOrd = getMaxOrd(context.searcher(), valuesSource, ExecutionMode.GLOBAL_ORDINALS); + final long maxOrd = getMaxOrd(valuesSource, context.searcher()); assert maxOrd != -1; final double ratio = maxOrd / ((double) context.searcher().getIndexReader().numDocs()); From 35ed137684778ae80363576318d2919a32dce79e Mon Sep 17 00:00:00 2001 From: Alexander Reelsen Date: Fri, 1 Feb 2019 15:42:56 +0100 Subject: [PATCH 31/54] Ensure joda compatibility in custom date formats (#38171) If custom date formats are used, there may be combinations that the new performant DateFormatters.from() method has not covered yet. This adds a few such corner cases and ensures the tests are correctly commented out.
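A minimal sketch of one corner case now covered, assuming the usual DateFormatter.forPattern entry point and the default UTC zone:

[source,java]
--------------------------------------------------
// "dd/MM" carries only a day and a month; previously from() had no branch for
// this combination and threw, now only the missing year falls back to 1970.
DateFormatter formatter = DateFormatter.forPattern("dd/MM");
ZonedDateTime date = DateFormatters.from(formatter.parse("12/06"));
// date is 1970-06-12T00:00Z
--------------------------------------------------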
--- .../common/time/DateFormatters.java | 17 ++++++++++++++++- .../common/joda/JavaJodaTimeDuellingTests.java | 11 +++++++---- 2 files changed, 23 insertions(+), 5 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/common/time/DateFormatters.java b/server/src/main/java/org/elasticsearch/common/time/DateFormatters.java index 6f16e4bc71a71..1cbaaeb80b884 100644 --- a/server/src/main/java/org/elasticsearch/common/time/DateFormatters.java +++ b/server/src/main/java/org/elasticsearch/common/time/DateFormatters.java @@ -1604,13 +1604,16 @@ public static ZonedDateTime from(TemporalAccessor accessor) { } else if (isLocalDateSet) { return localDate.atStartOfDay(zoneId); } else if (isLocalTimeSet) { - return of(LOCALDATE_EPOCH, localTime, zoneId); + return of(getLocaldate(accessor), localTime, zoneId); } else if (accessor.isSupported(ChronoField.YEAR)) { if (accessor.isSupported(MONTH_OF_YEAR)) { return getFirstOfMonth(accessor).atStartOfDay(zoneId); } else { return Year.of(accessor.get(ChronoField.YEAR)).atDay(1).atStartOfDay(zoneId); } + } else if (accessor.isSupported(MONTH_OF_YEAR)) { + // missing year, falling back to the epoch and then filling + return getLocaldate(accessor).atStartOfDay(zoneId); } else if (accessor.isSupported(WeekFields.ISO.weekBasedYear())) { if (accessor.isSupported(WeekFields.ISO.weekOfWeekBasedYear())) { return Year.of(accessor.get(WeekFields.ISO.weekBasedYear())) @@ -1630,6 +1633,18 @@ public static ZonedDateTime from(TemporalAccessor accessor) { throw new IllegalArgumentException("temporal accessor [" + accessor + "] cannot be converted to zoned date time"); } + private static LocalDate getLocaldate(TemporalAccessor accessor) { + if (accessor.isSupported(MONTH_OF_YEAR)) { + if (accessor.isSupported(DAY_OF_MONTH)) { + return LocalDate.of(1970, accessor.get(MONTH_OF_YEAR), accessor.get(DAY_OF_MONTH)); + } else { + return LocalDate.of(1970, accessor.get(MONTH_OF_YEAR), 1); + } + } + + return LOCALDATE_EPOCH; + } + @SuppressForbidden(reason = "ZonedDateTime.of is fine here") private static ZonedDateTime of(LocalDate localDate, LocalTime localTime, ZoneId zoneId) { return ZonedDateTime.of(localDate, localTime, zoneId); diff --git a/server/src/test/java/org/elasticsearch/common/joda/JavaJodaTimeDuellingTests.java b/server/src/test/java/org/elasticsearch/common/joda/JavaJodaTimeDuellingTests.java index a0fcf988ca811..cd92061ae25d5 100644 --- a/server/src/test/java/org/elasticsearch/common/joda/JavaJodaTimeDuellingTests.java +++ b/server/src/test/java/org/elasticsearch/common/joda/JavaJodaTimeDuellingTests.java @@ -62,11 +62,14 @@ public void testTimeZoneFormatting() { formatter3.parse("20181126T121212.123-0830"); } + public void testCustomTimeFormats() { + assertSameDate("2010 12 06 11:05:15", "yyyy dd MM HH:mm:ss"); + assertSameDate("12/06", "dd/MM"); + assertSameDate("Nov 24 01:29:01 -0800", "MMM dd HH:mm:ss Z"); + } + // this test requires tests to run with -Djava.locale.providers=COMPAT in order to work -// public void testCustomTimeFormats() { -// assertSameDate("2010 12 06 11:05:15", "yyyy dd MM HH:mm:ss"); -// assertSameDate("12/06", "dd/MM"); -// assertSameDate("Nov 24 01:29:01 -0800", "MMM dd HH:mm:ss Z"); +// public void testCustomLocales() { // // // also ensure that locale based dates are the same // assertSameDate("Di., 05 Dez. 
2000 02:55:00 -0800", "E, d MMM yyyy HH:mm:ss Z", LocaleUtils.parse("de")); From c1c4abae10266f82e7ec2a34ed56c60e078023b7 Mon Sep 17 00:00:00 2001 From: Desmond Vehar Date: Fri, 1 Feb 2019 06:53:50 -0800 Subject: [PATCH 32/54] Throw if two inner_hits have the same name (#37645) This change throws an error if two inner_hits have the same name Closes #37584 --- .../join/query/HasChildQueryBuilder.java | 6 ++- .../join/query/HasParentQueryBuilder.java | 6 ++- .../join/query/HasChildQueryBuilderTests.java | 8 ++++ .../query/HasParentQueryBuilderTests.java | 8 ++++ .../index/query/NestedQueryBuilder.java | 6 ++- .../index/query/NestedQueryBuilderTests.java | 9 ++++ .../search/aggregations/bucket/NestedIT.java | 47 +++++++++++++++++++ 7 files changed, 87 insertions(+), 3 deletions(-) diff --git a/modules/parent-join/src/main/java/org/elasticsearch/join/query/HasChildQueryBuilder.java b/modules/parent-join/src/main/java/org/elasticsearch/join/query/HasChildQueryBuilder.java index 696c4a72bdba8..1c44daea4e982 100644 --- a/modules/parent-join/src/main/java/org/elasticsearch/join/query/HasChildQueryBuilder.java +++ b/modules/parent-join/src/main/java/org/elasticsearch/join/query/HasChildQueryBuilder.java @@ -460,9 +460,13 @@ protected QueryBuilder doRewrite(QueryRewriteContext queryShardContext) throws I @Override protected void extractInnerHitBuilders(Map innerHits) { if (innerHitBuilder != null) { + String name = innerHitBuilder.getName() != null ? innerHitBuilder.getName() : type; + if (innerHits.containsKey(name)) { + throw new IllegalArgumentException("[inner_hits] already contains an entry for key [" + name + "]"); + } + Map children = new HashMap<>(); InnerHitContextBuilder.extractInnerHits(query, children); - String name = innerHitBuilder.getName() != null ? innerHitBuilder.getName() : type; InnerHitContextBuilder innerHitContextBuilder = new ParentChildInnerHitContextBuilder(type, true, query, innerHitBuilder, children); innerHits.put(name, innerHitContextBuilder); diff --git a/modules/parent-join/src/main/java/org/elasticsearch/join/query/HasParentQueryBuilder.java b/modules/parent-join/src/main/java/org/elasticsearch/join/query/HasParentQueryBuilder.java index e98fdb9e9699d..30a2718aab054 100644 --- a/modules/parent-join/src/main/java/org/elasticsearch/join/query/HasParentQueryBuilder.java +++ b/modules/parent-join/src/main/java/org/elasticsearch/join/query/HasParentQueryBuilder.java @@ -285,9 +285,13 @@ protected QueryBuilder doRewrite(QueryRewriteContext queryShardContext) throws I @Override protected void extractInnerHitBuilders(Map innerHits) { if (innerHitBuilder != null) { + String name = innerHitBuilder.getName() != null ? innerHitBuilder.getName() : type; + if (innerHits.containsKey(name)) { + throw new IllegalArgumentException("[inner_hits] already contains an entry for key [" + name + "]"); + } + Map children = new HashMap<>(); InnerHitContextBuilder.extractInnerHits(query, children); - String name = innerHitBuilder.getName() != null ? 
innerHitBuilder.getName() : type; InnerHitContextBuilder innerHitContextBuilder = new ParentChildInnerHitContextBuilder(type, false, query, innerHitBuilder, children); innerHits.put(name, innerHitContextBuilder); diff --git a/modules/parent-join/src/test/java/org/elasticsearch/join/query/HasChildQueryBuilderTests.java b/modules/parent-join/src/test/java/org/elasticsearch/join/query/HasChildQueryBuilderTests.java index eea01d61386de..2a28e232b5eda 100644 --- a/modules/parent-join/src/test/java/org/elasticsearch/join/query/HasChildQueryBuilderTests.java +++ b/modules/parent-join/src/test/java/org/elasticsearch/join/query/HasChildQueryBuilderTests.java @@ -367,4 +367,12 @@ public void testIgnoreUnmappedWithRewrite() throws IOException { assertThat(query, notNullValue()); assertThat(query, instanceOf(MatchNoDocsQuery.class)); } + + public void testExtractInnerHitBuildersWithDuplicate() { + final HasChildQueryBuilder queryBuilder + = new HasChildQueryBuilder(CHILD_DOC, new WrapperQueryBuilder(new MatchAllQueryBuilder().toString()), ScoreMode.None); + queryBuilder.innerHit(new InnerHitBuilder("some_name")); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, + () -> InnerHitContextBuilder.extractInnerHits(queryBuilder, Collections.singletonMap("some_name", null))); + } } diff --git a/modules/parent-join/src/test/java/org/elasticsearch/join/query/HasParentQueryBuilderTests.java b/modules/parent-join/src/test/java/org/elasticsearch/join/query/HasParentQueryBuilderTests.java index 164405f653444..ea77ad80799ba 100644 --- a/modules/parent-join/src/test/java/org/elasticsearch/join/query/HasParentQueryBuilderTests.java +++ b/modules/parent-join/src/test/java/org/elasticsearch/join/query/HasParentQueryBuilderTests.java @@ -268,4 +268,12 @@ public void testIgnoreUnmappedWithRewrite() throws IOException { assertThat(query, notNullValue()); assertThat(query, instanceOf(MatchNoDocsQuery.class)); } + + public void testExtractInnerHitBuildersWithDuplicate() { + final HasParentQueryBuilder queryBuilder + = new HasParentQueryBuilder(CHILD_DOC, new WrapperQueryBuilder(new MatchAllQueryBuilder().toString()), false); + queryBuilder.innerHit(new InnerHitBuilder("some_name")); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, + () -> InnerHitContextBuilder.extractInnerHits(queryBuilder, Collections.singletonMap("some_name", null))); + } } diff --git a/server/src/main/java/org/elasticsearch/index/query/NestedQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/NestedQueryBuilder.java index 3c3856e208f04..ee8062308ac11 100644 --- a/server/src/main/java/org/elasticsearch/index/query/NestedQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/NestedQueryBuilder.java @@ -317,10 +317,14 @@ protected QueryBuilder doRewrite(QueryRewriteContext queryRewriteContext) throws @Override public void extractInnerHitBuilders(Map innerHits) { if (innerHitBuilder != null) { + String name = innerHitBuilder.getName() != null ? innerHitBuilder.getName() : path; + if (innerHits.containsKey(name)) { + throw new IllegalArgumentException("[inner_hits] already contains an entry for key [" + name + "]"); + } + Map children = new HashMap<>(); InnerHitContextBuilder.extractInnerHits(query, children); InnerHitContextBuilder innerHitContextBuilder = new NestedInnerHitContextBuilder(path, query, innerHitBuilder, children); - String name = innerHitBuilder.getName() != null ? 
innerHitBuilder.getName() : path; innerHits.put(name, innerHitContextBuilder); } } diff --git a/server/src/test/java/org/elasticsearch/index/query/NestedQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/NestedQueryBuilderTests.java index ac9ae8d0fa7fb..a3b6376a048f2 100644 --- a/server/src/test/java/org/elasticsearch/index/query/NestedQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/NestedQueryBuilderTests.java @@ -41,6 +41,7 @@ import org.hamcrest.Matchers; import java.io.IOException; +import java.util.Collections; import java.util.HashMap; import java.util.Map; @@ -354,4 +355,12 @@ public void testBuildIgnoreUnmappedNestQuery() throws Exception { nestedContextBuilder.build(searchContext, innerHitsContext); assertThat(innerHitsContext.getInnerHits().size(), Matchers.equalTo(0)); } + + public void testExtractInnerHitBuildersWithDuplicate() { + final NestedQueryBuilder queryBuilder + = new NestedQueryBuilder("path", new WrapperQueryBuilder(new MatchAllQueryBuilder().toString()), ScoreMode.None); + queryBuilder.innerHit(new InnerHitBuilder("some_name")); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, + () -> InnerHitContextBuilder.extractInnerHits(queryBuilder,Collections.singletonMap("some_name", null))); + } } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/NestedIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/NestedIT.java index d68c85ab652ae..14fa6a9f565ef 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/NestedIT.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/NestedIT.java @@ -21,10 +21,13 @@ import org.apache.lucene.search.join.ScoreMode; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchPhaseExecutionException; +import org.elasticsearch.action.search.SearchRequestBuilder; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.index.query.InnerHitBuilder; +import org.elasticsearch.rest.RestStatus; import org.elasticsearch.search.aggregations.Aggregator.SubAggCollectionMode; import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.bucket.filter.Filter; @@ -46,6 +49,7 @@ import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS; import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.elasticsearch.index.query.QueryBuilders.boolQuery; import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; import static org.elasticsearch.index.query.QueryBuilders.nestedQuery; import static org.elasticsearch.index.query.QueryBuilders.termQuery; @@ -57,6 +61,7 @@ import static org.elasticsearch.search.aggregations.AggregationBuilders.sum; import static org.elasticsearch.search.aggregations.AggregationBuilders.terms; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertFailures; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static 
org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse; @@ -674,4 +679,46 @@ public void testFilterAggInsideNestedAgg() throws Exception { numStringParams = bucket.getAggregations().get("num_string_params"); assertThat(numStringParams.getDocCount(), equalTo(0L)); } + + public void testExtractInnerHitBuildersWithDuplicateHitName() throws Exception { + assertAcked( + prepareCreate("idxduplicatehitnames") + .setSettings(Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 1).put(SETTING_NUMBER_OF_REPLICAS, 0)) + .addMapping("product", "categories", "type=keyword", "name", "type=text", "property", "type=nested") + ); + ensureGreen("idxduplicatehitnames"); + + SearchRequestBuilder searchRequestBuilder = client() + .prepareSearch("idxduplicatehitnames") + .setQuery(boolQuery() + .should(nestedQuery("property", termQuery("property.id", 1D), ScoreMode.None).innerHit(new InnerHitBuilder("ih1"))) + .should(nestedQuery("property", termQuery("property.id", 1D), ScoreMode.None).innerHit(new InnerHitBuilder("ih2"))) + .should(nestedQuery("property", termQuery("property.id", 1D), ScoreMode.None).innerHit(new InnerHitBuilder("ih1")))); + + assertFailures( + searchRequestBuilder, + RestStatus.BAD_REQUEST, + containsString("[inner_hits] already contains an entry for key [ih1]")); + } + + public void testExtractInnerHitBuildersWithDuplicatePath() throws Exception { + assertAcked( + prepareCreate("idxnullhitnames") + .setSettings(Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 1).put(SETTING_NUMBER_OF_REPLICAS, 0)) + .addMapping("product", "categories", "type=keyword", "name", "type=text", "property", "type=nested") + ); + ensureGreen("idxnullhitnames"); + + SearchRequestBuilder searchRequestBuilder = client() + .prepareSearch("idxnullhitnames") + .setQuery(boolQuery() + .should(nestedQuery("property", termQuery("property.id", 1D), ScoreMode.None).innerHit(new InnerHitBuilder())) + .should(nestedQuery("property", termQuery("property.id", 1D), ScoreMode.None).innerHit(new InnerHitBuilder())) + .should(nestedQuery("property", termQuery("property.id", 1D), ScoreMode.None).innerHit(new InnerHitBuilder()))); + + assertFailures( + searchRequestBuilder, + RestStatus.BAD_REQUEST, + containsString("[inner_hits] already contains an entry for key [property]")); + } } From da6269b456d5a3cf0b75d184d0b2148b8520e2b4 Mon Sep 17 00:00:00 2001 From: Tanguy Leroux Date: Fri, 1 Feb 2019 15:59:11 +0100 Subject: [PATCH 33/54] RestoreService should update primary terms when restoring shards of existing indices (#38177) When restoring shards of existing indices, the RestoreService also restores the values of primary terms stored in the snapshot index metadata. The primary terms are not updated and could potentially conflict with current index primary terms if the restored primary terms are lower than the existing ones. This situation is likely to happen with replicated closed indices (because primary terms are increased when the index is transitioning from open to closed state, and the snapshotted primary terms are the ones at the time the index was opened) (see #38024) and maybe also with CCR. This commit changes the RestoreService so that it updates the primary terms using the maximum value between the snapshotted values and the existing values.
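A small illustration of the invariant being preserved, with hypothetical term values:

[source,java]
--------------------------------------------------
// The index was closed and reopened after the snapshot, so the live primary
// term moved ahead of the snapshotted one.
long snapshottedTerm = 2;
long currentTerm = 4;

// Restoring the snapshotted value verbatim would move the term backwards;
// taking the max keeps primary terms monotonically increasing.
long restoredTerm = Math.max(snapshottedTerm, currentTerm); // == 4
--------------------------------------------------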
Related to #33888 --- .../snapshots/RestoreService.java | 6 +++ .../SharedClusterSnapshotRestoreIT.java | 43 +++++++++++++++++++ 2 files changed, 49 insertions(+) diff --git a/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java b/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java index b8fa8c6f1a9c8..49fd26c070af1 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java +++ b/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java @@ -314,6 +314,12 @@ public ClusterState execute(ClusterState currentState) { currentIndexMetaData.getMappingVersion() + 1)); indexMdBuilder.settingsVersion(Math.max(snapshotIndexMetaData.getSettingsVersion(), currentIndexMetaData.getSettingsVersion() + 1)); + + for (int shard = 0; shard < snapshotIndexMetaData.getNumberOfShards(); shard++) { + indexMdBuilder.primaryTerm(shard, + Math.max(snapshotIndexMetaData.primaryTerm(shard), currentIndexMetaData.primaryTerm(shard))); + } + if (!request.includeAliases()) { // Remove all snapshot aliases if (!snapshotIndexMetaData.getAliases().isEmpty()) { diff --git a/server/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java b/server/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java index 1a1b886e0e373..d633493622dcd 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java +++ b/server/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java @@ -116,6 +116,7 @@ import java.util.function.Consumer; import java.util.function.Predicate; import java.util.stream.Collectors; +import java.util.stream.IntStream; import java.util.stream.Stream; import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS; @@ -3704,6 +3705,48 @@ public void testAbortedSnapshotDuringInitDoesNotStart() throws Exception { } } + public void testRestoreIncreasesPrimaryTerms() { + final String indexName = randomAlphaOfLengthBetween(5, 10).toLowerCase(Locale.ROOT); + createIndex(indexName, Settings.builder() + .put(SETTING_NUMBER_OF_SHARDS, 2) + .put(SETTING_NUMBER_OF_REPLICAS, 0) + .build()); + ensureGreen(indexName); + + if (randomBoolean()) { + // open and close the index to increase the primary terms + for (int i = 0; i < randomInt(3); i++) { + assertAcked(client().admin().indices().prepareClose(indexName)); + assertAcked(client().admin().indices().prepareOpen(indexName)); + } + } + + final IndexMetaData indexMetaData = client().admin().cluster().prepareState().clear().setIndices(indexName) + .setMetaData(true).get().getState().metaData().index(indexName); + final int numPrimaries = getNumShards(indexName).numPrimaries; + final Map primaryTerms = IntStream.range(0, numPrimaries) + .boxed().collect(Collectors.toMap(shardId -> shardId, indexMetaData::primaryTerm)); + + assertAcked(client().admin().cluster().preparePutRepository("test-repo").setType("fs").setSettings(randomRepoSettings())); + final CreateSnapshotResponse createSnapshotResponse = client().admin().cluster().prepareCreateSnapshot("test-repo", "test-snap") + .setWaitForCompletion(true).setIndices(indexName).get(); + assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(numPrimaries)); + assertThat(createSnapshotResponse.getSnapshotInfo().failedShards(), equalTo(0)); + + assertAcked(client().admin().indices().prepareClose(indexName)); + + final RestoreSnapshotResponse restoreSnapshotResponse = client().admin().cluster().prepareRestoreSnapshot("test-repo", 
"test-snap") + .setWaitForCompletion(true).get(); + assertThat(restoreSnapshotResponse.getRestoreInfo().successfulShards(), equalTo(numPrimaries)); + assertThat(restoreSnapshotResponse.getRestoreInfo().failedShards(), equalTo(0)); + + final IndexMetaData restoredIndexMetaData = client().admin().cluster().prepareState().clear().setIndices(indexName) + .setMetaData(true).get().getState().metaData().index(indexName); + for (int shardId = 0; shardId < numPrimaries; shardId++) { + assertThat(restoredIndexMetaData.primaryTerm(shardId), equalTo(primaryTerms.get(shardId) + 1)); + } + } + private RepositoryData getRepositoryData(Repository repository) throws InterruptedException { ThreadPool threadPool = internalCluster().getInstance(ThreadPool.class, internalCluster().getMasterName()); final SetOnce repositoryData = new SetOnce<>(); From 025bf2840528bb25b82cbf2542c7a545728bcbf9 Mon Sep 17 00:00:00 2001 From: Yannick Welsch Date: Fri, 1 Feb 2019 16:02:37 +0100 Subject: [PATCH 34/54] Fix _host based require filters (#38173) Using index.routing.allocation.require._host does not correctly work because the boolean logic in filter matching is broken (DiscoveryNodeFilters.match(...) will return false) when opType ==OpType.AND --- .../cluster/node/DiscoveryNodeFilters.java | 11 +--------- .../node/DiscoveryNodeFiltersTests.java | 20 +++++++++++++++++++ 2 files changed, 21 insertions(+), 10 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodeFilters.java b/server/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodeFilters.java index 6b15d1f24581d..aacda43864e51 100644 --- a/server/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodeFilters.java +++ b/server/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodeFilters.java @@ -147,16 +147,7 @@ public boolean match(DiscoveryNode node) { } } else if ("_host".equals(attr)) { for (String value : values) { - if (Regex.simpleMatch(value, node.getHostName())) { - if (opType == OpType.OR) { - return true; - } - } else { - if (opType == OpType.AND) { - return false; - } - } - if (Regex.simpleMatch(value, node.getHostAddress())) { + if (Regex.simpleMatch(value, node.getHostName()) || Regex.simpleMatch(value, node.getHostAddress())) { if (opType == OpType.OR) { return true; } diff --git a/server/src/test/java/org/elasticsearch/cluster/node/DiscoveryNodeFiltersTests.java b/server/src/test/java/org/elasticsearch/cluster/node/DiscoveryNodeFiltersTests.java index d6e6d1691a042..b22518a2e52b2 100644 --- a/server/src/test/java/org/elasticsearch/cluster/node/DiscoveryNodeFiltersTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/node/DiscoveryNodeFiltersTests.java @@ -235,6 +235,26 @@ public void testIpPublishFilteringMatchingOr() { assertThat(filters.match(node), equalTo(true)); } + public void testHostNameFilteringMatchingAnd() { + Settings settings = shuffleSettings(Settings.builder() + .put("xxx._host", "A") + .build()); + DiscoveryNodeFilters filters = buildFromSettings(AND, "xxx.", settings); + + DiscoveryNode node = new DiscoveryNode("", "", "", "A", "192.1.1.54", localAddress, emptyMap(), emptySet(), null); + assertThat(filters.match(node), equalTo(true)); + } + + public void testHostAddressFilteringMatchingAnd() { + Settings settings = shuffleSettings(Settings.builder() + .put("xxx._host", "192.1.1.54") + .build()); + DiscoveryNodeFilters filters = buildFromSettings(AND, "xxx.", settings); + + DiscoveryNode node = new DiscoveryNode("", "", "", "A", "192.1.1.54", localAddress, emptyMap(), emptySet(), 
null); + assertThat(filters.match(node), equalTo(true)); + } + public void testIpPublishFilteringNotMatchingOr() { Settings settings = shuffleSettings(Settings.builder() .put("xxx.tag", "A") From 2ca22209cd52f09e5f352e147e61000f1a79e519 Mon Sep 17 00:00:00 2001 From: Jay Modi Date: Fri, 1 Feb 2019 08:34:11 -0700 Subject: [PATCH 35/54] Enable TLSv1.3 by default for JDKs with support (#38103) This commit enables the use of TLSv1.3 with security by properly mapping `TLSv1.3` in the supported protocols setting to the algorithm for an SSLContext. Additionally, we also enable TLSv1.3 by default on JDKs that support it. An issue was uncovered with the MockWebServer when TLSv1.3 is used that ultimately winds up in an endless loop when the client does not trust the server's certificate. Due to this, SSLConfigurationReloaderTests has been pinned to TLSv1.2. Closes #32276 --- .../migration/migrate_7_0/settings.asciidoc | 4 +- .../settings/security-settings.asciidoc | 12 ++-- docs/reference/settings/ssl-settings.asciidoc | 3 +- .../common/ssl/SslConfiguration.java | 38 ++++++++-- .../common/ssl/SslConfigurationLoader.java | 6 +- .../xpack/core/XPackSettings.java | 17 ++++- .../xpack/core/ssl/SSLService.java | 69 +++++++++---------- .../xpack/core/XPackSettingsTests.java | 21 ++++++ .../ssl/SSLConfigurationReloaderTests.java | 21 ++++-- 9 files changed, 134 insertions(+), 57 deletions(-) diff --git a/docs/reference/migration/migrate_7_0/settings.asciidoc b/docs/reference/migration/migrate_7_0/settings.asciidoc index 0b18c267748b5..2e5631b378652 100644 --- a/docs/reference/migration/migrate_7_0/settings.asciidoc +++ b/docs/reference/migration/migrate_7_0/settings.asciidoc @@ -138,11 +138,11 @@ used. TLS version 1.0 is now disabled by default as it suffers from https://www.owasp.org/index.php/Transport_Layer_Protection_Cheat_Sheet#Rule_-_Only_Support_Strong_Protocols[known security issues]. -The default protocols are now TLSv1.2 and TLSv1.1. +The default protocols are now TLSv1.3 (if supported), TLSv1.2 and TLSv1.1. You can enable TLS v1.0 by configuring the relevant `ssl.supported_protocols` setting to include `"TLSv1"`, for example: [source,yaml] -------------------------------------------------- -xpack.security.http.ssl.supported_protocols: [ "TLSv1.2", "TLSv1.1", "TLSv1" ] +xpack.security.http.ssl.supported_protocols: [ "TLSv1.3", "TLSv1.2", "TLSv1.1", "TLSv1" ] -------------------------------------------------- [float] diff --git a/docs/reference/settings/security-settings.asciidoc b/docs/reference/settings/security-settings.asciidoc index 16ce60e986b93..393428373f8c0 100644 --- a/docs/reference/settings/security-settings.asciidoc +++ b/docs/reference/settings/security-settings.asciidoc @@ -480,7 +480,8 @@ and `full`. Defaults to `full`. See <> for an explanation of these values. `ssl.supported_protocols`:: -Supported protocols for TLS/SSL (with versions). Defaults to `TLSv1.2,TLSv1.1`. +Supported protocols for TLS/SSL (with versions). Defaults to `TLSv1.3,TLSv1.2,TLSv1.1` if +the JVM supports TLSv1.3, otherwise `TLSv1.2,TLSv1.1`. `ssl.cipher_suites`:: Specifies the cipher suites that should be supported when communicating with the LDAP server. @@ -724,7 +725,8 @@ and `full`. Defaults to `full`. See <> for an explanation of these values. `ssl.supported_protocols`:: -Supported protocols for TLS/SSL (with versions). Defaults to `TLSv1.2, TLSv1.1`. +Supported protocols for TLS/SSL (with versions). Defaults to `TLSv1.3,TLSv1.2,TLSv1.1` if +the JVM supports TLSv1.3, otherwise `TLSv1.2,TLSv1.1`.
`ssl.cipher_suites`:: Specifies the cipher suites that should be supported when communicating with the Active Directory server. @@ -1132,7 +1134,8 @@ Defaults to `full`. See <> for a more detailed explanation of these values. `ssl.supported_protocols`:: -Specifies the supported protocols for TLS/SSL. +Specifies the supported protocols for TLS/SSL. Defaults to `TLSv1.3,TLSv1.2,TLSv1.1` if +the JVM supports TLSv1.3, otherwise `TLSv1.2,TLSv1.1`. `ssl.cipher_suites`:: Specifies the @@ -1206,7 +1209,8 @@ settings. For more information, see `ssl.supported_protocols`:: Supported protocols with versions. Valid protocols: `SSLv2Hello`, -`SSLv3`, `TLSv1`, `TLSv1.1`, `TLSv1.2`. Defaults to `TLSv1.2`, `TLSv1.1`. +`SSLv3`, `TLSv1`, `TLSv1.1`, `TLSv1.2`, `TLSv1.3`. Defaults to `TLSv1.3,TLSv1.2,TLSv1.1` if +the JVM supports TLSv1.3, otherwise `TLSv1.2,TLSv1.1`. + -- NOTE: If `xpack.security.fips_mode.enabled` is `true`, you cannot use `SSLv2Hello` diff --git a/docs/reference/settings/ssl-settings.asciidoc b/docs/reference/settings/ssl-settings.asciidoc index a04f5581f2abd..a4422b8fb2d3c 100644 --- a/docs/reference/settings/ssl-settings.asciidoc +++ b/docs/reference/settings/ssl-settings.asciidoc @@ -11,7 +11,8 @@ endif::server[] +{ssl-prefix}.ssl.supported_protocols+:: Supported protocols with versions. Valid protocols: `SSLv2Hello`, -`SSLv3`, `TLSv1`, `TLSv1.1`, `TLSv1.2`. Defaults to `TLSv1.2`, `TLSv1.1`. +`SSLv3`, `TLSv1`, `TLSv1.1`, `TLSv1.2`, `TLSv1.3`. Defaults to `TLSv1.3,TLSv1.2,TLSv1.1` if +the JVM supports TLSv1.3, otherwise `TLSv1.2,TLSv1.1`. ifdef::server[] diff --git a/libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/SslConfiguration.java b/libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/SslConfiguration.java index 146ba916b6b07..68df7d248340d 100644 --- a/libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/SslConfiguration.java +++ b/libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/SslConfiguration.java @@ -24,11 +24,14 @@ import javax.net.ssl.X509ExtendedTrustManager; import java.nio.file.Path; import java.security.GeneralSecurityException; -import java.util.Arrays; +import java.security.NoSuchAlgorithmException; import java.util.Collection; import java.util.Collections; import java.util.HashSet; +import java.util.LinkedHashMap; import java.util.List; +import java.util.Map; +import java.util.Map.Entry; import java.util.Objects; import java.util.Set; @@ -40,6 +43,30 @@ */ public class SslConfiguration { + /** + * An ordered map of protocol algorithms to SSLContext algorithms. The map is ordered from most + * secure to least secure. The names in this map are taken from the + * + * Java Security Standard Algorithm Names Documentation for Java 11. 
+ */ + static final Map ORDERED_PROTOCOL_ALGORITHM_MAP; + static { + LinkedHashMap protocolAlgorithmMap = new LinkedHashMap<>(); + try { + SSLContext.getInstance("TLSv1.3"); + protocolAlgorithmMap.put("TLSv1.3", "TLSv1.3"); + } catch (NoSuchAlgorithmException e) { + // ignore since we support JVMs that do not support TLSv1.3 + } + protocolAlgorithmMap.put("TLSv1.2", "TLSv1.2"); + protocolAlgorithmMap.put("TLSv1.1", "TLSv1.1"); + protocolAlgorithmMap.put("TLSv1", "TLSv1"); + protocolAlgorithmMap.put("SSLv3", "SSLv3"); + protocolAlgorithmMap.put("SSLv2", "SSL"); + protocolAlgorithmMap.put("SSLv2Hello", "SSL"); + ORDERED_PROTOCOL_ALGORITHM_MAP = Collections.unmodifiableMap(protocolAlgorithmMap); + } + private final SslTrustConfig trustConfig; private final SslKeyConfig keyConfig; private final SslVerificationMode verificationMode; @@ -124,12 +151,13 @@ private String contextProtocol() { if (supportedProtocols.isEmpty()) { throw new SslConfigException("no SSL/TLS protocols have been configured"); } - for (String tryProtocol : Arrays.asList("TLSv1.2", "TLSv1.1", "TLSv1", "SSLv3")) { - if (supportedProtocols.contains(tryProtocol)) { - return tryProtocol; + for (Entry entry : ORDERED_PROTOCOL_ALGORITHM_MAP.entrySet()) { + if (supportedProtocols.contains(entry.getKey())) { + return entry.getValue(); } } - return "SSL"; + throw new SslConfigException("no supported SSL/TLS protocol was found in the configured supported protocols: " + + supportedProtocols); } @Override diff --git a/libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/SslConfigurationLoader.java b/libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/SslConfigurationLoader.java index efe87f7c30322..6e511565a9f53 100644 --- a/libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/SslConfigurationLoader.java +++ b/libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/SslConfigurationLoader.java @@ -26,12 +26,14 @@ import java.security.NoSuchAlgorithmException; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collections; import java.util.List; import java.util.Objects; import java.util.function.Function; import java.util.stream.Collectors; import static org.elasticsearch.common.ssl.KeyStoreUtil.inferKeyStoreType; +import static org.elasticsearch.common.ssl.SslConfiguration.ORDERED_PROTOCOL_ALGORITHM_MAP; import static org.elasticsearch.common.ssl.SslConfigurationKeys.CERTIFICATE; import static org.elasticsearch.common.ssl.SslConfigurationKeys.CERTIFICATE_AUTHORITIES; import static org.elasticsearch.common.ssl.SslConfigurationKeys.CIPHERS; @@ -68,7 +70,9 @@ */ public abstract class SslConfigurationLoader { - static final List DEFAULT_PROTOCOLS = Arrays.asList("TLSv1.2", "TLSv1.1"); + static final List DEFAULT_PROTOCOLS = Collections.unmodifiableList( + ORDERED_PROTOCOL_ALGORITHM_MAP.containsKey("TLSv1.3") ? 
+ Arrays.asList("TLSv1.3", "TLSv1.2", "TLSv1.1") : Arrays.asList("TLSv1.2", "TLSv1.1")); static final List DEFAULT_CIPHERS = loadDefaultCiphers(); private static final char[] EMPTY_PASSWORD = new char[0]; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackSettings.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackSettings.java index 6a2a693d3b15e..dd18e3b319468 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackSettings.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackSettings.java @@ -6,6 +6,7 @@ package org.elasticsearch.xpack.core; +import org.apache.logging.log4j.LogManager; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.xpack.core.security.SecurityField; @@ -16,6 +17,7 @@ import javax.crypto.Cipher; import javax.crypto.SecretKeyFactory; +import javax.net.ssl.SSLContext; import java.security.NoSuchAlgorithmException; import java.util.ArrayList; @@ -154,7 +156,20 @@ private XPackSettings() { } }, Setting.Property.NodeScope); - public static final List DEFAULT_SUPPORTED_PROTOCOLS = Arrays.asList("TLSv1.2", "TLSv1.1"); + public static final List DEFAULT_SUPPORTED_PROTOCOLS; + + static { + boolean supportsTLSv13 = false; + try { + SSLContext.getInstance("TLSv1.3"); + supportsTLSv13 = true; + } catch (NoSuchAlgorithmException e) { + LogManager.getLogger(XPackSettings.class).debug("TLSv1.3 is not supported", e); + } + DEFAULT_SUPPORTED_PROTOCOLS = supportsTLSv13 ? + Arrays.asList("TLSv1.3", "TLSv1.2", "TLSv1.1") : Arrays.asList("TLSv1.2", "TLSv1.1"); + } + public static final SSLClientAuth CLIENT_AUTH_DEFAULT = SSLClientAuth.REQUIRED; public static final SSLClientAuth HTTP_CLIENT_AUTH_DEFAULT = SSLClientAuth.NONE; public static final VerificationMode VERIFICATION_MODE_DEFAULT = VerificationMode.FULL; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/SSLService.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/SSLService.java index e832de629359a..3611b6663a38f 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/SSLService.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/SSLService.java @@ -46,6 +46,7 @@ import java.util.Enumeration; import java.util.HashMap; import java.util.HashSet; +import java.util.LinkedHashMap; import java.util.LinkedList; import java.util.List; import java.util.Map; @@ -56,6 +57,8 @@ import java.util.function.Supplier; import java.util.stream.Collectors; +import static org.elasticsearch.xpack.core.XPackSettings.DEFAULT_SUPPORTED_PROTOCOLS; + /** * Provides access to {@link SSLEngine} and {@link SSLSocketFactory} objects based on a provided configuration. All * configurations loaded by this service must be configured on construction. @@ -63,6 +66,26 @@ public class SSLService { private static final Logger logger = LogManager.getLogger(SSLService.class); + /** + * An ordered map of protocol algorithms to SSLContext algorithms. The map is ordered from most + * secure to least secure. The names in this map are taken from the + * + * Java Security Standard Algorithm Names Documentation for Java 11. 
+ */ + private static final Map ORDERED_PROTOCOL_ALGORITHM_MAP; + static { + LinkedHashMap protocolAlgorithmMap = new LinkedHashMap<>(); + if (DEFAULT_SUPPORTED_PROTOCOLS.contains("TLSv1.3")) { + protocolAlgorithmMap.put("TLSv1.3", "TLSv1.3"); + } + protocolAlgorithmMap.put("TLSv1.2", "TLSv1.2"); + protocolAlgorithmMap.put("TLSv1.1", "TLSv1.1"); + protocolAlgorithmMap.put("TLSv1", "TLSv1"); + protocolAlgorithmMap.put("SSLv3", "SSLv3"); + protocolAlgorithmMap.put("SSLv2", "SSL"); + protocolAlgorithmMap.put("SSLv2Hello", "SSL"); + ORDERED_PROTOCOL_ALGORITHM_MAP = Collections.unmodifiableMap(protocolAlgorithmMap); + } private final Settings settings; @@ -691,47 +714,19 @@ public SSLConfiguration getSSLConfiguration(String contextName) { /** * Maps the supported protocols to an appropriate ssl context algorithm. We make an attempt to use the "best" algorithm when * possible. The names in this method are taken from the - * JCA Standard Algorithm Name - * Documentation for Java 8. + * Java Security + * Standard Algorithm Names Documentation for Java 11. */ private static String sslContextAlgorithm(List supportedProtocols) { if (supportedProtocols.isEmpty()) { - return "TLSv1.2"; - } - - String algorithm = "SSL"; - for (String supportedProtocol : supportedProtocols) { - switch (supportedProtocol) { - case "TLSv1.2": - return "TLSv1.2"; - case "TLSv1.1": - if ("TLSv1.2".equals(algorithm) == false) { - algorithm = "TLSv1.1"; - } - break; - case "TLSv1": - switch (algorithm) { - case "TLSv1.2": - case "TLSv1.1": - break; - default: - algorithm = "TLSv1"; - } - break; - case "SSLv3": - switch (algorithm) { - case "SSLv2": - case "SSL": - algorithm = "SSLv3"; - } - break; - case "SSLv2": - case "SSLv2Hello": - break; - default: - throw new IllegalArgumentException("found unexpected value in supported protocols: " + supportedProtocol); + throw new IllegalArgumentException("no SSL/TLS protocols have been configured"); + } + for (Entry entry : ORDERED_PROTOCOL_ALGORITHM_MAP.entrySet()) { + if (supportedProtocols.contains(entry.getKey())) { + return entry.getValue(); } } - return algorithm; + throw new IllegalArgumentException("no supported SSL/TLS protocol was found in the configured supported protocols: " + + supportedProtocols); } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/XPackSettingsTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/XPackSettingsTests.java index 7689ae4088f34..004b46897a48e 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/XPackSettingsTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/XPackSettingsTests.java @@ -9,9 +9,11 @@ import org.elasticsearch.test.ESTestCase; import javax.crypto.Cipher; import javax.crypto.SecretKeyFactory; +import javax.net.ssl.SSLContext; import java.security.NoSuchAlgorithmException; +import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.hasItem; import static org.hamcrest.Matchers.not; @@ -48,6 +50,16 @@ public void testPasswordHashingAlgorithmSettingValidation() { Settings.builder().put(XPackSettings.PASSWORD_HASHING_ALGORITHM.getKey(), bcryptAlgo).build())); } + public void testDefaultSupportedProtocolsWithTLSv13() throws Exception { + assumeTrue("current JVM does not support TLSv1.3", supportTLSv13()); + assertThat(XPackSettings.DEFAULT_SUPPORTED_PROTOCOLS, contains("TLSv1.3", "TLSv1.2", "TLSv1.1")); + } + + public void testDefaultSupportedProtocolsWithoutTLSv13() 
throws Exception { + assumeFalse("current JVM supports TLSv1.3", supportTLSv13()); + assertThat(XPackSettings.DEFAULT_SUPPORTED_PROTOCOLS, contains("TLSv1.2", "TLSv1.1")); + } + private boolean isSecretkeyFactoryAlgoAvailable(String algorithmId) { try { SecretKeyFactory.getInstance(algorithmId); @@ -56,4 +68,13 @@ private boolean isSecretkeyFactoryAlgoAvailable(String algorithmId) { return false; } } + + private boolean supportTLSv13() { + try { + SSLContext.getInstance("TLSv1.3"); + return true; + } catch (NoSuchAlgorithmException e) { + return false; + } + } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ssl/SSLConfigurationReloaderTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ssl/SSLConfigurationReloaderTests.java index 318d8e4150a1d..6857d8a0456e3 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ssl/SSLConfigurationReloaderTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ssl/SSLConfigurationReloaderTests.java @@ -26,7 +26,6 @@ import javax.net.ssl.SSLContext; import javax.net.ssl.SSLHandshakeException; - import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; @@ -263,7 +262,7 @@ public void testReloadingPEMTrustConfig() throws Exception { try (MockWebServer server = getSslServer(serverKeyPath, serverCertPath, "testnode")) { final Consumer<SSLContext> trustMaterialPreChecks = (context) -> { try (CloseableHttpClient client = HttpClients.custom().setSSLContext(context).build()) { - privilegedConnect(() -> client.execute(new HttpGet("https://localhost:" + server.getPort())).close()); + privilegedConnect(() -> client.execute(new HttpGet("https://localhost:" + server.getPort())));//.close()); } catch (Exception e) { throw new RuntimeException("Exception connecting to the mock server", e); } @@ -480,7 +479,9 @@ private static MockWebServer getSslServer(Path keyStorePath, String keyStorePass try (InputStream is = Files.newInputStream(keyStorePath)) { keyStore.load(is, keyStorePass.toCharArray()); } - final SSLContext sslContext = new SSLContextBuilder().loadKeyMaterial(keyStore, keyStorePass.toCharArray()) + final SSLContext sslContext = new SSLContextBuilder() + .loadKeyMaterial(keyStore, keyStorePass.toCharArray()) + .setProtocol("TLSv1.2") .build(); MockWebServer server = new MockWebServer(sslContext, false); server.enqueue(new MockResponse().setResponseCode(200).setBody("body")); @@ -494,7 +495,9 @@ private static MockWebServer getSslServer(Path keyPath, Path certPath, String pa keyStore.load(null, password.toCharArray()); keyStore.setKeyEntry("testnode_ec", PemUtils.readPrivateKey(keyPath, password::toCharArray), password.toCharArray(), CertParsingUtils.readCertificates(Collections.singletonList(certPath))); - final SSLContext sslContext = new SSLContextBuilder().loadKeyMaterial(keyStore, password.toCharArray()) + final SSLContext sslContext = new SSLContextBuilder() + .loadKeyMaterial(keyStore, password.toCharArray()) + .setProtocol("TLSv1.2") .build(); MockWebServer server = new MockWebServer(sslContext, false); server.enqueue(new MockResponse().setResponseCode(200).setBody("body")); @@ -509,7 +512,10 @@ private static CloseableHttpClient getSSLClient(Path trustStorePath, String trus try (InputStream is = Files.newInputStream(trustStorePath)) { trustStore.load(is, trustStorePass.toCharArray()); } - final SSLContext sslContext = new SSLContextBuilder() +
.loadTrustMaterial(trustStore, null) + .setProtocol("TLSv1.2") + .build(); return HttpClients.custom().setSSLContext(sslContext).build(); } @@ -526,7 +532,10 @@ private static CloseableHttpClient getSSLClient(List trustedCertificatePat for (Certificate cert : CertParsingUtils.readCertificates(trustedCertificatePaths)) { trustStore.setCertificateEntry(cert.toString(), cert); } - final SSLContext sslContext = new SSLContextBuilder().loadTrustMaterial(trustStore, null).build(); + final SSLContext sslContext = new SSLContextBuilder() + .loadTrustMaterial(trustStore, null) + .setProtocol("TLSv1.2") + .build(); return HttpClients.custom().setSSLContext(sslContext).build(); } From 5c58c2508e76347e04a8740e9e88683f5903ee89 Mon Sep 17 00:00:00 2001 From: Luca Cavanna Date: Fri, 1 Feb 2019 16:34:24 +0100 Subject: [PATCH 36/54] Disable bwc tests while backporting #38104 (#38182) Relates to #38180 --- build.gradle | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/build.gradle b/build.gradle index e5bc1ab3ba986..b163c9492f247 100644 --- a/build.gradle +++ b/build.gradle @@ -159,8 +159,8 @@ task verifyVersions { * the enabled state of every bwc task. It should be set back to true * after the backport of the backcompat code is complete. */ -final boolean bwc_tests_enabled = true -final String bwc_tests_disabled_issue = "" /* place a PR link here when committing bwc changes */ +final boolean bwc_tests_enabled = false +final String bwc_tests_disabled_issue = "https://github.com/elastic/elasticsearch/pull/38180" /* place a PR link here when committing bwc changes */ if (bwc_tests_enabled == false) { if (bwc_tests_disabled_issue.isEmpty()) { throw new GradleException("bwc_tests_disabled_issue must be set when bwc_tests_enabled == false") From 1fa413a16df76fef7c17c44e5fa7e547edd56a55 Mon Sep 17 00:00:00 2001 From: David Roberts Date: Fri, 1 Feb 2019 15:36:04 +0000 Subject: [PATCH 37/54] [ML] Remove "8" prefixes from file structure finder timestamp formats (#38016) In 7.x Java timestamp formats are the default timestamp format and there is no need to prefix them with "8". (The "8" prefix was used in 6.7 to distinguish Java timestamp formats from Joda timestamp formats.) This change removes the "8" prefixes from timestamp formats in the output of the ML file structure finder. 
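To make the convention change concrete, here is a minimal, self-contained Java sketch of the 6.7-era prefixing rule that this patch deletes. It is illustrative only: the class and method names are invented, and only the "8"-prefix rule mirrors the removed jodaBwcJavaTimestampFormatsForIngestPipeline helper visible in the diff below (named formats shared by Joda and Java time pass through unchanged; plain Java patterns gain a leading "8").

import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;

// Hypothetical demo class; not part of the patch.
public class TimestampPrefixDemo {

    // Named formats that mean the same thing under Joda and Java time,
    // so they never needed the disambiguating prefix.
    private static final List<String> SHARED_NAMED_FORMATS =
            Arrays.asList("ISO8601", "UNIX_MS", "UNIX", "TAI64N");

    // 6.7 behaviour (removed by this patch): mark plain Java time patterns with "8".
    static List<String> prefixForBwc(List<String> javaTimestampFormats) {
        return javaTimestampFormats.stream()
                .map(f -> SHARED_NAMED_FORMATS.contains(f) ? f : "8" + f)
                .collect(Collectors.toList());
    }

    public static void main(String[] args) {
        List<String> formats = Arrays.asList("yyyy-MM-dd HH:mm:ss", "UNIX_MS");
        System.out.println(prefixForBwc(formats)); // 6.7-era output: [8yyyy-MM-dd HH:mm:ss, UNIX_MS]
        System.out.println(formats);               // 7.x output: patterns emitted verbatim
    }
}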
--- .../ml/apis/find-file-structure.asciidoc | 10 +++++----- .../filestructurefinder/FileStructureUtils.java | 17 +---------------- .../TimestampFormatFinder.java | 3 +-- .../FileStructureUtilsTests.java | 9 +++------ 4 files changed, 10 insertions(+), 29 deletions(-) diff --git a/docs/reference/ml/apis/find-file-structure.asciidoc b/docs/reference/ml/apis/find-file-structure.asciidoc index 9650efff16189..caed632bda0e5 100644 --- a/docs/reference/ml/apis/find-file-structure.asciidoc +++ b/docs/reference/ml/apis/find-file-structure.asciidoc @@ -606,11 +606,11 @@ If the request does not encounter errors, you receive the following result: }, "tpep_dropoff_datetime" : { "type" : "date", - "format" : "8yyyy-MM-dd HH:mm:ss" + "format" : "yyyy-MM-dd HH:mm:ss" }, "tpep_pickup_datetime" : { "type" : "date", - "format" : "8yyyy-MM-dd HH:mm:ss" + "format" : "yyyy-MM-dd HH:mm:ss" }, "trip_distance" : { "type" : "double" @@ -624,7 +624,7 @@ If the request does not encounter errors, you receive the following result: "field" : "tpep_pickup_datetime", "timezone" : "{{ beat.timezone }}", "formats" : [ - "8yyyy-MM-dd HH:mm:ss" + "yyyy-MM-dd HH:mm:ss" ] } } @@ -1398,7 +1398,7 @@ this: "field" : "timestamp", "timezone" : "{{ beat.timezone }}", "formats" : [ - "8yyyy-MM-dd'T'HH:mm:ss,SSS" + "yyyy-MM-dd'T'HH:mm:ss,SSS" ] } }, @@ -1558,7 +1558,7 @@ this: "field" : "timestamp", "timezone" : "{{ beat.timezone }}", "formats" : [ - "8yyyy-MM-dd'T'HH:mm:ss,SSS" + "yyyy-MM-dd'T'HH:mm:ss,SSS" ] } }, diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/FileStructureUtils.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/FileStructureUtils.java index ba22b170ecea0..9172de9dedaa5 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/FileStructureUtils.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/FileStructureUtils.java @@ -353,7 +353,7 @@ public static Map makeIngestPipelineDefinition(String grokPatter if (needClientTimezone) { dateProcessorSettings.put("timezone", "{{ " + BEAT_TIMEZONE_FIELD + " }}"); } - dateProcessorSettings.put("formats", jodaBwcJavaTimestampFormatsForIngestPipeline(timestampFormats)); + dateProcessorSettings.put("formats", timestampFormats); processors.add(Collections.singletonMap("date", dateProcessorSettings)); } @@ -365,19 +365,4 @@ public static Map makeIngestPipelineDefinition(String grokPatter pipeline.put(Pipeline.PROCESSORS_KEY, processors); return pipeline; } - - // TODO: remove this method when Java time formats are the default - static List jodaBwcJavaTimestampFormatsForIngestPipeline(List javaTimestampFormats) { - return javaTimestampFormats.stream().map(format -> { - switch (format) { - case "ISO8601": - case "UNIX_MS": - case "UNIX": - case "TAI64N": - return format; - default: - return "8" + format; - } - }).collect(Collectors.toList()); - } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/TimestampFormatFinder.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/TimestampFormatFinder.java index 07dba7dcb2c64..c19a93a7be99e 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/TimestampFormatFinder.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/TimestampFormatFinder.java @@ -472,8 +472,7 @@ public Map getEsDateMappingTypeWithFormat() { case "UNIX": return Stream.of("epoch_second"); 
default: - // TODO: remove the "8" prefix when Java time formats are the default - return Stream.of("8" + format); + return Stream.of(format); } }).collect(Collectors.joining("||")); if (formats.isEmpty() == false) { diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/FileStructureUtilsTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/FileStructureUtilsTests.java index 8140d2fa6034f..264521e68fb51 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/FileStructureUtilsTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/FileStructureUtilsTests.java @@ -331,8 +331,7 @@ public void testGuessMappingsAndCalculateFieldStats() { assertEquals(Collections.singletonMap(FileStructureUtils.MAPPING_TYPE_SETTING, "keyword"), mappings.get("foo")); Map<String, String> expectedTimeMapping = new HashMap<>(); expectedTimeMapping.put(FileStructureUtils.MAPPING_TYPE_SETTING, "date"); - // TODO: remove the "8" prefix when Java time formats are the default - expectedTimeMapping.put(FileStructureUtils.MAPPING_FORMAT_SETTING, "8" + "yyyy-MM-dd HH:mm:ss,SSS"); + expectedTimeMapping.put(FileStructureUtils.MAPPING_FORMAT_SETTING, "yyyy-MM-dd HH:mm:ss,SSS"); assertEquals(expectedTimeMapping, mappings.get("time")); assertEquals(Collections.singletonMap(FileStructureUtils.MAPPING_TYPE_SETTING, "long"), mappings.get("bar")); assertNull(mappings.get("nothing")); @@ -372,8 +371,7 @@ public void testMakeIngestPipelineDefinitionGivenStructuredWithTimestamp() { assertNotNull(dateProcessor); assertEquals(timestampField, dateProcessor.get("field")); assertEquals(needClientTimezone, dateProcessor.containsKey("timezone")); - // TODO: remove the call to jodaBwcJavaTimestampFormatsForIngestPipeline() when Java time formats are the default - assertEquals(FileStructureUtils.jodaBwcJavaTimestampFormatsForIngestPipeline(timestampFormats), dateProcessor.get("formats")); + assertEquals(timestampFormats, dateProcessor.get("formats")); // After removing the two expected fields there should be nothing left in the pipeline assertEquals(Collections.emptyMap(), pipeline); @@ -406,8 +404,7 @@ public void testMakeIngestPipelineDefinitionGivenSemiStructured() { assertNotNull(dateProcessor); assertEquals(timestampField, dateProcessor.get("field")); assertEquals(needClientTimezone, dateProcessor.containsKey("timezone")); - // TODO: remove the call to jodaBwcJavaTimestampFormatsForIngestPipeline() when Java time formats are the default - assertEquals(FileStructureUtils.jodaBwcJavaTimestampFormatsForIngestPipeline(timestampFormats), dateProcessor.get("formats")); + assertEquals(timestampFormats, dateProcessor.get("formats")); Map<String, Object> removeProcessor = (Map<String, Object>) processors.get(2).get("remove"); assertNotNull(removeProcessor); From 603cdf40f182ac876414ae2b910f3ddca90ab667 Mon Sep 17 00:00:00 2001 From: Nick Knize Date: Fri, 1 Feb 2019 10:41:41 -0600 Subject: [PATCH 38/54] Update geo_shape docs to include unsupported features (#38138) There are two major features that are not yet supported by BKD-backed geo_shape: MultiPoint queries, and the CONTAINS relation. It is important that we are explicitly clear in the documentation that the new approach may not work for users that depend on these features. This commit adds an IMPORTANT NOTE section to the geo_shape docs that explicitly highlights these missing features and what should be done if they are an absolute necessity.
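As a rough illustration of the MultiPoint workaround recommended above, the hedged sketch below emulates a multi-point geo_shape search with a bool query containing one geo_shape point clause per coordinate, sent through the low-level REST client. Nothing here comes from the patch itself: the index name, field name, host, and coordinates are placeholders.

import org.apache.http.HttpHost;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;

// Hypothetical example of the suggested bool-query decomposition.
public class MultiPointWorkaround {

    public static void main(String[] args) throws Exception {
        // Matching any single point satisfies the query, which matches the
        // MultiPoint semantics the new indexing approach cannot serve yet.
        String body = "{ \"query\": { \"bool\": { \"minimum_should_match\": 1, \"should\": ["
                + pointClause(-77.03653, 38.897676) + ","
                + pointClause(-77.009051, 38.889939)
                + "] } } }";
        try (RestClient client = RestClient.builder(new HttpHost("localhost", 9200)).build()) {
            Request request = new Request("GET", "/example/_search");
            request.setJsonEntity(body);
            Response response = client.performRequest(request);
            System.out.println(response.getStatusLine());
        }
    }

    // One geo_shape clause per point of the original MultiPoint.
    private static String pointClause(double lon, double lat) {
        return "{ \"geo_shape\": { \"location\": { \"shape\": "
                + "{ \"type\": \"point\", \"coordinates\": [" + lon + ", " + lat + "] }, "
                + "\"relation\": \"intersects\" } } }";
    }
}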
--- docs/reference/mapping/types/geo-shape.asciidoc | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/docs/reference/mapping/types/geo-shape.asciidoc b/docs/reference/mapping/types/geo-shape.asciidoc index a740b8c3b41a0..a46b8a3f8a87c 100644 --- a/docs/reference/mapping/types/geo-shape.asciidoc +++ b/docs/reference/mapping/types/geo-shape.asciidoc @@ -21,7 +21,7 @@ type. |======================================================================= |Option |Description| Default -|`tree |deprecated[6.6, PrefixTrees no longer used] Name of the PrefixTree +|`tree` |deprecated[6.6, PrefixTrees no longer used] Name of the PrefixTree implementation to be used: `geohash` for GeohashPrefixTree and `quadtree` for QuadPrefixTree. Note: This parameter is only relevant for `term` and `recursive` strategies. @@ -127,6 +127,20 @@ the `tree` or `strategy` parameters according to the appropriate <>. Note that these parameters are now deprecated and will be removed in a future version. +*IMPORTANT NOTES* + +The following features are not yet supported with the new indexing approach: + +* `geo_shape` query with `MultiPoint` geometry types - Elasticsearch currently prevents searching + geo_shape fields with a MultiPoint geometry type to avoid a brute force linear search + over each individual point. For now, if this is absolutely needed, this can be achieved + using a `bool` query with each individual point. + +* `CONTAINS` relation query - when using the new default vector indexing strategy, `geo_shape` + queries with `relation` defined as `contains` are not yet supported. If this query relation + is an absolute necessity, it is recommended to set `strategy` to `quadtree` and use the + deprecated PrefixTree strategy indexing approach. + [[prefix-trees]] [float] ==== Prefix trees From cc7c42d7e2e221ceacbb368c027aae60f1b761eb Mon Sep 17 00:00:00 2001 From: Shaunak Kashyap Date: Fri, 1 Feb 2019 08:56:34 -0800 Subject: [PATCH 39/54] Allow built-in monitoring_user role to call GET _xpack API (#38060) This PR adds the `monitor/xpack/info` cluster-level privilege to the built-in `monitoring_user` role. This privilege is required for the Monitoring UI to call the `GET _xpack API` on the Monitoring Cluster. It needs to do this in order to determine the license of the Monitoring Cluster, which further determines whether Cluster Alerts are shown to the user or not. Resolves #37970. 
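For context, here is a minimal low-level REST client sketch of the call this privilege unlocks. The GET _xpack endpoint comes from the description above; the host, port, user name, and password are placeholders, not values from this patch.

import org.apache.http.HttpHost;
import org.apache.http.auth.AuthScope;
import org.apache.http.auth.UsernamePasswordCredentials;
import org.apache.http.impl.client.BasicCredentialsProvider;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;

// Hypothetical example: a user whose only role is monitoring_user calls
// GET _xpack on the Monitoring Cluster, which this change now authorizes.
public class XPackInfoAsMonitoringUser {

    public static void main(String[] args) throws Exception {
        BasicCredentialsProvider credentials = new BasicCredentialsProvider();
        credentials.setCredentials(AuthScope.ANY,
                new UsernamePasswordCredentials("monitoring-user-name", "password")); // placeholders
        try (RestClient client = RestClient.builder(new HttpHost("localhost", 9200))
                .setHttpClientConfigCallback(b -> b.setDefaultCredentialsProvider(credentials))
                .build()) {
            // Before this change the request failed authorization for such a user;
            // it now returns the license and feature info the Monitoring UI needs.
            Response response = client.performRequest(new Request("GET", "/_xpack"));
            System.out.println(response.getStatusLine());
        }
    }
}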
--- .../xpack/core/security/authz/store/ReservedRolesStore.java | 2 +- .../core/security/authz/store/ReservedRolesStoreTests.java | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStore.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStore.java index 2c30b5fe1affe..9cb25f6a221d0 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStore.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStore.java @@ -53,7 +53,7 @@ private static Map initializeReservedRoles() { null, null, MetadataUtils.DEFAULT_RESERVED_METADATA, null)) .put("monitoring_user", new RoleDescriptor("monitoring_user", - new String[] { "cluster:monitor/main" }, + new String[] { "cluster:monitor/main", "cluster:monitor/xpack/info" }, new RoleDescriptor.IndicesPrivileges[] { RoleDescriptor.IndicesPrivileges.builder() .indices(".monitoring-*").privileges("read", "read_cross_cluster").build() diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java index 35e2043acd809..f0da0c5775e1f 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java @@ -47,6 +47,7 @@ import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.transport.TransportRequest; +import org.elasticsearch.xpack.core.action.XPackInfoAction; import org.elasticsearch.xpack.core.ml.MlMetaIndex; import org.elasticsearch.xpack.core.ml.action.CloseJobAction; import org.elasticsearch.xpack.core.ml.action.DeleteCalendarAction; @@ -405,6 +406,7 @@ public void testMonitoringUserRole() { Role monitoringUserRole = Role.builder(roleDescriptor, null).build(); assertThat(monitoringUserRole.cluster().check(MainAction.NAME, request), is(true)); + assertThat(monitoringUserRole.cluster().check(XPackInfoAction.NAME, request), is(true)); assertThat(monitoringUserRole.cluster().check(ClusterHealthAction.NAME, request), is(false)); assertThat(monitoringUserRole.cluster().check(ClusterStateAction.NAME, request), is(false)); assertThat(monitoringUserRole.cluster().check(ClusterStatsAction.NAME, request), is(false)); From f5f3cb8f4c9f63401f8aec7262ac85797b661cec Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Fri, 1 Feb 2019 12:00:43 -0500 Subject: [PATCH 40/54] AwaitsFix PUT mapping with _doc on an index that has types (#38204) Tracked at #38202 --- .../test/indices.put_mapping/20_mix_typeless_typeful.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_mapping/20_mix_typeless_typeful.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_mapping/20_mix_typeless_typeful.yml index 13cb3321841cf..7c6136d273979 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_mapping/20_mix_typeless_typeful.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_mapping/20_mix_typeless_typeful.yml @@ -55,8 +55,8 @@ "PUT mapping with _doc on an index that has types": - skip: - 
version: " - 6.6.99" - reason: include_type_name is only supported as of 6.7 + version: "all" + reason: include_type_name is only supported as of 6.7 # AwaitsFix: https://github.com/elastic/elasticsearch/issues/38202 - do: From 5db305023df7c7d729d4d175fb4a775a0d851ff9 Mon Sep 17 00:00:00 2001 From: Benjamin Trent Date: Fri, 1 Feb 2019 11:16:35 -0600 Subject: [PATCH 41/54] ML: Fix error race condition on stop _all datafeeds and close _all jobs (#38113) * ML: Ignore when task is not found for _all * Addressing PR comments * Update TransportStopDatafeedAction.java --- .../xpack/ml/action/TransportCloseJobAction.java | 14 ++++++++++++-- .../ml/action/TransportStopDatafeedAction.java | 14 ++++++++++++-- 2 files changed, 24 insertions(+), 4 deletions(-) diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportCloseJobAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportCloseJobAction.java index 1076533660273..1a8aea05c458b 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportCloseJobAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportCloseJobAction.java @@ -6,6 +6,7 @@ package org.elasticsearch.xpack.ml.action; import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionListenerResponseHandler; import org.elasticsearch.action.FailedNodeException; @@ -16,6 +17,7 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.AtomicArray; @@ -272,7 +274,12 @@ protected void taskOperation(CloseJobAction.Request request, TransportOpenJobAct threadPool.executor(MachineLearning.UTILITY_THREAD_POOL_NAME).execute(new AbstractRunnable() { @Override public void onFailure(Exception e) { - listener.onFailure(e); + if (e instanceof ResourceNotFoundException && Strings.isAllOrWildcard(new String[]{request.getJobId()})) { + jobTask.closeJob("close job (api)"); + listener.onResponse(new CloseJobAction.Response(true)); + } else { + listener.onFailure(e); + } } @Override @@ -332,7 +339,10 @@ public void onResponse(PersistentTasksCustomMetaData.PersistentTask task) { @Override public void onFailure(Exception e) { final int slot = counter.incrementAndGet(); - failures.set(slot - 1, e); + if ((e instanceof ResourceNotFoundException && + Strings.isAllOrWildcard(new String[]{request.getJobId()})) == false) { + failures.set(slot - 1, e); + } if (slot == numberOfJobs) { sendResponseOrFailure(request.getJobId(), listener, failures); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStopDatafeedAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStopDatafeedAction.java index 63c47996881c2..636138a855bce 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStopDatafeedAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStopDatafeedAction.java @@ -16,6 +16,7 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.service.ClusterService; 
+import org.elasticsearch.common.Strings; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.AtomicArray; @@ -187,7 +188,10 @@ public void onResponse(PersistentTasksCustomMetaData.PersistentTask persisten @Override public void onFailure(Exception e) { final int slot = counter.incrementAndGet(); - failures.set(slot - 1, e); + if ((e instanceof ResourceNotFoundException && + Strings.isAllOrWildcard(new String[]{request.getDatafeedId()})) == false) { + failures.set(slot - 1, e); + } if (slot == startedDatafeeds.size()) { sendResponseOrFailure(request.getDatafeedId(), listener, failures); } @@ -215,7 +219,13 @@ protected void taskOperation(StopDatafeedAction.Request request, TransportStartD threadPool.executor(MachineLearning.UTILITY_THREAD_POOL_NAME).execute(new AbstractRunnable() { @Override public void onFailure(Exception e) { - listener.onFailure(e); + if ((e instanceof ResourceNotFoundException && + Strings.isAllOrWildcard(new String[]{request.getDatafeedId()}))) { + datafeedTask.stop("stop_datafeed (api)", request.getStopTimeout()); + listener.onResponse(new StopDatafeedAction.Response(true)); + } else { + listener.onFailure(e); + } } @Override From 04dc41b99e8fa7b85e7f5cf8e0655f1459a4c943 Mon Sep 17 00:00:00 2001 From: Andrey Ershov Date: Fri, 1 Feb 2019 18:18:11 +0100 Subject: [PATCH 42/54] Zen2ify RareClusterStateIT (#38184) In Zen 1 there are a commit timeout and a publish timeout, and these settings could be changed on-the-fly. In Zen 2, there is only a commit timeout, and this setting is static. RareClusterStateIT actively used these settings and relied on the fact that they are dynamic. This commit adds a cancelCommittedPublication method to Coordinator to be used by tests. This method cancels the current committed publication if there is one. When there is BlockClusterStateProcessing on the non-master node, the publication will be accepted and committed, but not yet applied, so we can use the method above to cancel it. Also, this commit replaces callback + AtomicReference with ActionFuture, which makes the test code easier to read. --- .../cluster/coordination/Coordinator.java | 15 ++ .../coordination}/RareClusterStateIT.java | 151 ++++++------------ 2 files changed, 66 insertions(+), 100 deletions(-) rename server/src/test/java/org/elasticsearch/{indices/state => cluster/coordination}/RareClusterStateIT.java (73%) diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java b/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java index d73d33a0635c0..231f5555e8ff1 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java @@ -1131,6 +1131,21 @@ public Iterable<DiscoveryNode> getFoundPeers() { return peerFinder.getFoundPeers(); } + /** + * If there is any current committed publication, this method cancels it. + * This method is used exclusively by tests. + * @return true if publication was cancelled, false if there is no current committed publication.
+ */ + boolean cancelCommittedPublication() { + synchronized (mutex) { + if (currentPublication.isPresent() && currentPublication.get().isCommitted()) { + currentPublication.get().cancel("cancelCommittedPublication"); + return true; + } + return false; + } + } + class CoordinatorPublication extends Publication { private final PublishRequest publishRequest; diff --git a/server/src/test/java/org/elasticsearch/indices/state/RareClusterStateIT.java b/server/src/test/java/org/elasticsearch/cluster/coordination/RareClusterStateIT.java similarity index 73% rename from server/src/test/java/org/elasticsearch/indices/state/RareClusterStateIT.java rename to server/src/test/java/org/elasticsearch/cluster/coordination/RareClusterStateIT.java index d2f65d1168da8..49b4086372d21 100644 --- a/server/src/test/java/org/elasticsearch/indices/state/RareClusterStateIT.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/RareClusterStateIT.java @@ -17,12 +17,14 @@ * under the License. */ -package org.elasticsearch.indices.state; +package org.elasticsearch.cluster.coordination; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.Version; import org.elasticsearch.action.ActionFuture; -import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestBuilder; +import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.index.IndexResponse; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.cluster.ClusterState; @@ -40,7 +42,7 @@ import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.discovery.DiscoverySettings; +import org.elasticsearch.discovery.Discovery; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.mapper.DocumentMapper; @@ -51,10 +53,9 @@ import org.elasticsearch.test.disruption.BlockClusterStateProcessing; import org.elasticsearch.test.junit.annotations.TestLogging; -import java.util.Arrays; import java.util.List; import java.util.Map; -import java.util.concurrent.atomic.AtomicReference; +import java.util.concurrent.TimeUnit; import static java.util.Collections.emptyMap; import static java.util.Collections.emptySet; @@ -86,7 +87,7 @@ protected int numberOfReplicas() { return 0; } - public void testAssignmentWithJustAddedNodes() throws Exception { + public void testAssignmentWithJustAddedNodes() { internalCluster().startNode(); final String index = "index"; prepareCreate(index).setSettings(Settings.builder().put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) @@ -149,22 +150,20 @@ public void onFailure(String source, Exception e) { }); } + private <Req extends ActionRequest, Res extends ActionResponse> ActionFuture<Res> executeAndCancelCommittedPublication( + ActionRequestBuilder<Req, Res> req) throws Exception { + ActionFuture<Res> future = req.execute(); + assertBusy(() -> assertTrue(((Coordinator)internalCluster().getMasterNodeInstance(Discovery.class)).cancelCommittedPublication())); + return future; + } + public void testDeleteCreateInOneBulk() throws Exception { - internalCluster().startMasterOnlyNode(Settings.builder() - .put(TestZenDiscovery.USE_ZEN2.getKey(), false) // TODO: convert test to support Zen2 - .build()); - String dataNode = internalCluster().startDataOnlyNode(Settings.builder() - .put(TestZenDiscovery.USE_ZEN2.getKey(), false) // TODO: convert test to support Zen2 - .build()); +
internalCluster().startMasterOnlyNode(); + String dataNode = internalCluster().startDataOnlyNode(); assertFalse(client().admin().cluster().prepareHealth().setWaitForNodes("2").get().isTimedOut()); prepareCreate("test").setSettings(Settings.builder().put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)).addMapping("type").get(); ensureGreen("test"); - // now that the cluster is stable, remove publishing timeout - assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder() - .put(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey(), "0") - .put(DiscoverySettings.COMMIT_TIMEOUT_SETTING.getKey(), "30s"))); - // block none master node. BlockClusterStateProcessing disruption = new BlockClusterStateProcessing(dataNode, random()); internalCluster().setDisruptionScheme(disruption); @@ -173,10 +172,14 @@ public void testDeleteCreateInOneBulk() throws Exception { refresh(); disruption.startDisrupting(); logger.info("--> delete index and recreate it"); - assertFalse(client().admin().indices().prepareDelete("test").setTimeout("200ms").get().isAcknowledged()); - assertFalse(prepareCreate("test").setTimeout("200ms").setSettings(Settings.builder().put(IndexMetaData - .SETTING_NUMBER_OF_REPLICAS, 0).put(IndexMetaData.SETTING_WAIT_FOR_ACTIVE_SHARDS.getKey(), "0")).get().isAcknowledged()); + executeAndCancelCommittedPublication(client().admin().indices().prepareDelete("test").setTimeout("0s")) + .get(10, TimeUnit.SECONDS); + executeAndCancelCommittedPublication(prepareCreate("test").setSettings(Settings.builder().put(IndexMetaData + .SETTING_NUMBER_OF_REPLICAS, 0).put(IndexMetaData.SETTING_WAIT_FOR_ACTIVE_SHARDS.getKey(), "0")).setTimeout("0s")) + .get(10, TimeUnit.SECONDS); + logger.info("--> letting cluster proceed"); + disruption.stopDisrupting(); ensureGreen(TimeValue.timeValueMinutes(30), "test"); // due to publish_timeout of 0, wait for data node to have cluster state fully applied @@ -196,12 +199,7 @@ public void testDelayedMappingPropagationOnPrimary() throws Exception { // but the change might not be on the node that performed the indexing // operation yet - Settings settings = Settings.builder() - .put(DiscoverySettings.COMMIT_TIMEOUT_SETTING.getKey(), "30s") // explicitly set so it won't default to publish timeout - .put(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey(), "0s") // don't wait post commit as we are blocking things by design - .put(TestZenDiscovery.USE_ZEN2.getKey(), false) // TODO: convert test to support Zen2 - .build(); - final List nodeNames = internalCluster().startNodes(2, settings); + final List nodeNames = internalCluster().startNodes(2); assertFalse(client().admin().cluster().prepareHealth().setWaitForNodes("2").get().isTimedOut()); final String master = internalCluster().getMasterName(); @@ -242,19 +240,10 @@ public void testDelayedMappingPropagationOnPrimary() throws Exception { disruption.startDisrupting(); // Add a new mapping... 
- final AtomicReference putMappingResponse = new AtomicReference<>(); - client().admin().indices().preparePutMapping("index").setType("type").setSource("field", "type=long").execute( - new ActionListener() { - @Override - public void onResponse(AcknowledgedResponse response) { - putMappingResponse.set(response); - } + ActionFuture putMappingResponse = + executeAndCancelCommittedPublication(client().admin().indices().preparePutMapping("index") + .setType("type").setSource("field", "type=long")); - @Override - public void onFailure(Exception e) { - putMappingResponse.set(e); - } - }); // ...and wait for mappings to be available on master assertBusy(() -> { ImmutableOpenMap indexMappings = client().admin().indices() @@ -273,36 +262,24 @@ public void onFailure(Exception e) { assertNotNull(fieldMapping); }); - final AtomicReference docIndexResponse = new AtomicReference<>(); - client().prepareIndex("index", "type", "1").setSource("field", 42).execute(new ActionListener() { - @Override - public void onResponse(IndexResponse response) { - docIndexResponse.set(response); - } - - @Override - public void onFailure(Exception e) { - docIndexResponse.set(e); - } - }); + // this request does not change the cluster state, because mapping is already created, + // we don't await and cancel committed publication + ActionFuture docIndexResponse = + client().prepareIndex("index", "type", "1").setSource("field", 42).execute(); // Wait a bit to make sure that the reason why we did not get a response // is that cluster state processing is blocked and not just that it takes // time to process the indexing request Thread.sleep(100); - assertThat(putMappingResponse.get(), equalTo(null)); - assertThat(docIndexResponse.get(), equalTo(null)); + assertFalse(putMappingResponse.isDone()); + assertFalse(docIndexResponse.isDone()); // Now make sure the indexing request finishes successfully disruption.stopDisrupting(); assertBusy(() -> { - assertThat(putMappingResponse.get(), instanceOf(AcknowledgedResponse.class)); - AcknowledgedResponse resp = (AcknowledgedResponse) putMappingResponse.get(); - assertTrue(resp.isAcknowledged()); - assertThat(docIndexResponse.get(), instanceOf(IndexResponse.class)); - IndexResponse docResp = (IndexResponse) docIndexResponse.get(); - assertEquals(Arrays.toString(docResp.getShardInfo().getFailures()), - 1, docResp.getShardInfo().getTotal()); + assertTrue(putMappingResponse.get(10, TimeUnit.SECONDS).isAcknowledged()); + assertThat(docIndexResponse.get(10, TimeUnit.SECONDS), instanceOf(IndexResponse.class)); + assertEquals(1, docIndexResponse.get(10, TimeUnit.SECONDS).getShardInfo().getTotal()); }); } @@ -312,12 +289,7 @@ public void testDelayedMappingPropagationOnReplica() throws Exception { // Here we want to test that everything goes well if the mappings that // are needed for a document are not available on the replica at the // time of indexing it - final List nodeNames = internalCluster().startNodes(2, - Settings.builder() - .put(DiscoverySettings.COMMIT_TIMEOUT_SETTING.getKey(), "30s") // explicitly set so it won't default to publish timeout - .put(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey(), "0s") // don't wait post commit as we are blocking things by design - .put(TestZenDiscovery.USE_ZEN2.getKey(), false) // TODO: convert test to support Zen2 - .build()); + final List nodeNames = internalCluster().startNodes(2); assertFalse(client().admin().cluster().prepareHealth().setWaitForNodes("2").get().isTimedOut()); final String master = internalCluster().getMasterName(); @@ -359,19 
+331,10 @@ public void testDelayedMappingPropagationOnReplica() throws Exception { BlockClusterStateProcessing disruption = new BlockClusterStateProcessing(otherNode, random()); internalCluster().setDisruptionScheme(disruption); disruption.startDisrupting(); - final AtomicReference putMappingResponse = new AtomicReference<>(); - client().admin().indices().preparePutMapping("index").setType("type").setSource("field", "type=long").execute( - new ActionListener() { - @Override - public void onResponse(AcknowledgedResponse response) { - putMappingResponse.set(response); - } + final ActionFuture putMappingResponse = + executeAndCancelCommittedPublication(client().admin().indices().preparePutMapping("index") + .setType("type").setSource("field", "type=long")); - @Override - public void onFailure(Exception e) { - putMappingResponse.set(e); - } - }); final Index index = resolveIndex("index"); // Wait for mappings to be available on master assertBusy(() -> { @@ -384,25 +347,17 @@ public void onFailure(Exception e) { assertNotNull(mapper.mappers().getMapper("field")); }); - final AtomicReference docIndexResponse = new AtomicReference<>(); - client().prepareIndex("index", "type", "1").setSource("field", 42).execute(new ActionListener() { - @Override - public void onResponse(IndexResponse response) { - docIndexResponse.set(response); - } - - @Override - public void onFailure(Exception e) { - docIndexResponse.set(e); - } - }); + final ActionFuture docIndexResponse = client().prepareIndex("index", "type", "1").setSource("field", 42).execute(); assertBusy(() -> assertTrue(client().prepareGet("index", "type", "1").get().isExists())); // index another document, this time using dynamic mappings. // The ack timeout of 0 on dynamic mapping updates makes it possible for the document to be indexed on the primary, even // if the dynamic mapping update is not applied on the replica yet. 
- ActionFuture dynamicMappingsFut = client().prepareIndex("index", "type", "2").setSource("field2", 42).execute(); + // this request does not change the cluster state, because the mapping is dynamic, + // we need to await and cancel committed publication + ActionFuture dynamicMappingsFut = + executeAndCancelCommittedPublication(client().prepareIndex("index", "type", "2").setSource("field2", 42)); // ...and wait for second mapping to be available on master assertBusy(() -> { @@ -421,22 +376,18 @@ public void onFailure(Exception e) { // We wait on purpose to make sure that the document is not indexed because the shard operation is stalled // and not just because it takes time to replicate the indexing request to the replica Thread.sleep(100); - assertThat(putMappingResponse.get(), equalTo(null)); - assertThat(docIndexResponse.get(), equalTo(null)); + assertFalse(putMappingResponse.isDone()); + assertFalse(docIndexResponse.isDone()); // Now make sure the indexing request finishes successfully disruption.stopDisrupting(); assertBusy(() -> { - assertThat(putMappingResponse.get(), instanceOf(AcknowledgedResponse.class)); - AcknowledgedResponse resp = (AcknowledgedResponse) putMappingResponse.get(); - assertTrue(resp.isAcknowledged()); - assertThat(docIndexResponse.get(), instanceOf(IndexResponse.class)); - IndexResponse docResp = (IndexResponse) docIndexResponse.get(); - assertEquals(Arrays.toString(docResp.getShardInfo().getFailures()), - 2, docResp.getShardInfo().getTotal()); // both shards should have succeeded + assertTrue(putMappingResponse.get(10, TimeUnit.SECONDS).isAcknowledged()); + assertThat(docIndexResponse.get(10, TimeUnit.SECONDS), instanceOf(IndexResponse.class)); + assertEquals(2, docIndexResponse.get(10, TimeUnit.SECONDS).getShardInfo().getTotal()); // both shards should have succeeded }); - assertThat(dynamicMappingsFut.get().getResult(), equalTo(CREATED)); + assertThat(dynamicMappingsFut.get(10, TimeUnit.SECONDS).getResult(), equalTo(CREATED)); } } From 70235838d1db84295c64eca1651c10fef833aa50 Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Fri, 1 Feb 2019 12:50:07 -0500 Subject: [PATCH 43/54] AwaitsFix testClientSucceedsWithVerificationDisabled (#38213) Tracked at #38212 --- .../elasticsearch/index/reindex/ReindexRestClientSslTests.java | 1 + 1 file changed, 1 insertion(+) diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexRestClientSslTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexRestClientSslTests.java index f71d124986699..87ab4b3241410 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexRestClientSslTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexRestClientSslTests.java @@ -143,6 +143,7 @@ public void testClientSucceedsWithCertificateAuthorities() throws IOException { } } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/38212") public void testClientSucceedsWithVerificationDisabled() throws IOException { assertFalse("Cannot disable verification in FIPS JVM", inFipsJvm()); final List threads = new ArrayList<>(); From ee57420de6e25f83d412d8d4cf204eea0012f952 Mon Sep 17 00:00:00 2001 From: Luca Cavanna Date: Fri, 1 Feb 2019 19:23:13 +0100 Subject: [PATCH 44/54] Adjust SearchRequest version checks (#38181) The finalReduce flag is now supported on 6.x too, hence we need to update the version checks in master. 
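The pattern being adjusted is ordinary wire-version gating. Below is a simplified sketch, not the actual SearchRequest code: once 6.7 understands a field, the guard drops from the V_7_0_0 check to the 6.7 branch, and readers fall back to a default when talking to older nodes.

import java.io.IOException;

import org.elasticsearch.Version;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;

// Illustrative class; the field is modeled on the finalReduce flag.
class VersionGatedFlag {

    private boolean finalReduce = true;

    void readFrom(StreamInput in) throws IOException {
        if (in.getVersion().onOrAfter(Version.V_6_7_0)) {
            finalReduce = in.readBoolean(); // peers on 6.7+ send the flag
        } else {
            finalReduce = true;             // older peers: assume the default
        }
    }

    void writeTo(StreamOutput out) throws IOException {
        if (out.getVersion().onOrAfter(Version.V_6_7_0)) {
            out.writeBoolean(finalReduce);  // only send what the peer can read
        }
    }
}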
--- .../elasticsearch/action/search/SearchRequest.java | 12 +++--------- .../action/search/SearchRequestTests.java | 6 +----- .../search/TransportSearchActionSingleNodeTests.java | 1 + 3 files changed, 5 insertions(+), 14 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java b/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java index 602a7123d0014..64627ee4977ea 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java @@ -205,17 +205,14 @@ public SearchRequest(StreamInput in) throws IOException { localClusterAlias = in.readOptionalString(); if (localClusterAlias != null) { absoluteStartMillis = in.readVLong(); + finalReduce = in.readBoolean(); } else { absoluteStartMillis = DEFAULT_ABSOLUTE_START_MILLIS; + finalReduce = true; } } else { localClusterAlias = null; absoluteStartMillis = DEFAULT_ABSOLUTE_START_MILLIS; - } - //TODO move to the 6_7_0 branch once backported to 6.x - if (in.getVersion().onOrAfter(Version.V_7_0_0)) { - finalReduce = in.readBoolean(); - } else { finalReduce = true; } if (in.getVersion().onOrAfter(Version.V_7_0_0)) { @@ -245,12 +242,9 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalString(localClusterAlias); if (localClusterAlias != null) { out.writeVLong(absoluteStartMillis); + out.writeBoolean(finalReduce); } } - //TODO move to the 6_7_0 branch once backported to 6.x - if (out.getVersion().onOrAfter(Version.V_7_0_0)) { - out.writeBoolean(finalReduce); - } if (out.getVersion().onOrAfter(Version.V_7_0_0)) { out.writeBoolean(ccsMinimizeRoundtrips); } diff --git a/server/src/test/java/org/elasticsearch/action/search/SearchRequestTests.java b/server/src/test/java/org/elasticsearch/action/search/SearchRequestTests.java index c139b75f45c42..df9725ce89bff 100644 --- a/server/src/test/java/org/elasticsearch/action/search/SearchRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/SearchRequestTests.java @@ -91,14 +91,10 @@ public void testRandomVersionSerialization() throws IOException { if (version.before(Version.V_6_7_0)) { assertNull(deserializedRequest.getLocalClusterAlias()); assertAbsoluteStartMillisIsCurrentTime(deserializedRequest); + assertTrue(deserializedRequest.isFinalReduce()); } else { assertEquals(searchRequest.getLocalClusterAlias(), deserializedRequest.getLocalClusterAlias()); assertEquals(searchRequest.getOrCreateAbsoluteStartMillis(), deserializedRequest.getOrCreateAbsoluteStartMillis()); - } - //TODO move to the 6_7_0 branch once backported to 6.x - if (version.before(Version.V_7_0_0)) { - assertTrue(deserializedRequest.isFinalReduce()); - } else { assertEquals(searchRequest.isFinalReduce(), deserializedRequest.isFinalReduce()); } } diff --git a/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionSingleNodeTests.java b/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionSingleNodeTests.java index ed14d11946f75..b0980481d38e2 100644 --- a/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionSingleNodeTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionSingleNodeTests.java @@ -171,6 +171,7 @@ public void testFinalReduce() { assertEquals(2, searchResponse.getHits().getTotalHits().value); Aggregations aggregations = searchResponse.getAggregations(); LongTerms longTerms = aggregations.get("terms"); + assertEquals(2, 
longTerms.getBuckets().size()); } } } From f64b20383ed7f86c400a63be3e23145ae4440843 Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Fri, 1 Feb 2019 13:31:17 -0500 Subject: [PATCH 45/54] Replace awaitBusy with assertBusy in atLeastDocsIndexed (#38190) Unlike assertBusy, awaitBusy does not retry if the code-block throws an AssertionError. A refresh in atLeastDocsIndexed can fail because we call this method while we are closing some node in FollowerFailOverIT. --- .../java/org/elasticsearch/xpack/CcrIntegTestCase.java | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/CcrIntegTestCase.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/CcrIntegTestCase.java index 2dccc0e96b7a2..3a13027cb3511 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/CcrIntegTestCase.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/CcrIntegTestCase.java @@ -487,14 +487,15 @@ private Map> getDocIdAndSeqNos(InternalTestClus return docs; } - protected void atLeastDocsIndexed(Client client, String index, long numDocsReplicated) throws InterruptedException { + protected void atLeastDocsIndexed(Client client, String index, long numDocsReplicated) throws Exception { logger.info("waiting for at least [{}] documents to be indexed into index [{}]", numDocsReplicated, index); - awaitBusy(() -> { + assertBusy(() -> { refresh(client, index); SearchRequest request = new SearchRequest(index); request.source(new SearchSourceBuilder().size(0)); SearchResponse response = client.search(request).actionGet(); - return response.getHits().getTotalHits().value >= numDocsReplicated; + assertNotNull(response.getHits().getTotalHits()); + assertThat(response.getHits().getTotalHits().value, greaterThanOrEqualTo(numDocsReplicated)); }, 60, TimeUnit.SECONDS); } From a70f54fc775284a7595360401158605c4590678b Mon Sep 17 00:00:00 2001 From: Benjamin Trent Date: Fri, 1 Feb 2019 12:45:28 -0600 Subject: [PATCH 46/54] Adding ml_settings entry to HLRC and Docs for deprecation_info (#38118) --- .../migration/DeprecationInfoResponse.java | 20 ++++++++++++--- .../org/elasticsearch/client/MigrationIT.java | 1 + .../MigrationClientDocumentationIT.java | 4 +++ .../DeprecationInfoResponseTests.java | 25 ++++++++++++++----- .../migration/get-deprecation-info.asciidoc | 1 + .../migration/apis/deprecation.asciidoc | 6 +++-- 6 files changed, 45 insertions(+), 12 deletions(-) diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/migration/DeprecationInfoResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/migration/DeprecationInfoResponse.java index 05e4be37edcfa..6d2cf4507731f 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/migration/DeprecationInfoResponse.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/migration/DeprecationInfoResponse.java @@ -37,16 +37,19 @@ public class DeprecationInfoResponse { private static final ParseField CLUSTER_SETTINGS = new ParseField("cluster_settings"); private static final ParseField NODE_SETTINGS = new ParseField("node_settings"); private static final ParseField INDEX_SETTINGS = new ParseField("index_settings"); + private static final ParseField ML_SETTINGS = new ParseField("ml_settings"); private final List clusterSettingsIssues; private final List nodeSettingsIssues; private final Map> indexSettingsIssues; + private final List mlSettingsIssues; public DeprecationInfoResponse(List clusterSettingsIssues, List 
nodeSettingsIssues, - Map> indexSettingsIssues) { + Map> indexSettingsIssues, List mlSettingsIssues) { this.clusterSettingsIssues = Objects.requireNonNull(clusterSettingsIssues, "cluster settings issues cannot be null"); this.nodeSettingsIssues = Objects.requireNonNull(nodeSettingsIssues, "node settings issues cannot be null"); this.indexSettingsIssues = Objects.requireNonNull(indexSettingsIssues, "index settings issues cannot be null"); + this.mlSettingsIssues = Objects.requireNonNull(mlSettingsIssues, "ml settings issues cannot be null"); } public List getClusterSettingsIssues() { @@ -61,6 +64,10 @@ public Map> getIndexSettingsIssues() { return indexSettingsIssues; } + public List getMlSettingsIssues() { + return mlSettingsIssues; + } + private static List parseDeprecationIssues(XContentParser parser) throws IOException { List issues = new ArrayList<>(); XContentParser.Token token = null; @@ -76,6 +83,7 @@ public static DeprecationInfoResponse fromXContent(XContentParser parser) throws Map> indexSettings = new HashMap<>(); List clusterSettings = new ArrayList<>(); List nodeSettings = new ArrayList<>(); + List mlSettings = new ArrayList<>(); String fieldName = null; XContentParser.Token token; while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { @@ -85,6 +93,8 @@ public static DeprecationInfoResponse fromXContent(XContentParser parser) throws clusterSettings.addAll(parseDeprecationIssues(parser)); } else if (NODE_SETTINGS.getPreferredName().equals(fieldName)) { nodeSettings.addAll(parseDeprecationIssues(parser)); + } else if (ML_SETTINGS.getPreferredName().equals(fieldName)) { + mlSettings.addAll(parseDeprecationIssues(parser)); } else if (INDEX_SETTINGS.getPreferredName().equals(fieldName)) { // parse out the key/value pairs while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { @@ -96,7 +106,7 @@ public static DeprecationInfoResponse fromXContent(XContentParser parser) throws } } } - return new DeprecationInfoResponse(clusterSettings, nodeSettings, indexSettings); + return new DeprecationInfoResponse(clusterSettings, nodeSettings, indexSettings, mlSettings); } @Override @@ -106,17 +116,19 @@ public boolean equals(Object o) { DeprecationInfoResponse that = (DeprecationInfoResponse) o; return Objects.equals(clusterSettingsIssues, that.clusterSettingsIssues) && Objects.equals(nodeSettingsIssues, that.nodeSettingsIssues) && + Objects.equals(mlSettingsIssues, that.mlSettingsIssues) && Objects.equals(indexSettingsIssues, that.indexSettingsIssues); } @Override public int hashCode() { - return Objects.hash(clusterSettingsIssues, nodeSettingsIssues, indexSettingsIssues); + return Objects.hash(clusterSettingsIssues, nodeSettingsIssues, indexSettingsIssues, mlSettingsIssues); } @Override public String toString() { - return clusterSettingsIssues.toString() + ":" + nodeSettingsIssues.toString() + ":" + indexSettingsIssues.toString(); + return clusterSettingsIssues.toString() + ":" + nodeSettingsIssues.toString() + ":" + indexSettingsIssues.toString() + + ":" + mlSettingsIssues.toString(); } /** diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/MigrationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/MigrationIT.java index 09e3e6f1c848f..350659ae1449a 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/MigrationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/MigrationIT.java @@ -85,6 +85,7 @@ public void testGetDeprecationInfo() throws IOException { 
assertThat(response.getClusterSettingsIssues().size(), equalTo(0)); assertThat(response.getIndexSettingsIssues().size(), equalTo(0)); assertThat(response.getNodeSettingsIssues().size(), equalTo(0)); + assertThat(response.getMlSettingsIssues().size(), equalTo(0)); } /** diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MigrationClientDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MigrationClientDocumentationIT.java index 7b1e92c337e17..ca5e18c376b92 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MigrationClientDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MigrationClientDocumentationIT.java @@ -182,6 +182,8 @@ public void testGetDeprecationInfo() throws IOException, InterruptedException { deprecationInfoResponse.getNodeSettingsIssues(); // <2> Map> indexIssues = deprecationInfoResponse.getIndexSettingsIssues(); // <3> + List mlIssues = + deprecationInfoResponse.getMlSettingsIssues(); // <4> // end::get-deprecation-info-response // tag::get-deprecation-info-execute-listener @@ -195,6 +197,8 @@ public void onResponse(DeprecationInfoResponse deprecationInfoResponse1) { // <1 deprecationInfoResponse.getNodeSettingsIssues(); Map> indexIssues = deprecationInfoResponse.getIndexSettingsIssues(); + List mlIssues = + deprecationInfoResponse.getMlSettingsIssues(); } @Override diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/migration/DeprecationInfoResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/migration/DeprecationInfoResponseTests.java index 054b800d6483c..052066e810634 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/migration/DeprecationInfoResponseTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/migration/DeprecationInfoResponseTests.java @@ -65,6 +65,12 @@ private void toXContent(DeprecationInfoResponse response, XContentBuilder builde } } builder.endObject(); + + builder.startArray("ml_settings"); + for (DeprecationInfoResponse.DeprecationIssue issue : response.getMlSettingsIssues()) { + toXContent(issue, builder); + } + builder.endArray(); } builder.endObject(); } @@ -105,12 +111,14 @@ private List createRandomIssues(boolea } private DeprecationInfoResponse createInstance() { - return new DeprecationInfoResponse(createRandomIssues(true), createRandomIssues(true), createIndexSettingsIssues()); + return new DeprecationInfoResponse(createRandomIssues(true), createRandomIssues(true), createIndexSettingsIssues(), + createRandomIssues(true)); } private DeprecationInfoResponse copyInstance(DeprecationInfoResponse req) { return new DeprecationInfoResponse(new ArrayList<>(req.getClusterSettingsIssues()), - new ArrayList<>(req.getNodeSettingsIssues()), new HashMap<>(req.getIndexSettingsIssues())); + new ArrayList<>(req.getNodeSettingsIssues()), new HashMap<>(req.getIndexSettingsIssues()), + new ArrayList<>(req.getMlSettingsIssues())); } private DeprecationInfoResponse mutateInstance(DeprecationInfoResponse req) { @@ -128,16 +136,21 @@ public void testFromXContent() throws IOException { } public void testNullFailedIndices() { - NullPointerException exception = - expectThrows(NullPointerException.class, () -> new DeprecationInfoResponse(null, null, null)); + NullPointerException exception = expectThrows(NullPointerException.class, + () -> new DeprecationInfoResponse(null, null, null, null)); 
assertEquals("cluster settings issues cannot be null", exception.getMessage()); - exception = expectThrows(NullPointerException.class, () -> new DeprecationInfoResponse(Collections.emptyList(), null, null)); + exception = expectThrows(NullPointerException.class, + () -> new DeprecationInfoResponse(Collections.emptyList(), null, null, null)); assertEquals("node settings issues cannot be null", exception.getMessage()); exception = expectThrows(NullPointerException.class, - () -> new DeprecationInfoResponse(Collections.emptyList(), Collections.emptyList(), null)); + () -> new DeprecationInfoResponse(Collections.emptyList(), Collections.emptyList(), null, null)); assertEquals("index settings issues cannot be null", exception.getMessage()); + + exception = expectThrows(NullPointerException.class, + () -> new DeprecationInfoResponse(Collections.emptyList(), Collections.emptyList(), Collections.emptyMap(), null)); + assertEquals("ml settings issues cannot be null", exception.getMessage()); } public void testEqualsAndHashCode() { diff --git a/docs/java-rest/high-level/migration/get-deprecation-info.asciidoc b/docs/java-rest/high-level/migration/get-deprecation-info.asciidoc index f6e12e5746717..3cda1c2f503d6 100644 --- a/docs/java-rest/high-level/migration/get-deprecation-info.asciidoc +++ b/docs/java-rest/high-level/migration/get-deprecation-info.asciidoc @@ -33,3 +33,4 @@ include-tagged::{doc-tests-file}[{api}-response] <1> a List of Cluster deprecations <2> a List of Node deprecations <3> a Map of key IndexName, value List of deprecations for the index +<4> a list of Machine Learning related deprecations diff --git a/docs/reference/migration/apis/deprecation.asciidoc b/docs/reference/migration/apis/deprecation.asciidoc index 59ba1e97a2e9e..88de3f5d6e3fa 100644 --- a/docs/reference/migration/apis/deprecation.asciidoc +++ b/docs/reference/migration/apis/deprecation.asciidoc @@ -68,7 +68,8 @@ Example response: "details" : "This index is named [logs:apache], which contains the illegal character ':'." } ] - } + }, + "ml_settings" : [ ] } -------------------------------------------------- // NOTCONSOLE @@ -109,7 +110,8 @@ key. Similarly, any node-level warnings are found under `node_settings`. Since only a select subset of your nodes might incorporate these settings, it is important to read the `details` section for more information about which nodes are affected. Index warnings are sectioned off per index and can be filtered -using an index-pattern in the query. +using an index-pattern in the query. Machine Learning related deprecation +warnings can be found under the `ml_settings` key. The following example request shows only index-level deprecations of all `logstash-*` indices: From 9350da98a76a15304b45e11ba97ae34ccadc74e4 Mon Sep 17 00:00:00 2001 From: Boaz Leskes Date: Fri, 1 Feb 2019 20:28:08 +0100 Subject: [PATCH 47/54] Disable bwc preparing to backport of#37977, #37857 and #37872 (#38126) --- build.gradle | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/build.gradle b/build.gradle index b163c9492f247..42a4a42002222 100644 --- a/build.gradle +++ b/build.gradle @@ -160,7 +160,7 @@ task verifyVersions { * after the backport of the backcompat code is complete. 
*/ final boolean bwc_tests_enabled = false -final String bwc_tests_disabled_issue = "https://github.com/elastic/elasticsearch/pull/38180" /* place a PR link here when committing bwc changes */ +final String bwc_tests_disabled_issue = "backport of #37977, #37857 and #37872" /* place a PR link here when committing bwc changes */ if (bwc_tests_enabled == false) { if (bwc_tests_disabled_issue.isEmpty()) { throw new GradleException("bwc_tests_disabled_issue must be set when bwc_tests_enabled == false") From c2e9d13ebd63d9c45135b09583e5aa8555ec2672 Mon Sep 17 00:00:00 2001 From: Julie Tibshirani Date: Fri, 1 Feb 2019 11:44:13 -0800 Subject: [PATCH 48/54] Default include_type_name to false in the yml test harness. (#38058) This PR removes the temporary change we made to the yml test harness in #37285 to automatically set `include_type_name` to `true` in index creation requests if it's not already specified. This is possible now that the vast majority of index creation requests were updated to be typeless in #37611. A few additional tests also needed updating here. Additionally, this PR updates the test harness to set `include_type_name` to `false` in index creation requests when communicating with 6.x nodes. This mirrors the logic added in #37611 to allow for typeless document write requests in test set-up code. With this update in place, we can remove many references to `include_type_name: false` from the yml tests. --- .../test/stats/20_empty_bucket.yml | 1 - .../test/stats/30_single_value_field.yml | 1 - .../test/stats/40_multi_value_field.yml | 1 - .../test/indices.analyze/10_analyze.yml | 1 - .../indices/validate_query/10_synonyms.yml | 1 - .../test/search.query/10_match.yml | 1 - .../test/search.query/20_ngram_search.yml | 2 - .../search.query/30_ngram_highligthing.yml | 1 - .../test/search.query/40_query_string.yml | 1 - .../search.query/50_queries_with_synonyms.yml | 2 - .../test/search.query/60_synonym_graph.yml | 1 - .../test/search.suggest/20_phrase.yml | 1 - .../test/search.suggest/30_synonyms.yml | 1 - .../test/termvectors/10_payloads.yml | 1 - .../test/ingest_geoip/20_geoip_processor.yml | 2 - .../test/lang_expression/20_search.yml | 1 - .../test/lang_mustache/60_typed_keys.yml | 1 - .../test/painless/20_scriptfield.yml | 1 - .../test/painless/50_script_doc_values.yml | 1 - .../painless/60_script_doc_values_binary.yml | 1 - .../painless/70_execute_painless_scripts.yml | 1 - .../test/painless/70_mov_fn_agg.yml | 1 - .../test/painless/80_script_score.yml | 2 - .../painless/90_interval_query_filter.yml | 1 - .../test/dense-vector/10_indexing.yml | 1 - .../test/rank_feature/10_basic.yml | 1 - .../test/rank_features/10_basic.yml | 1 - .../test/scaled_float/10_basic.yml | 1 - .../test/sparse-vector/10_indexing.yml | 1 - .../rest-api-spec/test/11_parent_child.yml | 1 - .../rest-api-spec/test/20_parent_join.yml | 1 - .../resources/rest-api-spec/test/10_basic.yml | 1 - .../test/reindex/20_validation.yml | 7 +- .../test/reindex/95_parent_join.yml | 10 +- .../test/update_by_query/30_new_fields.yml | 8 +- .../test/analysis_icu/20_search.yml | 1 - .../test/analysis_kuromoji/20_search.yml | 9 +- .../test/analysis_nori/20_search.yml | 9 +- .../test/analysis_phonetic/40_search.yml | 1 - .../test/analysis_smartcn/20_search.yml | 1 - .../test/analysis_stempel/20_search.yml | 1 - .../test/analysis_ukrainian/20_search.yml | 1 - .../test/mapper_annotatedtext/10_basic.yml | 1 - .../test/mapper_murmur3/10_basic.yml | 1 - .../test/mapper_size/10_basic.yml | 1 - .../test/multi_cluster/10_basic.yml | 10
.../test/multi_cluster/30_field_caps.yml | 37 ++++--- .../test/multi_cluster/70_skip_shards.yml | 11 +-- .../test/multi_cluster/80_index_name_agg.yml | 2 +- .../test/remote_cluster/10_basic.yml | 97 +++++++++--------- .../test/mixed_cluster/10_basic.yml | 1 - .../test/old_cluster/10_basic.yml | 53 +++++----- .../test/upgraded_cluster/10_basic.yml | 2 +- .../60_pipeline_timestamp_date_mapping.yml | 3 +- .../test/bulk/70_mix_typeless_typeful.yml | 1 + .../test/cat.aliases/10_basic.yml | 1 - .../test/cat.fielddata/10_basic.yml | 1 - .../test/count/20_query_string.yml | 1 - .../rest-api-spec/test/create/70_nested.yml | 1 - .../test/create/71_nested_with_types.yml | 1 + .../test/delete/11_shard_header.yml | 1 - .../rest-api-spec/test/delete/30_routing.yml | 1 - .../rest-api-spec/test/delete/50_refresh.yml | 1 - .../test/delete/70_mix_typeless_typeful.yml | 1 + .../test/explain/30_query_string.yml | 1 - .../explain/31_query_string_with_types.yml | 1 + .../test/explain/40_mix_typeless_typeful.yml | 1 + .../test/field_caps/10_basic.yml | 3 - .../test/get/100_mix_typeless_typeful.yml | 1 + .../test/get/20_stored_fields.yml | 1 - .../test/get/21_stored_fields_with_types.yml | 1 + .../rest-api-spec/test/get/40_routing.yml | 1 - .../test/get/60_realtime_refresh.yml | 1 - .../test/get/70_source_filtering.yml | 1 - .../get/71_source_filtering_with_types.yml | 1 + .../test/get_source/85_source_missing.yml | 1 - .../86_source_missing_with_types.yml | 1 + .../rest-api-spec/test/index/40_routing.yml | 1 - .../rest-api-spec/test/index/60_refresh.yml | 1 - .../test/index/70_mix_typeless_typeful.yml | 2 + .../test/indices.analyze/10_analyze.yml | 1 - .../test/indices.create/10_basic.yml | 7 -- .../20_mix_typeless_typeful.yml | 1 - .../test/indices.get/10_basic.yml | 1 - .../test/indices.get/11_basic_with_types.yml | 1 + .../indices.get_field_mapping/10_basic.yml | 1 - .../11_basic_with_types.yml | 1 + .../20_missing_field.yml | 1 - .../21_missing_field_with_types.yml | 1 + .../40_missing_index.yml | 1 - .../50_field_wildcards.yml | 2 - .../51_field_wildcards_with_types.yml | 2 + .../60_mix_typeless_typeful.yml | 1 + .../test/indices.get_mapping/10_basic.yml | 1 - .../11_basic_with_types.yml | 2 + .../indices.get_mapping/20_missing_type.yml | 3 + .../indices.get_mapping/30_missing_index.yml | 3 - .../test/indices.get_mapping/40_aliases.yml | 1 - .../50_wildcard_expansion.yml | 11 --- .../70_mix_typeless_typeful.yml | 2 +- .../indices.get_template/20_get_missing.yml | 1 - .../test/indices.put_mapping/10_basic.yml | 2 - .../indices.put_mapping/all_path_options.yml | 3 - .../test/indices.shrink/20_source_mapping.yml | 2 - .../test/indices.sort/10_basic.yml | 1 - .../test/indices.split/20_source_mapping.yml | 2 - .../test/indices.stats/13_fields.yml | 1 - .../20_query_string.yml | 1 - .../test/mget/20_stored_fields.yml | 1 - .../test/mget/23_stored_fields_with_types.yml | 1 + .../rest-api-spec/test/mlt/10_basic.yml | 1 - .../test/msearch/20_typed_keys.yml | 2 - .../test/mtermvectors/10_basic.yml | 1 - .../test/mtermvectors/11_basic_with_types.yml | 1 + .../test/mtermvectors/20_deprecated.yml | 1 - .../mtermvectors/21_deprecated_with_types.yml | 1 + .../mtermvectors/30_mix_typeless_typeful.yml | 1 + .../rest-api-spec/test/range/10_basic.yml | 1 - .../search.aggregation/100_avg_metric.yml | 1 - .../test/search.aggregation/10_histogram.yml | 1 - .../search.aggregation/110_max_metric.yml | 1 - .../search.aggregation/120_min_metric.yml | 1 - .../search.aggregation/130_sum_metric.yml | 1 - .../140_value_count_metric.yml 
| 1 - .../search.aggregation/150_stats_metric.yml | 1 - .../160_extended_stats_metric.yml | 1 - .../170_cardinality_metric.yml | 1 - .../180_percentiles_tdigest_metric.yml | 1 - .../190_percentiles_hdr_metric.yml | 1 - .../200_top_hits_metric.yml | 1 - .../test/search.aggregation/20_terms.yml | 2 - .../search.aggregation/220_filters_bucket.yml | 1 - .../test/search.aggregation/230_composite.yml | 1 - .../search.aggregation/240_max_buckets.yml | 1 - .../search.aggregation/260_weighted_avg.yml | 1 - .../270_median_absolute_deviation_metric.yml | 1 - .../test/search.aggregation/30_sig_terms.yml | 2 - .../test/search.aggregation/40_range.yml | 1 - .../test/search.aggregation/50_filter.yml | 1 - .../70_adjacency_matrix.yml | 1 - .../test/search.aggregation/80_typed_keys.yml | 1 - .../test/search.aggregation/90_sig_text.yml | 2 - .../test/search.highlight/10_unified.yml | 1 - .../test/search.highlight/20_fvh.yml | 1 - .../30_max_analyzed_offset.yml | 1 - .../test/search.inner_hits/10_basic.yml | 1 - .../test/search/10_source_filtering.yml | 1 - .../test/search/110_field_collapsing.yml | 1 - .../search/115_multiple_field_collapsing.yml | 1 - .../test/search/120_batch_reduce_size.yml | 1 - .../search/140_pre_filter_search_shards.yml | 3 - .../search/150_rewrite_on_coordinator.yml | 13 +-- .../test/search/160_exists_query.yml | 4 - .../test/search/170_terms_query.yml | 1 - .../search/171_terms_query_with_types.yml | 1 + .../search/180_locale_dependent_mapping.yml | 1 - .../test/search/190_index_prefix_search.yml | 1 - .../test/search/200_ignore_malformed.yml | 1 - .../test/search/200_index_phrase_search.yml | 1 - .../test/search/230_interval_query.yml | 1 - .../test/search/60_query_string.yml | 1 - .../test/search_shards/10_basic.yml | 1 - .../test/suggest/20_completion.yml | 1 - .../rest-api-spec/test/suggest/30_context.yml | 8 +- .../test/suggest/40_typed_keys.yml | 1 - .../50_completion_with_multi_fields.yml | 17 ++-- .../test/termvectors/10_basic.yml | 1 - .../test/termvectors/11_basic_with_types.yml | 1 + .../test/termvectors/20_issue7121.yml | 1 - .../termvectors/21_issue7121_with_types.yml | 1 + .../termvectors/50_mix_typeless_typeful.yml | 1 + .../test/update/11_shard_header.yml | 1 - .../rest-api-spec/test/update/40_routing.yml | 1 - .../rest-api-spec/test/update/60_refresh.yml | 1 - .../test/update/85_fields_meta.yml | 1 - .../test/update/90_mix_typeless_typeful.yml | 2 + .../yaml/ClientYamlTestExecutionContext.java | 66 ++++++++----- .../ClientYamlTestExecutionContextTests.java | 8 ++ .../test/ccr/follow_and_unfollow.yml | 7 +- .../rest-api-spec/test/ccr/follow_stats.yml | 7 +- .../index_directly_into_follower_index.yml | 7 +- .../authenticate/10_field_level_security.yml | 15 ++- .../rest-api-spec/test/graph/10_basic.yml | 10 +- .../test/ml/get_datafeed_stats.yml | 14 ++- .../test/ml/ml_anomalies_default_mappings.yml | 3 - .../test/ml/preview_datafeed.yml | 23 ++--- .../test/ml/set_upgrade_mode.yml | 19 ++-- .../test/ml/start_stop_datafeed.yml | 19 ++-- .../test/rollup/get_rollup_caps.yml | 3 - .../test/rollup/get_rollup_index_caps.yml | 3 - .../test/security/authz/10_index_doc.yml | 52 +++++----- .../test/security/authz/11_delete_doc.yml | 86 +++++++--------- .../test/security/authz/12_index_alias.yml | 75 +++++++------- .../test/security/authz/13_index_datemath.yml | 14 ++- .../test/security/authz/20_get_doc.yml | 80 +++++++-------- .../test/security/authz/21_search_doc.yml | 75 +++++++------- .../security/authz/30_dynamic_put_mapping.yml | 2 - .../test/multi_cluster/10_basic.yml | 10 
+- .../test/multi_cluster/30_field_caps.yml | 37 ++++--- .../test/multi_cluster/60_skip_shards.yml | 11 +-- .../test/remote_cluster/10_basic.yml | 99 +++++++++---------- .../test/upgraded_cluster/10_basic.yml | 1 - .../upgraded_cluster/40_ml_datafeed_crud.yml | 7 +- 203 files changed, 515 insertions(+), 740 deletions(-) diff --git a/modules/aggs-matrix-stats/src/test/resources/rest-api-spec/test/stats/20_empty_bucket.yml b/modules/aggs-matrix-stats/src/test/resources/rest-api-spec/test/stats/20_empty_bucket.yml index 09896371574ad..3fa6c87869234 100644 --- a/modules/aggs-matrix-stats/src/test/resources/rest-api-spec/test/stats/20_empty_bucket.yml +++ b/modules/aggs-matrix-stats/src/test/resources/rest-api-spec/test/stats/20_empty_bucket.yml @@ -2,7 +2,6 @@ "Empty Bucket Aggregation": - do: indices.create: - include_type_name: false index: empty_bucket_idx body: settings: diff --git a/modules/aggs-matrix-stats/src/test/resources/rest-api-spec/test/stats/30_single_value_field.yml b/modules/aggs-matrix-stats/src/test/resources/rest-api-spec/test/stats/30_single_value_field.yml index 630211feb08e0..77e8bf6359f22 100644 --- a/modules/aggs-matrix-stats/src/test/resources/rest-api-spec/test/stats/30_single_value_field.yml +++ b/modules/aggs-matrix-stats/src/test/resources/rest-api-spec/test/stats/30_single_value_field.yml @@ -3,7 +3,6 @@ setup: - do: indices.create: - include_type_name: false index: test body: settings: diff --git a/modules/aggs-matrix-stats/src/test/resources/rest-api-spec/test/stats/40_multi_value_field.yml b/modules/aggs-matrix-stats/src/test/resources/rest-api-spec/test/stats/40_multi_value_field.yml index bc9634d412694..467efce78a467 100644 --- a/modules/aggs-matrix-stats/src/test/resources/rest-api-spec/test/stats/40_multi_value_field.yml +++ b/modules/aggs-matrix-stats/src/test/resources/rest-api-spec/test/stats/40_multi_value_field.yml @@ -3,7 +3,6 @@ setup: - do: indices.create: - include_type_name: false index: test body: settings: diff --git a/modules/analysis-common/src/test/resources/rest-api-spec/test/indices.analyze/10_analyze.yml b/modules/analysis-common/src/test/resources/rest-api-spec/test/indices.analyze/10_analyze.yml index 3ceb26b2bac21..ec00b6d41f1c5 100644 --- a/modules/analysis-common/src/test/resources/rest-api-spec/test/indices.analyze/10_analyze.yml +++ b/modules/analysis-common/src/test/resources/rest-api-spec/test/indices.analyze/10_analyze.yml @@ -23,7 +23,6 @@ - do: indices.create: - include_type_name: false index: test_deprecated_htmlstrip body: settings: diff --git a/modules/analysis-common/src/test/resources/rest-api-spec/test/indices/validate_query/10_synonyms.yml b/modules/analysis-common/src/test/resources/rest-api-spec/test/indices/validate_query/10_synonyms.yml index 7509d9667ef1b..22804be6ab124 100644 --- a/modules/analysis-common/src/test/resources/rest-api-spec/test/indices/validate_query/10_synonyms.yml +++ b/modules/analysis-common/src/test/resources/rest-api-spec/test/indices/validate_query/10_synonyms.yml @@ -2,7 +2,6 @@ "validate query with synonyms": - do: indices.create: - include_type_name: false index: test body: settings: diff --git a/modules/analysis-common/src/test/resources/rest-api-spec/test/search.query/10_match.yml b/modules/analysis-common/src/test/resources/rest-api-spec/test/search.query/10_match.yml index 78e2507e462b8..6609eb831b226 100644 --- a/modules/analysis-common/src/test/resources/rest-api-spec/test/search.query/10_match.yml +++ 
b/modules/analysis-common/src/test/resources/rest-api-spec/test/search.query/10_match.yml @@ -5,7 +5,6 @@ # versions in the same position. - do: indices.create: - include_type_name: false index: test body: settings: diff --git a/modules/analysis-common/src/test/resources/rest-api-spec/test/search.query/20_ngram_search.yml b/modules/analysis-common/src/test/resources/rest-api-spec/test/search.query/20_ngram_search.yml index f480045689056..495932016966d 100644 --- a/modules/analysis-common/src/test/resources/rest-api-spec/test/search.query/20_ngram_search.yml +++ b/modules/analysis-common/src/test/resources/rest-api-spec/test/search.query/20_ngram_search.yml @@ -1,7 +1,6 @@ "ngram search": - do: indices.create: - include_type_name: false index: test body: settings: @@ -44,7 +43,6 @@ "testNGramCopyField": - do: indices.create: - include_type_name: false index: test body: settings: diff --git a/modules/analysis-common/src/test/resources/rest-api-spec/test/search.query/30_ngram_highligthing.yml b/modules/analysis-common/src/test/resources/rest-api-spec/test/search.query/30_ngram_highligthing.yml index 943f1d6ae5767..674a6ab438069 100644 --- a/modules/analysis-common/src/test/resources/rest-api-spec/test/search.query/30_ngram_highligthing.yml +++ b/modules/analysis-common/src/test/resources/rest-api-spec/test/search.query/30_ngram_highligthing.yml @@ -1,7 +1,6 @@ "ngram highlighting": - do: indices.create: - include_type_name: false index: test body: settings: diff --git a/modules/analysis-common/src/test/resources/rest-api-spec/test/search.query/40_query_string.yml b/modules/analysis-common/src/test/resources/rest-api-spec/test/search.query/40_query_string.yml index efe5d04012d57..4ba16007664f1 100644 --- a/modules/analysis-common/src/test/resources/rest-api-spec/test/search.query/40_query_string.yml +++ b/modules/analysis-common/src/test/resources/rest-api-spec/test/search.query/40_query_string.yml @@ -2,7 +2,6 @@ "Test query string with snowball": - do: indices.create: - include_type_name: false index: test body: mappings: diff --git a/modules/analysis-common/src/test/resources/rest-api-spec/test/search.query/50_queries_with_synonyms.yml b/modules/analysis-common/src/test/resources/rest-api-spec/test/search.query/50_queries_with_synonyms.yml index 5a4af7c9a344a..784ffd9dd123a 100644 --- a/modules/analysis-common/src/test/resources/rest-api-spec/test/search.query/50_queries_with_synonyms.yml +++ b/modules/analysis-common/src/test/resources/rest-api-spec/test/search.query/50_queries_with_synonyms.yml @@ -2,7 +2,6 @@ "Test common terms query with stacked tokens": - do: indices.create: - include_type_name: false index: test body: settings: @@ -220,7 +219,6 @@ "Test match query with synonyms - see #3881 for extensive description of the issue": - do: indices.create: - include_type_name: false index: test body: settings: diff --git a/modules/analysis-common/src/test/resources/rest-api-spec/test/search.query/60_synonym_graph.yml b/modules/analysis-common/src/test/resources/rest-api-spec/test/search.query/60_synonym_graph.yml index e24dc918d449a..7de297bb4e215 100644 --- a/modules/analysis-common/src/test/resources/rest-api-spec/test/search.query/60_synonym_graph.yml +++ b/modules/analysis-common/src/test/resources/rest-api-spec/test/search.query/60_synonym_graph.yml @@ -1,7 +1,6 @@ setup: - do: indices.create: - include_type_name: false index: test body: settings: diff --git a/modules/analysis-common/src/test/resources/rest-api-spec/test/search.suggest/20_phrase.yml 
b/modules/analysis-common/src/test/resources/rest-api-spec/test/search.suggest/20_phrase.yml index 403d7dd2830fb..35d5b1aaf190f 100644 --- a/modules/analysis-common/src/test/resources/rest-api-spec/test/search.suggest/20_phrase.yml +++ b/modules/analysis-common/src/test/resources/rest-api-spec/test/search.suggest/20_phrase.yml @@ -3,7 +3,6 @@ setup: - do: indices.create: - include_type_name: false index: test body: settings: diff --git a/modules/analysis-common/src/test/resources/rest-api-spec/test/search.suggest/30_synonyms.yml b/modules/analysis-common/src/test/resources/rest-api-spec/test/search.suggest/30_synonyms.yml index eb0bfc84a9223..8b67abb193aa0 100644 --- a/modules/analysis-common/src/test/resources/rest-api-spec/test/search.suggest/30_synonyms.yml +++ b/modules/analysis-common/src/test/resources/rest-api-spec/test/search.suggest/30_synonyms.yml @@ -2,7 +2,6 @@ "suggestions with synonyms": - do: indices.create: - include_type_name: false index: test body: settings: diff --git a/modules/analysis-common/src/test/resources/rest-api-spec/test/termvectors/10_payloads.yml b/modules/analysis-common/src/test/resources/rest-api-spec/test/termvectors/10_payloads.yml index 0e9c5588371a8..cba4370943206 100644 --- a/modules/analysis-common/src/test/resources/rest-api-spec/test/termvectors/10_payloads.yml +++ b/modules/analysis-common/src/test/resources/rest-api-spec/test/termvectors/10_payloads.yml @@ -3,7 +3,6 @@ # because there are no token filters that support payloads in core. - do: indices.create: - include_type_name: false index: test body: settings: diff --git a/modules/ingest-geoip/src/test/resources/rest-api-spec/test/ingest_geoip/20_geoip_processor.yml b/modules/ingest-geoip/src/test/resources/rest-api-spec/test/ingest_geoip/20_geoip_processor.yml index 78368b8703a6d..95c826eee30de 100644 --- a/modules/ingest-geoip/src/test/resources/rest-api-spec/test/ingest_geoip/20_geoip_processor.yml +++ b/modules/ingest-geoip/src/test/resources/rest-api-spec/test/ingest_geoip/20_geoip_processor.yml @@ -127,14 +127,12 @@ body: > { "mappings" : { - "test" : { "properties" : { "geoip.location" : { "type": "geo_point" } } } - } } - match: { acknowledged: true } diff --git a/modules/lang-expression/src/test/resources/rest-api-spec/test/lang_expression/20_search.yml b/modules/lang-expression/src/test/resources/rest-api-spec/test/lang_expression/20_search.yml index 197daf5357be0..fd8a2ebf1183b 100644 --- a/modules/lang-expression/src/test/resources/rest-api-spec/test/lang_expression/20_search.yml +++ b/modules/lang-expression/src/test/resources/rest-api-spec/test/lang_expression/20_search.yml @@ -2,7 +2,6 @@ setup: - do: indices.create: - include_type_name: false index: test123 body: mappings: diff --git a/modules/lang-mustache/src/test/resources/rest-api-spec/test/lang_mustache/60_typed_keys.yml b/modules/lang-mustache/src/test/resources/rest-api-spec/test/lang_mustache/60_typed_keys.yml index 9c12aeaa530ea..0f97afbe5ab66 100644 --- a/modules/lang-mustache/src/test/resources/rest-api-spec/test/lang_mustache/60_typed_keys.yml +++ b/modules/lang-mustache/src/test/resources/rest-api-spec/test/lang_mustache/60_typed_keys.yml @@ -1,7 +1,6 @@ setup: - do: indices.put_template: - include_type_name: false name: index_template body: index_patterns: test-* diff --git a/modules/lang-painless/src/test/resources/rest-api-spec/test/painless/20_scriptfield.yml b/modules/lang-painless/src/test/resources/rest-api-spec/test/painless/20_scriptfield.yml index 470ace98dd329..79836021c7ef7 100644 --- 
a/modules/lang-painless/src/test/resources/rest-api-spec/test/painless/20_scriptfield.yml +++ b/modules/lang-painless/src/test/resources/rest-api-spec/test/painless/20_scriptfield.yml @@ -3,7 +3,6 @@ setup: - do: indices.create: - include_type_name: false index: test body: mappings: diff --git a/modules/lang-painless/src/test/resources/rest-api-spec/test/painless/50_script_doc_values.yml b/modules/lang-painless/src/test/resources/rest-api-spec/test/painless/50_script_doc_values.yml index b112408396e53..fa55b47b803dd 100644 --- a/modules/lang-painless/src/test/resources/rest-api-spec/test/painless/50_script_doc_values.yml +++ b/modules/lang-painless/src/test/resources/rest-api-spec/test/painless/50_script_doc_values.yml @@ -1,7 +1,6 @@ setup: - do: indices.create: - include_type_name: false index: test body: settings: diff --git a/modules/lang-painless/src/test/resources/rest-api-spec/test/painless/60_script_doc_values_binary.yml b/modules/lang-painless/src/test/resources/rest-api-spec/test/painless/60_script_doc_values_binary.yml index f8ad782665e98..3a549e6c79bf9 100644 --- a/modules/lang-painless/src/test/resources/rest-api-spec/test/painless/60_script_doc_values_binary.yml +++ b/modules/lang-painless/src/test/resources/rest-api-spec/test/painless/60_script_doc_values_binary.yml @@ -4,7 +4,6 @@ features: ["headers"] - do: indices.create: - include_type_name: false index: test body: mappings: diff --git a/modules/lang-painless/src/test/resources/rest-api-spec/test/painless/70_execute_painless_scripts.yml b/modules/lang-painless/src/test/resources/rest-api-spec/test/painless/70_execute_painless_scripts.yml index d06cf45dadd3c..5a994425c5dc2 100644 --- a/modules/lang-painless/src/test/resources/rest-api-spec/test/painless/70_execute_painless_scripts.yml +++ b/modules/lang-painless/src/test/resources/rest-api-spec/test/painless/70_execute_painless_scripts.yml @@ -1,7 +1,6 @@ setup: - do: indices.create: - include_type_name: false index: my-index body: mappings: diff --git a/modules/lang-painless/src/test/resources/rest-api-spec/test/painless/70_mov_fn_agg.yml b/modules/lang-painless/src/test/resources/rest-api-spec/test/painless/70_mov_fn_agg.yml index 16432f9e70e7a..c2fb38611a30d 100644 --- a/modules/lang-painless/src/test/resources/rest-api-spec/test/painless/70_mov_fn_agg.yml +++ b/modules/lang-painless/src/test/resources/rest-api-spec/test/painless/70_mov_fn_agg.yml @@ -6,7 +6,6 @@ setup: reason: "moving_fn added in 6.4.0" - do: indices.create: - include_type_name: false index: test body: mappings: diff --git a/modules/lang-painless/src/test/resources/rest-api-spec/test/painless/80_script_score.yml b/modules/lang-painless/src/test/resources/rest-api-spec/test/painless/80_script_score.yml index 24dd8b87d8b8f..c9b73bca029b1 100644 --- a/modules/lang-painless/src/test/resources/rest-api-spec/test/painless/80_script_score.yml +++ b/modules/lang-painless/src/test/resources/rest-api-spec/test/painless/80_script_score.yml @@ -80,7 +80,6 @@ setup: "Random functions": - do: indices.create: - include_type_name: false index: test body: settings: @@ -136,7 +135,6 @@ setup: "Decay geo functions": - do: indices.create: - include_type_name: false index: test body: settings: diff --git a/modules/lang-painless/src/test/resources/rest-api-spec/test/painless/90_interval_query_filter.yml b/modules/lang-painless/src/test/resources/rest-api-spec/test/painless/90_interval_query_filter.yml index 270f5682aacb4..0a6cf993e2a2e 100644 --- 
a/modules/lang-painless/src/test/resources/rest-api-spec/test/painless/90_interval_query_filter.yml +++ b/modules/lang-painless/src/test/resources/rest-api-spec/test/painless/90_interval_query_filter.yml @@ -5,7 +5,6 @@ setup: - do: indices.create: - include_type_name: false index: test body: mappings: diff --git a/modules/mapper-extras/src/test/resources/rest-api-spec/test/dense-vector/10_indexing.yml b/modules/mapper-extras/src/test/resources/rest-api-spec/test/dense-vector/10_indexing.yml index fd554fb16dbbd..846341cd8ece4 100644 --- a/modules/mapper-extras/src/test/resources/rest-api-spec/test/dense-vector/10_indexing.yml +++ b/modules/mapper-extras/src/test/resources/rest-api-spec/test/dense-vector/10_indexing.yml @@ -5,7 +5,6 @@ setup: - do: indices.create: - include_type_name: false index: test-index body: settings: diff --git a/modules/mapper-extras/src/test/resources/rest-api-spec/test/rank_feature/10_basic.yml b/modules/mapper-extras/src/test/resources/rest-api-spec/test/rank_feature/10_basic.yml index 28cd9a4abc045..8a874d30591f6 100644 --- a/modules/mapper-extras/src/test/resources/rest-api-spec/test/rank_feature/10_basic.yml +++ b/modules/mapper-extras/src/test/resources/rest-api-spec/test/rank_feature/10_basic.yml @@ -5,7 +5,6 @@ setup: - do: indices.create: - include_type_name: false index: test body: settings: diff --git a/modules/mapper-extras/src/test/resources/rest-api-spec/test/rank_features/10_basic.yml b/modules/mapper-extras/src/test/resources/rest-api-spec/test/rank_features/10_basic.yml index 1c1f021d0fd8e..f524bd93bb600 100644 --- a/modules/mapper-extras/src/test/resources/rest-api-spec/test/rank_features/10_basic.yml +++ b/modules/mapper-extras/src/test/resources/rest-api-spec/test/rank_features/10_basic.yml @@ -5,7 +5,6 @@ setup: - do: indices.create: - include_type_name: false index: test body: settings: diff --git a/modules/mapper-extras/src/test/resources/rest-api-spec/test/scaled_float/10_basic.yml b/modules/mapper-extras/src/test/resources/rest-api-spec/test/scaled_float/10_basic.yml index 9badd5ca3018d..0298a1c00ce90 100644 --- a/modules/mapper-extras/src/test/resources/rest-api-spec/test/scaled_float/10_basic.yml +++ b/modules/mapper-extras/src/test/resources/rest-api-spec/test/scaled_float/10_basic.yml @@ -1,7 +1,6 @@ setup: - do: indices.create: - include_type_name: false index: test body: settings: diff --git a/modules/mapper-extras/src/test/resources/rest-api-spec/test/sparse-vector/10_indexing.yml b/modules/mapper-extras/src/test/resources/rest-api-spec/test/sparse-vector/10_indexing.yml index ef78a48fab5a1..b3efff318b5e3 100644 --- a/modules/mapper-extras/src/test/resources/rest-api-spec/test/sparse-vector/10_indexing.yml +++ b/modules/mapper-extras/src/test/resources/rest-api-spec/test/sparse-vector/10_indexing.yml @@ -5,7 +5,6 @@ setup: - do: indices.create: - include_type_name: false index: test-index body: settings: diff --git a/modules/parent-join/src/test/resources/rest-api-spec/test/11_parent_child.yml b/modules/parent-join/src/test/resources/rest-api-spec/test/11_parent_child.yml index 7d3fb36bafc93..d85f89e768db8 100644 --- a/modules/parent-join/src/test/resources/rest-api-spec/test/11_parent_child.yml +++ b/modules/parent-join/src/test/resources/rest-api-spec/test/11_parent_child.yml @@ -1,7 +1,6 @@ setup: - do: indices.create: - include_type_name: false index: test body: mappings: diff --git a/modules/parent-join/src/test/resources/rest-api-spec/test/20_parent_join.yml 
b/modules/parent-join/src/test/resources/rest-api-spec/test/20_parent_join.yml index 27af9794843d9..379fdddea0b02 100644 --- a/modules/parent-join/src/test/resources/rest-api-spec/test/20_parent_join.yml +++ b/modules/parent-join/src/test/resources/rest-api-spec/test/20_parent_join.yml @@ -1,7 +1,6 @@ setup: - do: indices.create: - include_type_name: false index: test body: mappings: diff --git a/modules/percolator/src/test/resources/rest-api-spec/test/10_basic.yml b/modules/percolator/src/test/resources/rest-api-spec/test/10_basic.yml index 8c39ed8278a69..3ed2ed64d782c 100644 --- a/modules/percolator/src/test/resources/rest-api-spec/test/10_basic.yml +++ b/modules/percolator/src/test/resources/rest-api-spec/test/10_basic.yml @@ -2,7 +2,6 @@ "Test percolator basics via rest": - do: indices.create: - include_type_name: false index: queries_index body: mappings: diff --git a/modules/reindex/src/test/resources/rest-api-spec/test/reindex/20_validation.yml b/modules/reindex/src/test/resources/rest-api-spec/test/reindex/20_validation.yml index f726b1f00bddf..d45b14a1edc5f 100644 --- a/modules/reindex/src/test/resources/rest-api-spec/test/reindex/20_validation.yml +++ b/modules/reindex/src/test/resources/rest-api-spec/test/reindex/20_validation.yml @@ -166,9 +166,8 @@ index: test body: mappings: - test: - _source: - enabled: false + _source: + enabled: false - do: index: index: test @@ -178,7 +177,7 @@ indices.refresh: {} - do: - catch: /\[test\]\[test\]\[1\] didn't store _source/ + catch: /\[test\]\[_doc\]\[1\] didn't store _source/ reindex: body: source: diff --git a/modules/reindex/src/test/resources/rest-api-spec/test/reindex/95_parent_join.yml b/modules/reindex/src/test/resources/rest-api-spec/test/reindex/95_parent_join.yml index ae571d97c105a..b36593b4b962c 100644 --- a/modules/reindex/src/test/resources/rest-api-spec/test/reindex/95_parent_join.yml +++ b/modules/reindex/src/test/resources/rest-api-spec/test/reindex/95_parent_join.yml @@ -4,18 +4,16 @@ setup: index: source body: mappings: - doc: - properties: - join_field: { "type": "join", "relations": { "parent": "child", "child": "grand_child" } } + properties: + join_field: { "type": "join", "relations": { "parent": "child", "child": "grand_child" } } - do: indices.create: index: dest body: mappings: - doc: - properties: - join_field: { "type": "join", "relations": { "parent": "child", "child": "grand_child" } } + properties: + join_field: { "type": "join", "relations": { "parent": "child", "child": "grand_child" } } - do: index: diff --git a/modules/reindex/src/test/resources/rest-api-spec/test/update_by_query/30_new_fields.yml b/modules/reindex/src/test/resources/rest-api-spec/test/update_by_query/30_new_fields.yml index aba57218b8a3b..ba14b34cf0ef5 100644 --- a/modules/reindex/src/test/resources/rest-api-spec/test/update_by_query/30_new_fields.yml +++ b/modules/reindex/src/test/resources/rest-api-spec/test/update_by_query/30_new_fields.yml @@ -5,10 +5,9 @@ index: test body: mappings: - place: - properties: - name: - type: keyword + properties: + name: + type: keyword - do: index: index: test @@ -18,7 +17,6 @@ - do: indices.put_mapping: - include_type_name: false index: test body: properties: diff --git a/plugins/analysis-icu/src/test/resources/rest-api-spec/test/analysis_icu/20_search.yml b/plugins/analysis-icu/src/test/resources/rest-api-spec/test/analysis_icu/20_search.yml index aaa3ea7fb042b..90aae30bbb5b5 100644 --- a/plugins/analysis-icu/src/test/resources/rest-api-spec/test/analysis_icu/20_search.yml +++ 
b/plugins/analysis-icu/src/test/resources/rest-api-spec/test/analysis_icu/20_search.yml @@ -4,7 +4,6 @@ "Index ICU content": - do: indices.create: - include_type_name: false index: test body: settings: diff --git a/plugins/analysis-kuromoji/src/test/resources/rest-api-spec/test/analysis_kuromoji/20_search.yml b/plugins/analysis-kuromoji/src/test/resources/rest-api-spec/test/analysis_kuromoji/20_search.yml index 3b7fc4eb46293..9a052469c2019 100644 --- a/plugins/analysis-kuromoji/src/test/resources/rest-api-spec/test/analysis_kuromoji/20_search.yml +++ b/plugins/analysis-kuromoji/src/test/resources/rest-api-spec/test/analysis_kuromoji/20_search.yml @@ -7,11 +7,10 @@ index: test body: mappings: - type: - properties: - text: - type: text - analyzer: kuromoji + properties: + text: + type: text + analyzer: kuromoji - do: index: diff --git a/plugins/analysis-nori/src/test/resources/rest-api-spec/test/analysis_nori/20_search.yml b/plugins/analysis-nori/src/test/resources/rest-api-spec/test/analysis_nori/20_search.yml index e31355d299e9a..b7ecd933b6676 100644 --- a/plugins/analysis-nori/src/test/resources/rest-api-spec/test/analysis_nori/20_search.yml +++ b/plugins/analysis-nori/src/test/resources/rest-api-spec/test/analysis_nori/20_search.yml @@ -7,11 +7,10 @@ index: test body: mappings: - type: - properties: - text: - type: text - analyzer: nori + properties: + text: + type: text + analyzer: nori - do: index: diff --git a/plugins/analysis-phonetic/src/test/resources/rest-api-spec/test/analysis_phonetic/40_search.yml b/plugins/analysis-phonetic/src/test/resources/rest-api-spec/test/analysis_phonetic/40_search.yml index 80f7b63bb0b7c..2e6ee7ebd102a 100644 --- a/plugins/analysis-phonetic/src/test/resources/rest-api-spec/test/analysis_phonetic/40_search.yml +++ b/plugins/analysis-phonetic/src/test/resources/rest-api-spec/test/analysis_phonetic/40_search.yml @@ -4,7 +4,6 @@ "Index phonetic content": - do: indices.create: - include_type_name: false index: phonetic_sample body: settings: diff --git a/plugins/analysis-smartcn/src/test/resources/rest-api-spec/test/analysis_smartcn/20_search.yml b/plugins/analysis-smartcn/src/test/resources/rest-api-spec/test/analysis_smartcn/20_search.yml index 2529e40c0be5e..0a1f6e2af42bf 100644 --- a/plugins/analysis-smartcn/src/test/resources/rest-api-spec/test/analysis_smartcn/20_search.yml +++ b/plugins/analysis-smartcn/src/test/resources/rest-api-spec/test/analysis_smartcn/20_search.yml @@ -4,7 +4,6 @@ "Index Smartcn content": - do: indices.create: - include_type_name: false index: test body: mappings: diff --git a/plugins/analysis-stempel/src/test/resources/rest-api-spec/test/analysis_stempel/20_search.yml b/plugins/analysis-stempel/src/test/resources/rest-api-spec/test/analysis_stempel/20_search.yml index dc00c297694db..7276b6744dfb5 100644 --- a/plugins/analysis-stempel/src/test/resources/rest-api-spec/test/analysis_stempel/20_search.yml +++ b/plugins/analysis-stempel/src/test/resources/rest-api-spec/test/analysis_stempel/20_search.yml @@ -4,7 +4,6 @@ "Index Stempel content": - do: indices.create: - include_type_name: false index: test body: mappings: diff --git a/plugins/analysis-ukrainian/src/test/resources/rest-api-spec/test/analysis_ukrainian/20_search.yml b/plugins/analysis-ukrainian/src/test/resources/rest-api-spec/test/analysis_ukrainian/20_search.yml index aec452ed3fe99..ba860729ebf23 100644 --- a/plugins/analysis-ukrainian/src/test/resources/rest-api-spec/test/analysis_ukrainian/20_search.yml +++ 
b/plugins/analysis-ukrainian/src/test/resources/rest-api-spec/test/analysis_ukrainian/20_search.yml @@ -4,7 +4,6 @@ "Index Stempel content": - do: indices.create: - include_type_name: false index: test body: mappings: diff --git a/plugins/mapper-annotated-text/src/test/resources/rest-api-spec/test/mapper_annotatedtext/10_basic.yml b/plugins/mapper-annotated-text/src/test/resources/rest-api-spec/test/mapper_annotatedtext/10_basic.yml index 0049e60def439..a27e6c44a7fa2 100644 --- a/plugins/mapper-annotated-text/src/test/resources/rest-api-spec/test/mapper_annotatedtext/10_basic.yml +++ b/plugins/mapper-annotated-text/src/test/resources/rest-api-spec/test/mapper_annotatedtext/10_basic.yml @@ -9,7 +9,6 @@ - do: indices.create: - include_type_name: false index: annotated body: settings: diff --git a/plugins/mapper-murmur3/src/test/resources/rest-api-spec/test/mapper_murmur3/10_basic.yml b/plugins/mapper-murmur3/src/test/resources/rest-api-spec/test/mapper_murmur3/10_basic.yml index 647fd52f17ef3..34db9d017a854 100644 --- a/plugins/mapper-murmur3/src/test/resources/rest-api-spec/test/mapper_murmur3/10_basic.yml +++ b/plugins/mapper-murmur3/src/test/resources/rest-api-spec/test/mapper_murmur3/10_basic.yml @@ -6,7 +6,6 @@ - do: indices.create: - include_type_name: false index: test body: mappings: diff --git a/plugins/mapper-size/src/test/resources/rest-api-spec/test/mapper_size/10_basic.yml b/plugins/mapper-size/src/test/resources/rest-api-spec/test/mapper_size/10_basic.yml index 9a1049f72d0a4..bc33818e10e62 100644 --- a/plugins/mapper-size/src/test/resources/rest-api-spec/test/mapper_size/10_basic.yml +++ b/plugins/mapper-size/src/test/resources/rest-api-spec/test/mapper_size/10_basic.yml @@ -6,7 +6,6 @@ - do: indices.create: - include_type_name: false index: test body: mappings: diff --git a/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/10_basic.yml b/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/10_basic.yml index fa4ca0588940c..b4dd748f8286e 100644 --- a/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/10_basic.yml +++ b/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/10_basic.yml @@ -14,15 +14,15 @@ bulk: refresh: true body: - - '{"index": {"_index": "test_index", "_type": "test_type"}}' + - '{"index": {"_index": "test_index"}}' - '{"f1": "local_cluster", "filter_field": 0}' - - '{"index": {"_index": "test_index", "_type": "test_type"}}' + - '{"index": {"_index": "test_index"}}' - '{"f1": "local_cluster", "filter_field": 1}' - - '{"index": {"_index": "test_index", "_type": "test_type"}}' + - '{"index": {"_index": "test_index"}}' - '{"f1": "local_cluster", "filter_field": 0}' - - '{"index": {"_index": "test_index", "_type": "test_type"}}' + - '{"index": {"_index": "test_index"}}' - '{"f1": "local_cluster", "filter_field": 1}' - - '{"index": {"_index": "test_index", "_type": "test_type"}}' + - '{"index": {"_index": "test_index"}}' - '{"f1": "local_cluster", "filter_field": 0}' - do: diff --git a/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/30_field_caps.yml b/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/30_field_caps.yml index 2144c281e40d0..a0445adacab03 100644 --- a/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/30_field_caps.yml +++ b/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/30_field_caps.yml @@ -5,25 +5,24 @@ index: field_caps_index_2 body: mappings: - 
t: - properties: - text: - type: text - keyword: - type: keyword - number: - type: double - geo: - type: geo_point - object: - type: object - properties: - nested1 : - type : text - index: true - nested2: - type: float - doc_values: true + properties: + text: + type: text + keyword: + type: keyword + number: + type: double + geo: + type: geo_point + object: + type: object + properties: + nested1 : + type : text + index: true + nested2: + type: float + doc_values: true - do: field_caps: diff --git a/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/70_skip_shards.yml b/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/70_skip_shards.yml index d1a5a273e1d0f..9242664d9f219 100644 --- a/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/70_skip_shards.yml +++ b/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/70_skip_shards.yml @@ -10,17 +10,16 @@ number_of_shards: 1 number_of_replicas: 0 mappings: - test_type: - properties: - created_at: - type: date - format: "yyyy-MM-dd" + properties: + created_at: + type: date + format: "yyyy-MM-dd" - do: bulk: refresh: true body: - - '{"index": {"_index": "skip_shards_index", "_type": "test_type"}}' + - '{"index": {"_index": "skip_shards_index"}}' - '{"f1": "local_cluster", "sort_field": 0, "created_at" : "2017-01-01"}' # check that we skip the remote shard diff --git a/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/80_index_name_agg.yml b/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/80_index_name_agg.yml index d2fcd6878f8e6..9c4aa0e7f09c4 100644 --- a/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/80_index_name_agg.yml +++ b/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/80_index_name_agg.yml @@ -24,7 +24,7 @@ teardown: bulk: refresh: true body: - - '{"index": {"_index": "single_doc_index", "_type": "test_type"}}' + - '{"index": {"_index": "single_doc_index"}}' - '{"f1": "local_cluster", "sort_field": 0}' - do: diff --git a/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/remote_cluster/10_basic.yml b/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/remote_cluster/10_basic.yml index 24dc2532937a2..19b3771acf877 100644 --- a/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/remote_cluster/10_basic.yml +++ b/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/remote_cluster/10_basic.yml @@ -10,17 +10,16 @@ number_of_shards: 1 number_of_replicas: 0 mappings: - test_type: - properties: - created_at: - type: date - format: "yyyy-MM-dd" + properties: + created_at: + type: date + format: "yyyy-MM-dd" - do: bulk: refresh: true body: - - '{"index": {"_index": "single_doc_index", "_type": "test_type"}}' + - '{"index": {"_index": "single_doc_index"}}' - '{"f1": "remote_cluster", "sort_field": 1, "created_at" : "2016-01-01"}' - do: @@ -30,25 +29,24 @@ settings: index.number_of_shards: 1 mappings: - t: - properties: - text: - type: text - keyword: - type: keyword - number: - type: double - geo: - type: geo_point - object: - type: object - properties: - nested1 : - type : text - index: false - nested2: - type: float - doc_values: false + properties: + text: + type: text + keyword: + type: keyword + number: + type: double + geo: + type: geo_point + object: + type: object + properties: + nested1 : + type : text + index: false + nested2: + type: float + doc_values: false - do: indices.create: index: 
field_caps_index_3 @@ -56,25 +54,24 @@ settings: index.number_of_shards: 1 mappings: - t: - properties: - text: - type: text - keyword: - type: keyword - number: - type: long - geo: - type: keyword - object: - type: object - properties: - nested1 : - type : long - index: false - nested2: - type: keyword - doc_values: false + properties: + text: + type: text + keyword: + type: keyword + number: + type: long + geo: + type: keyword + object: + type: object + properties: + nested1 : + type : long + index: false + nested2: + type: keyword + doc_values: false - do: indices.create: index: test_index @@ -93,17 +90,17 @@ bulk: refresh: true body: - - '{"index": {"_index": "test_index", "_type": "test_type"}}' + - '{"index": {"_index": "test_index"}}' - '{"f1": "remote_cluster", "filter_field": 0}' - - '{"index": {"_index": "test_index", "_type": "test_type"}}' + - '{"index": {"_index": "test_index"}}' - '{"f1": "remote_cluster", "filter_field": 1}' - - '{"index": {"_index": "test_index", "_type": "test_type"}}' + - '{"index": {"_index": "test_index"}}' - '{"f1": "remote_cluster", "filter_field": 0}' - - '{"index": {"_index": "test_index", "_type": "test_type"}}' + - '{"index": {"_index": "test_index"}}' - '{"f1": "remote_cluster", "filter_field": 1}' - - '{"index": {"_index": "test_index", "_type": "test_type"}}' + - '{"index": {"_index": "test_index"}}' - '{"f1": "remote_cluster", "filter_field": 0}' - - '{"index": {"_index": "test_index", "_type": "test_type"}}' + - '{"index": {"_index": "test_index"}}' - '{"f1": "remote_cluster", "filter_field": 0}' - do: diff --git a/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/10_basic.yml b/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/10_basic.yml index 2605836f8573c..bd07ee8a58469 100644 --- a/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/10_basic.yml +++ b/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/10_basic.yml @@ -61,7 +61,6 @@ query: percolate: field: query - type: doc document: field2: value field3: value diff --git a/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/10_basic.yml b/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/10_basic.yml index 9e06f767d4892..a26a3f8274d99 100644 --- a/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/10_basic.yml +++ b/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/10_basic.yml @@ -25,15 +25,15 @@ bulk: refresh: true body: - - '{"index": {"_index": "test_search_template", "_type": "doc"}}' + - '{"index": {"_index": "test_search_template"}}' - '{"f1": "v1_old"}' - - '{"index": {"_index": "test_search_template", "_type": "doc"}}' + - '{"index": {"_index": "test_search_template"}}' - '{"f1": "v2_old"}' - - '{"index": {"_index": "test_search_template", "_type": "doc"}}' + - '{"index": {"_index": "test_search_template"}}' - '{"f1": "v3_old"}' - - '{"index": {"_index": "test_search_template", "_type": "doc"}}' + - '{"index": {"_index": "test_search_template"}}' - '{"f1": "v4_old"}' - - '{"index": {"_index": "test_search_template", "_type": "doc"}}' + - '{"index": {"_index": "test_search_template"}}' - '{"f1": "v5_old"}' - do: @@ -65,21 +65,19 @@ index: queries body: mappings: - doc: - properties: - query: - type: percolator - field1: - type: keyword - field2: - type: keyword - field3: - type: keyword + properties: + query: + type: percolator + field1: + type: keyword + field2: + type: keyword + field3: + type: keyword - do: index: index: 
queries - type: doc id: q1 body: query: @@ -89,7 +87,6 @@ - do: index: index: queries - type: doc id: q2 body: query: @@ -103,7 +100,6 @@ - do: index: index: queries - type: doc id: q3 body: query: @@ -176,15 +172,15 @@ bulk: refresh: true body: - - '{"index": {"_index": "reindexed_index", "_type": "doc"}}' + - '{"index": {"_index": "reindexed_index"}}' - '{"f1": "1"}' - - '{"index": {"_index": "reindexed_index", "_type": "doc"}}' + - '{"index": {"_index": "reindexed_index"}}' - '{"f1": "2"}' - - '{"index": {"_index": "reindexed_index", "_type": "doc"}}' + - '{"index": {"_index": "reindexed_index"}}' - '{"f1": "3"}' - - '{"index": {"_index": "reindexed_index", "_type": "doc"}}' + - '{"index": {"_index": "reindexed_index"}}' - '{"f1": "4"}' - - '{"index": {"_index": "reindexed_index", "_type": "doc"}}' + - '{"index": {"_index": "reindexed_index"}}' - '{"f1": "5"}' - do: @@ -215,9 +211,8 @@ index: all-index body: mappings: - type: - _all: - enabled: false - properties: - field: - type: text + _all: + enabled: false + properties: + field: + type: text diff --git a/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/10_basic.yml b/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/10_basic.yml index 508a898e0cdb5..63e67652127e9 100644 --- a/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/10_basic.yml +++ b/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/10_basic.yml @@ -27,7 +27,6 @@ - do: index: index: queries - type: doc id: q4 refresh: true body: @@ -134,6 +133,7 @@ - do: indices.get_mapping: + include_type_name: false index: all-index - is_true: all-index.mappings._all diff --git a/qa/smoke-test-ingest-with-all-dependencies/src/test/resources/rest-api-spec/test/ingest/60_pipeline_timestamp_date_mapping.yml b/qa/smoke-test-ingest-with-all-dependencies/src/test/resources/rest-api-spec/test/ingest/60_pipeline_timestamp_date_mapping.yml index ea0984ef3bcbf..0f514f2213492 100644 --- a/qa/smoke-test-ingest-with-all-dependencies/src/test/resources/rest-api-spec/test/ingest/60_pipeline_timestamp_date_mapping.yml +++ b/qa/smoke-test-ingest-with-all-dependencies/src/test/resources/rest-api-spec/test/ingest/60_pipeline_timestamp_date_mapping.yml @@ -9,7 +9,7 @@ index: timetest body: mappings: - test: { "properties": { "my_time": {"type": "date", "format": "strict_date_optional_time_nanos"}}} + "properties": { "my_time": {"type": "date", "format": "strict_date_optional_time_nanos"}} - do: ingest.put_pipeline: @@ -31,7 +31,6 @@ - do: index: index: timetest - type: test id: 1 pipeline: "my_timely_pipeline" body: {} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/70_mix_typeless_typeful.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/70_mix_typeless_typeful.yml index f9d5c079856a1..cad0891b21e52 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/70_mix_typeless_typeful.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/70_mix_typeless_typeful.yml @@ -7,6 +7,7 @@ - do: indices.create: # not using include_type_name: false on purpose + include_type_name: true index: index body: mappings: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.aliases/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.aliases/10_basic.yml index 0c64c94ce7f85..bdcee7af1bcb6 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.aliases/10_basic.yml +++ 
b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.aliases/10_basic.yml @@ -54,7 +54,6 @@ - do: indices.create: - include_type_name: false index: test body: mappings: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.fielddata/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.fielddata/10_basic.yml index b1b0e5fac933d..6ef59f30753f7 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.fielddata/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.fielddata/10_basic.yml @@ -22,7 +22,6 @@ - do: indices.create: - include_type_name: false index: index body: settings: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/count/20_query_string.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/count/20_query_string.yml index 5ef6b7584452a..66b0699a184d2 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/count/20_query_string.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/count/20_query_string.yml @@ -2,7 +2,6 @@ "count with query_string parameters": - do: indices.create: - include_type_name: false index: test body: mappings: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/create/70_nested.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/create/70_nested.yml index 1d6bd5bd70373..e6d2413f16788 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/create/70_nested.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/create/70_nested.yml @@ -5,7 +5,6 @@ setup: reason: types are required in requests before 7.0.0 - do: indices.create: - include_type_name: false index: test_1 body: settings: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/create/71_nested_with_types.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/create/71_nested_with_types.yml index 2c912a2165a83..755aaca448b0b 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/create/71_nested_with_types.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/create/71_nested_with_types.yml @@ -2,6 +2,7 @@ setup: - do: indices.create: + include_type_name: true index: test_1 body: settings: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/delete/11_shard_header.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/delete/11_shard_header.yml index e3f210966563e..3fc10bc8db12d 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/delete/11_shard_header.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/delete/11_shard_header.yml @@ -7,7 +7,6 @@ - do: indices.create: - include_type_name: false index: foobar body: settings: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/delete/30_routing.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/delete/30_routing.yml index f1647b8edac85..27e9350caed70 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/delete/30_routing.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/delete/30_routing.yml @@ -7,7 +7,6 @@ - do: indices.create: - include_type_name: false index: test_1 body: settings: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/delete/50_refresh.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/delete/50_refresh.yml index 22d9a6971f4f9..935e0946f100b 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/delete/50_refresh.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/delete/50_refresh.yml @@ -7,7 +7,6 @@ - do: indices.create: - include_type_name: 
false index: test_1 body: settings: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/delete/70_mix_typeless_typeful.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/delete/70_mix_typeless_typeful.yml index 22df4f5dc437e..e0f20795e41ca 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/delete/70_mix_typeless_typeful.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/delete/70_mix_typeless_typeful.yml @@ -7,6 +7,7 @@ - do: indices.create: # not using include_type_name: false on purpose + include_type_name: true index: index body: mappings: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/explain/30_query_string.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/explain/30_query_string.yml index 6a71cba7c4da1..ac34d4c2495f2 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/explain/30_query_string.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/explain/30_query_string.yml @@ -6,7 +6,6 @@ - do: indices.create: - include_type_name: false index: test body: mappings: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/explain/31_query_string_with_types.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/explain/31_query_string_with_types.yml index ea298f2092342..b6930688acf2d 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/explain/31_query_string_with_types.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/explain/31_query_string_with_types.yml @@ -2,6 +2,7 @@ "explain with query_string parameters": - do: indices.create: + include_type_name: true index: test body: mappings: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/explain/40_mix_typeless_typeful.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/explain/40_mix_typeless_typeful.yml index baefba7c312de..36fdbaa6b6f78 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/explain/40_mix_typeless_typeful.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/explain/40_mix_typeless_typeful.yml @@ -7,6 +7,7 @@ - do: indices.create: # not using include_type_name: false on purpose + include_type_name: true index: index body: mappings: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/field_caps/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/field_caps/10_basic.yml index 7fe06f1c56152..f7ed51665003d 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/field_caps/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/field_caps/10_basic.yml @@ -2,7 +2,6 @@ setup: - do: indices.create: - include_type_name: false index: test1 body: mappings: @@ -36,7 +35,6 @@ setup: - do: indices.create: - include_type_name: false index: test2 body: mappings: @@ -69,7 +67,6 @@ setup: index: false - do: indices.create: - include_type_name: false index: test3 body: mappings: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/get/100_mix_typeless_typeful.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/get/100_mix_typeless_typeful.yml index 71907461da3ea..d13229dbffbc6 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/get/100_mix_typeless_typeful.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/get/100_mix_typeless_typeful.yml @@ -7,6 +7,7 @@ - do: indices.create: # not using include_type_name: false on purpose + include_type_name: true index: index body: mappings: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/get/20_stored_fields.yml 
b/rest-api-spec/src/main/resources/rest-api-spec/test/get/20_stored_fields.yml index 7dd782652bf99..ab27842e4516e 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/get/20_stored_fields.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/get/20_stored_fields.yml @@ -6,7 +6,6 @@ reason: types are required in requests before 7.0.0 - do: indices.create: - include_type_name: false index: test_1 body: mappings: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/get/21_stored_fields_with_types.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/get/21_stored_fields_with_types.yml index fbffb9e0ea8ce..d1862fc0340d8 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/get/21_stored_fields_with_types.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/get/21_stored_fields_with_types.yml @@ -3,6 +3,7 @@ - do: indices.create: + include_type_name: true index: test_1 body: mappings: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/get/40_routing.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/get/40_routing.yml index 61b4fc8a1597a..9ba546d6ef942 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/get/40_routing.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/get/40_routing.yml @@ -7,7 +7,6 @@ - do: indices.create: - include_type_name: false index: test_1 body: settings: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/get/60_realtime_refresh.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/get/60_realtime_refresh.yml index a1647835536e1..ef4fa60bf1b0e 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/get/60_realtime_refresh.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/get/60_realtime_refresh.yml @@ -7,7 +7,6 @@ - do: indices.create: - include_type_name: false index: test_1 body: settings: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/get/70_source_filtering.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/get/70_source_filtering.yml index 4090636f1c21f..f4a5ba39be3b8 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/get/70_source_filtering.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/get/70_source_filtering.yml @@ -6,7 +6,6 @@ reason: types are required in requests before 7.0.0 - do: indices.create: - include_type_name: false index: test_1 body: mappings: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/get/71_source_filtering_with_types.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/get/71_source_filtering_with_types.yml index ca629cfa6aafe..3ac493c629f20 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/get/71_source_filtering_with_types.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/get/71_source_filtering_with_types.yml @@ -3,6 +3,7 @@ - do: indices.create: + include_type_name: true index: test_1 body: mappings: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/get_source/85_source_missing.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/get_source/85_source_missing.yml index 79b49097e743a..c214bf87d3997 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/get_source/85_source_missing.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/get_source/85_source_missing.yml @@ -7,7 +7,6 @@ setup: - do: indices.create: - include_type_name: false index: test_1 body: mappings: diff --git 
a/rest-api-spec/src/main/resources/rest-api-spec/test/get_source/86_source_missing_with_types.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/get_source/86_source_missing_with_types.yml index 63a47c7c95836..d7cfced5164ec 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/get_source/86_source_missing_with_types.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/get_source/86_source_missing_with_types.yml @@ -3,6 +3,7 @@ setup: - do: indices.create: + include_type_name: true index: test_1 body: mappings: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/index/40_routing.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/index/40_routing.yml index c3b577df4fe2c..630cf39dbe65c 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/index/40_routing.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/index/40_routing.yml @@ -6,7 +6,6 @@ reason: types are required in requests before 7.0.0 - do: indices.create: - include_type_name: false index: test_1 body: settings: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/index/60_refresh.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/index/60_refresh.yml index c991d7d2c3014..e16602d7ac8b6 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/index/60_refresh.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/index/60_refresh.yml @@ -7,7 +7,6 @@ - do: indices.create: - include_type_name: false index: test_1 body: settings: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/index/70_mix_typeless_typeful.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/index/70_mix_typeless_typeful.yml index 9f4f68f1998ae..f3629fbb7cc18 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/index/70_mix_typeless_typeful.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/index/70_mix_typeless_typeful.yml @@ -7,6 +7,7 @@ - do: indices.create: # not using include_type_name: false on purpose + include_type_name: true index: index body: mappings: @@ -70,6 +71,7 @@ - do: indices.create: # not using include_type_name: false on purpose + include_type_name: true index: index body: mappings: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.analyze/10_analyze.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.analyze/10_analyze.yml index 0b1c205e8956c..8062a96f3e6bc 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.analyze/10_analyze.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.analyze/10_analyze.yml @@ -11,7 +11,6 @@ "Index and field": - do: indices.create: - include_type_name: false index: test body: mappings: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.create/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.create/10_basic.yml index 19bc94870cb47..78e67541a1f36 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.create/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.create/10_basic.yml @@ -6,7 +6,6 @@ reason: include_type_name defaults to true before 7.0.0 - do: indices.create: - include_type_name: false index: test_index body: mappings: @@ -26,7 +25,6 @@ reason: include_type_name defaults to true before 7.0.0 - do: indices.create: - include_type_name: false index: test_index body: settings: @@ -46,7 +44,6 @@ reason: include_type_name defaults to true before 7.0.0 - do: indices.create: - include_type_name: false index: 
test_index - match: { acknowledged: true } @@ -60,7 +57,6 @@ reason: include_type_name defaults to true before 7.0.0 - do: indices.create: - include_type_name: false index: test_index wait_for_active_shards: all body: @@ -78,7 +74,6 @@ reason: include_type_name defaults to true before 7.0.0 - do: indices.create: - include_type_name: false index: test_index body: mappings: @@ -112,7 +107,6 @@ reason: is_write_index is not implemented in ES <= 6.x - do: indices.create: - include_type_name: false index: test_index body: aliases: @@ -135,7 +129,6 @@ - do: catch: /illegal_argument_exception/ indices.create: - include_type_name: false index: test_index body: mappings: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.create/20_mix_typeless_typeful.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.create/20_mix_typeless_typeful.yml index 50a5239e70675..1eab9d6159764 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.create/20_mix_typeless_typeful.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.create/20_mix_typeless_typeful.yml @@ -19,7 +19,6 @@ - do: indices.create: - include_type_name: false index: test-1 body: mappings: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get/10_basic.yml index 6f5ae97683b82..b70bc8ebeb469 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get/10_basic.yml @@ -3,7 +3,6 @@ setup: - do: indices.create: - include_type_name: false index: test_index body: aliases: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get/11_basic_with_types.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get/11_basic_with_types.yml index 2f3e538743c56..413c4bcb8d28c 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get/11_basic_with_types.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get/11_basic_with_types.yml @@ -3,6 +3,7 @@ setup: - do: indices.create: + include_type_name: true index: test_index body: aliases: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_field_mapping/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_field_mapping/10_basic.yml index e215c43b4bdeb..9be1f7246d5f3 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_field_mapping/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_field_mapping/10_basic.yml @@ -5,7 +5,6 @@ setup: reason: include_type_name defaults to true before 7.0 - do: indices.create: - include_type_name: false index: test_index body: mappings: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_field_mapping/11_basic_with_types.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_field_mapping/11_basic_with_types.yml index d45f0671db5a6..0a7f5fa3560ba 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_field_mapping/11_basic_with_types.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_field_mapping/11_basic_with_types.yml @@ -2,6 +2,7 @@ setup: - do: indices.create: + include_type_name: true index: test_index body: mappings: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_field_mapping/20_missing_field.yml 
b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_field_mapping/20_missing_field.yml index 3bc1c349039c0..1570ded351874 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_field_mapping/20_missing_field.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_field_mapping/20_missing_field.yml @@ -5,7 +5,6 @@ reason: include_type_name defaults to true before 7.0 - do: indices.create: - include_type_name: false index: test_index body: mappings: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_field_mapping/21_missing_field_with_types.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_field_mapping/21_missing_field_with_types.yml index c715d27ebc26f..264d187ebd22d 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_field_mapping/21_missing_field_with_types.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_field_mapping/21_missing_field_with_types.yml @@ -3,6 +3,7 @@ - do: indices.create: + include_type_name: true index: test_index body: mappings: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_field_mapping/40_missing_index.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_field_mapping/40_missing_index.yml index 2273867816c6e..7c7b07b587849 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_field_mapping/40_missing_index.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_field_mapping/40_missing_index.yml @@ -4,7 +4,6 @@ - do: catch: missing indices.get_field_mapping: - include_type_name: false index: test_index fields: field diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_field_mapping/50_field_wildcards.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_field_mapping/50_field_wildcards.yml index 435259fb3b07d..7db61d122e7ce 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_field_mapping/50_field_wildcards.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_field_mapping/50_field_wildcards.yml @@ -5,7 +5,6 @@ setup: reason: include_type_name defaults to true before 7.0 - do: indices.create: - include_type_name: false index: test_index body: mappings: @@ -25,7 +24,6 @@ setup: - do: indices.create: - include_type_name: false index: test_index_2 body: mappings: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_field_mapping/51_field_wildcards_with_types.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_field_mapping/51_field_wildcards_with_types.yml index fe0c135f13ad7..68c183e9b292e 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_field_mapping/51_field_wildcards_with_types.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_field_mapping/51_field_wildcards_with_types.yml @@ -2,6 +2,7 @@ setup: - do: indices.create: + include_type_name: true index: test_index body: mappings: @@ -22,6 +23,7 @@ setup: - do: indices.create: + include_type_name: true index: test_index_2 body: mappings: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_field_mapping/60_mix_typeless_typeful.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_field_mapping/60_mix_typeless_typeful.yml index 862075f450e73..2b6433a3e98f8 100644 --- 
a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_field_mapping/60_mix_typeless_typeful.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_field_mapping/60_mix_typeless_typeful.yml @@ -3,6 +3,7 @@ - do: indices.create: # not using include_type_name: false on purpose + include_type_name: true index: index body: mappings: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_mapping/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_mapping/10_basic.yml index 76519cc4c4c01..c3addd95469d4 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_mapping/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_mapping/10_basic.yml @@ -18,7 +18,6 @@ setup: - do: indices.create: - include_type_name: false index: t - do: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_mapping/11_basic_with_types.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_mapping/11_basic_with_types.yml index 1c26a2a3d3d13..598cc24f7806b 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_mapping/11_basic_with_types.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_mapping/11_basic_with_types.yml @@ -2,12 +2,14 @@ setup: - do: indices.create: + include_type_name: true index: test_1 body: mappings: doc: {} - do: indices.create: + include_type_name: true index: test_2 body: mappings: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_mapping/20_missing_type.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_mapping/20_missing_type.yml index 93598cab6cf8b..f17fb6a595305 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_mapping/20_missing_type.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_mapping/20_missing_type.yml @@ -26,6 +26,7 @@ "No type matching pattern returns 404": - do: indices.create: + include_type_name: true index: test_index body: mappings: @@ -50,6 +51,7 @@ "Existent and non-existent type returns 404 and the existing type": - do: indices.create: + include_type_name: true index: test_index body: mappings: @@ -74,6 +76,7 @@ "Existent and non-existent types returns 404 and the existing type": - do: indices.create: + include_type_name: true index: test_index body: mappings: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_mapping/30_missing_index.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_mapping/30_missing_index.yml index 9b422ec8b9156..5a7624265ecc9 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_mapping/30_missing_index.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_mapping/30_missing_index.yml @@ -3,7 +3,6 @@ - do: catch: missing indices.get_mapping: - include_type_name: false index: test_index --- @@ -20,7 +19,6 @@ reason: ignore_unavailable was ignored in previous versions - do: indices.get_mapping: - include_type_name: false index: test_index ignore_unavailable: true @@ -31,7 +29,6 @@ - do: catch: missing indices.get_mapping: - include_type_name: false index: test_index ignore_unavailable: true allow_no_indices: false diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_mapping/40_aliases.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_mapping/40_aliases.yml index 26086fe4c3a13..15a52b7b2db25 100644 --- 
a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_mapping/40_aliases.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_mapping/40_aliases.yml @@ -3,7 +3,6 @@ - do: indices.create: - include_type_name: false index: test_index body: mappings: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_mapping/50_wildcard_expansion.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_mapping/50_wildcard_expansion.yml index ea6a3d3a01361..d3f15b3292285 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_mapping/50_wildcard_expansion.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_mapping/50_wildcard_expansion.yml @@ -2,7 +2,6 @@ setup: - do: indices.create: - include_type_name: false index: test-xxx body: settings: @@ -14,7 +13,6 @@ setup: type: keyword - do: indices.create: - include_type_name: false index: test-xxy body: settings: @@ -26,7 +24,6 @@ setup: type: keyword - do: indices.create: - include_type_name: false index: test-xyy body: settings: @@ -38,7 +35,6 @@ setup: type: keyword - do: indices.create: - include_type_name: false index: test-yyy body: settings: @@ -66,7 +62,6 @@ setup: - do: indices.get_mapping: - include_type_name: false index: test-x* - is_true: test-xxx.mappings @@ -77,7 +72,6 @@ setup: - do: indices.get_mapping: - include_type_name: false index: test-x* expand_wildcards: all @@ -90,7 +84,6 @@ setup: - do: indices.get_mapping: - include_type_name: false index: test-x* expand_wildcards: open @@ -102,7 +95,6 @@ setup: - do: indices.get_mapping: - include_type_name: false index: test-x* expand_wildcards: closed @@ -115,7 +107,6 @@ setup: reason: allow_no_indices (defaults to true) was ignored in previous versions - do: indices.get_mapping: - include_type_name: false index: test-x* expand_wildcards: none @@ -128,7 +119,6 @@ setup: - do: catch: missing indices.get_mapping: - include_type_name: false index: test-x* expand_wildcards: none allow_no_indices: false @@ -137,7 +127,6 @@ setup: - do: indices.get_mapping: - include_type_name: false index: test-x* expand_wildcards: open,closed diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_mapping/70_mix_typeless_typeful.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_mapping/70_mix_typeless_typeful.yml index 89e0d42a9e799..162a8d340d48a 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_mapping/70_mix_typeless_typeful.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_mapping/70_mix_typeless_typeful.yml @@ -7,6 +7,7 @@ - do: indices.create: # not using include_type_name: false on purpose + include_type_name: true index: index body: mappings: @@ -17,7 +18,6 @@ - do: indices.get_mapping: - include_type_name: false index: index - match: { index.mappings.properties.foo.type: "keyword" } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_template/20_get_missing.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_template/20_get_missing.yml index 13d05efc67151..2751f57dacb6c 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_template/20_get_missing.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_template/20_get_missing.yml @@ -9,6 +9,5 @@ setup: - do: catch: missing indices.get_template: - include_type_name: false name: test diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_mapping/10_basic.yml 
b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_mapping/10_basic.yml index 14cc3615db05a..420b12398d267 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_mapping/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_mapping/10_basic.yml @@ -5,7 +5,6 @@ reason: include_type_name defaults to true before 7.0 - do: indices.create: - include_type_name: false index: test_index - do: @@ -60,7 +59,6 @@ reason: include_type_name defaults to true before 7.0 - do: indices.create: - include_type_name: false index: test_index - do: catch: /illegal_argument_exception/ diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_mapping/all_path_options.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_mapping/all_path_options.yml index 44796defef7c5..182ec017e0d30 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_mapping/all_path_options.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_mapping/all_path_options.yml @@ -4,15 +4,12 @@ setup: reason: include_type_name defaults to true before 7.0 - do: indices.create: - include_type_name: false index: test_index1 - do: indices.create: - include_type_name: false index: test_index2 - do: indices.create: - include_type_name: false index: foo diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/20_source_mapping.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/20_source_mapping.yml index de9c51a890c9a..f12864236d7bd 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/20_source_mapping.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/20_source_mapping.yml @@ -14,7 +14,6 @@ # create index - do: indices.create: - include_type_name: false index: source wait_for_active_shards: 1 body: @@ -38,7 +37,6 @@ # create template matching shrink target - do: indices.put_template: - include_type_name: false name: tpl1 body: index_patterns: targ* diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.sort/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.sort/10_basic.yml index 8cc12d4fe959b..b9089689b0cf1 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.sort/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.sort/10_basic.yml @@ -3,7 +3,6 @@ - do: indices.create: - include_type_name: false index: test body: settings: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/20_source_mapping.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/20_source_mapping.yml index 442fe7c896173..3740167a0253a 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/20_source_mapping.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/20_source_mapping.yml @@ -8,7 +8,6 @@ # create index - do: indices.create: - include_type_name: false index: source wait_for_active_shards: 1 body: @@ -31,7 +30,6 @@ # create template matching shrink target - do: indices.put_template: - include_type_name: false name: tpl1 body: index_patterns: targ* diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.stats/13_fields.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.stats/13_fields.yml index 5e7e266394375..42a11e467ccb3 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.stats/13_fields.yml +++ 
b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.stats/13_fields.yml @@ -3,7 +3,6 @@ setup: - do: indices.create: - include_type_name: false index: test1 wait_for_active_shards: all body: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.validate_query/20_query_string.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.validate_query/20_query_string.yml index 8ce0f8c01cb4d..2f74aee3a973e 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.validate_query/20_query_string.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.validate_query/20_query_string.yml @@ -2,7 +2,6 @@ "validate_query with query_string parameters": - do: indices.create: - include_type_name: false index: test body: mappings: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/mget/20_stored_fields.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/mget/20_stored_fields.yml index 5fefcee57c76b..45460deb04e0b 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/mget/20_stored_fields.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/mget/20_stored_fields.yml @@ -6,7 +6,6 @@ - do: indices.create: - include_type_name: false index: test_1 body: mappings: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/mget/23_stored_fields_with_types.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/mget/23_stored_fields_with_types.yml index 33cab111a8b3a..05b9738d46180 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/mget/23_stored_fields_with_types.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/mget/23_stored_fields_with_types.yml @@ -3,6 +3,7 @@ - do: indices.create: + include_type_name: true index: test_1 body: mappings: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/mlt/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/mlt/10_basic.yml index 2457087ad2de5..243d953811336 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/mlt/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/mlt/10_basic.yml @@ -2,7 +2,6 @@ "Basic mlt": - do: indices.create: - include_type_name: false index: test_1 body: settings: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/msearch/20_typed_keys.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/msearch/20_typed_keys.yml index cab5c5ca628c9..0be04fd01c0ed 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/msearch/20_typed_keys.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/msearch/20_typed_keys.yml @@ -2,7 +2,6 @@ setup: - do: indices.create: - include_type_name: false index: test-0 body: settings: @@ -22,7 +21,6 @@ setup: - do: indices.create: - include_type_name: false index: test-1 body: settings: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/mtermvectors/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/mtermvectors/10_basic.yml index 0cdf4bf9aef5c..87c3e6065bba4 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/mtermvectors/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/mtermvectors/10_basic.yml @@ -4,7 +4,6 @@ setup: reason: types are required in requests before 7.0.0 - do: indices.create: - include_type_name: false index: testidx body: mappings: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/mtermvectors/11_basic_with_types.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/mtermvectors/11_basic_with_types.yml 
index 27fa94e85a437..0c037eee9ddd2 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/mtermvectors/11_basic_with_types.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/mtermvectors/11_basic_with_types.yml @@ -1,6 +1,7 @@ setup: - do: indices.create: + include_type_name: true index: testidx body: mappings: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/mtermvectors/20_deprecated.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/mtermvectors/20_deprecated.yml index 633857815c9c0..376192680c99b 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/mtermvectors/20_deprecated.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/mtermvectors/20_deprecated.yml @@ -13,7 +13,6 @@ setup: - do: indices.create: - include_type_name: false index: testidx body: mappings: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/mtermvectors/21_deprecated_with_types.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/mtermvectors/21_deprecated_with_types.yml index 3ee06780a1f65..b0335498e22a1 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/mtermvectors/21_deprecated_with_types.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/mtermvectors/21_deprecated_with_types.yml @@ -9,6 +9,7 @@ - do: indices.create: + include_type_name: true index: testidx body: mappings: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/mtermvectors/30_mix_typeless_typeful.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/mtermvectors/30_mix_typeless_typeful.yml index 24bb8a7d34f5f..b14b5f94ebbc2 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/mtermvectors/30_mix_typeless_typeful.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/mtermvectors/30_mix_typeless_typeful.yml @@ -7,6 +7,7 @@ - do: indices.create: # not using include_type_name: false on purpose + include_type_name: true index: index body: mappings: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/range/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/range/10_basic.yml index a63ad16096472..44c60cfe70b52 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/range/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/range/10_basic.yml @@ -1,7 +1,6 @@ setup: - do: indices.create: - include_type_name: false index: test body: settings: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/100_avg_metric.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/100_avg_metric.yml index 559421a8ad13a..0e57bb9abd667 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/100_avg_metric.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/100_avg_metric.yml @@ -1,7 +1,6 @@ setup: - do: indices.create: - include_type_name: false index: test_1 body: settings: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/10_histogram.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/10_histogram.yml index 784b95cec25cf..f07ac96e67e98 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/10_histogram.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/10_histogram.yml @@ -1,7 +1,6 @@ setup: - do: indices.create: - include_type_name: false index: test_1 body: settings: diff --git 
a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/110_max_metric.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/110_max_metric.yml index c02dc27d3376c..4235679746115 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/110_max_metric.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/110_max_metric.yml @@ -1,7 +1,6 @@ setup: - do: indices.create: - include_type_name: false index: test_1 body: settings: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/120_min_metric.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/120_min_metric.yml index a02ba6840fdee..eb68357258507 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/120_min_metric.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/120_min_metric.yml @@ -1,7 +1,6 @@ setup: - do: indices.create: - include_type_name: false index: test_1 body: settings: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/130_sum_metric.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/130_sum_metric.yml index 486e7f19873e7..3221543276115 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/130_sum_metric.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/130_sum_metric.yml @@ -1,7 +1,6 @@ setup: - do: indices.create: - include_type_name: false index: test_1 body: settings: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/140_value_count_metric.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/140_value_count_metric.yml index d483b5e1a494f..b5ac7d2e5db01 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/140_value_count_metric.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/140_value_count_metric.yml @@ -1,7 +1,6 @@ setup: - do: indices.create: - include_type_name: false index: test_1 body: settings: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/150_stats_metric.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/150_stats_metric.yml index 112e379c3c937..2afad21e61421 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/150_stats_metric.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/150_stats_metric.yml @@ -1,7 +1,6 @@ setup: - do: indices.create: - include_type_name: false index: test_1 body: settings: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/160_extended_stats_metric.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/160_extended_stats_metric.yml index cfb6102b5f606..c70ca3356767a 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/160_extended_stats_metric.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/160_extended_stats_metric.yml @@ -1,7 +1,6 @@ setup: - do: indices.create: - include_type_name: false index: test_1 body: settings: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/170_cardinality_metric.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/170_cardinality_metric.yml index 0ea6ef5604a31..482ab05291a4d 100644 --- 
a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/170_cardinality_metric.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/170_cardinality_metric.yml @@ -1,7 +1,6 @@ setup: - do: indices.create: - include_type_name: false index: test_1 body: settings: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/180_percentiles_tdigest_metric.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/180_percentiles_tdigest_metric.yml index 35f07a65390a5..faae9c1ccda82 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/180_percentiles_tdigest_metric.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/180_percentiles_tdigest_metric.yml @@ -1,7 +1,6 @@ setup: - do: indices.create: - include_type_name: false index: test_1 body: settings: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/190_percentiles_hdr_metric.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/190_percentiles_hdr_metric.yml index 6e85e34538fde..809e18458f0bd 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/190_percentiles_hdr_metric.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/190_percentiles_hdr_metric.yml @@ -1,7 +1,6 @@ setup: - do: indices.create: - include_type_name: false index: test_1 body: settings: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/200_top_hits_metric.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/200_top_hits_metric.yml index b77362963684c..11e3d5906d7e9 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/200_top_hits_metric.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/200_top_hits_metric.yml @@ -1,7 +1,6 @@ setup: - do: indices.create: - include_type_name: false index: my-index body: settings: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/20_terms.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/20_terms.yml index 88e0ecff29608..3d9f8a9c8af60 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/20_terms.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/20_terms.yml @@ -1,7 +1,6 @@ setup: - do: indices.create: - include_type_name: false index: test_1 body: settings: @@ -25,7 +24,6 @@ setup: - do: indices.create: - include_type_name: false index: test_2 body: settings: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/220_filters_bucket.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/220_filters_bucket.yml index 74e077d4c6a51..a6b7cae104418 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/220_filters_bucket.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/220_filters_bucket.yml @@ -1,7 +1,6 @@ setup: - do: indices.create: - include_type_name: false index: test_1 body: settings: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/230_composite.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/230_composite.yml index 5e1b336e7ccf8..8532b40fbc1e1 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/230_composite.yml +++ 
b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/230_composite.yml @@ -2,7 +2,6 @@ setup: - do: indices.create: - include_type_name: false index: test body: mappings: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/240_max_buckets.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/240_max_buckets.yml index 5ed025a72095f..3dd8d345043c3 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/240_max_buckets.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/240_max_buckets.yml @@ -2,7 +2,6 @@ setup: - do: indices.create: - include_type_name: false index: test body: mappings: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/260_weighted_avg.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/260_weighted_avg.yml index 65f5e29f7e8e7..c5988fc9e5dc4 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/260_weighted_avg.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/260_weighted_avg.yml @@ -4,7 +4,6 @@ setup: reason: weighted_avg is only available as of 6.4.0 - do: indices.create: - include_type_name: false index: test_1 body: settings: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/270_median_absolute_deviation_metric.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/270_median_absolute_deviation_metric.yml index 3a96000f3a5a8..0cba08fccae9b 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/270_median_absolute_deviation_metric.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/270_median_absolute_deviation_metric.yml @@ -4,7 +4,6 @@ setup: reason: "added in 6.6.0" - do: indices.create: - include_type_name: false index: test body: settings: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/30_sig_terms.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/30_sig_terms.yml index 3376ce4441457..99d0f29a9b2ad 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/30_sig_terms.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/30_sig_terms.yml @@ -2,7 +2,6 @@ "Default index": - do: indices.create: - include_type_name: false index: goodbad body: settings: @@ -77,7 +76,6 @@ "IP test": - do: indices.create: - include_type_name: false index: ip_index body: mappings: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/40_range.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/40_range.yml index d4492861e72e8..b1f093c138048 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/40_range.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/40_range.yml @@ -1,7 +1,6 @@ setup: - do: indices.create: - include_type_name: false index: test body: settings: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/50_filter.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/50_filter.yml index 6964acdab06d9..c32cae9ff8239 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/50_filter.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/50_filter.yml @@ -1,7 +1,6 @@ setup: - do: 
indices.create: - include_type_name: false index: test body: settings: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/70_adjacency_matrix.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/70_adjacency_matrix.yml index ad364e930df89..3394b9a5a602a 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/70_adjacency_matrix.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/70_adjacency_matrix.yml @@ -1,7 +1,6 @@ setup: - do: indices.create: - include_type_name: false index: test body: settings: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/80_typed_keys.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/80_typed_keys.yml index 5afd8b925e652..7897d1feb5aa6 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/80_typed_keys.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/80_typed_keys.yml @@ -2,7 +2,6 @@ setup: - do: indices.create: - include_type_name: false index: test body: settings: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/90_sig_text.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/90_sig_text.yml index f730a099989fe..673d19e04cf22 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/90_sig_text.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/90_sig_text.yml @@ -3,7 +3,6 @@ - do: indices.create: - include_type_name: false index: goodbad body: settings: @@ -79,7 +78,6 @@ - do: indices.create: - include_type_name: false index: goodbad body: settings: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.highlight/10_unified.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.highlight/10_unified.yml index 03989a2da185a..edb1ba1b05934 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.highlight/10_unified.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.highlight/10_unified.yml @@ -1,7 +1,6 @@ setup: - do: indices.create: - include_type_name: false index: test body: mappings: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.highlight/20_fvh.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.highlight/20_fvh.yml index fd1bdbc86efa4..d411f55106d75 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.highlight/20_fvh.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.highlight/20_fvh.yml @@ -1,7 +1,6 @@ setup: - do: indices.create: - include_type_name: false index: test body: mappings: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.highlight/30_max_analyzed_offset.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.highlight/30_max_analyzed_offset.yml index f90d18c6d3996..334708b54b066 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.highlight/30_max_analyzed_offset.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.highlight/30_max_analyzed_offset.yml @@ -2,7 +2,6 @@ setup: - do: indices.create: - include_type_name: false index: test1 body: settings: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.inner_hits/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.inner_hits/10_basic.yml index a2b6a480d48e7..629a6d4de34a1 100644 --- 
a/rest-api-spec/src/main/resources/rest-api-spec/test/search.inner_hits/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.inner_hits/10_basic.yml @@ -2,7 +2,6 @@ setup: - do: indices.create: - include_type_name: false index: test body: mappings: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/10_source_filtering.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/10_source_filtering.yml index 992d6325a1381..501fb1292da94 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search/10_source_filtering.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/10_source_filtering.yml @@ -2,7 +2,6 @@ setup: - do: indices.create: - include_type_name: false index: test body: mappings: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/110_field_collapsing.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/110_field_collapsing.yml index c0e998c711895..99a7300abf1dd 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search/110_field_collapsing.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/110_field_collapsing.yml @@ -1,7 +1,6 @@ setup: - do: indices.create: - include_type_name: false index: test body: mappings: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/115_multiple_field_collapsing.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/115_multiple_field_collapsing.yml index ca55785066719..b10401f48dbce 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search/115_multiple_field_collapsing.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/115_multiple_field_collapsing.yml @@ -5,7 +5,6 @@ reason: using multiple field collapsing from 7.0 on - do: indices.create: - include_type_name: false index: addresses body: settings: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/120_batch_reduce_size.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/120_batch_reduce_size.yml index 5cc22387c5118..9c23899fc12dc 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search/120_batch_reduce_size.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/120_batch_reduce_size.yml @@ -1,7 +1,6 @@ setup: - do: indices.create: - include_type_name: false index: test_1 body: settings: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/140_pre_filter_search_shards.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/140_pre_filter_search_shards.yml index e0999db549127..e9fb959406e0e 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search/140_pre_filter_search_shards.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/140_pre_filter_search_shards.yml @@ -1,7 +1,6 @@ setup: - do: indices.create: - include_type_name: false index: index_1 body: settings: @@ -13,7 +12,6 @@ setup: format: "yyyy-MM-dd" - do: indices.create: - include_type_name: false index: index_2 body: settings: @@ -25,7 +23,6 @@ setup: format: "yyyy-MM-dd" - do: indices.create: - include_type_name: false index: index_3 body: settings: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/150_rewrite_on_coordinator.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/150_rewrite_on_coordinator.yml index 626d4a8088986..7724fdc8c0673 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search/150_rewrite_on_coordinator.yml +++ 
b/rest-api-spec/src/main/resources/rest-api-spec/test/search/150_rewrite_on_coordinator.yml @@ -1,7 +1,6 @@ "Ensure that we fetch the document only once": - do: indices.create: - include_type_name: false index: search_index body: settings: @@ -36,7 +35,7 @@ search: rest_total_hits_as_int: true index: "search_index" - body: { "size" : 0, "query" : { "terms" : { "user" : { "index": "lookup_index", "type": "doc", "id": "1", "path": "followers"} } } } + body: { "size" : 0, "query" : { "terms" : { "user" : { "index": "lookup_index", "type": "_doc", "id": "1", "path": "followers"} } } } - do: indices.create: index: lookup_index @@ -44,14 +43,12 @@ settings: number_of_shards: 1 mappings: - doc: - properties: - followers: - type: keyword + properties: + followers: + type: keyword - do: index: index: lookup_index - type: doc id: 1 body: { "followers" : ["1", "3"] } - do: @@ -61,7 +58,7 @@ search: rest_total_hits_as_int: true index: "search_index" - body: { "size" : 0, "query" : { "terms" : { "user" : { "index": "lookup_index", "type": "doc", "id": "1", "path": "followers"} } } } + body: { "size" : 0, "query" : { "terms" : { "user" : { "index": "lookup_index", "type": "_doc", "id": "1", "path": "followers"} } } } - match: { _shards.total: 5 } - match: { _shards.successful: 5 } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/160_exists_query.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/160_exists_query.yml index 89cc7f2135232..d94e86bb6c565 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search/160_exists_query.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/160_exists_query.yml @@ -4,7 +4,6 @@ setup: - do: indices.create: - include_type_name: false index: test body: mappings: @@ -140,7 +139,6 @@ setup: - do: indices.create: - include_type_name: false index: test-no-dv body: mappings: @@ -292,7 +290,6 @@ setup: - do: indices.create: - include_type_name: false index: test-unmapped body: mappings: @@ -310,7 +307,6 @@ setup: - do: indices.create: - include_type_name: false index: test-empty body: mappings: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/170_terms_query.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/170_terms_query.yml index 73aea9c581454..89ea24618c68f 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search/170_terms_query.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/170_terms_query.yml @@ -5,7 +5,6 @@ reason: index.max_terms_count setting has been added in 7.0.0 - do: indices.create: - include_type_name: false index: test_index body: settings: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/171_terms_query_with_types.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/171_terms_query_with_types.yml index 515dcfe463069..af9e276558a09 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search/171_terms_query_with_types.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/171_terms_query_with_types.yml @@ -5,6 +5,7 @@ reason: index.max_terms_count setting has been added in 7.0.0 - do: indices.create: + include_type_name: true index: test_index body: settings: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/180_locale_dependent_mapping.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/180_locale_dependent_mapping.yml index 2e0dd9339c234..e9ba863675dfa 100644 --- 
a/rest-api-spec/src/main/resources/rest-api-spec/test/search/180_locale_dependent_mapping.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/180_locale_dependent_mapping.yml @@ -5,7 +5,6 @@ reason: JDK9 only supports this with a special sysproperty added in 6.2.0 - do: indices.create: - include_type_name: false index: test_index body: settings: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/190_index_prefix_search.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/190_index_prefix_search.yml index 296db2a01dc9a..40c80b88cfb1b 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search/190_index_prefix_search.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/190_index_prefix_search.yml @@ -5,7 +5,6 @@ setup: - do: indices.create: - include_type_name: false index: test body: mappings: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/200_ignore_malformed.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/200_ignore_malformed.yml index 70bc254f7b42c..71ddb32302396 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search/200_ignore_malformed.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/200_ignore_malformed.yml @@ -6,7 +6,6 @@ setup: - do: indices.create: - include_type_name: false index: test body: mappings: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/200_index_phrase_search.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/200_index_phrase_search.yml index 02f4b55fd3a1c..b48857be4e7a1 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search/200_index_phrase_search.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/200_index_phrase_search.yml @@ -5,7 +5,6 @@ reason: index_phrase is only available as of 7.0.0 - do: indices.create: - include_type_name: false index: test body: mappings: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/230_interval_query.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/230_interval_query.yml index bd7c2ded35c15..e6e54cbb275e8 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search/230_interval_query.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/230_interval_query.yml @@ -5,7 +5,6 @@ setup: - do: indices.create: - include_type_name: false index: test body: mappings: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/60_query_string.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/60_query_string.yml index 7582ac0df0334..131c8f92a231e 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search/60_query_string.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/60_query_string.yml @@ -2,7 +2,6 @@ "search with query_string parameters": - do: indices.create: - include_type_name: false index: test body: mappings: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search_shards/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search_shards/10_basic.yml index e1e697b496ed1..03f218b140b8f 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search_shards/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search_shards/10_basic.yml @@ -15,7 +15,6 @@ "Search shards aliases with and without filters": - do: indices.create: - include_type_name: false index: test_index body: settings: diff --git 
a/rest-api-spec/src/main/resources/rest-api-spec/test/suggest/20_completion.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/suggest/20_completion.yml index 0afafdfe0b075..b64a51141dc6e 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/suggest/20_completion.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/suggest/20_completion.yml @@ -5,7 +5,6 @@ setup: - do: indices.create: - include_type_name: false index: test body: mappings: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/suggest/30_context.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/suggest/30_context.yml index 1f16d5e35d6af..e2c7ccfb421e3 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/suggest/30_context.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/suggest/30_context.yml @@ -5,7 +5,6 @@ setup: - do: indices.create: - include_type_name: false index: test body: mappings: @@ -189,9 +188,7 @@ setup: indices.refresh: {} - do: - indices.get_mapping: - include_type_name: false - + indices.get_mapping: {} - do: search: rest_total_hits_as_int: true @@ -240,8 +237,7 @@ setup: indices.refresh: {} - do: - indices.get_mapping: - include_type_name: false + indices.get_mapping: {} - do: search: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/suggest/40_typed_keys.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/suggest/40_typed_keys.yml index bf11481d0b900..daac7d895611c 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/suggest/40_typed_keys.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/suggest/40_typed_keys.yml @@ -2,7 +2,6 @@ setup: - do: indices.create: - include_type_name: false index: test body: settings: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/suggest/50_completion_with_multi_fields.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/suggest/50_completion_with_multi_fields.yml index fa453b9d84db2..a29019183e199 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/suggest/50_completion_with_multi_fields.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/suggest/50_completion_with_multi_fields.yml @@ -8,7 +8,6 @@ - do: indices.create: - include_type_name: false index: completion_with_sub_keyword body: mappings: @@ -74,13 +73,12 @@ index: completion_with_sub_completion body: mappings: - test: - "properties": - "suggest_1": - "type": "completion" - "fields": - "suggest_2": - "type": "completion" + "properties": + "suggest_1": + "type": "completion" + "fields": + "suggest_2": + "type": "completion" - do: index: @@ -122,7 +120,6 @@ - do: indices.create: - include_type_name: false index: completion_with_context body: mappings: @@ -192,7 +189,6 @@ - do: indices.create: - include_type_name: false index: completion_with_weight body: mappings: @@ -249,7 +245,6 @@ - do: indices.create: - include_type_name: false index: geofield_with_completion body: mappings: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/termvectors/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/termvectors/10_basic.yml index 0a4286ad3e1be..62ec86118e5bb 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/termvectors/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/termvectors/10_basic.yml @@ -5,7 +5,6 @@ setup: - do: indices.create: - include_type_name: false index: testidx body: mappings: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/termvectors/11_basic_with_types.yml 
b/rest-api-spec/src/main/resources/rest-api-spec/test/termvectors/11_basic_with_types.yml index 2fbcb815d87e5..992d6db7ca786 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/termvectors/11_basic_with_types.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/termvectors/11_basic_with_types.yml @@ -1,6 +1,7 @@ setup: - do: indices.create: + include_type_name: true index: testidx body: mappings: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/termvectors/20_issue7121.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/termvectors/20_issue7121.yml index 94b6413af772f..5f43e8a247923 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/termvectors/20_issue7121.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/termvectors/20_issue7121.yml @@ -7,7 +7,6 @@ setup: "Term vector API should return 'found: false' for docs between index and refresh": - do: indices.create: - include_type_name: false index: testidx body: settings: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/termvectors/21_issue7121_with_types.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/termvectors/21_issue7121_with_types.yml index 168c913b75cb3..cf597bf141f61 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/termvectors/21_issue7121_with_types.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/termvectors/21_issue7121_with_types.yml @@ -1,6 +1,7 @@ "Term vector API should return 'found: false' for docs between index and refresh": - do: indices.create: + include_type_name: true index: testidx body: settings: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/termvectors/50_mix_typeless_typeful.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/termvectors/50_mix_typeless_typeful.yml index 403f2b5b8cf67..4382442dee4dd 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/termvectors/50_mix_typeless_typeful.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/termvectors/50_mix_typeless_typeful.yml @@ -7,6 +7,7 @@ - do: indices.create: # not using include_type_name: false on purpose + include_type_name: true index: index body: mappings: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/update/11_shard_header.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/update/11_shard_header.yml index 4afe78ca7d30a..41dba3551e64c 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/update/11_shard_header.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/update/11_shard_header.yml @@ -7,7 +7,6 @@ - do: indices.create: - include_type_name: false index: foobar body: settings: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/update/40_routing.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/update/40_routing.yml index 374390f4b9716..6f43d381e0537 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/update/40_routing.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/update/40_routing.yml @@ -7,7 +7,6 @@ - do: indices.create: - include_type_name: false index: test_1 body: settings: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/update/60_refresh.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/update/60_refresh.yml index 9e7dfdf4698f2..3a74f75f4f11d 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/update/60_refresh.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/update/60_refresh.yml @@ -7,7 +7,6 @@ - do: indices.create: - 
include_type_name: false index: test_1 body: settings: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/update/85_fields_meta.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/update/85_fields_meta.yml index 14b096211c5c2..fe76ab5299cda 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/update/85_fields_meta.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/update/85_fields_meta.yml @@ -7,7 +7,6 @@ - do: indices.create: - include_type_name: false index: test_1 - do: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/update/90_mix_typeless_typeful.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/update/90_mix_typeless_typeful.yml index 4caeb712c2896..0ca25e8598c24 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/update/90_mix_typeless_typeful.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/update/90_mix_typeless_typeful.yml @@ -7,6 +7,7 @@ - do: indices.create: # not using include_type_name: false on purpose + include_type_name: true index: index body: mappings: @@ -47,6 +48,7 @@ - do: indices.create: # not using include_type_name: false on purpose + include_type_name: true index: index body: mappings: diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestExecutionContext.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestExecutionContext.java index 813491f2655b7..dbda656656f01 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestExecutionContext.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestExecutionContext.java @@ -98,25 +98,53 @@ public ClientYamlTestResponse callApi(String apiName, Map params } } - // Although include_type_name defaults to false, there is a large number of typed index creations - // in REST tests that need to be manually converted to typeless calls. As a temporary measure, we - // specify include_type_name=true in indices.create calls, unless the parameter has been set otherwise. - // This workaround will be removed once we convert all index creations to be typeless. + if (esVersion().before(Version.V_7_0_0)) { + adaptRequestForOlderVersion(apiName, bodies, requestParams); + } + + HttpEntity entity = createEntity(bodies, requestHeaders); + try { + response = callApiInternal(apiName, requestParams, entity, requestHeaders, nodeSelector); + return response; + } catch(ClientYamlTestResponseException e) { + response = e.getRestTestResponse(); + throw e; + } finally { + // if we hit a bad exception the response is null + Object responseBody = response != null ? response.getBody() : null; + //we always stash the last response body + stash.stashValue("body", responseBody); + } + } + + /** + * To allow tests to run against a mixed 7.x/6.x cluster, we make certain modifications to the + * request related to types. + * + * Specifically, we generally use typeless index creation and document writes in test set-up code. + * This functionality is supported in 7.x, but is not supported in 6.x (or is not the default + * behavior). Here we modify the request so that it will work against a 6.x node. + */ + private void adaptRequestForOlderVersion(String apiName, + List<Map<String, Object>> bodies, + Map<String, String> requestParams) { + // For index creations, we specify 'include_type_name=false' if it is not explicitly set.
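+            // Illustration (hypothetical index name): a typeless test step such as
+            //     indices.create: { index: test }
+            // reaches a 6.x node as
+            //     PUT /test?include_type_name=false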
This + // allows us to omit the parameter in the test description, while still being able to communicate + // with 6.x nodes where include_type_name defaults to 'true'. if (apiName.equals("indices.create") && requestParams.containsKey(INCLUDE_TYPE_NAME_PARAMETER) == false) { - requestParams.put(INCLUDE_TYPE_NAME_PARAMETER, "true"); + requestParams.put(INCLUDE_TYPE_NAME_PARAMETER, "false"); } - // When running tests against a mixed 7.x/6.x cluster we need to add the type to the document API - // requests if its not already included. + // We add the type to the document API requests if it's not already included. if ((apiName.equals("index") || apiName.equals("update") || apiName.equals("delete") || apiName.equals("get")) - && esVersion().before(Version.V_7_0_0) && requestParams.containsKey("type") == false) { + && requestParams.containsKey("type") == false) { requestParams.put("type", "_doc"); } - // When running tests against a mixed 7.x/6.x cluster we need to add the type to the bulk API requests - // if its not already included. The type can either be on the request parameters or in the action metadata - // in the body of the request so we need to be sensitive to both scenarios - if (apiName.equals("bulk") && esVersion().before(Version.V_7_0_0) && requestParams.containsKey("type") == false) { + // We also add the type to the bulk API requests if it's not already included. The type can either + // be on the request parameters or in the action metadata in the body of the request so we need to + // be sensitive to both scenarios. + if (apiName.equals("bulk") && requestParams.containsKey("type") == false) { if (requestParams.containsKey("index")) { requestParams.put("type", "_doc"); } else { @@ -145,20 +173,6 @@ && esVersion().before(Version.V_7_0_0) && requestParams.containsKey("type") == f } } } - - HttpEntity entity = createEntity(bodies, requestHeaders); - try { - response = callApiInternal(apiName, requestParams, entity, requestHeaders, nodeSelector); - return response; - } catch(ClientYamlTestResponseException e) { - response = e.getRestTestResponse(); - throw e; - } finally { - // if we hit a bad exception the response is null - Object responseBody = response != null ? 
response.getBody() : null; - //we always stash the last response body - stash.stashValue("body", responseBody); - } } private HttpEntity createEntity(List<Map<String, Object>> bodies, Map<String, String> headers) throws IOException { diff --git a/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/ClientYamlTestExecutionContextTests.java b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/ClientYamlTestExecutionContextTests.java index fbf7f10e5e186..8182e2d0fc7ac 100644 --- a/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/ClientYamlTestExecutionContextTests.java +++ b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/ClientYamlTestExecutionContextTests.java @@ -20,8 +20,10 @@ package org.elasticsearch.test.rest.yaml; import org.apache.http.HttpEntity; +import org.elasticsearch.Version; import org.elasticsearch.client.NodeSelector; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.VersionUtils; import java.io.IOException; import java.util.Collections; @@ -33,6 +35,7 @@ public class ClientYamlTestExecutionContextTests extends ESTestCase { public void testHeadersSupportStashedValueReplacement() throws IOException { final AtomicReference<Map<String, String>> headersRef = new AtomicReference<>(); + final Version version = VersionUtils.randomVersion(random()); final ClientYamlTestExecutionContext context = new ClientYamlTestExecutionContext(null, randomBoolean()) { @Override @@ -41,6 +44,11 @@ ClientYamlTestResponse callApiInternal(String apiName, Map param headersRef.set(headers); return null; } + + @Override + public Version esVersion() { + return version; + } }; final Map<String, String> headers = new HashMap<>(); headers.put("foo", "$bar"); diff --git a/x-pack/plugin/ccr/qa/rest/src/test/resources/rest-api-spec/test/ccr/follow_and_unfollow.yml b/x-pack/plugin/ccr/qa/rest/src/test/resources/rest-api-spec/test/ccr/follow_and_unfollow.yml index d5cd8ebd4f1ab..42b92a7011a83 100644 --- a/x-pack/plugin/ccr/qa/rest/src/test/resources/rest-api-spec/test/ccr/follow_and_unfollow.yml +++ b/x-pack/plugin/ccr/qa/rest/src/test/resources/rest-api-spec/test/ccr/follow_and_unfollow.yml @@ -28,10 +28,9 @@ soft_deletes: enabled: true mappings: - doc: - properties: - field: - type: keyword + properties: + field: + type: keyword - is_true: acknowledged - do: diff --git a/x-pack/plugin/ccr/qa/rest/src/test/resources/rest-api-spec/test/ccr/follow_stats.yml b/x-pack/plugin/ccr/qa/rest/src/test/resources/rest-api-spec/test/ccr/follow_stats.yml index 220463a60b258..8be035961e979 100644 --- a/x-pack/plugin/ccr/qa/rest/src/test/resources/rest-api-spec/test/ccr/follow_stats.yml +++ b/x-pack/plugin/ccr/qa/rest/src/test/resources/rest-api-spec/test/ccr/follow_stats.yml @@ -28,10 +28,9 @@ soft_deletes: enabled: true mappings: - doc: - properties: - field: - type: keyword + properties: + field: + type: keyword - do: ccr.follow: diff --git a/x-pack/plugin/ccr/qa/rest/src/test/resources/rest-api-spec/test/ccr/index_directly_into_follower_index.yml b/x-pack/plugin/ccr/qa/rest/src/test/resources/rest-api-spec/test/ccr/index_directly_into_follower_index.yml index 62878437c37e3..9d021df585ffe 100644 --- a/x-pack/plugin/ccr/qa/rest/src/test/resources/rest-api-spec/test/ccr/index_directly_into_follower_index.yml +++ b/x-pack/plugin/ccr/qa/rest/src/test/resources/rest-api-spec/test/ccr/index_directly_into_follower_index.yml @@ -28,10 +28,9 @@ soft_deletes: enabled: true mappings: - doc: - properties: - field: - type: keyword + properties: + field: + type: keyword - is_true: acknowledged - do: diff --git
a/x-pack/plugin/src/test/resources/rest-api-spec/test/authenticate/10_field_level_security.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/authenticate/10_field_level_security.yml index 4dadd1370da8c..96063bdc45895 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/authenticate/10_field_level_security.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/authenticate/10_field_level_security.yml @@ -85,12 +85,11 @@ teardown: aliases: the_alias : {} mappings: - doc: - properties: - location: - properties: - city: - type: "keyword" + properties: + location: + properties: + city: + type: "keyword" settings: index: number_of_shards: 1 @@ -100,9 +99,9 @@ teardown: bulk: refresh: true body: - - '{"index": {"_index": "test_index", "_type": "doc"}}' + - '{"index": {"_index": "test_index"}}' - '{"marker": "test_1", "location.city": "bos"}' - - '{"index": {"_index": "test_index", "_type": "doc"}}' + - '{"index": {"_index": "test_index"}}' - '{"marker": "test_2", "location.city": "ams"}' - do: diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/graph/10_basic.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/graph/10_basic.yml index 5ac76101f2693..ccd861e6358e0 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/graph/10_basic.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/graph/10_basic.yml @@ -9,10 +9,9 @@ setup: number_of_shards: 1 number_of_replicas: 0 mappings: - test: - properties: - keys: - type : integer + properties: + keys: + type : integer --- "Test basic graph explore": @@ -22,14 +21,12 @@ setup: - do: index: index: test_1 - type: test id: 1 body: { keys: [1,2,3] } - do: index: index: test_1 - type: test id: 2 body: { keys: [4,5,6] } @@ -44,7 +41,6 @@ setup: - do: xpack.graph.explore: index: test_1 - type: test body: {"query": {"match": {"keys": 1}},"controls":{"use_significance":false},"vertices":[{"field": "keys","min_doc_count": 1}]} - length: {failures: 0} - length: {vertices: 3} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/get_datafeed_stats.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/get_datafeed_stats.yml index 70626a0afd752..f9178dc6f71f5 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/get_datafeed_stats.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/get_datafeed_stats.yml @@ -11,10 +11,9 @@ setup: index: number_of_replicas: 1 mappings: - type-1: - properties: - time: - type: date + properties: + time: + type: date - do: headers: @@ -26,10 +25,9 @@ setup: index: number_of_replicas: 1 mappings: - type-2: - properties: - time: - type: date + properties: + time: + type: date - do: headers: diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/ml_anomalies_default_mappings.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/ml_anomalies_default_mappings.yml index 91ba0ae17ce0d..c07bdf2add3c0 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/ml_anomalies_default_mappings.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/ml_anomalies_default_mappings.yml @@ -42,7 +42,6 @@ setup: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. 
the test setup superuser indices.get_field_mapping: - include_type_name: false index: .ml-anomalies-shared fields: new_field - match: {\.ml-anomalies-shared.mappings.new_field.mapping.new_field.type: keyword} @@ -75,7 +74,6 @@ setup: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser indices.get_mapping: - include_type_name: false index: .ml-anomalies-shared - is_true: \.ml-anomalies-shared.mappings._meta.version @@ -104,6 +102,5 @@ setup: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser indices.get_mapping: - include_type_name: false index: .ml-anomalies-shared - is_true: \.ml-anomalies-shared.mappings._meta.version diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/preview_datafeed.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/preview_datafeed.yml index 23cdb3b55c664..0818bfe4d25fe 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/preview_datafeed.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/preview_datafeed.yml @@ -4,21 +4,19 @@ setup: index: airline-data body: mappings: - response: - properties: - time: - type: date - airline: - type: keyword - responsetime: - type: float - event_rate: - type: integer + properties: + time: + type: date + airline: + type: keyword + responsetime: + type: float + event_rate: + type: integer - do: index: index: airline-data - type: response id: 1 body: > { @@ -31,7 +29,6 @@ setup: - do: index: index: airline-data - type: response id: 2 body: > { @@ -44,7 +41,6 @@ setup: - do: index: index: airline-data - type: response id: 3 body: > { @@ -57,7 +53,6 @@ setup: - do: index: index: airline-data - type: response id: 4 body: > { diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/set_upgrade_mode.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/set_upgrade_mode.yml index 95ce67299c76a..be1e0203a92c7 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/set_upgrade_mode.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/set_upgrade_mode.yml @@ -7,16 +7,15 @@ setup: index: airline-data body: mappings: - response: - properties: - time: - type: date - airline: - type: keyword - airport: - type: text - responsetime: - type: float + properties: + time: + type: date + airline: + type: keyword + airport: + type: text + responsetime: + type: float - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. 
the test setup superuser diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/start_stop_datafeed.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/start_stop_datafeed.yml index 1dd201007f96f..8722bab94e216 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/start_stop_datafeed.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/start_stop_datafeed.yml @@ -6,16 +6,15 @@ setup: index: airline-data body: mappings: - response: - properties: - time: - type: date - airline: - type: keyword - airport: - type: text - responsetime: - type: float + properties: + time: + type: date + airline: + type: keyword + airport: + type: text + responsetime: + type: float - do: headers: diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/get_rollup_caps.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/get_rollup_caps.yml index f39cfc6ca13a5..0b5a8a2e11180 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/get_rollup_caps.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/get_rollup_caps.yml @@ -4,7 +4,6 @@ setup: - do: indices.create: index: foo - include_type_name: false body: mappings: properties: @@ -16,7 +15,6 @@ setup: - do: indices.create: index: foo2 - include_type_name: false body: mappings: properties: @@ -28,7 +26,6 @@ setup: - do: indices.create: index: foo3 - include_type_name: false body: mappings: properties: diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/get_rollup_index_caps.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/get_rollup_index_caps.yml index 38b7303ecb3ae..b300af5e0a014 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/get_rollup_index_caps.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/get_rollup_index_caps.yml @@ -4,7 +4,6 @@ setup: - do: indices.create: index: foo - include_type_name: false body: mappings: properties: @@ -16,7 +15,6 @@ setup: - do: indices.create: index: foo2 - include_type_name: false body: mappings: properties: @@ -28,7 +26,6 @@ setup: - do: indices.create: index: foo3 - include_type_name: false body: mappings: properties: diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/security/authz/10_index_doc.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/security/authz/10_index_doc.yml index 2317d29ee703b..5732b19f93665 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/security/authz/10_index_doc.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/security/authz/10_index_doc.yml @@ -39,10 +39,9 @@ setup: number_of_shards: 1 number_of_replicas: 0 mappings: - doc: - properties: - name: - type: "keyword" + properties: + name: + type: "keyword" - do: indices.create: @@ -53,10 +52,9 @@ setup: number_of_shards: 1 number_of_replicas: 0 mappings: - doc: - properties: - name: - type: "keyword" + properties: + name: + type: "keyword" - do: indices.create: index: only_delete @@ -66,10 +64,9 @@ setup: number_of_shards: 1 number_of_replicas: 0 mappings: - doc: - properties: - name: - type: "keyword" + properties: + name: + type: "keyword" - do: indices.create: @@ -80,10 +77,9 @@ setup: number_of_shards: 1 number_of_replicas: 0 mappings: - doc: - properties: - name: - type: "keyword" + properties: + name: + type: "keyword" --- teardown: - do: @@ -104,7 +100,6 @@ teardown: create: id: 1 index: only_index - type: doc body: > { "name" : "doc1" @@ -115,7 +110,6 @@ teardown: create: id: 2 index: everything - type: doc body: > 
{ "name" : "doc2" @@ -126,9 +120,9 @@ teardown: bulk: refresh: true body: - - '{"index": {"_index": "only_index", "_type": "doc", "_id": "3"}}' + - '{"index": {"_index": "only_index", "_id": "3"}}' - '{"name": "doc3"}' - - '{"index": {"_index": "everything", "_type": "doc", "_id": "4"}}' + - '{"index": {"_index": "everything", "_id": "4"}}' - '{"name": "doc4"}' - do: @@ -136,7 +130,7 @@ teardown: bulk: refresh: true body: - - '{"index": {"_index": "only_index", "_type": "doc", "_id": "5"}}' + - '{"index": {"_index": "only_index", "_id": "5"}}' - '{"name": "doc5"}' - do: @@ -144,7 +138,7 @@ teardown: bulk: refresh: true body: - - '{"index": {"_index": "everything", "_type": "doc", "_id": "6"}}' + - '{"index": {"_index": "everything", "_id": "6"}}' - '{"name": "doc6"}' - do: # superuser @@ -170,7 +164,6 @@ teardown: refresh: true id: 7 index: only_read - type: doc body: > { "name" : "doc7" @@ -183,7 +176,6 @@ teardown: refresh: true id: 8 index: only_delete - type: doc body: > { "name" : "doc8" @@ -194,9 +186,9 @@ teardown: bulk: refresh: true body: - - '{"index": {"_index": "only_read", "_type": "doc", "_id": "9"}}' + - '{"index": {"_index": "only_read", "_id": "9"}}' - '{"name": "doc9"}' - - '{"index": {"_index": "only_delete", "_type": "doc", "_id": "10"}}' + - '{"index": {"_index": "only_delete", "_id": "10"}}' - '{"name": "doc10"}' - match: { errors: true } - match: { items.0.index.status: 403 } @@ -209,7 +201,7 @@ teardown: bulk: refresh: true body: - - '{"index": {"_index": "only_read", "_type": "doc", "_id": "11"}}' + - '{"index": {"_index": "only_read", "_id": "11"}}' - '{"name": "doc11"}' - match: { errors: true } - match: { items.0.index.status: 403 } @@ -220,7 +212,7 @@ teardown: bulk: refresh: true body: - - '{"index": {"_index": "only_delete", "_type": "doc", "_id": "12"}}' + - '{"index": {"_index": "only_delete", "_id": "12"}}' - '{"name": "doc12"}' - match: { errors: true } - match: { items.0.index.status: 403 } @@ -245,9 +237,9 @@ teardown: bulk: refresh: true body: - - '{"index": {"_index": "only_read", "_type": "doc", "_id": "13"}}' + - '{"index": {"_index": "only_read", "_id": "13"}}' - '{"name": "doc13"}' - - '{"index": {"_index": "only_index", "_type": "doc", "_id": "14"}}' + - '{"index": {"_index": "only_index", "_id": "14"}}' - '{"name": "doc14"}' - match: { errors: true } - match: { items.0.index.status: 403 } diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/security/authz/11_delete_doc.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/security/authz/11_delete_doc.yml index 560c4a2cfb241..32e4694311f88 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/security/authz/11_delete_doc.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/security/authz/11_delete_doc.yml @@ -39,10 +39,9 @@ setup: number_of_shards: 1 number_of_replicas: 0 mappings: - doc: - properties: - name: - type: "keyword" + properties: + name: + type: "keyword" - do: indices.create: @@ -53,10 +52,9 @@ setup: number_of_shards: 1 number_of_replicas: 0 mappings: - doc: - properties: - name: - type: "keyword" + properties: + name: + type: "keyword" - do: indices.create: index: only_delete @@ -66,10 +64,9 @@ setup: number_of_shards: 1 number_of_replicas: 0 mappings: - doc: - properties: - name: - type: "keyword" + properties: + name: + type: "keyword" - do: indices.create: @@ -80,34 +77,33 @@ setup: number_of_shards: 1 number_of_replicas: 0 mappings: - doc: - properties: - name: - type: "keyword" + properties: + name: + type: "keyword" - do: bulk: refresh: 
true body: - - '{"index": {"_index": "only_read", "_type": "doc", "_id": "1"}}' + - '{"index": {"_index": "only_read", "_id": "1"}}' - '{"name": "doc1"}' - - '{"index": {"_index": "only_index", "_type": "doc", "_id": "2"}}' + - '{"index": {"_index": "only_index", "_id": "2"}}' - '{"name": "doc2"}' - - '{"index": {"_index": "only_delete", "_type": "doc", "_id": "3"}}' + - '{"index": {"_index": "only_delete", "_id": "3"}}' - '{"name": "doc3"}' - - '{"index": {"_index": "only_delete", "_type": "doc", "_id": "4"}}' + - '{"index": {"_index": "only_delete", "_id": "4"}}' - '{"name": "doc4"}' - - '{"index": {"_index": "only_delete", "_type": "doc", "_id": "5"}}' + - '{"index": {"_index": "only_delete", "_id": "5"}}' - '{"name": "doc5"}' - - '{"index": {"_index": "only_delete", "_type": "doc", "_id": "6"}}' + - '{"index": {"_index": "only_delete", "_id": "6"}}' - '{"name": "doc6"}' - - '{"index": {"_index": "only_delete", "_type": "doc", "_id": "7"}}' + - '{"index": {"_index": "only_delete", "_id": "7"}}' - '{"name": "doc7"}' - - '{"index": {"_index": "everything", "_type": "doc", "_id": "8"}}' + - '{"index": {"_index": "everything", "_id": "8"}}' - '{"name": "doc8"}' - - '{"index": {"_index": "everything", "_type": "doc", "_id": "9"}}' + - '{"index": {"_index": "everything", "_id": "9"}}' - '{"name": "doc9"}' - - '{"index": {"_index": "everything", "_type": "doc", "_id": "10"}}' + - '{"index": {"_index": "everything", "_id": "10"}}' - '{"name": "doc10"}' --- @@ -144,7 +140,6 @@ teardown: delete: refresh: true index: only_delete - type: doc id: 3 - do: @@ -152,7 +147,6 @@ teardown: delete: refresh: true index: everything - type: doc id: 8 - do: @@ -160,16 +154,16 @@ teardown: bulk: refresh: true body: - - '{"delete": {"_index": "only_delete", "_type": "doc", "_id": "4"}}' - - '{"delete": {"_index": "everything" , "_type": "doc", "_id": "9"}}' + - '{"delete": {"_index": "only_delete", "_id": "4"}}' + - '{"delete": {"_index": "everything" , "_id": "9"}}' - do: headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user bulk: refresh: true body: # The rest test won't send streaming content unless it has multiple bodies, so we send the same delete twice - - '{"delete": {"_index": "only_delete", "_type": "doc", "_id": "5"}}' - - '{"delete": {"_index": "only_delete", "_type": "doc", "_id": "5"}}' + - '{"delete": {"_index": "only_delete", "_id": "5"}}' + - '{"delete": {"_index": "only_delete", "_id": "5"}}' - do: headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user @@ -178,11 +172,9 @@ teardown: body: # The rest test won't send streaming content unless it has multiple bodies, so we send the same delete twice - delete: _index: everything - _type: doc _id: 10 - delete: _index: everything - _type: doc _id: 10 - do: # superuser @@ -207,7 +199,6 @@ teardown: delete: refresh: true index: only_read - type: doc id: 1 - do: @@ -216,7 +207,6 @@ teardown: delete: refresh: true index: only_index - type: doc id: 2 - do: @@ -224,8 +214,8 @@ teardown: bulk: refresh: true body: - - '{"delete": {"_index": "only_read" , "_type": "doc", "_id": "1"}}' - - '{"delete": {"_index": "only_index", "_type": "doc", "_id": "2"}}' + - '{"delete": {"_index": "only_read" , "_id": "1"}}' + - '{"delete": {"_index": "only_index", "_id": "2"}}' - match: { errors: true } - match: { items.0.delete.status: 403 } @@ -238,8 +228,8 @@ teardown: bulk: refresh: true body: # The rest test won't send streaming content unless it has multiple bodies, so we send the same delete twice - - 
'{"delete": {"_index": "only_read" , "_type": "doc", "_id": "1"}}' - - '{"delete": {"_index": "only_read" , "_type": "doc", "_id": "1"}}' + - '{"delete": {"_index": "only_read" , "_id": "1"}}' + - '{"delete": {"_index": "only_read" , "_id": "1"}}' - match: { errors: true } - match: { items.0.delete.status: 403 } @@ -252,8 +242,8 @@ teardown: bulk: refresh: true body: # The rest test won't send streaming content unless it has multiple bodies, so we send the same delete twice - - '{"delete": {"_index": "only_index", "_type": "doc", "_id": "2"}}' - - '{"delete": {"_index": "only_index", "_type": "doc", "_id": "2"}}' + - '{"delete": {"_index": "only_index", "_id": "2"}}' + - '{"delete": {"_index": "only_index", "_id": "2"}}' - match: { errors: true } - match: { items.0.delete.status: 403 } @@ -282,8 +272,8 @@ teardown: bulk: refresh: true body: - - '{"delete": {"_index": "only_read" , "_type": "doc", "_id": "1"}}' - - '{"delete": {"_index": "only_delete", "_type": "doc", "_id": "6"}}' + - '{"delete": {"_index": "only_read" , "_id": "1"}}' + - '{"delete": {"_index": "only_delete", "_id": "6"}}' - match: { errors: true } - match: { items.0.delete.status: 403 } - match: { items.0.delete.error.type: "security_exception" } @@ -310,12 +300,12 @@ teardown: bulk: refresh: true body: - - '{"index" : {"_index": "only_delete", "_type": "doc", "_id": "11"}}' + - '{"index" : {"_index": "only_delete", "_id": "11"}}' - '{"name" : "doc11"}' - - '{"delete": {"_index": "only_delete", "_type": "doc", "_id": "7"}}' - - '{"index" : {"_index": "only_index", "_type": "doc", "_id": "12"}}' + - '{"delete": {"_index": "only_delete", "_id": "7"}}' + - '{"index" : {"_index": "only_index", "_id": "12"}}' - '{"name" : "doc12"}' - - '{"delete": {"_index": "only_index", "_type": "doc", "_id": "2"}}' + - '{"delete": {"_index": "only_index", "_id": "2"}}' - match: { errors: true } - match: { items.0.index.status: 403 } - match: { items.0.index.error.type: "security_exception" } diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/security/authz/12_index_alias.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/security/authz/12_index_alias.yml index 71b3275ccab0d..1f490ec08dac1 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/security/authz/12_index_alias.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/security/authz/12_index_alias.yml @@ -37,10 +37,9 @@ setup: number_of_shards: 1 number_of_replicas: 0 mappings: - doc: - properties: - name: - type: "keyword" + properties: + name: + type: "keyword" - do: indices.create: @@ -51,10 +50,9 @@ setup: number_of_shards: 1 number_of_replicas: 0 mappings: - doc: - properties: - name: - type: "keyword" + properties: + name: + type: "keyword" - do: indices.create: @@ -65,10 +63,9 @@ setup: number_of_shards: 1 number_of_replicas: 0 mappings: - doc: - properties: - name: - type: "keyword" + properties: + name: + type: "keyword" - do: indices.put_alias: @@ -132,7 +129,6 @@ teardown: create: id: 1 index: can_write_1 - type: doc body: > { "name" : "doc1" @@ -143,7 +139,6 @@ teardown: create: id: 2 index: can_write_2 - type: doc body: > { "name" : "doc2" @@ -154,9 +149,9 @@ teardown: bulk: refresh: true body: - - '{"index": {"_index": "can_write_1", "_type": "doc", "_id": "3"}}' + - '{"index": {"_index": "can_write_1", "_id": "3"}}' - '{"name": "doc3"}' - - '{"index": {"_index": "can_write_1", "_type": "doc", "_id": "4"}}' + - '{"index": {"_index": "can_write_1", "_id": "4"}}' - '{"name": "doc4"}' - do: @@ -164,9 +159,9 @@ teardown: bulk: refresh: 
true body: - - '{"index": {"_index": "can_write_1", "_type": "doc", "_id": "5"}}' + - '{"index": {"_index": "can_write_1", "_id": "5"}}' - '{"name": "doc5"}' - - '{"index": {"_index": "can_write_2", "_type": "doc", "_id": "6"}}' + - '{"index": {"_index": "can_write_2", "_id": "6"}}' - '{"name": "doc6"}' - do: @@ -174,11 +169,11 @@ teardown: bulk: refresh: true body: - - '{"index": {"_index": "can_write_1", "_type": "doc", "_id": "7"}}' + - '{"index": {"_index": "can_write_1", "_id": "7"}}' - '{"name": "doc7"}' - - '{"index": {"_index": "can_write_2", "_type": "doc", "_id": "8"}}' + - '{"index": {"_index": "can_write_2", "_id": "8"}}' - '{"name": "doc8"}' - - '{"index": {"_index": "can_write_3", "_type": "doc", "_id": "9"}}' + - '{"index": {"_index": "can_write_3", "_id": "9"}}' - '{"name": "doc9"}' - do: # superuser @@ -202,7 +197,6 @@ teardown: refresh: true id: 7 index: can_read_1 - type: doc body: > { "name" : "doc7" @@ -215,7 +209,6 @@ teardown: refresh: true id: 8 index: can_read_2 - type: doc body: > { "name" : "doc8" @@ -226,9 +219,9 @@ teardown: bulk: refresh: true body: - - '{"index": {"_index": "can_read_1", "_type": "doc", "_id": "9"}}' + - '{"index": {"_index": "can_read_1", "_id": "9"}}' - '{"name": "doc9"}' - - '{"index": {"_index": "can_read_1", "_type": "doc", "_id": "10"}}' + - '{"index": {"_index": "can_read_1", "_id": "10"}}' - '{"name": "doc10"}' - match: { errors: true } - match: { items.0.index.status: 403 } @@ -241,9 +234,9 @@ teardown: bulk: refresh: true body: - - '{"index": {"_index": "can_read_1", "_type": "doc", "_id": "11"}}' + - '{"index": {"_index": "can_read_1", "_id": "11"}}' - '{"name": "doc11"}' - - '{"index": {"_index": "can_read_2", "_type": "doc", "_id": "12"}}' + - '{"index": {"_index": "can_read_2", "_id": "12"}}' - '{"name": "doc12"}' - match: { errors: true } - match: { items.0.index.status: 403 } @@ -264,9 +257,9 @@ teardown: bulk: refresh: true body: - - '{"index": {"_index": "can_read_1", "_type": "doc", "_id": "13"}}' + - '{"index": {"_index": "can_read_1", "_id": "13"}}' - '{"name": "doc13"}' - - '{"index": {"_index": "can_write_1", "_type": "doc", "_id": "14"}}' + - '{"index": {"_index": "can_write_1", "_id": "14"}}' - '{"name": "doc14"}' - match: { errors: true } - match: { items.0.index.status: 403 } @@ -285,15 +278,15 @@ teardown: bulk: refresh: true body: - - '{"index": {"_index": "can_read_1", "_type": "doc", "_id": "15"}}' + - '{"index": {"_index": "can_read_1", "_id": "15"}}' - '{"name": "doc15"}' - - '{"index": {"_index": "can_write_1", "_type": "doc", "_id": "16"}}' + - '{"index": {"_index": "can_write_1", "_id": "16"}}' - '{"name": "doc16"}' - - '{"index": {"_index": "can_read_2", "_type": "doc", "_id": "17"}}' + - '{"index": {"_index": "can_read_2", "_id": "17"}}' - '{"name": "doc17"}' - - '{"index": {"_index": "can_write_2", "_type": "doc", "_id": "18"}}' + - '{"index": {"_index": "can_write_2", "_id": "18"}}' - '{"name": "doc18"}' - - '{"index": {"_index": "can_write_3", "_type": "doc", "_id": "19"}}' + - '{"index": {"_index": "can_write_3", "_id": "19"}}' - '{"name": "doc19"}' - match: { errors: true } - match: { items.0.index.status: 403 } @@ -345,19 +338,19 @@ teardown: bulk: refresh: true body: - - '{"index": {"_index": "can_read_1", "_type": "doc", "_id": "20"}}' + - '{"index": {"_index": "can_read_1", "_id": "20"}}' - '{"name": "doc20"}' - - '{"index": {"_index": "can_write_1", "_type": "doc", "_id": "21"}}' + - '{"index": {"_index": "can_write_1", "_id": "21"}}' - '{"name": "doc21"}' - - '{"index": {"_index": "can_read_2", 
"_type": "doc", "_id": "22"}}' + - '{"index": {"_index": "can_read_2", "_id": "22"}}' - '{"name": "doc22"}' - - '{"index": {"_index": "can_write_2", "_type": "doc", "_id": "23"}}' + - '{"index": {"_index": "can_write_2", "_id": "23"}}' - '{"name": "doc23"}' - - '{"index": {"_index": "can_write_3", "_type": "doc", "_id": "24"}}' + - '{"index": {"_index": "can_write_3", "_id": "24"}}' - '{"name": "doc24"}' - - '{"update": {"_index": "can_write_3", "_type": "doc", "_id": "24"}}' + - '{"update": {"_index": "can_write_3", "_id": "24"}}' - '{"doc": { "name": "doc_24"}}' - - '{"delete": {"_index": "can_write_3", "_type": "doc", "_id": "24"}}' + - '{"delete": {"_index": "can_write_3", "_id": "24"}}' - match: { errors: true } - match: { items.0.index.status: 403 } - match: { items.0.index.error.type: "security_exception" } diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/security/authz/13_index_datemath.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/security/authz/13_index_datemath.yml index becef57ec4f9a..462b023d18cc0 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/security/authz/13_index_datemath.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/security/authz/13_index_datemath.yml @@ -48,7 +48,6 @@ teardown: index: id: 1 index: "" - type: doc body: > { "name" : "doc1" @@ -58,9 +57,9 @@ teardown: headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user bulk: body: - - '{"index": {"_index": "", "_type": "doc", "_id": "2"}}' + - '{"index": {"_index": "", "_id": "2"}}' - '{"name": "doc2"}' - - '{"index": {"_index": "", "_type": "doc", "_id": "3"}}' + - '{"index": {"_index": "", "_id": "3"}}' - '{"name": "doc3"}' - match: { errors: false } - match: { items.0.index.status: 201 } @@ -84,7 +83,6 @@ teardown: index: id: 4 index: "" - type: doc body: > { "name" : "doc4" @@ -94,9 +92,9 @@ teardown: headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user bulk: body: - - '{"index": {"_index": "", "_type": "doc", "_id": "5"}}' + - '{"index": {"_index": "", "_id": "5"}}' - '{"name": "doc5"}' - - '{"index": {"_index": "", "_type": "doc", "_id": "6"}}' + - '{"index": {"_index": "", "_id": "6"}}' - '{"name": "doc6"}' - match: { errors: true } - match: { items.0.index.status: 403 } @@ -120,9 +118,9 @@ teardown: headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user bulk: body: - - '{"index": {"_index": "", "_type": "doc", "_id": "7"}}' + - '{"index": {"_index": "", "_id": "7"}}' - '{"name": "doc7"}' - - '{"index": {"_index": "", "_type": "doc", "_id": "8"}}' + - '{"index": {"_index": "", "_id": "8"}}' - '{"name": "doc8"}' - match: { errors: true } - match: { items.0.index.status: 403 } diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/security/authz/20_get_doc.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/security/authz/20_get_doc.yml index 5b6a0e2d331e1..4e9367c238ae7 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/security/authz/20_get_doc.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/security/authz/20_get_doc.yml @@ -40,10 +40,9 @@ setup: number_of_shards: 1 number_of_replicas: 0 mappings: - doc: - properties: - name: - type: "keyword" + properties: + name: + type: "keyword" - do: indices.create: @@ -54,10 +53,9 @@ setup: number_of_shards: 1 number_of_replicas: 0 mappings: - doc: - properties: - name: - type: "keyword" + properties: + name: + type: "keyword" - do: indices.create: index: only_delete 
@@ -67,10 +65,9 @@ setup: number_of_shards: 1 number_of_replicas: 0 mappings: - doc: - properties: - name: - type: "keyword" + properties: + name: + type: "keyword" - do: indices.create: index: read_write @@ -80,10 +77,9 @@ setup: number_of_shards: 1 number_of_replicas: 0 mappings: - doc: - properties: - name: - type: "keyword" + properties: + name: + type: "keyword" - do: indices.create: index: everything @@ -93,24 +89,23 @@ setup: number_of_shards: 1 number_of_replicas: 0 mappings: - doc: - properties: - name: - type: "keyword" + properties: + name: + type: "keyword" - do: bulk: refresh: true body: - - '{"index": {"_index": "only_read", "_type": "doc", "_id": "1"}}' + - '{"index": {"_index": "only_read", "_id": "1"}}' - '{"name": "doc1"}' - - '{"index": {"_index": "only_index", "_type": "doc", "_id": "2"}}' + - '{"index": {"_index": "only_index", "_id": "2"}}' - '{"name": "doc2"}' - - '{"index": {"_index": "only_delete", "_type": "doc", "_id": "3"}}' + - '{"index": {"_index": "only_delete", "_id": "3"}}' - '{"name": "doc3"}' - - '{"index": {"_index": "read_write", "_type": "doc", "_id": "4"}}' + - '{"index": {"_index": "read_write", "_id": "4"}}' - '{"name": "doc4"}' - - '{"index": {"_index": "everything", "_type": "doc", "_id": "5"}}' + - '{"index": {"_index": "everything", "_id": "5"}}' - '{"name": "doc5"}' --- @@ -133,7 +128,6 @@ teardown: get: id: 1 index: only_read - type: doc - match: { _index: only_read } - match: { _id: "1" } @@ -144,7 +138,6 @@ teardown: get: id: 4 index: read_write - type: doc - match: { _index: read_write } - match: { _id: "4" } - match: { _source.name: "doc4" } @@ -154,7 +147,6 @@ teardown: get: id: 5 index: everything - type: doc - match: { _index: everything } - match: { _id: "5" } - match: { _source.name: "doc5" } @@ -164,9 +156,9 @@ teardown: mget: body: docs: - - { _index: "only_read", _type: "doc", _id: "1" } - - { _index: "read_write", _type: "doc", _id: "4" } - - { _index: "everything", _type: "doc", _id: "5" } + - { _index: "only_read", _id: "1" } + - { _index: "read_write", _id: "4" } + - { _index: "everything", _id: "5" } - match: { docs.0._index: "only_read" } - match: { docs.0._id: "1" } - match: { docs.0._source.name: "doc1" } @@ -182,7 +174,7 @@ teardown: mget: body: docs: - - { _index: "only_read", _type: "doc", _id: "1" } + - { _index: "only_read", _id: "1" } - match: { docs.0._index: "only_read"} - match: { docs.0._id: "1" } - match: { docs.0._source.name: "doc1" } @@ -192,7 +184,7 @@ teardown: mget: body: docs: - - { _index: "read_write", _type: "doc", _id: "4" } + - { _index: "read_write", _id: "4" } - match: { docs.0._index: read_write} - match: { docs.0._id: "4" } - match: { docs.0._source.name: "doc4" } @@ -202,7 +194,7 @@ teardown: mget: body: docs: - - { _index: "everything", _type: "doc", _id: "5" } + - { _index: "everything", _id: "5" } - match: { docs.0._index: "everything"} - match: { docs.0._id: "5" } - match: { docs.0._source.name: "doc5" } @@ -216,7 +208,6 @@ teardown: get: id: 2 index: only_index - type: doc - do: catch: forbidden @@ -224,15 +215,14 @@ teardown: get: id: 3 index: only_delete - type: doc - do: headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user mget: body: docs: - - { _index: "only_index", _type: "doc", _id: "2" } - - { _index: "only_delete", _type: "doc", _id: "3" } + - { _index: "only_index", _id: "2" } + - { _index: "only_delete", _id: "3" } - match: { docs.0._index: "only_index"} - match: { docs.0._id: "2" } - match: { docs.0.error.type: "security_exception" } @@ -245,7 
+235,7 @@ teardown: mget: body: docs: - - { _index: "only_index", _type: "doc", _id: "2" } + - { _index: "only_index", _id: "2" } - match: { docs.0._index: "only_index"} - match: { docs.0._id: "2" } - match: { docs.0.error.type: "security_exception" } @@ -255,7 +245,7 @@ teardown: mget: body: docs: - - { _index: "only_delete", _type: "doc", _id: "3" } + - { _index: "only_delete", _id: "3" } - match: { docs.0._index: "only_delete"} - match: { docs.0._id: "3" } - match: { docs.0.error.type: "security_exception" } @@ -267,11 +257,11 @@ teardown: mget: body: docs: - - { _index: "only_read" , _type: "doc", _id: "1" } - - { _index: "only_index" , _type: "doc", _id: "2" } - - { _index: "only_delete", _type: "doc", _id: "3" } - - { _index: "read_write" , _type: "doc", _id: "4" } - - { _index: "everything" , _type: "doc", _id: "5" } + - { _index: "only_read" , _id: "1" } + - { _index: "only_index" , _id: "2" } + - { _index: "only_delete", _id: "3" } + - { _index: "read_write" , _id: "4" } + - { _index: "everything" , _id: "5" } - match: { docs.0._index: "only_read" } - match: { docs.0._id: "1" } diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/security/authz/21_search_doc.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/security/authz/21_search_doc.yml index 5901a8c66b28a..cd7b4772ce949 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/security/authz/21_search_doc.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/security/authz/21_search_doc.yml @@ -40,12 +40,11 @@ setup: number_of_shards: 1 number_of_replicas: 0 mappings: - doc: - properties: - name: - type: "keyword" - tag: - type: "keyword" + properties: + name: + type: "keyword" + tag: + type: "keyword" - do: indices.create: index: only_index @@ -55,12 +54,11 @@ setup: number_of_shards: 1 number_of_replicas: 0 mappings: - doc: - properties: - name: - type: "keyword" - tag: - type: "keyword" + properties: + name: + type: "keyword" + tag: + type: "keyword" - do: indices.create: index: only_delete @@ -70,12 +68,11 @@ setup: number_of_shards: 1 number_of_replicas: 0 mappings: - doc: - properties: - name: - type: "keyword" - tag: - type: "keyword" + properties: + name: + type: "keyword" + tag: + type: "keyword" - do: indices.create: index: read_write @@ -85,12 +82,11 @@ setup: number_of_shards: 1 number_of_replicas: 0 mappings: - doc: - properties: - name: - type: "keyword" - tag: - type: "keyword" + properties: + name: + type: "keyword" + tag: + type: "keyword" - do: indices.create: index: everything @@ -100,35 +96,34 @@ setup: number_of_shards: 1 number_of_replicas: 0 mappings: - doc: - properties: - name: - type: "keyword" - tag: - type: "keyword" + properties: + name: + type: "keyword" + tag: + type: "keyword" - do: bulk: refresh: true body: - - '{"index": {"_index": "only_read", "_type": "doc", "_id": "1"}}' + - '{"index": {"_index": "only_read", "_id": "1"}}' - '{"name": "doc1", "tag": [ "can-read", "tag-a" ] }' - - '{"index": {"_index": "only_read", "_type": "doc", "_id": "2"}}' + - '{"index": {"_index": "only_read", "_id": "2"}}' - '{"name": "doc2", "tag": [ "can-read", "tag-b"] }' - - '{"index": {"_index": "only_index", "_type": "doc", "_id": "3"}}' + - '{"index": {"_index": "only_index", "_id": "3"}}' - '{"name": "doc3", "tag": [ "no-read", "tag-a"] }' - - '{"index": {"_index": "only_index", "_type": "doc", "_id": "4"}}' + - '{"index": {"_index": "only_index", "_id": "4"}}' - '{"name": "doc4", "tag": [ "no-read", "tag-b"] }' - - '{"index": {"_index": "only_delete", "_type": "doc", "_id": 
"5"}}' + - '{"index": {"_index": "only_delete", "_id": "5"}}' - '{"name": "doc5", "tag": [ "no-read", "tag-a"] }' - - '{"index": {"_index": "only_delete", "_type": "doc", "_id": "6"}}' + - '{"index": {"_index": "only_delete", "_id": "6"}}' - '{"name": "doc6", "tag": [ "no-read", "tag-b"] }' - - '{"index": {"_index": "read_write", "_type": "doc", "_id": "7"}}' + - '{"index": {"_index": "read_write", "_id": "7"}}' - '{"name": "doc7", "tag": [ "can-read", "tag-a" ] }' - - '{"index": {"_index": "read_write", "_type": "doc", "_id": "8"}}' + - '{"index": {"_index": "read_write", "_id": "8"}}' - '{"name": "doc8", "tag": [ "can-read", "tag-b"] }' - - '{"index": {"_index": "everything", "_type": "doc", "_id": "9"}}' + - '{"index": {"_index": "everything", "_id": "9"}}' - '{"name": "doc9", "tag": [ "can-read", "tag-a" ] }' - - '{"index": {"_index": "everything", "_type": "doc", "_id": "10"}}' + - '{"index": {"_index": "everything", "_id": "10"}}' - '{"name": "doc10", "tag": [ "can-read", "tag-b"] }' --- diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/security/authz/30_dynamic_put_mapping.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/security/authz/30_dynamic_put_mapping.yml index a0fbed057bc73..8176a276b0301 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/security/authz/30_dynamic_put_mapping.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/security/authz/30_dynamic_put_mapping.yml @@ -72,7 +72,6 @@ teardown: create: id: 1 index: write_alias - type: doc body: > { "name" : "doc1" @@ -83,7 +82,6 @@ teardown: create: id: 2 index: write_alias - type: doc body: > { "name2" : "doc2" diff --git a/x-pack/qa/multi-cluster-search-security/src/test/resources/rest-api-spec/test/multi_cluster/10_basic.yml b/x-pack/qa/multi-cluster-search-security/src/test/resources/rest-api-spec/test/multi_cluster/10_basic.yml index fa8172697287e..1537d361a3afb 100644 --- a/x-pack/qa/multi-cluster-search-security/src/test/resources/rest-api-spec/test/multi_cluster/10_basic.yml +++ b/x-pack/qa/multi-cluster-search-security/src/test/resources/rest-api-spec/test/multi_cluster/10_basic.yml @@ -53,15 +53,15 @@ teardown: bulk: refresh: true body: - - '{"index": {"_index": "local_index", "_type": "test_type"}}' + - '{"index": {"_index": "local_index"}}' - '{"f1": "local_cluster", "filter_field": 0}' - - '{"index": {"_index": "local_index", "_type": "test_type"}}' + - '{"index": {"_index": "local_index"}}' - '{"f1": "local_cluster", "filter_field": 1}' - - '{"index": {"_index": "local_index", "_type": "test_type"}}' + - '{"index": {"_index": "local_index"}}' - '{"f1": "local_cluster", "filter_field": 0}' - - '{"index": {"_index": "local_index", "_type": "test_type"}}' + - '{"index": {"_index": "local_index"}}' - '{"f1": "local_cluster", "filter_field": 1}' - - '{"index": {"_index": "local_index", "_type": "test_type"}}' + - '{"index": {"_index": "local_index"}}' - '{"f1": "local_cluster", "filter_field": 0}' - do: diff --git a/x-pack/qa/multi-cluster-search-security/src/test/resources/rest-api-spec/test/multi_cluster/30_field_caps.yml b/x-pack/qa/multi-cluster-search-security/src/test/resources/rest-api-spec/test/multi_cluster/30_field_caps.yml index 9bc4daa655c5f..d4d83937654fe 100644 --- a/x-pack/qa/multi-cluster-search-security/src/test/resources/rest-api-spec/test/multi_cluster/30_field_caps.yml +++ b/x-pack/qa/multi-cluster-search-security/src/test/resources/rest-api-spec/test/multi_cluster/30_field_caps.yml @@ -44,25 +44,24 @@ teardown: index: field_caps_index_2 body: 
mappings: - t: - properties: - text: - type: text - keyword: - type: keyword - number: - type: double - geo: - type: geo_point - object: - type: object - properties: - nested1 : - type : text - index: true - nested2: - type: float - doc_values: true + properties: + text: + type: text + keyword: + type: keyword + number: + type: double + geo: + type: geo_point + object: + type: object + properties: + nested1 : + type : text + index: true + nested2: + type: float + doc_values: true - do: headers: { Authorization: "Basic am9lOnMza3JpdA==" } diff --git a/x-pack/qa/multi-cluster-search-security/src/test/resources/rest-api-spec/test/multi_cluster/60_skip_shards.yml b/x-pack/qa/multi-cluster-search-security/src/test/resources/rest-api-spec/test/multi_cluster/60_skip_shards.yml index d74e82edca7f0..832b7deb8ec78 100644 --- a/x-pack/qa/multi-cluster-search-security/src/test/resources/rest-api-spec/test/multi_cluster/60_skip_shards.yml +++ b/x-pack/qa/multi-cluster-search-security/src/test/resources/rest-api-spec/test/multi_cluster/60_skip_shards.yml @@ -49,17 +49,16 @@ teardown: number_of_shards: 1 number_of_replicas: 0 mappings: - test_type: - properties: - created_at: - type: date - format: "yyyy-MM-dd" + properties: + created_at: + type: date + format: "yyyy-MM-dd" - do: bulk: refresh: true body: - - '{"index": {"_index": "skip_shards_index", "_type": "test_type"}}' + - '{"index": {"_index": "skip_shards_index"}}' - '{"f1": "local_cluster", "sort_field": 0, "created_at" : "2017-01-01"}' # check that we skip the remote shard diff --git a/x-pack/qa/multi-cluster-search-security/src/test/resources/rest-api-spec/test/remote_cluster/10_basic.yml b/x-pack/qa/multi-cluster-search-security/src/test/resources/rest-api-spec/test/remote_cluster/10_basic.yml index f9a37dd829430..32549c586d384 100644 --- a/x-pack/qa/multi-cluster-search-security/src/test/resources/rest-api-spec/test/remote_cluster/10_basic.yml +++ b/x-pack/qa/multi-cluster-search-security/src/test/resources/rest-api-spec/test/remote_cluster/10_basic.yml @@ -40,17 +40,16 @@ setup: number_of_shards: 1 number_of_replicas: 0 mappings: - test_type: - properties: - created_at: - type: date - format: "yyyy-MM-dd" + properties: + created_at: + type: date + format: "yyyy-MM-dd" - do: bulk: refresh: true body: - - '{"index": {"_index": "single_doc_index", "_type": "test_type"}}' + - '{"index": {"_index": "single_doc_index"}}' - '{"f1": "remote_cluster", "sort_field": 1, "created_at" : "2016-01-01"}' - do: @@ -58,49 +57,47 @@ setup: index: field_caps_index_1 body: mappings: - t: - properties: - text: - type: text - keyword: - type: keyword - number: - type: double - geo: - type: geo_point - object: - type: object - properties: - nested1 : - type : text - index: false - nested2: - type: float - doc_values: false + properties: + text: + type: text + keyword: + type: keyword + number: + type: double + geo: + type: geo_point + object: + type: object + properties: + nested1 : + type : text + index: false + nested2: + type: float + doc_values: false - do: indices.create: index: field_caps_index_3 body: mappings: - t: - properties: - text: - type: text - keyword: - type: keyword - number: - type: long - geo: - type: keyword - object: - type: object - properties: - nested1 : - type : long - index: false - nested2: - type: keyword - doc_values: false + properties: + text: + type: text + keyword: + type: keyword + number: + type: long + geo: + type: keyword + object: + type: object + properties: + nested1 : + type : long + index: false + nested2: + type: keyword 
+ doc_values: false - do: indices.create: @@ -130,19 +127,19 @@ setup: bulk: refresh: true body: - - '{"index": {"_index": "test_index", "_type": "test_type"}}' + - '{"index": {"_index": "test_index"}}' - '{"f1": "remote_cluster", "filter_field": 0}' - - '{"index": {"_index": "test_index", "_type": "test_type"}}' + - '{"index": {"_index": "test_index"}}' - '{"f1": "remote_cluster", "filter_field": 1}' - - '{"index": {"_index": "test_index", "_type": "test_type"}}' + - '{"index": {"_index": "test_index"}}' - '{"f1": "remote_cluster", "filter_field": 0}' - - '{"index": {"_index": "test_index", "_type": "test_type"}}' + - '{"index": {"_index": "test_index"}}' - '{"f1": "remote_cluster", "filter_field": 1}' - - '{"index": {"_index": "test_index", "_type": "test_type"}}' + - '{"index": {"_index": "test_index"}}' - '{"f1": "remote_cluster", "filter_field": 0}' - - '{"index": {"_index": "test_index", "_type": "test_type"}}' + - '{"index": {"_index": "test_index"}}' - '{"f1": "remote_cluster", "filter_field": 0}' - - '{"index": {"_index": "secured_via_alias", "_type": "test_type"}}' + - '{"index": {"_index": "secured_via_alias"}}' - '{"f1": "remote_cluster", "secure": true}' diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/10_basic.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/10_basic.yml index 1d5f89667db65..0c5deab19068d 100644 --- a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/10_basic.yml +++ b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/10_basic.yml @@ -3,7 +3,6 @@ - do: get: index: scroll_index - type: doc id: 1 - set: {_source.value: scroll_id} diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/40_ml_datafeed_crud.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/40_ml_datafeed_crud.yml index 39864ae6d9cf0..928fb3a066c28 100644 --- a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/40_ml_datafeed_crud.yml +++ b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/40_ml_datafeed_crud.yml @@ -11,10 +11,9 @@ setup: index: airline-data body: mappings: - response: - properties: - time: - type: date + properties: + time: + type: date --- "Test old and mixed cluster datafeeds": From 03a1d2107069123e6a7f8319e4cccffaea7eb9f2 Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Fri, 1 Feb 2019 20:46:14 +0100 Subject: [PATCH 49/54] SnapshotShardsService Simplifications (#38025) * Instead of replacing the `shardSnapshots` field, we mutate it, explicitly removing entries from it in only a single spot * Decreased the amount of indirection by moving all logic for starting a snapshot's newly discovered shard tasks into `startNewShards` (saves us two maps (keyed by snapshot) and iterations over them) --- .../snapshots/SnapshotShardsService.java | 291 ++++++++---------- .../snapshots/SnapshotsService.java | 2 +- 2 files changed, 127 insertions(+), 166 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java b/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java index 132b269b196e0..fdcf22a080ecb 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java +++ b/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java @@ -76,18 +76,15 @@ import java.io.IOException; import java.util.ArrayList; import 
java.util.HashMap; +import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.concurrent.Executor; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.locks.Condition; -import java.util.concurrent.locks.Lock; -import java.util.concurrent.locks.ReentrantLock; import java.util.function.Function; import java.util.stream.Collectors; import static java.util.Collections.emptyMap; -import static java.util.Collections.unmodifiableMap; +import static java.util.Collections.unmodifiableList; import static org.elasticsearch.cluster.SnapshotsInProgress.completed; /** @@ -109,11 +106,7 @@ public class SnapshotShardsService extends AbstractLifecycleComponent implements private final ThreadPool threadPool; - private final Lock shutdownLock = new ReentrantLock(); - - private final Condition shutdownCondition = shutdownLock.newCondition(); - - private volatile Map<Snapshot, Map<ShardId, IndexShardSnapshotStatus>> shardSnapshots = emptyMap(); + private final Map<Snapshot, Map<ShardId, IndexShardSnapshotStatus>> shardSnapshots = new HashMap<>(); // A map of snapshots to the shardIds that we already reported to the master as failed private final TransportRequestDeduplicator<UpdateIndexShardSnapshotStatusRequest> remoteFailedRequestDeduplicator = @@ -149,16 +142,6 @@ protected void doStart() { @Override protected void doStop() { - shutdownLock.lock(); - try { - while(!shardSnapshots.isEmpty() && shutdownCondition.await(5, TimeUnit.SECONDS)) { - // Wait for at most 5 second for locally running snapshots to finish - } - } catch (InterruptedException ex) { - Thread.currentThread().interrupt(); - } finally { - shutdownLock.unlock(); - } } @Override @@ -173,7 +156,9 @@ public void clusterChanged(ClusterChangedEvent event) { SnapshotsInProgress currentSnapshots = event.state().custom(SnapshotsInProgress.TYPE); if ((previousSnapshots == null && currentSnapshots != null) || (previousSnapshots != null && previousSnapshots.equals(currentSnapshots) == false)) { - processIndexShardSnapshots(event); + synchronized (shardSnapshots) { + processIndexShardSnapshots(currentSnapshots); + } } String previousMasterNodeId = event.previousState().nodes().getMasterNodeId(); @@ -190,13 +175,14 @@ public void clusterChanged(ClusterChangedEvent event) { @Override public void beforeIndexShardClosed(ShardId shardId, @Nullable IndexShard indexShard, Settings indexSettings) { // abort any snapshots occurring on the soon-to-be closed shard - Map<Snapshot, Map<ShardId, IndexShardSnapshotStatus>> snapshotShardsMap = shardSnapshots; - for (Map.Entry<Snapshot, Map<ShardId, IndexShardSnapshotStatus>> snapshotShards : snapshotShardsMap.entrySet()) { - Map<ShardId, IndexShardSnapshotStatus> shards = snapshotShards.getValue(); - if (shards.containsKey(shardId)) { - logger.debug("[{}] shard closing, abort snapshotting for snapshot [{}]", - shardId, snapshotShards.getKey().getSnapshotId()); - shards.get(shardId).abortIfNotCompleted("shard is closing, aborting"); + synchronized (shardSnapshots) { + for (Map.Entry<Snapshot, Map<ShardId, IndexShardSnapshotStatus>> snapshotShards : shardSnapshots.entrySet()) { + Map<ShardId, IndexShardSnapshotStatus> shards = snapshotShards.getValue(); + if (shards.containsKey(shardId)) { + logger.debug("[{}] shard closing, abort snapshotting for snapshot [{}]", + shardId, snapshotShards.getKey().getSnapshotId()); + shards.get(shardId).abortIfNotCompleted("shard is closing, aborting"); + } } } } @@ -211,169 +197,147 @@ public void beforeIndexShardClosed(ShardId shardId, @Nullable IndexShard indexSh * @return map of shard id to snapshot status */ public Map<ShardId, IndexShardSnapshotStatus> currentSnapshotShards(Snapshot snapshot) { - return shardSnapshots.get(snapshot); + synchronized (shardSnapshots) { + final Map<ShardId, IndexShardSnapshotStatus> current = shardSnapshots.get(snapshot); + return current == null ?
null : new HashMap<>(current); + } } /** * Checks if any new shards should be snapshotted on this node * - * @param event cluster state changed event + * @param snapshotsInProgress Current snapshots in progress in cluster state */ - private void processIndexShardSnapshots(ClusterChangedEvent event) { - SnapshotsInProgress snapshotsInProgress = event.state().custom(SnapshotsInProgress.TYPE); - Map<Snapshot, Map<ShardId, IndexShardSnapshotStatus>> survivors = new HashMap<>(); + private void processIndexShardSnapshots(SnapshotsInProgress snapshotsInProgress) { + cancelRemoved(snapshotsInProgress); + if (snapshotsInProgress != null) { + startNewSnapshots(snapshotsInProgress); + } + } + + private void cancelRemoved(@Nullable SnapshotsInProgress snapshotsInProgress) { // First, remove snapshots that are no longer there - for (Map.Entry<Snapshot, Map<ShardId, IndexShardSnapshotStatus>> entry : shardSnapshots.entrySet()) { + Iterator<Map.Entry<Snapshot, Map<ShardId, IndexShardSnapshotStatus>>> it = shardSnapshots.entrySet().iterator(); + while (it.hasNext()) { + final Map.Entry<Snapshot, Map<ShardId, IndexShardSnapshotStatus>> entry = it.next(); final Snapshot snapshot = entry.getKey(); - if (snapshotsInProgress != null && snapshotsInProgress.snapshot(snapshot) != null) { - survivors.put(entry.getKey(), entry.getValue()); - } else { + if (snapshotsInProgress == null || snapshotsInProgress.snapshot(snapshot) == null) { // abort any running snapshots of shards for the removed entry; // this could happen if for some reason the cluster state update for aborting // running shards is missed, then the snapshot is removed is a subsequent cluster // state update, which is being processed here + it.remove(); for (IndexShardSnapshotStatus snapshotStatus : entry.getValue().values()) { snapshotStatus.abortIfNotCompleted("snapshot has been removed in cluster state, aborting"); } } } + } + private void startNewSnapshots(SnapshotsInProgress snapshotsInProgress) { // For now we will be mostly dealing with a single snapshot at a time but might have multiple simultaneously running // snapshots in the future - Map<Snapshot, Map<ShardId, IndexShardSnapshotStatus>> newSnapshots = new HashMap<>(); // Now go through all snapshots and update existing or create missing - final String localNodeId = event.state().nodes().getLocalNodeId(); - final Map<Snapshot, Map<String, IndexId>> snapshotIndices = new HashMap<>(); - if (snapshotsInProgress != null) { - for (SnapshotsInProgress.Entry entry : snapshotsInProgress.entries()) { - snapshotIndices.put(entry.snapshot(), - entry.indices().stream().collect(Collectors.toMap(IndexId::getName, Function.identity()))); - if (entry.state() == State.STARTED) { - Map<ShardId, IndexShardSnapshotStatus> startedShards = new HashMap<>(); - Map<ShardId, IndexShardSnapshotStatus> snapshotShards = shardSnapshots.get(entry.snapshot()); - for (ObjectObjectCursor<ShardId, ShardSnapshotStatus> shard : entry.shards()) { - // Add all new shards to start processing on - if (localNodeId.equals(shard.value.nodeId())) { - if (shard.value.state() == State.INIT && (snapshotShards == null || !snapshotShards.containsKey(shard.key))) { - logger.trace("[{}] - Adding shard to the queue", shard.key); - startedShards.put(shard.key, IndexShardSnapshotStatus.newInitializing()); - } + final String localNodeId = clusterService.localNode().getId(); + for (SnapshotsInProgress.Entry entry : snapshotsInProgress.entries()) { + final State entryState = entry.state(); + if (entryState == State.STARTED) { + Map<ShardId, IndexShardSnapshotStatus> startedShards = null; + final Snapshot snapshot = entry.snapshot(); + Map<ShardId, IndexShardSnapshotStatus> snapshotShards = shardSnapshots.getOrDefault(snapshot, emptyMap()); + for (ObjectObjectCursor<ShardId, ShardSnapshotStatus> shard : entry.shards()) { + // Add all new shards to start processing on + final ShardId shardId = shard.key; + final ShardSnapshotStatus shardSnapshotStatus = shard.value; + if (localNodeId.equals(shardSnapshotStatus.nodeId()) &&
shardSnapshotStatus.state() == State.INIT + && snapshotShards.containsKey(shardId) == false) { + logger.trace("[{}] - Adding shard to the queue", shardId); + if (startedShards == null) { + startedShards = new HashMap<>(); } + startedShards.put(shardId, IndexShardSnapshotStatus.newInitializing()); } - if (!startedShards.isEmpty()) { - newSnapshots.put(entry.snapshot(), startedShards); - if (snapshotShards != null) { - // We already saw this snapshot but we need to add more started shards - Map<ShardId, IndexShardSnapshotStatus> shards = new HashMap<>(); - // Put all shards that were already running on this node - shards.putAll(snapshotShards); - // Put all newly started shards - shards.putAll(startedShards); - survivors.put(entry.snapshot(), unmodifiableMap(shards)); - } else { - // Brand new snapshot that we haven't seen before - survivors.put(entry.snapshot(), unmodifiableMap(startedShards)); - } - } - } else if (entry.state() == State.ABORTED) { - // Abort all running shards for this snapshot - Map<ShardId, IndexShardSnapshotStatus> snapshotShards = shardSnapshots.get(entry.snapshot()); - if (snapshotShards != null) { - for (ObjectObjectCursor<ShardId, ShardSnapshotStatus> shard : entry.shards()) { - final IndexShardSnapshotStatus snapshotStatus = snapshotShards.get(shard.key); - if (snapshotStatus != null) { - final IndexShardSnapshotStatus.Copy lastSnapshotStatus = - snapshotStatus.abortIfNotCompleted("snapshot has been aborted"); - final Stage stage = lastSnapshotStatus.getStage(); - if (stage == Stage.FINALIZE) { - logger.debug("[{}] trying to cancel snapshot on shard [{}] that is finalizing, " + - "letting it finish", entry.snapshot(), shard.key); - - } else if (stage == Stage.DONE) { - logger.debug("[{}] trying to cancel snapshot on the shard [{}] that is already done, " + - "updating status on the master", entry.snapshot(), shard.key); - notifySuccessfulSnapshotShard(entry.snapshot(), shard.key, localNodeId); - - } else if (stage == Stage.FAILURE) { - logger.debug("[{}] trying to cancel snapshot on the shard [{}] that has already failed, " + - "updating status on the master", entry.snapshot(), shard.key); - notifyFailedSnapshotShard(entry.snapshot(), shard.key, localNodeId, lastSnapshotStatus.getFailure()); - } - } + } + if (startedShards != null && startedShards.isEmpty() == false) { + shardSnapshots.computeIfAbsent(snapshot, s -> new HashMap<>()).putAll(startedShards); + startNewShards(entry, startedShards); + } + } else if (entryState == State.ABORTED) { + // Abort all running shards for this snapshot + final Snapshot snapshot = entry.snapshot(); + Map<ShardId, IndexShardSnapshotStatus> snapshotShards = shardSnapshots.getOrDefault(snapshot, emptyMap()); + for (ObjectObjectCursor<ShardId, ShardSnapshotStatus> shard : entry.shards()) { + final IndexShardSnapshotStatus snapshotStatus = snapshotShards.get(shard.key); + if (snapshotStatus != null) { + final IndexShardSnapshotStatus.Copy lastSnapshotStatus = + snapshotStatus.abortIfNotCompleted("snapshot has been aborted"); + final Stage stage = lastSnapshotStatus.getStage(); + if (stage == Stage.FINALIZE) { + logger.debug("[{}] trying to cancel snapshot on shard [{}] that is finalizing, " + + "letting it finish", snapshot, shard.key); + } else if (stage == Stage.DONE) { + logger.debug("[{}] trying to cancel snapshot on the shard [{}] that is already done, " + + "updating status on the master", snapshot, shard.key); + notifySuccessfulSnapshotShard(snapshot, shard.key); + } else if (stage == Stage.FAILURE) { + logger.debug("[{}] trying to cancel snapshot on the shard [{}] that has already failed, " + + "updating status on the master", snapshot, shard.key); + notifyFailedSnapshotShard(snapshot,
shard.key, lastSnapshotStatus.getFailure()); } } else { - final Snapshot snapshot = entry.snapshot(); for (ObjectObjectCursor curr : entry.shards()) { // due to CS batching we might have missed the INIT state and straight went into ABORTED // notify master that abort has completed by moving to FAILED if (curr.value.state() == State.ABORTED) { - notifyFailedSnapshotShard(snapshot, curr.key, localNodeId, curr.value.reason()); + notifyFailedSnapshotShard(snapshot, curr.key, curr.value.reason()); } } } } } } + } - // Update the list of snapshots that we saw and tried to started - // If startup of these shards fails later, we don't want to try starting these shards again - shutdownLock.lock(); - try { - shardSnapshots = unmodifiableMap(survivors); - if (shardSnapshots.isEmpty()) { - // Notify all waiting threads that no more snapshots - shutdownCondition.signalAll(); - } - } finally { - shutdownLock.unlock(); - } + private void startNewShards(SnapshotsInProgress.Entry entry, Map startedShards) { + final Snapshot snapshot = entry.snapshot(); + final Map indicesMap = entry.indices().stream().collect(Collectors.toMap(IndexId::getName, Function.identity())); + final Executor executor = threadPool.executor(ThreadPool.Names.SNAPSHOT); + for (final Map.Entry shardEntry : startedShards.entrySet()) { + final ShardId shardId = shardEntry.getKey(); + final IndexId indexId = indicesMap.get(shardId.getIndexName()); + assert indexId != null; + executor.execute(new AbstractRunnable() { - // We have new shards to starts - if (newSnapshots.isEmpty() == false) { - Executor executor = threadPool.executor(ThreadPool.Names.SNAPSHOT); - for (final Map.Entry> entry : newSnapshots.entrySet()) { - final Snapshot snapshot = entry.getKey(); - final Map indicesMap = snapshotIndices.get(snapshot); - assert indicesMap != null; - - for (final Map.Entry shardEntry : entry.getValue().entrySet()) { - final ShardId shardId = shardEntry.getKey(); - final IndexId indexId = indicesMap.get(shardId.getIndexName()); - executor.execute(new AbstractRunnable() { - - final SetOnce failure = new SetOnce<>(); - - @Override - public void doRun() { - final IndexShard indexShard = indicesService.indexServiceSafe(shardId.getIndex()).getShardOrNull(shardId.id()); - assert indexId != null; - snapshot(indexShard, snapshot, indexId, shardEntry.getValue()); - } + private final SetOnce failure = new SetOnce<>(); - @Override - public void onFailure(Exception e) { - logger.warn(() -> new ParameterizedMessage("[{}][{}] failed to snapshot shard", - shardId, snapshot), e); - failure.set(e); - } + @Override + public void doRun() { + final IndexShard indexShard = + indicesService.indexServiceSafe(shardId.getIndex()).getShardOrNull(shardId.id()); + snapshot(indexShard, snapshot, indexId, shardEntry.getValue()); + } - @Override - public void onRejection(Exception e) { - failure.set(e); - } + @Override + public void onFailure(Exception e) { + logger.warn(() -> new ParameterizedMessage("[{}][{}] failed to snapshot shard", shardId, snapshot), e); + failure.set(e); + } - @Override - public void onAfter() { - final Exception exception = failure.get(); - if (exception != null) { - notifyFailedSnapshotShard(snapshot, shardId, localNodeId, ExceptionsHelper.detailedMessage(exception)); - } else { - notifySuccessfulSnapshotShard(snapshot, shardId, localNodeId); - } - } - }); + @Override + public void onRejection(Exception e) { + failure.set(e); } - } + + @Override + public void onAfter() { + final Exception exception = failure.get(); + if (exception != null) { + 
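+ // a failure recorded by onFailure or onRejection above; report it so the master can move the
+ // shard's snapshot state to FAILED instead of leaving it in INIT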
notifyFailedSnapshotShard(snapshot, shardId, ExceptionsHelper.detailedMessage(exception)); + } else { + notifySuccessfulSnapshotShard(snapshot, shardId); + } + } + }); } } @@ -427,7 +391,6 @@ private void syncShardStatsOnNewMaster(ClusterChangedEvent event) { return; } - final String localNodeId = event.state().nodes().getLocalNodeId(); for (SnapshotsInProgress.Entry snapshot : snapshotsInProgress.entries()) { if (snapshot.state() == State.STARTED || snapshot.state() == State.ABORTED) { Map localShards = currentSnapshotShards(snapshot.snapshot()); @@ -444,13 +407,13 @@ private void syncShardStatsOnNewMaster(ClusterChangedEvent event) { // but we think the shard is done - we need to make new master know that the shard is done logger.debug("[{}] new master thinks the shard [{}] is not completed but the shard is done locally, " + "updating status on the master", snapshot.snapshot(), shardId); - notifySuccessfulSnapshotShard(snapshot.snapshot(), shardId, localNodeId); + notifySuccessfulSnapshotShard(snapshot.snapshot(), shardId); } else if (stage == Stage.FAILURE) { // but we think the shard failed - we need to make new master know that the shard failed logger.debug("[{}] new master thinks the shard [{}] is not completed but the shard failed locally, " + "updating status on master", snapshot.snapshot(), shardId); - notifyFailedSnapshotShard(snapshot.snapshot(), shardId, localNodeId, indexShardSnapshotStatus.getFailure()); + notifyFailedSnapshotShard(snapshot.snapshot(), shardId, indexShardSnapshotStatus.getFailure()); } } } @@ -519,13 +482,13 @@ public String toString() { } /** Notify the master node that the given shard has been successfully snapshotted **/ - void notifySuccessfulSnapshotShard(final Snapshot snapshot, final ShardId shardId, final String localNodeId) { - sendSnapshotShardUpdate(snapshot, shardId, new ShardSnapshotStatus(localNodeId, State.SUCCESS)); + private void notifySuccessfulSnapshotShard(final Snapshot snapshot, final ShardId shardId) { + sendSnapshotShardUpdate(snapshot, shardId, new ShardSnapshotStatus(clusterService.localNode().getId(), State.SUCCESS)); } /** Notify the master node that the given shard failed to be snapshotted **/ - void notifyFailedSnapshotShard(final Snapshot snapshot, final ShardId shardId, final String localNodeId, final String failure) { - sendSnapshotShardUpdate(snapshot, shardId, new ShardSnapshotStatus(localNodeId, State.FAILED, failure)); + private void notifyFailedSnapshotShard(final Snapshot snapshot, final ShardId shardId, final String failure) { + sendSnapshotShardUpdate(snapshot, shardId, new ShardSnapshotStatus(clusterService.localNode().getId(), State.FAILED, failure)); } /** Updates the shard snapshot status by sending a {@link UpdateIndexShardSnapshotStatusRequest} to the master node */ @@ -585,11 +548,11 @@ public void clusterStateProcessed(String source, ClusterState oldState, ClusterS }); } - class SnapshotStateExecutor implements ClusterStateTaskExecutor { + private class SnapshotStateExecutor implements ClusterStateTaskExecutor { @Override public ClusterTasksResult - execute(ClusterState currentState, List tasks) throws Exception { + execute(ClusterState currentState, List tasks) { final SnapshotsInProgress snapshots = currentState.custom(SnapshotsInProgress.TYPE); if (snapshots != null) { int changedCount = 0; @@ -628,11 +591,9 @@ class SnapshotStateExecutor implements ClusterStateTaskExecutor 0) { logger.trace("changed cluster state triggered by {} snapshot state updates", changedCount); - - final SnapshotsInProgress 
updatedSnapshots = - new SnapshotsInProgress(entries.toArray(new SnapshotsInProgress.Entry[entries.size()])); - return ClusterTasksResult.builder().successes(tasks).build( - ClusterState.builder(currentState).putCustom(SnapshotsInProgress.TYPE, updatedSnapshots).build()); + return ClusterTasksResult.builder().successes(tasks) + .build(ClusterState.builder(currentState).putCustom(SnapshotsInProgress.TYPE, + new SnapshotsInProgress(unmodifiableList(entries))).build()); } } return ClusterTasksResult.builder().successes(tasks).build(currentState); @@ -665,7 +626,7 @@ protected UpdateIndexShardSnapshotStatusResponse newResponse() { @Override protected void masterOperation(UpdateIndexShardSnapshotStatusRequest request, ClusterState state, - ActionListener listener) throws Exception { + ActionListener listener) { innerUpdateSnapshotState(request, listener); } diff --git a/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java b/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java index c5b478fa908a9..df3d0f16f9188 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java +++ b/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java @@ -98,7 +98,7 @@ * the {@link #beginSnapshot(ClusterState, SnapshotsInProgress.Entry, boolean, ActionListener)} method kicks in and initializes * the snapshot in the repository and then populates list of shards that needs to be snapshotted in cluster state *
<li>Each data node is watching for these shards and when new shards scheduled for snapshotting appear in the cluster state, data nodes
- * start processing them through {@link SnapshotShardsService#processIndexShardSnapshots(ClusterChangedEvent)} method
+ * start processing them through {@link SnapshotShardsService#processIndexShardSnapshots(SnapshotsInProgress)} method
 * <li>Once shard snapshot is created data node updates state of the shard in the cluster state using
 * the {@link SnapshotShardsService#sendSnapshotShardUpdate(Snapshot, ShardId, ShardSnapshotStatus)} method
 * <li>When last shard is completed master node in {@link SnapshotShardsService#innerUpdateSnapshotState} method marks the snapshot

From 3ecdfe106076d9820ca86ee4369b69e708ec9088 Mon Sep 17 00:00:00 2001
From: Nhat Nguyen
Date: Fri, 1 Feb 2019 15:44:39 -0500
Subject: [PATCH 50/54] Enable trace log in FollowerFailOverIT (#38148)

This suite still fails about once per week with a worrying assertion reading
"Expected: but: was". Sadly we are still unable to find the actual source.
This change enables trace log in the suite so we will have a better picture
if this fails again.

Relates #3333
---
 .../bulk/TransportBulkShardOperationsAction.java | 10 ++++++++++
 .../xpack/ccr/index/engine/FollowingEngine.java  |  3 +++
 .../elasticsearch/xpack/CcrIntegTestCase.java    | 16 +++++++++-------
 .../xpack/ccr/FollowerFailOverIT.java            |  2 ++
 4 files changed, 24 insertions(+), 7 deletions(-)

diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/bulk/TransportBulkShardOperationsAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/bulk/TransportBulkShardOperationsAction.java
index 4a4b4648776b1..9afc57309cccc 100644
--- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/bulk/TransportBulkShardOperationsAction.java
+++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/bulk/TransportBulkShardOperationsAction.java
@@ -63,6 +63,9 @@ public TransportBulkShardOperationsAction(
    @Override
    protected WritePrimaryResult shardOperationOnPrimary(
            final BulkShardOperationsRequest request, final IndexShard primary) throws Exception {
+        if (logger.isTraceEnabled()) {
+            logger.trace("index [{}] on the following primary shard {}", request.getOperations(), primary.routingEntry());
+        }
        return shardOperationOnPrimary(request.shardId(), request.getHistoryUUID(), request.getOperations(),
            request.getMaxSeqNoOfUpdatesOrDeletes(), primary, logger);
    }
@@ -134,6 +137,10 @@ public static CcrWritePrimaryResult shardOperationOnPrimary(
            // replicated to replicas but with the existing primary term (not the current primary term) in order
            // to guarantee the consistency between the primary and replicas, and between translog and Lucene index.
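            // the recorded failure carries the seq_no and the existing primary term from the first
            // time the operation was processed; trace-logging them below helps correlate dedup hits
            // with the suite's intermittent assertion failure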
final AlreadyProcessedFollowingEngineException failure = (AlreadyProcessedFollowingEngineException) result.getFailure(); + if (logger.isTraceEnabled()) { + logger.trace("operation [{}] was processed before on following primary shard {} with existing term {}", + targetOp, primary.routingEntry(), failure.getExistingPrimaryTerm()); + } assert failure.getSeqNo() == targetOp.seqNo() : targetOp.seqNo() + " != " + failure.getSeqNo(); if (failure.getExistingPrimaryTerm().isPresent()) { appliedOperations.add(rewriteOperationWithPrimaryTerm(sourceOp, failure.getExistingPrimaryTerm().getAsLong())); @@ -156,6 +163,9 @@ public static CcrWritePrimaryResult shardOperationOnPrimary( @Override protected WriteReplicaResult shardOperationOnReplica( final BulkShardOperationsRequest request, final IndexShard replica) throws Exception { + if (logger.isTraceEnabled()) { + logger.trace("index [{}] on the following replica shard {}", request.getOperations(), replica.routingEntry()); + } return shardOperationOnReplica(request, replica, logger); } diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/index/engine/FollowingEngine.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/index/engine/FollowingEngine.java index b7086ed876db7..23157c177816f 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/index/engine/FollowingEngine.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/index/engine/FollowingEngine.java @@ -72,6 +72,9 @@ protected InternalEngine.IndexingStrategy indexingStrategyForOperation(final Ind final long maxSeqNoOfUpdatesOrDeletes = getMaxSeqNoOfUpdatesOrDeletes(); assert maxSeqNoOfUpdatesOrDeletes != SequenceNumbers.UNASSIGNED_SEQ_NO : "max_seq_no_of_updates is not initialized"; if (hasBeenProcessedBefore(index)) { + if (logger.isTraceEnabled()) { + logger.trace("index operation [id={} seq_no={} origin={}] was processed before", index.id(), index.seqNo(), index.origin()); + } if (index.origin() == Operation.Origin.PRIMARY) { /* * The existing operation in this engine was probably assigned the term of the previous primary shard which is different diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/CcrIntegTestCase.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/CcrIntegTestCase.java index 3a13027cb3511..2f34315b46e69 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/CcrIntegTestCase.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/CcrIntegTestCase.java @@ -444,6 +444,13 @@ public static ResumeFollowAction.Request resumeFollow(String followerIndex) { * on the follower equal the leader's; then verifies the existing pairs of (docId, seqNo) on the follower also equal the leader. 
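 * The doc-level comparison runs first, and logs the follower's (docId, seqNo) pairs, so a
 * divergence is visible before the seq_no stats check runs.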
*/ protected void assertIndexFullyReplicatedToFollower(String leaderIndex, String followerIndex) throws Exception { + logger.info("--> asserting <> between {} and {}", leaderIndex, followerIndex); + assertBusy(() -> { + Map> docsOnFollower = getDocIdAndSeqNos(clusterGroup.followerCluster, followerIndex); + logger.info("--> docs on the follower {}", docsOnFollower); + assertThat(docsOnFollower, equalTo(getDocIdAndSeqNos(clusterGroup.leaderCluster, leaderIndex))); + }, 120, TimeUnit.SECONDS); + logger.info("--> asserting seq_no_stats between {} and {}", leaderIndex, followerIndex); assertBusy(() -> { Map leaderStats = new HashMap<>(); @@ -460,13 +467,8 @@ protected void assertIndexFullyReplicatedToFollower(String leaderIndex, String f } followerStats.put(shardStat.getShardRouting().shardId().id(), shardStat.getSeqNoStats()); } - assertThat(leaderStats, equalTo(followerStats)); - }, 60, TimeUnit.SECONDS); - logger.info("--> asserting <> between {} and {}", leaderIndex, followerIndex); - assertBusy(() -> { - assertThat(getDocIdAndSeqNos(clusterGroup.leaderCluster, leaderIndex), - equalTo(getDocIdAndSeqNos(clusterGroup.followerCluster, followerIndex))); - }, 60, TimeUnit.SECONDS); + assertThat(followerStats, equalTo(leaderStats)); + }, 120, TimeUnit.SECONDS); } private Map> getDocIdAndSeqNos(InternalTestCluster cluster, String index) throws IOException { diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/FollowerFailOverIT.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/FollowerFailOverIT.java index 73cc94b4703a9..440a5fbc37e1e 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/FollowerFailOverIT.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/FollowerFailOverIT.java @@ -27,6 +27,7 @@ import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.test.InternalTestCluster; +import org.elasticsearch.test.junit.annotations.TestLogging; import org.elasticsearch.xpack.CcrIntegTestCase; import org.elasticsearch.xpack.core.ccr.action.FollowStatsAction; import org.elasticsearch.xpack.core.ccr.action.PutFollowAction; @@ -44,6 +45,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.hamcrest.Matchers.equalTo; +@TestLogging("org.elasticsearch.xpack.ccr:TRACE,org.elasticsearch.index.shard:DEBUG") public class FollowerFailOverIT extends CcrIntegTestCase { @Override From bae656dceafb57278cd5ad1f0f933c4e6c6cc124 Mon Sep 17 00:00:00 2001 From: Tal Levy Date: Fri, 1 Feb 2019 13:16:34 -0800 Subject: [PATCH 51/54] Preserve ILM operation mode when creating new lifecycles (#38134) There was a bug where creating a new policy would start the ILM service, even if it was stopped. 
This change ensures that there is no change to the existing operation mode --- .../action/TransportPutLifecycleAction.java | 3 +- .../IndexLifecycleInitialisationTests.java | 37 +++++++++++++++++++ 2 files changed, 38 insertions(+), 2 deletions(-) diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/TransportPutLifecycleAction.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/TransportPutLifecycleAction.java index 61f9be3558fa7..d94de3e201a46 100644 --- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/TransportPutLifecycleAction.java +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/TransportPutLifecycleAction.java @@ -23,7 +23,6 @@ import org.elasticsearch.xpack.core.indexlifecycle.IndexLifecycleMetadata; import org.elasticsearch.xpack.core.indexlifecycle.LifecyclePolicy; import org.elasticsearch.xpack.core.indexlifecycle.LifecyclePolicyMetadata; -import org.elasticsearch.xpack.core.indexlifecycle.OperationMode; import org.elasticsearch.xpack.core.indexlifecycle.action.PutLifecycleAction; import org.elasticsearch.xpack.core.indexlifecycle.action.PutLifecycleAction.Request; import org.elasticsearch.xpack.core.indexlifecycle.action.PutLifecycleAction.Response; @@ -93,7 +92,7 @@ public ClusterState execute(ClusterState currentState) throws Exception { } else { logger.info("updating index lifecycle policy [{}]", request.getPolicy().getName()); } - IndexLifecycleMetadata newMetadata = new IndexLifecycleMetadata(newPolicies, OperationMode.RUNNING); + IndexLifecycleMetadata newMetadata = new IndexLifecycleMetadata(newPolicies, currentMetadata.getOperationMode()); newState.metaData(MetaData.builder(currentState.getMetaData()) .putCustom(IndexLifecycleMetadata.TYPE, newMetadata).build()); return newState.build(); diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleInitialisationTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleInitialisationTests.java index a041232d8a7e7..a1a37beb1d129 100644 --- a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleInitialisationTests.java +++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleInitialisationTests.java @@ -37,13 +37,17 @@ import org.elasticsearch.xpack.core.indexlifecycle.LifecycleSettings; import org.elasticsearch.xpack.core.indexlifecycle.LifecycleType; import org.elasticsearch.xpack.core.indexlifecycle.MockAction; +import org.elasticsearch.xpack.core.indexlifecycle.OperationMode; import org.elasticsearch.xpack.core.indexlifecycle.Phase; import org.elasticsearch.xpack.core.indexlifecycle.PhaseExecutionInfo; import org.elasticsearch.xpack.core.indexlifecycle.Step; +import org.elasticsearch.xpack.core.indexlifecycle.StopILMRequest; import org.elasticsearch.xpack.core.indexlifecycle.TerminalPolicyStep; import org.elasticsearch.xpack.core.indexlifecycle.action.ExplainLifecycleAction; import org.elasticsearch.xpack.core.indexlifecycle.action.GetLifecycleAction; +import org.elasticsearch.xpack.core.indexlifecycle.action.GetStatusAction; import org.elasticsearch.xpack.core.indexlifecycle.action.PutLifecycleAction; +import org.elasticsearch.xpack.core.indexlifecycle.action.StopILMAction; import org.junit.Before; import java.io.IOException; @@ -364,6 +368,39 @@ public void testMasterFailover() throws Exception { }); } + public void 
testCreatePolicyWhenStopped() throws Exception { + // start master node + logger.info("Starting server1"); + final String server_1 = internalCluster().startNode(); + final String node1 = getLocalNodeId(server_1); + + assertAcked(client().execute(StopILMAction.INSTANCE, new StopILMRequest()).get()); + assertBusy(() -> assertThat( + client().execute(GetStatusAction.INSTANCE, new GetStatusAction.Request()).get().getMode(), + equalTo(OperationMode.STOPPED))); + + logger.info("Creating lifecycle [test_lifecycle]"); + PutLifecycleAction.Request putLifecycleRequest = new PutLifecycleAction.Request(lifecyclePolicy); + long lowerBoundModifiedDate = Instant.now().toEpochMilli(); + PutLifecycleAction.Response putLifecycleResponse = client().execute(PutLifecycleAction.INSTANCE, putLifecycleRequest).get(); + assertAcked(putLifecycleResponse); + long upperBoundModifiedDate = Instant.now().toEpochMilli(); + + // assert version and modified_date + GetLifecycleAction.Response getLifecycleResponse = client().execute(GetLifecycleAction.INSTANCE, + new GetLifecycleAction.Request()).get(); + assertThat(getLifecycleResponse.getPolicies().size(), equalTo(1)); + GetLifecycleAction.LifecyclePolicyResponseItem responseItem = getLifecycleResponse.getPolicies().get(0); + assertThat(responseItem.getLifecyclePolicy(), equalTo(lifecyclePolicy)); + assertThat(responseItem.getVersion(), equalTo(1L)); + long actualModifiedDate = Instant.parse(responseItem.getModifiedDate()).toEpochMilli(); + assertThat(actualModifiedDate, + is(both(greaterThanOrEqualTo(lowerBoundModifiedDate)).and(lessThanOrEqualTo(upperBoundModifiedDate)))); + // assert ILM is still stopped + GetStatusAction.Response statusResponse = client().execute(GetStatusAction.INSTANCE, new GetStatusAction.Request()).get(); + assertThat(statusResponse.getMode(), equalTo(OperationMode.STOPPED)); + } + public void testPollIntervalUpdate() throws Exception { TimeValue pollInterval = TimeValue.timeValueSeconds(randomLongBetween(1, 5)); final String server_1 = internalCluster().startMasterOnlyNode( From 9c39dea7ae0c623a37313e399f4e81a534b10279 Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Fri, 1 Feb 2019 16:24:02 -0500 Subject: [PATCH 52/54] AwaitsFix testAbortedSnapshotDuringInitDoesNotStart (#38227) Tracked at #38226 --- .../elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java | 1 + 1 file changed, 1 insertion(+) diff --git a/server/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java b/server/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java index d633493622dcd..2c759ec3dd706 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java +++ b/server/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java @@ -3637,6 +3637,7 @@ public void testParallelRestoreOperationsFromSingleSnapshot() throws Exception { } @TestLogging("org.elasticsearch.snapshots:TRACE") + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/38226") public void testAbortedSnapshotDuringInitDoesNotStart() throws Exception { final Client client = client(); From 78a65c340d195e99a533ce57b6c263d1bb73dd31 Mon Sep 17 00:00:00 2001 From: Ioannis Kakavas Date: Fri, 1 Feb 2019 23:56:35 +0200 Subject: [PATCH 53/54] Correctly disable tests for FIPS JVMs (#38214) Replace assertFalse with assumeFalse Resolves: #38212 --- .../elasticsearch/index/reindex/ReindexRestClientSslTests.java | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git 
a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexRestClientSslTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexRestClientSslTests.java index 87ab4b3241410..7c94b94bbb1d5 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexRestClientSslTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexRestClientSslTests.java @@ -143,9 +143,8 @@ public void testClientSucceedsWithCertificateAuthorities() throws IOException { } } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/38212") public void testClientSucceedsWithVerificationDisabled() throws IOException { - assertFalse("Cannot disable verification in FIPS JVM", inFipsJvm()); + assumeFalse("Cannot disable verification in FIPS JVM", inFipsJvm()); final List threads = new ArrayList<>(); final Settings settings = Settings.builder() .put("path.home", createTempDir()) From f181e170387ca50727b750605fad51a3e85fed3a Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Fri, 1 Feb 2019 17:19:19 -0500 Subject: [PATCH 54/54] Introduce retention leases versioning (#37951) Because concurrent sync requests from a primary to its replicas could be in flight, it can be the case that an older retention leases collection arrives and is processed on the replica after a newer retention leases collection has arrived and been processed. Without a defense, in this case the replica would overwrite the newer retention leases with the older retention leases. This commit addresses this issue by introducing a versioning scheme to retention leases. This versioning scheme is used to resolve out-of-order processing on the replica. We persist this version into Lucene and restore it on recovery. The encoding of retention leases is starting to get a little ugly. We can consider addressing this in a follow-up. 
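In sketch form, the replica-side resolution is a version-gated swap: an incoming retention
lease collection is applied only if it supersedes the collection the replica already holds.
The condensed illustration below uses a hypothetical VersionedCollection class; the real
logic lives in RetentionLeases#supersedes in the diff below:

    // a collection wins if it has a higher primary term, or the same primary term
    // and a higher version
    final class VersionedCollection {
        final long primaryTerm; // term of the primary that issued the collection
        final long version;     // bumped on every add, renew, or expiration

        VersionedCollection(final long primaryTerm, final long version) {
            this.primaryTerm = primaryTerm;
            this.version = version;
        }

        boolean supersedes(final VersionedCollection that) {
            return primaryTerm > that.primaryTerm
                || (primaryTerm == that.primaryTerm && version > that.version);
        }
    }

For example, a replica holding leases at (primary term 1, version 7) drops a late-arriving
sync at (1, 6) but applies one at (1, 8) or, after a failover, at (2, 1).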
--- build.gradle | 2 +- docs/reference/indices/flush.asciidoc | 4 +- .../index/engine/EngineConfig.java | 9 +- .../index/engine/InternalEngine.java | 6 +- .../index/engine/SoftDeletesPolicy.java | 11 +- .../index/seqno/ReplicationTracker.java | 70 ++--- .../index/seqno/RetentionLease.java | 52 +--- .../index/seqno/RetentionLeaseStats.java | 33 +-- .../index/seqno/RetentionLeaseSyncAction.java | 13 +- .../index/seqno/RetentionLeaseSyncer.java | 4 +- .../index/seqno/RetentionLeases.java | 253 ++++++++++++++++++ .../elasticsearch/index/shard/IndexShard.java | 14 +- .../engine/CombinedDeletionPolicyTests.java | 12 +- .../index/engine/InternalEngineTests.java | 59 ++-- .../index/engine/SoftDeletesPolicyTests.java | 19 +- ...ReplicationTrackerRetentionLeaseTests.java | 144 ++++++++-- .../index/seqno/ReplicationTrackerTests.java | 2 +- .../index/seqno/RetentionLeaseStatsTests.java | 2 +- ...tentionLeaseStatsWireSerializingTests.java | 4 +- .../seqno/RetentionLeaseSyncActionTests.java | 16 +- .../index/seqno/RetentionLeaseSyncIT.java | 25 +- .../index/seqno/RetentionLeaseTests.java | 25 -- .../index/seqno/RetentionLeasesTests.java | 95 +++++++ .../shard/IndexShardRetentionLeaseTests.java | 85 +++--- .../index/shard/RefreshListenersTests.java | 31 ++- .../index/engine/EngineTestCase.java | 13 +- .../index/engine/FollowingEngineTests.java | 3 +- 27 files changed, 729 insertions(+), 277 deletions(-) create mode 100644 server/src/main/java/org/elasticsearch/index/seqno/RetentionLeases.java create mode 100644 server/src/test/java/org/elasticsearch/index/seqno/RetentionLeasesTests.java diff --git a/build.gradle b/build.gradle index 42a4a42002222..22505ed69a66d 100644 --- a/build.gradle +++ b/build.gradle @@ -160,7 +160,7 @@ task verifyVersions { * after the backport of the backcompat code is complete. 
*/ final boolean bwc_tests_enabled = false -final String bwc_tests_disabled_issue = "backport of#37977, #37857 and #37872" /* place a PR link here when committing bwc changes */ +final String bwc_tests_disabled_issue = "https://github.com/elastic/elasticsearch/pull/37951" /* place a PR link here when committing bwc changes */ if (bwc_tests_enabled == false) { if (bwc_tests_disabled_issue.isEmpty()) { throw new GradleException("bwc_tests_disabled_issue must be set when bwc_tests_enabled == false") diff --git a/docs/reference/indices/flush.asciidoc b/docs/reference/indices/flush.asciidoc index fdfcd80ecd463..ea8667aa1b713 100644 --- a/docs/reference/indices/flush.asciidoc +++ b/docs/reference/indices/flush.asciidoc @@ -104,7 +104,7 @@ which returns something similar to: "sync_id" : "AVvFY-071siAOuFGEO9P", <1> "max_unsafe_auto_id_timestamp" : "-1", "min_retained_seq_no" : "0", - "retention_leases" : "id:replica-0;retaining_seq_no:0;timestamp:1547235588;source:replica" + "retention_leases" : "primary_term:1;version:1;id:replica-0;retaining_seq_no:0;timestamp:1547235588;source:replica" }, "num_docs" : 0 } @@ -119,7 +119,7 @@ which returns something similar to: // TESTRESPONSE[s/"translog_uuid" : "hnOG3xFcTDeoI_kvvvOdNA"/"translog_uuid": $body.indices.twitter.shards.0.0.commit.user_data.translog_uuid/] // TESTRESPONSE[s/"history_uuid" : "XP7KDJGiS1a2fHYiFL5TXQ"/"history_uuid": $body.indices.twitter.shards.0.0.commit.user_data.history_uuid/] // TESTRESPONSE[s/"sync_id" : "AVvFY-071siAOuFGEO9P"/"sync_id": $body.indices.twitter.shards.0.0.commit.user_data.sync_id/] -// TESTRESPONSE[s/"retention_leases" : "id:replica-0;retaining_seq_no:0;timestamp:1547235588;source:replica"/"retention_leases": $body.indices.twitter.shards.0.0.commit.user_data.retention_leases/] +// TESTRESPONSE[s/"retention_leases" : "primary_term:1;version:1;id:replica-0;retaining_seq_no:0;timestamp:1547235588;source:replica"/"retention_leases": $body.indices.twitter.shards.0.0.commit.user_data.retention_leases/] <1> the `sync id` marker [float] diff --git a/server/src/main/java/org/elasticsearch/index/engine/EngineConfig.java b/server/src/main/java/org/elasticsearch/index/engine/EngineConfig.java index 1cc92319b5e45..7716cf93ffd6b 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/EngineConfig.java +++ b/server/src/main/java/org/elasticsearch/index/engine/EngineConfig.java @@ -35,7 +35,7 @@ import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.codec.CodecService; import org.elasticsearch.index.mapper.ParsedDocument; -import org.elasticsearch.index.seqno.RetentionLease; +import org.elasticsearch.index.seqno.RetentionLeases; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.store.Store; import org.elasticsearch.index.translog.TranslogConfig; @@ -43,7 +43,6 @@ import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.threadpool.ThreadPool; -import java.util.Collection; import java.util.List; import java.util.Objects; import java.util.function.LongSupplier; @@ -81,7 +80,7 @@ public final class EngineConfig { @Nullable private final CircuitBreakerService circuitBreakerService; private final LongSupplier globalCheckpointSupplier; - private final Supplier> retentionLeasesSupplier; + private final Supplier retentionLeasesSupplier; /** * A supplier of the outstanding retention leases. 
This is used during merged operations to determine which operations that have been @@ -89,7 +88,7 @@ public final class EngineConfig { * * @return a supplier of outstanding retention leases */ - public Supplier> retentionLeasesSupplier() { + public Supplier retentionLeasesSupplier() { return retentionLeasesSupplier; } @@ -141,7 +140,7 @@ public EngineConfig(ShardId shardId, String allocationId, ThreadPool threadPool, List externalRefreshListener, List internalRefreshListener, Sort indexSort, CircuitBreakerService circuitBreakerService, LongSupplier globalCheckpointSupplier, - Supplier> retentionLeasesSupplier, + Supplier retentionLeasesSupplier, LongSupplier primaryTermSupplier, TombstoneDocSupplier tombstoneDocSupplier) { this.shardId = shardId; diff --git a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java index bb2da0947363d..acedd8356ea9e 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java @@ -75,7 +75,7 @@ import org.elasticsearch.index.merge.MergeStats; import org.elasticsearch.index.merge.OnGoingMerge; import org.elasticsearch.index.seqno.LocalCheckpointTracker; -import org.elasticsearch.index.seqno.RetentionLease; +import org.elasticsearch.index.seqno.RetentionLeases; import org.elasticsearch.index.seqno.SeqNoStats; import org.elasticsearch.index.seqno.SequenceNumbers; import org.elasticsearch.index.shard.ElasticsearchMergePolicy; @@ -2348,9 +2348,9 @@ protected void commitIndexWriter(final IndexWriter writer, final Translog transl * We sample these from the policy (which occurs under a lock) to ensure that we have a consistent view of the minimum * retained sequence number, and the retention leases. */ - final Tuple> retentionPolicy = softDeletesPolicy.getRetentionPolicy(); + final Tuple retentionPolicy = softDeletesPolicy.getRetentionPolicy(); commitData.put(Engine.MIN_RETAINED_SEQNO, Long.toString(retentionPolicy.v1())); - commitData.put(Engine.RETENTION_LEASES, RetentionLease.encodeRetentionLeases(retentionPolicy.v2())); + commitData.put(Engine.RETENTION_LEASES, RetentionLeases.encodeRetentionLeases(retentionPolicy.v2())); } logger.trace("committing writer with commit data [{}]", commitData); return commitData.entrySet().iterator(); diff --git a/server/src/main/java/org/elasticsearch/index/engine/SoftDeletesPolicy.java b/server/src/main/java/org/elasticsearch/index/engine/SoftDeletesPolicy.java index 17ec9a172e384..49b8f9d3483f2 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/SoftDeletesPolicy.java +++ b/server/src/main/java/org/elasticsearch/index/engine/SoftDeletesPolicy.java @@ -25,10 +25,10 @@ import org.elasticsearch.common.lease.Releasable; import org.elasticsearch.index.mapper.SeqNoFieldMapper; import org.elasticsearch.index.seqno.RetentionLease; +import org.elasticsearch.index.seqno.RetentionLeases; import org.elasticsearch.index.seqno.SequenceNumbers; import org.elasticsearch.index.translog.Translog; -import java.util.Collection; import java.util.Objects; import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.LongSupplier; @@ -47,13 +47,13 @@ final class SoftDeletesPolicy { // The min seq_no value that is retained - ops after this seq# should exist in the Lucene index. 
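    // it only ever advances; getRetentionPolicy() below derives it from the global checkpoint,
    // the operation-based retention setting, and the minimum sequence number retained by the leases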
private long minRetainedSeqNo; // provides the retention leases used to calculate the minimum sequence number to retain - private final Supplier> retentionLeasesSupplier; + private final Supplier retentionLeasesSupplier; SoftDeletesPolicy( final LongSupplier globalCheckpointSupplier, final long minRetainedSeqNo, final long retentionOperations, - final Supplier> retentionLeasesSupplier) { + final Supplier retentionLeasesSupplier) { this.globalCheckpointSupplier = globalCheckpointSupplier; this.retentionOperations = retentionOperations; this.minRetainedSeqNo = minRetainedSeqNo; @@ -110,12 +110,12 @@ synchronized long getMinRetainedSeqNo() { return getRetentionPolicy().v1(); } - public synchronized Tuple> getRetentionPolicy() { + public synchronized Tuple getRetentionPolicy() { /* * When an engine is flushed, we need to provide it the latest collection of retention leases even when the soft deletes policy is * locked for peer recovery. */ - final Collection retentionLeases = retentionLeasesSupplier.get(); + final RetentionLeases retentionLeases = retentionLeasesSupplier.get(); // do not advance if the retention lock is held if (retentionLockCount == 0) { /* @@ -130,6 +130,7 @@ public synchronized Tuple> getRetentionPolicy() // calculate the minimum sequence number to retain based on retention leases final long minimumRetainingSequenceNumber = retentionLeases + .leases() .stream() .mapToLong(RetentionLease::retainingSequenceNumber) .min() diff --git a/server/src/main/java/org/elasticsearch/index/seqno/ReplicationTracker.java b/server/src/main/java/org/elasticsearch/index/seqno/ReplicationTracker.java index bc51bc7b67164..34ec443a5404a 100644 --- a/server/src/main/java/org/elasticsearch/index/seqno/ReplicationTracker.java +++ b/server/src/main/java/org/elasticsearch/index/seqno/ReplicationTracker.java @@ -38,11 +38,11 @@ import org.elasticsearch.index.shard.ShardId; import java.io.IOException; -import java.util.ArrayList; import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; +import java.util.List; import java.util.Map; import java.util.Objects; import java.util.OptionalLong; @@ -54,6 +54,7 @@ import java.util.function.ToLongFunction; import java.util.stream.Collectors; import java.util.stream.LongStream; +import java.util.stream.Stream; /** * This class is responsible for tracking the replication group with its progress and safety markers (local and global checkpoints). @@ -157,7 +158,7 @@ public class ReplicationTracker extends AbstractIndexShardComponent implements L * A callback when a new retention lease is created or an existing retention lease expires. In practice, this callback invokes the * retention lease sync action, to sync retention leases to replicas. */ - private final BiConsumer, ActionListener> onSyncRetentionLeases; + private final BiConsumer> onSyncRetentionLeases; /** * This set contains allocation IDs for which there is a thread actively waiting for the local checkpoint to advance to at least the @@ -170,12 +171,10 @@ public class ReplicationTracker extends AbstractIndexShardComponent implements L */ volatile ReplicationGroup replicationGroup; - private final Map retentionLeases = new HashMap<>(); - - private Collection copyRetentionLeases() { - assert Thread.holdsLock(this); - return Collections.unmodifiableCollection(new ArrayList<>(retentionLeases.values())); - } + /** + * The current retention leases. 
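+ * The field is replaced wholesale on every change (add, renew, expiration, or sync from
+ * the primary) rather than mutated in place.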
+ */ + private RetentionLeases retentionLeases = RetentionLeases.EMPTY; /** * Get all non-expired retention leases tracked on this shard. An unmodifiable copy of the retention leases is returned. Note that only @@ -183,27 +182,25 @@ private Collection copyRetentionLeases() { * * @return the retention leases */ - public Collection getRetentionLeases() { + public RetentionLeases getRetentionLeases() { final boolean wasPrimaryMode; - final Collection nonExpiredRetentionLeases; + final RetentionLeases nonExpiredRetentionLeases; synchronized (this) { if (primaryMode) { // the primary calculates the non-expired retention leases and syncs them to replicas final long currentTimeMillis = currentTimeMillisSupplier.getAsLong(); final long retentionLeaseMillis = indexSettings.getRetentionLeaseMillis(); - final Collection expiredRetentionLeases = retentionLeases - .values() + final Map> partitionByExpiration = retentionLeases + .leases() .stream() - .filter(retentionLease -> currentTimeMillis - retentionLease.timestamp() > retentionLeaseMillis) - .collect(Collectors.toList()); - if (expiredRetentionLeases.isEmpty()) { + .collect(Collectors.groupingBy(lease -> currentTimeMillis - lease.timestamp() > retentionLeaseMillis)); + if (partitionByExpiration.get(true) == null) { // early out as no retention leases have expired - return copyRetentionLeases(); - } - // clean up the expired retention leases - for (final RetentionLease expiredRetentionLease : expiredRetentionLeases) { - retentionLeases.remove(expiredRetentionLease.id()); + return retentionLeases; } + final Collection nonExpiredLeases = + partitionByExpiration.get(false) != null ? partitionByExpiration.get(false) : Collections.emptyList(); + retentionLeases = new RetentionLeases(operationPrimaryTerm, retentionLeases.version() + 1, nonExpiredLeases); } /* * At this point, we were either in primary mode and have updated the non-expired retention leases into the tracking map, or @@ -211,7 +208,7 @@ public Collection getRetentionLeases() { * non-expired retention leases, instead receiving them on syncs from the primary. 
*/ wasPrimaryMode = primaryMode; - nonExpiredRetentionLeases = copyRetentionLeases(); + nonExpiredRetentionLeases = retentionLeases; } if (wasPrimaryMode) { onSyncRetentionLeases.accept(nonExpiredRetentionLeases, ActionListener.wrap(() -> {})); @@ -236,15 +233,18 @@ public RetentionLease addRetentionLease( final ActionListener listener) { Objects.requireNonNull(listener); final RetentionLease retentionLease; - final Collection currentRetentionLeases; + final RetentionLeases currentRetentionLeases; synchronized (this) { assert primaryMode; - if (retentionLeases.containsKey(id)) { + if (retentionLeases.contains(id)) { throw new IllegalArgumentException("retention lease with ID [" + id + "] already exists"); } retentionLease = new RetentionLease(id, retainingSequenceNumber, currentTimeMillisSupplier.getAsLong(), source); - retentionLeases.put(id, retentionLease); - currentRetentionLeases = copyRetentionLeases(); + retentionLeases = new RetentionLeases( + operationPrimaryTerm, + retentionLeases.version() + 1, + Stream.concat(retentionLeases.leases().stream(), Stream.of(retentionLease)).collect(Collectors.toList())); + currentRetentionLeases = retentionLeases; } onSyncRetentionLeases.accept(currentRetentionLeases, listener); return retentionLease; @@ -261,18 +261,25 @@ public RetentionLease addRetentionLease( */ public synchronized RetentionLease renewRetentionLease(final String id, final long retainingSequenceNumber, final String source) { assert primaryMode; - if (retentionLeases.containsKey(id) == false) { + if (retentionLeases.contains(id) == false) { throw new IllegalArgumentException("retention lease with ID [" + id + "] does not exist"); } final RetentionLease retentionLease = new RetentionLease(id, retainingSequenceNumber, currentTimeMillisSupplier.getAsLong(), source); - final RetentionLease existingRetentionLease = retentionLeases.put(id, retentionLease); + final RetentionLease existingRetentionLease = retentionLeases.get(id); assert existingRetentionLease != null; assert existingRetentionLease.retainingSequenceNumber() <= retentionLease.retainingSequenceNumber() : "retention lease renewal for [" + id + "]" + " from [" + source + "]" + " renewed a lower retaining sequence number [" + retentionLease.retainingSequenceNumber() + "]" + " than the current lease retaining sequence number [" + existingRetentionLease.retainingSequenceNumber() + "]"; + retentionLeases = new RetentionLeases( + operationPrimaryTerm, + retentionLeases.version() + 1, + Stream.concat( + retentionLeases.leases().stream().filter(lease -> lease.id().equals(id) == false), + Stream.of(retentionLease)) + .collect(Collectors.toList())); return retentionLease; } @@ -281,10 +288,11 @@ public synchronized RetentionLease renewRetentionLease(final String id, final lo * * @param retentionLeases the retention leases */ - public synchronized void updateRetentionLeasesOnReplica(final Collection retentionLeases) { + public synchronized void updateRetentionLeasesOnReplica(final RetentionLeases retentionLeases) { assert primaryMode == false; - this.retentionLeases.clear(); - this.retentionLeases.putAll(retentionLeases.stream().collect(Collectors.toMap(RetentionLease::id, Function.identity()))); + if (retentionLeases.supersedes(this.retentionLeases)) { + this.retentionLeases = retentionLeases; + } } public static class CheckpointState implements Writeable { @@ -565,7 +573,7 @@ public ReplicationTracker( final long globalCheckpoint, final LongConsumer onGlobalCheckpointUpdated, final LongSupplier currentTimeMillisSupplier, - final 
BiConsumer, ActionListener> onSyncRetentionLeases) { + final BiConsumer> onSyncRetentionLeases) { super(shardId, indexSettings); assert globalCheckpoint >= SequenceNumbers.UNASSIGNED_SEQ_NO : "illegal initial global checkpoint: " + globalCheckpoint; this.shardAllocationId = allocationId; diff --git a/server/src/main/java/org/elasticsearch/index/seqno/RetentionLease.java b/server/src/main/java/org/elasticsearch/index/seqno/RetentionLease.java index 362d068f45e2d..e1d362d98764a 100644 --- a/server/src/main/java/org/elasticsearch/index/seqno/RetentionLease.java +++ b/server/src/main/java/org/elasticsearch/index/seqno/RetentionLease.java @@ -25,13 +25,8 @@ import java.io.IOException; import java.util.Arrays; -import java.util.Collection; -import java.util.Collections; import java.util.Locale; -import java.util.Map; import java.util.Objects; -import java.util.function.Function; -import java.util.stream.Collectors; /** * A "shard history retention lease" (or "retention lease" for short) is conceptually a marker containing a retaining sequence number such @@ -162,22 +157,10 @@ static String encodeRetentionLease(final RetentionLease retentionLease) { return String.format( Locale.ROOT, "id:%s;retaining_seq_no:%d;timestamp:%d;source:%s", - retentionLease.id(), - retentionLease.retainingSequenceNumber(), - retentionLease.timestamp(), - retentionLease.source()); - } - - /** - * Encodes a collection of retention leases as a string. This encoding can be decoed by {@link #decodeRetentionLeases(String)}. The - * encoding is a comma-separated encoding of each retention lease as encoded by {@link #encodeRetentionLease(RetentionLease)}. - * - * @param retentionLeases the retention leases - * @return the encoding of the retention leases - */ - public static String encodeRetentionLeases(final Collection retentionLeases) { - Objects.requireNonNull(retentionLeases); - return retentionLeases.stream().map(RetentionLease::encodeRetentionLease).collect(Collectors.joining(",")); + retentionLease.id, + retentionLease.retainingSequenceNumber, + retentionLease.timestamp, + retentionLease.source); } /** @@ -201,23 +184,6 @@ static RetentionLease decodeRetentionLease(final String encodedRetentionLease) { return new RetentionLease(id, retainingSequenceNumber, timestamp, source); } - /** - * Decodes retention leases encoded by {@link #encodeRetentionLeases(Collection)}. - * - * @param encodedRetentionLeases an encoded collection of retention leases - * @return the decoded retention leases - */ - public static Collection decodeRetentionLeases(final String encodedRetentionLeases) { - Objects.requireNonNull(encodedRetentionLeases); - if (encodedRetentionLeases.isEmpty()) { - return Collections.emptyList(); - } - assert Arrays.stream(encodedRetentionLeases.split(",")) - .allMatch(s -> s.matches("id:[^:;,]+;retaining_seq_no:\\d+;timestamp:\\d+;source:[^:;,]+")) - : encodedRetentionLeases; - return Arrays.stream(encodedRetentionLeases.split(",")).map(RetentionLease::decodeRetentionLease).collect(Collectors.toList()); - } - @Override public boolean equals(final Object o) { if (this == o) return true; @@ -244,14 +210,4 @@ public String toString() { '}'; } - /** - * A utility method to convert a collection of retention leases to a map from retention lease ID to retention lease. 
- * - * @param leases the leases - * @return the map from retention lease ID to retention lease - */ - static Map toMap(final Collection leases) { - return leases.stream().collect(Collectors.toMap(RetentionLease::id, Function.identity())); - } - } diff --git a/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseStats.java b/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseStats.java index b8f1454a12c2b..14f485d314928 100644 --- a/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseStats.java +++ b/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseStats.java @@ -26,7 +26,6 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import java.io.IOException; -import java.util.Collection; import java.util.Objects; /** @@ -34,24 +33,24 @@ */ public final class RetentionLeaseStats implements ToXContentFragment, Writeable { - private final Collection leases; + private final RetentionLeases retentionLeases; /** - * The underlying retention leases backing this stats object. + * The underlying retention lease collection backing this stats object. * - * @return the leases + * @return the retention lease collection */ - public Collection leases() { - return leases; + public RetentionLeases retentionLeases() { + return retentionLeases; } /** - * Constructs a new retention lease stats object from the specified leases. + * Constructs a new retention lease stats object from the specified retention lease collection. * - * @param leases the leases + * @param retentionLeases the retention lease collection */ - public RetentionLeaseStats(final Collection leases) { - this.leases = Objects.requireNonNull(leases); + public RetentionLeaseStats(final RetentionLeases retentionLeases) { + this.retentionLeases = Objects.requireNonNull(retentionLeases); } /** @@ -62,7 +61,7 @@ public RetentionLeaseStats(final Collection leases) { * @throws IOException if an I/O exception occurs reading from the stream */ public RetentionLeaseStats(final StreamInput in) throws IOException { - leases = in.readList(RetentionLease::new); + retentionLeases = new RetentionLeases(in); } /** @@ -74,7 +73,7 @@ public RetentionLeaseStats(final StreamInput in) throws IOException { */ @Override public void writeTo(final StreamOutput out) throws IOException { - out.writeCollection(leases); + retentionLeases.writeTo(out); } /** @@ -82,16 +81,18 @@ public void writeTo(final StreamOutput out) throws IOException { * * @param builder the builder * @param params the params - * @return the builder that these retention leases were converted to {@link org.elasticsearch.common.xcontent.XContent} into + * @return the builder that this retention lease collection was converted to {@link org.elasticsearch.common.xcontent.XContent} into * @throws IOException if an I/O exception occurs writing to the builder */ @Override public XContentBuilder toXContent(final XContentBuilder builder, final Params params) throws IOException { builder.startObject("retention_leases"); { + builder.field("primary_term", retentionLeases.primaryTerm()); + builder.field("version", retentionLeases.version()); builder.startArray("leases"); { - for (final RetentionLease retentionLease : leases) { + for (final RetentionLease retentionLease : retentionLeases.leases()) { builder.startObject(); { builder.field("id", retentionLease.id()); @@ -113,12 +114,12 @@ public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; final RetentionLeaseStats that = (RetentionLeaseStats) 
o; - return Objects.equals(leases, that.leases); + return Objects.equals(retentionLeases, that.retentionLeases); } @Override public int hashCode() { - return Objects.hash(leases); + return Objects.hash(retentionLeases); } } diff --git a/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseSyncAction.java b/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseSyncAction.java index 3b7df41f72d05..89a679abea591 100644 --- a/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseSyncAction.java +++ b/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseSyncAction.java @@ -47,7 +47,6 @@ import org.elasticsearch.transport.TransportService; import java.io.IOException; -import java.util.Collection; import java.util.Objects; /** @@ -99,7 +98,7 @@ public RetentionLeaseSyncAction( */ public void syncRetentionLeasesForShard( final ShardId shardId, - final Collection retentionLeases, + final RetentionLeases retentionLeases, final ActionListener listener) { Objects.requireNonNull(shardId); Objects.requireNonNull(retentionLeases); @@ -149,9 +148,9 @@ private void flush(final IndexShard indexShard) { public static final class Request extends ReplicatedWriteRequest { - private Collection retentionLeases; + private RetentionLeases retentionLeases; - public Collection getRetentionLeases() { + public RetentionLeases getRetentionLeases() { return retentionLeases; } @@ -159,7 +158,7 @@ public Request() { } - public Request(final ShardId shardId, final Collection retentionLeases) { + public Request(final ShardId shardId, final RetentionLeases retentionLeases) { super(Objects.requireNonNull(shardId)); this.retentionLeases = Objects.requireNonNull(retentionLeases); } @@ -167,13 +166,13 @@ public Request(final ShardId shardId, final Collection retention @Override public void readFrom(final StreamInput in) throws IOException { super.readFrom(in); - retentionLeases = in.readList(RetentionLease::new); + retentionLeases = new RetentionLeases(in); } @Override public void writeTo(final StreamOutput out) throws IOException { super.writeTo(Objects.requireNonNull(out)); - out.writeCollection(retentionLeases); + retentionLeases.writeTo(out); } @Override diff --git a/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseSyncer.java b/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseSyncer.java index 1e276eb98adaf..a19700a94da4b 100644 --- a/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseSyncer.java +++ b/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseSyncer.java @@ -23,8 +23,6 @@ import org.elasticsearch.action.support.replication.ReplicationResponse; import org.elasticsearch.index.shard.ShardId; -import java.util.Collection; - /** * A functional interface that represents a method for syncing retention leases to replica shards after a new retention lease is added on * the primary. @@ -42,7 +40,7 @@ public interface RetentionLeaseSyncer { */ void syncRetentionLeasesForShard( ShardId shardId, - Collection retentionLeases, + RetentionLeases retentionLeases, ActionListener listener); } diff --git a/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeases.java b/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeases.java new file mode 100644 index 0000000000000..5a9d9e333b27b --- /dev/null +++ b/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeases.java @@ -0,0 +1,253 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. 
See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.seqno;
+
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Writeable;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Locale;
+import java.util.Map;
+import java.util.Objects;
+import java.util.function.Function;
+import java.util.stream.Collectors;
+
+/**
+ * Represents a versioned collection of retention leases. We version the collection of retention leases to handle sync requests that
+ * arrive out of order on the replica, using the version to ensure that older sync requests are rejected.
+ */
+public class RetentionLeases implements Writeable {
+
+    private final long primaryTerm;
+
+    /**
+     * The primary term of this retention lease collection.
+     *
+     * @return the primary term
+     */
+    public long primaryTerm() {
+        return primaryTerm;
+    }
+
+    private final long version;
+
+    /**
+     * The version of this retention lease collection. The version is managed on the primary and incremented any time a retention
+     * lease is added or renewed, or when retention leases expire.
+     *
+     * @return the version of this retention lease collection
+     */
+    public long version() {
+        return version;
+    }
+
+    /**
+     * Checks if this retention leases collection supersedes the specified retention leases collection. A retention leases collection
+     * supersedes another retention leases collection if its primary term is higher, or if for equal primary terms its version is higher.
+     *
+     * @param that the retention leases collection to test against
+     * @return true if this retention leases collection supersedes the specified retention leases collection, otherwise false
+     */
+    public boolean supersedes(final RetentionLeases that) {
+        return primaryTerm > that.primaryTerm || primaryTerm == that.primaryTerm && version > that.version;
+    }
+
+    private final Map<String, RetentionLease> leases;
+
+    /**
+     * The underlying collection of retention leases.
+     *
+     * @return the retention leases
+     */
+    public Collection<RetentionLease> leases() {
+        return Collections.unmodifiableCollection(leases.values());
+    }
+
+    /**
+     * Checks if this retention lease collection contains a retention lease with the specified {@link RetentionLease#id()}.
+     *
+     * @param id the retention lease ID
+     * @return true if this retention lease collection contains a retention lease with the specified ID, otherwise false
+     */
+    public boolean contains(final String id) {
+        return leases.containsKey(id);
+    }
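
The `supersedes` method above defines the ordering used to reject out-of-order sync requests: a higher primary term always wins, and for equal primary terms the higher version wins. A compact standalone restatement (the class name and values are hypothetical):

[source,java]
----
public final class SupersedesSketch {

    // The same comparison as RetentionLeases#supersedes, on bare (primary term, version) pairs.
    static boolean supersedes(long term, long version, long thatTerm, long thatVersion) {
        return term > thatTerm || (term == thatTerm && version > thatVersion);
    }

    public static void main(String[] args) {
        System.out.println(supersedes(2, 1, 1, 9)); // true: a higher primary term always wins
        System.out.println(supersedes(1, 5, 1, 4)); // true: equal terms, so the higher version wins
        System.out.println(supersedes(1, 4, 1, 4)); // false: a collection does not supersede its equal
    }
}
----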
+
+    /**
+     * Returns the retention lease with the specified ID, or null if no such retention lease exists.
+     *
+     * @param id the retention lease ID
+     * @return the retention lease, or null if no retention lease with the specified ID exists
+     */
+    public RetentionLease get(final String id) {
+        return leases.get(id);
+    }
+
+    /**
+     * Represents an empty, un-versioned retention lease collection. This is used when no retention lease collection is found in the
+     * commit point.
+     */
+    public static final RetentionLeases EMPTY = new RetentionLeases(1, 0, Collections.emptyList());
+
+    /**
+     * Constructs a new retention lease collection with the specified primary term, version, and underlying collection of retention
+     * leases.
+     *
+     * @param primaryTerm the primary term under which this retention lease collection was created
+     * @param version     the version of this retention lease collection
+     * @param leases      the retention leases
+     */
+    public RetentionLeases(final long primaryTerm, final long version, final Collection<RetentionLease> leases) {
+        if (primaryTerm <= 0) {
+            throw new IllegalArgumentException("primary term must be positive but was [" + primaryTerm + "]");
+        }
+        if (version < 0) {
+            throw new IllegalArgumentException("version must be non-negative but was [" + version + "]");
+        }
+        Objects.requireNonNull(leases);
+        this.primaryTerm = primaryTerm;
+        this.version = version;
+        this.leases = Collections.unmodifiableMap(toMap(leases));
+    }
+
+    /**
+     * Constructs a new retention lease collection from a stream. The retention lease collection should have been written via
+     * {@link #writeTo(StreamOutput)}.
+     *
+     * @param in the stream to construct the retention lease collection from
+     * @throws IOException if an I/O exception occurs reading from the stream
+     */
+    public RetentionLeases(final StreamInput in) throws IOException {
+        primaryTerm = in.readVLong();
+        version = in.readVLong();
+        leases = Collections.unmodifiableMap(toMap(in.readList(RetentionLease::new)));
+    }
+
+    /**
+     * Writes a retention lease collection to a stream in a manner suitable for later reconstruction via
+     * {@link #RetentionLeases(StreamInput)}.
+     *
+     * @param out the stream to write the retention lease collection to
+     * @throws IOException if an I/O exception occurs writing to the stream
+     */
+    @Override
+    public void writeTo(final StreamOutput out) throws IOException {
+        out.writeVLong(primaryTerm);
+        out.writeVLong(version);
+        out.writeCollection(leases.values());
+    }
+
+    /**
+     * Encodes a retention lease collection as a string. This encoding can be decoded by
+     * {@link RetentionLeases#decodeRetentionLeases(String)}. The encoding is a comma-separated encoding of each retention lease as
+     * encoded by {@link RetentionLease#encodeRetentionLease(RetentionLease)}, prefixed by the primary term and version of the retention
+     * lease collection.
+     *
+     * @param retentionLeases the retention lease collection
+     * @return the encoding of the retention lease collection
+     */
+    public static String encodeRetentionLeases(final RetentionLeases retentionLeases) {
+        Objects.requireNonNull(retentionLeases);
+        return String.format(
+                Locale.ROOT,
+                "primary_term:%d;version:%d;%s",
+                retentionLeases.primaryTerm,
+                retentionLeases.version,
+                retentionLeases.leases.values().stream().map(RetentionLease::encodeRetentionLease).collect(Collectors.joining(",")));
+    }
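
Putting the pieces together, `encodeRetentionLeases` emits a `primary_term`/`version` header followed by the comma-joined per-lease encodings. A sketch of the resulting string (the class name and values are hypothetical):

[source,java]
----
import java.util.Locale;

public final class CollectionEncodingSketch {
    public static void main(String[] args) {
        final String lease = String.format(
                Locale.ROOT,
                "id:%s;retaining_seq_no:%d;timestamp:%d;source:%s",
                "lease-1", 42L, 10L, "test");
        // The header carries the primary term and version; the leases follow after the second semicolon.
        final String encoded = String.format(Locale.ROOT, "primary_term:%d;version:%d;%s", 3L, 7L, lease);
        // Prints: primary_term:3;version:7;id:lease-1;retaining_seq_no:42;timestamp:10;source:test
        System.out.println(encoded);
    }
}
----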
+
+    /**
+     * Decodes retention leases encoded by {@link #encodeRetentionLeases(RetentionLeases)}.
+     *
+     * @param encodedRetentionLeases an encoded retention lease collection
+     * @return the decoded retention lease collection
+     */
+    public static RetentionLeases decodeRetentionLeases(final String encodedRetentionLeases) {
+        Objects.requireNonNull(encodedRetentionLeases);
+        if (encodedRetentionLeases.isEmpty()) {
+            return EMPTY;
+        }
+        assert encodedRetentionLeases.matches("primary_term:\\d+;version:\\d+;.*") : encodedRetentionLeases;
+        final int firstSemicolon = encodedRetentionLeases.indexOf(";");
+        final long primaryTerm = Long.parseLong(encodedRetentionLeases.substring("primary_term:".length(), firstSemicolon));
+        final int secondSemicolon = encodedRetentionLeases.indexOf(";", firstSemicolon + 1);
+        final long version = Long.parseLong(encodedRetentionLeases.substring(firstSemicolon + 1 + "version:".length(), secondSemicolon));
+        final Collection<RetentionLease> leases;
+        if (secondSemicolon + 1 == encodedRetentionLeases.length()) {
+            leases = Collections.emptyList();
+        } else {
+            assert Arrays.stream(encodedRetentionLeases.substring(secondSemicolon + 1).split(","))
+                    .allMatch(s -> s.matches("id:[^:;,]+;retaining_seq_no:\\d+;timestamp:\\d+;source:[^:;,]+"))
+                    : encodedRetentionLeases;
+            leases = Arrays.stream(encodedRetentionLeases.substring(secondSemicolon + 1).split(","))
+                    .map(RetentionLease::decodeRetentionLease)
+                    .collect(Collectors.toList());
+        }
+
+        return new RetentionLeases(primaryTerm, version, leases);
+    }
+
+    @Override
+    public boolean equals(Object o) {
+        if (this == o) return true;
+        if (o == null || getClass() != o.getClass()) return false;
+        final RetentionLeases that = (RetentionLeases) o;
+        return primaryTerm == that.primaryTerm &&
+                version == that.version &&
+                Objects.equals(leases, that.leases);
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hash(primaryTerm, version, leases);
+    }
+
+    @Override
+    public String toString() {
+        return "RetentionLeases{" +
+                "primaryTerm=" + primaryTerm +
+                ", version=" + version +
+                ", leases=" + leases +
+                '}';
+    }
+
+    /**
+     * A utility method to convert retention leases to a map from retention lease ID to retention lease.
+     *
+     * @param leases the retention leases
+     * @return the map from retention lease ID to retention lease
+     */
+    private static Map<String, RetentionLease> toMap(final Collection<RetentionLease> leases) {
+        return leases.stream().collect(Collectors.toMap(RetentionLease::id, Function.identity()));
+    }
+
+    /**
+     * A utility method to convert a retention lease collection to a map from retention lease ID to retention lease.
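+     * For example, a collection holding leases with IDs {@code "a"} and {@code "b"} converts to a two-entry map keyed by
+     * {@code "a"} and {@code "b"}, which the sync tests use to compare committed and in-memory leases by ID.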
+ * + * @param retentionLeases the retention lease collection + * @return the map from retention lease ID to retention lease + */ + static Map toMap(final RetentionLeases retentionLeases) { + return retentionLeases.leases; + } + +} diff --git a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java index 42822942e3adf..261ee919f2a64 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java @@ -109,6 +109,7 @@ import org.elasticsearch.index.seqno.ReplicationTracker; import org.elasticsearch.index.seqno.RetentionLease; import org.elasticsearch.index.seqno.RetentionLeaseStats; +import org.elasticsearch.index.seqno.RetentionLeases; import org.elasticsearch.index.seqno.SeqNoStats; import org.elasticsearch.index.seqno.SequenceNumbers; import org.elasticsearch.index.shard.PrimaryReplicaSyncer.ResyncTask; @@ -143,7 +144,6 @@ import java.nio.channels.ClosedByInterruptException; import java.nio.charset.StandardCharsets; import java.util.ArrayList; -import java.util.Collection; import java.util.Collections; import java.util.EnumSet; import java.util.List; @@ -267,7 +267,7 @@ public IndexShard( final List searchOperationListener, final List listeners, final Runnable globalCheckpointSyncer, - final BiConsumer, ActionListener> retentionLeaseSyncer, + final BiConsumer> retentionLeaseSyncer, final CircuitBreakerService circuitBreakerService) throws IOException { super(shardRouting.shardId(), indexSettings); assert shardRouting.initializing(); @@ -1444,12 +1444,12 @@ private void innerOpenEngineAndTranslog() throws IOException { assert recoveryState.getStage() == RecoveryState.Stage.TRANSLOG : "TRANSLOG stage expected but was: " + recoveryState.getStage(); } - static Collection getRetentionLeases(final SegmentInfos segmentInfos) { + static RetentionLeases getRetentionLeases(final SegmentInfos segmentInfos) { final String committedRetentionLeases = segmentInfos.getUserData().get(Engine.RETENTION_LEASES); if (committedRetentionLeases == null) { - return Collections.emptyList(); + return RetentionLeases.EMPTY; } - return RetentionLease.decodeRetentionLeases(committedRetentionLeases); + return RetentionLeases.decodeRetentionLeases(committedRetentionLeases); } private void trimUnsafeCommits() throws IOException { @@ -1892,7 +1892,7 @@ public void addGlobalCheckpointListener( * * @return the retention leases */ - public Collection getRetentionLeases() { + public RetentionLeases getRetentionLeases() { verifyNotClosed(); return replicationTracker.getRetentionLeases(); } @@ -1943,7 +1943,7 @@ public RetentionLease renewRetentionLease(final String id, final long retainingS * * @param retentionLeases the retention leases */ - public void updateRetentionLeasesOnReplica(final Collection retentionLeases) { + public void updateRetentionLeasesOnReplica(final RetentionLeases retentionLeases) { assert assertReplicationTarget(); verifyNotClosed(); replicationTracker.updateRetentionLeasesOnReplica(retentionLeases); diff --git a/server/src/test/java/org/elasticsearch/index/engine/CombinedDeletionPolicyTests.java b/server/src/test/java/org/elasticsearch/index/engine/CombinedDeletionPolicyTests.java index 054bfb8bad695..617d23c44e1c6 100644 --- a/server/src/test/java/org/elasticsearch/index/engine/CombinedDeletionPolicyTests.java +++ b/server/src/test/java/org/elasticsearch/index/engine/CombinedDeletionPolicyTests.java @@ -22,6 +22,7 @@ import 
com.carrotsearch.hppc.LongArrayList; import org.apache.lucene.index.IndexCommit; import org.apache.lucene.store.Directory; +import org.elasticsearch.index.seqno.RetentionLeases; import org.elasticsearch.index.seqno.SequenceNumbers; import org.elasticsearch.index.translog.Translog; import org.elasticsearch.index.translog.TranslogDeletionPolicy; @@ -30,7 +31,6 @@ import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; -import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -55,7 +55,7 @@ public void testKeepCommitsAfterGlobalCheckpoint() throws Exception { final AtomicLong globalCheckpoint = new AtomicLong(); final int extraRetainedOps = between(0, 100); final SoftDeletesPolicy softDeletesPolicy = - new SoftDeletesPolicy(globalCheckpoint::get, NO_OPS_PERFORMED, extraRetainedOps, Collections::emptyList); + new SoftDeletesPolicy(globalCheckpoint::get, NO_OPS_PERFORMED, extraRetainedOps, () -> RetentionLeases.EMPTY); TranslogDeletionPolicy translogPolicy = createTranslogDeletionPolicy(); CombinedDeletionPolicy indexPolicy = new CombinedDeletionPolicy(logger, translogPolicy, softDeletesPolicy, globalCheckpoint::get); @@ -101,7 +101,7 @@ public void testAcquireIndexCommit() throws Exception { final AtomicLong globalCheckpoint = new AtomicLong(); final int extraRetainedOps = between(0, 100); final SoftDeletesPolicy softDeletesPolicy = - new SoftDeletesPolicy(globalCheckpoint::get, -1, extraRetainedOps, Collections::emptyList); + new SoftDeletesPolicy(globalCheckpoint::get, -1, extraRetainedOps, () -> RetentionLeases.EMPTY); final UUID translogUUID = UUID.randomUUID(); TranslogDeletionPolicy translogPolicy = createTranslogDeletionPolicy(); CombinedDeletionPolicy indexPolicy = new CombinedDeletionPolicy(logger, translogPolicy, softDeletesPolicy, globalCheckpoint::get); @@ -182,7 +182,7 @@ public void testAcquireIndexCommit() throws Exception { public void testLegacyIndex() throws Exception { final AtomicLong globalCheckpoint = new AtomicLong(); - final SoftDeletesPolicy softDeletesPolicy = new SoftDeletesPolicy(globalCheckpoint::get, -1, 0, Collections::emptyList); + final SoftDeletesPolicy softDeletesPolicy = new SoftDeletesPolicy(globalCheckpoint::get, -1, 0, () -> RetentionLeases.EMPTY); final UUID translogUUID = UUID.randomUUID(); TranslogDeletionPolicy translogPolicy = createTranslogDeletionPolicy(); @@ -217,7 +217,7 @@ public void testLegacyIndex() throws Exception { public void testDeleteInvalidCommits() throws Exception { final AtomicLong globalCheckpoint = new AtomicLong(randomNonNegativeLong()); - final SoftDeletesPolicy softDeletesPolicy = new SoftDeletesPolicy(globalCheckpoint::get, -1, 0, Collections::emptyList); + final SoftDeletesPolicy softDeletesPolicy = new SoftDeletesPolicy(globalCheckpoint::get, -1, 0, () -> RetentionLeases.EMPTY); TranslogDeletionPolicy translogPolicy = createTranslogDeletionPolicy(); CombinedDeletionPolicy indexPolicy = new CombinedDeletionPolicy(logger, translogPolicy, softDeletesPolicy, globalCheckpoint::get); @@ -251,7 +251,7 @@ public void testDeleteInvalidCommits() throws Exception { public void testCheckUnreferencedCommits() throws Exception { final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.UNASSIGNED_SEQ_NO); - final SoftDeletesPolicy softDeletesPolicy = new SoftDeletesPolicy(globalCheckpoint::get, -1, 0, Collections::emptyList); + final SoftDeletesPolicy softDeletesPolicy = new SoftDeletesPolicy(globalCheckpoint::get, -1, 0, () -> RetentionLeases.EMPTY); final 
UUID translogUUID = UUID.randomUUID(); final TranslogDeletionPolicy translogPolicy = createTranslogDeletionPolicy(); CombinedDeletionPolicy indexPolicy = new CombinedDeletionPolicy(logger, translogPolicy, softDeletesPolicy, globalCheckpoint::get); diff --git a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java index d984d1702f257..25b0c9e00cb7d 100644 --- a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java +++ b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java @@ -117,6 +117,7 @@ import org.elasticsearch.index.seqno.LocalCheckpointTracker; import org.elasticsearch.index.seqno.ReplicationTracker; import org.elasticsearch.index.seqno.RetentionLease; +import org.elasticsearch.index.seqno.RetentionLeases; import org.elasticsearch.index.seqno.SeqNoStats; import org.elasticsearch.index.seqno.SequenceNumbers; import org.elasticsearch.index.shard.IndexSearcherWrapper; @@ -141,7 +142,6 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Base64; -import java.util.Collection; import java.util.Collections; import java.util.Comparator; import java.util.HashMap; @@ -3052,12 +3052,29 @@ public void testRecoverFromForeignTranslog() throws IOException { TranslogConfig translogConfig = new TranslogConfig(shardId, translog.location(), config.getIndexSettings(), BigArrays.NON_RECYCLING_INSTANCE); - EngineConfig brokenConfig = new EngineConfig(shardId, allocationId.getId(), - threadPool, config.getIndexSettings(), null, store, newMergePolicy(), config.getAnalyzer(), config.getSimilarity(), - new CodecService(null, logger), config.getEventListener(), IndexSearcher.getDefaultQueryCache(), - IndexSearcher.getDefaultQueryCachingPolicy(), translogConfig, TimeValue.timeValueMinutes(5), - config.getExternalRefreshListener(), config.getInternalRefreshListener(), null, - new NoneCircuitBreakerService(), () -> UNASSIGNED_SEQ_NO, Collections::emptyList, primaryTerm::get, + EngineConfig brokenConfig = new EngineConfig( + shardId, + allocationId.getId(), + threadPool, + config.getIndexSettings(), + null, + store, + newMergePolicy(), + config.getAnalyzer(), + config.getSimilarity(), + new CodecService(null, logger), + config.getEventListener(), + IndexSearcher.getDefaultQueryCache(), + IndexSearcher.getDefaultQueryCachingPolicy(), + translogConfig, + TimeValue.timeValueMinutes(5), + config.getExternalRefreshListener(), + config.getInternalRefreshListener(), + null, + new NoneCircuitBreakerService(), + () -> UNASSIGNED_SEQ_NO, + () -> RetentionLeases.EMPTY, + primaryTerm::get, tombstoneDocSupplier()); expectThrows(EngineCreationFailureException.class, () -> new InternalEngine(brokenConfig)); @@ -5287,14 +5304,23 @@ public void testKeepMinRetainedSeqNoByMergePolicy() throws IOException { final IndexMetaData indexMetaData = IndexMetaData.builder(defaultSettings.getIndexMetaData()).settings(settings).build(); final IndexSettings indexSettings = IndexSettingsModule.newIndexSettings(indexMetaData); final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED); - final AtomicReference> leasesHolder = new AtomicReference<>(Collections.emptyList()); + final long primaryTerm = randomLongBetween(1, Long.MAX_VALUE); + final AtomicLong retentionLeasesVersion = new AtomicLong(); + final AtomicReference retentionLeasesHolder = new AtomicReference<>(RetentionLeases.EMPTY); final List operations = generateSingleDocHistory(true, 
randomFrom(VersionType.INTERNAL, VersionType.EXTERNAL), 2, 10, 300, "2"); Randomness.shuffle(operations); Set existingSeqNos = new HashSet<>(); store = createStore(); - engine = createEngine( - config(indexSettings, store, createTempDir(), newMergePolicy(), null, null, globalCheckpoint::get, leasesHolder::get)); + engine = createEngine(config( + indexSettings, + store, + createTempDir(), + newMergePolicy(), + null, + null, + globalCheckpoint::get, + retentionLeasesHolder::get)); assertThat(engine.getMinRetainedSeqNo(), equalTo(0L)); long lastMinRetainedSeqNo = engine.getMinRetainedSeqNo(); for (Engine.Operation op : operations) { @@ -5309,6 +5335,7 @@ public void testKeepMinRetainedSeqNoByMergePolicy() throws IOException { globalCheckpoint.set(randomLongBetween(globalCheckpoint.get(), engine.getLocalCheckpointTracker().getCheckpoint())); } if (randomBoolean()) { + retentionLeasesVersion.incrementAndGet(); final int length = randomIntBetween(0, 8); final List leases = new ArrayList<>(length); for (int i = 0; i < length; i++) { @@ -5318,7 +5345,7 @@ public void testKeepMinRetainedSeqNoByMergePolicy() throws IOException { final String source = randomAlphaOfLength(8); leases.add(new RetentionLease(id, retainingSequenceNumber, timestamp, source)); } - leasesHolder.set(leases); + retentionLeasesHolder.set(new RetentionLeases(primaryTerm, retentionLeasesVersion.get(), leases)); } if (rarely()) { settings.put(IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING.getKey(), randomLongBetween(0, 10)); @@ -5332,13 +5359,15 @@ public void testKeepMinRetainedSeqNoByMergePolicy() throws IOException { engine.flush(true, true); assertThat(Long.parseLong(engine.getLastCommittedSegmentInfos().userData.get(Engine.MIN_RETAINED_SEQNO)), equalTo(engine.getMinRetainedSeqNo())); - final Collection leases = leasesHolder.get(); - if (leases.isEmpty()) { - assertThat(engine.getLastCommittedSegmentInfos().getUserData().get(Engine.RETENTION_LEASES), equalTo("")); + final RetentionLeases leases = retentionLeasesHolder.get(); + if (leases.leases().isEmpty()) { + assertThat( + engine.getLastCommittedSegmentInfos().getUserData().get(Engine.RETENTION_LEASES), + equalTo("primary_term:" + primaryTerm + ";version:" + retentionLeasesVersion.get() + ";")); } else { assertThat( engine.getLastCommittedSegmentInfos().getUserData().get(Engine.RETENTION_LEASES), - equalTo(RetentionLease.encodeRetentionLeases(leases))); + equalTo(RetentionLeases.encodeRetentionLeases(leases))); } } if (rarely()) { diff --git a/server/src/test/java/org/elasticsearch/index/engine/SoftDeletesPolicyTests.java b/server/src/test/java/org/elasticsearch/index/engine/SoftDeletesPolicyTests.java index 8a34b0d1b5207..8257aa99d0486 100644 --- a/server/src/test/java/org/elasticsearch/index/engine/SoftDeletesPolicyTests.java +++ b/server/src/test/java/org/elasticsearch/index/engine/SoftDeletesPolicyTests.java @@ -24,15 +24,14 @@ import org.apache.lucene.search.Query; import org.elasticsearch.common.lease.Releasable; import org.elasticsearch.index.seqno.RetentionLease; +import org.elasticsearch.index.seqno.RetentionLeases; import org.elasticsearch.test.ESTestCase; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.Collections; -import java.util.HashSet; import java.util.List; -import java.util.Set; import java.util.concurrent.atomic.AtomicLong; import java.util.function.Supplier; @@ -54,13 +53,13 @@ public void testSoftDeletesRetentionLock() { for (int i = 0; i < retainingSequenceNumbers.length; i++) { 
retainingSequenceNumbers[i] = new AtomicLong(); } - final Supplier> retentionLeasesSupplier = + final Supplier retentionLeasesSupplier = () -> { - final Set leases = new HashSet<>(retainingSequenceNumbers.length); + final List leases = new ArrayList<>(retainingSequenceNumbers.length); for (int i = 0; i < retainingSequenceNumbers.length; i++) { leases.add(new RetentionLease(Integer.toString(i), retainingSequenceNumbers[i].get(), 0L, "test")); } - return leases; + return new RetentionLeases(1, 1, leases); }; long safeCommitCheckpoint = globalCheckpoint.get(); SoftDeletesPolicy policy = new SoftDeletesPolicy(globalCheckpoint::get, between(1, 10000), retainedOps, retentionLeasesSupplier); @@ -126,16 +125,20 @@ public void testAlwaysFetchLatestRetentionLeases() { for (int i = 0; i < numLeases; i++) { leases.add(new RetentionLease(Integer.toString(i), randomLongBetween(0, 1000), randomNonNegativeLong(), "test")); } - final Supplier> leasesSupplier = () -> Collections.unmodifiableCollection(new ArrayList<>(leases)); + final Supplier leasesSupplier = + () -> new RetentionLeases( + randomNonNegativeLong(), + randomNonNegativeLong(), + Collections.unmodifiableCollection(new ArrayList<>(leases))); final SoftDeletesPolicy policy = new SoftDeletesPolicy(globalCheckpoint::get, randomIntBetween(1, 1000), randomIntBetween(0, 1000), leasesSupplier); if (randomBoolean()) { policy.acquireRetentionLock(); } if (numLeases == 0) { - assertThat(policy.getRetentionPolicy().v2(), empty()); + assertThat(policy.getRetentionPolicy().v2().leases(), empty()); } else { - assertThat(policy.getRetentionPolicy().v2(), contains(leases.toArray(new RetentionLease[0]))); + assertThat(policy.getRetentionPolicy().v2().leases(), contains(leases.toArray(new RetentionLease[0]))); } } } diff --git a/server/src/test/java/org/elasticsearch/index/seqno/ReplicationTrackerRetentionLeaseTests.java b/server/src/test/java/org/elasticsearch/index/seqno/ReplicationTrackerRetentionLeaseTests.java index d7f135ffe4816..9781d893a1d53 100644 --- a/server/src/test/java/org/elasticsearch/index/seqno/ReplicationTrackerRetentionLeaseTests.java +++ b/server/src/test/java/org/elasticsearch/index/seqno/ReplicationTrackerRetentionLeaseTests.java @@ -28,6 +28,7 @@ import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.test.IndexSettingsModule; +import java.util.ArrayList; import java.util.Collection; import java.util.Collections; import java.util.HashMap; @@ -40,6 +41,8 @@ import java.util.stream.Collectors; import static org.elasticsearch.index.seqno.SequenceNumbers.UNASSIGNED_SEQ_NO; +import static org.hamcrest.Matchers.contains; +import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasItem; import static org.hamcrest.Matchers.hasSize; @@ -49,11 +52,12 @@ public class ReplicationTrackerRetentionLeaseTests extends ReplicationTrackerTes public void testAddOrRenewRetentionLease() { final AllocationId allocationId = AllocationId.newInitializing(); + long primaryTerm = randomLongBetween(1, Long.MAX_VALUE); final ReplicationTracker replicationTracker = new ReplicationTracker( new ShardId("test", "_na", 0), allocationId.getId(), IndexSettingsModule.newIndexSettings("test", Settings.EMPTY), - randomNonNegativeLong(), + primaryTerm, UNASSIGNED_SEQ_NO, value -> {}, () -> 0L, @@ -70,19 +74,27 @@ public void testAddOrRenewRetentionLease() { minimumRetainingSequenceNumbers[i] = randomLongBetween(SequenceNumbers.NO_OPS_PERFORMED, Long.MAX_VALUE); 
replicationTracker.addRetentionLease( Integer.toString(i), minimumRetainingSequenceNumbers[i], "test-" + i, ActionListener.wrap(() -> {})); - assertRetentionLeases(replicationTracker, i + 1, minimumRetainingSequenceNumbers, () -> 0L, true); + if (rarely() && primaryTerm < Long.MAX_VALUE) { + primaryTerm = randomLongBetween(primaryTerm + 1, Long.MAX_VALUE); + replicationTracker.setOperationPrimaryTerm(primaryTerm); + } + assertRetentionLeases(replicationTracker, i + 1, minimumRetainingSequenceNumbers, () -> 0L, primaryTerm, 1 + i, true); } for (int i = 0; i < length; i++) { minimumRetainingSequenceNumbers[i] = randomLongBetween(minimumRetainingSequenceNumbers[i], Long.MAX_VALUE); replicationTracker.renewRetentionLease(Integer.toString(i), minimumRetainingSequenceNumbers[i], "test-" + i); - assertRetentionLeases(replicationTracker, length, minimumRetainingSequenceNumbers, () -> 0L, true); + if (rarely() && primaryTerm < Long.MAX_VALUE) { + primaryTerm = randomLongBetween(primaryTerm + 1, Long.MAX_VALUE); + replicationTracker.setOperationPrimaryTerm(primaryTerm); + } + assertRetentionLeases(replicationTracker, length, minimumRetainingSequenceNumbers, () -> 0L, primaryTerm, 1 + length + i, true); } } public void testAddRetentionLeaseCausesRetentionLeaseSync() { final AllocationId allocationId = AllocationId.newInitializing(); - final Map retentionLeases = new HashMap<>(); + final Map retainingSequenceNumbers = new HashMap<>(); final AtomicBoolean invoked = new AtomicBoolean(); final AtomicReference reference = new AtomicReference<>(); final ReplicationTracker replicationTracker = new ReplicationTracker( @@ -98,8 +110,10 @@ public void testAddRetentionLeaseCausesRetentionLeaseSync() { assertFalse(Thread.holdsLock(reference.get())); invoked.set(true); assertThat( - leases.stream().collect(Collectors.toMap(RetentionLease::id, RetentionLease::retainingSequenceNumber)), - equalTo(retentionLeases)); + leases.leases() + .stream() + .collect(Collectors.toMap(RetentionLease::id, RetentionLease::retainingSequenceNumber)), + equalTo(retainingSequenceNumbers)); }); reference.set(replicationTracker); replicationTracker.updateFromMaster( @@ -113,7 +127,7 @@ public void testAddRetentionLeaseCausesRetentionLeaseSync() { for (int i = 0; i < length; i++) { final String id = randomAlphaOfLength(8); final long retainingSequenceNumber = randomLongBetween(SequenceNumbers.NO_OPS_PERFORMED, Long.MAX_VALUE); - retentionLeases.put(id, retainingSequenceNumber); + retainingSequenceNumbers.put(id, retainingSequenceNumber); replicationTracker.addRetentionLease(id, retainingSequenceNumber, "test", ActionListener.wrap(() -> {})); // assert that the new retention lease callback was invoked assertTrue(invoked.get()); @@ -141,11 +155,12 @@ private void runExpirationTest(final boolean primaryMode) { .builder() .put(IndexSettings.INDEX_SOFT_DELETES_RETENTION_LEASE_SETTING.getKey(), TimeValue.timeValueMillis(retentionLeaseMillis)) .build(); + final long primaryTerm = randomLongBetween(1, Long.MAX_VALUE); final ReplicationTracker replicationTracker = new ReplicationTracker( new ShardId("test", "_na", 0), allocationId.getId(), IndexSettingsModule.newIndexSettings("test", settings), - randomNonNegativeLong(), + primaryTerm, UNASSIGNED_SEQ_NO, value -> {}, currentTimeMillis::get, @@ -163,16 +178,20 @@ private void runExpirationTest(final boolean primaryMode) { if (primaryMode) { replicationTracker.addRetentionLease("0", retainingSequenceNumbers[0], "test-0", ActionListener.wrap(() -> {})); } else { - 
replicationTracker.updateRetentionLeasesOnReplica( + final RetentionLeases retentionLeases = new RetentionLeases( + primaryTerm, + 1, Collections.singleton(new RetentionLease("0", retainingSequenceNumbers[0], currentTimeMillis.get(), "test-0"))); + replicationTracker.updateRetentionLeasesOnReplica(retentionLeases); } { - final Collection retentionLeases = replicationTracker.getRetentionLeases(); - assertThat(retentionLeases, hasSize(1)); - final RetentionLease retentionLease = retentionLeases.iterator().next(); + final RetentionLeases retentionLeases = replicationTracker.getRetentionLeases(); + assertThat(retentionLeases.version(), equalTo(1L)); + assertThat(retentionLeases.leases(), hasSize(1)); + final RetentionLease retentionLease = retentionLeases.leases().iterator().next(); assertThat(retentionLease.timestamp(), equalTo(currentTimeMillis.get())); - assertRetentionLeases(replicationTracker, 1, retainingSequenceNumbers, currentTimeMillis::get, primaryMode); + assertRetentionLeases(replicationTracker, 1, retainingSequenceNumbers, currentTimeMillis::get, primaryTerm, 1, primaryMode); } // renew the lease @@ -181,25 +200,29 @@ private void runExpirationTest(final boolean primaryMode) { if (primaryMode) { replicationTracker.renewRetentionLease("0", retainingSequenceNumbers[0], "test-0"); } else { - replicationTracker.updateRetentionLeasesOnReplica( + final RetentionLeases retentionLeases = new RetentionLeases( + primaryTerm, + 2, Collections.singleton(new RetentionLease("0", retainingSequenceNumbers[0], currentTimeMillis.get(), "test-0"))); + replicationTracker.updateRetentionLeasesOnReplica(retentionLeases); } { - final Collection retentionLeases = replicationTracker.getRetentionLeases(); - assertThat(retentionLeases, hasSize(1)); - final RetentionLease retentionLease = retentionLeases.iterator().next(); + final RetentionLeases retentionLeases = replicationTracker.getRetentionLeases(); + assertThat(retentionLeases.version(), equalTo(2L)); + assertThat(retentionLeases.leases(), hasSize(1)); + final RetentionLease retentionLease = retentionLeases.leases().iterator().next(); assertThat(retentionLease.timestamp(), equalTo(currentTimeMillis.get())); - assertRetentionLeases(replicationTracker, 1, retainingSequenceNumbers, currentTimeMillis::get, primaryMode); + assertRetentionLeases(replicationTracker, 1, retainingSequenceNumbers, currentTimeMillis::get, primaryTerm, 2, primaryMode); } // now force the lease to expire currentTimeMillis.set(currentTimeMillis.get() + randomLongBetween(retentionLeaseMillis, Long.MAX_VALUE - currentTimeMillis.get())); if (primaryMode) { - assertRetentionLeases(replicationTracker, 0, retainingSequenceNumbers, currentTimeMillis::get, true); + assertRetentionLeases(replicationTracker, 0, retainingSequenceNumbers, currentTimeMillis::get, primaryTerm, 3, true); } else { // leases do not expire on replicas until synced from the primary - assertRetentionLeases(replicationTracker, 1, retainingSequenceNumbers, currentTimeMillis::get, false); + assertRetentionLeases(replicationTracker, 1, retainingSequenceNumbers, currentTimeMillis::get, primaryTerm, 2, false); } } @@ -227,7 +250,9 @@ public void testRetentionLeaseExpirationCausesRetentionLeaseSync() { assertFalse(Thread.holdsLock(reference.get())); invoked.set(true); assertThat( - leases.stream().collect(Collectors.toMap(RetentionLease::id, ReplicationTrackerRetentionLeaseTests::toTuple)), + leases.leases() + .stream() + .collect(Collectors.toMap(RetentionLease::id, ReplicationTrackerRetentionLeaseTests::toTuple)), 
equalTo(retentionLeases)); }); reference.set(replicationTracker); @@ -239,11 +264,14 @@ public void testRetentionLeaseExpirationCausesRetentionLeaseSync() { replicationTracker.activatePrimaryMode(SequenceNumbers.NO_OPS_PERFORMED); final int length = randomIntBetween(0, 8); + long version = 0; for (int i = 0; i < length; i++) { final String id = randomAlphaOfLength(8); final long retainingSequenceNumber = randomLongBetween(SequenceNumbers.NO_OPS_PERFORMED, Long.MAX_VALUE); retentionLeases.put(id, Tuple.tuple(retainingSequenceNumber, currentTimeMillis.get())); replicationTracker.addRetentionLease(id, retainingSequenceNumber, "test", ActionListener.wrap(() -> {})); + version++; + assertThat(replicationTracker.getRetentionLeases().version(), equalTo(version)); // assert that the new retention lease callback was invoked assertTrue(invoked.get()); @@ -252,6 +280,8 @@ public void testRetentionLeaseExpirationCausesRetentionLeaseSync() { currentTimeMillis.set(1 + currentTimeMillis.get()); retentionLeases.put(id, Tuple.tuple(retainingSequenceNumber, currentTimeMillis.get())); replicationTracker.renewRetentionLease(id, retainingSequenceNumber, "test"); + version++; + assertThat(replicationTracker.getRetentionLeases().version(), equalTo(version)); // reset the invocation marker so that we can assert the callback was invoked if any leases are expired assertFalse(invoked.get()); @@ -264,16 +294,76 @@ public void testRetentionLeaseExpirationCausesRetentionLeaseSync() { .map(Map.Entry::getKey) .collect(Collectors.toList()); expiredIds.forEach(retentionLeases::remove); + if (expiredIds.isEmpty() == false) { + version++; + } currentTimeMillis.set(currentTimeMillis.get() + currentTimeMillisIncrement); // getting the leases has the side effect of calculating which leases are expired and invoking the sync callback - final Collection current = replicationTracker.getRetentionLeases(); + final RetentionLeases current = replicationTracker.getRetentionLeases(); + assertThat(current.version(), equalTo(version)); // the current leases should equal our tracking map assertThat( - current.stream().collect(Collectors.toMap(RetentionLease::id, ReplicationTrackerRetentionLeaseTests::toTuple)), + current.leases() + .stream() + .collect(Collectors.toMap(RetentionLease::id, ReplicationTrackerRetentionLeaseTests::toTuple)), equalTo(retentionLeases)); // the callback should only be invoked if there were expired leases assertThat(invoked.get(), equalTo(expiredIds.isEmpty() == false)); } + assertThat(replicationTracker.getRetentionLeases().version(), equalTo(version)); + } + + public void testReplicaIgnoresOlderRetentionLeasesVersion() { + final AllocationId allocationId = AllocationId.newInitializing(); + final ReplicationTracker replicationTracker = new ReplicationTracker( + new ShardId("test", "_na", 0), + allocationId.getId(), + IndexSettingsModule.newIndexSettings("test", Settings.EMPTY), + randomNonNegativeLong(), + UNASSIGNED_SEQ_NO, + value -> {}, + () -> 0L, + (leases, listener) -> {}); + replicationTracker.updateFromMaster( + randomNonNegativeLong(), + Collections.singleton(allocationId.getId()), + routingTable(Collections.emptySet(), allocationId), + Collections.emptySet()); + final int length = randomIntBetween(0, 8); + final List retentionLeasesCollection = new ArrayList<>(length); + long primaryTerm = 1; + long version = 0; + for (int i = 0; i < length; i++) { + final int innerLength = randomIntBetween(0, 8); + final Collection leases = new ArrayList<>(); + for (int j = 0; j < innerLength; j++) { + leases.add( + new 
RetentionLease(i + "-" + j, randomNonNegativeLong(), randomNonNegativeLong(), randomAlphaOfLength(8))); + version++; + } + if (rarely()) { + primaryTerm++; + } + retentionLeasesCollection.add(new RetentionLeases(primaryTerm, version, leases)); + } + final Collection expectedLeases; + if (length == 0 || retentionLeasesCollection.get(length - 1).leases().isEmpty()) { + expectedLeases = Collections.emptyList(); + } else { + expectedLeases = retentionLeasesCollection.get(length - 1).leases(); + } + Collections.shuffle(retentionLeasesCollection, random()); + for (final RetentionLeases retentionLeases : retentionLeasesCollection) { + replicationTracker.updateRetentionLeasesOnReplica(retentionLeases); + } + assertThat(replicationTracker.getRetentionLeases().version(), equalTo(version)); + if (expectedLeases.isEmpty()) { + assertThat(replicationTracker.getRetentionLeases().leases(), empty()); + } else { + assertThat( + replicationTracker.getRetentionLeases().leases(), + contains(expectedLeases.toArray(new RetentionLease[0]))); + } } private static Tuple toTuple(final RetentionLease retentionLease) { @@ -285,10 +375,14 @@ private void assertRetentionLeases( final int size, final long[] minimumRetainingSequenceNumbers, final LongSupplier currentTimeMillisSupplier, + final long primaryTerm, + final long version, final boolean primaryMode) { - final Collection retentionLeases = replicationTracker.getRetentionLeases(); + final RetentionLeases retentionLeases = replicationTracker.getRetentionLeases(); + assertThat(retentionLeases.primaryTerm(), equalTo(primaryTerm)); + assertThat(retentionLeases.version(), equalTo(version)); final Map idToRetentionLease = new HashMap<>(); - for (final RetentionLease retentionLease : retentionLeases) { + for (final RetentionLease retentionLease : retentionLeases.leases()) { idToRetentionLease.put(retentionLease.id(), retentionLease); } diff --git a/server/src/test/java/org/elasticsearch/index/seqno/ReplicationTrackerTests.java b/server/src/test/java/org/elasticsearch/index/seqno/ReplicationTrackerTests.java index 7731f3cbf1d5f..037d2130b5c7b 100644 --- a/server/src/test/java/org/elasticsearch/index/seqno/ReplicationTrackerTests.java +++ b/server/src/test/java/org/elasticsearch/index/seqno/ReplicationTrackerTests.java @@ -687,7 +687,7 @@ public void testPrimaryContextHandoff() throws IOException { final LongConsumer onUpdate = updatedGlobalCheckpoint -> {}; final long primaryTerm = randomNonNegativeLong(); final long globalCheckpoint = UNASSIGNED_SEQ_NO; - final BiConsumer, ActionListener> onNewRetentionLease = + final BiConsumer> onNewRetentionLease = (leases, listener) -> {}; ReplicationTracker oldPrimary = new ReplicationTracker( shardId, aId.getId(), indexSettings, primaryTerm, globalCheckpoint, onUpdate, () -> 0L, onNewRetentionLease); diff --git a/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseStatsTests.java b/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseStatsTests.java index d77acc53b247e..8721450073531 100644 --- a/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseStatsTests.java +++ b/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseStatsTests.java @@ -61,7 +61,7 @@ public void testRetentionLeaseStats() throws InterruptedException { final IndicesStatsResponse indicesStats = client().admin().indices().prepareStats("index").execute().actionGet(); assertThat(indicesStats.getShards(), arrayWithSize(1)); final RetentionLeaseStats retentionLeaseStats = 
indicesStats.getShards()[0].getRetentionLeaseStats(); - assertThat(RetentionLease.toMap(retentionLeaseStats.leases()), equalTo(currentRetentionLeases)); + assertThat(RetentionLeases.toMap(retentionLeaseStats.retentionLeases()), equalTo(currentRetentionLeases)); } } diff --git a/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseStatsWireSerializingTests.java b/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseStatsWireSerializingTests.java index fe5dee782c4fe..9c7aee5191ac8 100644 --- a/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseStatsWireSerializingTests.java +++ b/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseStatsWireSerializingTests.java @@ -30,6 +30,8 @@ public class RetentionLeaseStatsWireSerializingTests extends AbstractWireSeriali @Override protected RetentionLeaseStats createTestInstance() { + final long primaryTerm = randomNonNegativeLong(); + final long version = randomNonNegativeLong(); final int length = randomIntBetween(0, 8); final Collection leases; if (length == 0) { @@ -44,7 +46,7 @@ protected RetentionLeaseStats createTestInstance() { leases.add(new RetentionLease(id, retainingSequenceNumber, timestamp, source)); } } - return new RetentionLeaseStats(leases); + return new RetentionLeaseStats(new RetentionLeases(primaryTerm, version, leases)); } @Override diff --git a/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseSyncActionTests.java b/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseSyncActionTests.java index 0cd85ef60f21a..ab92d5ad2326d 100644 --- a/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseSyncActionTests.java +++ b/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseSyncActionTests.java @@ -43,7 +43,6 @@ import org.elasticsearch.transport.TransportService; import org.mockito.ArgumentCaptor; -import java.util.Collection; import java.util.Collections; import java.util.concurrent.atomic.AtomicBoolean; @@ -114,10 +113,8 @@ public void testRetentionLeaseSyncActionOnPrimary() { shardStateAction, new ActionFilters(Collections.emptySet()), new IndexNameExpressionResolver()); - @SuppressWarnings("unchecked") final Collection retentionLeases = - (Collection) mock(Collection.class); - final RetentionLeaseSyncAction.Request request = - new RetentionLeaseSyncAction.Request(indexShard.shardId(), retentionLeases); + final RetentionLeases retentionLeases = mock(RetentionLeases.class); + final RetentionLeaseSyncAction.Request request = new RetentionLeaseSyncAction.Request(indexShard.shardId(), retentionLeases); final TransportWriteAction.WritePrimaryResult result = action.shardOperationOnPrimary(request, indexShard); @@ -155,10 +152,8 @@ public void testRetentionLeaseSyncActionOnReplica() { shardStateAction, new ActionFilters(Collections.emptySet()), new IndexNameExpressionResolver()); - @SuppressWarnings("unchecked") final Collection retentionLeases = - (Collection) mock(Collection.class); - final RetentionLeaseSyncAction.Request request = - new RetentionLeaseSyncAction.Request(indexShard.shardId(), retentionLeases); + final RetentionLeases retentionLeases = mock(RetentionLeases.class); + final RetentionLeaseSyncAction.Request request = new RetentionLeaseSyncAction.Request(indexShard.shardId(), retentionLeases); final TransportWriteAction.WriteReplicaResult result = action.shardOperationOnReplica(request, indexShard); // the retention leases on the shard should be updated @@ -190,8 +185,7 @@ public void testRetentionLeaseSyncExecution() { final 
Logger retentionLeaseSyncActionLogger = mock(Logger.class); - @SuppressWarnings("unchecked") final Collection retentionLeases = - (Collection) mock(Collection.class); + final RetentionLeases retentionLeases = mock(RetentionLeases.class); final AtomicBoolean invoked = new AtomicBoolean(); final RetentionLeaseSyncAction action = new RetentionLeaseSyncAction( Settings.EMPTY, diff --git a/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseSyncIT.java b/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseSyncIT.java index d009486778d89..3e69c84e3cde3 100644 --- a/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseSyncIT.java +++ b/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseSyncIT.java @@ -33,7 +33,6 @@ import org.elasticsearch.threadpool.ThreadPool; import java.io.Closeable; -import java.util.Collection; import java.util.HashMap; import java.util.Map; import java.util.concurrent.CountDownLatch; @@ -78,9 +77,9 @@ public void testRetentionLeasesSyncedOnAdd() throws Exception { retentionLock.close(); // check retention leases have been committed on the primary - final Collection primaryCommittedRetentionLeases = RetentionLease.decodeRetentionLeases( + final RetentionLeases primaryCommittedRetentionLeases = RetentionLeases.decodeRetentionLeases( primary.acquireLastIndexCommit(false).getIndexCommit().getUserData().get(Engine.RETENTION_LEASES)); - assertThat(currentRetentionLeases, equalTo(RetentionLease.toMap(primaryCommittedRetentionLeases))); + assertThat(currentRetentionLeases, equalTo(RetentionLeases.toMap(primaryCommittedRetentionLeases))); // check current retention leases have been synced to all replicas for (final ShardRouting replicaShard : clusterService().state().routingTable().index("index").shard(0).replicaShards()) { @@ -89,13 +88,13 @@ public void testRetentionLeasesSyncedOnAdd() throws Exception { final IndexShard replica = internalCluster() .getInstance(IndicesService.class, replicaShardNodeName) .getShardOrNull(new ShardId(resolveIndex("index"), 0)); - final Map retentionLeasesOnReplica = RetentionLease.toMap(replica.getRetentionLeases()); + final Map retentionLeasesOnReplica = RetentionLeases.toMap(replica.getRetentionLeases()); assertThat(retentionLeasesOnReplica, equalTo(currentRetentionLeases)); // check retention leases have been committed on the replica - final Collection replicaCommittedRetentionLeases = RetentionLease.decodeRetentionLeases( + final RetentionLeases replicaCommittedRetentionLeases = RetentionLeases.decodeRetentionLeases( replica.acquireLastIndexCommit(false).getIndexCommit().getUserData().get(Engine.RETENTION_LEASES)); - assertThat(currentRetentionLeases, equalTo(RetentionLease.toMap(replicaCommittedRetentionLeases))); + assertThat(currentRetentionLeases, equalTo(RetentionLeases.toMap(replicaCommittedRetentionLeases))); } } } @@ -138,14 +137,14 @@ public void testRetentionLeasesSyncOnExpiration() throws Exception { final IndexShard replica = internalCluster() .getInstance(IndicesService.class, replicaShardNodeName) .getShardOrNull(new ShardId(resolveIndex("index"), 0)); - assertThat(replica.getRetentionLeases(), hasItem(currentRetentionLease)); + assertThat(replica.getRetentionLeases().leases(), hasItem(currentRetentionLease)); } // sleep long enough that *possibly* the current retention lease has expired, and certainly that any previous have final long later = System.nanoTime(); Thread.sleep(Math.max(0, retentionLeaseTimeToLive.millis() - TimeUnit.NANOSECONDS.toMillis(later - now))); - final 
Collection currentRetentionLeases = primary.getRetentionLeases(); - assertThat(currentRetentionLeases, anyOf(empty(), contains(currentRetentionLease))); + final RetentionLeases currentRetentionLeases = primary.getRetentionLeases(); + assertThat(currentRetentionLeases.leases(), anyOf(empty(), contains(currentRetentionLease))); /* * Check that expiration of retention leases has been synced to all replicas. We have to assert busy since syncing happens in @@ -158,10 +157,12 @@ public void testRetentionLeasesSyncOnExpiration() throws Exception { final IndexShard replica = internalCluster() .getInstance(IndicesService.class, replicaShardNodeName) .getShardOrNull(new ShardId(resolveIndex("index"), 0)); - if (currentRetentionLeases.isEmpty()) { - assertThat(replica.getRetentionLeases(), empty()); + if (currentRetentionLeases.leases().isEmpty()) { + assertThat(replica.getRetentionLeases().leases(), empty()); } else { - assertThat(replica.getRetentionLeases(), contains(currentRetentionLeases.toArray(new RetentionLease[0]))); + assertThat( + replica.getRetentionLeases().leases(), + contains(currentRetentionLeases.leases().toArray(new RetentionLease[0]))); } } }); diff --git a/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseTests.java b/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseTests.java index 1a8d159c18757..bd2dee78b05ed 100644 --- a/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseTests.java +++ b/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseTests.java @@ -24,13 +24,8 @@ import org.elasticsearch.test.ESTestCase; import java.io.IOException; -import java.util.ArrayList; -import java.util.Collection; -import java.util.List; -import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasToString; @@ -107,24 +102,4 @@ public void testRetentionLeaseEncoding() { assertThat(RetentionLease.decodeRetentionLease(RetentionLease.encodeRetentionLease(retentionLease)), equalTo(retentionLease)); } - public void testRetentionLeasesEncoding() { - final int length = randomIntBetween(0, 8); - final List retentionLeases = new ArrayList<>(length); - for (int i = 0; i < length; i++) { - final String id = randomAlphaOfLength(8); - final long retainingSequenceNumber = randomNonNegativeLong(); - final long timestamp = randomNonNegativeLong(); - final String source = randomAlphaOfLength(8); - final RetentionLease retentionLease = new RetentionLease(id, retainingSequenceNumber, timestamp, source); - retentionLeases.add(retentionLease); - } - final Collection decodedRetentionLeases = - RetentionLease.decodeRetentionLeases(RetentionLease.encodeRetentionLeases(retentionLeases)); - if (length == 0) { - assertThat(decodedRetentionLeases, empty()); - } else { - assertThat(decodedRetentionLeases, contains(retentionLeases.toArray(new RetentionLease[0]))); - } - } - } diff --git a/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeasesTests.java b/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeasesTests.java new file mode 100644 index 0000000000000..33cc83f602860 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeasesTests.java @@ -0,0 +1,95 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.seqno; + +import org.elasticsearch.test.ESTestCase; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; + +import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.empty; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasToString; + +public class RetentionLeasesTests extends ESTestCase { + + public void testPrimaryTermOutOfRange() { + final long primaryTerm = randomLongBetween(Long.MIN_VALUE, 0); + final IllegalArgumentException e = expectThrows( + IllegalArgumentException.class, + () -> new RetentionLeases(primaryTerm, randomNonNegativeLong(), Collections.emptyList())); + assertThat(e, hasToString(containsString("primary term must be positive but was [" + primaryTerm + "]"))); + } + + public void testVersionOutOfRange() { + final long version = randomLongBetween(Long.MIN_VALUE, -1); + final IllegalArgumentException e = expectThrows( + IllegalArgumentException.class, + () -> new RetentionLeases(randomLongBetween(1, Long.MAX_VALUE), version, Collections.emptyList())); + assertThat(e, hasToString(containsString("version must be non-negative but was [" + version + "]"))); + } + + public void testRetentionLeasesEncoding() { + final long primaryTerm = randomNonNegativeLong(); + final long version = randomNonNegativeLong(); + final int length = randomIntBetween(0, 8); + final List retentionLeases = new ArrayList<>(length); + for (int i = 0; i < length; i++) { + final String id = randomAlphaOfLength(8); + final long retainingSequenceNumber = randomNonNegativeLong(); + final long timestamp = randomNonNegativeLong(); + final String source = randomAlphaOfLength(8); + final RetentionLease retentionLease = new RetentionLease(id, retainingSequenceNumber, timestamp, source); + retentionLeases.add(retentionLease); + } + final RetentionLeases decodedRetentionLeases = + RetentionLeases.decodeRetentionLeases( + RetentionLeases.encodeRetentionLeases(new RetentionLeases(primaryTerm, version, retentionLeases))); + assertThat(decodedRetentionLeases.version(), equalTo(version)); + if (length == 0) { + assertThat(decodedRetentionLeases.leases(), empty()); + } else { + assertThat(decodedRetentionLeases.leases(), containsInAnyOrder(retentionLeases.toArray(new RetentionLease[0]))); + } + } + + public void testSupersedesByPrimaryTerm() { + final long lowerPrimaryTerm = randomLongBetween(1, Long.MAX_VALUE); + final RetentionLeases left = new RetentionLeases(lowerPrimaryTerm, randomLongBetween(1, Long.MAX_VALUE), Collections.emptyList()); + final long higherPrimaryTerm = randomLongBetween(lowerPrimaryTerm + 1, Long.MAX_VALUE); + final RetentionLeases right = new RetentionLeases(higherPrimaryTerm, randomLongBetween(1, Long.MAX_VALUE), Collections.emptyList()); + assertTrue(right.supersedes(left)); + assertFalse(left.supersedes(right)); + } 
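
The tests immediately above and below pin down the `supersedes` ordering from both sides. A wire round-trip check in the same style would pair `writeTo` with the stream constructor; a minimal sketch, assuming `org.elasticsearch.common.io.stream.BytesStreamOutput` (fully qualified here so no imports change; this test is not part of the change):

[source,java]
----
public void testWireRoundTrip() throws java.io.IOException {
    final RetentionLeases before = new RetentionLeases(
            randomLongBetween(1, Long.MAX_VALUE),
            randomNonNegativeLong(),
            Collections.singletonList(new RetentionLease("a", 1L, 10L, "test")));
    try (org.elasticsearch.common.io.stream.BytesStreamOutput out =
                 new org.elasticsearch.common.io.stream.BytesStreamOutput()) {
        before.writeTo(out);
        // Reading the serialized bytes back through the stream constructor should reproduce the collection.
        final RetentionLeases after = new RetentionLeases(out.bytes().streamInput());
        assertThat(after, equalTo(before));
    }
}
----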
+
+    public void testSupersedesByVersion() {
+        final long primaryTerm = randomLongBetween(1, Long.MAX_VALUE);
+        final long lowerVersion = randomLongBetween(1, Long.MAX_VALUE);
+        final long higherVersion = randomLongBetween(lowerVersion + 1, Long.MAX_VALUE);
+        final RetentionLeases left = new RetentionLeases(primaryTerm, lowerVersion, Collections.emptyList());
+        final RetentionLeases right = new RetentionLeases(primaryTerm, higherVersion, Collections.emptyList());
+        assertTrue(right.supersedes(left));
+        assertFalse(left.supersedes(right));
+    }
+
+}
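[Editor's note: the invariants pinned down by RetentionLeasesTests above can be seen end-to-end in a small standalone sketch. This snippet is illustrative only and is not part of the patch; it assumes nothing beyond the RetentionLease/RetentionLeases API visible in these diffs (the constructors, supersedes, version(), and the static encode/decode helpers), and the class name is hypothetical.]

import java.util.Collections;

import org.elasticsearch.index.seqno.RetentionLease;
import org.elasticsearch.index.seqno.RetentionLeases;

public class RetentionLeasesSemanticsSketch {
    public static void main(String[] args) {
        final RetentionLease lease = new RetentionLease("peer-0", 42L, System.currentTimeMillis(), "test");

        // Same primary term: the higher version supersedes the lower one.
        final RetentionLeases v1 = new RetentionLeases(1L, 1L, Collections.singleton(lease));
        final RetentionLeases v2 = new RetentionLeases(1L, 2L, Collections.singleton(lease));
        System.out.println(v2.supersedes(v1)); // expected per testSupersedesByVersion: true
        System.out.println(v1.supersedes(v2)); // false

        // A higher primary term supersedes regardless of version.
        final RetentionLeases newTerm = new RetentionLeases(2L, 1L, Collections.singleton(lease));
        System.out.println(newTerm.supersedes(v2)); // expected per testSupersedesByPrimaryTerm: true

        // The string encoding used for commit metadata round-trips losslessly,
        // as exercised by testRetentionLeasesEncoding.
        final RetentionLeases decoded =
                RetentionLeases.decodeRetentionLeases(RetentionLeases.encodeRetentionLeases(v2));
        System.out.println(decoded.version() == v2.version()); // true
    }
}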
diff --git a/server/src/test/java/org/elasticsearch/index/shard/IndexShardRetentionLeaseTests.java b/server/src/test/java/org/elasticsearch/index/shard/IndexShardRetentionLeaseTests.java
index f66b383c2799c..76ca9f5b02458 100644
--- a/server/src/test/java/org/elasticsearch/index/shard/IndexShardRetentionLeaseTests.java
+++ b/server/src/test/java/org/elasticsearch/index/shard/IndexShardRetentionLeaseTests.java
@@ -31,11 +31,11 @@
 import org.elasticsearch.index.engine.InternalEngineFactory;
 import org.elasticsearch.index.seqno.RetentionLease;
 import org.elasticsearch.index.seqno.RetentionLeaseStats;
+import org.elasticsearch.index.seqno.RetentionLeases;
 import org.elasticsearch.index.seqno.SequenceNumbers;
 import org.elasticsearch.threadpool.ThreadPool;
 
 import java.io.IOException;
-import java.util.Collection;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.Map;
@@ -76,21 +76,22 @@ protected void tearDownThreadPool() {
 
     public void testAddOrRenewRetentionLease() throws IOException {
         final IndexShard indexShard = newStartedShard(true);
+        final long primaryTerm = indexShard.getOperationPrimaryTerm();
         try {
             final int length = randomIntBetween(0, 8);
             final long[] minimumRetainingSequenceNumbers = new long[length];
             for (int i = 0; i < length; i++) {
                 minimumRetainingSequenceNumbers[i] = randomLongBetween(SequenceNumbers.NO_OPS_PERFORMED, Long.MAX_VALUE);
                 indexShard.addRetentionLease(
-                        Integer.toString(i), minimumRetainingSequenceNumbers[i], "test-" + i, ActionListener.wrap(() -> {
-                        }));
-                assertRetentionLeases(indexShard, i + 1, minimumRetainingSequenceNumbers, () -> 0L, true);
+                        Integer.toString(i), minimumRetainingSequenceNumbers[i], "test-" + i, ActionListener.wrap(() -> {}));
+                assertRetentionLeases(
+                        indexShard, i + 1, minimumRetainingSequenceNumbers, () -> 0L, primaryTerm, 1 + i, true);
             }
             for (int i = 0; i < length; i++) {
                 minimumRetainingSequenceNumbers[i] = randomLongBetween(minimumRetainingSequenceNumbers[i], Long.MAX_VALUE);
                 indexShard.renewRetentionLease(Integer.toString(i), minimumRetainingSequenceNumbers[i], "test-" + i);
-                assertRetentionLeases(indexShard, length, minimumRetainingSequenceNumbers, () -> 0L, true);
+                assertRetentionLeases(indexShard, length, minimumRetainingSequenceNumbers, () -> 0L, primaryTerm, 1 + length + i, true);
             }
         } finally {
             closeShards(indexShard);
@@ -113,6 +114,7 @@ private void runExpirationTest(final boolean primary) throws IOException {
                 .build();
         // current time is mocked through the thread pool
         final IndexShard indexShard = newStartedShard(primary, settings, new InternalEngineFactory());
+        final long primaryTerm = indexShard.getOperationPrimaryTerm();
         try {
             final long[] retainingSequenceNumbers = new long[1];
             retainingSequenceNumbers[0] = randomLongBetween(0, Long.MAX_VALUE);
@@ -120,16 +122,20 @@ private void runExpirationTest(final boolean primary) throws IOException {
             if (primary) {
                 indexShard.addRetentionLease("0", retainingSequenceNumbers[0], "test-0", ActionListener.wrap(() -> {
                 }));
             } else {
-                indexShard.updateRetentionLeasesOnReplica(
+                final RetentionLeases retentionLeases = new RetentionLeases(
+                        primaryTerm,
+                        1,
                         Collections.singleton(new RetentionLease("0", retainingSequenceNumbers[0], currentTimeMillis.get(), "test-0")));
+                indexShard.updateRetentionLeasesOnReplica(retentionLeases);
             }
 
             {
-                final Collection<RetentionLease> retentionLeases = indexShard.getEngine().config().retentionLeasesSupplier().get();
-                assertThat(retentionLeases, hasSize(1));
-                final RetentionLease retentionLease = retentionLeases.iterator().next();
+                final RetentionLeases retentionLeases = indexShard.getEngine().config().retentionLeasesSupplier().get();
+                assertThat(retentionLeases.version(), equalTo(1L));
+                assertThat(retentionLeases.leases(), hasSize(1));
+                final RetentionLease retentionLease = retentionLeases.leases().iterator().next();
                 assertThat(retentionLease.timestamp(), equalTo(currentTimeMillis.get()));
-                assertRetentionLeases(indexShard, 1, retainingSequenceNumbers, currentTimeMillis::get, primary);
+                assertRetentionLeases(indexShard, 1, retainingSequenceNumbers, currentTimeMillis::get, primaryTerm, 1, primary);
             }
 
             // renew the lease
@@ -138,25 +144,29 @@ private void runExpirationTest(final boolean primary) throws IOException {
             if (primary) {
                 indexShard.renewRetentionLease("0", retainingSequenceNumbers[0], "test-0");
             } else {
-                indexShard.updateRetentionLeasesOnReplica(
+                final RetentionLeases retentionLeases = new RetentionLeases(
+                        primaryTerm,
+                        2,
                         Collections.singleton(new RetentionLease("0", retainingSequenceNumbers[0], currentTimeMillis.get(), "test-0")));
+                indexShard.updateRetentionLeasesOnReplica(retentionLeases);
             }
 
             {
-                final Collection<RetentionLease> retentionLeases = indexShard.getEngine().config().retentionLeasesSupplier().get();
-                assertThat(retentionLeases, hasSize(1));
-                final RetentionLease retentionLease = retentionLeases.iterator().next();
+                final RetentionLeases retentionLeases = indexShard.getEngine().config().retentionLeasesSupplier().get();
+                assertThat(retentionLeases.version(), equalTo(2L));
+                assertThat(retentionLeases.leases(), hasSize(1));
+                final RetentionLease retentionLease = retentionLeases.leases().iterator().next();
                 assertThat(retentionLease.timestamp(), equalTo(currentTimeMillis.get()));
-                assertRetentionLeases(indexShard, 1, retainingSequenceNumbers, currentTimeMillis::get, primary);
+                assertRetentionLeases(indexShard, 1, retainingSequenceNumbers, currentTimeMillis::get, primaryTerm, 2, primary);
             }
 
             // now force the lease to expire
             currentTimeMillis.set(
                     currentTimeMillis.get() + randomLongBetween(retentionLeaseMillis, Long.MAX_VALUE - currentTimeMillis.get()));
             if (primary) {
-                assertRetentionLeases(indexShard, 0, retainingSequenceNumbers, currentTimeMillis::get, true);
+                assertRetentionLeases(indexShard, 0, retainingSequenceNumbers, currentTimeMillis::get, primaryTerm, 3, true);
             } else {
-                assertRetentionLeases(indexShard, 1, retainingSequenceNumbers, currentTimeMillis::get, false);
+                assertRetentionLeases(indexShard, 1, retainingSequenceNumbers, currentTimeMillis::get, primaryTerm, 2, false);
             }
         } finally {
             closeShards(indexShard);
@@ -191,11 +201,14 @@ public void testCommit() throws IOException {
         // the committed retention leases should equal our current retention leases
         final SegmentInfos segmentCommitInfos = indexShard.store().readLastCommittedSegmentsInfo();
         assertTrue(segmentCommitInfos.getUserData().containsKey(Engine.RETENTION_LEASES));
-        final Collection<RetentionLease> retentionLeases = indexShard.getEngine().config().retentionLeasesSupplier().get();
-        if (retentionLeases.isEmpty()) {
-            assertThat(IndexShard.getRetentionLeases(segmentCommitInfos), empty());
+        final RetentionLeases retentionLeases = indexShard.getEngine().config().retentionLeasesSupplier().get();
+        final RetentionLeases committedRetentionLeases = IndexShard.getRetentionLeases(segmentCommitInfos);
+        if (retentionLeases.leases().isEmpty()) {
+            assertThat(committedRetentionLeases.version(), equalTo(0L));
+            assertThat(committedRetentionLeases.leases(), empty());
         } else {
-            assertThat(IndexShard.getRetentionLeases(segmentCommitInfos), contains(retentionLeases.toArray(new RetentionLease[0])));
+            assertThat(committedRetentionLeases.version(), equalTo((long) length));
+            assertThat(committedRetentionLeases.leases(), contains(retentionLeases.leases().toArray(new RetentionLease[0])));
         }
 
         // when we recover, we should recover the retention leases
@@ -204,12 +217,15 @@ public void testCommit() throws IOException {
                 ShardRoutingHelper.initWithSameId(indexShard.routingEntry(), RecoverySource.ExistingStoreRecoverySource.INSTANCE));
         try {
             recoverShardFromStore(recoveredShard);
-            if (retentionLeases.isEmpty()) {
-                assertThat(recoveredShard.getEngine().config().retentionLeasesSupplier().get(), empty());
+            final RetentionLeases recoveredRetentionLeases = recoveredShard.getEngine().config().retentionLeasesSupplier().get();
+            if (retentionLeases.leases().isEmpty()) {
+                assertThat(recoveredRetentionLeases.version(), equalTo(0L));
+                assertThat(recoveredRetentionLeases.leases(), empty());
             } else {
+                assertThat(recoveredRetentionLeases.version(), equalTo((long) length));
                 assertThat(
-                        recoveredShard.getEngine().config().retentionLeasesSupplier().get(),
-                        contains(retentionLeases.toArray(new RetentionLease[0])));
+                        recoveredRetentionLeases.leases(),
+                        contains(retentionLeases.leases().toArray(new RetentionLease[0])));
             }
         } finally {
             closeShards(recoveredShard);
@@ -227,16 +243,17 @@ public void testRetentionLeaseStats() throws IOException {
             for (int i = 0; i < length; i++) {
                 minimumRetainingSequenceNumbers[i] = randomLongBetween(SequenceNumbers.NO_OPS_PERFORMED, Long.MAX_VALUE);
                 indexShard.addRetentionLease(
-                        Integer.toString(i), minimumRetainingSequenceNumbers[i], "test-" + i, ActionListener.wrap(() -> {
-                        }));
+                        Integer.toString(i), minimumRetainingSequenceNumbers[i], "test-" + i, ActionListener.wrap(() -> {}));
             }
             final RetentionLeaseStats stats = indexShard.getRetentionLeaseStats();
             assertRetentionLeases(
-                    stats.leases(),
+                    stats.retentionLeases(),
                     indexShard.indexSettings().getRetentionLeaseMillis(),
                     length,
                     minimumRetainingSequenceNumbers,
                     () -> 0L,
+                    length == 0 ? RetentionLeases.EMPTY.primaryTerm() : indexShard.getOperationPrimaryTerm(),
+                    length,
                     true);
         } finally {
             closeShards(indexShard);
@@ -248,6 +265,8 @@ private void assertRetentionLeases(
             final int size,
             final long[] minimumRetainingSequenceNumbers,
             final LongSupplier currentTimeMillisSupplier,
+            final long primaryTerm,
+            final long version,
             final boolean primary) {
         assertRetentionLeases(
                 indexShard.getEngine().config().retentionLeasesSupplier().get(),
@@ -255,18 +274,24 @@ private void assertRetentionLeases(
                 size,
                 minimumRetainingSequenceNumbers,
                 currentTimeMillisSupplier,
+                primaryTerm,
+                version,
                 primary);
     }
 
     private void assertRetentionLeases(
-            final Collection<RetentionLease> retentionLeases,
+            final RetentionLeases retentionLeases,
             final long retentionLeaseMillis,
             final int size,
             final long[] minimumRetainingSequenceNumbers,
             final LongSupplier currentTimeMillisSupplier,
+            final long primaryTerm,
+            final long version,
             final boolean primary) {
+        assertThat(retentionLeases.primaryTerm(), equalTo(primaryTerm));
+        assertThat(retentionLeases.version(), equalTo(version));
         final Map<String, RetentionLease> idToRetentionLease = new HashMap<>();
-        for (final RetentionLease retentionLease : retentionLeases) {
+        for (final RetentionLease retentionLease : retentionLeases.leases()) {
             idToRetentionLease.put(retentionLease.id(), retentionLease);
         }
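[Editor's note: the expected primary-term/version arguments threaded through assertRetentionLeases above encode a simple rule: every mutating operation on the primary (add, renew, expiry) appears to bump the version by one, while a replica only adopts the RetentionLeases object pushed to it via updateRetentionLeasesOnReplica. A toy model of that bookkeeping follows; it is purely illustrative, and the class and method names are hypothetical, not Elasticsearch API.]

// Toy model (not Elasticsearch code) of the version accounting the tests expect.
final class LeaseVersionModel {
    private long version = 0; // an empty shard starts at version 0

    long add()    { return ++version; } // testAddOrRenewRetentionLease: add i expects version 1 + i
    long renew()  { return ++version; } // after `length` adds, renew i expects version 1 + length + i
    long expire() { return ++version; } // expiry on the primary bumps the version as well

    public static void main(String[] args) {
        final LeaseVersionModel model = new LeaseVersionModel();
        model.add();    // version 1: the initial add in runExpirationTest
        model.renew();  // version 2: the renewal
        model.expire(); // version 3: the expected version after expiry on the primary
        // A replica never increments on its own, which is why the replica
        // branch of runExpirationTest still asserts version 2 after expiry.
    }
}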
diff --git a/server/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java b/server/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java
index 53c3e86ee01fb..c80b3b5074921 100644
--- a/server/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java
+++ b/server/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java
@@ -50,6 +50,7 @@
 import org.elasticsearch.index.mapper.ParseContext.Document;
 import org.elasticsearch.index.mapper.ParsedDocument;
 import org.elasticsearch.index.mapper.SeqNoFieldMapper;
+import org.elasticsearch.index.seqno.RetentionLeases;
 import org.elasticsearch.index.seqno.SequenceNumbers;
 import org.elasticsearch.index.store.Store;
 import org.elasticsearch.index.translog.Translog;
@@ -122,12 +123,30 @@ public void onFailedEngine(String reason, @Nullable Exception e) {
         final String translogUUID = Translog.createEmptyTranslog(translogConfig.getTranslogPath(), SequenceNumbers.NO_OPS_PERFORMED,
             shardId, primaryTerm);
         store.associateIndexWithNewTranslog(translogUUID);
-        EngineConfig config = new EngineConfig(shardId, allocationId, threadPool,
-                indexSettings, null, store, newMergePolicy(), iwc.getAnalyzer(), iwc.getSimilarity(), new CodecService(null, logger),
-                eventListener, IndexSearcher.getDefaultQueryCache(), IndexSearcher.getDefaultQueryCachingPolicy(), translogConfig,
-                TimeValue.timeValueMinutes(5), Collections.singletonList(listeners), Collections.emptyList(), null,
-                new NoneCircuitBreakerService(), () -> SequenceNumbers.NO_OPS_PERFORMED, Collections::emptyList,
-                () -> primaryTerm, EngineTestCase.tombstoneDocSupplier());
+        EngineConfig config = new EngineConfig(
+                shardId,
+                allocationId,
+                threadPool,
+                indexSettings,
+                null,
+                store,
+                newMergePolicy(),
+                iwc.getAnalyzer(),
+                iwc.getSimilarity(),
+                new CodecService(null, logger),
+                eventListener,
+                IndexSearcher.getDefaultQueryCache(),
+                IndexSearcher.getDefaultQueryCachingPolicy(),
+                translogConfig,
+                TimeValue.timeValueMinutes(5),
+                Collections.singletonList(listeners),
+                Collections.emptyList(),
+                null,
+                new NoneCircuitBreakerService(),
+                () -> SequenceNumbers.NO_OPS_PERFORMED,
+                () -> RetentionLeases.EMPTY,
+                () -> primaryTerm,
+                EngineTestCase.tombstoneDocSupplier());
         engine = new InternalEngine(config);
         engine.initializeMaxSeqNoOfUpdatesOrDeletes();
         engine.recoverFromTranslog((e, s) -> 0, Long.MAX_VALUE);

diff --git a/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java
index d893168b08205..e09455b55bd52 100644
--- a/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java
+++ b/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java
@@ -84,7 +84,7 @@
 import org.elasticsearch.index.mapper.VersionFieldMapper;
 import org.elasticsearch.index.seqno.LocalCheckpointTracker;
 import org.elasticsearch.index.seqno.ReplicationTracker;
-import org.elasticsearch.index.seqno.RetentionLease;
+import org.elasticsearch.index.seqno.RetentionLeases;
 import org.elasticsearch.index.seqno.SequenceNumbers;
 import org.elasticsearch.index.shard.ShardId;
 import org.elasticsearch.index.store.Store;
@@ -105,7 +105,6 @@
 import java.nio.file.Path;
 import java.util.ArrayList;
 import java.util.Arrays;
-import java.util.Collection;
 import java.util.Collections;
 import java.util.Comparator;
 import java.util.HashMap;
@@ -586,7 +585,7 @@ public EngineConfig config(IndexSettings indexSettings, Store store, Path translogPath,
                 refreshListener,
                 indexSort,
                 globalCheckpointSupplier,
-                globalCheckpointSupplier == null ? null : Collections::emptyList);
+                globalCheckpointSupplier == null ? null : () -> RetentionLeases.EMPTY);
     }
 
     public EngineConfig config(
@@ -597,7 +596,7 @@ public EngineConfig config(
             final ReferenceManager.RefreshListener refreshListener,
             final Sort indexSort,
             final LongSupplier globalCheckpointSupplier,
-            final Supplier<Collection<RetentionLease>> retentionLeasesSupplier) {
+            final Supplier<RetentionLeases> retentionLeasesSupplier) {
         return config(
                 indexSettings,
                 store,
@@ -625,7 +624,7 @@ public EngineConfig config(IndexSettings indexSettings, Store store, Path translogPath,
                 internalRefreshListener,
                 indexSort,
                 maybeGlobalCheckpointSupplier,
-                maybeGlobalCheckpointSupplier == null ? null : Collections::emptyList,
+                maybeGlobalCheckpointSupplier == null ? null : () -> RetentionLeases.EMPTY,
                 breakerService);
     }
 
@@ -638,7 +637,7 @@ public EngineConfig config(
             final ReferenceManager.RefreshListener internalRefreshListener,
             final Sort indexSort,
             final @Nullable LongSupplier maybeGlobalCheckpointSupplier,
-            final @Nullable Supplier<Collection<RetentionLease>> maybeRetentionLeasesSupplier,
+            final @Nullable Supplier<RetentionLeases> maybeRetentionLeasesSupplier,
             final CircuitBreakerService breakerService) {
         final IndexWriterConfig iwc = newIndexWriterConfig();
         final TranslogConfig translogConfig = new TranslogConfig(shardId, translogPath, indexSettings, BigArrays.NON_RECYCLING_INSTANCE);
@@ -648,7 +647,7 @@ public EngineConfig config(
         final List<ReferenceManager.RefreshListener> intRefreshListenerList =
                 internalRefreshListener == null ? emptyList() : Collections.singletonList(internalRefreshListener);
         final LongSupplier globalCheckpointSupplier;
-        final Supplier<Collection<RetentionLease>> retentionLeasesSupplier;
+        final Supplier<RetentionLeases> retentionLeasesSupplier;
         if (maybeGlobalCheckpointSupplier == null) {
             assert maybeRetentionLeasesSupplier == null;
             final ReplicationTracker replicationTracker = new ReplicationTracker(
diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/index/engine/FollowingEngineTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/index/engine/FollowingEngineTests.java
index bccc5fed8364e..df406a4c09a68 100644
--- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/index/engine/FollowingEngineTests.java
+++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/index/engine/FollowingEngineTests.java
@@ -32,6 +32,7 @@
 import org.elasticsearch.index.engine.TranslogHandler;
 import org.elasticsearch.index.mapper.MapperService;
 import org.elasticsearch.index.mapper.ParsedDocument;
+import org.elasticsearch.index.seqno.RetentionLeases;
 import org.elasticsearch.index.seqno.SequenceNumbers;
 import org.elasticsearch.index.shard.ShardId;
 import org.elasticsearch.index.store.Store;
@@ -270,7 +271,7 @@ public void onFailedEngine(String reason, Exception e) {
                 null,
                 new NoneCircuitBreakerService(),
                 globalCheckpoint::longValue,
-                Collections::emptyList,
+                () -> RetentionLeases.EMPTY,
                 () -> primaryTerm.get(),
                 EngineTestCase.tombstoneDocSupplier());
     }
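[Editor's note: the last three files make the same mechanical substitution: the engine's retention-lease supplier now yields a RetentionLeases snapshot (primary term, version, and leases) rather than a bare Collection<RetentionLease>, so the empty default changes from Collections::emptyList to () -> RetentionLeases.EMPTY. A minimal sketch of the new shape, assuming only the types shown in this patch; the class name is hypothetical.]

import java.util.function.Supplier;

import org.elasticsearch.index.seqno.RetentionLeases;

public class RetentionLeasesSupplierSketch {
    public static void main(String[] args) {
        // Old shape (removed by this patch): Supplier<Collection<RetentionLease>>
        // seeded with Collections::emptyList. The new empty snapshot also
        // carries an explicit primary term and version alongside the leases.
        final Supplier<RetentionLeases> retentionLeasesSupplier = () -> RetentionLeases.EMPTY;

        final RetentionLeases snapshot = retentionLeasesSupplier.get();
        System.out.println(snapshot.leases().isEmpty()); // true
        System.out.println(snapshot.version());          // 0, per the empty-case assertions above
    }
}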