
Commit
Merge remote-tracking branch 'origin/main' into async_flush_api
Tim-Brooks committed Apr 30, 2023
2 parents c7d743f + 3af5379 commit 2cb9e09
Showing 94 changed files with 659 additions and 962 deletions.
@@ -43,7 +43,6 @@
import org.elasticsearch.search.SearchModule;
import org.elasticsearch.search.aggregations.Aggregator;
import org.elasticsearch.search.aggregations.AggregatorFactories;
import org.elasticsearch.search.aggregations.MultiBucketConsumerService.MultiBucketConsumer;
import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder;
import org.elasticsearch.search.aggregations.metrics.SumAggregationBuilder;
import org.elasticsearch.search.aggregations.support.AggregationContext;
@@ -152,7 +151,6 @@ private class DummyAggregationContext extends AggregationContext {

private final CircuitBreaker breaker;
private final PreallocatedCircuitBreakerService preallocated;
private final MultiBucketConsumer multiBucketConsumer;

DummyAggregationContext(long bytesToPreallocate) {
CircuitBreakerService breakerService;
@@ -168,7 +166,6 @@ private class DummyAggregationContext extends AggregationContext {
preallocated = null;
}
breaker = breakerService.getBreaker(CircuitBreaker.REQUEST);
multiBucketConsumer = new MultiBucketConsumer(Integer.MAX_VALUE, breaker);
}

@Override
@@ -298,8 +295,8 @@ public void addReleasable(Aggregator aggregator) {
}

@Override
public MultiBucketConsumer multiBucketConsumer() {
return multiBucketConsumer;
public int maxBuckets() {
return Integer.MAX_VALUE;
}

@Override
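The hunk above removes the per-request `MultiBucketConsumer` plumbing from the benchmark's dummy context and replaces it with a plain `maxBuckets()` limit. A minimal sketch of the new shape, assuming `AggregationContext` now declares `maxBuckets()` (the real `DummyAggregationContext` implements many more methods):

```java
import org.elasticsearch.search.aggregations.support.AggregationContext;

// Sketch only: the context no longer wires a MultiBucketConsumer to the
// circuit breaker. It merely advertises the bucket ceiling; enforcement is
// assumed to move into the aggregation framework itself.
abstract class SketchAggregationContext extends AggregationContext {
    @Override
    public int maxBuckets() {
        return Integer.MAX_VALUE; // this benchmark context imposes no limit
    }
}
```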
5 changes: 5 additions & 0 deletions docs/changelog/95107.yaml
@@ -0,0 +1,5 @@
pr: 95107
summary: Cache modification time of translog writer file
area: Engine
type: enhancement
issues: []
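The entry above is terse, so here is a hedged sketch of the idea it names: remember a file's modification time instead of re-statting it on every read. The class and method names are illustrative, not the actual `TranslogWriter` API:

```java
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;

// Illustrative only: cache the last-modified time of a file the owner
// mutates, invalidating the cached value whenever the owner writes.
final class CachedModificationTime {
    private final Path file;
    private volatile long cachedMillis = -1;

    CachedModificationTime(Path file) {
        this.file = file;
    }

    long lastModifiedMillis() throws IOException {
        long cached = cachedMillis;
        if (cached == -1) {
            cached = Files.getLastModifiedTime(file).toMillis();
            cachedMillis = cached;
        }
        return cached;
    }

    void onWrite() {
        cachedMillis = -1; // next read re-stats the file
    }
}
```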
5 changes: 5 additions & 0 deletions docs/changelog/95665.yaml
@@ -0,0 +1,5 @@
pr: 95665
summary: "[DLM] Fix the new endpoint rest-api specification"
area: DLM
type: bug
issues: []
14 changes: 7 additions & 7 deletions docs/reference/ingest/processors/geoip.asciidoc
@@ -9,13 +9,13 @@ IPv4 or IPv6 address.

[[geoip-automatic-updates]]
By default, the processor uses the GeoLite2 City, GeoLite2 Country, and GeoLite2
ASN GeoIP2 databases from
http://dev.maxmind.com/geoip/geoip2/geolite2/[MaxMind], shared under the
CC BY-SA 4.0 license. It automatically downloads these databases if either
`ingest.geoip.downloader.eager.download` is set to true, or your cluster
has at least one pipeline with a `geoip` processor. {es}
automatically downloads updates for
these databases from the Elastic GeoIP endpoint:
ASN GeoIP2 databases from http://dev.maxmind.com/geoip/geoip2/geolite2/[MaxMind], shared under the
CC BY-SA 4.0 license. It automatically downloads these databases if your nodes can connect to the `storage.googleapis.com` domain and either:

* `ingest.geoip.downloader.eager.download` is set to true
* your cluster has at least one pipeline with a `geoip` processor
{es} automatically downloads updates for these databases from the Elastic GeoIP endpoint:
https://geoip.elastic.co/v1/database. To get download statistics for these
updates, use the <<geoip-stats-api,GeoIP stats API>>.

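As the rewritten paragraph notes, one pipeline containing a `geoip` processor is enough to make the cluster eligible for automatic database downloads. A minimal sketch using the low-level Java REST client; the host, pipeline id, and field name are assumptions:

```java
import org.apache.http.HttpHost;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;

public class GeoIpPipelineDemo {
    public static void main(String[] args) throws Exception {
        try (RestClient client = RestClient.builder(new HttpHost("localhost", 9200, "http")).build()) {
            Request put = new Request("PUT", "/_ingest/pipeline/geoip-demo");
            // A single geoip pipeline triggers the automatic download,
            // provided the nodes can reach storage.googleapis.com.
            put.setJsonEntity("""
                {"processors": [{"geoip": {"field": "source.ip"}}]}
                """);
            Response response = client.performRequest(put);
            System.out.println(response.getStatusLine());
        }
    }
}
```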
@@ -62,7 +62,7 @@ public static ParsedMediaType parseMediaType(String headerValue) {
if (isMediaRange(headerValue) || "*/*".equals(headerValue)) {
return null;
}
final String[] elements = headerValue.toLowerCase(Locale.ROOT).split("[\\s\\t]*;");
final String[] elements = headerValue.toLowerCase(Locale.ROOT).split(";");

final String[] splitMediaType = elements[0].split("/");
if ((splitMediaType.length == 2
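The one-line change above splits the header on a literal `;` instead of the `[\s\t]*;` pattern. A standalone sketch of the behavioral difference; the header value is made up:

```java
import java.util.Locale;

public class MediaTypeSplitDemo {
    public static void main(String[] args) {
        String headerValue = "application/vnd.elasticsearch+json ; compatible-with=8";
        // New behavior: split only on ';'. The old pattern also consumed any
        // spaces or tabs before the separator, so with this input elements[0]
        // now keeps a trailing space that stricter validation can flag.
        String[] elements = headerValue.toLowerCase(Locale.ROOT).split(";");
        String[] splitMediaType = elements[0].split("/");
        System.out.println("type:    '" + splitMediaType[0] + "'"); // 'application'
        System.out.println("subtype: '" + splitMediaType[1] + "'"); // 'vnd.elasticsearch+json '
        System.out.println("param:   '" + elements[1] + "'");       // ' compatible-with=8'
    }
}
```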
@@ -19,7 +19,6 @@
import org.elasticsearch.aggregations.AggregationsPlugin;
import org.elasticsearch.aggregations.bucket.timeseries.TimeSeriesAggregationBuilder;
import org.elasticsearch.cluster.metadata.IndexMetadata;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.IndexMode;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.plugins.Plugin;
@@ -62,14 +61,10 @@ public void testCancellationDuringTimeSeriesAggregation() throws Exception {
int numberOfDocsPerRefresh = numberOfShards * between(3000, 3500) / numberOfRefreshes;
assertAcked(
prepareCreate("test").setSettings(
Settings.builder()
.put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, numberOfShards)
.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
.put(IndexSettings.MODE.getKey(), IndexMode.TIME_SERIES.name())
indexSettings(numberOfShards, 0).put(IndexSettings.MODE.getKey(), IndexMode.TIME_SERIES.name())
.put(IndexMetadata.INDEX_ROUTING_PATH.getKey(), "dim")
.put(TIME_SERIES_START_TIME.getKey(), now)
.put(TIME_SERIES_END_TIME.getKey(), now + (long) numberOfRefreshes * numberOfDocsPerRefresh + 1)
.build()
).setMapping("""
{
"properties": {
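This hunk, and many below it, collapse hand-rolled `Settings.builder()` chains into an `indexSettings(shards, replicas)` test helper. Judging only from the before/after pairs in this diff, the helper is equivalent to the following reconstruction (the real method lives in the test framework; this sketch is an assumption):

```java
import org.elasticsearch.cluster.metadata.IndexMetadata;
import org.elasticsearch.common.settings.Settings;

final class IndexSettingsSketch {
    // Returns the builder rather than a built Settings, so callers can keep
    // chaining .put(...) before .build(), as the data stream tests below do
    // with routing and allocation settings.
    static Settings.Builder indexSettings(int numberOfShards, int numberOfReplicas) {
        return Settings.builder()
            .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, numberOfShards)
            .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, numberOfReplicas);
    }
}
```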
@@ -1569,10 +1569,7 @@ public void testMultiThreadedRollover() throws Exception {

// Test that datastream's segments by default are sorted on @timestamp desc
public void testSegmentsSortedOnTimestampDesc() throws Exception {
Settings settings = Settings.builder()
.put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
.build();
Settings settings = indexSettings(1, 0).build();
putComposableIndexTemplate("template_for_foo", null, List.of("metrics-foo*"), settings, null);
CreateDataStreamAction.Request createDataStreamRequest = new CreateDataStreamAction.Request("metrics-foo");
client().execute(CreateDataStreamAction.INSTANCE, createDataStreamRequest).get();
@@ -2047,10 +2044,7 @@ public void testWriteIndexWriteLoadAndAvgShardSizeIsStoredAfterRollover() throws
final String dataStreamName = "logs-es";
final int numberOfShards = randomIntBetween(1, 5);
final int numberOfReplicas = randomIntBetween(0, 1);
final var indexSettings = Settings.builder()
.put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, numberOfShards)
.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, numberOfReplicas)
.build();
final var indexSettings = indexSettings(numberOfShards, numberOfReplicas).build();
DataStreamIT.putComposableIndexTemplate("my-template", null, List.of("logs-*"), indexSettings, null);
final var request = new CreateDataStreamAction.Request(dataStreamName);
assertAcked(client().execute(CreateDataStreamAction.INSTANCE, request).actionGet());
@@ -2097,10 +2091,7 @@ public void testWriteLoadAndAvgShardSizeIsStoredInABestEffort() throws Exception
final List<String> dataOnlyNodes = internalCluster().startDataOnlyNodes(4);
final String dataStreamName = "logs-es";

final var indexSettings = Settings.builder()
.put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 2)
.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1)
.put("index.routing.allocation.include._name", String.join(",", dataOnlyNodes))
final var indexSettings = indexSettings(2, 1).put("index.routing.allocation.include._name", String.join(",", dataOnlyNodes))
.build();
DataStreamIT.putComposableIndexTemplate("my-template", null, List.of("logs-*"), indexSettings, null);
final var createDataStreamRequest = new CreateDataStreamAction.Request(dataStreamName);
@@ -2172,11 +2163,7 @@ public void testNoShardSizeIsForecastedWhenAllShardStatRequestsFail() throws Exc
final String dataOnlyNode = internalCluster().startDataOnlyNode();
final String dataStreamName = "logs-es";

final var indexSettings = Settings.builder()
.put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
.put("index.routing.allocation.require._name", dataOnlyNode)
.build();
final var indexSettings = indexSettings(1, 0).put("index.routing.allocation.require._name", dataOnlyNode).build();
DataStreamIT.putComposableIndexTemplate("my-template", null, List.of("logs-*"), indexSettings, null);
final var createDataStreamRequest = new CreateDataStreamAction.Request(dataStreamName);
assertAcked(client().execute(CreateDataStreamAction.INSTANCE, createDataStreamRequest).actionGet());
@@ -2217,10 +2204,7 @@ public void testShardSizeIsForecastedDuringRollover() throws Exception {
final String dataStreamName = "logs-es";
final int numberOfShards = randomIntBetween(1, 5);
final int numberOfReplicas = randomIntBetween(0, 1);
final var indexSettings = Settings.builder()
.put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, numberOfShards)
.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, numberOfReplicas)
.build();
final var indexSettings = indexSettings(numberOfShards, numberOfReplicas).build();
DataStreamIT.putComposableIndexTemplate("my-template", null, List.of("logs-*"), indexSettings, null);
final var request = new CreateDataStreamAction.Request(dataStreamName);
assertAcked(client().execute(CreateDataStreamAction.INSTANCE, request).actionGet());
@@ -68,12 +68,7 @@ public void testValidateTimestampFieldMappingInvalidFieldType() {
MappingLookup createMappingLookup(String mapping) throws IOException {
String indexName = "test";
IndexMetadata indexMetadata = IndexMetadata.builder(indexName)
.settings(
Settings.builder()
.put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT)
.put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1)
)
.settings(indexSettings(Version.CURRENT, 1, 1))
.putMapping(mapping)
.build();
IndicesModule indicesModule = new IndicesModule(List.of());
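The hunk above uses a three-argument variant that also pins the index-created version. Reconstructing again from the replaced lines (an assumption, not the framework's actual source):

```java
import org.elasticsearch.Version;
import org.elasticsearch.cluster.metadata.IndexMetadata;
import org.elasticsearch.common.settings.Settings;

final class VersionedIndexSettingsSketch {
    // Mirrors the replaced builder chain: created-version plus shard counts.
    // Anything extra, such as SETTING_INDEX_UUID in the analysis test below,
    // is still chained onto the returned builder by the caller.
    static Settings.Builder indexSettings(Version version, int numberOfShards, int numberOfReplicas) {
        return Settings.builder()
            .put(IndexMetadata.SETTING_VERSION_CREATED, version)
            .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, numberOfShards)
            .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, numberOfReplicas);
    }
}
```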
@@ -10,14 +10,11 @@

import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.metadata.IndexMetadata;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.UUIDs;
import org.elasticsearch.common.geo.GeoUtils;
import org.elasticsearch.common.geo.GeometryNormalizer;
import org.elasticsearch.common.geo.GeometryParser;
import org.elasticsearch.common.geo.Orientation;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.geometry.Geometry;
import org.elasticsearch.geometry.GeometryCollection;
import org.elasticsearch.geometry.Line;
@@ -385,13 +382,6 @@ public void testParse3DPolygon() throws IOException, ParseException {
Coordinate[] coordinates = shellCoordinates.toArray(new Coordinate[shellCoordinates.size()]);

Version randomVersion = VersionUtils.randomIndexCompatibleVersion(random());
Settings indexSettings = Settings.builder()
.put(IndexMetadata.SETTING_VERSION_CREATED, randomVersion)
.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
.put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
.put(IndexMetadata.SETTING_INDEX_UUID, UUIDs.randomBase64UUID())
.build();

LinearRing shell = GEOMETRY_FACTORY.createLinearRing(shellCoordinates.toArray(new Coordinate[shellCoordinates.size()]));
Polygon expected = GEOMETRY_FACTORY.createPolygon(shell, null);
final Version version = VersionUtils.randomPreviousCompatibleVersion(random(), Version.V_8_0_0);
@@ -12,8 +12,6 @@
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.action.update.UpdateResponse;
import org.elasticsearch.client.internal.Requests;
import org.elasticsearch.cluster.metadata.IndexMetadata;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.search.SearchHit;
import org.elasticsearch.search.aggregations.AggregationBuilders;
import org.elasticsearch.search.aggregations.InternalAggregation;
@@ -206,9 +204,7 @@ public void testPostCollection() throws Exception {
String masterType = "masterprod";
String childType = "variantsku";
assertAcked(
prepareCreate(indexName).setSettings(
Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
)
prepareCreate(indexName).setSettings(indexSettings(1, 0))
.setMapping(
addFieldMappings(
buildParentJoinFieldMappingFromSimplifiedDef("join_field", true, masterType, childType),
@@ -16,7 +16,6 @@
import org.elasticsearch.action.index.IndexRequestBuilder;
import org.elasticsearch.action.search.SearchRequestBuilder;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.cluster.metadata.IndexMetadata;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.settings.Settings;
@@ -269,10 +268,7 @@ public void testDeleteByQuery() throws Exception {
}

private void createIndexWithSingleShard(String index) throws Exception {
final Settings indexSettings = Settings.builder()
.put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
.build();
final Settings indexSettings = indexSettings(1, 0).build();
final XContentBuilder mappings = jsonBuilder();
{
mappings.startObject();
@@ -41,12 +41,7 @@ protected Map<String, Class<?>> getTokenFilters() {

public void testThreadSafety() throws IOException {
// TODO: is this the right boilerplate? I forked this out of TransportAnalyzeAction.java:
Settings settings = Settings.builder()
// for _na_
.put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT)
.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
.put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
.put(IndexMetadata.SETTING_INDEX_UUID, UUIDs.randomBase64UUID())
Settings settings = indexSettings(Version.CURRENT, 1, 0).put(IndexMetadata.SETTING_INDEX_UUID, UUIDs.randomBase64UUID())
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
.build();
Environment environment = TestEnvironment.newEnvironment(settings);
@@ -1501,9 +1501,7 @@ public void testPeerRecoveryRetentionLeases() throws Exception {
*/
public void testOperationBasedRecovery() throws Exception {
if (isRunningAgainstOldCluster()) {
Settings.Builder settings = Settings.builder()
.put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1);
Settings.Builder settings = indexSettings(1, 1);
if (minimumNodeVersion().before(Version.V_8_0_0) && randomBoolean()) {
settings.put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), randomBoolean());
}
@@ -1741,9 +1739,7 @@ public void testEnableSoftDeletesOnRestore() throws Exception {
assumeTrue("soft deletes must be enabled on 8.0+", getOldClusterVersion().before(Version.V_8_0_0));
final String snapshot = "snapshot-" + index;
if (isRunningAgainstOldCluster()) {
final Settings.Builder settings = Settings.builder()
.put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1);
final Settings.Builder settings = indexSettings(1, 1);
settings.put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), false);
createIndex(index, settings.build());
ensureGreen(index);
@@ -1801,10 +1797,7 @@ public void testEnableSoftDeletesOnRestore() throws Exception {
public void testForbidDisableSoftDeletesOnRestore() throws Exception {
final String snapshot = "snapshot-" + index;
if (isRunningAgainstOldCluster()) {
final Settings.Builder settings = Settings.builder()
.put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1)
.put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true);
final Settings.Builder settings = indexSettings(1, 1).put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true);
createIndex(index, settings.build());
ensureGreen(index);
int numDocs = randomIntBetween(0, 100);
@@ -716,14 +716,7 @@ public void testAutoExpandIndicesDuringRollingUpgrade() throws Exception {
List<String> nodes = new ArrayList<>(nodeMap.keySet());

if (CLUSTER_TYPE == ClusterType.OLD) {
createIndex(
indexName,
Settings.builder()
.put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, randomInt(2))
.put(IndexMetadata.SETTING_AUTO_EXPAND_REPLICAS, "0-all")
.build()
);
createIndex(indexName, indexSettings(1, randomInt(2)).put(IndexMetadata.SETTING_AUTO_EXPAND_REPLICAS, "0-all").build());
ensureGreen(indexName);
updateIndexSettings(
indexName,
@@ -4,19 +4,13 @@
"url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/dlm-delete-lifecycle.html",
"description":"Deletes the data lifecycle of the selected data streams."
},
"stability":"stable",
"stability":"experimental",
"visibility":"public",
"headers":{
"accept": [ "application/json"]
},
"url":{
"paths":[
{
"path":"/_data_stream/_lifecycle",
"methods":[
"DELETE"
]
},
{
"path":"/_data_stream/{name}/_lifecycle",
"methods":[
@@ -4,19 +4,13 @@
"url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/dlm-get-lifecycle.html",
"description":"Returns the data lifecycle of the selected data streams."
},
"stability":"stable",
"stability":"experimental",
"visibility":"public",
"headers":{
"accept": [ "application/json"]
},
"url":{
"paths":[
{
"path":"/_data_stream/_lifecycle",
"methods":[
"GET"
]
},
{
"path":"/_data_stream/{name}/_lifecycle",
"methods":[
@@ -4,19 +4,13 @@
"url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/dlm-put-lifecycle.html",
"description":"Updates the data lifecycle of the selected data streams."
},
"stability":"stable",
"stability":"experimental",
"visibility":"public",
"headers":{
"accept": [ "application/json"]
},
"url":{
"paths":[
{
"path":"/_data_stream/_lifecycle",
"methods":[
"PUT"
]
},
"url": {
"paths": [
{
"path":"/_data_stream/{name}/_lifecycle",
"methods":[
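Across all three DLM specs above, the change downgrades `stability` from `stable` to `experimental` and drops the bare `/_data_stream/_lifecycle` path, so requests must always name their target data streams. A hedged sketch with the low-level Java REST client; the host and data stream name are made up:

```java
import org.apache.http.HttpHost;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;

public class DataStreamLifecycleDemo {
    public static void main(String[] args) throws Exception {
        try (RestClient client = RestClient.builder(new HttpHost("localhost", 9200, "http")).build()) {
            // Only the {name} variant of the path survives this change;
            // a wildcard such as "logs-*" still works as the name.
            Request get = new Request("GET", "/_data_stream/my-stream/_lifecycle");
            Response response = client.performRequest(get);
            System.out.println(response.getStatusLine());
        }
    }
}
```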
6 changes: 0 additions & 6 deletions server/build.gradle
@@ -81,12 +81,6 @@ dependencies {

}

spotless {
java {
targetExclude "src/main/java/org/elasticsearch/Version.java"
}
}

tasks.named("forbiddenPatterns").configure {
exclude '**/*.json'
exclude '**/*.jmx'
