From 659e82b08b355b17572a54163d090c4fdd13c20e Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Mon, 18 Feb 2019 15:01:44 -0500 Subject: [PATCH 01/54] Add some logging related to retention lease syncing (#39066) When the background retention lease sync fires, we check and see if any retention leases are expired. If any did expire, we execute a full retention lease sync (write action). Since this is happening on a background thread, we do not block that thread waiting for success (it will simply try again when the timer elapses). However, we were swallowing exceptions that indicate failure. This commit addresses that by logging the failures. Additionally, we add some trace logging to the execution of syncing retention leases. --- .../java/org/elasticsearch/index/shard/IndexShard.java | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java index 50400c6961741..4bdad8d5ec328 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java @@ -2011,8 +2011,13 @@ public void syncRetentionLeases() { verifyNotClosed(); final Tuple retentionLeases = getRetentionLeases(true); if (retentionLeases.v1()) { - retentionLeaseSyncer.sync(shardId, retentionLeases.v2(), ActionListener.wrap(() -> {})); + logger.trace("syncing retention leases [{}] after expiration check", retentionLeases.v2()); + retentionLeaseSyncer.sync( + shardId, + retentionLeases.v2(), + ActionListener.wrap(r -> {}, e -> logger.warn("failed to sync retention leases after expiration check", e))); } else { + logger.trace("background syncing retention leases [{}] after expiration check", retentionLeases.v2()); retentionLeaseSyncer.backgroundSync(shardId, retentionLeases.v2()); } } From 2c90534039e0838c94a9692988e78116cea3d31b Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Mon, 18 Feb 2019 15:06:54 -0500 Subject: [PATCH 02/54] Include in log retention leases that failed to sync When retention leases fail to sync after an expiration check, we emit a log message about this. This commit adds the retention leases that failed to sync. 
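For reference, the listener built in the diff below follows the usual ActionListener.wrap(onResponse, onFailure) pattern: success needs no action (the scheduled sync simply fires again), while failures are logged together with the leases that failed to sync. A minimal sketch, assuming a ReplicationResponse response type and a plain Log4j logger (the helper class and method names are hypothetical; only the message and the ParameterizedMessage/warn usage come from the change itself):

    import org.apache.logging.log4j.LogManager;
    import org.apache.logging.log4j.Logger;
    import org.apache.logging.log4j.message.ParameterizedMessage;
    import org.elasticsearch.action.ActionListener;
    import org.elasticsearch.action.support.replication.ReplicationResponse;
    import org.elasticsearch.index.seqno.RetentionLeases;

    // Hypothetical helper for illustration; not part of this commit.
    class RetentionLeaseSyncListenerSketch {

        private static final Logger logger = LogManager.getLogger(RetentionLeaseSyncListenerSketch.class);

        // Ignore success; log failures with the retention leases that failed to sync.
        static ActionListener<ReplicationResponse> failureLoggingListener(final RetentionLeases retentionLeases) {
            return ActionListener.wrap(
                r -> {},
                e -> logger.warn(
                    new ParameterizedMessage(
                        "failed to sync retention leases [{}] after expiration check",
                        retentionLeases),
                    e));
        }
    }

Passing the ParameterizedMessage (rather than a formatted string) keeps the lease collection out of the message until the warning is actually emitted and lets the exception travel with it.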
--- .../java/org/elasticsearch/index/shard/IndexShard.java | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java index 4bdad8d5ec328..faa3f86591ff0 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java @@ -21,6 +21,7 @@ import com.carrotsearch.hppc.ObjectLongMap; import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.lucene.index.CheckIndex; import org.apache.lucene.index.IndexCommit; import org.apache.lucene.index.SegmentInfos; @@ -2015,7 +2016,12 @@ public void syncRetentionLeases() { retentionLeaseSyncer.sync( shardId, retentionLeases.v2(), - ActionListener.wrap(r -> {}, e -> logger.warn("failed to sync retention leases after expiration check", e))); + ActionListener.wrap( + r -> {}, + e -> logger.warn(new ParameterizedMessage( + "failed to sync retention leases [{}] after expiration check", + retentionLeases), + e))); } else { logger.trace("background syncing retention leases [{}] after expiration check", retentionLeases.v2()); retentionLeaseSyncer.backgroundSync(shardId, retentionLeases.v2()); From 331ef9dc59270fce338e6e93e9d45dbdd7c93d03 Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Mon, 18 Feb 2019 16:52:51 -0500 Subject: [PATCH 03/54] Introduce retention lease state file (#39004) This commit moves retention leases from being persisted in the Lucene commit point to being persisted in a dedicated state file. --- docs/reference/indices/flush.asciidoc | 4 +- .../elasticsearch/index/engine/Engine.java | 1 - .../index/engine/InternalEngine.java | 10 +- .../index/engine/SoftDeletesPolicy.java | 7 +- .../index/seqno/ReplicationTracker.java | 37 ++++++ .../index/seqno/RetentionLease.java | 87 +++++++------- .../RetentionLeaseBackgroundSyncAction.java | 11 +- .../index/seqno/RetentionLeaseSyncAction.java | 23 ++-- .../index/seqno/RetentionLeases.java | 107 ++++++++++------- .../elasticsearch/index/shard/IndexShard.java | 32 ++++-- .../org/elasticsearch/index/store/Store.java | 9 +- .../indices/recovery/RecoveryTarget.java | 1 + .../index/engine/InternalEngineTests.java | 10 -- .../index/engine/SoftDeletesPolicyTests.java | 26 ----- ...ReplicationTrackerRetentionLeaseTests.java | 108 ++++++++++++++++++ ...tentionLeaseBackgroundSyncActionTests.java | 13 ++- .../index/seqno/RetentionLeaseIT.java | 29 ++--- .../seqno/RetentionLeaseSyncActionTests.java | 20 ++-- .../index/seqno/RetentionLeaseTests.java | 25 ---- .../seqno/RetentionLeaseXContentTests.java | 48 ++++++++ .../index/seqno/RetentionLeasesTests.java | 75 ++++++++---- .../seqno/RetentionLeasesXContentTests.java | 58 ++++++++++ .../shard/IndexShardRetentionLeaseTests.java | 101 ++-------------- .../indexlifecycle/CCRIndexLifecycleIT.java | 1 - 24 files changed, 485 insertions(+), 358 deletions(-) create mode 100644 server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseXContentTests.java create mode 100644 server/src/test/java/org/elasticsearch/index/seqno/RetentionLeasesXContentTests.java diff --git a/docs/reference/indices/flush.asciidoc b/docs/reference/indices/flush.asciidoc index ea8667aa1b713..a03d2bb248dc4 100644 --- a/docs/reference/indices/flush.asciidoc +++ b/docs/reference/indices/flush.asciidoc @@ -103,8 +103,7 @@ which returns something similar to: "max_seq_no" : "-1", "sync_id" : 
"AVvFY-071siAOuFGEO9P", <1> "max_unsafe_auto_id_timestamp" : "-1", - "min_retained_seq_no" : "0", - "retention_leases" : "primary_term:1;version:1;id:replica-0;retaining_seq_no:0;timestamp:1547235588;source:replica" + "min_retained_seq_no" : "0" }, "num_docs" : 0 } @@ -119,7 +118,6 @@ which returns something similar to: // TESTRESPONSE[s/"translog_uuid" : "hnOG3xFcTDeoI_kvvvOdNA"/"translog_uuid": $body.indices.twitter.shards.0.0.commit.user_data.translog_uuid/] // TESTRESPONSE[s/"history_uuid" : "XP7KDJGiS1a2fHYiFL5TXQ"/"history_uuid": $body.indices.twitter.shards.0.0.commit.user_data.history_uuid/] // TESTRESPONSE[s/"sync_id" : "AVvFY-071siAOuFGEO9P"/"sync_id": $body.indices.twitter.shards.0.0.commit.user_data.sync_id/] -// TESTRESPONSE[s/"retention_leases" : "primary_term:1;version:1;id:replica-0;retaining_seq_no:0;timestamp:1547235588;source:replica"/"retention_leases": $body.indices.twitter.shards.0.0.commit.user_data.retention_leases/] <1> the `sync id` marker [float] diff --git a/server/src/main/java/org/elasticsearch/index/engine/Engine.java b/server/src/main/java/org/elasticsearch/index/engine/Engine.java index 56d8c6bab6184..dbe779864fe47 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/Engine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/Engine.java @@ -114,7 +114,6 @@ public abstract class Engine implements Closeable { public static final String SYNC_COMMIT_ID = "sync_id"; public static final String HISTORY_UUID_KEY = "history_uuid"; public static final String MIN_RETAINED_SEQNO = "min_retained_seq_no"; - public static final String RETENTION_LEASES = "retention_leases"; public static final String MAX_UNSAFE_AUTO_ID_TIMESTAMP_COMMIT_ID = "max_unsafe_auto_id_timestamp"; protected final ShardId shardId; diff --git a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java index 2def84f875b17..32354ab4b16d7 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java @@ -51,7 +51,6 @@ import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.SuppressForbidden; -import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.lease.Releasable; import org.elasticsearch.common.lucene.LoggerInfoStream; import org.elasticsearch.common.lucene.Lucene; @@ -75,7 +74,6 @@ import org.elasticsearch.index.merge.MergeStats; import org.elasticsearch.index.merge.OnGoingMerge; import org.elasticsearch.index.seqno.LocalCheckpointTracker; -import org.elasticsearch.index.seqno.RetentionLeases; import org.elasticsearch.index.seqno.SeqNoStats; import org.elasticsearch.index.seqno.SequenceNumbers; import org.elasticsearch.index.shard.ElasticsearchMergePolicy; @@ -2344,13 +2342,7 @@ protected void commitIndexWriter(final IndexWriter writer, final Translog transl commitData.put(MAX_UNSAFE_AUTO_ID_TIMESTAMP_COMMIT_ID, Long.toString(maxUnsafeAutoIdTimestamp.get())); commitData.put(HISTORY_UUID_KEY, historyUUID); if (softDeleteEnabled) { - /* - * We sample these from the policy (which occurs under a lock) to ensure that we have a consistent view of the minimum - * retained sequence number, and the retention leases. 
- */ - final Tuple retentionPolicy = softDeletesPolicy.getRetentionPolicy(); - commitData.put(Engine.MIN_RETAINED_SEQNO, Long.toString(retentionPolicy.v1())); - commitData.put(Engine.RETENTION_LEASES, RetentionLeases.encodeRetentionLeases(retentionPolicy.v2())); + commitData.put(Engine.MIN_RETAINED_SEQNO, Long.toString(softDeletesPolicy.getMinRetainedSeqNo())); } logger.trace("committing writer with commit data [{}]", commitData); return commitData.entrySet().iterator(); diff --git a/server/src/main/java/org/elasticsearch/index/engine/SoftDeletesPolicy.java b/server/src/main/java/org/elasticsearch/index/engine/SoftDeletesPolicy.java index 9a9c7bd0ee869..4c9ee0be92f46 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/SoftDeletesPolicy.java +++ b/server/src/main/java/org/elasticsearch/index/engine/SoftDeletesPolicy.java @@ -21,7 +21,6 @@ import org.apache.lucene.document.LongPoint; import org.apache.lucene.search.Query; -import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.lease.Releasable; import org.elasticsearch.index.mapper.SeqNoFieldMapper; import org.elasticsearch.index.seqno.RetentionLease; @@ -107,10 +106,6 @@ private synchronized void releaseRetentionLock() { * Operations whose seq# is least this value should exist in the Lucene index. */ synchronized long getMinRetainedSeqNo() { - return getRetentionPolicy().v1(); - } - - public synchronized Tuple getRetentionPolicy() { /* * When an engine is flushed, we need to provide it the latest collection of retention leases even when the soft deletes policy is * locked for peer recovery. @@ -151,7 +146,7 @@ public synchronized Tuple getRetentionPolicy() { */ minRetainedSeqNo = Math.max(minRetainedSeqNo, minSeqNoToRetain); } - return Tuple.tuple(minRetainedSeqNo, retentionLeases); + return minRetainedSeqNo; } /** diff --git a/server/src/main/java/org/elasticsearch/index/seqno/ReplicationTracker.java b/server/src/main/java/org/elasticsearch/index/seqno/ReplicationTracker.java index 3e4e83d365ec1..566a81b3af4b0 100644 --- a/server/src/main/java/org/elasticsearch/index/seqno/ReplicationTracker.java +++ b/server/src/main/java/org/elasticsearch/index/seqno/ReplicationTracker.java @@ -32,6 +32,8 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.gateway.WriteStateException; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.shard.AbstractIndexShardComponent; import org.elasticsearch.index.shard.IndexShard; @@ -39,6 +41,7 @@ import org.elasticsearch.index.shard.ShardId; import java.io.IOException; +import java.nio.file.Path; import java.util.Collection; import java.util.Collections; import java.util.HashMap; @@ -318,6 +321,40 @@ public synchronized void updateRetentionLeasesOnReplica(final RetentionLeases re } } + /** + * Loads the latest retention leases from their dedicated state file. 
+ * + * @param path the path to the directory containing the state file + * @return the retention leases + * @throws IOException if an I/O exception occurs reading the retention leases + */ + public RetentionLeases loadRetentionLeases(final Path path) throws IOException { + final RetentionLeases retentionLeases = RetentionLeases.FORMAT.loadLatestState(logger, NamedXContentRegistry.EMPTY, path); + if (retentionLeases == null) { + return RetentionLeases.EMPTY; + } + return retentionLeases; + } + + private final Object retentionLeasePersistenceLock = new Object(); + + /** + * Persists the current retention leases to their dedicated state file. + * + * @param path the path to the directory containing the state file + * @throws WriteStateException if an exception occurs writing the state file + */ + public void persistRetentionLeases(final Path path) throws WriteStateException { + synchronized (retentionLeasePersistenceLock) { + final RetentionLeases currentRetentionLeases; + synchronized (this) { + currentRetentionLeases = retentionLeases; + } + logger.trace("persisting retention leases [{}]", currentRetentionLeases); + RetentionLeases.FORMAT.writeAndCleanup(currentRetentionLeases, path); + } + } + public static class CheckpointState implements Writeable { /** diff --git a/server/src/main/java/org/elasticsearch/index/seqno/RetentionLease.java b/server/src/main/java/org/elasticsearch/index/seqno/RetentionLease.java index e1d362d98764a..e6d6ed3fe825f 100644 --- a/server/src/main/java/org/elasticsearch/index/seqno/RetentionLease.java +++ b/server/src/main/java/org/elasticsearch/index/seqno/RetentionLease.java @@ -19,13 +19,16 @@ package org.elasticsearch.index.seqno; +import org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; import java.io.IOException; -import java.util.Arrays; -import java.util.Locale; import java.util.Objects; /** @@ -34,7 +37,7 @@ * otherwise merge away operations that have been soft deleted). Each retention lease contains a unique identifier, the retaining sequence * number, the timestamp of when the lease was created or renewed, and the source of the retention lease (e.g., "ccr"). 
*/ -public final class RetentionLease implements Writeable { +public final class RetentionLease implements ToXContent, Writeable { private final String id; @@ -94,10 +97,6 @@ public RetentionLease(final String id, final long retainingSequenceNumber, final if (id.isEmpty()) { throw new IllegalArgumentException("retention lease ID can not be empty"); } - if (id.contains(":") || id.contains(";") || id.contains(",")) { - // retention lease IDs can not contain these characters because they are used in encoding retention leases - throw new IllegalArgumentException("retention lease ID can not contain any of [:;,] but was [" + id + "]"); - } if (retainingSequenceNumber < 0) { throw new IllegalArgumentException("retention lease retaining sequence number [" + retainingSequenceNumber + "] out of range"); } @@ -108,10 +107,6 @@ public RetentionLease(final String id, final long retainingSequenceNumber, final if (source.isEmpty()) { throw new IllegalArgumentException("retention lease source can not be empty"); } - if (source.contains(":") || source.contains(";") || source.contains(",")) { - // retention lease sources can not contain these characters because they are used in encoding retention leases - throw new IllegalArgumentException("retention lease source can not contain any of [:;,] but was [" + source + "]"); - } this.id = id; this.retainingSequenceNumber = retainingSequenceNumber; this.timestamp = timestamp; @@ -145,43 +140,49 @@ public void writeTo(final StreamOutput out) throws IOException { out.writeString(source); } - /** - * Encodes a retention lease as a string. This encoding can be decoded by {@link #decodeRetentionLease(String)}. The retention lease is - * encoded in the format id:{id};retaining_seq_no:{retainingSequenecNumber};timestamp:{timestamp};source:{source}. 
- * - * @param retentionLease the retention lease - * @return the encoding of the retention lease - */ - static String encodeRetentionLease(final RetentionLease retentionLease) { - Objects.requireNonNull(retentionLease); - return String.format( - Locale.ROOT, - "id:%s;retaining_seq_no:%d;timestamp:%d;source:%s", - retentionLease.id, - retentionLease.retainingSequenceNumber, - retentionLease.timestamp, - retentionLease.source); + private static final ParseField ID_FIELD = new ParseField("id"); + private static final ParseField RETAINING_SEQUENCE_NUMBER_FIELD = new ParseField("retaining_sequence_number"); + private static final ParseField TIMESTAMP_FIELD = new ParseField("timestamp"); + private static final ParseField SOURCE_FIELD = new ParseField("source"); + + private static ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "retention_leases", + (a) -> new RetentionLease((String) a[0], (Long) a[1], (Long) a[2], (String) a[3])); + + static { + PARSER.declareString(ConstructingObjectParser.constructorArg(), ID_FIELD); + PARSER.declareLong(ConstructingObjectParser.constructorArg(), RETAINING_SEQUENCE_NUMBER_FIELD); + PARSER.declareLong(ConstructingObjectParser.constructorArg(), TIMESTAMP_FIELD); + PARSER.declareString(ConstructingObjectParser.constructorArg(), SOURCE_FIELD); + } + + @Override + public XContentBuilder toXContent(final XContentBuilder builder, final Params params) throws IOException { + builder.startObject(); + { + builder.field(ID_FIELD.getPreferredName(), id); + builder.field(RETAINING_SEQUENCE_NUMBER_FIELD.getPreferredName(), retainingSequenceNumber); + builder.field(TIMESTAMP_FIELD.getPreferredName(), timestamp); + builder.field(SOURCE_FIELD.getPreferredName(), source); + } + builder.endObject(); + return builder; + } + + @Override + public boolean isFragment() { + return false; } /** - * Decodes a retention lease encoded by {@link #encodeRetentionLease(RetentionLease)}. + * Parses a retention lease from {@link org.elasticsearch.common.xcontent.XContent}. This method assumes that the retention lease was + * converted to {@link org.elasticsearch.common.xcontent.XContent} via {@link #toXContent(XContentBuilder, Params)}. 
* - * @param encodedRetentionLease an encoded retention lease - * @return the decoded retention lease + * @param parser the parser + * @return a retention lease */ - static RetentionLease decodeRetentionLease(final String encodedRetentionLease) { - Objects.requireNonNull(encodedRetentionLease); - final String[] fields = encodedRetentionLease.split(";"); - assert fields.length == 4 : Arrays.toString(fields); - assert fields[0].matches("id:[^:;,]+") : fields[0]; - final String id = fields[0].substring("id:".length()); - assert fields[1].matches("retaining_seq_no:\\d+") : fields[1]; - final long retainingSequenceNumber = Long.parseLong(fields[1].substring("retaining_seq_no:".length())); - assert fields[2].matches("timestamp:\\d+") : fields[2]; - final long timestamp = Long.parseLong(fields[2].substring("timestamp:".length())); - assert fields[3].matches("source:[^:;,]+") : fields[3]; - final String source = fields[3].substring("source:".length()); - return new RetentionLease(id, retainingSequenceNumber, timestamp, source); + public static RetentionLease fromXContent(final XContentParser parser) { + return PARSER.apply(parser, null); } @Override diff --git a/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseBackgroundSyncAction.java b/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseBackgroundSyncAction.java index 906b505dad7e3..4033dcf0c4bef 100644 --- a/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseBackgroundSyncAction.java +++ b/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseBackgroundSyncAction.java @@ -37,6 +37,7 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.gateway.WriteStateException; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.IndexShardClosedException; import org.elasticsearch.index.shard.ShardId; @@ -119,19 +120,21 @@ public void backgroundSync( } @Override - protected PrimaryResult shardOperationOnPrimary(final Request request, final IndexShard primary) { + protected PrimaryResult shardOperationOnPrimary( + final Request request, + final IndexShard primary) throws WriteStateException { Objects.requireNonNull(request); Objects.requireNonNull(primary); - primary.afterWriteOperation(); + primary.persistRetentionLeases(); return new PrimaryResult<>(request, new ReplicationResponse()); } @Override - protected ReplicaResult shardOperationOnReplica(final Request request, final IndexShard replica){ + protected ReplicaResult shardOperationOnReplica(final Request request, final IndexShard replica) throws WriteStateException { Objects.requireNonNull(request); Objects.requireNonNull(replica); replica.updateRetentionLeasesOnReplica(request.getRetentionLeases()); - replica.afterWriteOperation(); + replica.persistRetentionLeases(); return new ReplicaResult(); } diff --git a/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseSyncAction.java b/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseSyncAction.java index 9be7ab046eb8b..760271e53ee1e 100644 --- a/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseSyncAction.java +++ b/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseSyncAction.java @@ -25,7 +25,6 @@ import org.apache.lucene.store.AlreadyClosedException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; -import 
org.elasticsearch.action.admin.indices.flush.FlushRequest; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.WriteResponse; import org.elasticsearch.action.support.replication.ReplicatedWriteRequest; @@ -39,6 +38,7 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.gateway.WriteStateException; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.IndexShardClosedException; import org.elasticsearch.index.shard.ShardId; @@ -121,31 +121,26 @@ public void sync( } @Override - protected WritePrimaryResult shardOperationOnPrimary(final Request request, final IndexShard primary) { + protected WritePrimaryResult shardOperationOnPrimary( + final Request request, + final IndexShard primary) throws WriteStateException { Objects.requireNonNull(request); Objects.requireNonNull(primary); - // we flush to ensure that retention leases are committed - flush(primary); + primary.persistRetentionLeases(); return new WritePrimaryResult<>(request, new Response(), null, null, primary, logger); } @Override - protected WriteReplicaResult shardOperationOnReplica(final Request request, final IndexShard replica) { + protected WriteReplicaResult shardOperationOnReplica( + final Request request, + final IndexShard replica) throws WriteStateException { Objects.requireNonNull(request); Objects.requireNonNull(replica); replica.updateRetentionLeasesOnReplica(request.getRetentionLeases()); - // we flush to ensure that retention leases are committed - flush(replica); + replica.persistRetentionLeases(); return new WriteReplicaResult<>(request, null, null, replica, logger); } - private void flush(final IndexShard indexShard) { - final FlushRequest flushRequest = new FlushRequest(); - flushRequest.force(true); - flushRequest.waitIfOngoing(true); - indexShard.flush(flushRequest); - } - public static final class Request extends ReplicatedWriteRequest { private RetentionLeases retentionLeases; diff --git a/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeases.java b/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeases.java index 5a9d9e333b27b..3bad887282502 100644 --- a/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeases.java +++ b/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeases.java @@ -19,15 +19,20 @@ package org.elasticsearch.index.seqno; +import org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.gateway.MetaDataStateFormat; import java.io.IOException; -import java.util.Arrays; import java.util.Collection; import java.util.Collections; -import java.util.Locale; +import java.util.LinkedHashMap; import java.util.Map; import java.util.Objects; import java.util.function.Function; @@ -37,7 +42,7 @@ * Represents a versioned collection of retention leases. We version the collection of retention leases to ensure that sync requests that * arrive out of order on the replica, using the version to ensure that older sync requests are rejected. 
*/ -public class RetentionLeases implements Writeable { +public class RetentionLeases implements ToXContent, Writeable { private final long primaryTerm; @@ -157,54 +162,59 @@ public void writeTo(final StreamOutput out) throws IOException { out.writeCollection(leases.values()); } - /** - * Encodes a retention lease collection as a string. This encoding can be decoded by - * {@link RetentionLeases#decodeRetentionLeases(String)}. The encoding is a comma-separated encoding of each retention lease as encoded - * by {@link RetentionLease#encodeRetentionLease(RetentionLease)}, prefixed by the version of the retention lease collection. - * - * @param retentionLeases the retention lease collection - * @return the encoding of the retention lease collection - */ - public static String encodeRetentionLeases(final RetentionLeases retentionLeases) { - Objects.requireNonNull(retentionLeases); - return String.format( - Locale.ROOT, - "primary_term:%d;version:%d;%s", - retentionLeases.primaryTerm, - retentionLeases.version, - retentionLeases.leases.values().stream().map(RetentionLease::encodeRetentionLease).collect(Collectors.joining(","))); + private static final ParseField PRIMARY_TERM_FIELD = new ParseField("primary_term"); + private static final ParseField VERSION_FIELD = new ParseField("version"); + private static final ParseField LEASES_FIELD = new ParseField("leases"); + + @SuppressWarnings("unchecked") + private static ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "retention_leases", + (a) -> new RetentionLeases((Long) a[0], (Long) a[1], (Collection) a[2])); + + static { + PARSER.declareLong(ConstructingObjectParser.constructorArg(), PRIMARY_TERM_FIELD); + PARSER.declareLong(ConstructingObjectParser.constructorArg(), VERSION_FIELD); + PARSER.declareObjectArray(ConstructingObjectParser.constructorArg(), (p, c) -> RetentionLease.fromXContent(p), LEASES_FIELD); + } + + @Override + public XContentBuilder toXContent(final XContentBuilder builder, final Params params) throws IOException { + builder.field(PRIMARY_TERM_FIELD.getPreferredName(), primaryTerm); + builder.field(VERSION_FIELD.getPreferredName(), version); + builder.startArray(LEASES_FIELD.getPreferredName()); + { + for (final RetentionLease retentionLease : leases.values()) { + retentionLease.toXContent(builder, params); + } + } + builder.endArray(); + return builder; } /** - * Decodes retention leases encoded by {@link #encodeRetentionLeases(RetentionLeases)}. + * Parses a retention leases collection from {@link org.elasticsearch.common.xcontent.XContent}. This method assumes that the retention + * leases were converted to {@link org.elasticsearch.common.xcontent.XContent} via {@link #toXContent(XContentBuilder, Params)}. 
* - * @param encodedRetentionLeases an encoded retention lease collection - * @return the decoded retention lease collection + * @param parser the parser + * @return a retention leases collection */ - public static RetentionLeases decodeRetentionLeases(final String encodedRetentionLeases) { - Objects.requireNonNull(encodedRetentionLeases); - if (encodedRetentionLeases.isEmpty()) { - return EMPTY; + public static RetentionLeases fromXContent(final XContentParser parser) { + return PARSER.apply(parser, null); + } + + static final MetaDataStateFormat FORMAT = new MetaDataStateFormat("retention-leases-") { + + @Override + public void toXContent(final XContentBuilder builder, final RetentionLeases retentionLeases) throws IOException { + retentionLeases.toXContent(builder, ToXContent.EMPTY_PARAMS); } - assert encodedRetentionLeases.matches("primary_term:\\d+;version:\\d+;.*") : encodedRetentionLeases; - final int firstSemicolon = encodedRetentionLeases.indexOf(";"); - final long primaryTerm = Long.parseLong(encodedRetentionLeases.substring("primary_term:".length(), firstSemicolon)); - final int secondSemicolon = encodedRetentionLeases.indexOf(";", firstSemicolon + 1); - final long version = Long.parseLong(encodedRetentionLeases.substring(firstSemicolon + 1 + "version:".length(), secondSemicolon)); - final Collection leases; - if (secondSemicolon + 1 == encodedRetentionLeases.length()) { - leases = Collections.emptyList(); - } else { - assert Arrays.stream(encodedRetentionLeases.substring(secondSemicolon + 1).split(",")) - .allMatch(s -> s.matches("id:[^:;,]+;retaining_seq_no:\\d+;timestamp:\\d+;source:[^:;,]+")) - : encodedRetentionLeases; - leases = Arrays.stream(encodedRetentionLeases.substring(secondSemicolon + 1).split(",")) - .map(RetentionLease::decodeRetentionLease) - .collect(Collectors.toList()); + + @Override + public RetentionLeases fromXContent(final XContentParser parser) { + return RetentionLeases.fromXContent(parser); } - return new RetentionLeases(primaryTerm, version, leases); - } + }; @Override public boolean equals(Object o) { @@ -237,7 +247,16 @@ public String toString() { * @return the map from retention lease ID to retention lease */ private static Map toMap(final Collection leases) { - return leases.stream().collect(Collectors.toMap(RetentionLease::id, Function.identity())); + // use a linked hash map to preserve order + return leases.stream() + .collect(Collectors.toMap( + RetentionLease::id, + Function.identity(), + (left, right) -> { + assert left.id().equals(right.id()) : "expected [" + left.id() + "] to equal [" + right.id() + "]"; + throw new IllegalStateException("duplicate retention lease ID [" + left.id() + "]"); + }, + LinkedHashMap::new)); } /** diff --git a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java index faa3f86591ff0..7a5ec6bd28685 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java @@ -67,6 +67,7 @@ import org.elasticsearch.common.util.concurrent.RunOnce; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.core.internal.io.IOUtils; +import org.elasticsearch.gateway.WriteStateException; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexModule; import org.elasticsearch.index.IndexNotFoundException; @@ -1432,7 +1433,7 @@ private void innerOpenEngineAndTranslog() throws IOException { final String translogUUID = 
store.readLastCommittedSegmentsInfo().getUserData().get(Translog.TRANSLOG_UUID_KEY); final long globalCheckpoint = Translog.readGlobalCheckpoint(translogConfig.getTranslogPath(), translogUUID); replicationTracker.updateGlobalCheckpointOnReplica(globalCheckpoint, "read from translog checkpoint"); - replicationTracker.updateRetentionLeasesOnReplica(getRetentionLeases(store.readLastCommittedSegmentsInfo())); + updateRetentionLeasesOnReplica(loadRetentionLeases()); trimUnsafeCommits(); synchronized (mutex) { verifyNotClosed(); @@ -1452,14 +1453,6 @@ private void innerOpenEngineAndTranslog() throws IOException { assert recoveryState.getStage() == RecoveryState.Stage.TRANSLOG : "TRANSLOG stage expected but was: " + recoveryState.getStage(); } - static RetentionLeases getRetentionLeases(final SegmentInfos segmentInfos) { - final String committedRetentionLeases = segmentInfos.getUserData().get(Engine.RETENTION_LEASES); - if (committedRetentionLeases == null) { - return RetentionLeases.EMPTY; - } - return RetentionLeases.decodeRetentionLeases(committedRetentionLeases); - } - private void trimUnsafeCommits() throws IOException { assert currentEngineReference.get() == null || currentEngineReference.get() instanceof ReadOnlyEngine : "a write engine is running"; final String translogUUID = store.readLastCommittedSegmentsInfo().getUserData().get(Translog.TRANSLOG_UUID_KEY); @@ -2004,6 +1997,27 @@ public void updateRetentionLeasesOnReplica(final RetentionLeases retentionLeases replicationTracker.updateRetentionLeasesOnReplica(retentionLeases); } + /** + * Loads the latest retention leases from their dedicated state file. + * + * @return the retention leases + * @throws IOException if an I/O exception occurs reading the retention leases + */ + public RetentionLeases loadRetentionLeases() throws IOException { + verifyNotClosed(); + return replicationTracker.loadRetentionLeases(path.getShardStatePath()); + } + + /** + * Persists the current retention leases to their dedicated state file. + * + * @throws WriteStateException if an exception occurs writing the state file + */ + public void persistRetentionLeases() throws WriteStateException { + verifyNotClosed(); + replicationTracker.persistRetentionLeases(path.getShardStatePath()); + } + /** * Syncs the current retention leases to all replicas. */ diff --git a/server/src/main/java/org/elasticsearch/index/store/Store.java b/server/src/main/java/org/elasticsearch/index/store/Store.java index c693d6b9d80fe..46f75f0db3745 100644 --- a/server/src/main/java/org/elasticsearch/index/store/Store.java +++ b/server/src/main/java/org/elasticsearch/index/store/Store.java @@ -1548,13 +1548,6 @@ public void trimUnsafeCommits(final long lastSyncedGlobalCheckpoint, final long + translogUUID + "]"); } if (startingIndexCommit.equals(lastIndexCommitCommit) == false) { - /* - * Unlike other commit tags, the retention-leases tag is not restored when an engine is - * recovered from translog. We need to manually copy it from the last commit to the safe commit; - * otherwise we might lose the latest committed retention leases when re-opening an engine. 
- */ - final Map userData = new HashMap<>(startingIndexCommit.getUserData()); - userData.put(Engine.RETENTION_LEASES, lastIndexCommitCommit.getUserData().getOrDefault(Engine.RETENTION_LEASES, "")); try (IndexWriter writer = newAppendingIndexWriter(directory, startingIndexCommit)) { // this achieves two things: // - by committing a new commit based on the starting commit, it make sure the starting commit will be opened @@ -1565,7 +1558,7 @@ public void trimUnsafeCommits(final long lastSyncedGlobalCheckpoint, final long // The new commit will use segment files from the starting commit but userData from the last commit by default. // Thus, we need to manually set the userData from the starting commit to the new commit. - writer.setLiveCommitData(userData.entrySet()); + writer.setLiveCommitData(startingIndexCommit.getUserData().entrySet()); writer.commit(); } } diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java index 76f2200a47d82..00f07ed84e527 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java @@ -307,6 +307,7 @@ public void finalizeRecovery(final long globalCheckpoint, ActionListener l indexShard.updateGlobalCheckpointOnReplica(globalCheckpoint, "finalizing recovery"); // Persist the global checkpoint. indexShard.sync(); + indexShard.persistRetentionLeases(); indexShard.finalizeRecovery(); return null; }); diff --git a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java index d9ed5cd2c719e..8d0865a652578 100644 --- a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java +++ b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java @@ -5361,16 +5361,6 @@ public void testKeepMinRetainedSeqNoByMergePolicy() throws IOException { engine.flush(true, true); assertThat(Long.parseLong(engine.getLastCommittedSegmentInfos().userData.get(Engine.MIN_RETAINED_SEQNO)), equalTo(engine.getMinRetainedSeqNo())); - final RetentionLeases leases = retentionLeasesHolder.get(); - if (leases.leases().isEmpty()) { - assertThat( - engine.getLastCommittedSegmentInfos().getUserData().get(Engine.RETENTION_LEASES), - equalTo("primary_term:" + primaryTerm + ";version:" + retentionLeasesVersion.get() + ";")); - } else { - assertThat( - engine.getLastCommittedSegmentInfos().getUserData().get(Engine.RETENTION_LEASES), - equalTo(RetentionLeases.encodeRetentionLeases(leases))); - } } if (rarely()) { engine.forceMerge(randomBoolean()); diff --git a/server/src/test/java/org/elasticsearch/index/engine/SoftDeletesPolicyTests.java b/server/src/test/java/org/elasticsearch/index/engine/SoftDeletesPolicyTests.java index e4da636deaf6d..3c71e4fede3d5 100644 --- a/server/src/test/java/org/elasticsearch/index/engine/SoftDeletesPolicyTests.java +++ b/server/src/test/java/org/elasticsearch/index/engine/SoftDeletesPolicyTests.java @@ -37,8 +37,6 @@ import java.util.function.Supplier; import static org.elasticsearch.index.seqno.SequenceNumbers.NO_OPS_PERFORMED; -import static org.hamcrest.Matchers.contains; -import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; @@ -121,30 +119,6 @@ public void testSoftDeletesRetentionLock() { assertThat(policy.getMinRetainedSeqNo(), 
equalTo(minRetainedSeqNo)); } - public void testAlwaysFetchLatestRetentionLeases() { - final AtomicLong globalCheckpoint = new AtomicLong(NO_OPS_PERFORMED); - final Collection leases = new ArrayList<>(); - final int numLeases = randomIntBetween(0, 10); - for (int i = 0; i < numLeases; i++) { - leases.add(new RetentionLease(Integer.toString(i), randomLongBetween(0, 1000), randomNonNegativeLong(), "test")); - } - final Supplier leasesSupplier = - () -> new RetentionLeases( - randomNonNegativeLong(), - randomNonNegativeLong(), - Collections.unmodifiableCollection(new ArrayList<>(leases))); - final SoftDeletesPolicy policy = - new SoftDeletesPolicy(globalCheckpoint::get, randomIntBetween(1, 1000), randomIntBetween(0, 1000), leasesSupplier); - if (randomBoolean()) { - policy.acquireRetentionLock(); - } - if (numLeases == 0) { - assertThat(policy.getRetentionPolicy().v2().leases(), empty()); - } else { - assertThat(policy.getRetentionPolicy().v2().leases(), contains(leases.toArray(new RetentionLease[0]))); - } - } - public void testWhenGlobalCheckpointDictatesThePolicy() { final int retentionOperations = randomIntBetween(0, 1024); final AtomicLong globalCheckpoint = new AtomicLong(randomLongBetween(0, Long.MAX_VALUE - 2)); diff --git a/server/src/test/java/org/elasticsearch/index/seqno/ReplicationTrackerRetentionLeaseTests.java b/server/src/test/java/org/elasticsearch/index/seqno/ReplicationTrackerRetentionLeaseTests.java index a9aae80db6ca4..967328514a98d 100644 --- a/server/src/test/java/org/elasticsearch/index/seqno/ReplicationTrackerRetentionLeaseTests.java +++ b/server/src/test/java/org/elasticsearch/index/seqno/ReplicationTrackerRetentionLeaseTests.java @@ -24,16 +24,21 @@ import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.gateway.WriteStateException; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.test.IndexSettingsModule; +import java.io.IOException; +import java.nio.file.Path; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.concurrent.BrokenBarrierException; +import java.util.concurrent.CyclicBarrier; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.atomic.AtomicReference; @@ -460,6 +465,109 @@ public void testReplicaIgnoresOlderRetentionLeasesVersion() { } } + public void testLoadAndPersistRetentionLeases() throws IOException { + final AllocationId allocationId = AllocationId.newInitializing(); + long primaryTerm = randomLongBetween(1, Long.MAX_VALUE); + final ReplicationTracker replicationTracker = new ReplicationTracker( + new ShardId("test", "_na", 0), + allocationId.getId(), + IndexSettingsModule.newIndexSettings("test", Settings.EMPTY), + primaryTerm, + UNASSIGNED_SEQ_NO, + value -> {}, + () -> 0L, + (leases, listener) -> {}); + replicationTracker.updateFromMaster( + randomNonNegativeLong(), + Collections.singleton(allocationId.getId()), + routingTable(Collections.emptySet(), allocationId), + Collections.emptySet()); + replicationTracker.activatePrimaryMode(SequenceNumbers.NO_OPS_PERFORMED); + final int length = randomIntBetween(0, 8); + for (int i = 0; i < length; i++) { + if (rarely() && primaryTerm < Long.MAX_VALUE) { + primaryTerm = randomLongBetween(primaryTerm + 1, Long.MAX_VALUE); + 
replicationTracker.setOperationPrimaryTerm(primaryTerm); + } + final long retainingSequenceNumber = randomLongBetween(SequenceNumbers.NO_OPS_PERFORMED, Long.MAX_VALUE); + replicationTracker.addRetentionLease( + Integer.toString(i), retainingSequenceNumber, "test-" + i, ActionListener.wrap(() -> {})); + } + + final Path path = createTempDir(); + replicationTracker.persistRetentionLeases(path); + assertThat(replicationTracker.loadRetentionLeases(path), equalTo(replicationTracker.getRetentionLeases())); + } + + /** + * Test that we correctly synchronize writing the retention lease state file in {@link ReplicationTracker#persistRetentionLeases(Path)}. + * This test can fail without the synchronization block in that method. + * + * @throws IOException if an I/O exception occurs loading the retention lease state file + */ + public void testPersistRetentionLeasesUnderConcurrency() throws IOException { + final AllocationId allocationId = AllocationId.newInitializing(); + long primaryTerm = randomLongBetween(1, Long.MAX_VALUE); + final ReplicationTracker replicationTracker = new ReplicationTracker( + new ShardId("test", "_na", 0), + allocationId.getId(), + IndexSettingsModule.newIndexSettings("test", Settings.EMPTY), + primaryTerm, + UNASSIGNED_SEQ_NO, + value -> {}, + () -> 0L, + (leases, listener) -> {}); + replicationTracker.updateFromMaster( + randomNonNegativeLong(), + Collections.singleton(allocationId.getId()), + routingTable(Collections.emptySet(), allocationId), + Collections.emptySet()); + replicationTracker.activatePrimaryMode(SequenceNumbers.NO_OPS_PERFORMED); + final int length = randomIntBetween(0, 8); + for (int i = 0; i < length; i++) { + if (rarely() && primaryTerm < Long.MAX_VALUE) { + primaryTerm = randomLongBetween(primaryTerm + 1, Long.MAX_VALUE); + replicationTracker.setOperationPrimaryTerm(primaryTerm); + } + final long retainingSequenceNumber = randomLongBetween(SequenceNumbers.NO_OPS_PERFORMED, Long.MAX_VALUE); + replicationTracker.addRetentionLease( + Integer.toString(i), retainingSequenceNumber, "test-" + i, ActionListener.wrap(() -> {})); + } + + final Path path = createTempDir(); + final int numberOfThreads = randomIntBetween(1, 2 * Runtime.getRuntime().availableProcessors()); + final CyclicBarrier barrier = new CyclicBarrier(1 + numberOfThreads); + final Thread[] threads = new Thread[numberOfThreads]; + for (int i = 0; i < numberOfThreads; i++) { + final String id = Integer.toString(length + i); + threads[i] = new Thread(() -> { + try { + barrier.await(); + final long retainingSequenceNumber = randomLongBetween(SequenceNumbers.NO_OPS_PERFORMED, Long.MAX_VALUE); + replicationTracker.addRetentionLease(id, retainingSequenceNumber, "test-" + id, ActionListener.wrap(() -> {})); + replicationTracker.persistRetentionLeases(path); + barrier.await(); + } catch (final BrokenBarrierException | InterruptedException | WriteStateException e) { + throw new AssertionError(e); + } + }); + threads[i].start(); + } + + try { + // synchronize the threads invoking ReplicationTracker#persistRetentionLeases(Path path) + barrier.await(); + // wait for all the threads to finish + barrier.await(); + for (int i = 0; i < numberOfThreads; i++) { + threads[i].join(); + } + } catch (final BrokenBarrierException | InterruptedException e) { + throw new AssertionError(e); + } + assertThat(replicationTracker.loadRetentionLeases(path), equalTo(replicationTracker.getRetentionLeases())); + } + private void assertRetentionLeases( final ReplicationTracker replicationTracker, final int size, diff --git 
a/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseBackgroundSyncActionTests.java b/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseBackgroundSyncActionTests.java index e738c04d2a1bb..4567f3e382337 100644 --- a/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseBackgroundSyncActionTests.java +++ b/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseBackgroundSyncActionTests.java @@ -30,6 +30,7 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.internal.io.IOUtils; +import org.elasticsearch.gateway.WriteStateException; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.shard.IndexShard; @@ -91,7 +92,7 @@ public void tearDown() throws Exception { super.tearDown(); } - public void testRetentionLeaseBackgroundSyncActionOnPrimary() { + public void testRetentionLeaseBackgroundSyncActionOnPrimary() throws WriteStateException { final IndicesService indicesService = mock(IndicesService.class); final Index index = new Index("index", "uuid"); @@ -120,13 +121,13 @@ public void testRetentionLeaseBackgroundSyncActionOnPrimary() { final ReplicationOperation.PrimaryResult result = action.shardOperationOnPrimary(request, indexShard); - // the retention leases on the shard should be periodically flushed - verify(indexShard).afterWriteOperation(); + // the retention leases on the shard should be persisted + verify(indexShard).persistRetentionLeases(); // we should forward the request containing the current retention leases to the replica assertThat(result.replicaRequest(), sameInstance(request)); } - public void testRetentionLeaseBackgroundSyncActionOnReplica() { + public void testRetentionLeaseBackgroundSyncActionOnReplica() throws WriteStateException { final IndicesService indicesService = mock(IndicesService.class); final Index index = new Index("index", "uuid"); @@ -156,8 +157,8 @@ public void testRetentionLeaseBackgroundSyncActionOnReplica() { final TransportReplicationAction.ReplicaResult result = action.shardOperationOnReplica(request, indexShard); // the retention leases on the shard should be updated verify(indexShard).updateRetentionLeasesOnReplica(retentionLeases); - // the retention leases on the shard should be periodically flushed - verify(indexShard).afterWriteOperation(); + // the retention leases on the shard should be persisted + verify(indexShard).persistRetentionLeases(); // the result should indicate success final AtomicBoolean success = new AtomicBoolean(); result.respond(ActionListener.wrap(r -> success.set(true), e -> fail(e.toString()))); diff --git a/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseIT.java b/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseIT.java index d92db46701df8..44a8cd70c42eb 100644 --- a/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseIT.java +++ b/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseIT.java @@ -28,7 +28,6 @@ import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.IndexSettings; -import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.indices.IndicesService; @@ -103,10 +102,8 @@ public void testRetentionLeasesSyncedOnAdd() throws Exception { latch.await(); retentionLock.close(); - // check retention 
leases have been committed on the primary - final RetentionLeases primaryCommittedRetentionLeases = RetentionLeases.decodeRetentionLeases( - primary.commitStats().getUserData().get(Engine.RETENTION_LEASES)); - assertThat(currentRetentionLeases, equalTo(RetentionLeases.toMap(primaryCommittedRetentionLeases))); + // check retention leases have been written on the primary + assertThat(currentRetentionLeases, equalTo(RetentionLeases.toMap(primary.loadRetentionLeases()))); // check current retention leases have been synced to all replicas for (final ShardRouting replicaShard : clusterService().state().routingTable().index("index").shard(0).replicaShards()) { @@ -118,10 +115,8 @@ public void testRetentionLeasesSyncedOnAdd() throws Exception { final Map retentionLeasesOnReplica = RetentionLeases.toMap(replica.getRetentionLeases()); assertThat(retentionLeasesOnReplica, equalTo(currentRetentionLeases)); - // check retention leases have been committed on the replica - final RetentionLeases replicaCommittedRetentionLeases = RetentionLeases.decodeRetentionLeases( - replica.commitStats().getUserData().get(Engine.RETENTION_LEASES)); - assertThat(currentRetentionLeases, equalTo(RetentionLeases.toMap(replicaCommittedRetentionLeases))); + // check retention leases have been written on the replica + assertThat(currentRetentionLeases, equalTo(RetentionLeases.toMap(replica.loadRetentionLeases()))); } } } @@ -165,10 +160,8 @@ public void testRetentionLeaseSyncedOnRemove() throws Exception { latch.await(); retentionLock.close(); - // check retention leases have been committed on the primary - final RetentionLeases primaryCommittedRetentionLeases = RetentionLeases.decodeRetentionLeases( - primary.commitStats().getUserData().get(Engine.RETENTION_LEASES)); - assertThat(currentRetentionLeases, equalTo(RetentionLeases.toMap(primaryCommittedRetentionLeases))); + // check retention leases have been written on the primary + assertThat(currentRetentionLeases, equalTo(RetentionLeases.toMap(primary.loadRetentionLeases()))); // check current retention leases have been synced to all replicas for (final ShardRouting replicaShard : clusterService().state().routingTable().index("index").shard(0).replicaShards()) { @@ -180,10 +173,8 @@ public void testRetentionLeaseSyncedOnRemove() throws Exception { final Map retentionLeasesOnReplica = RetentionLeases.toMap(replica.getRetentionLeases()); assertThat(retentionLeasesOnReplica, equalTo(currentRetentionLeases)); - // check retention leases have been committed on the replica - final RetentionLeases replicaCommittedRetentionLeases = RetentionLeases.decodeRetentionLeases( - replica.commitStats().getUserData().get(Engine.RETENTION_LEASES)); - assertThat(currentRetentionLeases, equalTo(RetentionLeases.toMap(replicaCommittedRetentionLeases))); + // check retention leases have been written on the replica + assertThat(currentRetentionLeases, equalTo(RetentionLeases.toMap(replica.loadRetentionLeases()))); } } } @@ -322,7 +313,6 @@ public void testBackgroundRetentionLeaseSync() throws Exception { } } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/38588") public void testRetentionLeasesSyncOnRecovery() throws Exception { final int numberOfReplicas = 2 - scaledRandomIntBetween(0, 2); internalCluster().ensureAtLeastNumDataNodes(1 + numberOfReplicas); @@ -378,6 +368,9 @@ public void testRetentionLeasesSyncOnRecovery() throws Exception { .getShardOrNull(new ShardId(resolveIndex("index"), 0)); final Map retentionLeasesOnReplica = 
RetentionLeases.toMap(replica.getRetentionLeases()); assertThat(retentionLeasesOnReplica, equalTo(currentRetentionLeases)); + + // check retention leases have been written on the replica; see RecoveryTarget#finalizeRecovery + assertThat(currentRetentionLeases, equalTo(RetentionLeases.toMap(replica.loadRetentionLeases()))); } } diff --git a/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseSyncActionTests.java b/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseSyncActionTests.java index 18817d784b131..80baa23a4d7ac 100644 --- a/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseSyncActionTests.java +++ b/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseSyncActionTests.java @@ -21,7 +21,6 @@ import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.lucene.store.AlreadyClosedException; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.admin.indices.flush.FlushRequest; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.replication.TransportWriteAction; import org.elasticsearch.cluster.action.shard.ShardStateAction; @@ -29,6 +28,7 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.internal.io.IOUtils; +import org.elasticsearch.gateway.WriteStateException; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.shard.IndexShard; @@ -90,7 +90,7 @@ public void tearDown() throws Exception { super.tearDown(); } - public void testRetentionLeaseSyncActionOnPrimary() { + public void testRetentionLeaseSyncActionOnPrimary() throws WriteStateException { final IndicesService indicesService = mock(IndicesService.class); final Index index = new Index("index", "uuid"); @@ -118,18 +118,15 @@ public void testRetentionLeaseSyncActionOnPrimary() { final TransportWriteAction.WritePrimaryResult result = action.shardOperationOnPrimary(request, indexShard); - // the retention leases on the shard should be flushed - final ArgumentCaptor flushRequest = ArgumentCaptor.forClass(FlushRequest.class); - verify(indexShard).flush(flushRequest.capture()); - assertTrue(flushRequest.getValue().force()); - assertTrue(flushRequest.getValue().waitIfOngoing()); + // the retention leases on the shard should be persisted + verify(indexShard).persistRetentionLeases(); // we should forward the request containing the current retention leases to the replica assertThat(result.replicaRequest(), sameInstance(request)); // we should start with an empty replication response assertNull(result.finalResponseIfSuccessful.getShardInfo()); } - public void testRetentionLeaseSyncActionOnReplica() { + public void testRetentionLeaseSyncActionOnReplica() throws WriteStateException { final IndicesService indicesService = mock(IndicesService.class); final Index index = new Index("index", "uuid"); @@ -159,11 +156,8 @@ public void testRetentionLeaseSyncActionOnReplica() { action.shardOperationOnReplica(request, indexShard); // the retention leases on the shard should be updated verify(indexShard).updateRetentionLeasesOnReplica(retentionLeases); - // the retention leases on the shard should be flushed - final ArgumentCaptor flushRequest = ArgumentCaptor.forClass(FlushRequest.class); - verify(indexShard).flush(flushRequest.capture()); - assertTrue(flushRequest.getValue().force()); - assertTrue(flushRequest.getValue().waitIfOngoing()); + // the retention leases 
on the shard should be persisteed + verify(indexShard).persistRetentionLeases(); // the result should indicate success final AtomicBoolean success = new AtomicBoolean(); result.respond(ActionListener.wrap(r -> success.set(true), e -> fail(e.toString()))); diff --git a/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseTests.java b/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseTests.java index bd2dee78b05ed..f38a806bd7b95 100644 --- a/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseTests.java +++ b/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseTests.java @@ -31,14 +31,6 @@ public class RetentionLeaseTests extends ESTestCase { - public void testInvalidId() { - final String id = "id" + randomFrom(":", ";", ","); - final IllegalArgumentException e = expectThrows( - IllegalArgumentException.class, - () -> new RetentionLease(id, randomNonNegativeLong(), randomNonNegativeLong(), "source")); - assertThat(e, hasToString(containsString("retention lease ID can not contain any of [:;,] but was [" + id + "]"))); - } - public void testEmptyId() { final IllegalArgumentException e = expectThrows( IllegalArgumentException.class, @@ -64,14 +56,6 @@ public void testTimestampOutOfRange() { assertThat(e, hasToString(containsString("retention lease timestamp [" + timestamp + "] out of range"))); } - public void testInvalidSource() { - final String source = "source" + randomFrom(":", ";", ","); - final IllegalArgumentException e = expectThrows( - IllegalArgumentException.class, - () -> new RetentionLease("id", randomNonNegativeLong(), randomNonNegativeLong(), source)); - assertThat(e, hasToString(containsString("retention lease source can not contain any of [:;,] but was [" + source + "]"))); - } - public void testEmptySource() { final IllegalArgumentException e = expectThrows( IllegalArgumentException.class, @@ -93,13 +77,4 @@ public void testRetentionLeaseSerialization() throws IOException { } } - public void testRetentionLeaseEncoding() { - final String id = randomAlphaOfLength(8); - final long retainingSequenceNumber = randomNonNegativeLong(); - final long timestamp = randomNonNegativeLong(); - final String source = randomAlphaOfLength(8); - final RetentionLease retentionLease = new RetentionLease(id, retainingSequenceNumber, timestamp, source); - assertThat(RetentionLease.decodeRetentionLease(RetentionLease.encodeRetentionLease(retentionLease)), equalTo(retentionLease)); - } - } diff --git a/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseXContentTests.java b/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseXContentTests.java new file mode 100644 index 0000000000000..159e85b572b98 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseXContentTests.java @@ -0,0 +1,48 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.seqno; + +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractXContentTestCase; + +import java.io.IOException; + +public class RetentionLeaseXContentTests extends AbstractXContentTestCase { + + @Override + protected RetentionLease createTestInstance() { + final String id = randomAlphaOfLength(8); + final long retainingSequenceNumber = randomNonNegativeLong(); + final long timestamp = randomNonNegativeLong(); + final String source = randomAlphaOfLength(8); + return new RetentionLease(id, retainingSequenceNumber, timestamp, source); + } + + @Override + protected RetentionLease doParseInstance(final XContentParser parser) throws IOException { + return RetentionLease.fromXContent(parser); + } + + @Override + protected boolean supportsUnknownFields() { + return false; + } + +} diff --git a/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeasesTests.java b/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeasesTests.java index 33cc83f602860..28444c7825e4d 100644 --- a/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeasesTests.java +++ b/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeasesTests.java @@ -19,13 +19,18 @@ package org.elasticsearch.index.seqno; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.test.ESTestCase; +import java.io.IOException; +import java.nio.file.Path; import java.util.ArrayList; import java.util.Collections; import java.util.List; +import java.util.stream.Collectors; +import java.util.stream.Stream; -import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; @@ -49,30 +54,6 @@ public void testVersionOutOfRange() { assertThat(e, hasToString(containsString("version must be non-negative but was [" + version + "]"))); } - public void testRetentionLeasesEncoding() { - final long primaryTerm = randomNonNegativeLong(); - final long version = randomNonNegativeLong(); - final int length = randomIntBetween(0, 8); - final List retentionLeases = new ArrayList<>(length); - for (int i = 0; i < length; i++) { - final String id = randomAlphaOfLength(8); - final long retainingSequenceNumber = randomNonNegativeLong(); - final long timestamp = randomNonNegativeLong(); - final String source = randomAlphaOfLength(8); - final RetentionLease retentionLease = new RetentionLease(id, retainingSequenceNumber, timestamp, source); - retentionLeases.add(retentionLease); - } - final RetentionLeases decodedRetentionLeases = - RetentionLeases.decodeRetentionLeases( - RetentionLeases.encodeRetentionLeases(new RetentionLeases(primaryTerm, version, retentionLeases))); - assertThat(decodedRetentionLeases.version(), equalTo(version)); - if (length == 0) { - assertThat(decodedRetentionLeases.leases(), empty()); - } else { - assertThat(decodedRetentionLeases.leases(), containsInAnyOrder(retentionLeases.toArray(new RetentionLease[0]))); - } - } - public void testSupersedesByPrimaryTerm() { final long lowerPrimaryTerm = randomLongBetween(1, Long.MAX_VALUE); final RetentionLeases left = new RetentionLeases(lowerPrimaryTerm, randomLongBetween(1, Long.MAX_VALUE), Collections.emptyList()); @@ -92,4 +73,48 @@ public void testSupersedesByVersion() { 
assertFalse(left.supersedes(right)); } + public void testRetentionLeasesRejectsDuplicates() { + final RetentionLeases retentionLeases = randomRetentionLeases(false); + final RetentionLease retentionLease = randomFrom(retentionLeases.leases()); + final IllegalStateException e = expectThrows( + IllegalStateException.class, + () -> new RetentionLeases( + retentionLeases.primaryTerm(), + retentionLeases.version(), + Stream.concat(retentionLeases.leases().stream(), Stream.of(retentionLease)).collect(Collectors.toList()))); + assertThat(e, hasToString(containsString("duplicate retention lease ID [" + retentionLease.id() + "]"))); + } + + public void testLeasesPreservesIterationOrder() { + final RetentionLeases retentionLeases = randomRetentionLeases(true); + if (retentionLeases.leases().isEmpty()) { + assertThat(retentionLeases.leases(), empty()); + } else { + assertThat(retentionLeases.leases(), contains(retentionLeases.leases().toArray(new RetentionLease[0]))); + } + } + + public void testRetentionLeasesMetaDataStateFormat() throws IOException { + final Path path = createTempDir(); + final RetentionLeases retentionLeases = randomRetentionLeases(true); + RetentionLeases.FORMAT.writeAndCleanup(retentionLeases, path); + assertThat(RetentionLeases.FORMAT.loadLatestState(logger, NamedXContentRegistry.EMPTY, path), equalTo(retentionLeases)); + } + + private RetentionLeases randomRetentionLeases(boolean allowEmpty) { + final long primaryTerm = randomNonNegativeLong(); + final long version = randomNonNegativeLong(); + final int length = randomIntBetween(allowEmpty ? 0 : 1, 8); + final List leases = new ArrayList<>(length); + for (int i = 0; i < length; i++) { + final String id = randomAlphaOfLength(8); + final long retainingSequenceNumber = randomNonNegativeLong(); + final long timestamp = randomNonNegativeLong(); + final String source = randomAlphaOfLength(8); + final RetentionLease retentionLease = new RetentionLease(id, retainingSequenceNumber, timestamp, source); + leases.add(retentionLease); + } + return new RetentionLeases(primaryTerm, version, leases); + } + } diff --git a/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeasesXContentTests.java b/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeasesXContentTests.java new file mode 100644 index 0000000000000..5fc2ace16ee94 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeasesXContentTests.java @@ -0,0 +1,58 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.seqno; + +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractXContentTestCase; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +public class RetentionLeasesXContentTests extends AbstractXContentTestCase { + + @Override + protected RetentionLeases createTestInstance() { + final long primaryTerm = randomNonNegativeLong(); + final long version = randomNonNegativeLong(); + final int length = randomIntBetween(0, 8); + final List leases = new ArrayList<>(length); + for (int i = 0; i < length; i++) { + final String id = randomAlphaOfLength(8); + final long retainingSequenceNumber = randomNonNegativeLong(); + final long timestamp = randomNonNegativeLong(); + final String source = randomAlphaOfLength(8); + final RetentionLease retentionLease = new RetentionLease(id, retainingSequenceNumber, timestamp, source); + leases.add(retentionLease); + } + return new RetentionLeases(primaryTerm, version, leases); + } + + @Override + protected RetentionLeases doParseInstance(final XContentParser parser) throws IOException { + return RetentionLeases.fromXContent(parser); + } + + @Override + protected boolean supportsUnknownFields() { + return false; + } + +} diff --git a/server/src/test/java/org/elasticsearch/index/shard/IndexShardRetentionLeaseTests.java b/server/src/test/java/org/elasticsearch/index/shard/IndexShardRetentionLeaseTests.java index 5f103d484f8c1..566d1feaf007d 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/IndexShardRetentionLeaseTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/IndexShardRetentionLeaseTests.java @@ -19,43 +19,30 @@ package org.elasticsearch.index.shard; -import org.apache.lucene.index.SegmentInfos; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.admin.indices.flush.FlushRequest; -import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.RecoverySource; import org.elasticsearch.cluster.routing.ShardRoutingHelper; -import org.elasticsearch.cluster.routing.ShardRoutingState; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.index.IndexSettings; -import org.elasticsearch.index.engine.Engine; -import org.elasticsearch.index.engine.InternalEngine; import org.elasticsearch.index.engine.InternalEngineFactory; import org.elasticsearch.index.seqno.RetentionLease; import org.elasticsearch.index.seqno.RetentionLeaseStats; import org.elasticsearch.index.seqno.RetentionLeases; import org.elasticsearch.index.seqno.SequenceNumbers; -import org.elasticsearch.indices.recovery.RecoveryState; import org.elasticsearch.threadpool.ThreadPool; import java.io.IOException; -import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; -import java.util.List; import java.util.Map; import java.util.concurrent.ExecutorService; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicLong; -import static org.elasticsearch.cluster.routing.TestShardRouting.newShardRouting; import static org.hamcrest.Matchers.contains; -import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; import static 
org.hamcrest.Matchers.hasItem; @@ -221,7 +208,7 @@ private void runExpirationTest(final boolean primary) throws IOException { } } - public void testCommit() throws IOException { + public void testPersistence() throws IOException { final Settings settings = Settings.builder() .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true) .put(IndexSettings.INDEX_SOFT_DELETES_RETENTION_LEASE_SETTING.getKey(), Long.MAX_VALUE, TimeUnit.NANOSECONDS) @@ -242,19 +229,17 @@ public void testCommit() throws IOException { currentTimeMillis.set(TimeUnit.NANOSECONDS.toMillis(Long.MAX_VALUE)); - // force a commit - indexShard.flush(new FlushRequest().force(true)); + // force the retention leases to persist + indexShard.persistRetentionLeases(); - // the committed retention leases should equal our current retention leases - final SegmentInfos segmentCommitInfos = indexShard.store().readLastCommittedSegmentsInfo(); - assertTrue(segmentCommitInfos.getUserData().containsKey(Engine.RETENTION_LEASES)); + // the written retention leases should equal our current retention leases final RetentionLeases retentionLeases = indexShard.getEngine().config().retentionLeasesSupplier().get(); - final RetentionLeases committedRetentionLeases = IndexShard.getRetentionLeases(segmentCommitInfos); + final RetentionLeases writtenRetentionLeases = indexShard.loadRetentionLeases(); if (retentionLeases.leases().isEmpty()) { - assertThat(committedRetentionLeases.version(), equalTo(0L)); - assertThat(committedRetentionLeases.leases(), empty()); + assertThat(writtenRetentionLeases.version(), equalTo(0L)); + assertThat(writtenRetentionLeases.leases(), empty()); } else { - assertThat(committedRetentionLeases.version(), equalTo((long) length)); + assertThat(writtenRetentionLeases.version(), equalTo((long) length)); assertThat(retentionLeases.leases(), contains(retentionLeases.leases().toArray(new RetentionLease[0]))); } @@ -304,76 +289,6 @@ public void testRetentionLeaseStats() throws IOException { } } - public void testRecoverFromStoreReserveRetentionLeases() throws Exception { - final AtomicBoolean throwDuringRecoverFromTranslog = new AtomicBoolean(); - final IndexShard shard = newStartedShard(false, Settings.builder().put("index.soft_deletes.enabled", true).build(), - config -> new InternalEngine(config) { - @Override - public InternalEngine recoverFromTranslog(TranslogRecoveryRunner translogRecoveryRunner, - long recoverUpToSeqNo) throws IOException { - if (throwDuringRecoverFromTranslog.get()) { - throw new RuntimeException("crashed before recover from translog is completed"); - } - return super.recoverFromTranslog(translogRecoveryRunner, recoverUpToSeqNo); - } - }); - final List leases = new ArrayList<>(); - long version = randomLongBetween(0, 100); - long primaryTerm = randomLongBetween(1, 100); - final int iterations = randomIntBetween(1, 10); - for (int i = 0; i < iterations; i++) { - if (randomBoolean()) { - indexDoc(shard, "_doc", Integer.toString(i)); - } else { - leases.add(new RetentionLease(Integer.toString(i), randomNonNegativeLong(), - randomLongBetween(Integer.MAX_VALUE, Long.MAX_VALUE), "test")); - } - if (randomBoolean()) { - if (randomBoolean()) { - version += randomLongBetween(1, 100); - primaryTerm += randomLongBetween(0, 100); - shard.updateRetentionLeasesOnReplica(new RetentionLeases(primaryTerm, version, leases)); - shard.flush(new FlushRequest().force(true).waitIfOngoing(true)); - } - } - if (randomBoolean()) { - shard.updateGlobalCheckpointOnReplica(randomLongBetween(shard.getGlobalCheckpoint(), 
shard.getLocalCheckpoint()), "test"); - flushShard(shard); - } - } - version += randomLongBetween(1, 100); - primaryTerm += randomLongBetween(0, 100); - shard.updateRetentionLeasesOnReplica(new RetentionLeases(primaryTerm, version, leases)); - shard.flush(new FlushRequest().force(true).waitIfOngoing(true)); - closeShard(shard, false); - - final IndexShard failedShard = reinitShard(shard, newShardRouting(shard.routingEntry().shardId(), - shard.routingEntry().currentNodeId(), true, ShardRoutingState.INITIALIZING, - RecoverySource.ExistingStoreRecoverySource.INSTANCE)); - final DiscoveryNode localNode = new DiscoveryNode("foo", buildNewFakeTransportAddress(), - Collections.emptyMap(), Collections.emptySet(), Version.CURRENT); - failedShard.markAsRecovering("store", new RecoveryState(failedShard.routingEntry(), localNode, null)); - throwDuringRecoverFromTranslog.set(true); - expectThrows(IndexShardRecoveryException.class, failedShard::recoverFromStore); - closeShards(failedShard); - - final IndexShard newShard = reinitShard(shard, newShardRouting(shard.routingEntry().shardId(), - shard.routingEntry().currentNodeId(), true, ShardRoutingState.INITIALIZING, - RecoverySource.ExistingStoreRecoverySource.INSTANCE)); - newShard.markAsRecovering("store", new RecoveryState(failedShard.routingEntry(), localNode, null)); - throwDuringRecoverFromTranslog.set(false); - assertTrue(newShard.recoverFromStore()); - final RetentionLeases retentionLeases = newShard.getRetentionLeases(); - assertThat(retentionLeases.version(), equalTo(version)); - assertThat(retentionLeases.primaryTerm(), equalTo(primaryTerm)); - if (leases.isEmpty()) { - assertThat(retentionLeases.leases(), empty()); - } else { - assertThat(retentionLeases.leases(), containsInAnyOrder(leases.toArray(new RetentionLease[0]))); - } - closeShards(newShard); - } - private void assertRetentionLeases( final IndexShard indexShard, final int size, diff --git a/x-pack/plugin/ilm/qa/multi-cluster/src/test/java/org/elasticsearch/xpack/indexlifecycle/CCRIndexLifecycleIT.java b/x-pack/plugin/ilm/qa/multi-cluster/src/test/java/org/elasticsearch/xpack/indexlifecycle/CCRIndexLifecycleIT.java index 8b4c21ee086aa..b3c93acb97b99 100644 --- a/x-pack/plugin/ilm/qa/multi-cluster/src/test/java/org/elasticsearch/xpack/indexlifecycle/CCRIndexLifecycleIT.java +++ b/x-pack/plugin/ilm/qa/multi-cluster/src/test/java/org/elasticsearch/xpack/indexlifecycle/CCRIndexLifecycleIT.java @@ -278,7 +278,6 @@ public void testCcrAndIlmWithRollover() throws Exception { } } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/37165") public void testUnfollowInjectedBeforeShrink() throws Exception { final String indexName = "shrink-test"; final String shrunkenIndexName = "shrink-" + indexName; From 2eb7c1d3f0ee4a623f635295c7e8e98de539788b Mon Sep 17 00:00:00 2001 From: Albert Zaharovits Date: Tue, 19 Feb 2019 00:24:35 +0200 Subject: [PATCH 04/54] Mute GatewayMetaStateTests.testAtomicityWithFailures (#39079) Mute test GatewayMetaStateTests.testAtomicityWithFailures --- .../java/org/elasticsearch/gateway/GatewayMetaStateTests.java | 1 + 1 file changed, 1 insertion(+) diff --git a/server/src/test/java/org/elasticsearch/gateway/GatewayMetaStateTests.java b/server/src/test/java/org/elasticsearch/gateway/GatewayMetaStateTests.java index 22259b919ec6f..1f4e0bafe4a3b 100644 --- a/server/src/test/java/org/elasticsearch/gateway/GatewayMetaStateTests.java +++ b/server/src/test/java/org/elasticsearch/gateway/GatewayMetaStateTests.java @@ -374,6 +374,7 @@ private static MetaData 
randomMetaDataForTx() { return builder.build(); } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/39077") public void testAtomicityWithFailures() throws IOException { try (NodeEnvironment env = newNodeEnvironment()) { MetaStateServiceWithFailures metaStateService = From e0691bbc29e5b045209e510f13fe5ebbae0ceb92 Mon Sep 17 00:00:00 2001 From: Albert Zaharovits Date: Tue, 19 Feb 2019 02:17:05 +0200 Subject: [PATCH 05/54] Fix libs:ssl-config project setup (#39074) The build script file for the `:libs:elasticsearch-ssl-config` and `:libs:ssl-config-tests` projects was incorrectly named `eclipse.build.gradle` while the expected name was `eclipse-build.gradle`. In addition, this also adds a missing snippet in the `build.gradle` conf file, that fixes the project setup for Eclipse users. --- libs/ssl-config/build.gradle | 14 ++++++++++++++ .../{eclipse.build.gradle => eclipse-build.gradle} | 2 +- .../{eclipse.build.gradle => eclipse-build.gradle} | 4 ++-- 3 files changed, 17 insertions(+), 3 deletions(-) rename libs/ssl-config/src/main/{eclipse.build.gradle => eclipse-build.gradle} (72%) rename libs/ssl-config/src/test/{eclipse.build.gradle => eclipse-build.gradle} (81%) diff --git a/libs/ssl-config/build.gradle b/libs/ssl-config/build.gradle index 0b8eac5486ccb..85693780a8b71 100644 --- a/libs/ssl-config/build.gradle +++ b/libs/ssl-config/build.gradle @@ -34,9 +34,23 @@ dependencies { testCompile "org.hamcrest:hamcrest-all:${versions.hamcrest}" } +if (isEclipse) { + // in eclipse the project is under a fake root, we need to change around the source sets + sourceSets { + if (project.path == ":libs:ssl-config") { + main.java.srcDirs = ['java'] + main.resources.srcDirs = ['resources'] + } else { + test.java.srcDirs = ['java'] + test.resources.srcDirs = ['resources'] + } + } +} + forbiddenApisMain { replaceSignatureFiles 'jdk-signatures' } + forbiddenPatterns { exclude '**/*.key' exclude '**/*.pem' diff --git a/libs/ssl-config/src/main/eclipse.build.gradle b/libs/ssl-config/src/main/eclipse-build.gradle similarity index 72% rename from libs/ssl-config/src/main/eclipse.build.gradle rename to libs/ssl-config/src/main/eclipse-build.gradle index 58b2d7077120a..be8b9d5b3b0bd 100644 --- a/libs/ssl-config/src/main/eclipse.build.gradle +++ b/libs/ssl-config/src/main/eclipse-build.gradle @@ -1,2 +1,2 @@ -// this is just shell gradle file for eclipse to have separate projects for geo src and tests +// this is just shell gradle file for eclipse to have separate projects for ssl-config src and tests apply from: '../../build.gradle' diff --git a/libs/ssl-config/src/test/eclipse.build.gradle b/libs/ssl-config/src/test/eclipse-build.gradle similarity index 81% rename from libs/ssl-config/src/test/eclipse.build.gradle rename to libs/ssl-config/src/test/eclipse-build.gradle index f8265e3dfed08..aca207a09b7c8 100644 --- a/libs/ssl-config/src/test/eclipse.build.gradle +++ b/libs/ssl-config/src/test/eclipse-build.gradle @@ -1,5 +1,5 @@ -// this is just shell gradle file for eclipse to have separate projects for geo src and tests +// this is just shell gradle file for eclipse to have separate projects for ssl-config src and tests apply from: '../../build.gradle' dependencies { testCompile project(':libs:elasticsearch-ssl-config') -} +} From 04e18ad8a7aeb2e3640159d12a4c2c5de3b94ce5 Mon Sep 17 00:00:00 2001 From: Tim Vernum Date: Tue, 19 Feb 2019 13:45:20 +1100 Subject: [PATCH 06/54] Remove OpenLdapTests.testTcpTimeout (#39081) This test was flaky in CI, and has been disabled for 2.5 years. 
The functionality that it attempted to verify is sufficiently tested in LdapSessionFactoryTests.testBindWithReadTimeout Resolves: #29758 --- .../org/elasticsearch/test/OpenLdapTests.java | 21 ------------------- 1 file changed, 21 deletions(-) diff --git a/x-pack/qa/openldap-tests/src/test/java/org/elasticsearch/test/OpenLdapTests.java b/x-pack/qa/openldap-tests/src/test/java/org/elasticsearch/test/OpenLdapTests.java index bb88103048a40..0cf930d2efd1b 100644 --- a/x-pack/qa/openldap-tests/src/test/java/org/elasticsearch/test/OpenLdapTests.java +++ b/x-pack/qa/openldap-tests/src/test/java/org/elasticsearch/test/OpenLdapTests.java @@ -22,7 +22,6 @@ import org.elasticsearch.xpack.core.security.authc.ldap.SearchGroupsResolverSettings; import org.elasticsearch.xpack.core.security.authc.ldap.support.LdapMetaDataResolverSettings; import org.elasticsearch.xpack.core.security.authc.ldap.support.LdapSearchScope; -import org.elasticsearch.xpack.core.security.authc.ldap.support.SessionFactorySettings; import org.elasticsearch.xpack.core.ssl.SSLConfigurationSettings; import org.elasticsearch.xpack.core.ssl.SSLService; import org.elasticsearch.xpack.core.ssl.VerificationMode; @@ -162,26 +161,6 @@ public void testCustomFilter() throws Exception { } } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/29758") - public void testTcpTimeout() throws Exception { - final RealmConfig.RealmIdentifier realmId = new RealmConfig.RealmIdentifier("ldap", "oldap-test"); - String groupSearchBase = "ou=people,dc=oldap,dc=test,dc=elasticsearch,dc=com"; - String userTemplate = "uid={0},ou=people,dc=oldap,dc=test,dc=elasticsearch,dc=com"; - Settings settings = Settings.builder() - .put(buildLdapSettings(realmId, OPEN_LDAP_DNS_URL, userTemplate, groupSearchBase, LdapSearchScope.SUB_TREE)) - .put(getFullSettingKey(realmId.getName(), SearchGroupsResolverSettings.FILTER), "(objectClass=*)") - .put(getFullSettingKey(realmId, SSLConfigurationSettings.VERIFICATION_MODE_SETTING_REALM), VerificationMode.CERTIFICATE) - .put(getFullSettingKey(realmId, SessionFactorySettings.TIMEOUT_TCP_READ_SETTING), "1ms") - .build(); - RealmConfig config = new RealmConfig(realmId, settings, - TestEnvironment.newEnvironment(globalSettings), new ThreadContext(Settings.EMPTY)); - LdapSessionFactory sessionFactory = new LdapSessionFactory(config, sslService, threadPool); - - LDAPException expected = expectThrows(LDAPException.class, - () -> session(sessionFactory, "thor", PASSWORD_SECURE_STRING).groups(new PlainActionFuture<>())); - assertThat(expected.getMessage(), containsString("A client-side timeout was encountered while waiting")); - } - public void testStandardLdapConnectionHostnameVerificationFailure() throws Exception { //openldap does not use cn as naming attributes by default String groupSearchBase = "ou=people,dc=oldap,dc=test,dc=elasticsearch,dc=com"; From dc02278aa4f3a8146fb9b572d1f6b25345256f16 Mon Sep 17 00:00:00 2001 From: Ioannis Kakavas Date: Tue, 19 Feb 2019 08:45:01 +0200 Subject: [PATCH 07/54] Disable specific locales for tests in fips mode (#38938) * Disable specific locales for tests in fips mode The Bouncy Castle FIPS provider that we use for running our tests in fips mode has an issue with locale sensitive handling of Dates as described in https://github.com/bcgit/bc-java/issues/405 This causes certificate validation to fail if any given test that includes some form of certificate validation happens to run in one of the locales. 
This manifested earlier in #33081 which was handled insufficiently in #33299 This change ensures that the problematic 3 locales * th-TH * ja-JP-u-ca-japanese-x-lvariant-JP * th-TH-u-nu-thai-x-lvariant-TH will not be used when running our tests in a FIPS 140 JVM. It also reverts #33299 --- .../org/elasticsearch/test/ESTestCase.java | 16 +++++++++ .../core/ssl/RestrictedTrustManagerTests.java | 33 ------------------- .../security/authc/saml/SamlTestCase.java | 2 +- .../authc/kerberos/KerberosTestCase.java | 2 +- 4 files changed, 18 insertions(+), 35 deletions(-) diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java index a36018921e9f4..7ce82163d7224 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java @@ -327,6 +327,16 @@ public static void restoreContentType() { Requests.INDEX_CONTENT_TYPE = XContentType.JSON; } + @BeforeClass + public static void ensureSupportedLocale() { + if (isUnusableLocale()) { + Logger logger = LogManager.getLogger(ESTestCase.class); + logger.warn("Attempting to run tests in an unusable locale in a FIPS JVM. Certificate expiration validation will fail, " + + "switching to English. See: https://github.com/bcgit/bc-java/issues/405"); + Locale.setDefault(Locale.ENGLISH); + } + } + @Before public final void before() { logger.info("{}before test", getTestParamsForLogging()); @@ -1419,6 +1429,12 @@ public TestAnalysis(IndexAnalyzers indexAnalyzers, } } + private static boolean isUnusableLocale() { + return inFipsJvm() && (Locale.getDefault().toLanguageTag().equals("th-TH") + || Locale.getDefault().toLanguageTag().equals("ja-JP-u-ca-japanese-x-lvariant-JP") + || Locale.getDefault().toLanguageTag().equals("th-TH-u-nu-thai-x-lvariant-TH")); + } + public static boolean inFipsJvm() { return Security.getProviders()[0].getName().toLowerCase(Locale.ROOT).contains("fips"); } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ssl/RestrictedTrustManagerTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ssl/RestrictedTrustManagerTests.java index 32f75f56da2a9..109722c37c086 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ssl/RestrictedTrustManagerTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ssl/RestrictedTrustManagerTests.java @@ -5,15 +5,11 @@ */ package org.elasticsearch.xpack.core.ssl; -import org.apache.logging.log4j.Logger; -import org.apache.logging.log4j.LogManager; import org.elasticsearch.test.ESTestCase; import org.hamcrest.Description; import org.hamcrest.TypeSafeMatcher; -import org.junit.AfterClass; import org.junit.Assert; import org.junit.Before; -import org.junit.BeforeClass; import javax.net.ssl.X509ExtendedTrustManager; @@ -32,7 +28,6 @@ import java.util.Collections; import java.util.HashMap; import java.util.List; -import java.util.Locale; import java.util.Map; import java.util.Objects; import java.util.regex.Pattern; @@ -45,34 +40,6 @@ public class RestrictedTrustManagerTests extends ESTestCase { private int numberOfClusters; private int numberOfNodes; - private static Locale restoreLocale; - - @BeforeClass - public static void ensureSupportedLocale() throws Exception { - Logger logger = LogManager.getLogger(RestrictedTrustManagerTests.class); - if (isUnusableLocale()) { - // See: https://github.com/elastic/elasticsearch/issues/33081 - logger.warn("Attempting to 
run RestrictedTrustManagerTests tests in an unusable locale in a FIPS JVM. Certificate expiration " + - "validation will fail, switching to English"); - restoreLocale = Locale.getDefault(); - Locale.setDefault(Locale.ENGLISH); - } - } - - private static boolean isUnusableLocale() { - return inFipsJvm() && (Locale.getDefault().toLanguageTag().equals("th-TH") - || Locale.getDefault().toLanguageTag().equals("ja-JP-u-ca-japanese-x-lvariant-JP") - || Locale.getDefault().toLanguageTag().equals("th-TH-u-nu-thai-x-lvariant-TH")); - } - - @AfterClass - public static void restoreLocale() throws Exception { - if (restoreLocale != null) { - Locale.setDefault(restoreLocale); - restoreLocale = null; - } - } - @Before public void readCertificates() throws GeneralSecurityException, IOException { diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/saml/SamlTestCase.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/saml/SamlTestCase.java index 7bf13e8be265c..c35561102020b 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/saml/SamlTestCase.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/saml/SamlTestCase.java @@ -53,7 +53,7 @@ private static boolean isTurkishLocale() { } @AfterClass - public static void restoreLocale() throws Exception { + public static void restoreLocale() { if (restoreLocale != null) { Locale.setDefault(restoreLocale); restoreLocale = null; diff --git a/x-pack/qa/evil-tests/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosTestCase.java b/x-pack/qa/evil-tests/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosTestCase.java index ecaf67205ac80..6754b1acb9347 100644 --- a/x-pack/qa/evil-tests/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosTestCase.java +++ b/x-pack/qa/evil-tests/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosTestCase.java @@ -98,7 +98,7 @@ public static void setupKerberos() throws Exception { } @AfterClass - public static void restoreLocale() throws Exception { + public static void restoreLocale() { if (restoreLocale != null) { Locale.setDefault(restoreLocale); restoreLocale = null; From cddd1e35f963eb58e862bb37c274b974cbe37c99 Mon Sep 17 00:00:00 2001 From: Martijn van Groningen Date: Tue, 19 Feb 2019 08:53:20 +0100 Subject: [PATCH 08/54] Fix shard follow task startup error handling (#39053) Prior to this commit, if during fetch leader / follower GCP a fatal error occurred, then the shard follow task was removed. This is unexpected, because if such an error occurs during the lifetime of shard follow task then replication is stopped and the fatal error flag is set. This allows the ccr stats api to report the fatal exception that has occurred (instead of the user grepping through the elasticsearch logs). This issue was found by a rare failure of the `FollowStatsIT#testFollowStatsApiIncludeShardFollowStatsWithRemovedFollowerIndex` test. 
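A minimal, self-contained sketch of the pattern described above, assuming hypothetical class and method names rather than the actual ShardFollowNodeTask code: on a non-retryable error the task records the exception for the stats API instead of marking itself failed, so it stays registered and the failure remains visible.

import java.util.concurrent.atomic.AtomicReference;

class FollowTaskErrorHandlingSketch {

    private final AtomicReference<Exception> fatalException = new AtomicReference<>();

    void handleFailure(final Exception e, final boolean retryable) {
        if (retryable) {
            // schedule another attempt after a backoff delay (omitted in this sketch)
        } else {
            // keep the task alive; remember the fatal error so a stats API can report it
            fatalException.set(e);
        }
    }

    Exception fatalException() {
        return fatalException.get();
    }

}
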
Closes #38779 --- .../xpack/ccr/action/ShardFollowNodeTask.java | 8 ++++++-- .../xpack/ccr/action/ShardFollowTasksExecutor.java | 2 +- .../java/org/elasticsearch/xpack/ccr/FollowStatsIT.java | 1 - 3 files changed, 7 insertions(+), 4 deletions(-) diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTask.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTask.java index a4f02707bc40f..3918b815e9150 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTask.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTask.java @@ -452,11 +452,15 @@ private void handleFailure(Exception e, AtomicInteger retryCounter, Runnable tas scheduler.accept(TimeValue.timeValueMillis(delay), task); } } else { - fatalException = ExceptionsHelper.convertToElastic(e); - LOGGER.warn("shard follow task encounter non-retryable error", e); + setFatalException(e); } } + void setFatalException(Exception e) { + fatalException = ExceptionsHelper.convertToElastic(e); + LOGGER.warn("shard follow task encounter non-retryable error", e); + } + static long computeDelay(int currentRetry, long maxRetryDelayInMillis) { // Cap currentRetry to avoid overflow when computing n variable int maxCurrentRetry = Math.min(currentRetry, 24); diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowTasksExecutor.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowTasksExecutor.java index 1a1a60678de42..46b3c6e54f576 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowTasksExecutor.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowTasksExecutor.java @@ -282,7 +282,7 @@ protected void nodeOperation(final AllocatedPersistentTask task, final ShardFoll shardFollowNodeTask), e); threadPool.schedule(() -> nodeOperation(task, params, state), params.getMaxRetryDelay(), Ccr.CCR_THREAD_POOL_NAME); } else { - shardFollowNodeTask.markAsFailed(e); + shardFollowNodeTask.setFatalException(e); } }; diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/FollowStatsIT.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/FollowStatsIT.java index e6662f3770d24..1f1c6cd5c64e3 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/FollowStatsIT.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/FollowStatsIT.java @@ -149,7 +149,6 @@ public void testFollowStatsApiResourceNotFound() throws Exception { assertAcked(client().execute(PauseFollowAction.INSTANCE, new PauseFollowAction.Request("follower1")).actionGet()); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/38779") public void testFollowStatsApiIncludeShardFollowStatsWithRemovedFollowerIndex() throws Exception { final String leaderIndexSettings = getIndexSettings(1, 0, singletonMap(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), "true")); From 3725823f45ff7071330da8da8a0095160c3ed895 Mon Sep 17 00:00:00 2001 From: Hendrik Muhs Date: Tue, 19 Feb 2019 09:15:23 +0100 Subject: [PATCH 09/54] mute tests, see 37117 --- .../xpack/ml/integration/MlDistributedFailureIT.java | 2 ++ 1 file changed, 2 insertions(+) diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/MlDistributedFailureIT.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/MlDistributedFailureIT.java index 
d68fe5225fb16..bfe90f1a03166 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/MlDistributedFailureIT.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/MlDistributedFailureIT.java @@ -62,6 +62,7 @@ protected Settings nodeSettings(int nodeOrdinal) { .build(); } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/37117") public void testFailOver() throws Exception { internalCluster().ensureAtLeastNumDataNodes(3); ensureStableClusterOnAllNodes(3); @@ -105,6 +106,7 @@ public void testLoseDedicatedMasterNode() throws Exception { }); } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/37117") public void testFullClusterRestart() throws Exception { internalCluster().ensureAtLeastNumDataNodes(3); ensureStableClusterOnAllNodes(3); From c9f08d85ed0c4ce942a4f20f62c3e476a276c153 Mon Sep 17 00:00:00 2001 From: Dimitrios Liappis Date: Tue, 19 Feb 2019 10:54:11 +0200 Subject: [PATCH 10/54] Remove unnecessary Dockerfile commands (#39040) As the Dockerfile evolved we don't need anymore certain commands like `unzip`, `which` and `wget` allowing us to slightly shrink the images. --- distribution/docker/src/docker/Dockerfile | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/distribution/docker/src/docker/Dockerfile b/distribution/docker/src/docker/Dockerfile index c202fa78668f5..aad9c05b11d55 100644 --- a/distribution/docker/src/docker/Dockerfile +++ b/distribution/docker/src/docker/Dockerfile @@ -23,8 +23,6 @@ RUN curl --retry 8 -s ${jdkUrl} | tar -C /opt -zxf - # REF: https://github.com/elastic/elasticsearch-docker/issues/171 RUN ln -sf /etc/pki/ca-trust/extracted/java/cacerts /opt/jdk-${jdkVersion}/lib/security/cacerts -RUN yum install -y unzip which - RUN groupadd -g 1000 elasticsearch && \ adduser -u 1000 -g 1000 -d /usr/share/elasticsearch elasticsearch @@ -51,7 +49,7 @@ ENV JAVA_HOME /opt/jdk-${jdkVersion} COPY --from=builder /opt/jdk-${jdkVersion} /opt/jdk-${jdkVersion} RUN yum update -y && \ - yum install -y nc unzip wget which && \ + yum install -y nc && \ yum clean all RUN groupadd -g 1000 elasticsearch && \ From 90d46a804fba8e12c5b7a113c1583fa6fd7406cd Mon Sep 17 00:00:00 2001 From: Alpar Torok Date: Tue, 19 Feb 2019 11:15:35 +0200 Subject: [PATCH 11/54] Make specifying java home mandatory for testclusters (#37864) Since we build and test with multiple versions it's better to be explicit about it. 
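For readers skimming the diff below, a hedged, standalone sketch of the "validate at freeze time" approach this change adopts, with illustrative class and method names that are not copied from the build plugin: the Java home must be set explicitly before the cluster configuration is frozen, and the old fallbacks to the JAVA_HOME environment variable or default java detection are gone. Failing before the node process is started keeps the error close to the misconfiguration.

import java.io.File;
import java.util.Objects;

class TestClusterNodeConfigSketch {

    private File javaHome;
    private boolean frozen;

    void setJavaHome(final File javaHome) {
        if (frozen) {
            throw new IllegalStateException("configuration is already frozen");
        }
        this.javaHome = javaHome;
    }

    void freeze() {
        // failing here, before the node is started, is what makes the setting mandatory
        Objects.requireNonNull(javaHome, "javaHome must be configured for this test cluster");
        frozen = true;
    }

    String javaHomeForEnvironment() {
        // always derived from the explicit setting; no environment or detection fallback
        return javaHome.getAbsolutePath();
    }

}
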
--- .../gradle/testclusters/ElasticsearchNode.java | 12 ++---------- 1 file changed, 2 insertions(+), 10 deletions(-) diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java index 4deb47f15ae4e..b494c3be36418 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java @@ -129,6 +129,7 @@ public void setDistribution(Distribution distribution) { public void freeze() { requireNonNull(distribution, "null distribution passed when configuring test cluster `" + this + "`"); requireNonNull(version, "null version passed when configuring test cluster `" + this + "`"); + requireNonNull(javaHome, "null javaHome passed when configuring test cluster `" + this + "`"); logger.info("Locking configuration of `{}`", this); configurationFrozen.set(true); } @@ -204,16 +205,7 @@ private void startElasticsearchProcess(Path distroArtifact) { Map environment = processBuilder.environment(); // Don't inherit anything from the environment for as that would lack reproductability environment.clear(); - if (javaHome != null) { - environment.put("JAVA_HOME", getJavaHome().getAbsolutePath()); - } else if (System.getenv().get("JAVA_HOME") != null) { - logger.warn("{}: No java home configured will use it from environment: {}", - this, System.getenv().get("JAVA_HOME") - ); - environment.put("JAVA_HOME", System.getenv().get("JAVA_HOME")); - } else { - logger.warn("{}: No javaHome configured, will rely on default java detection", this); - } + environment.put("JAVA_HOME", getJavaHome().getAbsolutePath()); environment.put("ES_PATH_CONF", configFile.getParent().toAbsolutePath().toString()); environment.put("ES_JAVA_OPTIONS", "-Xms512m -Xmx512m"); // don't buffer all in memory, make sure we don't block on the default pipes From f3e8d66ffb7a145abf2cd74776deba2c0f0e311c Mon Sep 17 00:00:00 2001 From: Jim Ferenczi Date: Tue, 19 Feb 2019 10:47:59 +0100 Subject: [PATCH 12/54] Remove beta marker from the synonym_graph docs (#38185) --- .../tokenfilters/synonym-graph-tokenfilter.asciidoc | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/docs/reference/analysis/tokenfilters/synonym-graph-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/synonym-graph-tokenfilter.asciidoc index 2a555d7d044da..2cdf51e51f230 100644 --- a/docs/reference/analysis/tokenfilters/synonym-graph-tokenfilter.asciidoc +++ b/docs/reference/analysis/tokenfilters/synonym-graph-tokenfilter.asciidoc @@ -1,8 +1,6 @@ [[analysis-synonym-graph-tokenfilter]] === Synonym Graph Token Filter -beta[] - The `synonym_graph` token filter allows to easily handle synonyms, including multi-word synonyms correctly during the analysis process. @@ -187,3 +185,8 @@ multiple versions of a token may choose which version of the token to emit when parsing synonyms, e.g. `asciifolding` will only produce the folded version of the token. Others, e.g. `multiplexer`, `word_delimiter_graph` or `ngram` will throw an error. + +WARNING:The synonym rules should not contain words that are removed by +a filter that appears after in the chain (a `stop` filter for instance). +Removing a term from a synonym rule breaks the matching at query time. 
+ From df60f6866affac359cf1e6db8ab4ee1d14621949 Mon Sep 17 00:00:00 2001 From: Hendrik Muhs Date: Tue, 19 Feb 2019 10:53:50 +0100 Subject: [PATCH 13/54] add debug info for intermittent test failure --- .../test/java/org/elasticsearch/client/IndicesClientIT.java | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java index 0f152551ddc3e..d9adf61782b3d 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java @@ -1261,7 +1261,8 @@ public void testGetAlias() throws IOException { GetAliasesResponse getAliasesResponse = execute(getAliasesRequest, highLevelClient().indices()::getAlias, highLevelClient().indices()::getAliasAsync); - assertThat(getAliasesResponse.getAliases().size(), equalTo(3)); + assertThat("Unexpected number of aliases, got: " + getAliasesResponse.getAliases().toString(), + getAliasesResponse.getAliases().size(), equalTo(3)); assertThat(getAliasesResponse.getAliases().get("index1").size(), equalTo(1)); AliasMetaData aliasMetaData1 = getAliasesResponse.getAliases().get("index1").iterator().next(); assertThat(aliasMetaData1, notNullValue()); From 87bb5d08726b70c45ae41b0927bfc420b103b634 Mon Sep 17 00:00:00 2001 From: David Turner Date: Tue, 19 Feb 2019 10:00:46 +0000 Subject: [PATCH 14/54] repository-s3 also works with S3-compatibles (#38524) - Notes that you can adjust the `s3.client.*.endpoint` setting to point to a repository held on an S3-compatible service. - Notes that the default is `s3.amazonaws.com` and not to auto-detect the endpoint. - Reformats docs to width. Closes #35925 --- docs/plugins/repository-s3.asciidoc | 218 +++++++++++++++++----------- 1 file changed, 133 insertions(+), 85 deletions(-) diff --git a/docs/plugins/repository-s3.asciidoc b/docs/plugins/repository-s3.asciidoc index e7ab83ca6e69b..43077d27b54b6 100644 --- a/docs/plugins/repository-s3.asciidoc +++ b/docs/plugins/repository-s3.asciidoc @@ -1,21 +1,25 @@ [[repository-s3]] === S3 Repository Plugin -The S3 repository plugin adds support for using S3 as a repository for +The S3 repository plugin adds support for using AWS S3 as a repository for {ref}/modules-snapshots.html[Snapshot/Restore]. -*If you are looking for a hosted solution of Elasticsearch on AWS, please visit http://www.elastic.co/cloud.* +*If you are looking for a hosted solution of Elasticsearch on AWS, please visit +http://www.elastic.co/cloud.* :plugin_name: repository-s3 include::install_remove.asciidoc[] [[repository-s3-usage]] -==== Getting started with AWS +==== Getting Started -The plugin provides a repository type named `s3` which may be used when creating a repository. -The repository defaults to using https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-iam-roles.html[ECS IAM Role] or -http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html[EC2 IAM Role] -credentials for authentication. The only mandatory setting is the bucket name: +The plugin provides a repository type named `s3` which may be used when creating +a repository. The repository defaults to using +https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-iam-roles.html[ECS +IAM Role] or +http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html[EC2 +IAM Role] credentials for authentication. 
The only mandatory setting is the +bucket name: [source,js] ---- @@ -34,10 +38,10 @@ PUT _snapshot/my_s3_repository [[repository-s3-client]] ==== Client Settings -The client that you use to connect to S3 has a number of settings available. The -settings have the form `s3.client.CLIENT_NAME.SETTING_NAME`. The default client -name that is looked up by an `s3` repository is `default`. It can be modified -using the <> `client`. For example: +The client that you use to connect to S3 has a number of settings available. +The settings have the form `s3.client.CLIENT_NAME.SETTING_NAME`. By default, +`s3` repositories use a client named `default`, but this can be modified using +the <> `client`. For example: [source,js] ---- @@ -51,7 +55,7 @@ PUT _snapshot/my_s3_repository } ---- // CONSOLE -// TEST[skip:we don't have s3 setup while testing this] +// TEST[skip:we don't have S3 setup while testing this] Most client settings can be added to the `elasticsearch.yml` configuration file with the exception of the secure settings, which you add to the {es} keystore. @@ -74,9 +78,9 @@ contents, will utilize the latest settings from the keystore. Any existing `s3` repositories, as well as any newly created ones, will pick up the new values stored in the keystore. -NOTE: In progress snapshot/restore tasks will not be preempted by a *reload* -of the client's secure settings. The task will complete using the client as it -was built when the operation started. +NOTE: In-progress snapshot/restore tasks will not be preempted by a *reload* of +the client's secure settings. The task will complete using the client as it was +built when the operation started. The following list contains the available client settings. Those that must be stored in the keystore are marked as "secure" and are *reloadable*; the other @@ -84,61 +88,86 @@ settings belong in the `elasticsearch.yml` file. `access_key` ({ref}/secure-settings.html[Secure]):: - An s3 access key. The `secret_key` setting must also be specified. + An S3 access key. The `secret_key` setting must also be specified. `secret_key` ({ref}/secure-settings.html[Secure]):: - An s3 secret key. The `access_key` setting must also be specified. + An S3 secret key. The `access_key` setting must also be specified. `session_token`:: - An s3 session token. The `access_key` and `secret_key` settings must also - be specified. (Secure) + + An S3 session token. The `access_key` and `secret_key` settings must also be + specified. (Secure) `endpoint`:: - The s3 service endpoint to connect to. This will be automatically - figured out by the s3 client based on the bucket location, but - can be specified explicitly. See http://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region. + The S3 service endpoint to connect to. This defaults to `s3.amazonaws.com` + but the + http://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region[AWS + documentation] lists alternative S3 endpoints. If you are using an + <> then you should + set this to the service's endpoint. `protocol`:: - The protocol to use to connect to s3. Valid values are either `http` - or `https`. Defaults to `https`. + The protocol to use to connect to S3. Valid values are either `http` or + `https`. Defaults to `https`. `proxy.host`:: - The host name of a proxy to connect to s3 through. + The host name of a proxy to connect to S3 through. `proxy.port`:: - The port of a proxy to connect to s3 through. + The port of a proxy to connect to S3 through. 
`proxy.username` ({ref}/secure-settings.html[Secure]):: - The username to connect to the `proxy.host` with. + The username to connect to the `proxy.host` with. `proxy.password` ({ref}/secure-settings.html[Secure]):: - The password to connect to the `proxy.host` with. + The password to connect to the `proxy.host` with. `read_timeout`:: - The socket timeout for connecting to s3. The value should specify the unit. For example, - a value of `5s` specifies a 5 second timeout. The default value is 50 seconds. + The socket timeout for connecting to S3. The value should specify the unit. + For example, a value of `5s` specifies a 5 second timeout. The default value + is 50 seconds. `max_retries`:: - The number of retries to use when an s3 request fails. The default value is 3. + The number of retries to use when an S3 request fails. The default value is + `3`. `use_throttle_retries`:: - Whether retries should be throttled (ie use backoff). Must be `true` or `false`. Defaults to `true`. + Whether retries should be throttled (i.e. should back off). Must be `true` + or `false`. Defaults to `true`. + +[float] +[[repository-s3-compatible-services]] +===== S3-compatible services + +There are a number of storage systems that provide an S3-compatible API, and +the `repository-s3` plugin allows you to use these systems in place of AWS S3. +To do so, you should set the `s3.client.CLIENT_NAME.endpoint` setting to the +system's endpoint. This setting accepts IP addresses and hostnames and may +include a port. For example, the endpoint may be `172.17.0.2` or +`172.17.0.2:9000`. You may also need to set `s3.client.CLIENT_NAME.protocol` to +`http` if the endpoint does not support HTTPS. + +https://minio.io[Minio] is an example of a storage system that provides an +S3-compatible API. The `repository-s3` plugin allows {es} to work with +Minio-backed repositories as well as repositories stored on AWS S3. Other +S3-compatible storage systems may also work with {es}, but these are not tested +or supported. [[repository-s3-repository]] ==== Repository Settings -The `s3` repository type supports a number of settings to customize how data is stored in S3. -These can be specified when creating the repository. For example: +The `s3` repository type supports a number of settings to customize how data is +stored in S3. These can be specified when creating the repository. For example: [source,js] ---- @@ -152,7 +181,7 @@ PUT _snapshot/my_s3_repository } ---- // CONSOLE -// TEST[skip:we don't have s3 set up while testing this] +// TEST[skip:we don't have S3 set up while testing this] The following settings are supported: @@ -162,21 +191,21 @@ The following settings are supported: `client`:: - The name of the s3 client to use to connect to S3. Defaults to `default`. + The name of the <> to use to connect to S3. + Defaults to `default`. `base_path`:: - Specifies the path within bucket to repository data. Defaults to - value of `repositories.s3.base_path` or to root directory if not set. - Previously, the base_path could take a leading `/` (forward slash). - However, this has been deprecated and setting the base_path now should - omit the leading `/`. + Specifies the path within bucket to repository data. Defaults to value of + `repositories.s3.base_path` or to root directory if not set. Previously, + the base_path could take a leading `/` (forward slash). However, this has + been deprecated and setting the base_path now should omit the leading `/`. 
`chunk_size`:: - Big files can be broken down into chunks during snapshotting if needed. - The chunk size can be specified in bytes or by using size value notation, - i.e. `1gb`, `10mb`, `5kb`. Defaults to `1gb`. + Big files can be broken down into chunks during snapshotting if needed. The + chunk size can be specified in bytes or by using size value notation, i.e. + `1gb`, `10mb`, `5kb`. Defaults to `1gb`. `compress`:: @@ -191,41 +220,49 @@ The following settings are supported: `buffer_size`:: - Minimum threshold below which the chunk is uploaded using a single - request. Beyond this threshold, the S3 repository will use the - http://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html[AWS Multipart Upload API] - to split the chunk into several parts, each of `buffer_size` length, and - to upload each part in its own request. Note that setting a buffer - size lower than `5mb` is not allowed since it will prevent the use of the - Multipart API and may result in upload errors. It is also not possible to - set a buffer size greater than `5gb` as it is the maximum upload size - allowed by S3. Defaults to the minimum between `100mb` and `5%` of the heap size. + Minimum threshold below which the chunk is uploaded using a single request. + Beyond this threshold, the S3 repository will use the + http://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html[AWS + Multipart Upload API] to split the chunk into several parts, each of + `buffer_size` length, and to upload each part in its own request. Note that + setting a buffer size lower than `5mb` is not allowed since it will prevent + the use of the Multipart API and may result in upload errors. It is also not + possible to set a buffer size greater than `5gb` as it is the maximum upload + size allowed by S3. Defaults to the minimum between `100mb` and `5%` of the + heap size. `canned_acl`:: - The S3 repository supports all http://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl[S3 canned ACLs] - : `private`, `public-read`, `public-read-write`, `authenticated-read`, `log-delivery-write`, - `bucket-owner-read`, `bucket-owner-full-control`. Defaults to `private`. - You could specify a canned ACL using the `canned_acl` setting. When the S3 repository - creates buckets and objects, it adds the canned ACL into the buckets and objects. + The S3 repository supports all + http://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl[S3 + canned ACLs] : `private`, `public-read`, `public-read-write`, + `authenticated-read`, `log-delivery-write`, `bucket-owner-read`, + `bucket-owner-full-control`. Defaults to `private`. You could specify a + canned ACL using the `canned_acl` setting. When the S3 repository creates + buckets and objects, it adds the canned ACL into the buckets and objects. `storage_class`:: Sets the S3 storage class for objects stored in the snapshot repository. - Values may be `standard`, `reduced_redundancy`, `standard_ia`. - Defaults to `standard`. Changing this setting on an existing repository - only affects the storage class for newly created objects, resulting in a - mixed usage of storage classes. Additionally, S3 Lifecycle Policies can - be used to manage the storage class of existing objects. - Due to the extra complexity with the Glacier class lifecycle, it is not - currently supported by the plugin. 
For more information about the - different classes, see http://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html[AWS Storage Classes Guide] - -NOTE: The option of defining client settings in the repository settings as documented below is considered deprecated: - -In addition to the above settings, you may also specify all non-secure client settings in the repository settings. -In this case, the client settings found in the repository settings will be merged with those of the named client used by the repository. -Conflicts between client and repository settings are resolved by the repository settings taking precedence over client settings. + Values may be `standard`, `reduced_redundancy`, `standard_ia`. Defaults to + `standard`. Changing this setting on an existing repository only affects the + storage class for newly created objects, resulting in a mixed usage of + storage classes. Additionally, S3 Lifecycle Policies can be used to manage + the storage class of existing objects. Due to the extra complexity with the + Glacier class lifecycle, it is not currently supported by the plugin. For + more information about the different classes, see + http://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html[AWS + Storage Classes Guide] + +NOTE: The option of defining client settings in the repository settings as +documented below is considered deprecated, and will be removed in a future +version. + +In addition to the above settings, you may also specify all non-secure client +settings in the repository settings. In this case, the client settings found in +the repository settings will be merged with those of the named client used by +the repository. Conflicts between client and repository settings are resolved +by the repository settings taking precedence over client settings. For example: @@ -244,16 +281,19 @@ PUT _snapshot/my_s3_repository // CONSOLE // TEST[skip:we don't have s3 set up while testing this] -This sets up a repository that uses all client settings from the client `my_client_named` except for the `endpoint` that is overridden -to `my.s3.endpoint` by the repository settings. +This sets up a repository that uses all client settings from the client +`my_client_name` except for the `endpoint` that is overridden to +`my.s3.endpoint` by the repository settings. [[repository-s3-permissions]] ===== Recommended S3 Permissions -In order to restrict the Elasticsearch snapshot process to the minimum required resources, we recommend using Amazon -IAM in conjunction with pre-existing S3 buckets. Here is an example policy which will allow the snapshot access to an - S3 bucket named "snaps.example.com". This may be configured through the AWS IAM console, by creating a Custom Policy, - and using a Policy Document similar to this (changing snaps.example.com to your bucket name). +In order to restrict the Elasticsearch snapshot process to the minimum required +resources, we recommend using Amazon IAM in conjunction with pre-existing S3 +buckets. Here is an example policy which will allow the snapshot access to an S3 +bucket named "snaps.example.com". This may be configured through the AWS IAM +console, by creating a Custom Policy, and using a Policy Document similar to +this (changing snaps.example.com to your bucket name). [source,js] ---- @@ -290,7 +330,8 @@ IAM in conjunction with pre-existing S3 buckets. Here is an example policy which ---- // NOTCONSOLE -You may further restrict the permissions by specifying a prefix within the bucket, in this example, named "foo". 
+You may further restrict the permissions by specifying a prefix within the +bucket, in this example, named "foo". [source,js] ---- @@ -334,16 +375,23 @@ You may further restrict the permissions by specifying a prefix within the bucke ---- // NOTCONSOLE -The bucket needs to exist to register a repository for snapshots. If you did not create the bucket then the repository -registration will fail. +The bucket needs to exist to register a repository for snapshots. If you did not +create the bucket then the repository registration will fail. -Note: Starting in version 7.0, all bucket operations are using the path style access pattern. In previous versions the decision to use virtual hosted style -or path style access was made by the AWS Java SDK. +Note: Starting in version 7.0, all bucket operations are using the path style +access pattern. In previous versions the decision to use virtual hosted style or +path style access was made by the AWS Java SDK. [[repository-s3-aws-vpc]] [float] ==== AWS VPC Bandwidth Settings -AWS instances resolve S3 endpoints to a public IP. If the Elasticsearch instances reside in a private subnet in an AWS VPC then all traffic to S3 will go through that VPC's NAT instance. If your VPC's NAT instance is a smaller instance size (e.g. a t1.micro) or is handling a high volume of network traffic your bandwidth to S3 may be limited by that NAT instance's networking bandwidth limitations. +AWS instances resolve S3 endpoints to a public IP. If the Elasticsearch +instances reside in a private subnet in an AWS VPC then all traffic to S3 will +go through that VPC's NAT instance. If your VPC's NAT instance is a smaller +instance size (e.g. a t1.micro) or is handling a high volume of network traffic +your bandwidth to S3 may be limited by that NAT instance's networking bandwidth +limitations. -Instances residing in a public subnet in an AWS VPC will connect to S3 via the VPC's internet gateway and not be bandwidth limited by the VPC's NAT instance. +Instances residing in a public subnet in an AWS VPC will connect to S3 via the +VPC's internet gateway and not be bandwidth limited by the VPC's NAT instance. From bc2fe70e55d56f6d0339aca2917b49492309fd06 Mon Sep 17 00:00:00 2001 From: Adrien Grand Date: Tue, 19 Feb 2019 11:02:22 +0100 Subject: [PATCH 15/54] Don't swallow IOExceptions in InternalTestCluster. 
(#39068) Relates #39030 --- .../org/elasticsearch/test/InternalTestCluster.java | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java index 0aee6c45a9129..2c02abab9dc1d 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java +++ b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java @@ -844,15 +844,18 @@ public synchronized Client smartClient() { } @Override - public synchronized void close() { + public synchronized void close() throws IOException { if (this.open.compareAndSet(true, false)) { if (activeDisruptionScheme != null) { activeDisruptionScheme.testClusterClosed(); activeDisruptionScheme = null; } - IOUtils.closeWhileHandlingException(nodes.values()); - nodes.clear(); - executor.shutdownNow(); + try { + IOUtils.close(nodes.values()); + } finally { + nodes.clear(); + executor.shutdownNow(); + } } } From 3f8e0229881a8197d0e7dd336a95a58032841265 Mon Sep 17 00:00:00 2001 From: Alan Woodward Date: Tue, 19 Feb 2019 10:11:01 +0000 Subject: [PATCH 16/54] Adjust backwards-compatibility versions for before,after,overlapping interval filters (#39062) Follow up to #38999, this commit re-enables backwards-compatibility REST tests for `before`, `after` and `overlapping` filters against versions 7.1 and up. --- .../rest-api-spec/test/search/230_interval_query.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/230_interval_query.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/230_interval_query.yml index 1d4f1883ef5f5..8d7a77cac8859 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search/230_interval_query.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/230_interval_query.yml @@ -326,7 +326,7 @@ setup: --- "Test overlapping": - skip: - version: " - 7.9.99" + version: " - 7.0.99" reason: "Implemented in 7.1" - do: search: @@ -349,7 +349,7 @@ setup: --- "Test before": - skip: - version: " - 7.9.99" + version: " - 7.0.99" reason: "Implemented in 7.1" - do: search: @@ -369,7 +369,7 @@ setup: --- "Test after": - skip: - version: " - 7.9.99" + version: " - 7.0.99" reason: "Implemented in 7.1" - do: search: From a459cb0ad852487035c5b50c4418e263f48c1075 Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Tue, 19 Feb 2019 12:20:47 +0100 Subject: [PATCH 17/54] Don't Ping on Handshake Connection (#39076) * Don't Ping on Handshake Connection * It does not make sense to run pings on the handshake connection * Set the ping interval to `-1` to deactivate pings on it --- .../HandshakingTransportAddressConnector.java | 3 ++- .../discovery/zen/UnicastZenPing.java | 3 ++- .../transport/ConnectionProfile.java | 17 ----------------- 3 files changed, 4 insertions(+), 19 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/discovery/HandshakingTransportAddressConnector.java b/server/src/main/java/org/elasticsearch/discovery/HandshakingTransportAddressConnector.java index 656bfbe4f98bb..7f2512f97f87b 100644 --- a/server/src/main/java/org/elasticsearch/discovery/HandshakingTransportAddressConnector.java +++ b/server/src/main/java/org/elasticsearch/discovery/HandshakingTransportAddressConnector.java @@ -80,7 +80,8 @@ protected void doRun() throws Exception { logger.trace("[{}] opening probe connection", this); final Connection connection = 
transportService.openConnection(targetNode, - ConnectionProfile.buildSingleChannelProfile(Type.REG, probeConnectTimeout, probeHandshakeTimeout)); + ConnectionProfile.buildSingleChannelProfile(Type.REG, probeConnectTimeout, probeHandshakeTimeout, + TimeValue.MINUS_ONE, null)); logger.trace("[{}] opened probe connection", this); final DiscoveryNode remoteNode; diff --git a/server/src/main/java/org/elasticsearch/discovery/zen/UnicastZenPing.java b/server/src/main/java/org/elasticsearch/discovery/zen/UnicastZenPing.java index 59b8b9513d6aa..d0cd0e27976fd 100644 --- a/server/src/main/java/org/elasticsearch/discovery/zen/UnicastZenPing.java +++ b/server/src/main/java/org/elasticsearch/discovery/zen/UnicastZenPing.java @@ -293,7 +293,8 @@ protected void ping(final Consumer resultsConsumer, } final ConnectionProfile connectionProfile = - ConnectionProfile.buildSingleChannelProfile(TransportRequestOptions.Type.REG, requestDuration, requestDuration); + ConnectionProfile.buildSingleChannelProfile(TransportRequestOptions.Type.REG, requestDuration, requestDuration, + TimeValue.MINUS_ONE, null); final PingingRound pingingRound = new PingingRound(pingingRoundIdGenerator.incrementAndGet(), seedAddresses, resultsConsumer, nodes.getLocalNode(), connectionProfile); activePingingRounds.put(pingingRound.id(), pingingRound); diff --git a/server/src/main/java/org/elasticsearch/transport/ConnectionProfile.java b/server/src/main/java/org/elasticsearch/transport/ConnectionProfile.java index 07d4818ffafa1..66db091557f48 100644 --- a/server/src/main/java/org/elasticsearch/transport/ConnectionProfile.java +++ b/server/src/main/java/org/elasticsearch/transport/ConnectionProfile.java @@ -101,23 +101,6 @@ public static ConnectionProfile buildSingleChannelProfile(TransportRequestOption return buildSingleChannelProfile(channelType, null, null, null, null); } - /** - * Builds a connection profile that is dedicated to a single channel type. Allows passing compression - * settings. - */ - public static ConnectionProfile buildSingleChannelProfile(TransportRequestOptions.Type channelType, boolean compressionEnabled) { - return buildSingleChannelProfile(channelType, null, null, null, compressionEnabled); - } - - /** - * Builds a connection profile that is dedicated to a single channel type. Allows passing connection and - * handshake timeouts. - */ - public static ConnectionProfile buildSingleChannelProfile(TransportRequestOptions.Type channelType, @Nullable TimeValue connectTimeout, - @Nullable TimeValue handshakeTimeout) { - return buildSingleChannelProfile(channelType, connectTimeout, handshakeTimeout, null, null); - } - /** * Builds a connection profile that is dedicated to a single channel type. Allows passing connection and * handshake timeouts and compression settings. 
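For context on the ConnectionProfile change above, here is a minimal, hypothetical sketch (not code from this patch series) of how a probe connection profile with pings disabled can be built with the five-argument buildSingleChannelProfile overload that remains; the timeout values are placeholders rather than the production settings:

    import org.elasticsearch.common.unit.TimeValue;
    import org.elasticsearch.transport.ConnectionProfile;
    import org.elasticsearch.transport.TransportRequestOptions;

    public class ProbeConnectionProfileSketch {
        // Build a single-channel REG profile for a short-lived probe/handshake
        // connection: a ping interval of TimeValue.MINUS_ONE (-1) deactivates pings,
        // and a null compression flag leaves compression at its default.
        public static ConnectionProfile probeProfile() {
            return ConnectionProfile.buildSingleChannelProfile(
                TransportRequestOptions.Type.REG,
                TimeValue.timeValueSeconds(30),  // connect timeout (placeholder)
                TimeValue.timeValueSeconds(30),  // handshake timeout (placeholder)
                TimeValue.MINUS_ONE,             // ping interval: disables pings
                null);                           // compression: use the default
        }
    }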
From a54d1c61f5d57cb5dfe2ca31c1beb2bc2814a635 Mon Sep 17 00:00:00 2001 From: Hendrik Muhs Date: Tue, 19 Feb 2019 13:36:04 +0100 Subject: [PATCH 18/54] mute testDelayedMappingPropagationOnReplica --- .../elasticsearch/cluster/coordination/RareClusterStateIT.java | 1 + 1 file changed, 1 insertion(+) diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/RareClusterStateIT.java b/server/src/test/java/org/elasticsearch/cluster/coordination/RareClusterStateIT.java index dc3cd4eb46f94..da19fcf1c2415 100644 --- a/server/src/test/java/org/elasticsearch/cluster/coordination/RareClusterStateIT.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/RareClusterStateIT.java @@ -277,6 +277,7 @@ public void testDelayedMappingPropagationOnPrimary() throws Exception { }); } + @AwaitsFix(bugUrl="https://github.com/elastic/elasticsearch/issues/36813") public void testDelayedMappingPropagationOnReplica() throws Exception { // This is essentially the same thing as testDelayedMappingPropagationOnPrimary // but for replicas From 01d6263a7447ee45f9ae39ff4737a7b445bfab2a Mon Sep 17 00:00:00 2001 From: Alexander Reelsen Date: Tue, 19 Feb 2019 14:11:52 +0100 Subject: [PATCH 19/54] Fix DateFormatters.parseMillis when no timezone is given (#39100) The parseMillis method was able to work on formats without timezones by falling back to UTC. The Date Formatter interface did not support this, as the calling code was using the `Instant.from` java time API. This switches over to an internal method which adds UTC as a timezone. Closes #39067 --- .../java/org/elasticsearch/common/time/DateFormatter.java | 2 +- .../common/joda/JavaJodaTimeDuellingTests.java | 6 ++++++ 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/server/src/main/java/org/elasticsearch/common/time/DateFormatter.java b/server/src/main/java/org/elasticsearch/common/time/DateFormatter.java index aeea14ee1f011..bf7999067b05a 100644 --- a/server/src/main/java/org/elasticsearch/common/time/DateFormatter.java +++ b/server/src/main/java/org/elasticsearch/common/time/DateFormatter.java @@ -47,7 +47,7 @@ public interface DateFormatter { * Parse the given input into millis-since-epoch. 
*/ default long parseMillis(String input) { - return Instant.from(parse(input)).toEpochMilli(); + return DateFormatters.from(parse(input)).toInstant().toEpochMilli(); } /** diff --git a/server/src/test/java/org/elasticsearch/common/joda/JavaJodaTimeDuellingTests.java b/server/src/test/java/org/elasticsearch/common/joda/JavaJodaTimeDuellingTests.java index b5fcf1c7f30ce..40822d5a38b84 100644 --- a/server/src/test/java/org/elasticsearch/common/joda/JavaJodaTimeDuellingTests.java +++ b/server/src/test/java/org/elasticsearch/common/joda/JavaJodaTimeDuellingTests.java @@ -683,6 +683,12 @@ public void testIso8601Parsers() { assertSameDate("2018-10-10T10:11:12,123Z", format, jodaFormatter, javaFormatter); } + public void testParsingMissingTimezone() { + long millisJava = DateFormatter.forPattern("8yyyy-MM-dd HH:mm:ss").parseMillis("2018-02-18 17:47:17"); + long millisJoda = DateFormatter.forPattern("yyyy-MM-dd HH:mm:ss").parseMillis("2018-02-18 17:47:17"); + assertThat(millisJava, is(millisJoda)); + } + private void assertSamePrinterOutput(String format, ZonedDateTime javaDate, DateTime jodaDate) { assertThat(jodaDate.getMillis(), is(javaDate.toInstant().toEpochMilli())); String javaTimeOut = DateFormatter.forPattern(format).format(javaDate); From fbabd81e3d5cf74add6157bd3d9c0391b967d60f Mon Sep 17 00:00:00 2001 From: David Roberts Date: Tue, 19 Feb 2019 13:47:47 +0000 Subject: [PATCH 20/54] [ML] Allow stop unassigned datafeed and relax unset upgrade mode wait (#39034) These two changes are interlinked. Before this change, unsetting ML upgrade mode would wait for all datafeeds to be assigned and no longer waiting for their corresponding jobs to initialise. However, this could be inappropriate if there was a reason other than upgrade mode why one job was unable to be assigned or slow to start up. Unsetting of upgrade mode would hang in this case. This change relaxes the condition for considering upgrade mode to be unset to simply that an assignment attempt has been made for each ML persistent task that did not fail because upgrade mode was enabled. Thus, after unsetting upgrade mode there is no guarantee that every ML persistent task is assigned, just that each is not unassigned due to upgrade mode. In order to make setting upgrade mode work immediately after unsetting upgrade mode, it was then also necessary to make it possible to stop a datafeed that was not assigned. There was no particularly good reason why this was not allowed in the past. It is trivial to stop an unassigned datafeed because it just involves removing the persistent task.
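In code terms, the graceful stop of an unassigned datafeed reduces to removing its persistent task with a no-op listener, roughly as in this condensed sketch (it reuses the names from the diff below and is an illustration, not the complete change):

    // If the datafeed's persistent task has no assigned node there is no running
    // code to wind down, so a graceful stop is the same as a forceful one: just
    // remove the persistent task. The listener can be a no-op because callers
    // already wait for the task to disappear.
    if (datafeedTask.isAssigned() == false) {
        persistentTasksService.sendRemoveRequest(datafeedTask.getId(),
            ActionListener.wrap(r -> {}, e -> {}));
    }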
--- .../action/TransportSetUpgradeModeAction.java | 8 +--- .../action/TransportStopDatafeedAction.java | 41 +++++++++++-------- .../integration/MlDistributedFailureIT.java | 13 ++---- .../test/ml/set_upgrade_mode.yml | 12 +++--- 4 files changed, 34 insertions(+), 40 deletions(-) diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportSetUpgradeModeAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportSetUpgradeModeAction.java index d16f9e18421d8..58ff31a6bc847 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportSetUpgradeModeAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportSetUpgradeModeAction.java @@ -197,13 +197,9 @@ protected void masterOperation(SetUpgradeModeAction.Request request, ClusterStat (t) -> t.getAssignment().equals(AWAITING_UPGRADE)) .isEmpty() && - // Datafeeds to wait for a non-"Awaiting upgrade" assignment and for the job task allocations to converge - // If we do not wait, deleting datafeeds, or attempting to unallocate them again causes issues as the - // job's task allocationId could have changed during either process. + // Wait for datafeeds to not be "Awaiting upgrade" persistentTasksCustomMetaData.findTasks(DATAFEED_TASK_NAME, - (t) -> - t.getAssignment().equals(AWAITING_UPGRADE) || - t.getAssignment().getExplanation().contains("state is stale")) + (t) -> t.getAssignment().equals(AWAITING_UPGRADE)) .isEmpty(), request.timeout(), ActionListener.wrap(r -> wrappedListener.onResponse(new AcknowledgedResponse(true)), wrappedListener::onFailure) diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStopDatafeedAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStopDatafeedAction.java index 636138a855bce..cbd55bb60d896 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStopDatafeedAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStopDatafeedAction.java @@ -29,7 +29,6 @@ import org.elasticsearch.xpack.core.ml.MlTasks; import org.elasticsearch.xpack.core.ml.action.StopDatafeedAction; import org.elasticsearch.xpack.core.ml.datafeed.DatafeedState; -import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; import org.elasticsearch.xpack.ml.MachineLearning; import org.elasticsearch.xpack.ml.datafeed.persistence.DatafeedConfigProvider; @@ -104,7 +103,7 @@ protected void doExecute(Task task, StopDatafeedAction.Request request, ActionLi final DiscoveryNodes nodes = state.nodes(); if (nodes.isLocalNodeElectedMaster() == false) { // Delegates stop datafeed to elected master node, so it becomes the coordinating node. - // See comment in StartDatafeedAction.Transport class for more information. + // See comment in TransportStartDatafeedAction for more information. if (nodes.getMasterNode() == null) { listener.onFailure(new MasterNotDiscoveredException("no known master node")); } else { @@ -142,13 +141,21 @@ private void normalStopDatafeed(Task task, StopDatafeedAction.Request request, A Set executorNodes = new HashSet<>(); for (String datafeedId : startedDatafeeds) { PersistentTasksCustomMetaData.PersistentTask datafeedTask = MlTasks.getDatafeedTask(datafeedId, tasks); - if (datafeedTask == null || datafeedTask.isAssigned() == false) { - String message = "Cannot stop datafeed [" + datafeedId + "] because the datafeed does not have an assigned node." 
+ - " Use force stop to stop the datafeed"; - listener.onFailure(ExceptionsHelper.conflictStatusException(message)); - return; - } else { + if (datafeedTask == null) { + // This should not happen, because startedDatafeeds was derived from the same tasks that is passed to this method + String msg = "Requested datafeed [" + datafeedId + "] be stopped, but datafeed's task could not be found."; + assert datafeedTask != null : msg; + logger.error(msg); + } else if (datafeedTask.isAssigned()) { executorNodes.add(datafeedTask.getExecutorNode()); + } else { + // This is the easy case - the datafeed is not currently assigned to a node, + // so can be gracefully stopped simply by removing its persistent task. (Usually + // a graceful stop cannot be achieved by simply removing the persistent task, but + // if the datafeed has no running code then graceful/forceful are the same.) + // The listener here can be a no-op, as waitForDatafeedStopped() already waits for + // these persistent tasks to disappear. + persistentTasksService.sendRemoveRequest(datafeedTask.getId(), ActionListener.wrap(r -> {}, e -> {})); } } @@ -198,9 +205,10 @@ public void onFailure(Exception e) { } }); } else { - String msg = "Requested datafeed [" + request.getDatafeedId() + "] be force-stopped, but " + - "datafeed's task could not be found."; - logger.warn(msg); + // This should not happen, because startedDatafeeds was derived from the same tasks that is passed to this method + String msg = "Requested datafeed [" + datafeedId + "] be force-stopped, but datafeed's task could not be found."; + assert datafeedTask != null : msg; + logger.error(msg); final int slot = counter.incrementAndGet(); failures.set(slot - 1, new RuntimeException(msg)); if (slot == startedDatafeeds.size()) { @@ -248,19 +256,18 @@ protected void doRun() throws Exception { private void sendResponseOrFailure(String datafeedId, ActionListener listener, AtomicArray failures) { - List catchedExceptions = failures.asList(); - if (catchedExceptions.size() == 0) { + List caughtExceptions = failures.asList(); + if (caughtExceptions.size() == 0) { listener.onResponse(new StopDatafeedAction.Response(true)); return; } - String msg = "Failed to stop datafeed [" + datafeedId + "] with [" + catchedExceptions.size() + String msg = "Failed to stop datafeed [" + datafeedId + "] with [" + caughtExceptions.size() + "] failures, rethrowing last, all Exceptions: [" - + catchedExceptions.stream().map(Exception::getMessage).collect(Collectors.joining(", ")) + + caughtExceptions.stream().map(Exception::getMessage).collect(Collectors.joining(", ")) + "]"; - ElasticsearchException e = new ElasticsearchException(msg, - catchedExceptions.get(0)); + ElasticsearchException e = new ElasticsearchException(msg, caughtExceptions.get(0)); listener.onFailure(e); } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/MlDistributedFailureIT.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/MlDistributedFailureIT.java index bfe90f1a03166..fd402f6d2183f 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/MlDistributedFailureIT.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/MlDistributedFailureIT.java @@ -159,22 +159,15 @@ public void testCloseUnassignedJobAndDatafeed() throws Exception { client().execute(GetDatafeedsStatsAction.INSTANCE, datafeedStatsRequest).actionGet(); assertEquals(DatafeedState.STARTED, datafeedStatsResponse.getResponse().results().get(0).getDatafeedState()); 
- // Can't normal stop an unassigned datafeed + // An unassigned datafeed can be stopped either normally or by force StopDatafeedAction.Request stopDatafeedRequest = new StopDatafeedAction.Request(datafeedId); - ElasticsearchStatusException statusException = expectThrows(ElasticsearchStatusException.class, - () -> client().execute(StopDatafeedAction.INSTANCE, stopDatafeedRequest).actionGet()); - assertEquals("Cannot stop datafeed [" + datafeedId + - "] because the datafeed does not have an assigned node. Use force stop to stop the datafeed", - statusException.getMessage()); - - // Can only force stop an unassigned datafeed - stopDatafeedRequest.setForce(true); + stopDatafeedRequest.setForce(randomBoolean()); StopDatafeedAction.Response stopDatafeedResponse = client().execute(StopDatafeedAction.INSTANCE, stopDatafeedRequest).actionGet(); assertTrue(stopDatafeedResponse.isStopped()); // Can't normal stop an unassigned job CloseJobAction.Request closeJobRequest = new CloseJobAction.Request(jobId); - statusException = expectThrows(ElasticsearchStatusException.class, + ElasticsearchStatusException statusException = expectThrows(ElasticsearchStatusException.class, () -> client().execute(CloseJobAction.INSTANCE, closeJobRequest).actionGet()); assertEquals("Cannot close job [" + jobId + "] because the job does not have an assigned node. Use force close to close the job", diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/set_upgrade_mode.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/set_upgrade_mode.yml index 9b33af5f48bb0..4a93e46c6b491 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/set_upgrade_mode.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/set_upgrade_mode.yml @@ -206,7 +206,11 @@ teardown: ml.get_datafeed_stats: datafeed_id: set-upgrade-mode-job-datafeed - match: { datafeeds.0.state: "started" } - - match: { datafeeds.0.assignment_explanation: "" } + # The datafeed will not be assigned until the job has updated its status on the node it's assigned + # to, and that probably won't happen in time for this assertion. That is indicated by an assignment + # reason ending "state is stale". However, the datafeed should NOT be unassigned with a reason of + # "upgrade mode is enabled" - that reason should have gone away before this test. + - match: { datafeeds.0.assignment_explanation: /(^$|.+job.+state.is.stale)/ } - do: cat.tasks: {} @@ -214,12 +218,6 @@ teardown: $body: | /.+job.+/ - - do: - cat.tasks: {} - - match: - $body: | - /.+datafeed.+/ - --- "Attempt to open job when upgrade_mode is enabled": - do: From 44fc57f07d394a3f6db0aaeefea8937191a5d7f8 Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Tue, 19 Feb 2019 08:53:16 -0500 Subject: [PATCH 21/54] Fix retention leases sync on recovery test This test had a bug. We attempt to allow only the primary to be allocated, to force all replicas to recover from the primary after we had set the state of the retention leases on the primary. However, in building the index settings, we were overwriting the settings that exclude the replicas from being allocated. This means that some of the replicas would end up assigned and, rather than receiving retention leases during recovery, would be part of the replication group receiving retention leases as they are manipulated. Since retention lease renewals are only synced periodically, this means that the replica could be lagging a little behind in some cases, leading to an assertion tripping in the test.
This commit addresses this by ensuring that the replicas are indeed not allocated until after the retention leases are done being manipulated on the replica. We did this by not overwriting the exclude settings. Closes #39105 --- .../index/seqno/RetentionLeaseIT.java | 24 ++++++++----------- 1 file changed, 10 insertions(+), 14 deletions(-) diff --git a/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseIT.java b/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseIT.java index 44a8cd70c42eb..a05d383eee080 100644 --- a/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseIT.java +++ b/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseIT.java @@ -39,7 +39,7 @@ import java.util.ArrayList; import java.util.Collection; import java.util.Collections; -import java.util.HashMap; +import java.util.LinkedHashMap; import java.util.List; import java.util.Map; import java.util.concurrent.CountDownLatch; @@ -89,7 +89,7 @@ public void testRetentionLeasesSyncedOnAdd() throws Exception { .getShardOrNull(new ShardId(resolveIndex("index"), 0)); // we will add multiple retention leases and expect to see them synced to all replicas final int length = randomIntBetween(1, 8); - final Map currentRetentionLeases = new HashMap<>(); + final Map currentRetentionLeases = new LinkedHashMap<>(); for (int i = 0; i < length; i++) { final String id = randomValueOtherThanMany(currentRetentionLeases.keySet()::contains, () -> randomAlphaOfLength(8)); final long retainingSequenceNumber = randomLongBetween(0, Long.MAX_VALUE); @@ -136,7 +136,7 @@ public void testRetentionLeaseSyncedOnRemove() throws Exception { .getInstance(IndicesService.class, primaryShardNodeName) .getShardOrNull(new ShardId(resolveIndex("index"), 0)); final int length = randomIntBetween(1, 8); - final Map currentRetentionLeases = new HashMap<>(); + final Map currentRetentionLeases = new LinkedHashMap<>(); for (int i = 0; i < length; i++) { final String id = randomValueOtherThanMany(currentRetentionLeases.keySet()::contains, () -> randomAlphaOfLength(8)); final long retainingSequenceNumber = randomLongBetween(0, Long.MAX_VALUE); @@ -277,7 +277,7 @@ public void testBackgroundRetentionLeaseSync() throws Exception { .getShardOrNull(new ShardId(resolveIndex("index"), 0)); // we will add multiple retention leases and expect to see them synced to all replicas final int length = randomIntBetween(1, 8); - final Map currentRetentionLeases = new HashMap<>(length); + final Map currentRetentionLeases = new LinkedHashMap<>(length); final List ids = new ArrayList<>(length); for (int i = 0; i < length; i++) { final String id = randomValueOtherThanMany(currentRetentionLeases.keySet()::contains, () -> randomAlphaOfLength(8)); @@ -318,15 +318,15 @@ public void testRetentionLeasesSyncOnRecovery() throws Exception { internalCluster().ensureAtLeastNumDataNodes(1 + numberOfReplicas); /* * We effectively disable the background sync to ensure that the retention leases are not synced in the background so that the only - * source of retention leases on the replicas would be from the commit point and recovery. + * source of retention leases on the replicas would be from recovery. 
*/ - final Settings settings = Settings.builder() + final Settings.Builder settings = Settings.builder() .put("index.number_of_shards", 1) .put("index.number_of_replicas", 0) - .put(IndexService.RETENTION_LEASE_SYNC_INTERVAL_SETTING.getKey(), TimeValue.timeValueHours(24)) - .build(); + .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true) + .put(IndexService.RETENTION_LEASE_SYNC_INTERVAL_SETTING.getKey(), TimeValue.timeValueHours(24)); // when we increase the number of replicas below we want to exclude the replicas from being allocated so that they do not recover - assertAcked(prepareCreate("index", 1).setSettings(settings)); + assertAcked(prepareCreate("index", 1, settings)); ensureYellow("index"); final AcknowledgedResponse response = client().admin() .indices() @@ -339,7 +339,7 @@ public void testRetentionLeasesSyncOnRecovery() throws Exception { .getInstance(IndicesService.class, primaryShardNodeName) .getShardOrNull(new ShardId(resolveIndex("index"), 0)); final int length = randomIntBetween(1, 8); - final Map currentRetentionLeases = new HashMap<>(); + final Map currentRetentionLeases = new LinkedHashMap<>(); for (int i = 0; i < length; i++) { final String id = randomValueOtherThanMany(currentRetentionLeases.keySet()::contains, () -> randomAlphaOfLength(8)); final long retainingSequenceNumber = randomLongBetween(0, Long.MAX_VALUE); @@ -348,10 +348,6 @@ public void testRetentionLeasesSyncOnRecovery() throws Exception { final ActionListener listener = ActionListener.wrap(r -> latch.countDown(), e -> fail(e.toString())); currentRetentionLeases.put(id, primary.addRetentionLease(id, retainingSequenceNumber, source, listener)); latch.await(); - /* - * Now renew the leases; since we do not flush immediately on renewal, this means that the latest retention leases will not be - * in the latest commit point and therefore not transferred during the file-copy phase of recovery. 
- */ currentRetentionLeases.put(id, primary.renewRetentionLease(id, retainingSequenceNumber, source)); } From 4acb50fd1c2facff15cd246a786cf47853a9e021 Mon Sep 17 00:00:00 2001 From: Benjamin Trent Date: Tue, 19 Feb 2019 08:46:46 -0600 Subject: [PATCH 22/54] ML refactor DatafeedsConfig(Update) so defaults are not populated in queries or aggs (#38822) * ML refactor DatafeedsConfig(Update) so defaults are not populated in queries or aggs * Addressing pr feedback --- .../core/ml/datafeed/DatafeedConfig.java | 132 +++++++++-------- .../core/ml/datafeed/DatafeedUpdate.java | 113 ++++++++++----- .../xpack/core/ml/job/messages/Messages.java | 4 +- .../core/ml/datafeed/DatafeedConfigTests.java | 134 ++++++++++++++---- .../core/ml/datafeed/DatafeedUpdateTests.java | 134 ++++++++++++++---- .../deprecation/MlDeprecationChecksTests.java | 2 +- .../ml/integration/DelayedDataDetectorIT.java | 4 +- .../rest-api-spec/test/ml/datafeeds_crud.yml | 6 +- 8 files changed, 372 insertions(+), 157 deletions(-) diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedConfig.java index ed858b58dd484..597edd3675270 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedConfig.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedConfig.java @@ -5,6 +5,8 @@ */ package org.elasticsearch.xpack.core.ml.datafeed; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.Version; import org.elasticsearch.cluster.AbstractDiffable; @@ -18,11 +20,9 @@ import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentParseException; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.index.query.AbstractQueryBuilder; +import org.elasticsearch.index.query.MatchAllQueryBuilder; import org.elasticsearch.index.query.QueryBuilder; -import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.search.aggregations.AggregationBuilder; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.metrics.MaxAggregationBuilder; @@ -71,19 +71,12 @@ public class DatafeedConfig extends AbstractDiffable implements (objectMap, id, warnings) -> { try { return QUERY_TRANSFORMER.fromMap(objectMap, warnings); - } catch (IOException | XContentParseException exception) { + } catch (Exception exception) { // Certain thrown exceptions wrap up the real Illegal argument making it hard to determine cause for the user if (exception.getCause() instanceof IllegalArgumentException) { - throw ExceptionsHelper.badRequestException( - Messages.getMessage(Messages.DATAFEED_CONFIG_QUERY_BAD_FORMAT, - id, - exception.getCause().getMessage()), - exception.getCause()); - } else { - throw ExceptionsHelper.badRequestException( - Messages.getMessage(Messages.DATAFEED_CONFIG_QUERY_BAD_FORMAT, exception, id), - exception); + exception = (Exception)exception.getCause(); } + throw ExceptionsHelper.badRequestException(Messages.getMessage(Messages.DATAFEED_CONFIG_QUERY_BAD_FORMAT, id), exception); } }; @@ -92,22 +85,17 @@ public class DatafeedConfig extends AbstractDiffable implements (objectMap, id, warnings) -> { try 
{ return AGG_TRANSFORMER.fromMap(objectMap, warnings); - } catch (IOException | XContentParseException exception) { + } catch (Exception exception) { // Certain thrown exceptions wrap up the real Illegal argument making it hard to determine cause for the user if (exception.getCause() instanceof IllegalArgumentException) { - throw ExceptionsHelper.badRequestException( - Messages.getMessage(Messages.DATAFEED_CONFIG_AGG_BAD_FORMAT, - id, - exception.getCause().getMessage()), - exception.getCause()); - } else { - throw ExceptionsHelper.badRequestException( - Messages.getMessage(Messages.DATAFEED_CONFIG_AGG_BAD_FORMAT, exception.getMessage(), id), - exception); + exception = (Exception)exception.getCause(); } + throw ExceptionsHelper.badRequestException(Messages.getMessage(Messages.DATAFEED_CONFIG_AGG_BAD_FORMAT, id), exception); } }; + private static final Logger logger = LogManager.getLogger(DatafeedConfig.class); + // Used for QueryPage public static final ParseField RESULTS_FIELD = new ParseField("datafeeds"); public static String TYPE = "datafeed"; @@ -164,15 +152,11 @@ private static ObjectParser createParser(boolean ignoreUnknownFie builder.setQueryDelay(TimeValue.parseTimeValue(val, QUERY_DELAY.getPreferredName())), QUERY_DELAY); parser.declareString((builder, val) -> builder.setFrequency(TimeValue.parseTimeValue(val, FREQUENCY.getPreferredName())), FREQUENCY); - if (ignoreUnknownFields) { - parser.declareObject(Builder::setQuery, (p, c) -> p.mapOrdered(), QUERY); - parser.declareObject(Builder::setAggregations, (p, c) -> p.mapOrdered(), AGGREGATIONS); - parser.declareObject(Builder::setAggregations, (p, c) -> p.mapOrdered(), AGGS); - } else { - parser.declareObject(Builder::setParsedQuery, (p, c) -> AbstractQueryBuilder.parseInnerQueryBuilder(p), QUERY); - parser.declareObject(Builder::setParsedAggregations, (p, c) -> AggregatorFactories.parseAggregators(p), AGGREGATIONS); - parser.declareObject(Builder::setParsedAggregations, (p, c) -> AggregatorFactories.parseAggregators(p), AGGS); - } + parser.declareObject((builder, val) -> builder.setQuery(val, ignoreUnknownFields), (p, c) -> p.mapOrdered(), QUERY); + parser.declareObject((builder, val) -> builder.setAggregationsSafe(val, ignoreUnknownFields), (p, c) -> p.mapOrdered(), + AGGREGATIONS); + parser.declareObject((builder, val) -> builder.setAggregationsSafe(val, ignoreUnknownFields), (p, c) -> p.mapOrdered(), + AGGS); parser.declareObject(Builder::setScriptFields, (p, c) -> { List parsedScriptFields = new ArrayList<>(); while (p.nextToken() != XContentParser.Token.END_OBJECT) { @@ -582,7 +566,7 @@ public static class Builder { private TimeValue queryDelay; private TimeValue frequency; private List indices = Collections.emptyList(); - private Map query; + private Map query = Collections.singletonMap(MatchAllQueryBuilder.NAME, Collections.emptyMap()); private Map aggregations; private List scriptFields; private Integer scrollSize = DEFAULT_SCROLL_SIZE; @@ -590,11 +574,7 @@ public static class Builder { private Map headers = Collections.emptyMap(); private DelayedDataCheckConfig delayedDataCheckConfig = DelayedDataCheckConfig.defaultDelayedDataCheckConfig(); - public Builder() { - try { - this.query = QUERY_TRANSFORMER.toMap(QueryBuilders.matchAllQuery()); - } catch (IOException ex) { /*Should never happen*/ } - } + public Builder() { } public Builder(String id, String jobId) { this(); @@ -647,48 +627,74 @@ public void setFrequency(TimeValue frequency) { this.frequency = frequency; } - public void setParsedQuery(QueryBuilder query) { + 
public void setQuery(Map query) { + setQuery(query, true); + } + + public void setQuery(Map query, boolean lenient) { + this.query = ExceptionsHelper.requireNonNull(query, QUERY.getPreferredName()); try { - setQuery(QUERY_TRANSFORMER.toMap(ExceptionsHelper.requireNonNull(query, QUERY.getPreferredName()))); - } catch (IOException | XContentParseException exception) { - if (exception.getCause() instanceof IllegalArgumentException) { - // Certain thrown exceptions wrap up the real Illegal argument making it hard to determine cause for the user - throw ExceptionsHelper.badRequestException( - Messages.getMessage(Messages.DATAFEED_CONFIG_QUERY_BAD_FORMAT, - id, - exception.getCause().getMessage()), - exception.getCause()); + QUERY_TRANSFORMER.fromMap(query); + } catch(Exception ex) { + String msg = Messages.getMessage(Messages.DATAFEED_CONFIG_QUERY_BAD_FORMAT, id); + + if (ex.getCause() instanceof IllegalArgumentException) { + ex = (Exception)ex.getCause(); + } + + if (lenient) { + logger.warn(msg, ex); } else { - throw ExceptionsHelper.badRequestException( - Messages.getMessage(Messages.DATAFEED_CONFIG_QUERY_BAD_FORMAT, id, exception.getMessage()), exception); + throw ExceptionsHelper.badRequestException(msg, ex); } } } - public void setQuery(Map query) { - this.query = ExceptionsHelper.requireNonNull(query, QUERY.getPreferredName()); - } - + // Kept for easier testing public void setParsedAggregations(AggregatorFactories.Builder aggregations) { try { setAggregations(AGG_TRANSFORMER.toMap(aggregations)); - } catch (IOException | XContentParseException exception) { + } catch (Exception exception) { // Certain thrown exceptions wrap up the real Illegal argument making it hard to determine cause for the user if (exception.getCause() instanceof IllegalArgumentException) { - throw ExceptionsHelper.badRequestException( - Messages.getMessage(Messages.DATAFEED_CONFIG_AGG_BAD_FORMAT, - id, - exception.getCause().getMessage()), - exception.getCause()); - } else { - throw ExceptionsHelper.badRequestException( - Messages.getMessage(Messages.DATAFEED_CONFIG_AGG_BAD_FORMAT, id, exception.getMessage()), exception); + exception = (Exception)exception.getCause(); } + throw ExceptionsHelper.badRequestException( + Messages.getMessage(Messages.DATAFEED_CONFIG_AGG_BAD_FORMAT, id), exception); } } + private void setAggregationsSafe(Map aggregations, boolean lenient) { + if (this.aggregations != null) { + throw ExceptionsHelper.badRequestException("Found two aggregation definitions: [aggs] and [aggregations]"); + } + setAggregations(aggregations, lenient); + } + void setAggregations(Map aggregations) { + setAggregations(aggregations, true); + } + + void setAggregations(Map aggregations, boolean lenient) { this.aggregations = aggregations; + try { + if (aggregations != null && aggregations.isEmpty()) { + throw new Exception("[aggregations] are empty"); + } + AGG_TRANSFORMER.fromMap(aggregations); + } catch (Exception ex) { + String msg = Messages.getMessage(Messages.DATAFEED_CONFIG_AGG_BAD_FORMAT, id); + + if (ex.getCause() instanceof IllegalArgumentException) { + ex = (Exception)ex.getCause(); + } + + if (lenient) { + logger.warn(msg, ex); + } else { + throw ExceptionsHelper.badRequestException(msg, ex); + } + } } public void setScriptFields(List scriptFields) { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedUpdate.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedUpdate.java index 23c2eeccc6a59..5468ea1ee2688 100644 --- 
a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedUpdate.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedUpdate.java @@ -16,13 +16,12 @@ import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.index.query.AbstractQueryBuilder; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.xpack.core.ClientHelper; -import org.elasticsearch.xpack.core.ml.datafeed.extractor.ExtractorUtils; import org.elasticsearch.xpack.core.ml.job.config.Job; +import org.elasticsearch.xpack.core.ml.job.messages.Messages; import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; import java.io.IOException; @@ -34,6 +33,11 @@ import java.util.Objects; import java.util.stream.Collectors; +import static org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig.AGG_TRANSFORMER; +import static org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig.QUERY_TRANSFORMER; +import static org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig.lazyAggParser; +import static org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig.lazyQueryParser; + /** * A datafeed update contains partial properties to update a {@link DatafeedConfig}. * The main difference between this class and {@link DatafeedConfig} is that here all @@ -52,12 +56,9 @@ public class DatafeedUpdate implements Writeable, ToXContentObject { TimeValue.parseTimeValue(val, DatafeedConfig.QUERY_DELAY.getPreferredName())), DatafeedConfig.QUERY_DELAY); PARSER.declareString((builder, val) -> builder.setFrequency( TimeValue.parseTimeValue(val, DatafeedConfig.FREQUENCY.getPreferredName())), DatafeedConfig.FREQUENCY); - PARSER.declareObject(Builder::setQuery, - (p, c) -> AbstractQueryBuilder.parseInnerQueryBuilder(p), DatafeedConfig.QUERY); - PARSER.declareObject(Builder::setAggregations, (p, c) -> AggregatorFactories.parseAggregators(p), - DatafeedConfig.AGGREGATIONS); - PARSER.declareObject(Builder::setAggregations,(p, c) -> AggregatorFactories.parseAggregators(p), - DatafeedConfig.AGGS); + PARSER.declareObject(Builder::setQuery, (p, c) -> p.mapOrdered(), DatafeedConfig.QUERY); + PARSER.declareObject(Builder::setAggregationsSafe, (p, c) -> p.mapOrdered(), DatafeedConfig.AGGREGATIONS); + PARSER.declareObject(Builder::setAggregationsSafe,(p, c) -> p.mapOrdered(), DatafeedConfig.AGGS); PARSER.declareObject(Builder::setScriptFields, (p, c) -> { List parsedScriptFields = new ArrayList<>(); while (p.nextToken() != XContentParser.Token.END_OBJECT) { @@ -78,16 +79,16 @@ public class DatafeedUpdate implements Writeable, ToXContentObject { private final TimeValue queryDelay; private final TimeValue frequency; private final List indices; - private final QueryBuilder query; - private final AggregatorFactories.Builder aggregations; + private final Map query; + private final Map aggregations; private final List scriptFields; private final Integer scrollSize; private final ChunkingConfig chunkingConfig; private final DelayedDataCheckConfig delayedDataCheckConfig; - private DatafeedUpdate(String id, String jobId, TimeValue queryDelay, TimeValue frequency, List indices, QueryBuilder query, - AggregatorFactories.Builder aggregations, List scriptFields, Integer scrollSize, - ChunkingConfig chunkingConfig, 
DelayedDataCheckConfig delayedDataCheckConfig) { + private DatafeedUpdate(String id, String jobId, TimeValue queryDelay, TimeValue frequency, List indices, + Map query, Map aggregations, List scriptFields, + Integer scrollSize, ChunkingConfig chunkingConfig, DelayedDataCheckConfig delayedDataCheckConfig) { this.id = id; this.jobId = jobId; this.queryDelay = queryDelay; @@ -117,8 +118,17 @@ public DatafeedUpdate(StreamInput in) throws IOException { in.readStringList(); } } - this.query = in.readOptionalNamedWriteable(QueryBuilder.class); - this.aggregations = in.readOptionalWriteable(AggregatorFactories.Builder::new); + if (in.getVersion().before(Version.V_7_1_0)) { + this.query = QUERY_TRANSFORMER.toMap(in.readOptionalNamedWriteable(QueryBuilder.class)); + this.aggregations = AGG_TRANSFORMER.toMap(in.readOptionalWriteable(AggregatorFactories.Builder::new)); + } else { + this.query = in.readMap(); + if (in.readBoolean()) { + this.aggregations = in.readMap(); + } else { + this.aggregations = null; + } + } if (in.readBoolean()) { this.scriptFields = in.readList(SearchSourceBuilder.ScriptField::new); } else { @@ -158,8 +168,16 @@ public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(true); out.writeStringCollection(Collections.emptyList()); } - out.writeOptionalNamedWriteable(query); - out.writeOptionalWriteable(aggregations); + if (out.getVersion().before(Version.V_7_1_0)) { + out.writeOptionalNamedWriteable(lazyQueryParser.apply(query, id, new ArrayList<>())); + out.writeOptionalWriteable(lazyAggParser.apply(aggregations, id, new ArrayList<>())); + } else { + out.writeMap(query); + out.writeBoolean(aggregations != null); + if (aggregations != null) { + out.writeMap(aggregations); + } + } if (scriptFields != null) { out.writeBoolean(true); out.writeList(scriptFields); @@ -227,27 +245,20 @@ Integer getScrollSize() { return scrollSize; } - QueryBuilder getQuery() { + Map getQuery() { return query; } - AggregatorFactories.Builder getAggregations() { + Map getAggregations() { return aggregations; } - /** - * Returns the histogram's interval as epoch millis. 
- */ - long getHistogramIntervalMillis() { - return ExtractorUtils.getHistogramIntervalMillis(aggregations); - } - /** * @return {@code true} when there are non-empty aggregations, {@code false} * otherwise */ boolean hasAggregations() { - return aggregations != null && aggregations.count() > 0; + return aggregations != null && aggregations.size() > 0; } List getScriptFields() { @@ -285,11 +296,11 @@ public DatafeedConfig apply(DatafeedConfig datafeedConfig, Map h builder.setIndices(indices); } if (query != null) { - builder.setParsedQuery(query); + builder.setQuery(query); } if (aggregations != null) { - DatafeedConfig.validateAggregations(aggregations); - builder.setParsedAggregations(aggregations); + DatafeedConfig.validateAggregations(lazyAggParser.apply(aggregations, id, new ArrayList<>())); + builder.setAggregations(aggregations); } if (scriptFields != null) { builder.setScriptFields(scriptFields); @@ -360,9 +371,9 @@ boolean isNoop(DatafeedConfig datafeed) { return (frequency == null || Objects.equals(frequency, datafeed.getFrequency())) && (queryDelay == null || Objects.equals(queryDelay, datafeed.getQueryDelay())) && (indices == null || Objects.equals(indices, datafeed.getIndices())) - && (query == null || Objects.equals(query, datafeed.getParsedQuery())) + && (query == null || Objects.equals(query, datafeed.getQuery())) && (scrollSize == null || Objects.equals(scrollSize, datafeed.getQueryDelay())) - && (aggregations == null || Objects.equals(aggregations, datafeed.getParsedAggregations())) + && (aggregations == null || Objects.equals(aggregations, datafeed.getAggregations())) && (scriptFields == null || Objects.equals(scriptFields, datafeed.getScriptFields())) && (delayedDataCheckConfig == null || Objects.equals(delayedDataCheckConfig, datafeed.getDelayedDataCheckConfig())) && (chunkingConfig == null || Objects.equals(chunkingConfig, datafeed.getChunkingConfig())); @@ -375,8 +386,8 @@ public static class Builder { private TimeValue queryDelay; private TimeValue frequency; private List indices; - private QueryBuilder query; - private AggregatorFactories.Builder aggregations; + private Map query; + private Map aggregations; private List scriptFields; private Integer scrollSize; private ChunkingConfig chunkingConfig; @@ -423,12 +434,42 @@ public void setFrequency(TimeValue frequency) { this.frequency = frequency; } - public void setQuery(QueryBuilder query) { + public void setQuery(Map query) { this.query = query; + try { + QUERY_TRANSFORMER.fromMap(query); + } catch(Exception ex) { + String msg = Messages.getMessage(Messages.DATAFEED_CONFIG_QUERY_BAD_FORMAT, id); + + if (ex.getCause() instanceof IllegalArgumentException) { + ex = (Exception)ex.getCause(); + } + throw ExceptionsHelper.badRequestException(msg, ex); + } } - public void setAggregations(AggregatorFactories.Builder aggregations) { + private void setAggregationsSafe(Map aggregations) { + if (this.aggregations != null) { + throw ExceptionsHelper.badRequestException("Found two aggregation definitions: [aggs] and [aggregations]"); + } + setAggregations(aggregations); + } + + public void setAggregations(Map aggregations) { this.aggregations = aggregations; + try { + if (aggregations != null && aggregations.isEmpty()) { + throw new Exception("[aggregations] are empty"); + } + AGG_TRANSFORMER.fromMap(aggregations); + } catch(Exception ex) { + String msg = Messages.getMessage(Messages.DATAFEED_CONFIG_AGG_BAD_FORMAT, id); + + if (ex.getCause() instanceof IllegalArgumentException) { + ex = (Exception)ex.getCause(); + } + throw 
ExceptionsHelper.badRequestException(msg, ex); + } } public void setScriptFields(List scriptFields) { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/messages/Messages.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/messages/Messages.java index 77ae8cb26eae9..09874ec611b22 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/messages/Messages.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/messages/Messages.java @@ -26,8 +26,8 @@ public final class Messages { "delayed_data_check_config: check_window [{0}] must be greater than the bucket_span [{1}]"; public static final String DATAFEED_CONFIG_DELAYED_DATA_CHECK_SPANS_TOO_MANY_BUCKETS = "delayed_data_check_config: check_window [{0}] must be less than 10,000x the bucket_span [{1}]"; - public static final String DATAFEED_CONFIG_QUERY_BAD_FORMAT = "Datafeed [{0}] query is not parsable: {1}"; - public static final String DATAFEED_CONFIG_AGG_BAD_FORMAT = "Datafeed [{0}] aggregations are not parsable: {1}"; + public static final String DATAFEED_CONFIG_QUERY_BAD_FORMAT = "Datafeed [{0}] query is not parsable"; + public static final String DATAFEED_CONFIG_AGG_BAD_FORMAT = "Datafeed [{0}] aggregations are not parsable"; public static final String DATAFEED_DOES_NOT_SUPPORT_JOB_WITH_LATENCY = "A job configured with datafeed cannot support latency"; public static final String DATAFEED_NOT_FOUND = "No datafeed with id [{0}] exists"; diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedConfigTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedConfigTests.java index 788870013885e..40b7ce88df0a8 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedConfigTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedConfigTests.java @@ -7,6 +7,7 @@ import com.carrotsearch.randomizedtesting.generators.CodepointSetGenerator; import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.Version; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.BytesStreamOutput; @@ -26,9 +27,8 @@ import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.query.BoolQueryBuilder; -import org.elasticsearch.index.query.QueryBuilder; -import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.index.query.TermQueryBuilder; +import org.elasticsearch.rest.RestStatus; import org.elasticsearch.script.Script; import org.elasticsearch.search.SearchModule; import org.elasticsearch.search.aggregations.AggregationBuilders; @@ -58,6 +58,8 @@ import java.util.List; import java.util.Map; +import static org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig.QUERY_TRANSFORMER; +import static org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig.lazyQueryParser; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThanOrEqualTo; @@ -89,7 +91,8 @@ private static DatafeedConfig.Builder createRandomizedDatafeedConfigBuilder(Stri DatafeedConfig.Builder builder = new DatafeedConfig.Builder(randomValidDatafeedId(), jobId); builder.setIndices(randomStringList(1, 10)); if (randomBoolean()) { - 
builder.setParsedQuery(QueryBuilders.termQuery(randomAlphaOfLength(10), randomAlphaOfLength(10))); + builder.setQuery(Collections.singletonMap(TermQueryBuilder.NAME, + Collections.singletonMap(randomAlphaOfLengthBetween(1, 10), randomAlphaOfLengthBetween(1, 10)))); } boolean addScriptFields = randomBoolean(); if (addScriptFields) { @@ -214,6 +217,41 @@ protected DatafeedConfig doParseInstance(XContentParser parser) { " }\n" + "}"; + private static final String MULTIPLE_AGG_DEF_DATAFEED = "{\n" + + " \"datafeed_id\": \"farequote-datafeed\",\n" + + " \"job_id\": \"farequote\",\n" + + " \"frequency\": \"1h\",\n" + + " \"indices\": [\"farequote1\", \"farequote2\"],\n" + + " \"aggregations\": {\n" + + " \"buckets\": {\n" + + " \"date_histogram\": {\n" + + " \"field\": \"time\",\n" + + " \"interval\": \"360s\",\n" + + " \"time_zone\": \"UTC\"\n" + + " },\n" + + " \"aggregations\": {\n" + + " \"time\": {\n" + + " \"max\": {\"field\": \"time\"}\n" + + " }\n" + + " }\n" + + " }\n" + + " }," + + " \"aggs\": {\n" + + " \"buckets2\": {\n" + + " \"date_histogram\": {\n" + + " \"field\": \"time\",\n" + + " \"interval\": \"360s\",\n" + + " \"time_zone\": \"UTC\"\n" + + " },\n" + + " \"aggregations\": {\n" + + " \"time\": {\n" + + " \"max\": {\"field\": \"time\"}\n" + + " }\n" + + " }\n" + + " }\n" + + " }\n" + + "}"; + public void testFutureConfigParse() throws IOException { XContentParser parser = XContentFactory.xContent(XContentType.JSON) .createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, FUTURE_DATAFEED); @@ -228,7 +266,8 @@ public void testPastQueryConfigParse() throws IOException { DatafeedConfig config = DatafeedConfig.LENIENT_PARSER.apply(parser, null).build(); ElasticsearchException e = expectThrows(ElasticsearchException.class, () -> config.getParsedQuery()); - assertEquals("[match] query doesn't support multiple fields, found [query] and [type]", e.getMessage()); + assertNotNull(e.getCause()); + assertEquals("[match] query doesn't support multiple fields, found [query] and [type]", e.getCause().getMessage()); } try(XContentParser parser = XContentFactory.xContent(XContentType.JSON) @@ -236,7 +275,7 @@ public void testPastQueryConfigParse() throws IOException { XContentParseException e = expectThrows(XContentParseException.class, () -> DatafeedConfig.STRICT_PARSER.apply(parser, null).build()); - assertEquals("[6:25] [datafeed_config] failed to parse field [query]", e.getMessage()); + assertEquals("[6:64] [datafeed_config] failed to parse field [query]", e.getMessage()); } } @@ -246,9 +285,10 @@ public void testPastAggConfigParse() throws IOException { DatafeedConfig.Builder configBuilder = DatafeedConfig.LENIENT_PARSER.apply(parser, null); ElasticsearchException e = expectThrows(ElasticsearchException.class, () -> configBuilder.build()); + assertNotNull(e.getCause()); assertEquals( - "Datafeed [farequote-datafeed] aggregations are not parsable: [size] must be greater than 0. Found [0] in [airline]", - e.getMessage()); + "[size] must be greater than 0. 
Found [0] in [airline]", + e.getCause().getMessage()); } try(XContentParser parser = XContentFactory.xContent(XContentType.JSON) @@ -256,7 +296,7 @@ public void testPastAggConfigParse() throws IOException { XContentParseException e = expectThrows(XContentParseException.class, () -> DatafeedConfig.STRICT_PARSER.apply(parser, null).build()); - assertEquals("[8:25] [datafeed_config] failed to parse field [aggregations]", e.getMessage()); + assertEquals("[25:3] [datafeed_config] failed to parse field [aggregations]", e.getMessage()); } } @@ -267,6 +307,25 @@ public void testFutureMetadataParse() throws IOException { assertNotNull(DatafeedConfig.LENIENT_PARSER.apply(parser, null).build()); } + public void testMultipleDefinedAggParse() throws IOException { + try(XContentParser parser = XContentFactory.xContent(XContentType.JSON) + .createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, MULTIPLE_AGG_DEF_DATAFEED)) { + XContentParseException ex = expectThrows(XContentParseException.class, + () -> DatafeedConfig.LENIENT_PARSER.apply(parser, null)); + assertThat(ex.getMessage(), equalTo("[32:3] [datafeed_config] failed to parse field [aggs]")); + assertNotNull(ex.getCause()); + assertThat(ex.getCause().getMessage(), equalTo("Found two aggregation definitions: [aggs] and [aggregations]")); + } + try(XContentParser parser = XContentFactory.xContent(XContentType.JSON) + .createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, MULTIPLE_AGG_DEF_DATAFEED)) { + XContentParseException ex = expectThrows(XContentParseException.class, + () -> DatafeedConfig.STRICT_PARSER.apply(parser, null)); + assertThat(ex.getMessage(), equalTo("[32:3] [datafeed_config] failed to parse field [aggs]")); + assertNotNull(ex.getCause()); + assertThat(ex.getCause().getMessage(), equalTo("Found two aggregation definitions: [aggs] and [aggregations]")); + } + } + public void testToXContentForInternalStorage() throws IOException { DatafeedConfig.Builder builder = createRandomizedDatafeedConfigBuilder("foo", 300); @@ -443,7 +502,8 @@ public void testBuild_GivenHistogramWithDefaultInterval() { ElasticsearchException e = expectThrows(ElasticsearchException.class, builder::build); - assertThat(e.getMessage(), containsString("[interval] must be >0 for histogram aggregation [time]")); + assertNotNull(e.getCause()); + assertThat(e.getCause().getMessage(), containsString("[interval] must be >0 for histogram aggregation [time]")); } public void testBuild_GivenDateHistogramWithInvalidTimeZone() { @@ -636,7 +696,7 @@ public void testGetQueryDeprecations() { DatafeedConfig spiedConfig = spy(datafeed); spiedConfig.getQueryDeprecations(); - verify(spiedConfig).getQueryDeprecations(DatafeedConfig.lazyQueryParser); + verify(spiedConfig).getQueryDeprecations(lazyQueryParser); } public void testSerializationOfComplexAggs() throws IOException { @@ -656,9 +716,11 @@ public void testSerializationOfComplexAggs() throws IOException { .subAggregation(derivativePipelineAggregationBuilder) .subAggregation(bucketScriptPipelineAggregationBuilder); DatafeedConfig.Builder datafeedConfigBuilder = createDatafeedBuilderWithDateHistogram(dateHistogram); - QueryBuilder terms = - new BoolQueryBuilder().filter(new TermQueryBuilder(randomAlphaOfLengthBetween(1, 10), randomAlphaOfLengthBetween(1, 10))); - datafeedConfigBuilder.setParsedQuery(terms); + Map terms = Collections.singletonMap(BoolQueryBuilder.NAME, + Collections.singletonMap("filter", + Collections.singletonMap(TermQueryBuilder.NAME, + 
Collections.singletonMap(randomAlphaOfLengthBetween(1, 10), randomAlphaOfLengthBetween(1, 10))))); + datafeedConfigBuilder.setQuery(terms); DatafeedConfig datafeedConfig = datafeedConfigBuilder.build(); AggregatorFactories.Builder aggBuilder = new AggregatorFactories.Builder().addAggregator(dateHistogram); @@ -675,7 +737,7 @@ public void testSerializationOfComplexAggs() throws IOException { // Assert that the parsed versions of our aggs and queries work as well assertEquals(aggBuilder, parsedDatafeedConfig.getParsedAggregations()); - assertEquals(terms, parsedDatafeedConfig.getParsedQuery()); + assertEquals(terms, parsedDatafeedConfig.getQuery()); try(BytesStreamOutput output = new BytesStreamOutput()) { datafeedConfig.writeTo(output); @@ -685,7 +747,7 @@ public void testSerializationOfComplexAggs() throws IOException { // Assert that the parsed versions of our aggs and queries work as well assertEquals(aggBuilder, streamedDatafeedConfig.getParsedAggregations()); - assertEquals(terms, streamedDatafeedConfig.getParsedQuery()); + assertEquals(terms, streamedDatafeedConfig.getQuery()); } } } @@ -707,9 +769,15 @@ public void testSerializationOfComplexAggsBetweenVersions() throws IOException { .subAggregation(derivativePipelineAggregationBuilder) .subAggregation(bucketScriptPipelineAggregationBuilder); DatafeedConfig.Builder datafeedConfigBuilder = createDatafeedBuilderWithDateHistogram(dateHistogram); - QueryBuilder terms = - new BoolQueryBuilder().filter(new TermQueryBuilder(randomAlphaOfLengthBetween(1, 10), randomAlphaOfLengthBetween(1, 10))); - datafeedConfigBuilder.setParsedQuery(terms); + Map terms = Collections.singletonMap(BoolQueryBuilder.NAME, + Collections.singletonMap("filter", + Collections.singletonList( + Collections.singletonMap(TermQueryBuilder.NAME, + Collections.singletonMap(randomAlphaOfLengthBetween(1, 10), randomAlphaOfLengthBetween(1, 10)))))); + // So equality check between the streamed and current passes + // Streamed DatafeedConfigs when they are before 6.6.0 require a parsed object for aggs and queries, consequently all the default + // values are added between them + datafeedConfigBuilder.setQuery(QUERY_TRANSFORMER.toMap(QUERY_TRANSFORMER.fromMap(terms))); DatafeedConfig datafeedConfig = datafeedConfigBuilder.build(); SearchModule searchModule = new SearchModule(Settings.EMPTY, false, Collections.emptyList()); @@ -726,7 +794,7 @@ public void testSerializationOfComplexAggsBetweenVersions() throws IOException { // Assert that the parsed versions of our aggs and queries work as well assertEquals(new AggregatorFactories.Builder().addAggregator(dateHistogram), streamedDatafeedConfig.getParsedAggregations()); - assertEquals(terms, streamedDatafeedConfig.getParsedQuery()); + assertEquals(datafeedConfig.getParsedQuery(), streamedDatafeedConfig.getParsedQuery()); } } } @@ -738,6 +806,22 @@ public void testCopyingDatafeedDoesNotCauseStackOverflow() { } } + public void testEmptyQueryMap() { + DatafeedConfig.Builder builder = new DatafeedConfig.Builder("empty_query_map", "job1"); + ElasticsearchStatusException ex = expectThrows(ElasticsearchStatusException.class, + () -> builder.setQuery(Collections.emptyMap(), false)); + assertThat(ex.status(), equalTo(RestStatus.BAD_REQUEST)); + assertThat(ex.getMessage(), equalTo("Datafeed [empty_query_map] query is not parsable")); + } + + public void testEmptyAggMap() { + DatafeedConfig.Builder builder = new DatafeedConfig.Builder("empty_agg_map", "job1"); + ElasticsearchStatusException ex = expectThrows(ElasticsearchStatusException.class, 
+ () -> builder.setAggregations(Collections.emptyMap(), false)); + assertThat(ex.status(), equalTo(RestStatus.BAD_REQUEST)); + assertThat(ex.getMessage(), equalTo("Datafeed [empty_agg_map] aggregations are not parsable")); + } + public static String randomValidDatafeedId() { CodepointSetGenerator generator = new CodepointSetGenerator("abcdefghijklmnopqrstuvwxyz".toCharArray()); return generator.ofCodePointsLength(random(), 10, 10); @@ -800,12 +884,14 @@ protected DatafeedConfig mutateInstance(DatafeedConfig instance) throws IOExcept builder.setIndices(indices); break; case 5: - BoolQueryBuilder query = new BoolQueryBuilder(); - if (instance.getParsedQuery() != null) { - query.must(instance.getParsedQuery()); + Map query = new HashMap<>(); + if (instance.getQuery() != null) { + query.put("must", instance.getQuery()); } - query.filter(new TermQueryBuilder(randomAlphaOfLengthBetween(1, 10), randomAlphaOfLengthBetween(1, 10))); - builder.setParsedQuery(query); + query.put("filter", Collections.singletonList( + Collections.singletonMap(TermQueryBuilder.NAME, + Collections.singletonMap(randomAlphaOfLengthBetween(1, 10), randomAlphaOfLengthBetween(1, 10))))); + builder.setQuery(query); break; case 6: if (instance.hasAggregations()) { diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedUpdateTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedUpdateTests.java index 302bfefc7c42a..96798b251d345 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedUpdateTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedUpdateTests.java @@ -5,31 +5,34 @@ */ package org.elasticsearch.xpack.core.ml.datafeed; +import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.DeprecationHandler; import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentParseException; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.index.query.BoolQueryBuilder; -import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.query.TermQueryBuilder; +import org.elasticsearch.rest.RestStatus; import org.elasticsearch.script.Script; import org.elasticsearch.search.SearchModule; -import org.elasticsearch.search.aggregations.AggregationBuilders; -import org.elasticsearch.search.aggregations.AggregatorFactories; -import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramAggregationBuilder; -import org.elasticsearch.search.aggregations.metrics.MaxAggregationBuilder; import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.search.builder.SearchSourceBuilder.ScriptField; import org.elasticsearch.test.AbstractSerializingTestCase; import org.elasticsearch.xpack.core.ml.datafeed.ChunkingConfig.Mode; import org.elasticsearch.xpack.core.ml.job.config.JobTests; +import java.io.IOException; import java.util.ArrayList; import java.util.Collections; +import java.util.HashMap; import java.util.List; +import java.util.Map; import 
static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; @@ -61,7 +64,8 @@ public static DatafeedUpdate createRandomized(String datafeedId, @Nullable Dataf builder.setIndices(DatafeedConfigTests.randomStringList(1, 10)); } if (randomBoolean()) { - builder.setQuery(QueryBuilders.termQuery(randomAlphaOfLength(10), randomAlphaOfLength(10))); + builder.setQuery(Collections.singletonMap(TermQueryBuilder.NAME, + Collections.singletonMap(randomAlphaOfLengthBetween(1, 10), randomAlphaOfLengthBetween(1, 10)))); } if (randomBoolean()) { int scriptsSize = randomInt(3); @@ -75,10 +79,9 @@ public static DatafeedUpdate createRandomized(String datafeedId, @Nullable Dataf if (randomBoolean() && datafeed == null) { // can only test with a single agg as the xcontent order gets randomized by test base class and then // the actual xcontent isn't the same and test fail. - // Testing with a single agg is ok as we don't have special list writeable / xconent logic - AggregatorFactories.Builder aggs = new AggregatorFactories.Builder(); - aggs.addAggregator(AggregationBuilders.avg(randomAlphaOfLength(10)).field(randomAlphaOfLength(10))); - builder.setAggregations(aggs); + // Testing with a single agg is ok as we don't have special list writeable / xcontent logic + builder.setAggregations(Collections.singletonMap(randomAlphaOfLength(10), + Collections.singletonMap("avg", Collections.singletonMap("field", randomAlphaOfLength(10))))); } if (randomBoolean()) { builder.setScrollSize(randomIntBetween(0, Integer.MAX_VALUE)); @@ -114,6 +117,52 @@ protected NamedXContentRegistry xContentRegistry() { return new NamedXContentRegistry(searchModule.getNamedXContents()); } + private static final String MULTIPLE_AGG_DEF_DATAFEED = "{\n" + + " \"datafeed_id\": \"farequote-datafeed\",\n" + + " \"job_id\": \"farequote\",\n" + + " \"frequency\": \"1h\",\n" + + " \"indices\": [\"farequote1\", \"farequote2\"],\n" + + " \"aggregations\": {\n" + + " \"buckets\": {\n" + + " \"date_histogram\": {\n" + + " \"field\": \"time\",\n" + + " \"interval\": \"360s\",\n" + + " \"time_zone\": \"UTC\"\n" + + " },\n" + + " \"aggregations\": {\n" + + " \"time\": {\n" + + " \"max\": {\"field\": \"time\"}\n" + + " }\n" + + " }\n" + + " }\n" + + " }," + + " \"aggs\": {\n" + + " \"buckets2\": {\n" + + " \"date_histogram\": {\n" + + " \"field\": \"time\",\n" + + " \"interval\": \"360s\",\n" + + " \"time_zone\": \"UTC\"\n" + + " },\n" + + " \"aggregations\": {\n" + + " \"time\": {\n" + + " \"max\": {\"field\": \"time\"}\n" + + " }\n" + + " }\n" + + " }\n" + + " }\n" + + "}"; + + public void testMultipleDefinedAggParse() throws IOException { + try(XContentParser parser = XContentFactory.xContent(XContentType.JSON) + .createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, MULTIPLE_AGG_DEF_DATAFEED)) { + XContentParseException ex = expectThrows(XContentParseException.class, + () -> DatafeedUpdate.PARSER.apply(parser, null)); + assertThat(ex.getMessage(), equalTo("[32:3] [datafeed_update] failed to parse field [aggs]")); + assertNotNull(ex.getCause()); + assertThat(ex.getCause().getMessage(), equalTo("Found two aggregation definitions: [aggs] and [aggregations]")); + } + } + public void testApply_failBecauseTargetDatafeedHasDifferentId() { DatafeedConfig datafeed = DatafeedConfigTests.createRandomizedDatafeedConfig("foo"); expectThrows(IllegalArgumentException.class, () -> createRandomized(datafeed.getId() + "_2").apply(datafeed, null)); @@ -149,7 +198,7 @@ public void 
testApply_givenFullUpdateNoAggregations() { update.setIndices(Collections.singletonList("i_2")); update.setQueryDelay(TimeValue.timeValueSeconds(42)); update.setFrequency(TimeValue.timeValueSeconds(142)); - update.setQuery(QueryBuilders.termQuery("a", "b")); + update.setQuery(Collections.singletonMap(TermQueryBuilder.NAME, Collections.singletonMap("a", "b"))); update.setScriptFields(Collections.singletonList(new SearchSourceBuilder.ScriptField("a", mockScript("b"), false))); update.setScrollSize(8000); update.setChunkingConfig(ChunkingConfig.newManual(TimeValue.timeValueHours(1))); @@ -161,7 +210,8 @@ public void testApply_givenFullUpdateNoAggregations() { assertThat(updatedDatafeed.getIndices(), equalTo(Collections.singletonList("i_2"))); assertThat(updatedDatafeed.getQueryDelay(), equalTo(TimeValue.timeValueSeconds(42))); assertThat(updatedDatafeed.getFrequency(), equalTo(TimeValue.timeValueSeconds(142))); - assertThat(updatedDatafeed.getParsedQuery(), equalTo(QueryBuilders.termQuery("a", "b"))); + assertThat(updatedDatafeed.getQuery(), + equalTo(Collections.singletonMap(TermQueryBuilder.NAME, Collections.singletonMap("a", "b")))); assertThat(updatedDatafeed.hasAggregations(), is(false)); assertThat(updatedDatafeed.getScriptFields(), equalTo(Collections.singletonList(new SearchSourceBuilder.ScriptField("a", mockScript("b"), false)))); @@ -177,16 +227,21 @@ public void testApply_givenAggregations() { DatafeedConfig datafeed = datafeedBuilder.build(); DatafeedUpdate.Builder update = new DatafeedUpdate.Builder(datafeed.getId()); - MaxAggregationBuilder maxTime = AggregationBuilders.max("time").field("time"); - update.setAggregations(new AggregatorFactories.Builder().addAggregator( - AggregationBuilders.histogram("a").interval(300000).field("time").subAggregation(maxTime))); + Map maxTime = Collections.singletonMap("time", + Collections.singletonMap("max", Collections.singletonMap("field", "time"))); + Map histoDefinition = new HashMap<>(); + histoDefinition.put("interval", 300000); + histoDefinition.put("field", "time"); + Map aggBody = new HashMap<>(); + aggBody.put("histogram", histoDefinition); + aggBody.put("aggs", maxTime); + Map aggMap = Collections.singletonMap("a", aggBody); + update.setAggregations(aggMap); DatafeedConfig updatedDatafeed = update.build().apply(datafeed, Collections.emptyMap()); assertThat(updatedDatafeed.getIndices(), equalTo(Collections.singletonList("i_1"))); - assertThat(updatedDatafeed.getParsedAggregations(), - equalTo(new AggregatorFactories.Builder().addAggregator( - AggregationBuilders.histogram("a").interval(300000).field("time").subAggregation(maxTime)))); + assertThat(updatedDatafeed.getAggregations(), equalTo(aggMap)); } public void testApply_GivenRandomUpdates_AssertImmutability() { @@ -208,6 +263,22 @@ public void testApply_GivenRandomUpdates_AssertImmutability() { } } + public void testEmptyQueryMap() { + DatafeedUpdate.Builder builder = new DatafeedUpdate.Builder("empty_query_map"); + ElasticsearchStatusException ex = expectThrows(ElasticsearchStatusException.class, + () -> builder.setQuery(Collections.emptyMap())); + assertThat(ex.status(), equalTo(RestStatus.BAD_REQUEST)); + assertThat(ex.getMessage(), equalTo("Datafeed [empty_query_map] query is not parsable")); + } + + public void testEmptyAggMap() { + DatafeedUpdate.Builder builder = new DatafeedUpdate.Builder("empty_agg_map"); + ElasticsearchStatusException ex = expectThrows(ElasticsearchStatusException.class, + () -> builder.setAggregations(Collections.emptyMap())); + assertThat(ex.status(), 
equalTo(RestStatus.BAD_REQUEST)); + assertThat(ex.getMessage(), equalTo("Datafeed [empty_agg_map] aggregations are not parsable")); + } + @Override protected DatafeedUpdate mutateInstance(DatafeedUpdate instance) { DatafeedUpdate.Builder builder = new DatafeedUpdate.Builder(instance); @@ -243,22 +314,31 @@ protected DatafeedUpdate mutateInstance(DatafeedUpdate instance) { builder.setIndices(indices); break; case 5: - BoolQueryBuilder query = new BoolQueryBuilder(); + Map boolQuery = new HashMap<>(); if (instance.getQuery() != null) { - query.must(instance.getQuery()); + boolQuery.put("must", instance.getQuery()); } - query.filter(new TermQueryBuilder(randomAlphaOfLengthBetween(1, 10), randomAlphaOfLengthBetween(1, 10))); - builder.setQuery(query); + boolQuery.put("filter", + Collections.singletonList( + Collections.singletonMap(TermQueryBuilder.NAME, + Collections.singletonMap(randomAlphaOfLengthBetween(1, 10), randomAlphaOfLengthBetween(1, 10))))); + builder.setQuery(Collections.singletonMap("bool", boolQuery)); break; case 6: if (instance.hasAggregations()) { builder.setAggregations(null); } else { - AggregatorFactories.Builder aggBuilder = new AggregatorFactories.Builder(); String timeField = randomAlphaOfLength(10); - aggBuilder.addAggregator(new DateHistogramAggregationBuilder(timeField).field(timeField).interval(between(10000, 3600000)) - .subAggregation(new MaxAggregationBuilder(timeField).field(timeField))); - builder.setAggregations(aggBuilder); + Map maxTime = Collections.singletonMap(timeField, + Collections.singletonMap("max", Collections.singletonMap("field", timeField))); + Map histoDefinition = new HashMap<>(); + histoDefinition.put("interval", between(10000, 3600000)); + histoDefinition.put("field", timeField); + Map aggBody = new HashMap<>(); + aggBody.put("aggs", maxTime); + aggBody.put("date_histogram", histoDefinition); + Map aggMap = Collections.singletonMap(timeField, aggBody); + builder.setAggregations(aggMap); if (instance.getScriptFields().isEmpty() == false) { builder.setScriptFields(Collections.emptyList()); } diff --git a/x-pack/plugin/deprecation/src/test/java/org/elasticsearch/xpack/deprecation/MlDeprecationChecksTests.java b/x-pack/plugin/deprecation/src/test/java/org/elasticsearch/xpack/deprecation/MlDeprecationChecksTests.java index 6d93ed1873184..bf868c86bae88 100644 --- a/x-pack/plugin/deprecation/src/test/java/org/elasticsearch/xpack/deprecation/MlDeprecationChecksTests.java +++ b/x-pack/plugin/deprecation/src/test/java/org/elasticsearch/xpack/deprecation/MlDeprecationChecksTests.java @@ -22,7 +22,7 @@ protected boolean enableWarningsCheck() { public void testCheckDataFeedQuery() { DatafeedConfig.Builder goodDatafeed = new DatafeedConfig.Builder("good-df", "job-id"); goodDatafeed.setIndices(Collections.singletonList("some-index")); - goodDatafeed.setParsedQuery(new TermQueryBuilder("foo", "bar")); + goodDatafeed.setQuery(Collections.singletonMap(TermQueryBuilder.NAME, Collections.singletonMap("foo", "bar"))); assertNull(MlDeprecationChecks.checkDataFeedQuery(goodDatafeed.build())); DatafeedConfig.Builder deprecatedDatafeed = new DatafeedConfig.Builder("df-with-deprecated-query", "job-id"); diff --git a/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/DelayedDataDetectorIT.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/DelayedDataDetectorIT.java index ddebbe6038f19..aa25cb0619377 100644 --- 
a/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/DelayedDataDetectorIT.java +++ b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/DelayedDataDetectorIT.java @@ -159,7 +159,9 @@ public void testMissingDataDetectionWithAggregationsAndQuery() throws Exception .subAggregation(avgAggregationBuilder) .field("time") .interval(TimeValue.timeValueMinutes(5).millis()))); - datafeedConfigBuilder.setParsedQuery(new RangeQueryBuilder("value").gte(numDocs/2)); + datafeedConfigBuilder.setQuery(Collections.singletonMap(RangeQueryBuilder.NAME, + Collections.singletonMap("value", + Collections.singletonMap(RangeQueryBuilder.GTE_FIELD.getPreferredName(), numDocs/2)))); datafeedConfigBuilder.setFrequency(TimeValue.timeValueMinutes(5)); datafeedConfigBuilder.setDelayedDataCheckConfig(DelayedDataCheckConfig.enabledDelayedDataCheckConfig(TimeValue.timeValueHours(12))); diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/datafeeds_crud.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/datafeeds_crud.yml index 742fc00beda74..5dda4f3def672 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/datafeeds_crud.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/datafeeds_crud.yml @@ -356,9 +356,9 @@ setup: datafeed_id: test-datafeed-aggs-1 - match: { datafeeds.0.datafeed_id: "test-datafeed-aggs-1" } - match: { datafeeds.0.aggregations.histogram_buckets.date_histogram.field: "@timestamp" } - - match: { datafeeds.0.aggregations.histogram_buckets.aggregations.@timestamp.max.field: "@timestamp" } - - match: { datafeeds.0.aggregations.histogram_buckets.aggregations.bytes_in_avg.avg.field: "system.network.in.bytes" } - - match: { datafeeds.0.aggregations.histogram_buckets.aggregations.non_negative_bytes.bucket_script.buckets_path.bytes: "bytes_in_derivative" } + - match: { datafeeds.0.aggregations.histogram_buckets.aggs.@timestamp.max.field: "@timestamp" } + - match: { datafeeds.0.aggregations.histogram_buckets.aggs.bytes_in_avg.avg.field: "system.network.in.bytes" } + - match: { datafeeds.0.aggregations.histogram_buckets.aggs.non_negative_bytes.bucket_script.buckets_path.bytes: "bytes_in_derivative" } --- "Test delete datafeed": From 92ef753b56d25215f19098cbce91553fcae74c64 Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Tue, 19 Feb 2019 09:53:36 -0500 Subject: [PATCH 23/54] Allow retention lease operations under blocks (#39089) This commit allows manipulating retention leases under blocks. 
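For context while reading the diff below: the hook involved is indexBlockLevel() on the transport replication actions, whose javadoc states that returning null means no index-level blocks need to be checked. A minimal, self-contained sketch of that pattern (the class and method names here are illustrative only, not the real Elasticsearch types):

    // Toy model of the hook: a subclass that returns null from indexBlockLevel()
    // opts out of the index-level block check entirely.
    class ToyReplicationAction {
        protected String indexBlockLevel() {
            return "WRITE"; // ordinary writes are rejected while a write block is active
        }

        final boolean blockedBy(java.util.Set<String> activeBlocks) {
            final String level = indexBlockLevel();
            return level != null && activeBlocks.contains(level); // null => never blocked
        }
    }

    class ToyRetentionLeaseSync extends ToyReplicationAction {
        @Override
        protected String indexBlockLevel() {
            return null; // run even while the index is blocked, as this commit does
        }
    }

The real change below applies that override in RetentionLeaseSyncAction (and widens indexBlockLevel() from protected to public), and the tests assert that both retention lease sync actions report a null index block level.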
--- .../TransportResyncReplicationAction.java | 2 +- .../TransportReplicationAction.java | 2 +- .../replication/TransportWriteAction.java | 2 +- .../index/seqno/RetentionLeaseSyncAction.java | 10 +- .../TransportReplicationActionTests.java | 4 +- ...ReplicationAllPermitsAcquisitionTests.java | 2 +- ...tentionLeaseBackgroundSyncActionTests.java | 27 ++++ .../index/seqno/RetentionLeaseIT.java | 126 +++++++++++++++++- .../seqno/RetentionLeaseSyncActionTests.java | 27 ++++ 9 files changed, 193 insertions(+), 9 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/action/resync/TransportResyncReplicationAction.java b/server/src/main/java/org/elasticsearch/action/resync/TransportResyncReplicationAction.java index bd996377c39c1..e9a6e7b48152d 100644 --- a/server/src/main/java/org/elasticsearch/action/resync/TransportResyncReplicationAction.java +++ b/server/src/main/java/org/elasticsearch/action/resync/TransportResyncReplicationAction.java @@ -110,7 +110,7 @@ protected ClusterBlockLevel globalBlockLevel() { } @Override - protected ClusterBlockLevel indexBlockLevel() { + public ClusterBlockLevel indexBlockLevel() { // resync should never be blocked because it's an internal action return null; } diff --git a/server/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java b/server/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java index a8c187745ac4a..326f7bacdb8f6 100644 --- a/server/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java +++ b/server/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java @@ -222,7 +222,7 @@ protected ClusterBlockLevel globalBlockLevel() { * Index level block to check before request execution. Returning null means that no blocks need to be checked. 
*/ @Nullable - protected ClusterBlockLevel indexBlockLevel() { + public ClusterBlockLevel indexBlockLevel() { return null; } diff --git a/server/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java b/server/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java index 279a616160000..f44694f55d960 100644 --- a/server/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java +++ b/server/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java @@ -245,7 +245,7 @@ protected ClusterBlockLevel globalBlockLevel() { } @Override - protected ClusterBlockLevel indexBlockLevel() { + public ClusterBlockLevel indexBlockLevel() { return ClusterBlockLevel.WRITE; } diff --git a/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseSyncAction.java b/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseSyncAction.java index 760271e53ee1e..4cd11de4574a0 100644 --- a/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseSyncAction.java +++ b/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseSyncAction.java @@ -31,6 +31,7 @@ import org.elasticsearch.action.support.replication.ReplicationResponse; import org.elasticsearch.action.support.replication.TransportWriteAction; import org.elasticsearch.cluster.action.shard.ShardStateAction; +import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; @@ -127,7 +128,7 @@ protected WritePrimaryResult shardOperationOnPrimary( Objects.requireNonNull(request); Objects.requireNonNull(primary); primary.persistRetentionLeases(); - return new WritePrimaryResult<>(request, new Response(), null, null, primary, logger); + return new WritePrimaryResult<>(request, new Response(), null, null, primary, getLogger()); } @Override @@ -138,7 +139,12 @@ protected WriteReplicaResult shardOperationOnReplica( Objects.requireNonNull(replica); replica.updateRetentionLeasesOnReplica(request.getRetentionLeases()); replica.persistRetentionLeases(); - return new WriteReplicaResult<>(request, null, null, replica, logger); + return new WriteReplicaResult<>(request, null, null, replica, getLogger()); + } + + @Override + public ClusterBlockLevel indexBlockLevel() { + return null; } public static final class Request extends ReplicatedWriteRequest { diff --git a/server/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java b/server/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java index 41a300c28f3a9..110ab9bcb99a2 100644 --- a/server/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java @@ -221,7 +221,7 @@ protected ClusterBlockLevel globalBlockLevel() { } @Override - protected ClusterBlockLevel indexBlockLevel() { + public ClusterBlockLevel indexBlockLevel() { return globalBlock == false ? ClusterBlockLevel.WRITE : null; } }; @@ -305,7 +305,7 @@ protected ClusterBlockLevel globalBlockLevel() { } @Override - protected ClusterBlockLevel indexBlockLevel() { + public ClusterBlockLevel indexBlockLevel() { return globalBlock == false ? 
ClusterBlockLevel.WRITE : null; } }; diff --git a/server/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationAllPermitsAcquisitionTests.java b/server/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationAllPermitsAcquisitionTests.java index 8cad76bcdfe5e..1cb1bfde34ea8 100644 --- a/server/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationAllPermitsAcquisitionTests.java +++ b/server/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationAllPermitsAcquisitionTests.java @@ -459,7 +459,7 @@ protected ClusterBlockLevel globalBlockLevel() { } @Override - protected ClusterBlockLevel indexBlockLevel() { + public ClusterBlockLevel indexBlockLevel() { return globalBlock == false ? ClusterBlockLevel.WRITE : super.indexBlockLevel(); } diff --git a/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseBackgroundSyncActionTests.java b/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseBackgroundSyncActionTests.java index 4567f3e382337..6ad7d5039ae8b 100644 --- a/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseBackgroundSyncActionTests.java +++ b/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseBackgroundSyncActionTests.java @@ -228,4 +228,31 @@ protected Logger getLogger() { assertTrue(invoked.get()); } + public void testBlocks() { + final IndicesService indicesService = mock(IndicesService.class); + + final Index index = new Index("index", "uuid"); + final IndexService indexService = mock(IndexService.class); + when(indicesService.indexServiceSafe(index)).thenReturn(indexService); + + final int id = randomIntBetween(0, 4); + final IndexShard indexShard = mock(IndexShard.class); + when(indexService.getShard(id)).thenReturn(indexShard); + + final ShardId shardId = new ShardId(index, id); + when(indexShard.shardId()).thenReturn(shardId); + + final RetentionLeaseBackgroundSyncAction action = new RetentionLeaseBackgroundSyncAction( + Settings.EMPTY, + transportService, + clusterService, + indicesService, + threadPool, + shardStateAction, + new ActionFilters(Collections.emptySet()), + new IndexNameExpressionResolver()); + + assertNull(action.indexBlockLevel()); + } + } diff --git a/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseIT.java b/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseIT.java index a05d383eee080..ee6cab9a6872b 100644 --- a/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseIT.java +++ b/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseIT.java @@ -44,6 +44,10 @@ import java.util.Map; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicReference; +import java.util.function.BiConsumer; +import java.util.function.Consumer; import java.util.stream.Collectors; import java.util.stream.Stream; @@ -266,7 +270,7 @@ public void testBackgroundRetentionLeaseSync() throws Exception { final Settings settings = Settings.builder() .put("index.number_of_shards", 1) .put("index.number_of_replicas", numberOfReplicas) - .put(IndexService.RETENTION_LEASE_SYNC_INTERVAL_SETTING.getKey(), "1s") + .put(IndexService.RETENTION_LEASE_SYNC_INTERVAL_SETTING.getKey(), TimeValue.timeValueSeconds(1)) .build(); createIndex("index", settings); ensureGreen("index"); @@ -370,4 +374,124 @@ public void testRetentionLeasesSyncOnRecovery() throws Exception { } } + public void 
testCanAddRetentionLeaseUnderBlock() throws InterruptedException { + final String idForInitialRetentionLease = randomAlphaOfLength(8); + runUnderBlockTest( + idForInitialRetentionLease, + randomLongBetween(0, Long.MAX_VALUE), + (primary, listener) -> { + final String nextId = randomValueOtherThan(idForInitialRetentionLease, () -> randomAlphaOfLength(8)); + final long nextRetainingSequenceNumber = randomLongBetween(0, Long.MAX_VALUE); + final String nextSource = randomAlphaOfLength(8); + primary.addRetentionLease(nextId, nextRetainingSequenceNumber, nextSource, listener); + }, + primary -> {}); + } + + public void testCanRenewRetentionLeaseUnderBlock() throws InterruptedException { + final String idForInitialRetentionLease = randomAlphaOfLength(8); + final long initialRetainingSequenceNumber = randomLongBetween(0, Long.MAX_VALUE); + final AtomicReference retentionLease = new AtomicReference<>(); + runUnderBlockTest( + idForInitialRetentionLease, + initialRetainingSequenceNumber, + (primary, listener) -> { + final long nextRetainingSequenceNumber = randomLongBetween(initialRetainingSequenceNumber, Long.MAX_VALUE); + final String nextSource = randomAlphaOfLength(8); + retentionLease.set(primary.renewRetentionLease(idForInitialRetentionLease, nextRetainingSequenceNumber, nextSource)); + listener.onResponse(new ReplicationResponse()); + }, + primary -> { + try { + /* + * If the background renew was able to execute, then the retention leases were persisted to disk. There is no other + * way for the current retention leases to end up written to disk so we assume that if they are written to disk, it + * implies that the background sync was able to execute under a block. + */ + assertBusy(() -> assertThat(primary.loadRetentionLeases().leases(), contains(retentionLease.get()))); + } catch (final Exception e) { + fail(e.toString()); + } + }); + + } + + public void testCanRemoveRetentionLeasesUnderBlock() throws InterruptedException { + final String idForInitialRetentionLease = randomAlphaOfLength(8); + runUnderBlockTest( + idForInitialRetentionLease, + randomLongBetween(0, Long.MAX_VALUE), + (primary, listener) -> primary.removeRetentionLease(idForInitialRetentionLease, listener), + indexShard -> {}); + } + + private void runUnderBlockTest( + final String idForInitialRetentionLease, + final long initialRetainingSequenceNumber, + final BiConsumer> indexShard, + final Consumer afterSync) throws InterruptedException { + final Settings settings = Settings.builder() + .put("index.number_of_shards", 1) + .put("index.number_of_replicas", 0) + .put(IndexService.RETENTION_LEASE_SYNC_INTERVAL_SETTING.getKey(), TimeValue.timeValueSeconds(1)) + .build(); + assertAcked(prepareCreate("index").setSettings(settings)); + ensureGreen("index"); + + final String primaryShardNodeId = clusterService().state().routingTable().index("index").shard(0).primaryShard().currentNodeId(); + final String primaryShardNodeName = clusterService().state().nodes().get(primaryShardNodeId).getName(); + final IndexShard primary = internalCluster() + .getInstance(IndicesService.class, primaryShardNodeName) + .getShardOrNull(new ShardId(resolveIndex("index"), 0)); + + final String id = idForInitialRetentionLease; + final long retainingSequenceNumber = initialRetainingSequenceNumber; + final String source = randomAlphaOfLength(8); + final CountDownLatch latch = new CountDownLatch(1); + final ActionListener listener = ActionListener.wrap(r -> latch.countDown(), e -> fail(e.toString())); + primary.addRetentionLease(id, retainingSequenceNumber, 
source, listener); + latch.await(); + + final String block = randomFrom("read_only", "read_only_allow_delete", "read", "write", "metadata"); + + client() + .admin() + .indices() + .prepareUpdateSettings("index") + .setSettings(Settings.builder().put("index.blocks." + block, true).build()) + .get(); + + try { + final CountDownLatch actionLatch = new CountDownLatch(1); + final AtomicBoolean success = new AtomicBoolean(); + + indexShard.accept( + primary, + new ActionListener() { + + @Override + public void onResponse(final ReplicationResponse replicationResponse) { + success.set(true); + actionLatch.countDown(); + } + + @Override + public void onFailure(final Exception e) { + fail(e.toString()); + } + + }); + actionLatch.await(); + assertTrue(success.get()); + afterSync.accept(primary); + } finally { + client() + .admin() + .indices() + .prepareUpdateSettings("index") + .setSettings(Settings.builder().putNull("index.blocks." + block).build()) + .get(); + } + } + } diff --git a/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseSyncActionTests.java b/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseSyncActionTests.java index 80baa23a4d7ac..9b9ad6a0962c1 100644 --- a/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseSyncActionTests.java +++ b/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseSyncActionTests.java @@ -228,4 +228,31 @@ protected Logger getLogger() { assertTrue(invoked.get()); } + public void testBlocks() { + final IndicesService indicesService = mock(IndicesService.class); + + final Index index = new Index("index", "uuid"); + final IndexService indexService = mock(IndexService.class); + when(indicesService.indexServiceSafe(index)).thenReturn(indexService); + + final int id = randomIntBetween(0, 4); + final IndexShard indexShard = mock(IndexShard.class); + when(indexService.getShard(id)).thenReturn(indexShard); + + final ShardId shardId = new ShardId(index, id); + when(indexShard.shardId()).thenReturn(shardId); + + final RetentionLeaseSyncAction action = new RetentionLeaseSyncAction( + Settings.EMPTY, + transportService, + clusterService, + indicesService, + threadPool, + shardStateAction, + new ActionFilters(Collections.emptySet()), + new IndexNameExpressionResolver()); + + assertNull(action.indexBlockLevel()); + } + } From ed98ee087becc32ca579857fc6744eb02ddb6436 Mon Sep 17 00:00:00 2001 From: David Roberts Date: Tue, 19 Feb 2019 15:11:10 +0000 Subject: [PATCH 24/54] [ML] Stop the ML memory tracker before closing node (#39111) The ML memory tracker does searches against ML results and config indices. These searches can be asynchronous, and if they are running while the node is closing then they can cause problems for other components. This change adds a stop() method to the MlMemoryTracker that waits for in-flight searches to complete. Once stop() has returned the MlMemoryTracker will not kick off any new searches. The MlLifeCycleService now calls MlMemoryTracker.stop() before stopping the node.
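A minimal, self-contained sketch of that guard pattern, using only java.util.concurrent.Phaser (the class and method names here are illustrative, not the real MlMemoryTracker API):

    import java.util.concurrent.Phaser;

    // Sketch only: stop() blocks until in-flight work has finished, and any work
    // attempted after stop() has returned is refused.
    class InFlightSearchGuard {
        private final Phaser phaser = new Phaser(1); // one party for the guard itself

        boolean tryStartSearch() {
            if (phaser.register() != 0) {     // a phase above 0 means stop() already completed
                phaser.arriveAndDeregister(); // undo the registration and refuse the search
                return false;
            }
            return true;
        }

        void searchFinished() {
            phaser.arriveAndDeregister();     // invoked when an in-flight search completes
        }

        void stop() {
            phaser.arriveAndAwaitAdvance();   // waits until every registered search has finished
        }
    }

The real implementation below follows the same register/arrive/deregister discipline around refreshJobMemory().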
Fixes #37117 --- .../xpack/ml/MachineLearning.java | 4 +- .../xpack/ml/MlLifeCycleService.java | 12 +++-- .../xpack/ml/process/MlMemoryTracker.java | 50 ++++++++++++++++--- .../ml/process/MlMemoryTrackerTests.java | 15 ++++++ 4 files changed, 68 insertions(+), 13 deletions(-) diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java index e5376bccb1745..6b8e9e44f5996 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java @@ -435,10 +435,10 @@ public Collection createComponents(Client client, ClusterService cluster DatafeedManager datafeedManager = new DatafeedManager(threadPool, client, clusterService, datafeedJobBuilder, System::currentTimeMillis, auditor, autodetectProcessManager); this.datafeedManager.set(datafeedManager); - MlLifeCycleService mlLifeCycleService = new MlLifeCycleService(environment, clusterService, datafeedManager, - autodetectProcessManager); MlMemoryTracker memoryTracker = new MlMemoryTracker(settings, clusterService, threadPool, jobManager, jobResultsProvider); this.memoryTracker.set(memoryTracker); + MlLifeCycleService mlLifeCycleService = new MlLifeCycleService(environment, clusterService, datafeedManager, + autodetectProcessManager, memoryTracker); // This object's constructor attaches to the license state, so there's no need to retain another reference to it new InvalidLicenseEnforcer(getLicenseState(), threadPool, datafeedManager, autodetectProcessManager); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlLifeCycleService.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlLifeCycleService.java index 8005912107ad9..06d9b749e1a89 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlLifeCycleService.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlLifeCycleService.java @@ -9,6 +9,7 @@ import org.elasticsearch.common.component.LifecycleListener; import org.elasticsearch.env.Environment; import org.elasticsearch.xpack.ml.datafeed.DatafeedManager; +import org.elasticsearch.xpack.ml.process.MlMemoryTracker; import org.elasticsearch.xpack.ml.process.NativeController; import org.elasticsearch.xpack.ml.process.NativeControllerHolder; import org.elasticsearch.xpack.ml.job.process.autodetect.AutodetectProcessManager; @@ -20,16 +21,14 @@ public class MlLifeCycleService { private final Environment environment; private final DatafeedManager datafeedManager; private final AutodetectProcessManager autodetectProcessManager; - - public MlLifeCycleService(Environment environment, ClusterService clusterService) { - this(environment, clusterService, null, null); - } + private final MlMemoryTracker memoryTracker; public MlLifeCycleService(Environment environment, ClusterService clusterService, DatafeedManager datafeedManager, - AutodetectProcessManager autodetectProcessManager) { + AutodetectProcessManager autodetectProcessManager, MlMemoryTracker memoryTracker) { this.environment = environment; this.datafeedManager = datafeedManager; this.autodetectProcessManager = autodetectProcessManager; + this.memoryTracker = memoryTracker; clusterService.addLifecycleListener(new LifecycleListener() { @Override public void beforeStop() { @@ -59,5 +58,8 @@ public synchronized void stop() { } catch (IOException e) { // We're stopping anyway, so don't let this complicate the shutdown 
sequence } + if (memoryTracker != null) { + memoryTracker.stop(); + } } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/MlMemoryTracker.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/MlMemoryTracker.java index 441317bcbe207..50d2515046a22 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/MlMemoryTracker.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/MlMemoryTracker.java @@ -32,6 +32,7 @@ import java.util.Iterator; import java.util.List; import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.Phaser; import java.util.stream.Collectors; /** @@ -55,6 +56,7 @@ public class MlMemoryTracker implements LocalNodeMasterListener { private final ClusterService clusterService; private final JobManager jobManager; private final JobResultsProvider jobResultsProvider; + private final Phaser stopPhaser; private volatile boolean isMaster; private volatile Instant lastUpdateTime; private volatile Duration reassignmentRecheckInterval; @@ -65,6 +67,7 @@ public MlMemoryTracker(Settings settings, ClusterService clusterService, ThreadP this.clusterService = clusterService; this.jobManager = jobManager; this.jobResultsProvider = jobResultsProvider; + this.stopPhaser = new Phaser(1); setReassignmentRecheckInterval(PersistentTasksClusterService.CLUSTER_TASKS_ALLOCATION_RECHECK_INTERVAL_SETTING.get(settings)); clusterService.addLocalNodeMasterListener(this); clusterService.getClusterSettings().addSettingsUpdateConsumer( @@ -89,6 +92,23 @@ public void offMaster() { lastUpdateTime = null; } + /** + * Wait for all outstanding searches to complete. + * After returning, no new searches can be started. + */ + public void stop() { + logger.trace("ML memory tracker stop called"); + // We never terminate the phaser + assert stopPhaser.isTerminated() == false; + // If there are no registered parties or no unarrived parties then there is a flaw + // in the register/arrive/unregister logic in another method that uses the phaser + assert stopPhaser.getRegisteredParties() > 0; + assert stopPhaser.getUnarrivedParties() > 0; + stopPhaser.arriveAndAwaitAdvance(); + assert stopPhaser.getPhase() > 0; + logger.debug("ML memory tracker stopped"); + } + @Override public String executorName() { return MachineLearning.UTILITY_THREAD_POOL_NAME; @@ -146,13 +166,13 @@ public boolean asyncRefresh() { try { ActionListener listener = ActionListener.wrap( aVoid -> logger.trace("Job memory requirement refresh request completed successfully"), - e -> logger.error("Failed to refresh job memory requirements", e) + e -> logger.warn("Failed to refresh job memory requirements", e) ); threadPool.executor(executorName()).execute( () -> refresh(clusterService.state().getMetaData().custom(PersistentTasksCustomMetaData.TYPE), listener)); return true; } catch (EsRejectedExecutionException e) { - logger.debug("Couldn't schedule ML memory update - node might be shutting down", e); + logger.warn("Couldn't schedule ML memory update - node might be shutting down", e); } } @@ -246,25 +266,43 @@ public void refreshJobMemory(String jobId, ActionListener listener) { return; } + // The phaser prevents searches being started after the memory tracker's stop() method has returned + if (stopPhaser.register() != 0) { + // Phases above 0 mean we've been stopped, so don't do any operations that involve external interaction + stopPhaser.arriveAndDeregister(); + listener.onFailure(new EsRejectedExecutionException("Couldn't run ML memory 
update - node is shutting down")); + return; + } + ActionListener phaserListener = ActionListener.wrap( + r -> { + stopPhaser.arriveAndDeregister(); + listener.onResponse(r); + }, + e -> { + stopPhaser.arriveAndDeregister(); + listener.onFailure(e); + } + ); + try { jobResultsProvider.getEstablishedMemoryUsage(jobId, null, null, establishedModelMemoryBytes -> { if (establishedModelMemoryBytes <= 0L) { - setJobMemoryToLimit(jobId, listener); + setJobMemoryToLimit(jobId, phaserListener); } else { Long memoryRequirementBytes = establishedModelMemoryBytes + Job.PROCESS_MEMORY_OVERHEAD.getBytes(); memoryRequirementByJob.put(jobId, memoryRequirementBytes); - listener.onResponse(memoryRequirementBytes); + phaserListener.onResponse(memoryRequirementBytes); } }, e -> { logger.error("[" + jobId + "] failed to calculate job established model memory requirement", e); - setJobMemoryToLimit(jobId, listener); + setJobMemoryToLimit(jobId, phaserListener); } ); } catch (Exception e) { logger.error("[" + jobId + "] failed to calculate job established model memory requirement", e); - setJobMemoryToLimit(jobId, listener); + setJobMemoryToLimit(jobId, phaserListener); } } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/process/MlMemoryTrackerTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/process/MlMemoryTrackerTests.java index 3e54994ac043b..1dd2ba923ef00 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/process/MlMemoryTrackerTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/process/MlMemoryTrackerTests.java @@ -10,6 +10,7 @@ import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; +import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; import org.elasticsearch.persistent.PersistentTasksClusterService; import org.elasticsearch.persistent.PersistentTasksCustomMetaData; import org.elasticsearch.test.ESTestCase; @@ -29,6 +30,7 @@ import java.util.concurrent.atomic.AtomicReference; import java.util.function.Consumer; +import static org.hamcrest.CoreMatchers.instanceOf; import static org.mockito.Matchers.any; import static org.mockito.Matchers.eq; import static org.mockito.Mockito.anyString; @@ -157,6 +159,19 @@ public void testRefreshOne() { assertNull(memoryTracker.getJobMemoryRequirement(jobId)); } + public void testStop() { + + memoryTracker.onMaster(); + memoryTracker.stop(); + + AtomicReference exception = new AtomicReference<>(); + memoryTracker.refreshJobMemory("job", ActionListener.wrap(ESTestCase::assertNull, exception::set)); + + assertNotNull(exception.get()); + assertThat(exception.get(), instanceOf(EsRejectedExecutionException.class)); + assertEquals("Couldn't run ML memory update - node is shutting down", exception.get().getMessage()); + } + private PersistentTasksCustomMetaData.PersistentTask makeTestTask(String jobId) { return new PersistentTasksCustomMetaData.PersistentTask<>("job-" + jobId, MlTasks.JOB_TASK_NAME, new OpenJobAction.JobParams(jobId), 0, PersistentTasksCustomMetaData.INITIAL_ASSIGNMENT); From 97971380bd906383b5e453d1a802609119223167 Mon Sep 17 00:00:00 2001 From: Ioannis Kakavas Date: Tue, 19 Feb 2019 17:43:24 +0200 Subject: [PATCH 25/54] Remove BCryptTests (#39098) This test was added to verify that we fixed a specific behavior in Bcrypt and hasn't been running for almost 4 years now. 
--- .../security/authc/support/BCryptTests.java | 68 ------------------- 1 file changed, 68 deletions(-) delete mode 100644 x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/BCryptTests.java diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/BCryptTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/BCryptTests.java deleted file mode 100644 index 40c4a8ed81646..0000000000000 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/BCryptTests.java +++ /dev/null @@ -1,68 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ -package org.elasticsearch.xpack.security.authc.support; - -import org.elasticsearch.common.settings.SecureString; -import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.xpack.core.security.authc.support.BCrypt; - -import java.util.ArrayList; -import java.util.List; -import java.util.concurrent.Callable; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; -import java.util.concurrent.Future; -import java.util.concurrent.atomic.AtomicBoolean; - -import static org.hamcrest.Matchers.is; - -/** - * Tests for the Bcrypt implementation specifically around modifications we have made - */ -public class BCryptTests extends ESTestCase { - /* - * This test checks that the BCrypt implementation can verify passwords correctly when being invoked from multiple - * threads all the time. This attempts to simulate authentication of many clients at once (without a cache). - * - * This test can be used to reproduce the issue in https://github.com/elastic/x-plugins/issues/589, but it is not - * 100% reliable unless memory parameters are changed such as lowering the heap size to something really small like - * 16M and the test is really slow since the issue depends on garbage collection and object finalization. - */ - @AwaitsFix(bugUrl = "need a better way to test this") - public void testUnderLoad() throws Exception { - final String password = randomAlphaOfLengthBetween(10, 32); - final String bcrypt = BCrypt.hashpw(new SecureString(password), BCrypt.gensalt()); - - ExecutorService threadPoolExecutor = Executors.newFixedThreadPool(100); - try { - List> callables = new ArrayList<>(100); - - final AtomicBoolean failed = new AtomicBoolean(false); - for (int i = 0; i < 100; i++) { - callables.add(new Callable() { - @Override - public Boolean call() throws Exception { - for (int i = 0; i < 10000 && !failed.get(); i++) { - if (BCrypt.checkpw(new SecureString(password), bcrypt) == false) { - failed.set(true); - return false; - } - } - return true; - } - }); - } - - List> futures = threadPoolExecutor.invokeAll(callables); - for (Future future : futures) { - assertThat(future.get(), is(true)); - } - } finally { - threadPoolExecutor.shutdownNow(); - } - - } -} From 040922831f611a590f60a84de1f0aafcd3c5c0c8 Mon Sep 17 00:00:00 2001 From: Ioannis Kakavas Date: Tue, 19 Feb 2019 19:31:25 +0200 Subject: [PATCH 26/54] Disable date parsing test in non english locale (#39052) This ensures we do not attempt to parse non english locale dates in FIPS mode. 
The error, originally assumed to affect only Joda, affects Java time in the same manner and manifests only with the version of BouncyCastle FIPS certified provider we use in tests. The upstream issue https://github.com/bcgit/bc-java/issues/405 indicates that the behavior is resolved in later versions of the BouncyCastle library and should be tested again when the new versions become FIPS 140 certified --- .../ingest/common/DateProcessorTests.java | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DateProcessorTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DateProcessorTests.java index 7582056e0b6b6..c390f14029c39 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DateProcessorTests.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DateProcessorTests.java @@ -126,7 +126,6 @@ public void testInvalidJavaPattern() { } public void testJavaPatternLocale() { - // @AwaitsFix(bugUrl="https://github.com/elastic/elasticsearch/issues/31724") assumeFalse("Can't run in a FIPS JVM, Joda parse date error", inFipsJvm()); DateProcessor dateProcessor = new DateProcessor(randomAlphaOfLength(10), templatize(ZoneId.of("Europe/Amsterdam")), templatize(Locale.ITALIAN), @@ -138,6 +137,18 @@ public void testJavaPatternLocale() { assertThat(ingestDocument.getFieldValue("date_as_date", String.class), equalTo("2010-06-12T00:00:00.000+02:00")); } + public void testJavaPatternEnglishLocale() { + // Since testJavaPatternLocale is muted in FIPS mode, test that we can correctly parse dates in english + DateProcessor dateProcessor = new DateProcessor(randomAlphaOfLength(10), + templatize(ZoneId.of("Europe/Amsterdam")), templatize(Locale.ENGLISH), + "date_as_string", Collections.singletonList("yyyy dd MMMM"), "date_as_date"); + Map document = new HashMap<>(); + document.put("date_as_string", "2010 12 June"); + IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), document); + dateProcessor.execute(ingestDocument); + assertThat(ingestDocument.getFieldValue("date_as_date", String.class), equalTo("2010-06-12T00:00:00.000+02:00")); + } + public void testJavaPatternDefaultYear() { String format = randomFrom("dd/MM", "8dd/MM"); DateProcessor dateProcessor = new DateProcessor(randomAlphaOfLength(10), From 97efec36e173b0ab3a93f19f203b7655f15e7d29 Mon Sep 17 00:00:00 2001 From: Tal Levy Date: Tue, 19 Feb 2019 09:37:02 -0800 Subject: [PATCH 27/54] fix RethrottleTests retry (#38978) the RethrottleTests assumed that tasks that were unprepared to rethrottle would bubble up into the Rethrottle response as an ElasticsearchException wrapping an IllegalArgumentException. This seems to have changed to potentially involve further levels of wrapping. 
This change makes the retry logic more resilient to arbitrary nesting of the underlying IllegalArgumentException --- .../index/reindex/RethrottleTests.java | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RethrottleTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RethrottleTests.java index 6572313308b32..58067cd2cdbbf 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RethrottleTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RethrottleTests.java @@ -20,6 +20,7 @@ package org.elasticsearch.index.reindex; import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionFuture; import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse; import org.elasticsearch.action.admin.cluster.node.tasks.list.TaskGroup; @@ -37,6 +38,7 @@ import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.both; import static org.hamcrest.Matchers.empty; +import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.hasSize; @@ -191,13 +193,15 @@ private ListTasksResponse rethrottleTask(TaskId taskToRethrottle, float newReque assertThat(rethrottleResponse.getTasks(), hasSize(1)); response.set(rethrottleResponse); } catch (ElasticsearchException e) { - if (e.getCause() instanceof IllegalArgumentException) { - // We want to retry in this case so we throw an assertion error - logger.info("caught unprepared task, retrying until prepared"); - throw new AssertionError("Rethrottle request for task [" + taskToRethrottle.getId() + "] failed", e); - } else { + Throwable unwrapped = ExceptionsHelper.unwrap(e, IllegalArgumentException.class); + if (unwrapped == null) { throw e; } + // We want to retry in this case so we throw an assertion error + assertThat(unwrapped.getMessage(), equalTo("task [" + taskToRethrottle.getId() + + "] has not yet been initialized to the point where it knows how to rethrottle itself")); + logger.info("caught unprepared task, retrying until prepared"); + throw new AssertionError("Rethrottle request for task [" + taskToRethrottle.getId() + "] failed", e); } }); From 106f59012de8f3b4932b53719f754612269fb04b Mon Sep 17 00:00:00 2001 From: Like Date: Wed, 20 Feb 2019 01:44:07 +0800 Subject: [PATCH 28/54] Migrate Streamable to Writeable for cluster block package (#37391) --- ...TransportVerifyShardBeforeCloseAction.java | 2 +- .../cluster/block/ClusterBlock.java | 46 +++++++++---------- .../cluster/block/ClusterBlockException.java | 2 +- .../cluster/block/ClusterBlocks.java | 2 +- .../cluster/block/ClusterBlockTests.java | 6 +-- 5 files changed, 27 insertions(+), 31 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/close/TransportVerifyShardBeforeCloseAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/close/TransportVerifyShardBeforeCloseAction.java index 9ae7d065dd949..be757a02c3982 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/close/TransportVerifyShardBeforeCloseAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/close/TransportVerifyShardBeforeCloseAction.java @@ -158,7 +158,7 @@ public String toString() { @Override public void readFrom(final StreamInput in) throws IOException { 
super.readFrom(in); - clusterBlock = ClusterBlock.readClusterBlock(in); + clusterBlock = new ClusterBlock(in); } @Override diff --git a/server/src/main/java/org/elasticsearch/cluster/block/ClusterBlock.java b/server/src/main/java/org/elasticsearch/cluster/block/ClusterBlock.java index 497e296d9eeb6..ec5d7b178e8de 100644 --- a/server/src/main/java/org/elasticsearch/cluster/block/ClusterBlock.java +++ b/server/src/main/java/org/elasticsearch/cluster/block/ClusterBlock.java @@ -24,6 +24,7 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Streamable; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.ToXContentFragment; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.rest.RestStatus; @@ -34,7 +35,7 @@ import java.util.Locale; import java.util.Objects; -public class ClusterBlock implements Streamable, ToXContentFragment { +public class ClusterBlock implements Streamable, Writeable, ToXContentFragment { private int id; private @Nullable String uuid; @@ -45,7 +46,24 @@ public class ClusterBlock implements Streamable, ToXContentFragment { private boolean allowReleaseResources; private RestStatus status; - private ClusterBlock() { + public ClusterBlock(StreamInput in) throws IOException { + id = in.readVInt(); + if (in.getVersion().onOrAfter(Version.V_6_7_0)) { + uuid = in.readOptionalString(); + } else { + uuid = null; + } + description = in.readString(); + final int len = in.readVInt(); + ArrayList levels = new ArrayList<>(len); + for (int i = 0; i < len; i++) { + levels.add(in.readEnum(ClusterBlockLevel.class)); + } + this.levels = EnumSet.copyOf(levels); + retryable = in.readBoolean(); + disableStatePersistence = in.readBoolean(); + status = RestStatus.readFrom(in); + allowReleaseResources = in.readBoolean(); } public ClusterBlock(int id, String description, boolean retryable, boolean disableStatePersistence, @@ -129,31 +147,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws return builder; } - public static ClusterBlock readClusterBlock(StreamInput in) throws IOException { - ClusterBlock block = new ClusterBlock(); - block.readFrom(in); - return block; - } - @Override public void readFrom(StreamInput in) throws IOException { - id = in.readVInt(); - if (in.getVersion().onOrAfter(Version.V_6_7_0)) { - uuid = in.readOptionalString(); - } else { - uuid = null; - } - description = in.readString(); - final int len = in.readVInt(); - ArrayList levels = new ArrayList<>(len); - for (int i = 0; i < len; i++) { - levels.add(in.readEnum(ClusterBlockLevel.class)); - } - this.levels = EnumSet.copyOf(levels); - retryable = in.readBoolean(); - disableStatePersistence = in.readBoolean(); - status = RestStatus.readFrom(in); - allowReleaseResources = in.readBoolean(); + throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); } @Override diff --git a/server/src/main/java/org/elasticsearch/cluster/block/ClusterBlockException.java b/server/src/main/java/org/elasticsearch/cluster/block/ClusterBlockException.java index 9ebb2286895fd..b24ec795bc474 100644 --- a/server/src/main/java/org/elasticsearch/cluster/block/ClusterBlockException.java +++ b/server/src/main/java/org/elasticsearch/cluster/block/ClusterBlockException.java @@ -43,7 +43,7 @@ public ClusterBlockException(StreamInput in) throws IOException { int totalBlocks = in.readVInt(); Set blocks = new 
HashSet<>(totalBlocks); for (int i = 0; i < totalBlocks;i++) { - blocks.add(ClusterBlock.readClusterBlock(in)); + blocks.add(new ClusterBlock(in)); } this.blocks = unmodifiableSet(blocks); } diff --git a/server/src/main/java/org/elasticsearch/cluster/block/ClusterBlocks.java b/server/src/main/java/org/elasticsearch/cluster/block/ClusterBlocks.java index 0de7bce115943..c46bc291e7397 100644 --- a/server/src/main/java/org/elasticsearch/cluster/block/ClusterBlocks.java +++ b/server/src/main/java/org/elasticsearch/cluster/block/ClusterBlocks.java @@ -305,7 +305,7 @@ private static Set readBlockSet(StreamInput in) throws IOException int totalBlocks = in.readVInt(); Set blocks = new HashSet<>(totalBlocks); for (int i = 0; i < totalBlocks;i++) { - blocks.add(ClusterBlock.readClusterBlock(in)); + blocks.add(new ClusterBlock(in)); } return unmodifiableSet(blocks); } diff --git a/server/src/test/java/org/elasticsearch/cluster/block/ClusterBlockTests.java b/server/src/test/java/org/elasticsearch/cluster/block/ClusterBlockTests.java index 3fdb97b8c3558..4bd6c15853aa0 100644 --- a/server/src/test/java/org/elasticsearch/cluster/block/ClusterBlockTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/block/ClusterBlockTests.java @@ -56,7 +56,7 @@ public void testSerialization() throws Exception { StreamInput in = out.bytes().streamInput(); in.setVersion(version); - ClusterBlock result = ClusterBlock.readClusterBlock(in); + ClusterBlock result = new ClusterBlock(in); assertClusterBlockEquals(clusterBlock, result); } @@ -74,7 +74,7 @@ public void testBwcSerialization() throws Exception { expected.writeTo(out); // Deserialize and check the cluster block - final ClusterBlock actual = ClusterBlock.readClusterBlock(out.bytes().streamInput()); + final ClusterBlock actual = new ClusterBlock(out.bytes().streamInput()); assertClusterBlockEquals(expected, actual); } @@ -90,7 +90,7 @@ public void testBwcSerialization() throws Exception { // Deserialize and check the cluster block final StreamInput in = out.bytes().streamInput(); in.setVersion(out.getVersion()); - final ClusterBlock actual = ClusterBlock.readClusterBlock(in); + final ClusterBlock actual = new ClusterBlock(in); assertThat(actual.id(), equalTo(expected.id())); assertThat(actual.status(), equalTo(expected.status())); From 8864c9fcd21f664a783be43799ce7cb4dbff1bce Mon Sep 17 00:00:00 2001 From: David Turner Date: Tue, 19 Feb 2019 17:52:25 +0000 Subject: [PATCH 29/54] Add some missing toString() implementations (#39124) Sometimes we turn objects into strings for logging or debugging using `toString()`, but the default implementation is often unhelpful. This change improves on this in two places I ran into recently. 
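To illustrate the pattern this change applies, here is a minimal, self-contained sketch: a wrapper that would otherwise print as an opaque `Object@hash` now reports the listener it delegates to. The `Handler` and `Listener` names below are hypothetical stand-ins, not Elasticsearch classes; only the shape of the `toString()` override mirrors the patch.

[source,java]
--------------------------------
// Hypothetical stand-ins for a response handler wrapping a listener; only the
// toString() shape mirrors the change in this patch.
public class ToStringExample {

    interface Listener<T> {
        void onResponse(T response);
    }

    static final class Handler<T> {
        private final Listener<T> listener;

        Handler(Listener<T> listener) {
            this.listener = listener;
        }

        void handle(T response) {
            listener.onResponse(response);
        }

        @Override
        public String toString() {
            // Identify both the wrapper and the delegate it forwards to, so log lines
            // and debugger output say what the handler is actually for.
            return super.toString() + "/" + listener;
        }
    }

    public static void main(String[] args) {
        Listener<String> listener = new Listener<String>() {
            @Override
            public void onResponse(String response) {
                System.out.println("got " + response);
            }

            @Override
            public String toString() {
                return "listener for [my-request]";
            }
        };
        Handler<String> handler = new Handler<>(listener);
        System.out.println(handler); // e.g. ToStringExample$Handler@1b6d3586/listener for [my-request]
        handler.handle("ok");
    }
}
--------------------------------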
--- .../elasticsearch/action/ActionListenerResponseHandler.java | 5 +++++ .../action/support/replication/ReplicationOperation.java | 5 +++++ 2 files changed, 10 insertions(+) diff --git a/server/src/main/java/org/elasticsearch/action/ActionListenerResponseHandler.java b/server/src/main/java/org/elasticsearch/action/ActionListenerResponseHandler.java index 19a0618e1c5a4..0966a9f1034a8 100644 --- a/server/src/main/java/org/elasticsearch/action/ActionListenerResponseHandler.java +++ b/server/src/main/java/org/elasticsearch/action/ActionListenerResponseHandler.java @@ -68,4 +68,9 @@ public String executor() { public Response read(StreamInput in) throws IOException { return reader.read(in); } + + @Override + public String toString() { + return super.toString() + "/" + listener; + } } diff --git a/server/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java b/server/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java index 0da39a593a2c1..f001d9a29e29d 100644 --- a/server/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java +++ b/server/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java @@ -196,6 +196,11 @@ public void onFailure(Exception replicaException) { replicaException, ReplicationOperation.this::decPendingAndFinishIfNeeded, ReplicationOperation.this::onPrimaryDemoted, throwable -> decPendingAndFinishIfNeeded()); } + + @Override + public String toString() { + return "[" + replicaRequest + "][" + shard + "]"; + } }); } From 237d7559bbd12d409866c709777fb2d6bdffba93 Mon Sep 17 00:00:00 2001 From: Lee Hinman Date: Tue, 19 Feb 2019 10:54:15 -0700 Subject: [PATCH 30/54] Document 'max_size' parameter as shard size for rollover (#38750) It was not clear that this is *primary* shard size, not the entire shard size. Resolves #37981 --- docs/reference/ilm/policy-definitions.asciidoc | 2 +- docs/reference/ilm/using-policies-rollover.asciidoc | 4 ++-- docs/reference/indices/rollover-index.asciidoc | 9 +++++++++ 3 files changed, 12 insertions(+), 3 deletions(-) diff --git a/docs/reference/ilm/policy-definitions.asciidoc b/docs/reference/ilm/policy-definitions.asciidoc index e16b414504a64..945f80babad1b 100644 --- a/docs/reference/ilm/policy-definitions.asciidoc +++ b/docs/reference/ilm/policy-definitions.asciidoc @@ -390,7 +390,7 @@ existing index meets one of the rollover conditions. [options="header"] |====== | Name | Required | Default | Description -| `max_size` | no | - | max index storage size. +| `max_size` | no | - | max primary shard index storage size. See <> for formatting | `max_docs` | no | - | max number of documents an diff --git a/docs/reference/ilm/using-policies-rollover.asciidoc b/docs/reference/ilm/using-policies-rollover.asciidoc index dbabbd3333635..dfb89eb26d121 100644 --- a/docs/reference/ilm/using-policies-rollover.asciidoc +++ b/docs/reference/ilm/using-policies-rollover.asciidoc @@ -31,8 +31,8 @@ The rollover action takes the following parameters: .`rollover` Action Parameters |=== |Name |Description -|max_size |The maximum estimated size the index is allowed to grow -to. Defaults to `null`. Optional. +|max_size |The maximum estimated size the primary shard of the index is allowed +to grow to. Defaults to `null`. Optional. |max_docs |The maximum number of document the index should contain. Defaults to `null`. Optional. |max_age |The maximum age of the index. Defaults to `null`. Optional. 
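As a usage illustration for the `max_size` condition clarified in this patch, the following is a hedged sketch against the 6.x high-level REST client; the alias name `logs-write`, the threshold values, and the existence of a configured `RestHighLevelClient` are assumptions, not part of the change. The key point is that `max_size` is compared against the store size of the primary shards only, not primaries plus replicas.

[source,java]
--------------------------------
import org.elasticsearch.action.admin.indices.rollover.RolloverRequest;
import org.elasticsearch.action.admin.indices.rollover.RolloverResponse;
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.common.unit.ByteSizeUnit;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.unit.TimeValue;

public class RolloverConditionsExample {

    // Rolls the hypothetical write alias "logs-write" over to a new index once any
    // one of the three conditions is met; the new index name is derived automatically.
    static RolloverResponse rolloverIfNeeded(RestHighLevelClient client) throws Exception {
        RolloverRequest request = new RolloverRequest("logs-write", null);
        request.addMaxIndexAgeCondition(TimeValue.timeValueDays(7));           // max_age
        request.addMaxIndexDocsCondition(1_000_000);                           // max_docs, replicas not counted
        // max_size is evaluated against the primary shards' store size only.
        request.addMaxIndexSizeCondition(new ByteSizeValue(50, ByteSizeUnit.GB));
        return client.indices().rollover(request, RequestOptions.DEFAULT);
    }
}
--------------------------------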
diff --git a/docs/reference/indices/rollover-index.asciidoc b/docs/reference/indices/rollover-index.asciidoc index 1730c95e0dd24..941942a171e30 100644 --- a/docs/reference/indices/rollover-index.asciidoc +++ b/docs/reference/indices/rollover-index.asciidoc @@ -18,6 +18,15 @@ from the original (rolled-over) index. In this scenario, the write index will have its rollover alias' `is_write_index` set to `false`, while the newly created index will now have the rollover alias pointing to it as the write index with `is_write_index` as `true`. +The available conditions are: + +.`conditions` parameters +|=== +| Name | Description +| max_age | The maximum age of the index +| max_docs | The maximum number of documents the index should contain. This does not add documents multiple times for replicas +| max_size | The maximum estimated size of the primary shard of the index +|=== [source,js] -------------------------------------------------- From a5cbef9d1bc5b3ec24398c7dd793179f32951b64 Mon Sep 17 00:00:00 2001 From: Tim Brooks Date: Tue, 19 Feb 2019 11:32:21 -0700 Subject: [PATCH 31/54] Rebuild remote connections on profile changes (#37678) Currently remote compression and ping schedule settings are dynamic. However, we do not listen for changes. This commit adds listeners for changes to those two settings. Additionally, when those settings change we now close existing connections and open new ones with the settings applied. Fixes #37201. --- .../modules/remote-clusters.asciidoc | 74 ++++++++++---- .../transport/ConnectionManager.java | 51 ++++------ .../transport/RemoteClusterAware.java | 30 ++++-- .../transport/RemoteClusterConnection.java | 34 +++---- .../transport/RemoteClusterService.java | 97 ++++++++++++++++--- .../transport/TransportService.java | 2 +- .../discovery/PeerFinderTests.java | 2 +- .../transport/ConnectionManagerTests.java | 2 +- .../RemoteClusterConnectionTests.java | 43 ++++---- .../transport/RemoteClusterServiceTests.java | 60 +++++++++--- .../test/transport/MockTransport.java | 2 +- .../test/transport/MockTransportService.java | 2 +- .../transport/StubbableConnectionManager.java | 2 +- .../xpack/ccr/CcrRepositoryManager.java | 4 +- .../xpack/ccr/IndexFollowingIT.java | 66 +++++++++++++ .../authz/IndicesAndAliasesResolver.java | 4 +- 16 files changed, 342 insertions(+), 133 deletions(-) diff --git a/docs/reference/modules/remote-clusters.asciidoc b/docs/reference/modules/remote-clusters.asciidoc index 768eb7d6117bf..aefa750a92eb0 100644 --- a/docs/reference/modules/remote-clusters.asciidoc +++ b/docs/reference/modules/remote-clusters.asciidoc @@ -27,21 +27,16 @@ more _gateway nodes_ and uses them to federate requests to the remote cluster. [float] [[configuring-remote-clusters]] -=== Configuring Remote Clusters +=== Configuring remote clusters -Remote clusters can be specified globally using -<> (which can be updated dynamically), -or local to individual nodes using the `elasticsearch.yml` file. +You can configure remote clusters globally by using +<>, which you can update dynamically. +Alternatively, you can configure them locally on individual nodes by using the `elasticsearch.yml` file. -If a remote cluster is configured via `elasticsearch.yml` only the nodes with -that configuration will be able to connect to the remote cluster. In other -words, functionality that relies on remote cluster requests will have to be -driven specifically from those nodes. Remote clusters set via the -<> will be available on every node -in the cluster. 
- -The `elasticsearch.yml` config file for a node that connects to remote clusters -needs to list the remote clusters that should be connected to, for instance: +If you specify the settings in `elasticsearch.yml` files, only the nodes with +those settings can connect to the remote cluster. In other words, functionality +that relies on remote cluster requests must be driven specifically from those +nodes. For example: [source,yaml] -------------------------------- @@ -49,17 +44,22 @@ cluster: remote: cluster_one: <1> seeds: 127.0.0.1:9300 - cluster_two: <1> + transport.ping_schedule: 30s <2> + cluster_two: seeds: 127.0.0.1:9301 + transport.compress: true <3> -------------------------------- <1> `cluster_one` and `cluster_two` are arbitrary _cluster aliases_ representing the connection to each cluster. These names are subsequently used to distinguish between local and remote indices. +<2> A keep-alive ping is configured for `cluster_one`. +<3> Compression is explicitly enabled for requests to `cluster_two`. + +For more information about the optional transport settings, see +<>. -The equivalent example using the <> to add remote clusters to all nodes in the cluster would look like the -following: +If you use <>, the remote clusters are available on every node in the cluster. For example: [source,js] -------------------------------- @@ -71,12 +71,14 @@ PUT _cluster/settings "cluster_one": { "seeds": [ "127.0.0.1:9300" - ] + ], + "transport.ping_schedule": "30s" }, "cluster_two": { "seeds": [ "127.0.0.1:9301" - ] + ], + "transport.compress": true }, "cluster_three": { "seeds": [ @@ -92,6 +94,40 @@ PUT _cluster/settings // TEST[setup:host] // TEST[s/127.0.0.1:9300/\${transport_host}/] +You can dynamically update the compression and ping schedule settings. However, +you must re-include seeds in the settings update request. For example: + +[source,js] +-------------------------------- +PUT _cluster/settings +{ + "persistent": { + "cluster": { + "remote": { + "cluster_one": { + "seeds": [ + "127.0.0.1:9300" + ], + "transport.ping_schedule": "60s" + }, + "cluster_two": { + "seeds": [ + "127.0.0.1:9301" + ], + "transport.compress": false + } + } + } + } +} +-------------------------------- +// CONSOLE +// TEST[continued] + +NOTE: When the compression or ping schedule settings change, all the existing +node connections must close and re-open, which can cause in-flight requests to +fail. 
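The same update can be issued programmatically; the following is a rough sketch mirroring the JSON example above, assuming the Java `ClusterUpdateSettingsRequest` API, a `cluster_one` alias, and the illustrative seed address `127.0.0.1:9300`. The seeds are re-included deliberately, as the documentation above requires for a dynamic update.

[source,java]
--------------------------------
import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest;
import org.elasticsearch.common.settings.Settings;

public class RemoteClusterSettingsExample {

    // Builds the same persistent-settings update as the PUT _cluster/settings example
    // above: seeds are re-included alongside the changed ping schedule and compression.
    static ClusterUpdateSettingsRequest updateClusterOne() {
        Settings persistent = Settings.builder()
            .putList("cluster.remote.cluster_one.seeds", "127.0.0.1:9300")
            .put("cluster.remote.cluster_one.transport.ping_schedule", "60s")
            .put("cluster.remote.cluster_one.transport.compress", false)
            .build();
        return new ClusterUpdateSettingsRequest().persistentSettings(persistent);
    }
}
--------------------------------

The request can then be executed with whichever client is in use, for example the high-level REST client's `cluster().putSettings(request, RequestOptions.DEFAULT)`.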
+ A remote cluster can be deleted from the cluster settings by setting its seeds to `null`: diff --git a/server/src/main/java/org/elasticsearch/transport/ConnectionManager.java b/server/src/main/java/org/elasticsearch/transport/ConnectionManager.java index f1067a0c5575f..da86ed076e396 100644 --- a/server/src/main/java/org/elasticsearch/transport/ConnectionManager.java +++ b/server/src/main/java/org/elasticsearch/transport/ConnectionManager.java @@ -30,7 +30,6 @@ import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.common.util.concurrent.KeyedLock; import org.elasticsearch.core.internal.io.IOUtils; -import org.elasticsearch.threadpool.ThreadPool; import java.io.Closeable; import java.io.IOException; @@ -38,8 +37,6 @@ import java.util.Map; import java.util.concurrent.ConcurrentMap; import java.util.concurrent.CopyOnWriteArrayList; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.locks.ReadWriteLock; import java.util.concurrent.locks.ReentrantReadWriteLock; @@ -56,19 +53,17 @@ public class ConnectionManager implements Closeable { private final ConcurrentMap connectedNodes = ConcurrentCollections.newConcurrentMap(); private final KeyedLock connectionLock = new KeyedLock<>(); private final Transport transport; - private final ThreadPool threadPool; private final ConnectionProfile defaultProfile; private final AtomicBoolean isClosed = new AtomicBoolean(false); private final ReadWriteLock closeLock = new ReentrantReadWriteLock(); private final DelegatingNodeConnectionListener connectionListener = new DelegatingNodeConnectionListener(); - public ConnectionManager(Settings settings, Transport transport, ThreadPool threadPool) { - this(ConnectionProfile.buildDefaultConnectionProfile(settings), transport, threadPool); + public ConnectionManager(Settings settings, Transport transport) { + this(ConnectionProfile.buildDefaultConnectionProfile(settings), transport); } - public ConnectionManager(ConnectionProfile connectionProfile, Transport transport, ThreadPool threadPool) { + public ConnectionManager(ConnectionProfile connectionProfile, Transport transport) { this.transport = transport; - this.threadPool = threadPool; this.defaultProfile = connectionProfile; } @@ -185,35 +180,23 @@ public int size() { @Override public void close() { + Transports.assertNotTransportThread("Closing ConnectionManager"); if (isClosed.compareAndSet(false, true)) { - CountDownLatch latch = new CountDownLatch(1); - - // TODO: Consider moving all read/write lock (in Transport and this class) to the TransportService - threadPool.generic().execute(() -> { - closeLock.writeLock().lock(); - try { - // we are holding a write lock so nobody adds to the connectedNodes / openConnections map - it's safe to first close - // all instances and then clear them maps - Iterator> iterator = connectedNodes.entrySet().iterator(); - while (iterator.hasNext()) { - Map.Entry next = iterator.next(); - try { - IOUtils.closeWhileHandlingException(next.getValue()); - } finally { - iterator.remove(); - } + closeLock.writeLock().lock(); + try { + // we are holding a write lock so nobody adds to the connectedNodes / openConnections map - it's safe to first close + // all instances and then clear them maps + Iterator> iterator = connectedNodes.entrySet().iterator(); + while (iterator.hasNext()) { + Map.Entry next = iterator.next(); + try { + IOUtils.closeWhileHandlingException(next.getValue()); + } 
finally { + iterator.remove(); } - } finally { - closeLock.writeLock().unlock(); - latch.countDown(); } - }); - - try { - latch.await(30, TimeUnit.SECONDS); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - // ignore + } finally { + closeLock.writeLock().unlock(); } } } diff --git a/server/src/main/java/org/elasticsearch/transport/RemoteClusterAware.java b/server/src/main/java/org/elasticsearch/transport/RemoteClusterAware.java index ff06e59d4f729..1bf47d1a42f94 100644 --- a/server/src/main/java/org/elasticsearch/transport/RemoteClusterAware.java +++ b/server/src/main/java/org/elasticsearch/transport/RemoteClusterAware.java @@ -30,6 +30,7 @@ import org.elasticsearch.common.settings.SettingUpgrader; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.common.unit.TimeValue; import java.net.InetAddress; import java.net.InetSocketAddress; @@ -283,21 +284,38 @@ protected Map> groupClusterIndices(Set remoteCluste return perClusterIndices; } + void updateRemoteCluster(String clusterAlias, List addresses, String proxy) { + Boolean compress = TransportSettings.TRANSPORT_COMPRESS.get(settings); + TimeValue pingSchedule = TransportSettings.PING_SCHEDULE.get(settings); + updateRemoteCluster(clusterAlias, addresses, proxy, compress, pingSchedule); + } + + void updateRemoteCluster(String clusterAlias, Settings settings) { + String proxy = REMOTE_CLUSTERS_PROXY.getConcreteSettingForNamespace(clusterAlias).get(settings); + List addresses = REMOTE_CLUSTERS_SEEDS.getConcreteSettingForNamespace(clusterAlias).get(settings); + Boolean compress = RemoteClusterService.REMOTE_CLUSTER_COMPRESS.getConcreteSettingForNamespace(clusterAlias).get(settings); + TimeValue pingSchedule = RemoteClusterService.REMOTE_CLUSTER_PING_SCHEDULE + .getConcreteSettingForNamespace(clusterAlias) + .get(settings); + + updateRemoteCluster(clusterAlias, addresses, proxy, compress, pingSchedule); + } + /** * Subclasses must implement this to receive information about updated cluster aliases. If the given address list is * empty the cluster alias is unregistered and should be removed. */ - protected abstract void updateRemoteCluster(String clusterAlias, List addresses, String proxy); + protected abstract void updateRemoteCluster(String clusterAlias, List addresses, String proxy, boolean compressionEnabled, + TimeValue pingSchedule); /** * Registers this instance to listen to updates on the cluster settings. 
*/ public void listenForUpdates(ClusterSettings clusterSettings) { - clusterSettings.addAffixUpdateConsumer( - RemoteClusterAware.REMOTE_CLUSTERS_PROXY, - RemoteClusterAware.REMOTE_CLUSTERS_SEEDS, - (key, value) -> updateRemoteCluster(key, value.v2(), value.v1()), - (namespace, value) -> {}); + List> remoteClusterSettings = Arrays.asList(RemoteClusterAware.REMOTE_CLUSTERS_PROXY, + RemoteClusterAware.REMOTE_CLUSTERS_SEEDS, RemoteClusterService.REMOTE_CLUSTER_COMPRESS, + RemoteClusterService.REMOTE_CLUSTER_PING_SCHEDULE); + clusterSettings.addAffixGroupUpdateConsumer(remoteClusterSettings, this::updateRemoteCluster); clusterSettings.addAffixUpdateConsumer( RemoteClusterAware.SEARCH_REMOTE_CLUSTERS_PROXY, RemoteClusterAware.SEARCH_REMOTE_CLUSTERS_SEEDS, diff --git a/server/src/main/java/org/elasticsearch/transport/RemoteClusterConnection.java b/server/src/main/java/org/elasticsearch/transport/RemoteClusterConnection.java index 57820a8ca48a9..f4a1b250e7f5e 100644 --- a/server/src/main/java/org/elasticsearch/transport/RemoteClusterConnection.java +++ b/server/src/main/java/org/elasticsearch/transport/RemoteClusterConnection.java @@ -64,9 +64,6 @@ import java.util.function.Supplier; import java.util.stream.Collectors; -import static org.elasticsearch.transport.RemoteClusterService.REMOTE_CLUSTER_COMPRESS; -import static org.elasticsearch.transport.RemoteClusterService.REMOTE_CLUSTER_PING_SCHEDULE; - /** * Represents a connection to a single remote cluster. In contrast to a local cluster a remote cluster is not joined such that the * current node is part of the cluster and it won't receive cluster state updates from the remote cluster. Remote clusters are also not @@ -107,12 +104,13 @@ final class RemoteClusterConnection implements TransportConnectionListener, Clos * @param maxNumRemoteConnections the maximum number of connections to the remote cluster * @param nodePredicate a predicate to filter eligible remote nodes to connect to * @param proxyAddress the proxy address + * @param connectionProfile the connection profile to use */ RemoteClusterConnection(Settings settings, String clusterAlias, List>> seedNodes, TransportService transportService, int maxNumRemoteConnections, Predicate nodePredicate, - String proxyAddress) { + String proxyAddress, ConnectionProfile connectionProfile) { this(settings, clusterAlias, seedNodes, transportService, maxNumRemoteConnections, nodePredicate, proxyAddress, - createConnectionManager(settings, clusterAlias, transportService)); + createConnectionManager(connectionProfile, transportService)); } // Public for tests to pass a StubbableConnectionManager @@ -309,13 +307,23 @@ Transport.Connection getConnection() { @Override public void close() throws IOException { - IOUtils.close(connectHandler, connectionManager); + IOUtils.close(connectHandler); + // In the ConnectionManager we wait on connections being closed. + threadPool.generic().execute(connectionManager::close); } public boolean isClosed() { return connectHandler.isClosed(); } + public String getProxyAddress() { + return proxyAddress; + } + + public List>> getSeedNodes() { + return seedNodes; + } + /** * The connect handler manages node discovery and the actual connect to the remote cluster. * There is at most one connect job running at any time. 
If such a connect job is triggered @@ -697,18 +705,8 @@ private synchronized void ensureIteratorAvailable() { } } - private static ConnectionManager createConnectionManager(Settings settings, String clusterAlias, TransportService transportService) { - ConnectionProfile.Builder builder = new ConnectionProfile.Builder() - .setConnectTimeout(TransportSettings.CONNECT_TIMEOUT.get(settings)) - .setHandshakeTimeout(TransportSettings.CONNECT_TIMEOUT.get(settings)) - .addConnections(6, TransportRequestOptions.Type.REG, TransportRequestOptions.Type.PING) // TODO make this configurable? - // we don't want this to be used for anything else but search - .addConnections(0, TransportRequestOptions.Type.BULK, - TransportRequestOptions.Type.STATE, - TransportRequestOptions.Type.RECOVERY) - .setCompressionEnabled(REMOTE_CLUSTER_COMPRESS.getConcreteSettingForNamespace(clusterAlias).get(settings)) - .setPingInterval(REMOTE_CLUSTER_PING_SCHEDULE.getConcreteSettingForNamespace(clusterAlias).get(settings)); - return new ConnectionManager(builder.build(), transportService.transport, transportService.threadPool); + private static ConnectionManager createConnectionManager(ConnectionProfile connectionProfile, TransportService transportService) { + return new ConnectionManager(connectionProfile, transportService.transport); } ConnectionManager getConnectionManager() { diff --git a/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java b/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java index a126337aa0e54..fab7db20a3322 100644 --- a/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java +++ b/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java @@ -47,6 +47,7 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Objects; import java.util.Set; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; @@ -67,6 +68,8 @@ public final class RemoteClusterService extends RemoteClusterAware implements Cl private static final Logger logger = LogManager.getLogger(RemoteClusterService.class); + private static final ActionListener noopListener = ActionListener.wrap((x) -> {}, (x) -> {}); + static { // remove search.remote.* settings in 8.0.0 // TODO @@ -186,6 +189,7 @@ public String getKey(final String key) { private final TransportService transportService; private final int numRemoteConnections; private volatile Map remoteClusters = Collections.emptyMap(); + private volatile Map remoteClusterConnectionProfiles = Collections.emptyMap(); RemoteClusterService(Settings settings, TransportService transportService) { super(settings); @@ -213,21 +217,33 @@ private synchronized void updateRemoteClusters(Map>> seedList = entry.getValue().v2(); String proxyAddress = entry.getValue().v1(); - RemoteClusterConnection remote = this.remoteClusters.get(entry.getKey()); + String clusterAlias = entry.getKey(); + RemoteClusterConnection remote = this.remoteClusters.get(clusterAlias); + ConnectionProfile connectionProfile = this.remoteClusterConnectionProfiles.get(clusterAlias); if (seedList.isEmpty()) { // with no seed nodes we just remove the connection try { IOUtils.close(remote); } catch (IOException e) { - logger.warn("failed to close remote cluster connections for cluster: " + entry.getKey(), e); + logger.warn("failed to close remote cluster connections for cluster: " + clusterAlias, e); } - remoteClusters.remove(entry.getKey()); + remoteClusters.remove(clusterAlias); continue; } if (remote 
== null) { // this is a new cluster we have to add a new representation - String clusterAlias = entry.getKey(); remote = new RemoteClusterConnection(settings, clusterAlias, seedList, transportService, numRemoteConnections, - getNodePredicate(settings), proxyAddress); + getNodePredicate(settings), proxyAddress, connectionProfile); + remoteClusters.put(clusterAlias, remote); + } else if (connectionProfileChanged(remote.getConnectionManager().getConnectionProfile(), connectionProfile)) { + // New ConnectionProfile. Must tear down existing connection + try { + IOUtils.close(remote); + } catch (IOException e) { + logger.warn("failed to close remote cluster connections for cluster: " + clusterAlias, e); + } + remoteClusters.remove(clusterAlias); + remote = new RemoteClusterConnection(settings, clusterAlias, seedList, transportService, numRemoteConnections, + getNodePredicate(settings), proxyAddress, connectionProfile); remoteClusters.put(clusterAlias, remote); } @@ -244,7 +260,7 @@ private synchronized void updateRemoteClusters(Map addresses, String proxyAddress) { - updateRemoteCluster(clusterAlias, addresses, proxyAddress, ActionListener.wrap((x) -> {}, (x) -> {})); + protected void updateRemoteCluster(String clusterAlias, List addresses, String proxyAddress, boolean compressionEnabled, + TimeValue pingSchedule) { + if (LOCAL_CLUSTER_GROUP_KEY.equals(clusterAlias)) { + throw new IllegalArgumentException("remote clusters must not have the empty string as its key"); + } + ConnectionProfile oldProfile = remoteClusterConnectionProfiles.get(clusterAlias); + ConnectionProfile newProfile; + if (oldProfile != null) { + ConnectionProfile.Builder builder = new ConnectionProfile.Builder(oldProfile); + builder.setCompressionEnabled(compressionEnabled); + builder.setPingInterval(pingSchedule); + newProfile = builder.build(); + } else { + ConnectionProfile.Builder builder = new ConnectionProfile.Builder(buildConnectionProfileFromSettings(clusterAlias)); + builder.setCompressionEnabled(compressionEnabled); + builder.setPingInterval(pingSchedule); + newProfile = builder.build(); + } + updateRemoteCluster(clusterAlias, addresses, proxyAddress, newProfile, noopListener); } - void updateRemoteCluster( - final String clusterAlias, - final List addresses, - final String proxyAddress, - final ActionListener connectionListener) { + void updateRemoteCluster(final String clusterAlias, final List addresses, final String proxyAddress, + final ConnectionProfile connectionProfile, final ActionListener connectionListener) { + HashMap connectionProfiles = new HashMap<>(remoteClusterConnectionProfiles); + connectionProfiles.put(clusterAlias, connectionProfile); + this.remoteClusterConnectionProfiles = Collections.unmodifiableMap(connectionProfiles); final List>> nodes = - addresses.stream().>>map(address -> Tuple.tuple(address, () -> - buildSeedNode(clusterAlias, address, Strings.hasLength(proxyAddress))) - ).collect(Collectors.toList()); + addresses.stream().>>map(address -> Tuple.tuple(address, () -> + buildSeedNode(clusterAlias, address, Strings.hasLength(proxyAddress))) + ).collect(Collectors.toList()); updateRemoteClusters(Collections.singletonMap(clusterAlias, new Tuple<>(proxyAddress, nodes)), connectionListener); } @@ -387,6 +420,7 @@ void initializeRemoteClusters() { final PlainActionFuture future = new PlainActionFuture<>(); Map>>>> seeds = RemoteClusterAware.buildRemoteClustersDynamicConfig(settings); + initializeConnectionProfiles(seeds.keySet()); updateRemoteClusters(seeds, future); try { 
future.get(timeValue.millis(), TimeUnit.MILLISECONDS); @@ -399,6 +433,32 @@ void initializeRemoteClusters() { } } + private synchronized void initializeConnectionProfiles(Set remoteClusters) { + Map connectionProfiles = new HashMap<>(remoteClusters.size()); + for (String clusterName : remoteClusters) { + connectionProfiles.put(clusterName, buildConnectionProfileFromSettings(clusterName)); + } + this.remoteClusterConnectionProfiles = Collections.unmodifiableMap(connectionProfiles); + } + + private ConnectionProfile buildConnectionProfileFromSettings(String clusterName) { + return buildConnectionProfileFromSettings(settings, clusterName); + } + + static ConnectionProfile buildConnectionProfileFromSettings(Settings settings, String clusterName) { + return new ConnectionProfile.Builder() + .setConnectTimeout(TransportSettings.CONNECT_TIMEOUT.get(settings)) + .setHandshakeTimeout(TransportSettings.CONNECT_TIMEOUT.get(settings)) + .addConnections(6, TransportRequestOptions.Type.REG, TransportRequestOptions.Type.PING) // TODO make this configurable? + // we don't want this to be used for anything else but search + .addConnections(0, TransportRequestOptions.Type.BULK, + TransportRequestOptions.Type.STATE, + TransportRequestOptions.Type.RECOVERY) + .setCompressionEnabled(REMOTE_CLUSTER_COMPRESS.getConcreteSettingForNamespace(clusterName).get(settings)) + .setPingInterval(REMOTE_CLUSTER_PING_SCHEDULE.getConcreteSettingForNamespace(clusterName).get(settings)) + .build(); + } + @Override public void close() throws IOException { IOUtils.close(remoteClusters.values()); @@ -408,6 +468,11 @@ public Stream getRemoteConnectionInfos() { return remoteClusters.values().stream().map(RemoteClusterConnection::getConnectionInfo); } + private boolean connectionProfileChanged(ConnectionProfile oldProfile, ConnectionProfile newProfile) { + return Objects.equals(oldProfile.getCompressionEnabled(), newProfile.getCompressionEnabled()) == false + || Objects.equals(oldProfile.getPingInterval(), newProfile.getPingInterval()) == false; + } + /** * Collects all nodes of the given clusters and returns / passes a (clusterAlias, nodeId) to {@link DiscoveryNode} * function on success. 
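Condensed from the RemoteClusterService diff above, the essential flow when one of the dynamic settings changes looks roughly like the following sketch; it assumes the `ConnectionProfile` API shown in this patch and deliberately leaves out the seed-node and connection-manager wiring.

[source,java]
--------------------------------
import java.util.Objects;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.transport.ConnectionProfile;

public class ProfileRebuildExample {

    // Copy the existing profile and apply only the dynamic pieces (compression, ping schedule).
    static ConnectionProfile rebuild(ConnectionProfile oldProfile, boolean compress, TimeValue pingSchedule) {
        return new ConnectionProfile.Builder(oldProfile)
            .setCompressionEnabled(compress)
            .setPingInterval(pingSchedule)
            .build();
    }

    // Only a change in one of those two pieces forces the existing connection to be
    // closed and re-opened with the new profile.
    static boolean mustReconnect(ConnectionProfile oldProfile, ConnectionProfile newProfile) {
        return Objects.equals(oldProfile.getCompressionEnabled(), newProfile.getCompressionEnabled()) == false
            || Objects.equals(oldProfile.getPingInterval(), newProfile.getPingInterval()) == false;
    }
}
--------------------------------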
diff --git a/server/src/main/java/org/elasticsearch/transport/TransportService.java b/server/src/main/java/org/elasticsearch/transport/TransportService.java index 3ea15bba43a84..48ef529f9c3e3 100644 --- a/server/src/main/java/org/elasticsearch/transport/TransportService.java +++ b/server/src/main/java/org/elasticsearch/transport/TransportService.java @@ -149,7 +149,7 @@ public TransportService(Settings settings, Transport transport, ThreadPool threa Function localNodeFactory, @Nullable ClusterSettings clusterSettings, Set taskHeaders) { this(settings, transport, threadPool, transportInterceptor, localNodeFactory, clusterSettings, taskHeaders, - new ConnectionManager(settings, transport, threadPool)); + new ConnectionManager(settings, transport)); } public TransportService(Settings settings, Transport transport, ThreadPool threadPool, TransportInterceptor transportInterceptor, diff --git a/server/src/test/java/org/elasticsearch/discovery/PeerFinderTests.java b/server/src/test/java/org/elasticsearch/discovery/PeerFinderTests.java index 78a2f2446c5dc..5ffe242dfb208 100644 --- a/server/src/test/java/org/elasticsearch/discovery/PeerFinderTests.java +++ b/server/src/test/java/org/elasticsearch/discovery/PeerFinderTests.java @@ -211,7 +211,7 @@ public void setup() { localNode = newDiscoveryNode("local-node"); ConnectionManager innerConnectionManager - = new ConnectionManager(settings, capturingTransport, deterministicTaskQueue.getThreadPool()); + = new ConnectionManager(settings, capturingTransport); StubbableConnectionManager connectionManager = new StubbableConnectionManager(innerConnectionManager, settings, capturingTransport, deterministicTaskQueue.getThreadPool()); connectionManager.setDefaultNodeConnectedBehavior((cm, discoveryNode) -> { diff --git a/server/src/test/java/org/elasticsearch/transport/ConnectionManagerTests.java b/server/src/test/java/org/elasticsearch/transport/ConnectionManagerTests.java index 578521190e2ff..c1dd512e0232d 100644 --- a/server/src/test/java/org/elasticsearch/transport/ConnectionManagerTests.java +++ b/server/src/test/java/org/elasticsearch/transport/ConnectionManagerTests.java @@ -55,7 +55,7 @@ public void createConnectionManager() { .build(); threadPool = new ThreadPool(settings); transport = mock(Transport.class); - connectionManager = new ConnectionManager(settings, transport, threadPool); + connectionManager = new ConnectionManager(settings, transport); TimeValue oneSecond = new TimeValue(1000); TimeValue oneMinute = TimeValue.timeValueMinutes(1); connectionProfile = ConnectionProfile.buildSingleChannelProfile(TransportRequestOptions.Type.REG, oneSecond, oneSecond, diff --git a/server/src/test/java/org/elasticsearch/transport/RemoteClusterConnectionTests.java b/server/src/test/java/org/elasticsearch/transport/RemoteClusterConnectionTests.java index 9eddac80a17c0..5bc683d6fd708 100644 --- a/server/src/test/java/org/elasticsearch/transport/RemoteClusterConnectionTests.java +++ b/server/src/test/java/org/elasticsearch/transport/RemoteClusterConnectionTests.java @@ -104,6 +104,7 @@ public class RemoteClusterConnectionTests extends ESTestCase { private final ThreadPool threadPool = new TestThreadPool(getClass().getName()); + private final ConnectionProfile profile = RemoteClusterService.buildConnectionProfileFromSettings(Settings.EMPTY, "cluster"); @Override public void tearDown() throws Exception { @@ -191,7 +192,8 @@ public void testRemoteProfileIsUsedForLocalCluster() throws Exception { service.start(); service.acceptIncomingRequests(); try 
(RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", - Arrays.asList(Tuple.tuple(seedNode.toString(), () -> seedNode)), service, Integer.MAX_VALUE, n -> true, null)) { + Arrays.asList(Tuple.tuple(seedNode.toString(), () -> seedNode)), service, Integer.MAX_VALUE, n -> true, null, + profile)) { ConnectionManager connectionManager = connection.getConnectionManager(); updateSeedNodes(connection, seedNodes(seedNode)); assertTrue(connectionManager.nodeConnected(seedNode)); @@ -233,7 +235,8 @@ public void testRemoteProfileIsUsedForRemoteCluster() throws Exception { service.start(); service.acceptIncomingRequests(); try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", - Arrays.asList(Tuple.tuple(seedNode.toString(), () -> seedNode)), service, Integer.MAX_VALUE, n -> true, null)) { + Arrays.asList(Tuple.tuple(seedNode.toString(), () -> seedNode)), service, Integer.MAX_VALUE, n -> true, null, + profile)) { ConnectionManager connectionManager = connection.getConnectionManager(); updateSeedNodes(connection, seedNodes(seedNode)); assertTrue(connectionManager.nodeConnected(seedNode)); @@ -286,7 +289,8 @@ public void testDiscoverSingleNode() throws Exception { service.start(); service.acceptIncomingRequests(); try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", - Arrays.asList(Tuple.tuple(seedNode.toString(), () -> seedNode)), service, Integer.MAX_VALUE, n -> true, null)) { + Arrays.asList(Tuple.tuple(seedNode.toString(), () -> seedNode)), service, Integer.MAX_VALUE, n -> true, null, + profile)) { ConnectionManager connectionManager = connection.getConnectionManager(); updateSeedNodes(connection, seedNodes(seedNode)); assertTrue(connectionManager.nodeConnected(seedNode)); @@ -318,7 +322,7 @@ public void testDiscoverSingleNodeWithIncompatibleSeed() throws Exception { service.start(); service.acceptIncomingRequests(); try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", - seedNodes, service, Integer.MAX_VALUE, n -> true, null)) { + seedNodes, service, Integer.MAX_VALUE, n -> true, null, profile)) { ConnectionManager connectionManager = connection.getConnectionManager(); updateSeedNodes(connection, seedNodes); assertTrue(connectionManager.nodeConnected(seedNode)); @@ -346,7 +350,7 @@ public void testNodeDisconnected() throws Exception { service.start(); service.acceptIncomingRequests(); try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", - seedNodes(seedNode), service, Integer.MAX_VALUE, n -> true, null)) { + seedNodes(seedNode), service, Integer.MAX_VALUE, n -> true, null, profile)) { ConnectionManager connectionManager = connection.getConnectionManager(); updateSeedNodes(connection, seedNodes(seedNode)); assertTrue(connectionManager.nodeConnected(seedNode)); @@ -396,7 +400,7 @@ public void testFilterDiscoveredNodes() throws Exception { service.start(); service.acceptIncomingRequests(); try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", - seedNodes(seedNode), service, Integer.MAX_VALUE, n -> n.equals(rejectedNode) == false, null)) { + seedNodes(seedNode), service, Integer.MAX_VALUE, n -> n.equals(rejectedNode) == false, null, profile)) { ConnectionManager connectionManager = connection.getConnectionManager(); updateSeedNodes(connection, seedNodes(seedNode)); if (rejectedNode.equals(seedNode)) { @@ -461,7 +465,8 @@ 
public void testConnectWithIncompatibleTransports() throws Exception { service.start(); service.acceptIncomingRequests(); try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", - Arrays.asList(Tuple.tuple(seedNode.toString(), () -> seedNode)), service, Integer.MAX_VALUE, n -> true, null)) { + Arrays.asList(Tuple.tuple(seedNode.toString(), () -> seedNode)), service, Integer.MAX_VALUE, n -> true, null, + profile)) { ConnectionManager connectionManager = connection.getConnectionManager(); expectThrows( Exception.class, @@ -502,7 +507,7 @@ public void sendRequest(long requestId, String action, TransportRequest request, } }; - ConnectionManager delegate = new ConnectionManager(Settings.EMPTY, service.transport, threadPool); + ConnectionManager delegate = new ConnectionManager(Settings.EMPTY, service.transport); StubbableConnectionManager connectionManager = new StubbableConnectionManager(delegate, Settings.EMPTY, service.transport, threadPool); @@ -559,7 +564,7 @@ public void run() { CountDownLatch listenerCalled = new CountDownLatch(1); AtomicReference exceptionReference = new AtomicReference<>(); try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", - seedNodes(seedNode), service, Integer.MAX_VALUE, n -> true, null)) { + seedNodes(seedNode), service, Integer.MAX_VALUE, n -> true, null, profile)) { ActionListener listener = ActionListener.wrap(x -> { listenerCalled.countDown(); fail("expected exception"); @@ -614,7 +619,7 @@ public void testTriggerUpdatesConcurrently() throws IOException, InterruptedExce service.start(); service.acceptIncomingRequests(); try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", - seedNodes, service, Integer.MAX_VALUE, n -> true, null)) { + seedNodes, service, Integer.MAX_VALUE, n -> true, null, profile)) { ConnectionManager connectionManager = connection.getConnectionManager(); int numThreads = randomIntBetween(4, 10); Thread[] threads = new Thread[numThreads]; @@ -694,7 +699,7 @@ public void testCloseWhileConcurrentlyConnecting() throws IOException, Interrupt service.start(); service.acceptIncomingRequests(); try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", - seedNodes, service, Integer.MAX_VALUE, n -> true, null)) { + seedNodes, service, Integer.MAX_VALUE, n -> true, null, profile)) { int numThreads = randomIntBetween(4, 10); Thread[] threads = new Thread[numThreads]; CyclicBarrier barrier = new CyclicBarrier(numThreads + 1); @@ -782,7 +787,7 @@ public void testGetConnectionInfo() throws Exception { service.acceptIncomingRequests(); int maxNumConnections = randomIntBetween(1, 5); try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", - seedNodes, service, maxNumConnections, n -> true, null)) { + seedNodes, service, maxNumConnections, n -> true, null, profile)) { // test no nodes connected RemoteConnectionInfo remoteConnectionInfo = assertSerialization(connection.getConnectionInfo()); assertNotNull(remoteConnectionInfo); @@ -914,7 +919,7 @@ public void testEnsureConnected() throws IOException, InterruptedException { service.start(); service.acceptIncomingRequests(); try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", - seedNodes(seedNode), service, Integer.MAX_VALUE, n -> true, null)) { + seedNodes(seedNode), service, Integer.MAX_VALUE, n -> true, null, profile)) { 
ConnectionManager connectionManager = connection.getConnectionManager(); assertFalse(connectionManager.nodeConnected(seedNode)); assertFalse(connectionManager.nodeConnected(discoverableNode)); @@ -964,7 +969,7 @@ public void testCollectNodes() throws Exception { service.start(); service.acceptIncomingRequests(); try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", - seedNodes(seedNode), service, Integer.MAX_VALUE, n -> true, null)) { + seedNodes(seedNode), service, Integer.MAX_VALUE, n -> true, null, profile)) { if (randomBoolean()) { updateSeedNodes(connection, seedNodes(seedNode)); } @@ -1012,7 +1017,7 @@ public void testConnectedNodesConcurrentAccess() throws IOException, Interrupted service.start(); service.acceptIncomingRequests(); try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", - seedNodes, service, Integer.MAX_VALUE, n -> true, null)) { + seedNodes, service, Integer.MAX_VALUE, n -> true, null, profile)) { final int numGetThreads = randomIntBetween(4, 10); final Thread[] getThreads = new Thread[numGetThreads]; final int numModifyingThreads = randomIntBetween(4, 10); @@ -1100,7 +1105,7 @@ public void testClusterNameIsChecked() throws Exception { service.start(); service.acceptIncomingRequests(); try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", - seedNodes(seedNode), service, Integer.MAX_VALUE, n -> true, null)) { + seedNodes(seedNode), service, Integer.MAX_VALUE, n -> true, null, profile)) { ConnectionManager connectionManager = connection.getConnectionManager(); updateSeedNodes(connection, seedNodes(seedNode)); assertTrue(connectionManager.nodeConnected(seedNode)); @@ -1156,7 +1161,7 @@ public void sendRequest(long requestId, String action, TransportRequest request, } }; - ConnectionManager delegate = new ConnectionManager(Settings.EMPTY, service.transport, threadPool); + ConnectionManager delegate = new ConnectionManager(Settings.EMPTY, service.transport); StubbableConnectionManager connectionManager = new StubbableConnectionManager(delegate, Settings.EMPTY, service.transport, threadPool); @@ -1214,7 +1219,7 @@ public void testLazyResolveTransportAddress() throws Exception { return seedNode; }); try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", - Arrays.asList(seedSupplier), service, Integer.MAX_VALUE, n -> true, null)) { + Arrays.asList(seedSupplier), service, Integer.MAX_VALUE, n -> true, null, profile)) { updateSeedNodes(connection, Arrays.asList(seedSupplier)); // Closing connections leads to RemoteClusterConnection.ConnectHandler.collectRemoteNodes // being called again so we try to resolve the same seed node's host twice @@ -1246,7 +1251,7 @@ public void testProxyMode() throws Exception { RemoteClusterAware.buildSeedNode("some-remote-cluster", "node_0:" + randomIntBetween(1, 10000), true)); assertEquals("node_0", seedSupplier.v2().get().getAttributes().get("server_name")); try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", - Arrays.asList(seedSupplier), service, Integer.MAX_VALUE, n -> true, proxyAddress)) { + Arrays.asList(seedSupplier), service, Integer.MAX_VALUE, n -> true, proxyAddress, profile)) { updateSeedNodes(connection, Arrays.asList(seedSupplier), proxyAddress); assertEquals(2, connection.getNumNodesConnected()); assertNotNull(connection.getConnection(discoverableTransport.getLocalDiscoNode())); diff --git 
a/server/src/test/java/org/elasticsearch/transport/RemoteClusterServiceTests.java b/server/src/test/java/org/elasticsearch/transport/RemoteClusterServiceTests.java index 3ef1e4df02bfb..25c8a5fac7299 100644 --- a/server/src/test/java/org/elasticsearch/transport/RemoteClusterServiceTests.java +++ b/server/src/test/java/org/elasticsearch/transport/RemoteClusterServiceTests.java @@ -400,11 +400,7 @@ public void testCustomPingSchedule() throws IOException { TimeValue.timeValueSeconds(randomIntBetween(1, 10)); builder.put("cluster.remote.cluster_2.transport.ping_schedule", pingSchedule2); try (RemoteClusterService service = new RemoteClusterService(builder.build(), transportService)) { - assertFalse(service.isCrossClusterSearchEnabled()); service.initializeRemoteClusters(); - assertTrue(service.isCrossClusterSearchEnabled()); - service.updateRemoteCluster("cluster_1", Collections.singletonList(cluster1Seed.getAddress().toString()), null); - assertTrue(service.isCrossClusterSearchEnabled()); assertTrue(service.isRemoteClusterRegistered("cluster_1")); RemoteClusterConnection remoteClusterConnection1 = service.getRemoteClusterConnection("cluster_1"); assertEquals(pingSchedule1, remoteClusterConnection1.getConnectionManager().getConnectionProfile().getPingInterval()); @@ -415,6 +411,40 @@ public void testCustomPingSchedule() throws IOException { } } + public void testChangeSettings() throws Exception { + List knownNodes = new CopyOnWriteArrayList<>(); + try (MockTransportService cluster1Transport = startTransport("cluster_1_node", knownNodes, Version.CURRENT)) { + DiscoveryNode cluster1Seed = cluster1Transport.getLocalDiscoNode(); + knownNodes.add(cluster1Transport.getLocalDiscoNode()); + Collections.shuffle(knownNodes, random()); + + try (MockTransportService transportService = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, + threadPool, null)) { + transportService.start(); + transportService.acceptIncomingRequests(); + Settings.Builder builder = Settings.builder(); + builder.putList("cluster.remote.cluster_1.seeds", cluster1Seed.getAddress().toString()); + try (RemoteClusterService service = new RemoteClusterService(builder.build(), transportService)) { + service.initializeRemoteClusters(); + RemoteClusterConnection remoteClusterConnection = service.getRemoteClusterConnection("cluster_1"); + Settings.Builder settingsChange = Settings.builder(); + TimeValue pingSchedule = TimeValue.timeValueSeconds(randomIntBetween(6, 8)); + settingsChange.put("cluster.remote.cluster_1.transport.ping_schedule", pingSchedule); + boolean compressionEnabled = true; + settingsChange.put("cluster.remote.cluster_1.transport.compress", compressionEnabled); + settingsChange.putList("cluster.remote.cluster_1.seeds", cluster1Seed.getAddress().toString()); + service.updateRemoteCluster("cluster_1", settingsChange.build()); + assertBusy(remoteClusterConnection::isClosed); + + remoteClusterConnection = service.getRemoteClusterConnection("cluster_1"); + ConnectionProfile connectionProfile = remoteClusterConnection.getConnectionManager().getConnectionProfile(); + assertEquals(pingSchedule, connectionProfile.getPingInterval()); + assertEquals(compressionEnabled, connectionProfile.getCompressionEnabled()); + } + } + } + } + public void testRemoteNodeAttribute() throws IOException, InterruptedException { final Settings settings = Settings.builder().put("cluster.remote.node.attr", "gateway").build(); @@ -460,14 +490,14 @@ public void testRemoteNodeAttribute() throws IOException, InterruptedException { 
service.updateRemoteCluster( "cluster_1", Arrays.asList(c1N1Node.getAddress().toString(), c1N2Node.getAddress().toString()), null, - connectionListener(firstLatch)); + genericProfile("cluster_1"), connectionListener(firstLatch)); firstLatch.await(); final CountDownLatch secondLatch = new CountDownLatch(1); service.updateRemoteCluster( "cluster_2", Arrays.asList(c2N1Node.getAddress().toString(), c2N2Node.getAddress().toString()), null, - connectionListener(secondLatch)); + genericProfile("cluster_2"), connectionListener(secondLatch)); secondLatch.await(); assertTrue(service.isCrossClusterSearchEnabled()); @@ -525,14 +555,14 @@ public void testRemoteNodeRoles() throws IOException, InterruptedException { service.updateRemoteCluster( "cluster_1", Arrays.asList(c1N1Node.getAddress().toString(), c1N2Node.getAddress().toString()), null, - connectionListener(firstLatch)); + genericProfile("cluster_1"), connectionListener(firstLatch)); firstLatch.await(); final CountDownLatch secondLatch = new CountDownLatch(1); service.updateRemoteCluster( "cluster_2", Arrays.asList(c2N1Node.getAddress().toString(), c2N2Node.getAddress().toString()), null, - connectionListener(secondLatch)); + genericProfile("cluster_2"), connectionListener(secondLatch)); secondLatch.await(); assertTrue(service.isCrossClusterSearchEnabled()); @@ -595,17 +625,17 @@ public void testCollectNodes() throws InterruptedException, IOException { assertFalse(service.isCrossClusterSearchEnabled()); final CountDownLatch firstLatch = new CountDownLatch(1); - service.updateRemoteCluster( - "cluster_1", + + service.updateRemoteCluster("cluster_1", Arrays.asList(c1N1Node.getAddress().toString(), c1N2Node.getAddress().toString()), null, - connectionListener(firstLatch)); + genericProfile("cluster_1"), connectionListener(firstLatch)); firstLatch.await(); final CountDownLatch secondLatch = new CountDownLatch(1); service.updateRemoteCluster( "cluster_2", Arrays.asList(c2N1Node.getAddress().toString(), c2N2Node.getAddress().toString()), null, - connectionListener(secondLatch)); + genericProfile("cluster_2"), connectionListener(secondLatch)); secondLatch.await(); CountDownLatch latch = new CountDownLatch(1); service.collectNodes(new HashSet<>(Arrays.asList("cluster_1", "cluster_2")), @@ -911,7 +941,7 @@ private static void updateRemoteCluster(RemoteClusterService service, String clu exceptionAtomicReference.set(x); latch.countDown(); }); - service.updateRemoteCluster(clusterAlias, addresses, proxyAddress, listener); + service.updateRemoteCluster(clusterAlias, addresses, proxyAddress, genericProfile(clusterAlias), listener); latch.await(); if (exceptionAtomicReference.get() != null) { throw exceptionAtomicReference.get(); @@ -953,4 +983,8 @@ public void testSkipUnavailable() { } } } + + private static ConnectionProfile genericProfile(String clusterName) { + return RemoteClusterService.buildConnectionProfileFromSettings(Settings.EMPTY, clusterName); + } } diff --git a/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransport.java b/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransport.java index a6dbd1561936e..e39f5d03cba07 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransport.java +++ b/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransport.java @@ -80,7 +80,7 @@ public class MockTransport implements Transport, LifecycleComponent { public TransportService createTransportService(Settings settings, ThreadPool threadPool, TransportInterceptor interceptor, 
Function localNodeFactory, @Nullable ClusterSettings clusterSettings, Set taskHeaders) { - StubbableConnectionManager connectionManager = new StubbableConnectionManager(new ConnectionManager(settings, this, threadPool), + StubbableConnectionManager connectionManager = new StubbableConnectionManager(new ConnectionManager(settings, this), settings, this, threadPool); connectionManager.setDefaultNodeConnectedBehavior((cm, discoveryNode) -> nodeConnected(discoveryNode)); connectionManager.setDefaultGetConnectionBehavior((cm, discoveryNode) -> createConnection(discoveryNode)); diff --git a/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java b/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java index e3d7e72a0bb97..4b998f04a568a 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java +++ b/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java @@ -157,7 +157,7 @@ private MockTransportService(Settings settings, StubbableTransport transport, Th Function localNodeFactory, @Nullable ClusterSettings clusterSettings, Set taskHeaders) { super(settings, transport, threadPool, interceptor, localNodeFactory, clusterSettings, taskHeaders, - new StubbableConnectionManager(new ConnectionManager(settings, transport, threadPool), settings, transport, threadPool)); + new StubbableConnectionManager(new ConnectionManager(settings, transport), settings, transport, threadPool)); this.original = transport.getDelegate(); } diff --git a/test/framework/src/main/java/org/elasticsearch/test/transport/StubbableConnectionManager.java b/test/framework/src/main/java/org/elasticsearch/test/transport/StubbableConnectionManager.java index 41ac87f0af576..108e1bf5e24b5 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/transport/StubbableConnectionManager.java +++ b/test/framework/src/main/java/org/elasticsearch/test/transport/StubbableConnectionManager.java @@ -42,7 +42,7 @@ public class StubbableConnectionManager extends ConnectionManager { private volatile NodeConnectedBehavior defaultNodeConnectedBehavior = ConnectionManager::nodeConnected; public StubbableConnectionManager(ConnectionManager delegate, Settings settings, Transport transport, ThreadPool threadPool) { - super(settings, transport, threadPool); + super(settings, transport); this.delegate = delegate; this.getConnectionBehaviors = new ConcurrentHashMap<>(); this.nodeConnectedBehaviors = new ConcurrentHashMap<>(); diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/CcrRepositoryManager.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/CcrRepositoryManager.java index c1a28b72cf8fe..c241c7a9aa070 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/CcrRepositoryManager.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/CcrRepositoryManager.java @@ -12,6 +12,7 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.transport.RemoteClusterAware; import org.elasticsearch.xpack.ccr.action.repositories.DeleteInternalCcrRepositoryAction; import org.elasticsearch.xpack.ccr.action.repositories.DeleteInternalCcrRepositoryRequest; @@ -75,7 +76,8 @@ void init() { } @Override - protected void updateRemoteCluster(String clusterAlias, List addresses, 
String proxy) { + protected void updateRemoteCluster(String clusterAlias, List addresses, String proxy, boolean compressionEnabled, + TimeValue pingSchedule) { String repositoryName = CcrRepository.NAME_PREFIX + clusterAlias; if (addresses.isEmpty()) { deleteRepository(repositoryName); diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/IndexFollowingIT.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/IndexFollowingIT.java index 5a8a7feb34716..e0f71fe45155a 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/IndexFollowingIT.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/IndexFollowingIT.java @@ -50,6 +50,7 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.CheckedRunnable; import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; @@ -68,6 +69,8 @@ import org.elasticsearch.tasks.TaskInfo; import org.elasticsearch.test.BackgroundIndexer; import org.elasticsearch.transport.NoSuchRemoteClusterException; +import org.elasticsearch.transport.RemoteClusterService; +import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.CcrIntegTestCase; import org.elasticsearch.xpack.ccr.action.ShardFollowTask; import org.elasticsearch.xpack.core.ccr.ShardFollowNodeTaskStatus; @@ -1122,6 +1125,69 @@ private void runFallBehindTest( } } + public void testUpdateRemoteConfigsDuringFollowing() throws Exception { + final int numberOfPrimaryShards = randomIntBetween(1, 3); + int numberOfReplicas = between(0, 1); + + final String leaderIndexSettings = getIndexSettings(numberOfPrimaryShards, numberOfReplicas, + singletonMap(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), "true")); + assertAcked(leaderClient().admin().indices().prepareCreate("index1").setSource(leaderIndexSettings, XContentType.JSON)); + ensureLeaderYellow("index1"); + + final int firstBatchNumDocs = randomIntBetween(200, 800); + + logger.info("Executing put follow"); + final PutFollowAction.Request followRequest = putFollow("index1", "index2"); + PutFollowAction.Response response = followerClient().execute(PutFollowAction.INSTANCE, followRequest).get(); + assertTrue(response.isFollowIndexCreated()); + assertTrue(response.isFollowIndexShardsAcked()); + assertTrue(response.isIndexFollowingStarted()); + + logger.info("Indexing [{}] docs while updateing remote config", firstBatchNumDocs); + try (BackgroundIndexer indexer = new BackgroundIndexer("index1", "_doc", leaderClient(), firstBatchNumDocs, + randomIntBetween(1, 5))) { + + ClusterUpdateSettingsRequest settingsRequest = new ClusterUpdateSettingsRequest(); + String address = getLeaderCluster().getDataNodeInstance(TransportService.class).boundAddress().publishAddress().toString(); + Setting compress = RemoteClusterService.REMOTE_CLUSTER_COMPRESS.getConcreteSettingForNamespace("leader_cluster"); + Setting> seeds = RemoteClusterService.REMOTE_CLUSTERS_SEEDS.getConcreteSettingForNamespace("leader_cluster"); + settingsRequest.persistentSettings(Settings.builder().put(compress.getKey(), true).put(seeds.getKey(), address)); + assertAcked(followerClient().admin().cluster().updateSettings(settingsRequest).actionGet()); + + waitForDocs(firstBatchNumDocs, indexer); + indexer.assertNoFailures(); + + final Map firstBatchNumDocsPerShard = new HashMap<>(); + final 
ShardStats[] firstBatchShardStats = + leaderClient().admin().indices().prepareStats("index1").get().getIndex("index1").getShards(); + for (final ShardStats shardStats : firstBatchShardStats) { + if (shardStats.getShardRouting().primary()) { + long value = shardStats.getStats().getIndexing().getTotal().getIndexCount() - 1; + firstBatchNumDocsPerShard.put(shardStats.getShardRouting().shardId(), value); + } + } + + assertBusy(assertTask(numberOfPrimaryShards, firstBatchNumDocsPerShard)); + + for (String docId : indexer.getIds()) { + assertBusy(() -> { + final GetResponse getResponse = followerClient().prepareGet("index2", "_doc", docId).get(); + assertTrue("Doc with id [" + docId + "] is missing", getResponse.isExists()); + }); + } + + assertMaxSeqNoOfUpdatesIsTransferred(resolveLeaderIndex("index1"), resolveFollowerIndex("index2"), numberOfPrimaryShards); + } finally { + ClusterUpdateSettingsRequest settingsRequest = new ClusterUpdateSettingsRequest(); + String address = getLeaderCluster().getDataNodeInstance(TransportService.class).boundAddress().publishAddress().toString(); + Setting compress = RemoteClusterService.REMOTE_CLUSTER_COMPRESS.getConcreteSettingForNamespace("leader_cluster"); + Setting> seeds = RemoteClusterService.REMOTE_CLUSTERS_SEEDS.getConcreteSettingForNamespace("leader_cluster"); + settingsRequest.persistentSettings(Settings.builder().put(compress.getKey(), compress.getDefault(Settings.EMPTY)) + .put(seeds.getKey(), address)); + assertAcked(followerClient().admin().cluster().updateSettings(settingsRequest).actionGet()); + } + } + private long getFollowTaskSettingsVersion(String followerIndex) { long settingsVersion = -1L; for (ShardFollowNodeTaskStatus status : getFollowTaskStatuses(followerIndex)) { diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/IndicesAndAliasesResolver.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/IndicesAndAliasesResolver.java index 03c78ed903e81..e5d4609c13fb1 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/IndicesAndAliasesResolver.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/IndicesAndAliasesResolver.java @@ -24,6 +24,7 @@ import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.protocol.xpack.graph.GraphExploreRequest; import org.elasticsearch.transport.RemoteClusterAware; @@ -438,7 +439,8 @@ private RemoteClusterResolver(Settings settings, ClusterSettings clusterSettings } @Override - protected void updateRemoteCluster(String clusterAlias, List addresses, String proxyAddress) { + protected void updateRemoteCluster(String clusterAlias, List addresses, String proxyAddress, boolean compressionEnabled, + TimeValue pingSchedule) { if (addresses.isEmpty()) { clusters.remove(clusterAlias); } else { From f9d11ca1521cabbe97269b7d9a0c941397df85fb Mon Sep 17 00:00:00 2001 From: Lee Hinman Date: Tue, 19 Feb 2019 12:15:44 -0700 Subject: [PATCH 32/54] Revert "Mute failing test 20_mix_typless_typefull (#38781)" (#38912) This reverts commit b91e0589fe1efdaa5061a75a3674a5cc8706b703. 
This should be fixed by #38873 Resolves #38711 --- .../test/indices.create/20_mix_typeless_typeful.yml | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.create/20_mix_typeless_typeful.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.create/20_mix_typeless_typeful.yml index e3239d8b8c332..1eab9d6159764 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.create/20_mix_typeless_typeful.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.create/20_mix_typeless_typeful.yml @@ -104,10 +104,8 @@ "Implicitly create a typeless index while there is a typed template": - skip: - #version: " - 6.99.99" - #reason: needs typeless index operations to work on typed indices - version: "all" - reason: "muted, waiting for #38711" + version: " - 6.99.99" + reason: needs typeless index operations to work on typed indices - do: indices.put_template: From 1aa10c4b27221bb38d2ee37f683557858ae7638e Mon Sep 17 00:00:00 2001 From: Hendrik Muhs Date: Tue, 19 Feb 2019 17:36:03 +0100 Subject: [PATCH 33/54] add version 6.6.2 --- server/src/main/java/org/elasticsearch/Version.java | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/server/src/main/java/org/elasticsearch/Version.java b/server/src/main/java/org/elasticsearch/Version.java index c02062b2bbd63..789a2bb112766 100644 --- a/server/src/main/java/org/elasticsearch/Version.java +++ b/server/src/main/java/org/elasticsearch/Version.java @@ -120,6 +120,8 @@ public class Version implements Comparable, ToXContentFragment { public static final Version V_6_6_0 = new Version(V_6_6_0_ID, org.apache.lucene.util.Version.LUCENE_7_6_0); public static final int V_6_6_1_ID = 6060199; public static final Version V_6_6_1 = new Version(V_6_6_1_ID, org.apache.lucene.util.Version.LUCENE_7_6_0); + public static final int V_6_6_2_ID = 6060299; + public static final Version V_6_6_2 = new Version(V_6_6_2_ID, org.apache.lucene.util.Version.LUCENE_7_6_0); public static final int V_6_7_0_ID = 6070099; public static final Version V_6_7_0 = new Version(V_6_7_0_ID, org.apache.lucene.util.Version.LUCENE_7_7_0); public static final int V_7_0_0_ID = 7000099; @@ -150,6 +152,8 @@ public static Version fromId(int id) { return V_7_0_0; case V_6_7_0_ID: return V_6_7_0; + case V_6_6_2_ID: + return V_6_6_2; case V_6_6_1_ID: return V_6_6_1; case V_6_6_0_ID: From a3c44c0270c1552edd902c657392c507ee893f1d Mon Sep 17 00:00:00 2001 From: Lisa Cawley Date: Tue, 19 Feb 2019 11:53:35 -0800 Subject: [PATCH 34/54] [DOCS] Edits the remote clusters documentation (#38996) --- docs/reference/modules/remote-clusters.asciidoc | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/docs/reference/modules/remote-clusters.asciidoc b/docs/reference/modules/remote-clusters.asciidoc index aefa750a92eb0..9de8c8ec963aa 100644 --- a/docs/reference/modules/remote-clusters.asciidoc +++ b/docs/reference/modules/remote-clusters.asciidoc @@ -2,25 +2,25 @@ == Remote clusters ifndef::include-xpack[] -The _remote clusters_ module allows establishing uni-directional connections to -a remote cluster. This functionality is used in +The _remote clusters_ module enables you to establish uni-directional +connections to a remote cluster. This functionality is used in <>. endif::[] ifdef::include-xpack[] -The _remote clusters_ module allows establishing uni-directional connections to -a remote cluster. 
This functionality is used in cross-cluster replication, and +The _remote clusters_ module enables you to establish uni-directional +connections to a remote cluster. This functionality is used in +{stack-ov}/xpack-ccr.html[cross-cluster replication] and <>. endif::[] Remote cluster connections work by configuring a remote cluster and connecting only to a limited number of nodes in the remote cluster. Each remote cluster is -referenced by a name and a list of seed nodes. When a remote cluster is +referenced by a name and a list of seed nodes. When a remote cluster is registered, its cluster state is retrieved from one of the seed nodes so that by default up to three _gateway nodes_ are selected to be connected to as part of remote cluster requests. Remote cluster connections consist of uni-directional connections from the coordinating node to the previously selected remote nodes -only. It is possible to tag which nodes should be selected through node -attributes (see <>). +only. You can tag which nodes should be selected by using node attributes (see <>). Each node in a cluster that has remote clusters configured connects to one or more _gateway nodes_ and uses them to federate requests to the remote cluster. @@ -209,6 +209,6 @@ PUT _cluster/settings [[retrieve-remote-clusters-info]] === Retrieving remote clusters info -The <> allows to retrieve +You can use the <> to retrieve information about the configured remote clusters, as well as the remote nodes that the node is connected to. From bb796b33e4bdc3d3ec5c7b2bb0a0a21002339959 Mon Sep 17 00:00:00 2001 From: Igor Motov Date: Tue, 19 Feb 2019 16:02:11 -0500 Subject: [PATCH 35/54] Fix median calculation in MedianAbsoluteDeviationAggregatorTests (#38979) Fixes an error in median calculation in MedianAbsoluteDeviationAggregatorTests for odd number of sample points, which causes some rare test failures. Fixes #38937 --- .../MedianAbsoluteDeviationAggregatorTests.java | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/MedianAbsoluteDeviationAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/MedianAbsoluteDeviationAggregatorTests.java index 55cf9b16e1688..a422c41700b41 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/MedianAbsoluteDeviationAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/MedianAbsoluteDeviationAggregatorTests.java @@ -260,9 +260,15 @@ public static double calculateMAD(double[] sample) { private static double calculateMedian(double[] sample) { final double[] sorted = Arrays.copyOf(sample, sample.length); Arrays.sort(sorted); - - final int halfway = (int) Math.ceil(sorted.length / 2d); - final double median = (sorted[halfway - 1] + sorted[halfway]) / 2d; + final int halfway = (int) Math.ceil(sorted.length / 2d); + final double median; + if (sorted.length % 2 == 0) { + // even + median = (sorted[halfway - 1] + sorted[halfway]) / 2d; + } else { + // odd + median = (sorted[halfway - 1]); + } return median; } From e694473de41e1d0927df7806e63d65c9af07c9a5 Mon Sep 17 00:00:00 2001 From: Tim Vernum Date: Wed, 20 Feb 2019 08:27:14 +1100 Subject: [PATCH 36/54] Resolve concurrency with watcher trigger service (#39092) The watcher trigger service could attempt to modify the perWatchStats map simultaneously from multiple threads. 
This would cause the internal state to become inconsistent, in particular the count() method may return an incorrect value for the number of watches. This change replaces the implementation of the map with a ConcurrentHashMap so that its internal state remains consistent even when accessed from multiple threads. Resolves: #39087 --- .../elasticsearch/xpack/watcher/trigger/TriggerService.java | 3 ++- .../org/elasticsearch/xpack/watcher/WatcherServiceTests.java | 2 ++ .../xpack/watcher/test/integration/BootStrapTests.java | 1 - 3 files changed, 4 insertions(+), 2 deletions(-) diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/TriggerService.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/TriggerService.java index 4c27ec329bbb1..837acf097bea5 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/TriggerService.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/TriggerService.java @@ -19,6 +19,7 @@ import java.util.List; import java.util.Map; import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.CopyOnWriteArrayList; import java.util.function.Consumer; @@ -29,7 +30,7 @@ public class TriggerService { private final GroupedConsumer consumer = new GroupedConsumer(); private final Map engines; - private final Map perWatchStats = new HashMap<>(); + private final Map perWatchStats = new ConcurrentHashMap<>(); public TriggerService(Set engines) { Map builder = new HashMap<>(); diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherServiceTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherServiceTests.java index 3b8d844cc1241..3d1fe78e27a00 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherServiceTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherServiceTests.java @@ -231,8 +231,10 @@ public void testPausingWatcherServiceAlsoPausesTriggerService() { Trigger trigger = mock(Trigger.class); when(trigger.type()).thenReturn(engineType); + final String id = randomAlphaOfLengthBetween(3, 12); Watch watch = mock(Watch.class); when(watch.trigger()).thenReturn(trigger); + when(watch.id()).thenReturn(id); when(watch.condition()).thenReturn(InternalAlwaysCondition.INSTANCE); ExecutableNoneInput noneInput = new ExecutableNoneInput(); when(watch.input()).thenReturn(noneInput); diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/BootStrapTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/BootStrapTests.java index 6382909f96f33..b15b14a186a43 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/BootStrapTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/BootStrapTests.java @@ -141,7 +141,6 @@ public void testLoadMalformedWatchRecord() throws Exception { }); } - @AwaitsFix(bugUrl = "Supposedly fixed; https://github.com/elastic/x-pack-elasticsearch/issues/1915") public void testLoadExistingWatchesUponStartup() throws Exception { stopWatcher(); From 2d5649d088bb36189f948eac0fd72edc887f1453 Mon Sep 17 00:00:00 2001 From: Tal Levy Date: Tue, 19 Feb 2019 13:17:51 -0800 Subject: [PATCH 37/54] AwaitsFix XPackUsageIT#testXPackCcrUsage. relates to #39126.
--- .../src/test/java/org/elasticsearch/xpack/ccr/XPackUsageIT.java | 1 + 1 file changed, 1 insertion(+) diff --git a/x-pack/plugin/ccr/qa/multi-cluster/src/test/java/org/elasticsearch/xpack/ccr/XPackUsageIT.java b/x-pack/plugin/ccr/qa/multi-cluster/src/test/java/org/elasticsearch/xpack/ccr/XPackUsageIT.java index 84271ce0acaf1..c257b6f06f0ed 100644 --- a/x-pack/plugin/ccr/qa/multi-cluster/src/test/java/org/elasticsearch/xpack/ccr/XPackUsageIT.java +++ b/x-pack/plugin/ccr/qa/multi-cluster/src/test/java/org/elasticsearch/xpack/ccr/XPackUsageIT.java @@ -19,6 +19,7 @@ public class XPackUsageIT extends ESCCRRestTestCase { + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/39126") public void testXPackCcrUsage() throws Exception { if ("follow".equals(targetCluster) == false) { logger.info("skipping test, waiting for target cluster [follow]" ); From 92f7b9ef31ceb571101043a31815cf936988432f Mon Sep 17 00:00:00 2001 From: Jack Conradson Date: Tue, 19 Feb 2019 14:20:57 -0800 Subject: [PATCH 38/54] Mute SingleNodeTests (#39156) Relates to #36782 --- .../xpack/watcher/test/integration/SingleNodeTests.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/SingleNodeTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/SingleNodeTests.java index 6bd9da794cdfc..b03d75af113af 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/SingleNodeTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/SingleNodeTests.java @@ -5,12 +5,12 @@ */ package org.elasticsearch.xpack.watcher.test.integration; +import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.protocol.xpack.watcher.PutWatchResponse; import org.elasticsearch.test.ESIntegTestCase.ClusterScope; -import org.elasticsearch.test.junit.annotations.TestLogging; import org.elasticsearch.xpack.core.watcher.watch.Watch; import org.elasticsearch.xpack.watcher.test.AbstractWatcherIntegrationTestCase; import org.elasticsearch.xpack.watcher.trigger.schedule.IntervalSchedule; @@ -28,7 +28,7 @@ import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.is; -@TestLogging("org.elasticsearch.xpack.watcher.execution:DEBUG") +@LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/36782") @ClusterScope(scope = SUITE, numClientNodes = 0, transportClientRatio = 0, maxNumDataNodes = 1, supportsDedicatedMasters = false) public class SingleNodeTests extends AbstractWatcherIntegrationTestCase { From 6aafe47191aa04575db8bde3403eefdab93a324d Mon Sep 17 00:00:00 2001 From: Jake Landis Date: Tue, 19 Feb 2019 17:58:29 -0600 Subject: [PATCH 39/54] stronger wording for ilm+rollover in docs (#39159) --- docs/reference/ilm/set-up-lifecycle-policy.asciidoc | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/docs/reference/ilm/set-up-lifecycle-policy.asciidoc b/docs/reference/ilm/set-up-lifecycle-policy.asciidoc index 7af686238f334..3495a6e19cb9e 100644 --- a/docs/reference/ilm/set-up-lifecycle-policy.asciidoc +++ b/docs/reference/ilm/set-up-lifecycle-policy.asciidoc @@ -107,7 +107,8 @@ PUT test-index ----------------------- // CONSOLE -IMPORTANT: Its 
recommended not to use the create index API with a policy that +IMPORTANT: Do not to use the create index API with a policy that defines a rollover action. If you do so, the new index as the result of the -rollover will not carry forward the policy. Always use index templates to -define policies with rollover actions. +rollover will not carry forward the policy. Always use +<> to define policies with rollover +actions. From 5eef4added1779e6b6a8a608935f68a2b48e4075 Mon Sep 17 00:00:00 2001 From: Julie Tibshirani Date: Tue, 19 Feb 2019 17:30:25 -0800 Subject: [PATCH 40/54] Enable test logging for TransformIntegrationTests#testSearchTransform. There is already fairly detailed debug logging in the watcher framework, which should hopefully help debug the failure. Relates to #37882. --- .../xpack/watcher/transform/TransformIntegrationTests.java | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/transform/TransformIntegrationTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/transform/TransformIntegrationTests.java index 042e82765354f..4194902a72a42 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/transform/TransformIntegrationTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/transform/TransformIntegrationTests.java @@ -15,6 +15,7 @@ import org.elasticsearch.script.MockScriptPlugin; import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptType; +import org.elasticsearch.test.junit.annotations.TestLogging; import org.elasticsearch.xpack.watcher.condition.InternalAlwaysCondition; import org.elasticsearch.xpack.watcher.support.search.WatcherSearchTemplateRequest; import org.elasticsearch.xpack.watcher.test.AbstractWatcherIntegrationTestCase; @@ -156,7 +157,7 @@ public void testScriptTransform() throws Exception { assertThat(response.getHits().getAt(0).getSourceAsMap().get("key3").toString(), equalTo("20")); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/37882") + @TestLogging("org.elasticsearch.xpack.watcher:DEBUG") public void testSearchTransform() throws Exception { createIndex("my-condition-index", "my-payload-index"); ensureGreen("my-condition-index", "my-payload-index"); From 18c5f93c0fbc34333be4618e39178447f2139acd Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Wed, 20 Feb 2019 05:33:35 +0100 Subject: [PATCH 41/54] Add Checks for Closed Channel in Selector Loop (#39096) * A few warnings could be observed in test logs about `NoSuchElementException` being thrown in `InboundChannelBuffer#sliceBuffersTo`. These were the result of calls to this method after the relevant channel and hence the buffer was closed already as a result of a failed IO operation. * Fixed by adding the necessary guard statements to break out in these cases. I don't think there is a need here to do any additional error handling since `eventHandler.postHandling(channelContext);` at the end of the `processKey` call in the main selection loop handles closing channels and invoking callbacks for writes that failed to go through already. 
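For reference, a minimal sketch of the guard pattern described above, using hypothetical stand-in names rather than the actual org.elasticsearch.nio classes (the real change is in the diff below):

    import java.nio.channels.SelectionKey;

    // Hypothetical sketch: only dispatch read/write handling while the channel
    // context is still open, and re-check after the write in case it failed and
    // scheduled the channel for closing.
    class GuardedDispatch {
        interface ChannelContext {
            boolean selectorShouldClose();
        }

        void processKey(int readyOps, ChannelContext context) {
            if (context.selectorShouldClose() == false) {
                if ((readyOps & SelectionKey.OP_WRITE) != 0) {
                    handleWrite(context);   // may fail and mark the channel for closing
                }
                // the write above may have closed the channel, so check again
                if (context.selectorShouldClose() == false && (readyOps & SelectionKey.OP_READ) != 0) {
                    handleRead(context);
                }
            }
            postHandling(context);          // closes the channel and notifies listeners if needed
        }

        void handleWrite(ChannelContext context) { /* flush pending writes */ }

        void handleRead(ChannelContext context) { /* read and consume bytes */ }

        void postHandling(ChannelContext context) { /* close + invoke callbacks */ }
    }

Either check alone is not enough: a write that fails can close the channel before the read branch runs, which is exactly the window in which the buffer could still be sliced after being released.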
--- .../java/org/elasticsearch/nio/NioSelector.java | 17 ++++++++++------- .../elasticsearch/nio/SocketChannelContext.java | 2 +- .../nio/SocketChannelContextTests.java | 2 +- 3 files changed, 12 insertions(+), 9 deletions(-) diff --git a/libs/nio/src/main/java/org/elasticsearch/nio/NioSelector.java b/libs/nio/src/main/java/org/elasticsearch/nio/NioSelector.java index 1484ba2198f12..c89703c78c8ac 100644 --- a/libs/nio/src/main/java/org/elasticsearch/nio/NioSelector.java +++ b/libs/nio/src/main/java/org/elasticsearch/nio/NioSelector.java @@ -238,12 +238,13 @@ void processKey(SelectionKey selectionKey) { } if (channelContext.isConnectComplete()) { - if ((ops & SelectionKey.OP_WRITE) != 0) { - handleWrite(channelContext); - } - - if ((ops & SelectionKey.OP_READ) != 0) { - handleRead(channelContext); + if (channelContext.selectorShouldClose() == false) { + if ((ops & SelectionKey.OP_WRITE) != 0) { + handleWrite(channelContext); + } + if (channelContext.selectorShouldClose() == false && (ops & SelectionKey.OP_READ) != 0) { + handleRead(channelContext); + } } } eventHandler.postHandling(channelContext); @@ -336,7 +337,9 @@ public void writeToChannel(WriteOperation writeOperation) { } if (shouldFlushAfterQueuing) { - handleWrite(context); + if (context.selectorShouldClose() == false) { + handleWrite(context); + } eventHandler.postHandling(context); } } diff --git a/libs/nio/src/main/java/org/elasticsearch/nio/SocketChannelContext.java b/libs/nio/src/main/java/org/elasticsearch/nio/SocketChannelContext.java index a43a799423f06..816f4adc8cbb1 100644 --- a/libs/nio/src/main/java/org/elasticsearch/nio/SocketChannelContext.java +++ b/libs/nio/src/main/java/org/elasticsearch/nio/SocketChannelContext.java @@ -209,7 +209,7 @@ public void closeFromSelector() throws IOException { protected void handleReadBytes() throws IOException { int bytesConsumed = Integer.MAX_VALUE; - while (bytesConsumed > 0 && channelBuffer.getIndex() > 0) { + while (isOpen() && bytesConsumed > 0 && channelBuffer.getIndex() > 0) { bytesConsumed = readWriteHandler.consumeReads(channelBuffer); channelBuffer.release(bytesConsumed); } diff --git a/libs/nio/src/test/java/org/elasticsearch/nio/SocketChannelContextTests.java b/libs/nio/src/test/java/org/elasticsearch/nio/SocketChannelContextTests.java index 2654928e8fbc4..345c5197c76b8 100644 --- a/libs/nio/src/test/java/org/elasticsearch/nio/SocketChannelContextTests.java +++ b/libs/nio/src/test/java/org/elasticsearch/nio/SocketChannelContextTests.java @@ -476,7 +476,7 @@ public void flushChannel() throws IOException { @Override public boolean selectorShouldClose() { - return false; + return isClosing.get(); } @Override From f5fc1632280cd933a8295151a89da0b5aca02cf5 Mon Sep 17 00:00:00 2001 From: Henning Andersen <33268011+henningandersen@users.noreply.github.com> Date: Wed, 20 Feb 2019 08:27:07 +0100 Subject: [PATCH 42/54] Blob store compression fix (#39073) Blob store compression was not enabled for some of the files in snapshots due to constructor accessing sub-class fields. Fixed to instead accept compress field as constructor param. Also fixed chunk size validation to work. Deprecated repositories.fs.compress setting as well to be able to unify in a future commit. 
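The underlying issue is a general Java pitfall: the base-class constructor runs before sub-class field initializers, so any overridable method invoked from that constructor still observes the sub-class field's default value. A self-contained, hypothetical illustration (not the repository classes):

    // Hypothetical example: Base stands in for the repository base class that
    // builds its formats in the constructor, Sub for a concrete repository that
    // overrides isCompress() to return its own "compress" setting.
    public class CompressPitfall {
        static class Base {
            final boolean formatCompressed;
            Base() {
                // Sub.compress has not been initialized yet, so this reads false
                formatCompressed = isCompress();
            }
            boolean isCompress() {
                return false;
            }
        }

        static class Sub extends Base {
            private final boolean compress = true;
            @Override
            boolean isCompress() {
                return compress;
            }
        }

        public static void main(String[] args) {
            System.out.println(new Sub().formatCompressed); // prints "false"
        }
    }

Passing the compress flag up through the constructor, as this change does, sidesteps the problem because the value is computed before the base-class constructor needs it.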
--- .../repositories/url/URLRepository.java | 2 +- .../repositories/azure/AzureRepository.java | 15 ++------ .../gcs/GoogleCloudStorageRepository.java | 11 ++---- .../repositories/hdfs/HdfsRepository.java | 9 +---- .../repositories/s3/S3Repository.java | 12 ++----- .../blobstore/BlobStoreRepository.java | 30 +++++++++------- .../repositories/fs/FsRepository.java | 18 ++++------ .../blobstore/BlobStoreRepositoryTests.java | 34 +++++++++++++++++++ 8 files changed, 67 insertions(+), 64 deletions(-) diff --git a/modules/repository-url/src/main/java/org/elasticsearch/repositories/url/URLRepository.java b/modules/repository-url/src/main/java/org/elasticsearch/repositories/url/URLRepository.java index 4728e1b0d9eb6..d314ce912ef66 100644 --- a/modules/repository-url/src/main/java/org/elasticsearch/repositories/url/URLRepository.java +++ b/modules/repository-url/src/main/java/org/elasticsearch/repositories/url/URLRepository.java @@ -83,7 +83,7 @@ public class URLRepository extends BlobStoreRepository { */ public URLRepository(RepositoryMetaData metadata, Environment environment, NamedXContentRegistry namedXContentRegistry) { - super(metadata, environment.settings(), namedXContentRegistry); + super(metadata, environment.settings(), false, namedXContentRegistry); if (URL_SETTING.exists(metadata.settings()) == false && REPOSITORIES_URL_SETTING.exists(environment.settings()) == false) { throw new RepositoryException(metadata.name(), "missing url"); diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java index 28af721b1fdd6..078e0e698aa51 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java @@ -21,7 +21,6 @@ import com.microsoft.azure.storage.LocationMode; import com.microsoft.azure.storage.StorageException; - import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; @@ -82,16 +81,14 @@ public static final class Repository { private final BlobPath basePath; private final ByteSizeValue chunkSize; - private final boolean compress; private final Environment environment; private final AzureStorageService storageService; private final boolean readonly; public AzureRepository(RepositoryMetaData metadata, Environment environment, NamedXContentRegistry namedXContentRegistry, AzureStorageService storageService) { - super(metadata, environment.settings(), namedXContentRegistry); + super(metadata, environment.settings(), Repository.COMPRESS_SETTING.get(metadata.settings()), namedXContentRegistry); this.chunkSize = Repository.CHUNK_SIZE_SETTING.get(metadata.settings()); - this.compress = Repository.COMPRESS_SETTING.get(metadata.settings()); this.environment = environment; this.storageService = storageService; @@ -132,7 +129,7 @@ protected AzureBlobStore createBlobStore() throws URISyntaxException, StorageExc logger.debug((org.apache.logging.log4j.util.Supplier) () -> new ParameterizedMessage( "using container [{}], chunk_size [{}], compress [{}], base_path [{}]", - blobStore, chunkSize, compress, basePath)); + blobStore, chunkSize, isCompress(), basePath)); return blobStore; } @@ -141,14 +138,6 @@ protected BlobPath basePath() { return basePath; } - /** - * {@inheritDoc} - */ - @Override - protected boolean isCompress() { 
- return compress; - } - /** * {@inheritDoc} */ diff --git a/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageRepository.java b/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageRepository.java index 8e9e5656f25b4..3192691d84389 100644 --- a/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageRepository.java +++ b/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageRepository.java @@ -62,7 +62,6 @@ class GoogleCloudStorageRepository extends BlobStoreRepository { private final Settings settings; private final GoogleCloudStorageService storageService; private final BlobPath basePath; - private final boolean compress; private final ByteSizeValue chunkSize; private final String bucket; private final String clientName; @@ -70,7 +69,7 @@ class GoogleCloudStorageRepository extends BlobStoreRepository { GoogleCloudStorageRepository(RepositoryMetaData metadata, Environment environment, NamedXContentRegistry namedXContentRegistry, GoogleCloudStorageService storageService) { - super(metadata, environment.settings(), namedXContentRegistry); + super(metadata, environment.settings(), getSetting(COMPRESS, metadata), namedXContentRegistry); this.settings = environment.settings(); this.storageService = storageService; @@ -85,11 +84,10 @@ class GoogleCloudStorageRepository extends BlobStoreRepository { this.basePath = BlobPath.cleanPath(); } - this.compress = getSetting(COMPRESS, metadata); this.chunkSize = getSetting(CHUNK_SIZE, metadata); this.bucket = getSetting(BUCKET, metadata); this.clientName = CLIENT_NAME.get(metadata.settings()); - logger.debug("using bucket [{}], base_path [{}], chunk_size [{}], compress [{}]", bucket, basePath, chunkSize, compress); + logger.debug("using bucket [{}], base_path [{}], chunk_size [{}], compress [{}]", bucket, basePath, chunkSize, isCompress()); } @Override @@ -102,11 +100,6 @@ protected BlobPath basePath() { return basePath; } - @Override - protected boolean isCompress() { - return compress; - } - @Override protected ByteSizeValue chunkSize() { return chunkSize; diff --git a/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsRepository.java b/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsRepository.java index b588f0d002ccc..bba1b0031c85a 100644 --- a/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsRepository.java +++ b/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsRepository.java @@ -58,7 +58,6 @@ public final class HdfsRepository extends BlobStoreRepository { private final Environment environment; private final ByteSizeValue chunkSize; - private final boolean compress; private final BlobPath basePath = BlobPath.cleanPath(); private final URI uri; private final String pathSetting; @@ -69,11 +68,10 @@ public final class HdfsRepository extends BlobStoreRepository { public HdfsRepository(RepositoryMetaData metadata, Environment environment, NamedXContentRegistry namedXContentRegistry) { - super(metadata, environment.settings(), namedXContentRegistry); + super(metadata, environment.settings(), metadata.settings().getAsBoolean("compress", false), namedXContentRegistry); this.environment = environment; this.chunkSize = metadata.settings().getAsBytesSize("chunk_size", null); - this.compress = metadata.settings().getAsBoolean("compress", false); String uriSetting = getMetadata().settings().get("uri"); 
if (Strings.hasText(uriSetting) == false) { @@ -239,11 +237,6 @@ protected BlobPath basePath() { return basePath; } - @Override - protected boolean isCompress() { - return compress; - } - @Override protected ByteSizeValue chunkSize() { return chunkSize; diff --git a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java index b1d29d89a59c0..72ce6f8bf1f3e 100644 --- a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java +++ b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java @@ -19,8 +19,8 @@ package org.elasticsearch.repositories.s3; -import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.cluster.metadata.RepositoryMetaData; import org.elasticsearch.common.Strings; import org.elasticsearch.common.blobstore.BlobPath; @@ -155,8 +155,6 @@ class S3Repository extends BlobStoreRepository { private final ByteSizeValue chunkSize; - private final boolean compress; - private final BlobPath basePath; private final boolean serverSideEncryption; @@ -174,7 +172,7 @@ class S3Repository extends BlobStoreRepository { final Settings settings, final NamedXContentRegistry namedXContentRegistry, final S3Service service) { - super(metadata, settings, namedXContentRegistry); + super(metadata, settings, COMPRESS_SETTING.get(metadata.settings()), namedXContentRegistry); this.service = service; this.repositoryMetaData = metadata; @@ -187,7 +185,6 @@ class S3Repository extends BlobStoreRepository { this.bufferSize = BUFFER_SIZE_SETTING.get(metadata.settings()); this.chunkSize = CHUNK_SIZE_SETTING.get(metadata.settings()); - this.compress = COMPRESS_SETTING.get(metadata.settings()); // We make sure that chunkSize is bigger or equal than/to bufferSize if (this.chunkSize.getBytes() < bufferSize.getBytes()) { @@ -245,11 +242,6 @@ protected BlobPath basePath() { return basePath; } - @Override - protected boolean isCompress() { - return compress; - } - @Override protected ByteSizeValue chunkSize() { return chunkSize; diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java index c8cdf0d4e0308..8858f46a39e82 100644 --- a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java +++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java @@ -195,6 +195,8 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp private final Settings settings; + private final boolean compress; + private final RateLimiter snapshotRateLimiter; private final RateLimiter restoreRateLimiter; @@ -226,33 +228,37 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp * * @param metadata The metadata for this repository including name and settings * @param settings Settings for the node this repository object is created on + * @param compress true if metadata and snapshot files should be compressed */ - protected BlobStoreRepository(RepositoryMetaData metadata, Settings settings, NamedXContentRegistry namedXContentRegistry) { + protected BlobStoreRepository(RepositoryMetaData metadata, Settings settings, boolean compress, + NamedXContentRegistry namedXContentRegistry) { this.settings = settings; + this.compress = 
compress; this.metadata = metadata; this.namedXContentRegistry = namedXContentRegistry; snapshotRateLimiter = getRateLimiter(metadata.settings(), "max_snapshot_bytes_per_sec", new ByteSizeValue(40, ByteSizeUnit.MB)); restoreRateLimiter = getRateLimiter(metadata.settings(), "max_restore_bytes_per_sec", new ByteSizeValue(40, ByteSizeUnit.MB)); readOnly = metadata.settings().getAsBoolean("readonly", false); + indexShardSnapshotFormat = new ChecksumBlobStoreFormat<>(SNAPSHOT_CODEC, SNAPSHOT_NAME_FORMAT, - BlobStoreIndexShardSnapshot::fromXContent, namedXContentRegistry, isCompress()); + BlobStoreIndexShardSnapshot::fromXContent, namedXContentRegistry, compress); indexShardSnapshotsFormat = new ChecksumBlobStoreFormat<>(SNAPSHOT_INDEX_CODEC, SNAPSHOT_INDEX_NAME_FORMAT, - BlobStoreIndexShardSnapshots::fromXContent, namedXContentRegistry, isCompress()); - ByteSizeValue chunkSize = chunkSize(); - if (chunkSize != null && chunkSize.getBytes() <= 0) { - throw new IllegalArgumentException("the chunk size cannot be negative: [" + chunkSize + "]"); - } + BlobStoreIndexShardSnapshots::fromXContent, namedXContentRegistry, compress); } @Override protected void doStart() { + ByteSizeValue chunkSize = chunkSize(); + if (chunkSize != null && chunkSize.getBytes() <= 0) { + throw new IllegalArgumentException("the chunk size cannot be negative: [" + chunkSize + "]"); + } globalMetaDataFormat = new ChecksumBlobStoreFormat<>(METADATA_CODEC, METADATA_NAME_FORMAT, - MetaData::fromXContent, namedXContentRegistry, isCompress()); + MetaData::fromXContent, namedXContentRegistry, compress); indexMetaDataFormat = new ChecksumBlobStoreFormat<>(INDEX_METADATA_CODEC, METADATA_NAME_FORMAT, - IndexMetaData::fromXContent, namedXContentRegistry, isCompress()); + IndexMetaData::fromXContent, namedXContentRegistry, compress); snapshotFormat = new ChecksumBlobStoreFormat<>(SNAPSHOT_CODEC, SNAPSHOT_NAME_FORMAT, - SnapshotInfo::fromXContentInternal, namedXContentRegistry, isCompress()); + SnapshotInfo::fromXContentInternal, namedXContentRegistry, compress); } @Override @@ -347,8 +353,8 @@ protected BlobStore blobStore() { * * @return true if compression is needed */ - protected boolean isCompress() { - return false; + protected final boolean isCompress() { + return compress; } /** diff --git a/server/src/main/java/org/elasticsearch/repositories/fs/FsRepository.java b/server/src/main/java/org/elasticsearch/repositories/fs/FsRepository.java index 01c08fbce0044..ea438f03bf11e 100644 --- a/server/src/main/java/org/elasticsearch/repositories/fs/FsRepository.java +++ b/server/src/main/java/org/elasticsearch/repositories/fs/FsRepository.java @@ -63,21 +63,19 @@ public class FsRepository extends BlobStoreRepository { new ByteSizeValue(Long.MAX_VALUE), new ByteSizeValue(5), new ByteSizeValue(Long.MAX_VALUE), Property.NodeScope); public static final Setting COMPRESS_SETTING = Setting.boolSetting("compress", false, Property.NodeScope); public static final Setting REPOSITORIES_COMPRESS_SETTING = - Setting.boolSetting("repositories.fs.compress", false, Property.NodeScope); + Setting.boolSetting("repositories.fs.compress", false, Property.NodeScope, Property.Deprecated); private final Environment environment; private ByteSizeValue chunkSize; private final BlobPath basePath; - private boolean compress; - /** * Constructs a shared file system repository. 
*/ public FsRepository(RepositoryMetaData metadata, Environment environment, NamedXContentRegistry namedXContentRegistry) { - super(metadata, environment.settings(), namedXContentRegistry); + super(metadata, environment.settings(), calculateCompress(metadata, environment), namedXContentRegistry); this.environment = environment; String location = REPOSITORIES_LOCATION_SETTING.get(metadata.settings()); if (location.isEmpty()) { @@ -105,11 +103,14 @@ public FsRepository(RepositoryMetaData metadata, Environment environment, } else { this.chunkSize = REPOSITORIES_CHUNK_SIZE_SETTING.get(environment.settings()); } - this.compress = COMPRESS_SETTING.exists(metadata.settings()) - ? COMPRESS_SETTING.get(metadata.settings()) : REPOSITORIES_COMPRESS_SETTING.get(environment.settings()); this.basePath = BlobPath.cleanPath(); } + private static boolean calculateCompress(RepositoryMetaData metadata, Environment environment) { + return COMPRESS_SETTING.exists(metadata.settings()) + ? COMPRESS_SETTING.get(metadata.settings()) : REPOSITORIES_COMPRESS_SETTING.get(environment.settings()); + } + @Override protected BlobStore createBlobStore() throws Exception { final String location = REPOSITORIES_LOCATION_SETTING.get(metadata.settings()); @@ -117,11 +118,6 @@ protected BlobStore createBlobStore() throws Exception { return new FsBlobStore(environment.settings(), locationFile); } - @Override - protected boolean isCompress() { - return compress; - } - @Override protected ByteSizeValue chunkSize() { return chunkSize; diff --git a/server/src/test/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryTests.java b/server/src/test/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryTests.java index 871e5071ec7b7..a09560c54ce43 100644 --- a/server/src/test/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryTests.java +++ b/server/src/test/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryTests.java @@ -22,8 +22,10 @@ import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.metadata.RepositoryMetaData; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.env.Environment; import org.elasticsearch.plugins.Plugin; @@ -232,6 +234,38 @@ public void testIncompatibleSnapshotsBlobExists() throws Exception { assertEquals(0, repository.getRepositoryData().getIncompatibleSnapshotIds().size()); } + public void testBadChunksize() throws Exception { + final Client client = client(); + final Path location = ESIntegTestCase.randomRepoPath(node().settings()); + final String repositoryName = "test-repo"; + + expectThrows(RepositoryException.class, () -> + client.admin().cluster().preparePutRepository(repositoryName) + .setType(REPO_TYPE) + .setSettings(Settings.builder().put(node().settings()) + .put("location", location) + .put("chunk_size", randomLongBetween(-10, 0), ByteSizeUnit.BYTES)) + .get()); + } + + public void testFsRepositoryCompressDeprecated() { + final Path location = ESIntegTestCase.randomRepoPath(node().settings()); + final Settings settings = Settings.builder().put(node().settings()).put("location", location).build(); + final RepositoryMetaData metaData = new RepositoryMetaData("test-repo", REPO_TYPE, settings); + + Settings 
useCompressSettings = Settings.builder() + .put(node().getEnvironment().settings()) + .put(FsRepository.REPOSITORIES_COMPRESS_SETTING.getKey(), true) + .build(); + Environment useCompressEnvironment = + new Environment(useCompressSettings, node().getEnvironment().configFile()); + + new FsRepository(metaData, useCompressEnvironment, null); + + assertWarnings("[repositories.fs.compress] setting was deprecated in Elasticsearch and will be removed in a future release!" + + " See the breaking changes documentation for the next major version."); + } + private BlobStoreRepository setupRepo() { final Client client = client(); final Path location = ESIntegTestCase.randomRepoPath(node().settings()); From abefbc0cd9a12b43ca325f3f8901826e28840aa5 Mon Sep 17 00:00:00 2001 From: Ioannis Kakavas Date: Wed, 20 Feb 2019 09:49:14 +0200 Subject: [PATCH 43/54] Re-enable testTriggeredWatchLoading There is a strong indication that the test was originally failing for the same reason as testLoadExistingWatchesUponStartup. This was fixed in #39092 and the cause is explained in https://github.com/elastic/elasticsearch/pull/39092/files#r257895150 --- .../xpack/watcher/test/integration/BootStrapTests.java | 1 - 1 file changed, 1 deletion(-) diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/BootStrapTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/BootStrapTests.java index b15b14a186a43..905a461ee8721 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/BootStrapTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/BootStrapTests.java @@ -221,7 +221,6 @@ public void testMixedTriggeredWatchLoading() throws Exception { assertSingleExecutionAndCompleteWatchHistory(numWatches, numRecords); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/29846") public void testTriggeredWatchLoading() throws Exception { createIndex("output"); client().prepareIndex("my-index", "foo", "bar") From a462220297d53f1d791dbc4b9dc63173effba6a3 Mon Sep 17 00:00:00 2001 From: David Turner Date: Wed, 20 Feb 2019 08:55:04 +0000 Subject: [PATCH 44/54] Simplify calculation in AwarenessAllocationDecider (#38091) Today's calculation of the maximum number of shards per attribute is rather convoluted. This commit clarifies that it returns ceil(shardCount/numberOfAttributes). 
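For reference, the integer ceiling idiom the new code relies on, shown on hypothetical sample values:

    // Hypothetical values: 5 shard copies spread over 2 attribute values.
    // For positive integers, (a + b - 1) / b == ceil(a / (double) b).
    public class CeilingDivision {
        public static void main(String[] args) {
            int shardCount = 5;          // 1 primary + 4 replicas
            int numberOfAttributes = 2;  // e.g. two distinct zone values
            int maximumNodeCount = (shardCount + numberOfAttributes - 1) / numberOfAttributes;
            System.out.println(maximumNodeCount);                                          // 3
            System.out.println((int) Math.ceil(shardCount / (double) numberOfAttributes)); // 3
        }
    }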
--- .../decider/AwarenessAllocationDecider.java | 26 +++++-------------- 1 file changed, 7 insertions(+), 19 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AwarenessAllocationDecider.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AwarenessAllocationDecider.java index 06fc9c327053c..82060efd2d1e1 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AwarenessAllocationDecider.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AwarenessAllocationDecider.java @@ -136,7 +136,7 @@ private Decision underCapacity(ShardRouting shardRouting, RoutingNode node, Rout int shardCount = indexMetaData.getNumberOfReplicas() + 1; // 1 for primary for (String awarenessAttribute : awarenessAttributes) { // the node the shard exists on must be associated with an awareness attribute - if (!node.node().getAttributes().containsKey(awarenessAttribute)) { + if (node.node().getAttributes().containsKey(awarenessAttribute) == false) { return allocation.decision(Decision.NO, NAME, "node does not contain the awareness attribute [%s]; required attributes cluster setting [%s=%s]", awarenessAttribute, CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTE_SETTING.getKey(), @@ -160,7 +160,7 @@ private Decision underCapacity(ShardRouting shardRouting, RoutingNode node, Rout if (moveToNode) { if (shardRouting.assignedToNode()) { String nodeId = shardRouting.relocating() ? shardRouting.relocatingNodeId() : shardRouting.currentNodeId(); - if (!node.nodeId().equals(nodeId)) { + if (node.nodeId().equals(nodeId) == false) { // we work on different nodes, move counts around shardPerAttribute.putOrAdd(allocation.routingNodes().node(nodeId).node().getAttributes().get(awarenessAttribute), 0, -1); @@ -175,28 +175,16 @@ private Decision underCapacity(ShardRouting shardRouting, RoutingNode node, Rout List fullValues = forcedAwarenessAttributes.get(awarenessAttribute); if (fullValues != null) { for (String fullValue : fullValues) { - if (!shardPerAttribute.containsKey(fullValue)) { + if (shardPerAttribute.containsKey(fullValue) == false) { numberOfAttributes++; } } } // TODO should we remove ones that are not part of full list? - int averagePerAttribute = shardCount / numberOfAttributes; - int totalLeftover = shardCount % numberOfAttributes; - int requiredCountPerAttribute; - if (averagePerAttribute == 0) { - // if we have more attributes values than shard count, no leftover - totalLeftover = 0; - requiredCountPerAttribute = 1; - } else { - requiredCountPerAttribute = averagePerAttribute; - } - int leftoverPerAttribute = totalLeftover == 0 ? 
0 : 1; - - int currentNodeCount = shardPerAttribute.get(node.node().getAttributes().get(awarenessAttribute)); - // if we are above with leftover, then we know we are not good, even with mod - if (currentNodeCount > (requiredCountPerAttribute + leftoverPerAttribute)) { + final int currentNodeCount = shardPerAttribute.get(node.node().getAttributes().get(awarenessAttribute)); + final int maximumNodeCount = (shardCount + numberOfAttributes - 1) / numberOfAttributes; // ceil(shardCount/numberOfAttributes) + if (currentNodeCount > maximumNodeCount) { return allocation.decision(Decision.NO, NAME, "there are too many copies of the shard allocated to nodes with attribute [%s], there are [%d] total configured " + "shard copies for this shard id and [%d] total attribute values, expected the allocated shard count per " + @@ -205,7 +193,7 @@ private Decision underCapacity(ShardRouting shardRouting, RoutingNode node, Rout shardCount, numberOfAttributes, currentNodeCount, - requiredCountPerAttribute + leftoverPerAttribute); + maximumNodeCount); } } From 38fbf9792bcf4fe66bb3f17589e5fe6d29748d07 Mon Sep 17 00:00:00 2001 From: Andrei Stefan Date: Wed, 20 Feb 2019 11:23:47 +0200 Subject: [PATCH 45/54] Added "validate.properties" property to JDBC's list of allowed properties. (#39050) This defaults to "true" (current behavior) and will throw an exception if there is a property that cannot be recognized. If "false", it will ignore anything unrecognizable. --- docs/reference/sql/endpoints/jdbc.asciidoc | 14 ++-- .../sql/jdbc/JdbcConfigurationTests.java | 68 ++++++++++++++++++- .../sql/client/ConnectionConfiguration.java | 28 ++++++-- .../xpack/sql/client/HttpClient.java | 2 +- 4 files changed, 99 insertions(+), 13 deletions(-) diff --git a/docs/reference/sql/endpoints/jdbc.asciidoc b/docs/reference/sql/endpoints/jdbc.asciidoc index 17c93a8951758..051c608ea8045 100644 --- a/docs/reference/sql/endpoints/jdbc.asciidoc +++ b/docs/reference/sql/endpoints/jdbc.asciidoc @@ -58,9 +58,9 @@ jdbc:es://<1>[[http|https]://]*<2>[host[:port]]*<3>/[prefix]*<4>[?[option=value] <2> type of HTTP connection to make - `http` (default) or `https`. Optional. <3> host (`localhost` by default) and port (`9200` by default). Optional. <4> prefix (empty by default). Typically used when hosting {es} under a certain path. Optional. -<5> Parameters for the JDBC driver. Empty by default. Optional. +<5> Properties for the JDBC driver. Empty by default. Optional. -The driver recognized the following parameters: +The driver recognized the following properties: [[jdbc-cfg]] [float] @@ -122,6 +122,12 @@ Query timeout (in seconds). That is the maximum amount of time waiting for a que `proxy.socks`:: SOCKS proxy host name +[float] +==== Additional + +`validate.properties` (default true):: If disabled, it will ignore any misspellings or unrecognizable properties. When enabled, an exception +will be thrown if the provided property cannot be recognized. + To put all of it together, the following URL: @@ -161,10 +167,10 @@ HTTP traffic. By default 9200. instance is fine for unsecured Elasticsearch. Which one to use? Typically client applications that provide most -configuration parameters in the URL rely on the `DriverManager`-style +configuration properties in the URL rely on the `DriverManager`-style while `DataSource` is preferred when being _passed_ around since it can be configured in one place and the consumer only has to call `getConnection` -without having to worry about any other parameters. +without having to worry about any other properties. 
To connect to a secured Elasticsearch server the `Properties` should look like: diff --git a/x-pack/plugin/sql/jdbc/src/test/java/org/elasticsearch/xpack/sql/jdbc/JdbcConfigurationTests.java b/x-pack/plugin/sql/jdbc/src/test/java/org/elasticsearch/xpack/sql/jdbc/JdbcConfigurationTests.java index dac9dbba61776..7e0aa243ad71a 100644 --- a/x-pack/plugin/sql/jdbc/src/test/java/org/elasticsearch/xpack/sql/jdbc/JdbcConfigurationTests.java +++ b/x-pack/plugin/sql/jdbc/src/test/java/org/elasticsearch/xpack/sql/jdbc/JdbcConfigurationTests.java @@ -18,7 +18,11 @@ import java.util.stream.Collectors; import static org.elasticsearch.xpack.sql.client.ConnectionConfiguration.CONNECT_TIMEOUT; +import static org.elasticsearch.xpack.sql.client.ConnectionConfiguration.NETWORK_TIMEOUT; +import static org.elasticsearch.xpack.sql.client.ConnectionConfiguration.PAGE_SIZE; import static org.elasticsearch.xpack.sql.client.ConnectionConfiguration.PAGE_TIMEOUT; +import static org.elasticsearch.xpack.sql.client.ConnectionConfiguration.PROPERTIES_VALIDATION; +import static org.elasticsearch.xpack.sql.client.ConnectionConfiguration.QUERY_TIMEOUT; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; @@ -69,7 +73,7 @@ public void testDebugOut() throws Exception { public void testTypeInParam() throws Exception { Exception e = expectThrows(JdbcSQLException.class, () -> ci("jdbc:es://a:1/foo/bar/tar?debug=true&debug.out=jdbc.out")); - assertEquals("Unknown parameter [debug.out] ; did you mean [debug.output]", e.getMessage()); + assertEquals("Unknown parameter [debug.out]; did you mean [debug.output]", e.getMessage()); } public void testDebugOutWithSuffix() throws Exception { @@ -113,6 +117,66 @@ public void testHttpWithSSLDisabledFromPropertyAndEnabledFromProtocol() throws E Exception e = expectThrows(JdbcSQLException.class, () -> ci("jdbc:es://https://test?ssl=false")); assertEquals("Cannot enable SSL: HTTPS protocol being used in the URL and SSL disabled in properties", e.getMessage()); } + + public void testValidatePropertiesDefault() { + Exception e = expectThrows(JdbcSQLException.class, () -> ci("jdbc:es://test:9200?pagee.size=12")); + assertEquals("Unknown parameter [pagee.size]; did you mean [page.size]", e.getMessage()); + + e = expectThrows(JdbcSQLException.class, () -> ci("jdbc:es://test:9200?foo=bar")); + assertEquals("Unknown parameter [foo]; did you mean [ssl]", e.getMessage()); + } + + public void testValidateProperties() { + Exception e = expectThrows(JdbcSQLException.class, () -> ci("jdbc:es://test:9200?pagee.size=12&validate.properties=true")); + assertEquals("Unknown parameter [pagee.size]; did you mean [page.size]", e.getMessage()); + + e = expectThrows(JdbcSQLException.class, () -> ci("jdbc:es://test:9200?&validate.properties=true&something=some_value")); + assertEquals("Unknown parameter [something]; did you mean []", e.getMessage()); + + Properties properties = new Properties(); + properties.setProperty(PROPERTIES_VALIDATION, "true"); + e = expectThrows(JdbcSQLException.class, () -> JdbcConfiguration.create("jdbc:es://test:9200?something=some_value", properties, 0)); + assertEquals("Unknown parameter [something]; did you mean []", e.getMessage()); + } + + public void testNoPropertiesValidation() throws SQLException { + JdbcConfiguration ci = ci("jdbc:es://test:9200?pagee.size=12&validate.properties=false"); + assertEquals(false, ci.validateProperties()); + + // URL properties test + long queryTimeout = randomNonNegativeLong(); + long connectTimeout = 
randomNonNegativeLong(); + long networkTimeout = randomNonNegativeLong(); + long pageTimeout = randomNonNegativeLong(); + int pageSize = randomIntBetween(0, Integer.MAX_VALUE); + + ci = ci("jdbc:es://test:9200?validate.properties=false&something=some_value&query.timeout=" + queryTimeout + "&connect.timeout=" + + connectTimeout + "&network.timeout=" + networkTimeout + "&page.timeout=" + pageTimeout + "&page.size=" + pageSize); + assertEquals(false, ci.validateProperties()); + assertEquals(queryTimeout, ci.queryTimeout()); + assertEquals(connectTimeout, ci.connectTimeout()); + assertEquals(networkTimeout, ci.networkTimeout()); + assertEquals(pageTimeout, ci.pageTimeout()); + assertEquals(pageSize, ci.pageSize()); + + // Properties test + Properties properties = new Properties(); + properties.setProperty(PROPERTIES_VALIDATION, "false"); + properties.put(QUERY_TIMEOUT, Long.toString(queryTimeout)); + properties.put(PAGE_TIMEOUT, Long.toString(pageTimeout)); + properties.put(CONNECT_TIMEOUT, Long.toString(connectTimeout)); + properties.put(NETWORK_TIMEOUT, Long.toString(networkTimeout)); + properties.put(PAGE_SIZE, Integer.toString(pageSize)); + + // also putting validate.properties in URL to be overriden by the properties value + ci = JdbcConfiguration.create("jdbc:es://test:9200?validate.properties=true&something=some_value", properties, 0); + assertEquals(false, ci.validateProperties()); + assertEquals(queryTimeout, ci.queryTimeout()); + assertEquals(connectTimeout, ci.connectTimeout()); + assertEquals(networkTimeout, ci.networkTimeout()); + assertEquals(pageTimeout, ci.pageTimeout()); + assertEquals(pageSize, ci.pageSize()); + } public void testTimoutOverride() throws Exception { Properties properties = new Properties(); @@ -284,6 +348,6 @@ private void assertJdbcSqlExceptionFromProperties(String wrongSetting, String co private void assertJdbcSqlException(String wrongSetting, String correctSetting, String url, Properties props) { JdbcSQLException ex = expectThrows(JdbcSQLException.class, () -> JdbcConfiguration.create(url, props, 0)); - assertEquals("Unknown parameter [" + wrongSetting + "] ; did you mean [" + correctSetting + "]", ex.getMessage()); + assertEquals("Unknown parameter [" + wrongSetting + "]; did you mean [" + correctSetting + "]", ex.getMessage()); } } diff --git a/x-pack/plugin/sql/sql-client/src/main/java/org/elasticsearch/xpack/sql/client/ConnectionConfiguration.java b/x-pack/plugin/sql/sql-client/src/main/java/org/elasticsearch/xpack/sql/client/ConnectionConfiguration.java index 6096f5baf865d..c3c89906c2302 100644 --- a/x-pack/plugin/sql/sql-client/src/main/java/org/elasticsearch/xpack/sql/client/ConnectionConfiguration.java +++ b/x-pack/plugin/sql/sql-client/src/main/java/org/elasticsearch/xpack/sql/client/ConnectionConfiguration.java @@ -28,6 +28,10 @@ * to move away from the loose Strings... 
*/ public class ConnectionConfiguration { + + // Validation + public static final String PROPERTIES_VALIDATION = "validate.properties"; + public static final String PROPERTIES_VALIDATION_DEFAULT = "true"; // Timeouts @@ -59,12 +63,15 @@ public class ConnectionConfiguration { public static final String AUTH_PASS = "password"; protected static final Set OPTION_NAMES = new LinkedHashSet<>( - Arrays.asList(CONNECT_TIMEOUT, NETWORK_TIMEOUT, QUERY_TIMEOUT, PAGE_TIMEOUT, PAGE_SIZE, AUTH_USER, AUTH_PASS)); + Arrays.asList(PROPERTIES_VALIDATION, CONNECT_TIMEOUT, NETWORK_TIMEOUT, QUERY_TIMEOUT, PAGE_TIMEOUT, PAGE_SIZE, + AUTH_USER, AUTH_PASS)); static { OPTION_NAMES.addAll(SslConfig.OPTION_NAMES); OPTION_NAMES.addAll(ProxyConfig.OPTION_NAMES); } + + private final boolean validateProperties; // Base URI for all request private final URI baseURI; @@ -87,7 +94,11 @@ public ConnectionConfiguration(URI baseURI, String connectionString, Properties this.connectionString = connectionString; Properties settings = props != null ? props : new Properties(); - checkPropertyNames(settings, optionNames()); + validateProperties = parseValue(PROPERTIES_VALIDATION, settings.getProperty(PROPERTIES_VALIDATION, PROPERTIES_VALIDATION_DEFAULT), + Boolean::parseBoolean); + if (validateProperties) { + checkPropertyNames(settings, optionNames()); + } connectTimeout = parseValue(CONNECT_TIMEOUT, settings.getProperty(CONNECT_TIMEOUT, CONNECT_TIMEOUT_DEFAULT), Long::parseLong); networkTimeout = parseValue(NETWORK_TIMEOUT, settings.getProperty(NETWORK_TIMEOUT, NETWORK_TIMEOUT_DEFAULT), Long::parseLong); @@ -106,9 +117,10 @@ public ConnectionConfiguration(URI baseURI, String connectionString, Properties this.baseURI = normalizeSchema(baseURI, connectionString, sslConfig.isEnabled()); } - public ConnectionConfiguration(URI baseURI, String connectionString, long connectTimeout, long networkTimeout, long queryTimeout, - long pageTimeout, int pageSize, String user, String pass, SslConfig sslConfig, - ProxyConfig proxyConfig) throws ClientException { + public ConnectionConfiguration(URI baseURI, String connectionString, boolean validateProperties, long connectTimeout, + long networkTimeout, long queryTimeout, long pageTimeout, int pageSize, String user, String pass, + SslConfig sslConfig, ProxyConfig proxyConfig) throws ClientException { + this.validateProperties = validateProperties; this.connectionString = connectionString; this.connectTimeout = connectTimeout; this.networkTimeout = networkTimeout; @@ -161,7 +173,7 @@ private static String isKnownProperty(String propertyName, Collection kn if (knownOptions.contains(propertyName)) { return null; } - return "Unknown parameter [" + propertyName + "] ; did you mean " + StringUtils.findSimilar(propertyName, knownOptions); + return "Unknown parameter [" + propertyName + "]; did you mean " + StringUtils.findSimilar(propertyName, knownOptions); } protected T parseValue(String key, String value, Function parser) { @@ -175,6 +187,10 @@ protected T parseValue(String key, String value, Function parser) protected boolean isSSLEnabled() { return sslConfig.isEnabled(); } + + public boolean validateProperties() { + return validateProperties; + } public SslConfig sslConfig() { return sslConfig; diff --git a/x-pack/plugin/sql/sql-client/src/main/java/org/elasticsearch/xpack/sql/client/HttpClient.java b/x-pack/plugin/sql/sql-client/src/main/java/org/elasticsearch/xpack/sql/client/HttpClient.java index c3f35aefd65f4..47ad5c4746025 100644 --- 
a/x-pack/plugin/sql/sql-client/src/main/java/org/elasticsearch/xpack/sql/client/HttpClient.java +++ b/x-pack/plugin/sql/sql-client/src/main/java/org/elasticsearch/xpack/sql/client/HttpClient.java @@ -106,7 +106,7 @@ private Response post(String path } private boolean head(String path, long timeoutInMs) throws SQLException { - ConnectionConfiguration pingCfg = new ConnectionConfiguration(cfg.baseUri(), cfg.connectionString(), + ConnectionConfiguration pingCfg = new ConnectionConfiguration(cfg.baseUri(), cfg.connectionString(), cfg.validateProperties(), cfg.connectTimeout(), timeoutInMs, cfg.queryTimeout(), cfg.pageTimeout(), cfg.pageSize(), cfg.authUser(), cfg.authPass(), cfg.sslConfig(), cfg.proxyConfig()); try { From 822a21f29491f295b22dacd04b747781a69ffa61 Mon Sep 17 00:00:00 2001 From: Andrei Stefan Date: Wed, 20 Feb 2019 11:25:25 +0200 Subject: [PATCH 46/54] SQL: enforce JDBC driver - ES server version parity (#38972) --- x-pack/plugin/sql/jdbc/build.gradle | 1 + .../xpack/sql/jdbc/InfoResponse.java | 13 +++- .../xpack/sql/jdbc/JdbcHttpClient.java | 18 ++++-- .../JdbcConfigurationDataSourceTests.java | 45 ++++++++++++++ .../sql/jdbc/JdbcConfigurationTests.java | 26 +------- .../xpack/sql/jdbc/VersionParityTests.java | 54 +++++++++++++++++ .../xpack/sql/jdbc/WebServerTestCase.java | 60 +++++++++++++++++++ x-pack/plugin/sql/qa/security/roles.yml | 4 ++ .../xpack/sql/qa/security/JdbcSecurityIT.java | 20 +++---- 9 files changed, 201 insertions(+), 40 deletions(-) create mode 100644 x-pack/plugin/sql/jdbc/src/test/java/org/elasticsearch/xpack/sql/jdbc/JdbcConfigurationDataSourceTests.java create mode 100644 x-pack/plugin/sql/jdbc/src/test/java/org/elasticsearch/xpack/sql/jdbc/VersionParityTests.java create mode 100644 x-pack/plugin/sql/jdbc/src/test/java/org/elasticsearch/xpack/sql/jdbc/WebServerTestCase.java diff --git a/x-pack/plugin/sql/jdbc/build.gradle b/x-pack/plugin/sql/jdbc/build.gradle index a077b4ac7ba10..bec79dabb1465 100644 --- a/x-pack/plugin/sql/jdbc/build.gradle +++ b/x-pack/plugin/sql/jdbc/build.gradle @@ -24,6 +24,7 @@ dependencies { compile project(':libs:core') runtime "com.fasterxml.jackson.core:jackson-core:${versions.jackson}" testCompile "org.elasticsearch.test:framework:${version}" + testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') } dependencyLicenses { diff --git a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/InfoResponse.java b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/InfoResponse.java index a7671d80d0104..aff896d1c21f3 100644 --- a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/InfoResponse.java +++ b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/InfoResponse.java @@ -12,10 +12,21 @@ class InfoResponse { final String cluster; final int majorVersion; final int minorVersion; + final int revisionVersion; - InfoResponse(String clusterName, byte versionMajor, byte versionMinor) { + InfoResponse(String clusterName, byte versionMajor, byte versionMinor, byte revisionVersion) { this.cluster = clusterName; this.majorVersion = versionMajor; this.minorVersion = versionMinor; + this.revisionVersion = revisionVersion; + } + + @Override + public String toString() { + return cluster + "[" + versionString() + "]"; + } + + public String versionString() { + return majorVersion + "." + minorVersion + "." 
+ revisionVersion; } } \ No newline at end of file diff --git a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcHttpClient.java b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcHttpClient.java index 73713f91231d6..1b0e71736e955 100644 --- a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcHttpClient.java +++ b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcHttpClient.java @@ -31,7 +31,7 @@ class JdbcHttpClient { private final HttpClient httpClient; private final JdbcConfiguration conCfg; - private InfoResponse serverInfo; + private final InfoResponse serverInfo; /** * The SQLException is the only type of Exception the JDBC API can throw (and that the user expects). @@ -40,6 +40,8 @@ class JdbcHttpClient { JdbcHttpClient(JdbcConfiguration conCfg) throws SQLException { httpClient = new HttpClient(conCfg); this.conCfg = conCfg; + this.serverInfo = fetchServerInfo(); + checkServerVersion(); } boolean ping(long timeoutInMs) throws SQLException { @@ -72,16 +74,22 @@ boolean queryClose(String cursor) throws SQLException { } InfoResponse serverInfo() throws SQLException { - if (serverInfo == null) { - serverInfo = fetchServerInfo(); - } return serverInfo; } private InfoResponse fetchServerInfo() throws SQLException { MainResponse mainResponse = httpClient.serverInfo(); Version version = Version.fromString(mainResponse.getVersion()); - return new InfoResponse(mainResponse.getClusterName(), version.major, version.minor); + return new InfoResponse(mainResponse.getClusterName(), version.major, version.minor, version.revision); + } + + private void checkServerVersion() throws SQLException { + if (serverInfo.majorVersion != Version.CURRENT.major + || serverInfo.minorVersion != Version.CURRENT.minor + || serverInfo.revisionVersion != Version.CURRENT.revision) { + throw new SQLException("This version of the JDBC driver is only compatible with Elasticsearch version " + + Version.CURRENT.toString() + ", attempting to connect to a server version " + serverInfo.versionString()); + } } /** diff --git a/x-pack/plugin/sql/jdbc/src/test/java/org/elasticsearch/xpack/sql/jdbc/JdbcConfigurationDataSourceTests.java b/x-pack/plugin/sql/jdbc/src/test/java/org/elasticsearch/xpack/sql/jdbc/JdbcConfigurationDataSourceTests.java new file mode 100644 index 0000000000000..d326c0e624d49 --- /dev/null +++ b/x-pack/plugin/sql/jdbc/src/test/java/org/elasticsearch/xpack/sql/jdbc/JdbcConfigurationDataSourceTests.java @@ -0,0 +1,45 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.sql.jdbc; + +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.test.http.MockResponse; + +import java.io.IOException; +import java.net.URISyntaxException; +import java.sql.SQLException; +import java.util.Map; +import java.util.Properties; +import java.util.stream.Collectors; + +public class JdbcConfigurationDataSourceTests extends WebServerTestCase { + + public void testDataSourceConfigurationWithSSLInURL() throws SQLException, URISyntaxException, IOException { + webServer().enqueue(new MockResponse().setResponseCode(200).addHeader("Content-Type", "application/json").setBody( + XContentHelper.toXContent(createCurrentVersionMainResponse(), XContentType.JSON, false).utf8ToString())); + + Map urlPropMap = JdbcConfigurationTests.sslProperties(); + Properties allProps = new Properties(); + allProps.putAll(urlPropMap); + String sslUrlProps = urlPropMap.entrySet().stream().map(e -> e.getKey() + "=" + e.getValue()).collect(Collectors.joining("&")); + + EsDataSource dataSource = new EsDataSource(); + String address = "jdbc:es://" + webServerAddress() + "/?" + sslUrlProps; + dataSource.setUrl(address); + JdbcConnection connection = null; + + try { + connection = (JdbcConnection) dataSource.getConnection(); + } catch (SQLException sqle) { + fail("Connection creation should have been successful. Error: " + sqle); + } + + assertEquals(address, connection.getURL()); + JdbcConfigurationTests.assertSslConfig(allProps, connection.cfg.sslConfig()); + } +} diff --git a/x-pack/plugin/sql/jdbc/src/test/java/org/elasticsearch/xpack/sql/jdbc/JdbcConfigurationTests.java b/x-pack/plugin/sql/jdbc/src/test/java/org/elasticsearch/xpack/sql/jdbc/JdbcConfigurationTests.java index 7e0aa243ad71a..d919a5819329e 100644 --- a/x-pack/plugin/sql/jdbc/src/test/java/org/elasticsearch/xpack/sql/jdbc/JdbcConfigurationTests.java +++ b/x-pack/plugin/sql/jdbc/src/test/java/org/elasticsearch/xpack/sql/jdbc/JdbcConfigurationTests.java @@ -266,28 +266,6 @@ public void testDriverConfigurationWithSSLInURL() { } } - public void testDataSourceConfigurationWithSSLInURL() throws SQLException, URISyntaxException { - Map urlPropMap = sslProperties(); - - Properties allProps = new Properties(); - allProps.putAll(urlPropMap); - String sslUrlProps = urlPropMap.entrySet().stream().map(e -> e.getKey() + "=" + e.getValue()).collect(Collectors.joining("&")); - - EsDataSource dataSource = new EsDataSource(); - String address = "jdbc:es://test?" + sslUrlProps; - dataSource.setUrl(address); - JdbcConnection connection = null; - - try { - connection = (JdbcConnection) dataSource.getConnection(); - } catch (SQLException sqle) { - fail("Connection creation should have been successful. Error: " + sqle); - } - - assertEquals(address, connection.getURL()); - assertSslConfig(allProps, connection.cfg.sslConfig()); - } - public void testTyposInSslConfigInUrl(){ assertJdbcSqlExceptionFromUrl("ssl.protocl", "ssl.protocol"); assertJdbcSqlExceptionFromUrl("sssl", "ssl"); @@ -310,7 +288,7 @@ public void testTyposInSslConfigInProperties() { assertJdbcSqlExceptionFromProperties("ssl.ruststore.type", "ssl.truststore.type"); } - private Map sslProperties() { + static Map sslProperties() { Map sslPropertiesMap = new HashMap<>(8); // always using "false" so that the SSLContext doesn't actually start verifying the keystore and trustore // locations, as we don't have file permissions to access them. 
@@ -326,7 +304,7 @@ private Map sslProperties() { return sslPropertiesMap; } - private void assertSslConfig(Properties allProperties, SslConfig sslConfig) throws URISyntaxException { + static void assertSslConfig(Properties allProperties, SslConfig sslConfig) throws URISyntaxException { // because SslConfig doesn't expose its internal properties (and it shouldn't), // we compare a newly created SslConfig with the one from the JdbcConfiguration with the equals() method SslConfig mockSslConfig = new SslConfig(allProperties, new URI("http://test:9200/")); diff --git a/x-pack/plugin/sql/jdbc/src/test/java/org/elasticsearch/xpack/sql/jdbc/VersionParityTests.java b/x-pack/plugin/sql/jdbc/src/test/java/org/elasticsearch/xpack/sql/jdbc/VersionParityTests.java new file mode 100644 index 0000000000000..d4ce531cd5f0f --- /dev/null +++ b/x-pack/plugin/sql/jdbc/src/test/java/org/elasticsearch/xpack/sql/jdbc/VersionParityTests.java @@ -0,0 +1,54 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.sql.jdbc; + +import org.elasticsearch.Version; +import org.elasticsearch.action.main.MainResponse; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.test.VersionUtils; +import org.elasticsearch.test.http.MockResponse; + +import java.io.IOException; +import java.sql.SQLException; + +/** + * Test class for JDBC-ES server versions checks. + * + * It's using a {@code MockWebServer} to be able to create a response just like the one an ES instance + * would create for a request to "/", where the ES version used is configurable. + */ +public class VersionParityTests extends WebServerTestCase { + + public void testExceptionThrownOnIncompatibleVersions() throws IOException, SQLException { + Version version = VersionUtils.randomVersionBetween(random(), Version.V_6_0_0, VersionUtils.getPreviousVersion(Version.CURRENT)); + prepareRequest(version); + + String url = JdbcConfiguration.URL_PREFIX + webServer().getHostName() + ":" + webServer().getPort(); + SQLException ex = expectThrows(SQLException.class, () -> new JdbcHttpClient(JdbcConfiguration.create(url, null, 0))); + assertEquals("This version of the JDBC driver is only compatible with Elasticsearch version " + + org.elasticsearch.xpack.sql.client.Version.CURRENT.toString() + + ", attempting to connect to a server version " + version.toString(), ex.getMessage()); + } + + public void testNoExceptionThrownForCompatibleVersions() throws IOException { + prepareRequest(null); + + String url = JdbcConfiguration.URL_PREFIX + webServerAddress(); + try { + new JdbcHttpClient(JdbcConfiguration.create(url, null, 0)); + } catch (SQLException sqle) { + fail("JDBC driver version and Elasticsearch server version should be compatible. Error: " + sqle); + } + } + + void prepareRequest(Version version) throws IOException { + MainResponse response = version == null ? 
createCurrentVersionMainResponse() : createMainResponse(version); + webServer().enqueue(new MockResponse().setResponseCode(200).addHeader("Content-Type", "application/json").setBody( + XContentHelper.toXContent(response, XContentType.JSON, false).utf8ToString())); + } +} diff --git a/x-pack/plugin/sql/jdbc/src/test/java/org/elasticsearch/xpack/sql/jdbc/WebServerTestCase.java b/x-pack/plugin/sql/jdbc/src/test/java/org/elasticsearch/xpack/sql/jdbc/WebServerTestCase.java new file mode 100644 index 0000000000000..50f27f9ecf39a --- /dev/null +++ b/x-pack/plugin/sql/jdbc/src/test/java/org/elasticsearch/xpack/sql/jdbc/WebServerTestCase.java @@ -0,0 +1,60 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.sql.jdbc; + +import org.elasticsearch.Build; +import org.elasticsearch.Version; +import org.elasticsearch.action.main.MainResponse; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.http.MockWebServer; +import org.junit.After; +import org.junit.Before; + +import java.util.Date; + +/** + * Base class for unit tests that need a web server for basic tests. + */ +public abstract class WebServerTestCase extends ESTestCase { + + private MockWebServer webServer = new MockWebServer(); + + @Before + public void init() throws Exception { + webServer.start(); + } + + @After + public void cleanup() { + webServer.close(); + } + + public MockWebServer webServer() { + return webServer; + } + + MainResponse createCurrentVersionMainResponse() { + return createMainResponse(Version.CURRENT); + } + + MainResponse createMainResponse(Version version) { + String clusterUuid = randomAlphaOfLength(10); + ClusterName clusterName = new ClusterName(randomAlphaOfLength(10)); + String nodeName = randomAlphaOfLength(10); + final String date = new Date(randomNonNegativeLong()).toString(); + Build build = new Build( + Build.Flavor.UNKNOWN, Build.Type.UNKNOWN, randomAlphaOfLength(8), date, randomBoolean(), + version.toString() + ); + return new MainResponse(nodeName, version, clusterName, clusterUuid , build); + } + + String webServerAddress() { + return webServer.getHostName() + ":" + webServer.getPort(); + } +} diff --git a/x-pack/plugin/sql/qa/security/roles.yml b/x-pack/plugin/sql/qa/security/roles.yml index 337d7c7f9c7c1..141314e23f024 100644 --- a/x-pack/plugin/sql/qa/security/roles.yml +++ b/x-pack/plugin/sql/qa/security/roles.yml @@ -18,6 +18,10 @@ cli_or_drivers_minimal: privileges: [read, "indices:admin/get"] # end::cli_drivers +read_nothing: + cluster: + - "cluster:monitor/main" + read_something_else: cluster: - "cluster:monitor/main" diff --git a/x-pack/plugin/sql/qa/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/JdbcSecurityIT.java b/x-pack/plugin/sql/qa/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/JdbcSecurityIT.java index d47b06289a8b3..c56f3b23946e7 100644 --- a/x-pack/plugin/sql/qa/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/JdbcSecurityIT.java +++ b/x-pack/plugin/sql/qa/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/JdbcSecurityIT.java @@ -230,16 +230,16 @@ public void expectUnknownColumn(String user, String sql, String column) throws E @Override public void checkNoMonitorMain(String user) throws Exception { - // Most SQL actually works fine 
without monitor/main - expectMatchesAdmin("SELECT * FROM test", user, "SELECT * FROM test"); - expectMatchesAdmin("SHOW TABLES LIKE 'test'", user, "SHOW TABLES LIKE 'test'"); - expectMatchesAdmin("DESCRIBE test", user, "DESCRIBE test"); - - // But there are a few things that don't work - try (Connection es = es(userProperties(user))) { - expectUnauthorized("cluster:monitor/main", user, () -> es.getMetaData().getDatabaseMajorVersion()); - expectUnauthorized("cluster:monitor/main", user, () -> es.getMetaData().getDatabaseMinorVersion()); - } + // Without monitor/main the JDBC driver - ES server version comparison doesn't take place, which fails everything else + expectUnauthorized("cluster:monitor/main", user, () -> es(userProperties(user))); + expectUnauthorized("cluster:monitor/main", user, () -> es(userProperties(user)).getMetaData().getDatabaseMajorVersion()); + expectUnauthorized("cluster:monitor/main", user, () -> es(userProperties(user)).getMetaData().getDatabaseMinorVersion()); + expectUnauthorized("cluster:monitor/main", user, + () -> es(userProperties(user)).createStatement().executeQuery("SELECT * FROM test")); + expectUnauthorized("cluster:monitor/main", user, + () -> es(userProperties(user)).createStatement().executeQuery("SHOW TABLES LIKE 'test'")); + expectUnauthorized("cluster:monitor/main", user, + () -> es(userProperties(user)).createStatement().executeQuery("DESCRIBE test")); } private void expectUnauthorized(String action, String user, ThrowingRunnable r) { From 29caf477b6e824b5864fd5e0a3c4df75aa013b46 Mon Sep 17 00:00:00 2001 From: Adrien Grand Date: Wed, 20 Feb 2019 11:32:53 +0100 Subject: [PATCH 47/54] Reenable test in `indices.put_mapping/20_mix_typeless_typeful.yml`. (#39056) This test had been disabled because of test failures, but it only affected the 6.x branch. The fix for 6.x is at #39054. On master/7.x/7.0 we can reenable the test as-is. 
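Taken together, the JDBC driver patches above change what a client sees at connection time: unknown connection parameters can be tolerated by setting `validate.properties=false`, and the parity check from PATCH 46 makes a driver/server version mismatch fail as soon as the connection is opened, because `JdbcHttpClient` now fetches the server's main response in its constructor. The following is a rough user-level sketch rather than code from any patch; the host, port, and parameter values are placeholders, and `EsDataSource` is used the same way the driver's own tests use it.

[source,java]
--------------------------------------------------
import java.sql.Connection;
import java.sql.DatabaseMetaData;
import java.sql.SQLException;

import org.elasticsearch.xpack.sql.jdbc.EsDataSource;

public class JdbcConnectSketch {
    public static void main(String[] args) {
        // Host, port, and page size are placeholders. validate.properties=false tells the
        // driver to ignore unknown connection parameters instead of rejecting them.
        EsDataSource dataSource = new EsDataSource();
        dataSource.setUrl("jdbc:es://localhost:9200/?validate.properties=false&page.size=1000");

        try (Connection connection = dataSource.getConnection()) {
            // Reaching this point implies the driver and server versions match; a mismatch
            // now fails here with an SQLException naming both versions.
            DatabaseMetaData metaData = connection.getMetaData();
            System.out.println("Connected to " + metaData.getDatabaseProductName()
                    + " " + metaData.getDatabaseProductVersion());
        } catch (SQLException e) {
            System.err.println("Connection failed: " + e.getMessage());
        }
    }
}
--------------------------------------------------

Because the check runs in the constructor, callers that used to fail only on the first metadata call now fail inside `getConnection()`, which is the behavior shift the reworked `JdbcSecurityIT` expectations capture.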
--- .../test/indices.put_mapping/20_mix_typeless_typeful.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_mapping/20_mix_typeless_typeful.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_mapping/20_mix_typeless_typeful.yml index 7c6136d273979..13cb3321841cf 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_mapping/20_mix_typeless_typeful.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_mapping/20_mix_typeless_typeful.yml @@ -55,8 +55,8 @@ "PUT mapping with _doc on an index that has types": - skip: - version: "all" - reason: include_type_name is only supported as of 6.7 # AwaitsFix: https://github.com/elastic/elasticsearch/issues/38202 + version: " - 6.6.99" + reason: include_type_name is only supported as of 6.7 - do: From cb044312e9121b664e4ee88cddeae1f1f8c41d5f Mon Sep 17 00:00:00 2001 From: Darren Meiss Date: Wed, 20 Feb 2019 04:38:26 -0500 Subject: [PATCH 48/54] Edits to text in Index API doc (#39010) --- docs/reference/docs/index_.asciidoc | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/docs/reference/docs/index_.asciidoc b/docs/reference/docs/index_.asciidoc index 4c687ac0cd23c..18b3589d5defc 100644 --- a/docs/reference/docs/index_.asciidoc +++ b/docs/reference/docs/index_.asciidoc @@ -39,11 +39,11 @@ The result of the above index operation is: -------------------------------------------------- // TESTRESPONSE[s/"successful" : 2/"successful" : 1/] -The `_shards` header provides information about the replication process of the index operation. +The `_shards` header provides information about the replication process of the index operation: -* `total` - Indicates to how many shard copies (primary and replica shards) the index operation should be executed on. -* `successful`- Indicates the number of shard copies the index operation succeeded on. -* `failed` - An array that contains replication related errors in the case an index operation failed on a replica shard. +`total`:: Indicates how many shard copies (primary and replica shards) the index operation should be executed on. +`successful`:: Indicates the number of shard copies the index operation succeeded on. +`failed`:: An array that contains replication-related errors in the case an index operation failed on a replica shard. The index operation is successful in the case `successful` is at least 1. @@ -299,16 +299,16 @@ Control when the changes made by this request are visible to search. See [[index-noop]] === Noop Updates -When updating a document using the index api a new version of the document is +When updating a document using the index API a new version of the document is always created even if the document hasn't changed. If this isn't acceptable -use the `_update` api with `detect_noop` set to true. This option isn't -available on the index api because the index api doesn't fetch the old source +use the `_update` API with `detect_noop` set to true. This option isn't +available on the index API because the index API doesn't fetch the old source and isn't able to compare it against the new source. There isn't a hard and fast rule about when noop updates aren't acceptable. It's a combination of lots of factors like how frequently your data source sends updates that are actually noops and how many queries per second -Elasticsearch runs on the shard with receiving the updates. 
+Elasticsearch runs on the shard receiving the updates. [float] [[timeout]] @@ -343,7 +343,7 @@ internal versioning is used that starts at 1 and increments with each update, deletes included. Optionally, the version number can be set to an external value (for example, if maintained in a database). To enable this functionality, `version_type` should be set to -`external`. The value provided must be a numeric, long value greater or equal to 0, +`external`. The value provided must be a numeric, long value greater than or equal to 0, and less than around 9.2e+18. When using the external version type, the system checks to see if @@ -363,7 +363,7 @@ PUT twitter/_doc/1?version=2&version_type=external // CONSOLE // TEST[continued] -*NOTE:* versioning is completely real time, and is not affected by the +*NOTE:* Versioning is completely real time, and is not affected by the near real time aspects of search operations. If no version is provided, then the operation is executed without any version checks. @@ -387,15 +387,15 @@ Next to the `external` version type explained above, Elasticsearch also supports other types for specific use cases. Here is an overview of the different version types and their semantics. -`internal`:: only index the document if the given version is identical to the version +`internal`:: Only index the document if the given version is identical to the version of the stored document. -`external` or `external_gt`:: only index the document if the given version is strictly higher +`external` or `external_gt`:: Only index the document if the given version is strictly higher than the version of the stored document *or* if there is no existing document. The given version will be used as the new version and will be stored with the new document. The supplied version must be a non-negative long number. -`external_gte`:: only index the document if the given version is *equal* or higher +`external_gte`:: Only index the document if the given version is *equal* or higher than the version of the stored document. If there is no existing document the operation will succeed as well. The given version will be used as the new version and will be stored with the new document. The supplied version must be a non-negative long number. From 9b8a1665fff772bf82c36062d446a216f3b0b7ac Mon Sep 17 00:00:00 2001 From: Darren Meiss Date: Wed, 20 Feb 2019 04:39:00 -0500 Subject: [PATCH 49/54] Edits to text in Delete By Query API doc (#39017) --- docs/reference/docs/delete-by-query.asciidoc | 54 ++++++++++---------- 1 file changed, 27 insertions(+), 27 deletions(-) diff --git a/docs/reference/docs/delete-by-query.asciidoc b/docs/reference/docs/delete-by-query.asciidoc index 307d762abe79e..f8cb84ab790fa 100644 --- a/docs/reference/docs/delete-by-query.asciidoc +++ b/docs/reference/docs/delete-by-query.asciidoc @@ -2,7 +2,7 @@ == Delete By Query API The simplest usage of `_delete_by_query` just performs a deletion on every -document that match a query. Here is the API: +document that matches a query. Here is the API: [source,js] -------------------------------------------------- @@ -20,7 +20,7 @@ POST twitter/_delete_by_query <1> The query must be passed as a value to the `query` key, in the same way as the <>. You can also use the `q` -parameter in the same way as the search api. +parameter in the same way as the search API. That will return something like this: @@ -68,7 +68,7 @@ these documents. 
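As a rough companion to the console snippets in this section (not part of the doc patch itself), the same delete by query request can be issued from the Java low-level REST client. The host, index name, and field are placeholders; the JSON body has the same shape as the example above.

[source,java]
--------------------------------------------------
import org.apache.http.HttpHost;
import org.apache.http.util.EntityUtils;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;

public class DeleteByQuerySketch {
    public static void main(String[] args) throws Exception {
        try (RestClient client = RestClient.builder(new HttpHost("localhost", 9200, "http")).build()) {
            // Delete every document in "twitter" that matches the query.
            Request request = new Request("POST", "/twitter/_delete_by_query");
            request.setJsonEntity("{\"query\": {\"match\": {\"message\": \"some message\"}}}");

            Response response = client.performRequest(request);
            // The body carries the counters covered in this section:
            // deleted, batches, version_conflicts, retries, failures, and so on.
            System.out.println(EntityUtils.toString(response.getEntity()));
        }
    }
}
--------------------------------------------------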
In case a search or bulk request got rejected, `_delete_by_quer failures that are returned by the failing bulk request are returned in the `failures` element; therefore it's possible for there to be quite a few failed entities. -If you'd like to count version conflicts rather than cause them to abort then +If you'd like to count version conflicts rather than cause them to abort, then set `conflicts=proceed` on the url or `"conflicts": "proceed"` in the request body. Back to the API format, this will delete tweets from the `twitter` index: @@ -140,14 +140,14 @@ POST twitter/_delete_by_query?scroll_size=5000 [float] === URL Parameters -In addition to the standard parameters like `pretty`, the Delete By Query API -also supports `refresh`, `wait_for_completion`, `wait_for_active_shards`, `timeout` +In addition to the standard parameters like `pretty`, the delete by query API +also supports `refresh`, `wait_for_completion`, `wait_for_active_shards`, `timeout`, and `scroll`. Sending the `refresh` will refresh all shards involved in the delete by query -once the request completes. This is different than the Delete API's `refresh` +once the request completes. This is different than the delete API's `refresh` parameter which causes just the shard that received the delete request -to be refreshed. Also unlike the Delete API it does not support `wait_for`. +to be refreshed. Also unlike the delete API it does not support `wait_for`. If the request contains `wait_for_completion=false` then Elasticsearch will perform some preflight checks, launch the request, and then return a `task` @@ -163,10 +163,10 @@ for details. `timeout` controls how long each write request waits for unavailabl shards to become available. Both work exactly how they work in the <>. As `_delete_by_query` uses scroll search, you can also specify the `scroll` parameter to control how long it keeps the "search context" alive, -eg `?scroll=10m`, by default it's 5 minutes. +e.g. `?scroll=10m`. By default it's 5 minutes. `requests_per_second` can be set to any positive decimal number (`1.4`, `6`, -`1000`, etc) and throttles rate at which `_delete_by_query` issues batches of +`1000`, etc.) and throttles the rate at which delete by query issues batches of delete operations by padding each batch with a wait time. The throttling can be disabled by setting `requests_per_second` to `-1`. @@ -182,7 +182,7 @@ target_time = 1000 / 500 per second = 2 seconds wait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds -------------------------------------------------- -Since the batch is issued as a single `_bulk` request large batch sizes will +Since the batch is issued as a single `_bulk` request, large batch sizes will cause Elasticsearch to create many requests and then wait for a while before starting the next set. This is "bursty" instead of "smooth". The default is `-1`. @@ -259,13 +259,13 @@ The number of version conflicts that the delete by query hit. `noops`:: This field is always equal to zero for delete by query. It only exists -so that delete by query, update by query and reindex APIs return responses +so that delete by query, update by query, and reindex APIs return responses with the same structure. `retries`:: The number of retries attempted by delete by query. `bulk` is the number -of bulk actions retried and `search` is the number of search actions retried. +of bulk actions retried, and `search` is the number of search actions retried. 
`throttled_millis`:: @@ -286,7 +286,7 @@ executed again in order to conform to `requests_per_second`. Array of failures if there were any unrecoverable errors during the process. If this is non-empty then the request aborted because of those failures. -Delete-by-query is implemented using batches and any failure causes the entire +Delete by query is implemented using batches, and any failure causes the entire process to abort but all failures in the current batch are collected into the array. You can use the `conflicts` option to prevent reindex from aborting on version conflicts. @@ -296,7 +296,7 @@ version conflicts. [[docs-delete-by-query-task-api]] === Works with the Task API -You can fetch the status of any running delete-by-query requests with the +You can fetch the status of any running delete by query requests with the <>: [source,js] @@ -306,7 +306,7 @@ GET _tasks?detailed=true&actions=*/delete/byquery // CONSOLE // TEST[skip:No tasks to retrieve] -The responses looks like: +The response looks like: [source,js] -------------------------------------------------- @@ -346,7 +346,7 @@ The responses looks like: } -------------------------------------------------- // TESTRESPONSE -<1> this object contains the actual status. It is just like the response json +<1> This object contains the actual status. It is just like the response JSON with the important addition of the `total` field. `total` is the total number of operations that the reindex expects to perform. You can estimate the progress by adding the `updated`, `created`, and `deleted` fields. The request @@ -373,7 +373,7 @@ you to delete that document. [[docs-delete-by-query-cancel-task-api]] === Works with the Cancel Task API -Any Delete By Query can be canceled using the <>: +Any delete by query can be canceled using the <>: [source,js] -------------------------------------------------- @@ -403,26 +403,26 @@ POST _delete_by_query/r1A2WoRbTwKZ516z6NEs5A:36619/_rethrottle?requests_per_seco The task ID can be found using the <>. -Just like when setting it on the `_delete_by_query` API `requests_per_second` +Just like when setting it on the delete by query API, `requests_per_second` can be either `-1` to disable throttling or any decimal number like `1.7` or `12` to throttle to that level. Rethrottling that speeds up the query takes effect immediately but rethrotting that slows down the query will -take effect on after completing the current batch. This prevents scroll +take effect after completing the current batch. This prevents scroll timeouts. [float] [[docs-delete-by-query-slice]] === Slicing -Delete-by-query supports <> to parallelize the deleting process. +Delete by query supports <> to parallelize the deleting process. This parallelization can improve efficiency and provide a convenient way to break the request down into smaller parts. [float] [[docs-delete-by-query-manual-slice]] -==== Manually slicing +==== Manual slicing -Slice a delete-by-query manually by providing a slice id and total number of +Slice a delete by query manually by providing a slice id and total number of slices to each request: [source,js] @@ -498,7 +498,7 @@ Which results in a sensible `total` like this one: ==== Automatic slicing You can also let delete-by-query automatically parallelize using -<> to slice on `_id`. Use `slices` to specify the number of +<> to slice on `_id`. Use `slices` to specify the number of slices to use: [source,js] @@ -575,8 +575,8 @@ be larger than others. Expect larger slices to have a more even distribution. 
are distributed proportionally to each sub-request. Combine that with the point above about distribution being uneven and you should conclude that the using `size` with `slices` might not result in exactly `size` documents being -`_delete_by_query`ed. -* Each sub-requests gets a slightly different snapshot of the source index +deleted. +* Each sub-request gets a slightly different snapshot of the source index though these are all taken at approximately the same time. [float] @@ -588,8 +588,8 @@ number for most indices. If you're slicing manually or otherwise tuning automatic slicing, use these guidelines. Query performance is most efficient when the number of `slices` is equal to the -number of shards in the index. If that number is large, (for example, -500) choose a lower number as too many `slices` will hurt performance. Setting +number of shards in the index. If that number is large (for example, +500), choose a lower number as too many `slices` will hurt performance. Setting `slices` higher than the number of shards generally does not improve efficiency and adds overhead. From 73ccf819aa1ee69c18d062dc249f7cb5503de6db Mon Sep 17 00:00:00 2001 From: Darren Meiss Date: Wed, 20 Feb 2019 04:39:35 -0500 Subject: [PATCH 50/54] Edits to text in Update API doc (#39069) --- docs/reference/docs/update.asciidoc | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/docs/reference/docs/update.asciidoc b/docs/reference/docs/update.asciidoc index 64c0f67bc722c..66e5783e7eb9b 100644 --- a/docs/reference/docs/update.asciidoc +++ b/docs/reference/docs/update.asciidoc @@ -4,7 +4,7 @@ The update API allows to update a document based on a script provided. The operation gets the document (collocated with the shard) from the index, runs the script (with optional script language and parameters), -and index back the result (also allows to delete, or ignore the +and indexes back the result (also allows to delete, or ignore the operation). It uses versioning to make sure no updates have happened during the "get" and "reindex". @@ -46,8 +46,8 @@ POST test/_update/1 // CONSOLE // TEST[continued] -We can add a tag to the list of tags (note, if the tag exists, it -will still add it, since it's a list): +We can add a tag to the list of tags (if the tag exists, it + still gets added, since this is a list): [source,js] -------------------------------------------------- @@ -88,7 +88,7 @@ POST test/_update/1 // TEST[continued] In addition to `_source`, the following variables are available through -the `ctx` map: `_index`, `_type`, `_id`, `_version`, `_routing` +the `ctx` map: `_index`, `_type`, `_id`, `_version`, `_routing`, and `_now` (the current timestamp). We can also add a new field to the document: @@ -116,7 +116,7 @@ POST test/_update/1 // TEST[continued] And, we can even change the operation that is executed. This example deletes -the doc if the `tags` field contain `green`, otherwise it does nothing +the doc if the `tags` field contains `green`, otherwise it does nothing (`noop`): [source,js] @@ -138,7 +138,7 @@ POST test/_update/1 [float] === Updates with a partial document -The update API also support passing a partial document, +The update API also supports passing a partial document, which will be merged into the existing document (simple recursive merge, inner merging of objects, replacing core "keys/values" and arrays). To fully replace the existing document, the <> should @@ -165,7 +165,7 @@ to put your field pairs of the partial document in the script itself. 
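A short sketch of the scripted update flow documented above, using the Java low-level REST client instead of the console; the index, document id, field, and parameter values are placeholders, and the script body follows the same `ctx._source` pattern as the console examples.

[source,java]
--------------------------------------------------
import org.apache.http.HttpHost;
import org.apache.http.util.EntityUtils;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;

public class UpdateSketch {
    public static void main(String[] args) throws Exception {
        try (RestClient client = RestClient.builder(new HttpHost("localhost", 9200, "http")).build()) {
            // Scripted update: increment a counter field through the ctx._source map.
            Request request = new Request("POST", "/test/_update/1");
            request.setJsonEntity(
                "{"
              + "  \"script\": {"
              + "    \"source\": \"ctx._source.counter += params.count\","
              + "    \"lang\": \"painless\","
              + "    \"params\": { \"count\": 4 }"
              + "  }"
              + "}");

            Response response = client.performRequest(request);
            // The "result" field in the response reports what happened (for example "updated").
            System.out.println(EntityUtils.toString(response.getEntity()));
        }
    }
}
--------------------------------------------------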
=== Detecting noop updates If `doc` is specified its value is merged with the existing `_source`. -By default updates that don't change anything detect that they don't change anything and return "result": "noop" like this: +By default updates that don't change anything detect that they don't change anything and return `"result": "noop"` like this: [source,js] -------------------------------------------------- @@ -200,7 +200,7 @@ the request was ignored. -------------------------------------------------- // TESTRESPONSE -You can disable this behavior by setting "detect_noop": false like this: +You can disable this behavior by setting `"detect_noop": false` like this: [source,js] -------------------------------------------------- @@ -323,18 +323,18 @@ See <> for details. `refresh`:: Control when the changes made by this request are visible to search. See -<>. +<>. `_source`:: Allows to control if and how the updated source should be returned in the response. By default the updated source is not returned. -See <> for details. +See <> for details. `version`:: -The update API uses the Elasticsearch's versioning support internally to make +The update API uses the Elasticsearch versioning support internally to make sure the document doesn't change during the update. You can use the `version` parameter to specify that the document should only be updated if its version matches the one specified. @@ -343,7 +343,7 @@ matches the one specified. .The update API does not support versioning other than internal ===================================================== -External (version types `external` & `external_gte`) or forced (version type `force`) +External (version types `external` and `external_gte`) or forced (version type `force`) versioning is not supported by the update API as it would result in Elasticsearch version numbers being out of sync with the external system. Use the <> instead. From f14235c94ffd0622941078b6a426d3c161b63ca2 Mon Sep 17 00:00:00 2001 From: Darren Meiss Date: Wed, 20 Feb 2019 04:40:16 -0500 Subject: [PATCH 51/54] Edits to text in Update By Query API doc (#39078) --- docs/reference/docs/update-by-query.asciidoc | 59 ++++++++++---------- 1 file changed, 30 insertions(+), 29 deletions(-) diff --git a/docs/reference/docs/update-by-query.asciidoc b/docs/reference/docs/update-by-query.asciidoc index a01bd30e4280a..883f6ad2a29e3 100644 --- a/docs/reference/docs/update-by-query.asciidoc +++ b/docs/reference/docs/update-by-query.asciidoc @@ -39,9 +39,9 @@ That will return something like this: // TESTRESPONSE[s/"took" : 147/"took" : "$body.took"/] `_update_by_query` gets a snapshot of the index when it starts and indexes what -it finds using `internal` versioning. That means that you'll get a version +it finds using `internal` versioning. That means you'll get a version conflict if the document changes between the time when the snapshot was taken -and when the index request is processed. When the versions match the document +and when the index request is processed. When the versions match, the document is updated and the version number is incremented. NOTE: Since `internal` versioning does not support the value 0 as a valid @@ -55,10 +55,10 @@ aborted. While the first failure causes the abort, all failures that are returned by the failing bulk request are returned in the `failures` element; therefore it's possible for there to be quite a few failed entities. 
-If you want to simply count version conflicts not cause the `_update_by_query` -to abort you can set `conflicts=proceed` on the url or `"conflicts": "proceed"` +If you want to simply count version conflicts, and not cause the `_update_by_query` +to abort, you can set `conflicts=proceed` on the url or `"conflicts": "proceed"` in the request body. The first example does this because it is just trying to -pick up an online mapping change and a version conflict simply means that the +pick up an online mapping change, and a version conflict simply means that the conflicting document was updated between the start of the `_update_by_query` and the time when it attempted to update the document. This is fine because that update will have picked up the online mapping update. @@ -92,7 +92,7 @@ POST twitter/_update_by_query?conflicts=proceed <1> The query must be passed as a value to the `query` key, in the same way as the <>. You can also use the `q` -parameter in the same way as the search api. +parameter in the same way as the search API. So far we've only been updating documents without changing their source. That is genuinely useful for things like @@ -121,7 +121,7 @@ POST twitter/_update_by_query Just as in <> you can set `ctx.op` to change the operation that is executed: - +[horizontal] `noop`:: Set `ctx.op = "noop"` if your script decides that it doesn't have to make any @@ -199,12 +199,12 @@ POST twitter/_update_by_query?pipeline=set-foo === URL Parameters In addition to the standard parameters like `pretty`, the Update By Query API -also supports `refresh`, `wait_for_completion`, `wait_for_active_shards`, `timeout` +also supports `refresh`, `wait_for_completion`, `wait_for_active_shards`, `timeout`, and `scroll`. Sending the `refresh` will update all shards in the index being updated when the request completes. This is different than the Update API's `refresh` -parameter which causes just the shard that received the new data to be indexed. +parameter, which causes just the shard that received the new data to be indexed. Also unlike the Update API it does not support `wait_for`. If the request contains `wait_for_completion=false` then Elasticsearch will @@ -219,12 +219,12 @@ Elasticsearch can reclaim the space it uses. before proceeding with the request. See <> for details. `timeout` controls how long each write request waits for unavailable shards to become available. Both work exactly how they work in the -<>. As `_update_by_query` uses scroll search, you can also specify +<>. Because `_update_by_query` uses scroll search, you can also specify the `scroll` parameter to control how long it keeps the "search context" alive, -eg `?scroll=10m`, by default it's 5 minutes. +e.g. `?scroll=10m`. By default it's 5 minutes. `requests_per_second` can be set to any positive decimal number (`1.4`, `6`, -`1000`, etc) and throttles rate at which `_update_by_query` issues batches of +`1000`, etc.) and throttles the rate at which `_update_by_query` issues batches of index operations by padding each batch with a wait time. The throttling can be disabled by setting `requests_per_second` to `-1`. 
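Both `conflicts=proceed` and `requests_per_second` are ordinary URL parameters, so the throttled update by query described above can be sketched with the Java low-level REST client as follows; the host, index, and rate are placeholders.

[source,java]
--------------------------------------------------
import org.apache.http.HttpHost;
import org.apache.http.util.EntityUtils;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;

public class UpdateByQuerySketch {
    public static void main(String[] args) throws Exception {
        try (RestClient client = RestClient.builder(new HttpHost("localhost", 9200, "http")).build()) {
            // Body-less update by query: every document in "twitter" is reindexed in place,
            // which is enough to pick up an online mapping change.
            Request request = new Request("POST", "/twitter/_update_by_query");
            // Count version conflicts instead of aborting on the first one.
            request.addParameter("conflicts", "proceed");
            // Throttle to roughly 500 requests per second: with a batch size of 1000,
            // each batch is padded with a wait so it takes about 1000 / 500 = 2 seconds.
            request.addParameter("requests_per_second", "500");

            Response response = client.performRequest(request);
            System.out.println(EntityUtils.toString(response.getEntity()));
        }
    }
}
--------------------------------------------------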
@@ -240,7 +240,7 @@ target_time = 1000 / 500 per second = 2 seconds wait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds -------------------------------------------------- -Since the batch is issued as a single `_bulk` request large batch sizes will +Since the batch is issued as a single `_bulk` request, large batch sizes will cause Elasticsearch to create many requests and then wait for a while before starting the next set. This is "bursty" instead of "smooth". The default is `-1`. @@ -283,6 +283,7 @@ The JSON response looks like this: -------------------------------------------------- // TESTRESPONSE[s/"took" : 147/"took" : "$body.took"/] +[horizontal] `took`:: The number of milliseconds from start to end of the whole operation. @@ -319,8 +320,8 @@ the update by query returned a `noop` value for `ctx.op`. `retries`:: -The number of retries attempted by update-by-query. `bulk` is the number of bulk -actions retried and `search` is the number of search actions retried. +The number of retries attempted by update by query. `bulk` is the number of bulk +actions retried, and `search` is the number of search actions retried. `throttled_millis`:: @@ -341,8 +342,8 @@ executed again in order to conform to `requests_per_second`. Array of failures if there were any unrecoverable errors during the process. If this is non-empty then the request aborted because of those failures. -Update-by-query is implemented using batches and any failure causes the entire -process to abort but all failures in the current batch are collected into the +Update by query is implemented using batches. Any failure causes the entire +process to abort, but all failures in the current batch are collected into the array. You can use the `conflicts` option to prevent reindex from aborting on version conflicts. @@ -352,7 +353,7 @@ version conflicts. [[docs-update-by-query-task-api]] === Works with the Task API -You can fetch the status of all running update-by-query requests with the +You can fetch the status of all running update by query requests with the <>: [source,js] @@ -406,7 +407,7 @@ The responses looks like: -------------------------------------------------- // TESTRESPONSE -<1> this object contains the actual status. It is just like the response json +<1> This object contains the actual status. It is just like the response JSON with the important addition of the `total` field. `total` is the total number of operations that the reindex expects to perform. You can estimate the progress by adding the `updated`, `created`, and `deleted` fields. The request @@ -424,7 +425,7 @@ GET /_tasks/r1A2WoRbTwKZ516z6NEs5A:36619 The advantage of this API is that it integrates with `wait_for_completion=false` to transparently return the status of completed tasks. If the task is completed -and `wait_for_completion=false` was set on it them it'll come back with a +and `wait_for_completion=false` was set on it, then it'll come back with a `results` or an `error` field. The cost of this feature is the document that `wait_for_completion=false` creates at `.tasks/task/${taskId}`. It is up to you to delete that document. @@ -434,7 +435,7 @@ you to delete that document. 
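The fire-and-forget workflow described above (start the request with `wait_for_completion=false`, then poll the Tasks API) can be sketched with the Java low-level REST client as follows; the host and index are placeholders, and the task id is the same illustrative value used elsewhere in this section.

[source,java]
--------------------------------------------------
import org.apache.http.HttpHost;
import org.apache.http.util.EntityUtils;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;

public class UpdateByQueryTaskSketch {
    public static void main(String[] args) throws Exception {
        try (RestClient client = RestClient.builder(new HttpHost("localhost", 9200, "http")).build()) {
            // Launch the request without waiting for it to finish.
            Request start = new Request("POST", "/twitter/_update_by_query");
            start.addParameter("conflicts", "proceed");
            start.addParameter("wait_for_completion", "false");
            Response started = client.performRequest(start);
            // The body is {"task": "<node>:<id>"}; parse out the task id with any JSON library.
            System.out.println(EntityUtils.toString(started.getEntity()));

            // Poll the task until "completed" is true (task id below is a placeholder).
            Request poll = new Request("GET", "/_tasks/r1A2WoRbTwKZ516z6NEs5A:36619");
            Response status = client.performRequest(poll);
            System.out.println(EntityUtils.toString(status.getEntity()));
            // Remember to delete the result document the task API leaves at
            // .tasks/task/${taskId}, as noted above.
        }
    }
}
--------------------------------------------------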
[[docs-update-by-query-cancel-task-api]] === Works with the Cancel Task API -Any Update By Query can be canceled using the <>: +Any update by query can be cancelled using the <>: [source,js] -------------------------------------------------- @@ -464,25 +465,25 @@ POST _update_by_query/r1A2WoRbTwKZ516z6NEs5A:36619/_rethrottle?requests_per_seco The task ID can be found using the <>. -Just like when setting it on the `_update_by_query` API `requests_per_second` +Just like when setting it on the `_update_by_query` API, `requests_per_second` can be either `-1` to disable throttling or any decimal number like `1.7` or `12` to throttle to that level. Rethrottling that speeds up the -query takes effect immediately but rethrotting that slows down the query will -take effect on after completing the current batch. This prevents scroll +query takes effect immediately, but rethrotting that slows down the query will +take effect after completing the current batch. This prevents scroll timeouts. [float] [[docs-update-by-query-slice]] === Slicing -Update-by-query supports <> to parallelize the updating process. +Update by query supports <> to parallelize the updating process. This parallelization can improve efficiency and provide a convenient way to break the request down into smaller parts. [float] [[docs-update-by-query-manual-slice]] ==== Manual slicing -Slice an update-by-query manually by providing a slice id and total number of +Slice an update by query manually by providing a slice id and total number of slices to each request: [source,js] @@ -540,7 +541,7 @@ Which results in a sensible `total` like this one: [[docs-update-by-query-automatic-slice]] ==== Automatic slicing -You can also let update-by-query automatically parallelize using +You can also let update by query automatically parallelize using <> to slice on `_id`. Use `slices` to specify the number of slices to use: @@ -605,8 +606,8 @@ be larger than others. Expect larger slices to have a more even distribution. are distributed proportionally to each sub-request. Combine that with the point above about distribution being uneven and you should conclude that the using `size` with `slices` might not result in exactly `size` documents being -`_update_by_query`ed. -* Each sub-requests gets a slightly different snapshot of the source index +updated. +* Each sub-request gets a slightly different snapshot of the source index though these are all taken at approximately the same time. [float] From 34b4262302e9ed50f0e1c12a1f685f29fb3bc843 Mon Sep 17 00:00:00 2001 From: Darren Meiss Date: Wed, 20 Feb 2019 04:40:47 -0500 Subject: [PATCH 52/54] Edits to text in Multi Get API doc (#39082) --- docs/reference/docs/multi-get.asciidoc | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/docs/reference/docs/multi-get.asciidoc b/docs/reference/docs/multi-get.asciidoc index 6d50a6a643a89..fc8cc667999c1 100644 --- a/docs/reference/docs/multi-get.asciidoc +++ b/docs/reference/docs/multi-get.asciidoc @@ -1,7 +1,7 @@ [[docs-multi-get]] == Multi Get API -Multi GET API allows to get multiple documents based on an index, type +Multi Get API allows to get multiple documents based on an index, type, (optional) and id (and possibly routing). 
The response includes a `docs` array with all the fetched documents in order corresponding to the original multi-get request (if there was a failure for a specific get, an object containing this @@ -89,7 +89,7 @@ GET /test/_doc/_mget By default, the `_source` field will be returned for every document (if stored). Similar to the <> API, you can retrieve only parts of the `_source` (or not at all) by using the `_source` parameter. You can also use -the url parameters `_source`,`_source_includes` & `_source_excludes` to specify defaults, +the url parameters `_source`, `_source_includes`, and `_source_excludes` to specify defaults, which will be used when there are no per-document instructions. For example: @@ -181,7 +181,7 @@ GET /test/_doc/_mget?stored_fields=field1,field2 [[mget-routing]] === Routing -You can also specify routing value as a parameter: +You can also specify a routing value as a parameter: [source,js] -------------------------------------------------- @@ -204,11 +204,11 @@ GET /_mget?routing=key1 -------------------------------------------------- // CONSOLE -In this example, document `test/_doc/2` will be fetch from shard corresponding to routing key `key1` but -document `test/_doc/1` will be fetch from shard corresponding to routing key `key2`. +In this example, document `test/_doc/2` will be fetched from the shard corresponding to routing key `key1` but +document `test/_doc/1` will be fetched from the shard corresponding to routing key `key2`. [float] [[mget-security]] === Security -See <> +See <>. From ff87932c212266c4af3081efa3e99448e49f71b8 Mon Sep 17 00:00:00 2001 From: Darren Meiss Date: Wed, 20 Feb 2019 04:41:17 -0500 Subject: [PATCH 53/54] Edits to text in Bulk API doc (#39083) --- docs/reference/docs/bulk.asciidoc | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/docs/reference/docs/bulk.asciidoc b/docs/reference/docs/bulk.asciidoc index 89d53e6aa59f9..382d644023bb7 100644 --- a/docs/reference/docs/bulk.asciidoc +++ b/docs/reference/docs/bulk.asciidoc @@ -36,11 +36,11 @@ optional_source\n -------------------------------------------------- // NOTCONSOLE -*NOTE*: the final line of data must end with a newline character `\n`. Each newline character +*NOTE*: The final line of data must end with a newline character `\n`. Each newline character may be preceded by a carriage return `\r`. When sending requests to this endpoint the `Content-Type` header should be set to `application/x-ndjson`. -The possible actions are `index`, `create`, `delete` and `update`. +The possible actions are `index`, `create`, `delete`, and `update`. `index` and `create` expect a source on the next line, and have the same semantics as the `op_type` parameter to the standard index API (i.e. create will fail if a document with the same @@ -214,7 +214,7 @@ documents. See <> for more details. Each bulk item can include the version value using the `version` field. It automatically follows the behavior of the index / delete operation based on the `_version` mapping. It also -support the `version_type` (see <>) +support the `version_type` (see <>). [float] [[bulk-routing]] @@ -245,20 +245,20 @@ NOTE: Only the shards that receive the bulk request will be affected by `refresh`. Imagine a `_bulk?refresh=wait_for` request with three documents in it that happen to be routed to different shards in an index with five shards. The request will only wait for those three shards to -refresh. The other two shards of that make up the index do not +refresh. 
The other two shards that make up the index do not participate in the `_bulk` request at all. [float] [[bulk-update]] === Update -When using `update` action `retry_on_conflict` can be used as field in +When using the `update` action, `retry_on_conflict` can be used as a field in the action itself (not in the extra payload line), to specify how many times an update should be retried in the case of a version conflict. -The `update` action payload, supports the following options: `doc` +The `update` action payload supports the following options: `doc` (partial document), `upsert`, `doc_as_upsert`, `script`, `params` (for -script), `lang` (for script) and `_source`. See update documentation for details on +script), `lang` (for script), and `_source`. See update documentation for details on the options. Example with update actions: [source,js] @@ -282,4 +282,4 @@ POST _bulk [[bulk-security]] === Security -See <> +See <>. From 12006ea29a0a168af95a510d8efdb454a6465c93 Mon Sep 17 00:00:00 2001 From: Darren Meiss Date: Wed, 20 Feb 2019 04:35:17 -0500 Subject: [PATCH 54/54] Fix a typo in the cat shards API docs (#38536) --- docs/reference/cat/shards.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/reference/cat/shards.asciidoc b/docs/reference/cat/shards.asciidoc index f63e37c6a3d69..f32c553ba72f6 100644 --- a/docs/reference/cat/shards.asciidoc +++ b/docs/reference/cat/shards.asciidoc @@ -56,7 +56,7 @@ twitter 0 p STARTED 3014 31.1mb 192.168.56.10 H5dfFeA [[relocation]] === Relocation -Let's say you've checked your health and you see a relocating +Let's say you've checked your health and you see relocating shards. Where are they from and where are they going? [source,js]