From 0f8fa846a2822c4293df32fed18c9b99660b39ff Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Fri, 22 Apr 2016 16:10:30 -0400 Subject: [PATCH 01/86] Create TransportReplicatedMutationAction It is the superclass of replication actions that mutate data: index, delete, and shardBulk. shardFlush and shardRefresh are replication actions but they do not extend TransportReplicatedMutationAction because they don't change the data, only shuffle it around. --- .../action/bulk/TransportShardBulkAction.java | 4 +- .../action/delete/TransportDeleteAction.java | 4 +- .../action/index/TransportIndexAction.java | 4 +- .../TransportReplicatedWriteAction.java | 66 +++++++++++++++++++ .../TransportReplicationAction.java | 14 ---- 5 files changed, 72 insertions(+), 20 deletions(-) create mode 100644 core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicatedWriteAction.java diff --git a/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java b/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java index a2f642374b74e..778e2451227ea 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java @@ -30,7 +30,7 @@ import org.elasticsearch.action.index.TransportIndexAction; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.replication.ReplicationRequest; -import org.elasticsearch.action.support.replication.TransportReplicationAction; +import org.elasticsearch.action.support.replication.TransportReplicatedWriteAction; import org.elasticsearch.action.update.UpdateHelper; import org.elasticsearch.action.update.UpdateRequest; import org.elasticsearch.action.update.UpdateResponse; @@ -67,7 +67,7 @@ /** * Performs the index operation. */ -public class TransportShardBulkAction extends TransportReplicationAction { +public class TransportShardBulkAction extends TransportReplicatedWriteAction { private final static String OP_TYPE_UPDATE = "update"; private final static String OP_TYPE_DELETE = "delete"; diff --git a/core/src/main/java/org/elasticsearch/action/delete/TransportDeleteAction.java b/core/src/main/java/org/elasticsearch/action/delete/TransportDeleteAction.java index 62d46766c4724..8bdc59b402e6f 100644 --- a/core/src/main/java/org/elasticsearch/action/delete/TransportDeleteAction.java +++ b/core/src/main/java/org/elasticsearch/action/delete/TransportDeleteAction.java @@ -27,7 +27,7 @@ import org.elasticsearch.action.admin.indices.create.TransportCreateIndexAction; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.AutoCreateIndex; -import org.elasticsearch.action.support.replication.TransportReplicationAction; +import org.elasticsearch.action.support.replication.TransportReplicatedWriteAction; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.action.shard.ShardStateAction; import org.elasticsearch.cluster.metadata.IndexMetaData; @@ -49,7 +49,7 @@ /** * Performs the delete operation. 
*/ -public class TransportDeleteAction extends TransportReplicationAction { +public class TransportDeleteAction extends TransportReplicatedWriteAction { private final AutoCreateIndex autoCreateIndex; private final TransportCreateIndexAction createIndexAction; diff --git a/core/src/main/java/org/elasticsearch/action/index/TransportIndexAction.java b/core/src/main/java/org/elasticsearch/action/index/TransportIndexAction.java index 3915e23c2edab..b2e1ff09669e1 100644 --- a/core/src/main/java/org/elasticsearch/action/index/TransportIndexAction.java +++ b/core/src/main/java/org/elasticsearch/action/index/TransportIndexAction.java @@ -27,7 +27,7 @@ import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.AutoCreateIndex; import org.elasticsearch.action.support.replication.ReplicationOperation; -import org.elasticsearch.action.support.replication.TransportReplicationAction; +import org.elasticsearch.action.support.replication.TransportReplicatedWriteAction; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.action.index.MappingUpdatedAction; import org.elasticsearch.cluster.action.shard.ShardStateAction; @@ -62,7 +62,7 @@ *
  • allowIdGeneration: If the id is set not, should it be generated. Defaults to true. * */ -public class TransportIndexAction extends TransportReplicationAction { +public class TransportIndexAction extends TransportReplicatedWriteAction { private final AutoCreateIndex autoCreateIndex; private final boolean allowIdGeneration; diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicatedWriteAction.java b/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicatedWriteAction.java new file mode 100644 index 0000000000000..084fb6ccb8fbd --- /dev/null +++ b/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicatedWriteAction.java @@ -0,0 +1,66 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.support.replication; + +import org.elasticsearch.action.ReplicationResponse; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.TransportAction; +import org.elasticsearch.cluster.action.shard.ShardStateAction; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.shard.IndexShard; +import org.elasticsearch.index.translog.Translog; +import org.elasticsearch.indices.IndicesService; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; + +import java.util.function.Supplier; + +public abstract class TransportReplicatedWriteAction< + Request extends ReplicationRequest, + ReplicaRequest extends ReplicationRequest, + Response extends ReplicationResponse + > extends TransportReplicationAction { + + protected TransportReplicatedWriteAction(Settings settings, String actionName, TransportService transportService, + ClusterService clusterService, IndicesService indicesService, ThreadPool threadPool, ShardStateAction shardStateAction, + ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, Supplier request, + Supplier replicaRequest, String executor) { + super(settings, actionName, transportService, clusterService, indicesService, threadPool, shardStateAction, actionFilters, + indexNameExpressionResolver, request, replicaRequest, executor); + } + + + + protected final void processAfterWrite(boolean refresh, IndexShard indexShard, Translog.Location location) { + if (refresh) { + try { + indexShard.refresh("refresh_flag_index"); + } catch (Throwable e) { + // ignore + } + } + if (indexShard.getTranslogDurability() == Translog.Durability.REQUEST && location != null) { + indexShard.sync(location); + } + indexShard.maybeFlush(); + } +} diff --git 
a/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java b/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java index 2cc647ddd5a4e..fa7d4da6fade3 100644 --- a/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java +++ b/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java @@ -804,20 +804,6 @@ public void onFailure(Throwable shardFailedError) { } } - protected final void processAfterWrite(boolean refresh, IndexShard indexShard, Translog.Location location) { - if (refresh) { - try { - indexShard.refresh("refresh_flag_index"); - } catch (Throwable e) { - // ignore - } - } - if (indexShard.getTranslogDurability() == Translog.Durability.REQUEST && location != null) { - indexShard.sync(location); - } - indexShard.maybeFlush(); - } - /** * Sets the current phase on the task if it isn't null. Pulled into its own * method because its more convenient that way. From 8bdc415fedaaa9f2d0c555590a13ec4699a7c3f7 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Fri, 22 Apr 2016 16:23:28 -0400 Subject: [PATCH 02/86] Create ReplicatedMutationRequest Superclass for index, delete, and bulkShard requests. --- .../action/bulk/BulkShardRequest.java | 16 +--- .../action/bulk/TransportShardBulkAction.java | 4 +- .../action/delete/DeleteRequest.java | 21 +----- .../action/delete/TransportDeleteAction.java | 4 +- .../action/index/IndexRequest.java | 21 +----- .../action/index/TransportIndexAction.java | 4 +- .../ReplicatedMutationRequest.java | 73 +++++++++++++++++++ .../replication/ReplicationRequest.java | 1 - ...=> TransportReplicatedMutationAction.java} | 10 +-- 9 files changed, 92 insertions(+), 62 deletions(-) create mode 100644 core/src/main/java/org/elasticsearch/action/support/replication/ReplicatedMutationRequest.java rename core/src/main/java/org/elasticsearch/action/support/replication/{TransportReplicatedWriteAction.java => TransportReplicatedMutationAction.java} (90%) diff --git a/core/src/main/java/org/elasticsearch/action/bulk/BulkShardRequest.java b/core/src/main/java/org/elasticsearch/action/bulk/BulkShardRequest.java index 874789e8d61f4..6513f3a46fd3e 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/BulkShardRequest.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/BulkShardRequest.java @@ -19,7 +19,7 @@ package org.elasticsearch.action.bulk; -import org.elasticsearch.action.support.replication.ReplicationRequest; +import org.elasticsearch.action.support.replication.ReplicatedMutationRequest; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.index.shard.ShardId; @@ -31,23 +31,17 @@ /** * */ -public class BulkShardRequest extends ReplicationRequest { +public class BulkShardRequest extends ReplicatedMutationRequest { private BulkItemRequest[] items; - private boolean refresh; - public BulkShardRequest() { } BulkShardRequest(BulkRequest bulkRequest, ShardId shardId, boolean refresh, BulkItemRequest[] items) { super(shardId); this.items = items; - this.refresh = refresh; - } - - boolean refresh() { - return this.refresh; + this.refresh(refresh); } BulkItemRequest[] items() { @@ -77,7 +71,6 @@ public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(false); } } - out.writeBoolean(refresh); } @Override @@ -89,7 +82,6 @@ public void readFrom(StreamInput in) throws IOException { items[i] = 
BulkItemRequest.readBulkItem(in); } } - refresh = in.readBoolean(); } @Override @@ -97,7 +89,7 @@ public String toString() { // This is included in error messages so we'll try to make it somewhat user friendly. StringBuilder b = new StringBuilder("BulkShardRequest to ["); b.append(index).append("] containing [").append(items.length).append("] requests"); - if (refresh) { + if (refresh()) { b.append(" and a refresh"); } return b.toString(); diff --git a/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java b/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java index 778e2451227ea..82393444bb6a6 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java @@ -30,7 +30,7 @@ import org.elasticsearch.action.index.TransportIndexAction; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.replication.ReplicationRequest; -import org.elasticsearch.action.support.replication.TransportReplicatedWriteAction; +import org.elasticsearch.action.support.replication.TransportReplicatedMutationAction; import org.elasticsearch.action.update.UpdateHelper; import org.elasticsearch.action.update.UpdateRequest; import org.elasticsearch.action.update.UpdateResponse; @@ -67,7 +67,7 @@ /** * Performs the index operation. */ -public class TransportShardBulkAction extends TransportReplicatedWriteAction { +public class TransportShardBulkAction extends TransportReplicatedMutationAction { private final static String OP_TYPE_UPDATE = "update"; private final static String OP_TYPE_DELETE = "delete"; diff --git a/core/src/main/java/org/elasticsearch/action/delete/DeleteRequest.java b/core/src/main/java/org/elasticsearch/action/delete/DeleteRequest.java index cbd10553522f1..7fe997073d85c 100644 --- a/core/src/main/java/org/elasticsearch/action/delete/DeleteRequest.java +++ b/core/src/main/java/org/elasticsearch/action/delete/DeleteRequest.java @@ -21,7 +21,7 @@ import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.DocumentRequest; -import org.elasticsearch.action.support.replication.ReplicationRequest; +import org.elasticsearch.action.support.replication.ReplicatedMutationRequest; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -43,7 +43,7 @@ * @see org.elasticsearch.client.Client#delete(DeleteRequest) * @see org.elasticsearch.client.Requests#deleteRequest(String) */ -public class DeleteRequest extends ReplicationRequest implements DocumentRequest { +public class DeleteRequest extends ReplicatedMutationRequest implements DocumentRequest { private String type; private String id; @@ -51,7 +51,6 @@ public class DeleteRequest extends ReplicationRequest implements private String routing; @Nullable private String parent; - private boolean refresh; private long version = Versions.MATCH_ANY; private VersionType versionType = VersionType.INTERNAL; @@ -165,20 +164,6 @@ public String routing() { return this.routing; } - /** - * Should a refresh be executed post this index operation causing the operation to - * be searchable. Note, heavy indexing should not set this to true. Defaults - * to false. 
- */ - public DeleteRequest refresh(boolean refresh) { - this.refresh = refresh; - return this; - } - - public boolean refresh() { - return this.refresh; - } - /** * Sets the version, which will cause the delete operation to only be performed if a matching * version exists and no changes happened on the doc since then. @@ -208,7 +193,6 @@ public void readFrom(StreamInput in) throws IOException { id = in.readString(); routing = in.readOptionalString(); parent = in.readOptionalString(); - refresh = in.readBoolean(); version = in.readLong(); versionType = VersionType.fromValue(in.readByte()); } @@ -220,7 +204,6 @@ public void writeTo(StreamOutput out) throws IOException { out.writeString(id); out.writeOptionalString(routing()); out.writeOptionalString(parent()); - out.writeBoolean(refresh); out.writeLong(version); out.writeByte(versionType.getValue()); } diff --git a/core/src/main/java/org/elasticsearch/action/delete/TransportDeleteAction.java b/core/src/main/java/org/elasticsearch/action/delete/TransportDeleteAction.java index 8bdc59b402e6f..9d8747424bbbd 100644 --- a/core/src/main/java/org/elasticsearch/action/delete/TransportDeleteAction.java +++ b/core/src/main/java/org/elasticsearch/action/delete/TransportDeleteAction.java @@ -27,7 +27,7 @@ import org.elasticsearch.action.admin.indices.create.TransportCreateIndexAction; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.AutoCreateIndex; -import org.elasticsearch.action.support.replication.TransportReplicatedWriteAction; +import org.elasticsearch.action.support.replication.TransportReplicatedMutationAction; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.action.shard.ShardStateAction; import org.elasticsearch.cluster.metadata.IndexMetaData; @@ -49,7 +49,7 @@ /** * Performs the delete operation. */ -public class TransportDeleteAction extends TransportReplicatedWriteAction { +public class TransportDeleteAction extends TransportReplicatedMutationAction { private final AutoCreateIndex autoCreateIndex; private final TransportCreateIndexAction createIndexAction; diff --git a/core/src/main/java/org/elasticsearch/action/index/IndexRequest.java b/core/src/main/java/org/elasticsearch/action/index/IndexRequest.java index 31accebc3b2ad..ffd71eacd0b6a 100644 --- a/core/src/main/java/org/elasticsearch/action/index/IndexRequest.java +++ b/core/src/main/java/org/elasticsearch/action/index/IndexRequest.java @@ -24,7 +24,7 @@ import org.elasticsearch.action.DocumentRequest; import org.elasticsearch.action.RoutingMissingException; import org.elasticsearch.action.TimestampParsingException; -import org.elasticsearch.action.support.replication.ReplicationRequest; +import org.elasticsearch.action.support.replication.ReplicatedMutationRequest; import org.elasticsearch.client.Requests; import org.elasticsearch.cluster.metadata.MappingMetaData; import org.elasticsearch.cluster.metadata.MetaData; @@ -67,7 +67,7 @@ * @see org.elasticsearch.client.Requests#indexRequest(String) * @see org.elasticsearch.client.Client#index(IndexRequest) */ -public class IndexRequest extends ReplicationRequest implements DocumentRequest { +public class IndexRequest extends ReplicatedMutationRequest implements DocumentRequest { /** * Operation type controls if the type of the index operation. 
@@ -145,7 +145,6 @@ public static OpType fromString(String sOpType) { private OpType opType = OpType.INDEX; - private boolean refresh = false; private long version = Versions.MATCH_ANY; private VersionType versionType = VersionType.INTERNAL; @@ -542,20 +541,6 @@ public OpType opType() { return this.opType; } - /** - * Should a refresh be executed post this index operation causing the operation to - * be searchable. Note, heavy indexing should not set this to true. Defaults - * to false. - */ - public IndexRequest refresh(boolean refresh) { - this.refresh = refresh; - return this; - } - - public boolean refresh() { - return this.refresh; - } - /** * Sets the version, which will cause the index operation to only be performed if a matching * version exists and no changes happened on the doc since then. @@ -652,7 +637,6 @@ public void readFrom(StreamInput in) throws IOException { source = in.readBytesReference(); opType = OpType.fromId(in.readByte()); - refresh = in.readBoolean(); version = in.readLong(); versionType = VersionType.fromValue(in.readByte()); pipeline = in.readOptionalString(); @@ -674,7 +658,6 @@ public void writeTo(StreamOutput out) throws IOException { } out.writeBytesReference(source); out.writeByte(opType.id()); - out.writeBoolean(refresh); out.writeLong(version); out.writeByte(versionType.getValue()); out.writeOptionalString(pipeline); diff --git a/core/src/main/java/org/elasticsearch/action/index/TransportIndexAction.java b/core/src/main/java/org/elasticsearch/action/index/TransportIndexAction.java index b2e1ff09669e1..dd04f58bc0dd0 100644 --- a/core/src/main/java/org/elasticsearch/action/index/TransportIndexAction.java +++ b/core/src/main/java/org/elasticsearch/action/index/TransportIndexAction.java @@ -27,7 +27,7 @@ import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.AutoCreateIndex; import org.elasticsearch.action.support.replication.ReplicationOperation; -import org.elasticsearch.action.support.replication.TransportReplicatedWriteAction; +import org.elasticsearch.action.support.replication.TransportReplicatedMutationAction; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.action.index.MappingUpdatedAction; import org.elasticsearch.cluster.action.shard.ShardStateAction; @@ -62,7 +62,7 @@ *
  • allowIdGeneration: If the id is set not, should it be generated. Defaults to true. * */ -public class TransportIndexAction extends TransportReplicatedWriteAction { +public class TransportIndexAction extends TransportReplicatedMutationAction { private final AutoCreateIndex autoCreateIndex; private final boolean allowIdGeneration; diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/ReplicatedMutationRequest.java b/core/src/main/java/org/elasticsearch/action/support/replication/ReplicatedMutationRequest.java new file mode 100644 index 0000000000000..68952b90a4ebc --- /dev/null +++ b/core/src/main/java/org/elasticsearch/action/support/replication/ReplicatedMutationRequest.java @@ -0,0 +1,73 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.support.replication; + +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.index.shard.ShardId; + +import java.io.IOException; + +/** + * Base class for requests that modify data in some shard like delete, index, and shardBulk. + */ +public class ReplicatedMutationRequest> extends ReplicationRequest { + private boolean refresh; + + /** + * Create an empty request. + */ + public ReplicatedMutationRequest() { + } + + /** + * Creates a new request with resolved shard id. + */ + public ReplicatedMutationRequest(ShardId shardId) { + super(shardId); + } + + /** + * Should a refresh be executed post this index operation causing the operation to + * be searchable. Note, heavy indexing should not set this to true. Defaults + * to false. 
+ */ + @SuppressWarnings("unchecked") + public R refresh(boolean refresh) { + this.refresh = refresh; + return (R) this; + } + + public boolean refresh() { + return this.refresh; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + refresh = in.readBoolean(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeBoolean(refresh); + } +} diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationRequest.java b/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationRequest.java index 3e88575b71789..5bd858234ff6a 100644 --- a/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationRequest.java +++ b/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationRequest.java @@ -65,7 +65,6 @@ public ReplicationRequest() { } - /** * Creates a new request with resolved shard id */ diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicatedWriteAction.java b/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicatedMutationAction.java similarity index 90% rename from core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicatedWriteAction.java rename to core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicatedMutationAction.java index 084fb6ccb8fbd..70357220dbdd8 100644 --- a/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicatedWriteAction.java +++ b/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicatedMutationAction.java @@ -21,7 +21,6 @@ import org.elasticsearch.action.ReplicationResponse; import org.elasticsearch.action.support.ActionFilters; -import org.elasticsearch.action.support.TransportAction; import org.elasticsearch.cluster.action.shard.ShardStateAction; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; @@ -34,13 +33,16 @@ import java.util.function.Supplier; -public abstract class TransportReplicatedWriteAction< +/** + * Base class for transport actions that modify data in some shard like index, delete, and shardBulk. 
+ */ +public abstract class TransportReplicatedMutationAction< Request extends ReplicationRequest, ReplicaRequest extends ReplicationRequest, Response extends ReplicationResponse > extends TransportReplicationAction { - protected TransportReplicatedWriteAction(Settings settings, String actionName, TransportService transportService, + protected TransportReplicatedMutationAction(Settings settings, String actionName, TransportService transportService, ClusterService clusterService, IndicesService indicesService, ThreadPool threadPool, ShardStateAction shardStateAction, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, Supplier request, Supplier replicaRequest, String executor) { @@ -48,8 +50,6 @@ protected TransportReplicatedWriteAction(Settings settings, String actionName, T indexNameExpressionResolver, request, replicaRequest, executor); } - - protected final void processAfterWrite(boolean refresh, IndexShard indexShard, Translog.Location location) { if (refresh) { try { From 0642083676702618f900fa842c08802a04c1a53e Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Fri, 22 Apr 2016 16:32:29 -0400 Subject: [PATCH 03/86] Factor out common code from shardOperationOnReplica --- .../action/bulk/TransportShardBulkAction.java | 10 +++------ .../action/delete/TransportDeleteAction.java | 8 +++---- .../action/index/TransportIndexAction.java | 9 +++----- .../TransportReplicatedMutationAction.java | 22 +++++++++++++++++-- 4 files changed, 29 insertions(+), 20 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java b/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java index 82393444bb6a6..ddd254f1c7821 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java @@ -53,6 +53,7 @@ import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.translog.Translog; +import org.elasticsearch.index.translog.Translog.Location; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.threadpool.ThreadPool; @@ -431,12 +432,8 @@ private UpdateResult shardUpdateOperation(IndexMetaData metaData, BulkShardReque } } - @Override - protected void shardOperationOnReplica(BulkShardRequest request) { - final ShardId shardId = request.shardId(); - IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex()); - IndexShard indexShard = indexService.getShard(shardId.id()); + protected Location onReplicaShard(BulkShardRequest request, IndexShard indexShard) { Translog.Location location = null; for (int i = 0; i < request.items().length; i++) { BulkItemRequest item = request.items()[i]; @@ -472,8 +469,7 @@ protected void shardOperationOnReplica(BulkShardRequest request) { throw new IllegalStateException("Unexpected index operation: " + item.request()); } } - - processAfterWrite(request.refresh(), indexShard, location); + return location; } private void applyVersion(BulkItemRequest item, long version, VersionType versionType) { diff --git a/core/src/main/java/org/elasticsearch/action/delete/TransportDeleteAction.java b/core/src/main/java/org/elasticsearch/action/delete/TransportDeleteAction.java index 9d8747424bbbd..059f1ca7420da 100644 --- a/core/src/main/java/org/elasticsearch/action/delete/TransportDeleteAction.java +++ 
b/core/src/main/java/org/elasticsearch/action/delete/TransportDeleteAction.java @@ -40,6 +40,7 @@ import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.index.translog.Translog.Location; import org.elasticsearch.indices.IndexAlreadyExistsException; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.tasks.Task; @@ -146,11 +147,8 @@ public static Engine.Delete executeDeleteRequestOnReplica(DeleteRequest request, } @Override - protected void shardOperationOnReplica(DeleteRequest request) { - final ShardId shardId = request.shardId(); - IndexShard indexShard = indicesService.indexServiceSafe(shardId.getIndex()).getShard(shardId.id()); - Engine.Delete delete = executeDeleteRequestOnReplica(request, indexShard); - processAfterWrite(request.refresh(), indexShard, delete.getTranslogLocation()); + protected Location onReplicaShard(DeleteRequest request, IndexShard indexShard) { + return executeDeleteRequestOnReplica(request, indexShard).getTranslogLocation(); } } diff --git a/core/src/main/java/org/elasticsearch/action/index/TransportIndexAction.java b/core/src/main/java/org/elasticsearch/action/index/TransportIndexAction.java index dd04f58bc0dd0..19a0a748261a7 100644 --- a/core/src/main/java/org/elasticsearch/action/index/TransportIndexAction.java +++ b/core/src/main/java/org/elasticsearch/action/index/TransportIndexAction.java @@ -46,6 +46,7 @@ import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.translog.Translog; +import org.elasticsearch.index.translog.Translog.Location; import org.elasticsearch.indices.IndexAlreadyExistsException; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.tasks.Task; @@ -155,12 +156,8 @@ protected Tuple shardOperationOnPrimary(IndexReques } @Override - protected void shardOperationOnReplica(IndexRequest request) { - final ShardId shardId = request.shardId(); - IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex()); - IndexShard indexShard = indexService.getShard(shardId.id()); - final Engine.Index operation = executeIndexRequestOnReplica(request, indexShard); - processAfterWrite(request.refresh(), indexShard, operation.getTranslogLocation()); + protected Location onReplicaShard(IndexRequest request, IndexShard indexShard) { + return executeIndexRequestOnReplica(request, indexShard).getTranslogLocation(); } /** diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicatedMutationAction.java b/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicatedMutationAction.java index 70357220dbdd8..5d99b392e1101 100644 --- a/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicatedMutationAction.java +++ b/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicatedMutationAction.java @@ -25,7 +25,9 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.IndexService; import org.elasticsearch.index.shard.IndexShard; +import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.translog.Translog; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.threadpool.ThreadPool; @@ -37,8 +39,8 @@ * Base class for transport actions that 
modify data in some shard like index, delete, and shardBulk. */ public abstract class TransportReplicatedMutationAction< - Request extends ReplicationRequest, - ReplicaRequest extends ReplicationRequest, + Request extends ReplicatedMutationRequest, + ReplicaRequest extends ReplicatedMutationRequest, Response extends ReplicationResponse > extends TransportReplicationAction { @@ -50,6 +52,22 @@ protected TransportReplicatedMutationAction(Settings settings, String actionName indexNameExpressionResolver, request, replicaRequest, executor); } + /** + * Called on each replica node with a reference to the shard to modify. + * + * @return the translog location after the modification + */ + protected abstract Translog.Location onReplicaShard(ReplicaRequest request, IndexShard indexShard); + + @Override + protected final void shardOperationOnReplica(ReplicaRequest request) { + final ShardId shardId = request.shardId(); + IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex()); + IndexShard indexShard = indexService.getShard(shardId.id()); + Translog.Location location = onReplicaShard(request, indexShard); + processAfterWrite(request.refresh(), indexShard, location); + } + protected final void processAfterWrite(boolean refresh, IndexShard indexShard, Translog.Location location) { if (refresh) { try { From 80119b9a26ede96a865af45904c3ac69d5b19b59 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Fri, 22 Apr 2016 16:51:53 -0400 Subject: [PATCH 04/86] Factor out common code in shardOperationOnPrimary --- .../action/bulk/TransportShardBulkAction.java | 15 +++----- .../action/delete/TransportDeleteAction.java | 27 ++++++--------- .../action/index/TransportIndexAction.java | 20 +++-------- .../TransportReplicatedMutationAction.java | 34 ++++++++++++++----- 4 files changed, 47 insertions(+), 49 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java b/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java index ddd254f1c7821..56a5dfa9a14bd 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java @@ -68,7 +68,7 @@ /** * Performs the index operation. 
*/ -public class TransportShardBulkAction extends TransportReplicatedMutationAction { +public class TransportShardBulkAction extends TransportReplicatedMutationAction { private final static String OP_TYPE_UPDATE = "update"; private final static String OP_TYPE_DELETE = "delete"; @@ -84,9 +84,8 @@ public TransportShardBulkAction(Settings settings, TransportService transportSer IndicesService indicesService, ThreadPool threadPool, ShardStateAction shardStateAction, MappingUpdatedAction mappingUpdatedAction, UpdateHelper updateHelper, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) { - super(settings, ACTION_NAME, transportService, clusterService, indicesService, threadPool, shardStateAction, - actionFilters, indexNameExpressionResolver, - BulkShardRequest::new, BulkShardRequest::new, ThreadPool.Names.BULK); + super(settings, ACTION_NAME, transportService, clusterService, indicesService, threadPool, shardStateAction, actionFilters, + indexNameExpressionResolver, BulkShardRequest::new, ThreadPool.Names.BULK); this.updateHelper = updateHelper; this.allowIdGeneration = settings.getAsBoolean("action.allow_id_generation", true); this.mappingUpdatedAction = mappingUpdatedAction; @@ -108,10 +107,7 @@ protected boolean resolveIndex() { } @Override - protected Tuple shardOperationOnPrimary(BulkShardRequest request) { - ShardId shardId = request.shardId(); - final IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex()); - final IndexShard indexShard = indexService.getShard(shardId.getId()); + protected WriteResult onPrimaryShard(IndexService indexService, IndexShard indexShard, BulkShardRequest request) { final IndexMetaData metaData = indexService.getIndexSettings().getIndexMetaData(); long[] preVersions = new long[request.items().length]; @@ -122,13 +118,12 @@ protected Tuple shardOperationOnPrimary(Bul location = handleItem(metaData, request, indexShard, preVersions, preVersionTypes, location, requestIndex, item); } - processAfterWrite(request.refresh(), indexShard, location); BulkItemResponse[] responses = new BulkItemResponse[request.items().length]; BulkItemRequest[] items = request.items(); for (int i = 0; i < items.length; i++) { responses[i] = items[i].getPrimaryResponse(); } - return new Tuple<>(new BulkShardResponse(request.shardId(), responses), request); + return new WriteResult<>(new BulkShardResponse(request.shardId(), responses), location); } private Translog.Location handleItem(IndexMetaData metaData, BulkShardRequest request, IndexShard indexShard, long[] preVersions, VersionType[] preVersionTypes, Translog.Location location, int requestIndex, BulkItemRequest item) { diff --git a/core/src/main/java/org/elasticsearch/action/delete/TransportDeleteAction.java b/core/src/main/java/org/elasticsearch/action/delete/TransportDeleteAction.java index 059f1ca7420da..89ded46c0158e 100644 --- a/core/src/main/java/org/elasticsearch/action/delete/TransportDeleteAction.java +++ b/core/src/main/java/org/elasticsearch/action/delete/TransportDeleteAction.java @@ -34,9 +34,9 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.IndexService; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.shard.IndexShard; import 
org.elasticsearch.index.shard.ShardId; @@ -50,7 +50,7 @@ /** * Performs the delete operation. */ -public class TransportDeleteAction extends TransportReplicatedMutationAction { +public class TransportDeleteAction extends TransportReplicatedMutationAction { private final AutoCreateIndex autoCreateIndex; private final TransportCreateIndexAction createIndexAction; @@ -61,9 +61,8 @@ public TransportDeleteAction(Settings settings, TransportService transportServic TransportCreateIndexAction createIndexAction, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, AutoCreateIndex autoCreateIndex) { - super(settings, DeleteAction.NAME, transportService, clusterService, indicesService, threadPool, shardStateAction, - actionFilters, indexNameExpressionResolver, - DeleteRequest::new, DeleteRequest::new, ThreadPool.Names.INDEX); + super(settings, DeleteAction.NAME, transportService, clusterService, indicesService, threadPool, shardStateAction, actionFilters, + indexNameExpressionResolver, DeleteRequest::new, ThreadPool.Names.INDEX); this.createIndexAction = createIndexAction; this.autoCreateIndex = autoCreateIndex; } @@ -120,11 +119,13 @@ protected DeleteResponse newResponseInstance() { } @Override - protected Tuple shardOperationOnPrimary(DeleteRequest request) { - IndexShard indexShard = indicesService.indexServiceSafe(request.shardId().getIndex()).getShard(request.shardId().id()); - final WriteResult result = executeDeleteRequestOnPrimary(request, indexShard); - processAfterWrite(request.refresh(), indexShard, result.location); - return new Tuple<>(result.response, request); + protected WriteResult onPrimaryShard(IndexService indexService, IndexShard indexShard, DeleteRequest request) { + return executeDeleteRequestOnPrimary(request, indexShard); + } + + @Override + protected Location onReplicaShard(DeleteRequest request, IndexShard indexShard) { + return executeDeleteRequestOnReplica(request, indexShard).getTranslogLocation(); } public static WriteResult executeDeleteRequestOnPrimary(DeleteRequest request, IndexShard indexShard) { @@ -145,10 +146,4 @@ public static Engine.Delete executeDeleteRequestOnReplica(DeleteRequest request, indexShard.delete(delete); return delete; } - - @Override - protected Location onReplicaShard(DeleteRequest request, IndexShard indexShard) { - return executeDeleteRequestOnReplica(request, indexShard).getTranslogLocation(); - } - } diff --git a/core/src/main/java/org/elasticsearch/action/index/TransportIndexAction.java b/core/src/main/java/org/elasticsearch/action/index/TransportIndexAction.java index 19a0a748261a7..ddd48eb58ae04 100644 --- a/core/src/main/java/org/elasticsearch/action/index/TransportIndexAction.java +++ b/core/src/main/java/org/elasticsearch/action/index/TransportIndexAction.java @@ -36,7 +36,6 @@ import org.elasticsearch.cluster.metadata.MappingMetaData; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.IndexService; @@ -45,7 +44,6 @@ import org.elasticsearch.index.mapper.SourceToParse; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.index.translog.Translog; import org.elasticsearch.index.translog.Translog.Location; import org.elasticsearch.indices.IndexAlreadyExistsException; import 
org.elasticsearch.indices.IndicesService; @@ -63,7 +61,7 @@ *
  • allowIdGeneration: If the id is set not, should it be generated. Defaults to true. * */ -public class TransportIndexAction extends TransportReplicatedMutationAction { +public class TransportIndexAction extends TransportReplicatedMutationAction { private final AutoCreateIndex autoCreateIndex; private final boolean allowIdGeneration; @@ -79,7 +77,7 @@ public TransportIndexAction(Settings settings, TransportService transportService ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, AutoCreateIndex autoCreateIndex) { super(settings, IndexAction.NAME, transportService, clusterService, indicesService, threadPool, shardStateAction, - actionFilters, indexNameExpressionResolver, IndexRequest::new, IndexRequest::new, ThreadPool.Names.INDEX); + actionFilters, indexNameExpressionResolver, IndexRequest::new, ThreadPool.Names.INDEX); this.mappingUpdatedAction = mappingUpdatedAction; this.createIndexAction = createIndexAction; this.autoCreateIndex = autoCreateIndex; @@ -142,17 +140,9 @@ protected IndexResponse newResponseInstance() { } @Override - protected Tuple shardOperationOnPrimary(IndexRequest request) throws Exception { - - IndexService indexService = indicesService.indexServiceSafe(request.shardId().getIndex()); - IndexShard indexShard = indexService.getShard(request.shardId().id()); - - final WriteResult result = executeIndexRequestOnPrimary(request, indexShard, mappingUpdatedAction); - - final IndexResponse response = result.response; - final Translog.Location location = result.location; - processAfterWrite(request.refresh(), indexShard, location); - return new Tuple<>(response, request); + protected WriteResult onPrimaryShard(IndexService indexService, IndexShard indexShard, IndexRequest request) + throws Exception { + return executeIndexRequestOnPrimary(request, indexShard, mappingUpdatedAction); } @Override diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicatedMutationAction.java b/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicatedMutationAction.java index 5d99b392e1101..541b1d78d8640 100644 --- a/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicatedMutationAction.java +++ b/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicatedMutationAction.java @@ -24,6 +24,7 @@ import org.elasticsearch.cluster.action.shard.ShardStateAction; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.shard.IndexShard; @@ -40,27 +41,44 @@ */ public abstract class TransportReplicatedMutationAction< Request extends ReplicatedMutationRequest, - ReplicaRequest extends ReplicatedMutationRequest, Response extends ReplicationResponse - > extends TransportReplicationAction { + > extends TransportReplicationAction { protected TransportReplicatedMutationAction(Settings settings, String actionName, TransportService transportService, ClusterService clusterService, IndicesService indicesService, ThreadPool threadPool, ShardStateAction shardStateAction, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, Supplier request, - Supplier replicaRequest, String executor) { + String executor) { super(settings, actionName, transportService, clusterService, indicesService, threadPool, 
shardStateAction, actionFilters, - indexNameExpressionResolver, request, replicaRequest, executor); + indexNameExpressionResolver, request, request, executor); } /** - * Called on each replica node with a reference to the shard to modify. + * Called with a reference to the primary shard. * - * @return the translog location after the modification + * @return the result of the write - basically just the response to send back and the translog location of the {@linkplain IndexShard} + * after the write was completed */ - protected abstract Translog.Location onReplicaShard(ReplicaRequest request, IndexShard indexShard); + protected abstract WriteResult onPrimaryShard(IndexService indexService, IndexShard indexShard, Request request) + throws Exception; + + /** + * Called once per replica with a reference to the {@linkplain IndexShard} to modify. + * + * @return the translog location of the {@linkplain IndexShard} after the write was completed + */ + protected abstract Translog.Location onReplicaShard(Request request, IndexShard indexShard); + + @Override + protected Tuple shardOperationOnPrimary(Request request) throws Exception { + IndexService indexService = indicesService.indexServiceSafe(request.shardId().getIndex()); + IndexShard indexShard = indexService.getShard(request.shardId().id()); + WriteResult response = onPrimaryShard(indexService, indexShard, request); + processAfterWrite(request.refresh(), indexShard, response.location); + return new Tuple<>(response.response, request); + } @Override - protected final void shardOperationOnReplica(ReplicaRequest request) { + protected final void shardOperationOnReplica(Request request) { final ShardId shardId = request.shardId(); IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex()); IndexShard indexShard = indexService.getShard(shardId.id()); From 0fc045b56e1e02a48c30383ac50a281d5af7e0b6 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Fri, 22 Apr 2016 17:30:06 -0400 Subject: [PATCH 05/86] Make performOnPrimary asyncS Instead of returning Tuple it returns ReplicaRequest and takes a ActionListener as an argument. We call the listener immediately to preserve backwards compatibility for now. --- .../replication/ReplicationOperation.java | 49 +++++++++++++------ .../TransportReplicationAction.java | 5 +- .../ReplicationOperationTests.java | 11 +++-- .../TransportReplicationActionTests.java | 16 +++++- 4 files changed, 57 insertions(+), 24 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java b/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java index 1f7313c19434b..56c44740a9054 100644 --- a/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java +++ b/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java @@ -85,28 +85,26 @@ public class ReplicationOperation, R void execute() throws Exception { final String writeConsistencyFailure = checkWriteConsistency ? 
checkWriteConsistency() : null; - final ShardId shardId = primary.routingEntry().shardId(); + ShardRouting primaryRouting = primary.routingEntry(); + ShardId primaryId = primaryRouting.shardId(); if (writeConsistencyFailure != null) { - finishAsFailed(new UnavailableShardsException(shardId, + finishAsFailed(new UnavailableShardsException(primaryId, "{} Timeout: [{}], request: [{}]", writeConsistencyFailure, request.timeout(), request)); return; } totalShards.incrementAndGet(); pendingShards.incrementAndGet(); // increase by 1 until we finish all primary coordination - Tuple primaryResponse = primary.perform(request); - successfulShards.incrementAndGet(); // mark primary as successful - finalResponse = primaryResponse.v1(); - ReplicaRequest replicaRequest = primaryResponse.v2(); + ReplicaRequest replicaRequest = performOnPrimary(primary.routingEntry(), request); assert replicaRequest.primaryTerm() > 0 : "replicaRequest doesn't have a primary term"; if (logger.isTraceEnabled()) { - logger.trace("[{}] op [{}] completed on primary for request [{}]", shardId, opType, request); + logger.trace("[{}] op [{}] completed on primary for request [{}]", primaryId, opType, request); } // we have to get a new state after successfully indexing into the primary in order to honour recovery semantics. // we have to make sure that every operation indexed into the primary after recovery start will also be replicated // to the recovery target. If we use an old cluster state, we may miss a relocation that has started since then. // If the index gets deleted after primary operation, we skip replication - List shards = getShards(shardId, clusterStateSupplier.get()); + List shards = getShards(primaryId, clusterStateSupplier.get()); final String localNodeId = primary.routingEntry().currentNodeId(); for (final ShardRouting shard : shards) { if (executeOnReplicas == false || shard.unassigned()) { @@ -124,9 +122,28 @@ void execute() throws Exception { performOnReplica(shard.buildTargetRelocatingShard(), replicaRequest); } } + } + + private ReplicaRequest performOnPrimary(final ShardRouting primaryRouting, Request request) throws Exception { + return primary.perform(request, new ActionListener() { + @Override + public void onResponse(Response response) { + finalResponse = response; + successfulShards.incrementAndGet(); + // decrement pending and finish (if there are no replicas, or those are done) + decPendingAndFinishIfNeeded(); + } - // decrement pending and finish (if there are no replicas, or those are done) - decPendingAndFinishIfNeeded(); // incremented in the beginning of this method + @Override + public void onFailure(Throwable primaryException) { + RestStatus restStatus = ExceptionsHelper.status(primaryException); + shardReplicaFailures.add(new ReplicationResponse.ShardInfo.Failure(primaryRouting.shardId(), primaryRouting.currentNodeId(), + primaryException, restStatus, false)); + String message = String.format(Locale.ROOT, "failed to perform %s on primary %s", opType, primaryRouting); + logger.warn("[{}] {}", primaryException, primaryRouting.shardId(), message); + decPendingAndFinishIfNeeded(); + } + }); } private void performOnReplica(final ShardRouting shard, final ReplicaRequest replicaRequest) { @@ -294,13 +311,15 @@ interface Primary, ReplicaRequest ex void failShard(String message, Throwable throwable); /** - * Performs the given request on this primary + * Performs the given request on this primary. 
Yes, this returns as soon as it can with the request for the replicas and calls a + * listener when the primary request is completed. Yes, the primary request might complete before the method returns. Yes, it might + * also complete after. Deal with it. * - * @return A tuple containing not null values, as first value the result of the primary operation and as second value - * the request to be executed on the replica shards. + * @param request the request to perform + * @param listener for the request to be completed. + * @return the request to send to the repicas */ - Tuple perform(Request request) throws Exception; - + ReplicaRequest perform(Request request, ActionListener listener) throws Exception; } interface Replicas> { diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java b/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java index fa7d4da6fade3..506529c18bfb6 100644 --- a/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java +++ b/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java @@ -750,10 +750,11 @@ public void failShard(String reason, Throwable e) { } @Override - public Tuple perform(Request request) throws Exception { + public ReplicaRequest perform(Request request, ActionListener listener) throws Exception { Tuple result = shardOperationOnPrimary(request); result.v2().primaryTerm(indexShard.getPrimaryTerm()); - return result; + listener.onResponse(result.v1()); + return result.v2(); } @Override diff --git a/core/src/test/java/org/elasticsearch/action/support/replication/ReplicationOperationTests.java b/core/src/test/java/org/elasticsearch/action/support/replication/ReplicationOperationTests.java index cc7558d1de815..c7e65ae4e0ae8 100644 --- a/core/src/test/java/org/elasticsearch/action/support/replication/ReplicationOperationTests.java +++ b/core/src/test/java/org/elasticsearch/action/support/replication/ReplicationOperationTests.java @@ -233,11 +233,11 @@ private void testClusterStateChangeAfterPrimaryOperation(final ShardId shardId, final ShardRouting primaryShard = state.get().routingTable().shardRoutingTable(shardId).primaryShard(); final TestPrimary primary = new TestPrimary(primaryShard, primaryTerm) { @Override - public Tuple perform(Request request) throws Exception { - final Tuple tuple = super.perform(request); + public Request perform(Request request, ActionListener listener) throws Exception { + Request replicaRequest = super.perform(request, listener); state.set(changedState); logger.debug("--> state after primary operation:\n{}", state.get().prettyPrint()); - return tuple; + return replicaRequest; } }; @@ -385,12 +385,13 @@ public void failShard(String message, Throwable throwable) { } @Override - public Tuple perform(Request request) throws Exception { + public Request perform(Request request, ActionListener listener) throws Exception { if (request.processedOnPrimary.compareAndSet(false, true) == false) { fail("processed [" + request + "] twice"); } request.primaryTerm(term); - return new Tuple<>(new Response(), request); + listener.onResponse(new Response()); + return request; } } diff --git a/core/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java b/core/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java index a10ce35ca4123..3eeecf2c06e7f 100644 --- 
a/core/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java +++ b/core/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java @@ -63,6 +63,7 @@ import org.elasticsearch.transport.TransportChannel; import org.elasticsearch.transport.TransportException; import org.elasticsearch.transport.TransportResponse; +import org.elasticsearch.transport.TransportResponse.Empty; import org.elasticsearch.transport.TransportResponseOptions; import org.elasticsearch.transport.TransportService; import org.hamcrest.Matcher; @@ -478,9 +479,20 @@ public void testPrimaryReference() throws Exception { }; Action.PrimaryShardReference primary = action.new PrimaryShardReference(shard, releasable); final Request request = new Request(); - Tuple result = primary.perform(request); + Request replicaRequest = primary.perform(request, new ActionListener() { + @Override + public void onResponse(Response response) { + // Ok, nothing to do + } + + @Override + public void onFailure(Throwable e) { + // Currently can't even be called. + throw new RuntimeException(e); + } + }); - assertThat(result.v2().primaryTerm(), equalTo(primaryTerm)); + assertThat(replicaRequest.primaryTerm(), equalTo(primaryTerm)); final ElasticsearchException exception = new ElasticsearchException("testing"); primary.failShard("test", exception); From b42b8da968d42cc7414020c7b199606a5dcce50a Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Fri, 22 Apr 2016 17:45:40 -0400 Subject: [PATCH 06/86] Don't finish early if the primary finishes early We use a "fake" pending shard that we resolve when the replicas have all started. --- .../action/support/replication/ReplicationOperation.java | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java b/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java index 56c44740a9054..9748930f19aa7 100644 --- a/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java +++ b/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java @@ -94,7 +94,7 @@ void execute() throws Exception { } totalShards.incrementAndGet(); - pendingShards.incrementAndGet(); // increase by 1 until we finish all primary coordination + pendingShards.addAndGet(2); // increase by 2 - one for the primary shard and one for the coordination of replicas ReplicaRequest replicaRequest = performOnPrimary(primary.routingEntry(), request); assert replicaRequest.primaryTerm() > 0 : "replicaRequest doesn't have a primary term"; if (logger.isTraceEnabled()) { @@ -122,6 +122,8 @@ void execute() throws Exception { performOnReplica(shard.buildTargetRelocatingShard(), replicaRequest); } } + // Decrement for the replica coordination + decPendingAndFinishIfNeeded(); } private ReplicaRequest performOnPrimary(final ShardRouting primaryRouting, Request request) throws Exception { @@ -130,7 +132,7 @@ private ReplicaRequest performOnPrimary(final ShardRouting primaryRouting, Reque public void onResponse(Response response) { finalResponse = response; successfulShards.incrementAndGet(); - // decrement pending and finish (if there are no replicas, or those are done) + // Decrement for the primary decPendingAndFinishIfNeeded(); } From 34b378943b8185451acf6350f661c0ad33b5836d Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Fri, 22 Apr 2016 17:48:47 -0400 Subject: [PATCH 07/86] Doc --- 
.../support/replication/ReplicationOperation.java | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java b/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java index 9748930f19aa7..5c365284ed077 100644 --- a/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java +++ b/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java @@ -29,7 +29,6 @@ import org.elasticsearch.cluster.routing.IndexRoutingTable; import org.elasticsearch.cluster.routing.IndexShardRoutingTable; import org.elasticsearch.cluster.routing.ShardRouting; -import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.index.engine.VersionConflictEngineException; @@ -54,6 +53,16 @@ public class ReplicationOperation, R final private Supplier clusterStateSupplier; final private String opType; final private AtomicInteger totalShards = new AtomicInteger(); + /** + * The number of pending sub-operations in this operation. This is incremented when the following operations start and decremented when + * they complete: + *
<ul>
    + * <li>The operation on the primary</li>
    + * <li>The operation on each replica</li>
    + * <li>Coordination of the operation as a whole. This prevents the operation from terminating early if we haven't started any replica + * operations and the primary finishes.</li>
    + * </ul>
    + */ final private AtomicInteger pendingShards = new AtomicInteger(); final private AtomicInteger successfulShards = new AtomicInteger(); final private boolean executeOnReplicas; From 3d22b2d7ceb473db339259452a7c4f117ce86069 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Fri, 22 Apr 2016 17:59:55 -0400 Subject: [PATCH 08/86] Push the listener into shardOperationOnPrimary --- .../flush/TransportShardFlushAction.java | 6 ++-- .../refresh/TransportShardRefreshAction.java | 8 +++-- .../TransportReplicatedMutationAction.java | 28 ++++++++++++--- .../TransportReplicationAction.java | 35 ++++--------------- .../TransportReplicationActionTests.java | 5 +-- 5 files changed, 43 insertions(+), 39 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportShardFlushAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportShardFlushAction.java index 7e750b9767790..d80a046318529 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportShardFlushAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportShardFlushAction.java @@ -19,6 +19,7 @@ package org.elasticsearch.action.admin.indices.flush; +import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ReplicationResponse; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.replication.TransportReplicationAction; @@ -55,11 +56,12 @@ protected ReplicationResponse newResponseInstance() { } @Override - protected Tuple shardOperationOnPrimary(ShardFlushRequest shardRequest) { + protected ShardFlushRequest shardOperationOnPrimary(ShardFlushRequest shardRequest, ActionListener listener) { IndexShard indexShard = indicesService.indexServiceSafe(shardRequest.shardId().getIndex()).getShard(shardRequest.shardId().id()); indexShard.flush(shardRequest.getRequest()); logger.trace("{} flush request executed on primary", indexShard.shardId()); - return new Tuple<>(new ReplicationResponse(), shardRequest); + listener.onResponse(new ReplicationResponse()); + return shardRequest; } @Override diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportShardRefreshAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportShardRefreshAction.java index 0670c1f3cc64d..8db09b1c0c9c1 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportShardRefreshAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportShardRefreshAction.java @@ -19,6 +19,7 @@ package org.elasticsearch.action.admin.indices.refresh; +import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ReplicationResponse; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.replication.BasicReplicationRequest; @@ -27,7 +28,6 @@ import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.shard.IndexShard; @@ -57,11 +57,13 @@ protected ReplicationResponse newResponseInstance() { } @Override - protected Tuple shardOperationOnPrimary(BasicReplicationRequest shardRequest) { + protected BasicReplicationRequest 
shardOperationOnPrimary(BasicReplicationRequest shardRequest, + ActionListener listener) { IndexShard indexShard = indicesService.indexServiceSafe(shardRequest.shardId().getIndex()).getShard(shardRequest.shardId().id()); indexShard.refresh("api"); + listener.onResponse(new ReplicationResponse()); logger.trace("{} refresh request executed on primary", indexShard.shardId()); - return new Tuple<>(new ReplicationResponse(), shardRequest); + return shardRequest; } @Override diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicatedMutationAction.java b/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicatedMutationAction.java index 541b1d78d8640..7496accc2a02a 100644 --- a/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicatedMutationAction.java +++ b/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicatedMutationAction.java @@ -19,6 +19,7 @@ package org.elasticsearch.action.support.replication; +import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ReplicationResponse; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.cluster.action.shard.ShardStateAction; @@ -69,12 +70,13 @@ protected abstract WriteResult onPrimaryShard(IndexService indexServic protected abstract Translog.Location onReplicaShard(Request request, IndexShard indexShard); @Override - protected Tuple shardOperationOnPrimary(Request request) throws Exception { + protected Request shardOperationOnPrimary(Request request, ActionListener listener) throws Exception { IndexService indexService = indicesService.indexServiceSafe(request.shardId().getIndex()); IndexShard indexShard = indexService.getShard(request.shardId().id()); - WriteResult response = onPrimaryShard(indexService, indexShard, request); - processAfterWrite(request.refresh(), indexShard, response.location); - return new Tuple<>(response.response, request); + WriteResult result = onPrimaryShard(indexService, indexShard, request); + processAfterWrite(request.refresh(), indexShard, result.location); + listener.onResponse(result.response); + return request; } @Override @@ -99,4 +101,22 @@ protected final void processAfterWrite(boolean refresh, IndexShard indexShard, T } indexShard.maybeFlush(); } + + protected static class WriteResult { + public final T response; + public final Translog.Location location; + + public WriteResult(T response, Translog.Location location) { + this.response = response; + this.location = location; + } + + @SuppressWarnings("unchecked") + public T response() { + // this sets total, pending and failed to 0 and this is ok, because we will embed this into the replica + // request and not use it + response.setShardInfo(new ReplicationResponse.ShardInfo()); + return (T) response; + } + } } diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java b/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java index 506529c18bfb6..cf2086e2b4ed5 100644 --- a/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java +++ b/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java @@ -41,7 +41,6 @@ import org.elasticsearch.cluster.routing.IndexShardRoutingTable; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.collect.Tuple; import 
org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.lease.Releasable; import org.elasticsearch.common.settings.Settings; @@ -150,10 +149,11 @@ protected void resolveRequest(MetaData metaData, IndexMetaData indexMetaData, Re /** * Primary operation on node with primary copy * - * @return A tuple containing not null values, as first value the result of the primary operation and as second value - * the request to be executed on the replica shards. + * @param shardRequest the request to the primary shard + * @param listener called when the operation is complete with the result of the operation, assuming all the replicas succeed + * @return the request to the replicas. */ - protected abstract Tuple shardOperationOnPrimary(Request shardRequest) throws Exception; + protected abstract ReplicaRequest shardOperationOnPrimary(Request shardRequest, ActionListener listener) throws Exception; /** * Replica operation on nodes with replica copies @@ -197,26 +197,6 @@ protected boolean retryPrimaryException(Throwable e) { || TransportActions.isShardNotAvailableException(e); } - protected static class WriteResult { - - public final T response; - public final Translog.Location location; - - public WriteResult(T response, Translog.Location location) { - this.response = response; - this.location = location; - } - - @SuppressWarnings("unchecked") - public T response() { - // this sets total, pending and failed to 0 and this is ok, because we will embed this into the replica - // request and not use it - response.setShardInfo(new ReplicationResponse.ShardInfo()); - return (T) response; - } - - } - class OperationTransportHandler implements TransportRequestHandler { @Override public void messageReceived(final Request request, final TransportChannel channel, Task task) throws Exception { @@ -751,10 +731,9 @@ public void failShard(String reason, Throwable e) { @Override public ReplicaRequest perform(Request request, ActionListener listener) throws Exception { - Tuple result = shardOperationOnPrimary(request); - result.v2().primaryTerm(indexShard.getPrimaryTerm()); - listener.onResponse(result.v1()); - return result.v2(); + ReplicaRequest replicaRequest = shardOperationOnPrimary(request, listener); + replicaRequest.primaryTerm(indexShard.getPrimaryTerm()); + return replicaRequest; } @Override diff --git a/core/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java b/core/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java index 3eeecf2c06e7f..795ad6a23e0ae 100644 --- a/core/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java +++ b/core/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java @@ -779,10 +779,11 @@ protected Response newResponseInstance() { } @Override - protected Tuple shardOperationOnPrimary(Request shardRequest) throws Exception { + protected Request shardOperationOnPrimary(Request shardRequest, ActionListener listener) throws Exception { boolean executedBefore = shardRequest.processedOnPrimary.getAndSet(true); assert executedBefore == false : "request has already been executed on the primary"; - return new Tuple<>(new Response(), shardRequest); + listener.onResponse(new Response()); + return shardRequest; } @Override From 5b142dc331214c8eef90587144f4b3f959f9eced Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Fri, 22 Apr 2016 18:03:52 -0400 Subject: [PATCH 09/86] Cleanup --- 
.../elasticsearch/action/bulk/TransportShardBulkAction.java | 2 ++ .../replication/TransportReplicatedMutationAction.java | 5 ++--- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java b/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java index 56a5dfa9a14bd..f469efaaf8988 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java @@ -238,6 +238,7 @@ private Tuple update(IndexMetaData metaData, switch (updateResult.result.operation()) { case UPSERT: case INDEX: + @SuppressWarnings("unchecked") WriteResult result = updateResult.writeResult; IndexRequest indexRequest = updateResult.request(); BytesReference indexSourceAsBytes = indexRequest.source(); @@ -252,6 +253,7 @@ private Tuple update(IndexMetaData metaData, setResponse(item, new BulkItemResponse(item.id(), OP_TYPE_UPDATE, updateResponse)); break; case DELETE: + @SuppressWarnings("unchecked") WriteResult writeResult = updateResult.writeResult; DeleteResponse response = writeResult.response(); DeleteRequest deleteRequest = updateResult.request(); diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicatedMutationAction.java b/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicatedMutationAction.java index 7496accc2a02a..15a73a10a6a52 100644 --- a/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicatedMutationAction.java +++ b/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicatedMutationAction.java @@ -111,12 +111,11 @@ public WriteResult(T response, Translog.Location location) { this.location = location; } - @SuppressWarnings("unchecked") - public T response() { + public T response() { // this sets total, pending and failed to 0 and this is ok, because we will embed this into the replica // request and not use it response.setShardInfo(new ReplicationResponse.ShardInfo()); - return (T) response; + return response; } } } From 52c5f7c3f04710901f503334239a611c0e21c85a Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Fri, 22 Apr 2016 19:33:00 -0400 Subject: [PATCH 10/86] Add a listener to shard operations --- .../flush/TransportShardFlushAction.java | 4 +++- .../refresh/TransportShardRefreshAction.java | 4 +++- .../TransportReplicatedMutationAction.java | 4 +++- .../TransportReplicationAction.java | 22 +++++++++++++++---- .../TransportReplicationActionTests.java | 10 ++++----- 5 files changed, 32 insertions(+), 12 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportShardFlushAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportShardFlushAction.java index d80a046318529..cd0dbe4e07d15 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportShardFlushAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportShardFlushAction.java @@ -33,6 +33,7 @@ import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportResponse; import org.elasticsearch.transport.TransportService; /** @@ -65,10 +66,11 @@ protected ShardFlushRequest shardOperationOnPrimary(ShardFlushRequest shardReque } @Override - protected void 
shardOperationOnReplica(ShardFlushRequest request) { + protected void shardOperationOnReplica(ShardFlushRequest request, ActionListener listener) { IndexShard indexShard = indicesService.indexServiceSafe(request.shardId().getIndex()).getShard(request.shardId().id()); indexShard.flush(request.getRequest()); logger.trace("{} flush request executed on replica", indexShard.shardId()); + listener.onResponse(TransportResponse.Empty.INSTANCE); } @Override diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportShardRefreshAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportShardRefreshAction.java index 8db09b1c0c9c1..b9211fb71629f 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportShardRefreshAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportShardRefreshAction.java @@ -34,6 +34,7 @@ import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportResponse; import org.elasticsearch.transport.TransportService; /** @@ -67,10 +68,11 @@ protected BasicReplicationRequest shardOperationOnPrimary(BasicReplicationReques } @Override - protected void shardOperationOnReplica(BasicReplicationRequest request) { + protected void shardOperationOnReplica(BasicReplicationRequest request, ActionListener listener) { final ShardId shardId = request.shardId(); IndexShard indexShard = indicesService.indexServiceSafe(shardId.getIndex()).getShard(shardId.id()); indexShard.refresh("api"); + listener.onResponse(TransportResponse.Empty.INSTANCE); logger.trace("{} refresh request executed on replica", indexShard.shardId()); } diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicatedMutationAction.java b/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicatedMutationAction.java index 15a73a10a6a52..723f526bd2731 100644 --- a/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicatedMutationAction.java +++ b/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicatedMutationAction.java @@ -33,6 +33,7 @@ import org.elasticsearch.index.translog.Translog; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportResponse; import org.elasticsearch.transport.TransportService; import java.util.function.Supplier; @@ -80,12 +81,13 @@ protected Request shardOperationOnPrimary(Request request, ActionListener listener) { final ShardId shardId = request.shardId(); IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex()); IndexShard indexShard = indexService.getShard(shardId.id()); Translog.Location location = onReplicaShard(request, indexShard); processAfterWrite(request.refresh(), indexShard, location); + listener.onResponse(TransportResponse.Empty.INSTANCE); } protected final void processAfterWrite(boolean refresh, IndexShard indexShard, Translog.Location location) { diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java b/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java index cf2086e2b4ed5..46e5f07cf60de 100644 --- a/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java +++ 
b/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java @@ -65,6 +65,7 @@ import org.elasticsearch.transport.TransportRequestHandler; import org.elasticsearch.transport.TransportRequestOptions; import org.elasticsearch.transport.TransportResponse; +import org.elasticsearch.transport.TransportResponse.Empty; import org.elasticsearch.transport.TransportService; import java.io.IOException; @@ -158,7 +159,7 @@ protected void resolveRequest(MetaData metaData, IndexMetaData indexMetaData, Re /** * Replica operation on nodes with replica copies */ - protected abstract void shardOperationOnReplica(ReplicaRequest shardRequest); + protected abstract void shardOperationOnReplica(ReplicaRequest shardRequest, ActionListener listener); /** * True if write consistency should be checked for an implementation @@ -406,14 +407,27 @@ protected void doRun() throws Exception { setPhase(task, "replica"); assert request.shardId() != null : "request shardId must be set"; try (Releasable ignored = acquireReplicaOperationLock(request.shardId(), request.primaryTerm())) { - shardOperationOnReplica(request); + shardOperationOnReplica(request, new ActionListener() { + @Override + public void onResponse(Empty response) { + setPhase(task, "finished"); + try { + channel.sendResponse(TransportResponse.Empty.INSTANCE); + } catch (Exception e) { + onFailure(e); + } + } + + @Override + public void onFailure(Throwable e) { + AsyncReplicaAction.this.onFailure(e); + } + }); if (logger.isTraceEnabled()) { logger.trace("action [{}] completed on shard [{}] for request [{}]", transportReplicaAction, request.shardId(), request); } } - setPhase(task, "finished"); - channel.sendResponse(TransportResponse.Empty.INSTANCE); } } diff --git a/core/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java b/core/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java index 795ad6a23e0ae..71451d0e8a133 100644 --- a/core/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java +++ b/core/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java @@ -24,6 +24,7 @@ import org.elasticsearch.action.UnavailableShardsException; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.action.support.TransportAction; import org.elasticsearch.client.transport.NoNodeAvailableException; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.action.shard.ShardStateAction; @@ -43,7 +44,6 @@ import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.lease.Releasable; @@ -63,7 +63,6 @@ import org.elasticsearch.transport.TransportChannel; import org.elasticsearch.transport.TransportException; import org.elasticsearch.transport.TransportResponse; -import org.elasticsearch.transport.TransportResponse.Empty; import org.elasticsearch.transport.TransportResponseOptions; import org.elasticsearch.transport.TransportService; import org.hamcrest.Matcher; @@ -681,13 +680,13 @@ public void testReplicasCounter() throws Exception { final ReplicationTask task = maybeTask(); 
Action action = new Action(Settings.EMPTY, "testActionWithExceptions", transportService, clusterService, threadPool) { @Override - protected void shardOperationOnReplica(Request request) { + protected void shardOperationOnReplica(Request request, ActionListener listener) { assertIndexShardCounter(1); assertPhase(task, "replica"); if (throwException) { throw new ElasticsearchException("simulated"); } - super.shardOperationOnReplica(request); + super.shardOperationOnReplica(request, listener); } }; final Action.ReplicaOperationTransportHandler replicaOperationTransportHandler = action.new ReplicaOperationTransportHandler(); @@ -787,8 +786,9 @@ protected Request shardOperationOnPrimary(Request shardRequest, ActionListener listener) { request.processedOnReplicas.incrementAndGet(); + listener.onResponse(TransportResponse.Empty.INSTANCE); } @Override From 1f25cf35e796835b3827b8a4110e09e5de61784c Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Fri, 22 Apr 2016 19:56:18 -0400 Subject: [PATCH 11/86] Cleanup --- .../support/replication/ReplicationOperation.java | 2 +- .../replication/TransportReplicationAction.java | 11 ++++++----- 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java b/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java index 5c365284ed077..c614fa228c6ad 100644 --- a/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java +++ b/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java @@ -104,7 +104,7 @@ void execute() throws Exception { totalShards.incrementAndGet(); pendingShards.addAndGet(2); // increase by 2 - one for the primary shard and one for the coordination of replicas - ReplicaRequest replicaRequest = performOnPrimary(primary.routingEntry(), request); + ReplicaRequest replicaRequest = performOnPrimary(primaryRouting, request); assert replicaRequest.primaryTerm() > 0 : "replicaRequest doesn't have a primary term"; if (logger.isTraceEnabled()) { logger.trace("[{}] op [{}] completed on primary for request [{}]", primaryId, opType, request); diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java b/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java index 46e5f07cf60de..165176664dad5 100644 --- a/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java +++ b/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java @@ -157,7 +157,8 @@ protected void resolveRequest(MetaData metaData, IndexMetaData indexMetaData, Re protected abstract ReplicaRequest shardOperationOnPrimary(Request shardRequest, ActionListener listener) throws Exception; /** - * Replica operation on nodes with replica copies + * Replica operation on nodes with replica copies. While this does take a listener it should not return until it has completed any operations + * that it must take under the shard lock. The listener is for waiting for things like index to become visible in search. 
*/ protected abstract void shardOperationOnReplica(ReplicaRequest shardRequest, ActionListener listener); @@ -410,6 +411,10 @@ protected void doRun() throws Exception { shardOperationOnReplica(request, new ActionListener() { @Override public void onResponse(Empty response) { + if (logger.isTraceEnabled()) { + logger.trace("action [{}] completed on shard [{}] for request [{}]", transportReplicaAction, request.shardId(), + request); + } setPhase(task, "finished"); try { channel.sendResponse(TransportResponse.Empty.INSTANCE); @@ -423,10 +428,6 @@ public void onFailure(Throwable e) { AsyncReplicaAction.this.onFailure(e); } }); - if (logger.isTraceEnabled()) { - logger.trace("action [{}] completed on shard [{}] for request [{}]", transportReplicaAction, request.shardId(), - request); - } } } } From 85033a87551da89f36a23d4dfd5016db218e08ee Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Fri, 22 Apr 2016 20:28:21 -0400 Subject: [PATCH 12/86] Never reply to replica actions while you have the operation lock This last thing was causing periodic test failures because we were replying while we had the operation lock. Now, we probably could get away with that in most cases but the tests don't like it and it isn't a good idea to do network io while you have a lock anyway. So this prevents it. --- .../refresh/TransportShardRefreshAction.java | 2 +- .../TransportReplicationAction.java | 47 ++++++++++++++----- 2 files changed, 35 insertions(+), 14 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportShardRefreshAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportShardRefreshAction.java index b9211fb71629f..2d68c4935f827 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportShardRefreshAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportShardRefreshAction.java @@ -72,8 +72,8 @@ protected void shardOperationOnReplica(BasicReplicationRequest request, ActionLi final ShardId shardId = request.shardId(); IndexShard indexShard = indicesService.indexServiceSafe(shardId.getIndex()).getShard(shardId.id()); indexShard.refresh("api"); - listener.onResponse(TransportResponse.Empty.INSTANCE); logger.trace("{} refresh request executed on replica", indexShard.shardId()); + listener.onResponse(TransportResponse.Empty.INSTANCE); } @Override diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java b/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java index 165176664dad5..1a01721fc84ff 100644 --- a/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java +++ b/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java @@ -52,7 +52,6 @@ import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.IndexShardState; import org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.index.translog.Translog; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.node.NodeClosedException; import org.elasticsearch.tasks.Task; @@ -70,6 +69,7 @@ import java.io.IOException; import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; import java.util.function.Consumer; import java.util.function.Supplier; @@ -157,8 +157,8 @@ protected void resolveRequest(MetaData metaData, IndexMetaData indexMetaData, Re protected abstract 
ReplicaRequest shardOperationOnPrimary(Request shardRequest, ActionListener listener) throws Exception; /** - * Replica operation on nodes with replica copies. While this does take a listener it should not return until it has completed any operations - * that it must take under the shard lock. The listener is for waiting for things like index to become visible in search. + * Replica operation on nodes with replica copies. While this does take a listener it should not return until it has completed any + * operations that it must take under the shard lock. The listener is for waiting for things like index to become visible in search. */ protected abstract void shardOperationOnReplica(ReplicaRequest shardRequest, ActionListener listener); @@ -345,6 +345,10 @@ public RetryOnReplicaException(StreamInput in) throws IOException { } private final class AsyncReplicaAction extends AbstractRunnable { + /** + * The number of operations remaining before we can reply. See javadoc for {@link #operationComplete()} more. + */ + private final AtomicInteger operationsUntilReply = new AtomicInteger(2); private final ReplicaRequest request; private final TransportChannel channel; /** @@ -411,16 +415,7 @@ protected void doRun() throws Exception { shardOperationOnReplica(request, new ActionListener() { @Override public void onResponse(Empty response) { - if (logger.isTraceEnabled()) { - logger.trace("action [{}] completed on shard [{}] for request [{}]", transportReplicaAction, request.shardId(), - request); - } - setPhase(task, "finished"); - try { - channel.sendResponse(TransportResponse.Empty.INSTANCE); - } catch (Exception e) { - onFailure(e); - } + operationComplete(); } @Override @@ -429,6 +424,32 @@ public void onFailure(Throwable e) { } }); } + operationComplete(); + } + + /** + * Handle a portion of the operation finishing. Called twice: once after the operation returns and the lock is released and once + * after the listener returns. We only reply over the channel when both have finished but we don't know in which order they will + * finish. + * + * The reason we can't reply until both is finished is a bit unclear - but the advantage of doing it this ways is that we never + * ever ever reply while we have the operation lock. And it is just a good idea in general not to do network IO while you have a + * lock. So that is something. 
+ */ + private void operationComplete() { + if (operationsUntilReply.decrementAndGet() != 0) { + return; + } + if (logger.isTraceEnabled()) { + logger.trace("action [{}] completed on shard [{}] for request [{}]", transportReplicaAction, request.shardId(), + request); + } + setPhase(task, "finished"); + try { + channel.sendResponse(TransportResponse.Empty.INSTANCE); + } catch (Exception e) { + onFailure(e); + } } } From a2bc7f30e6d4857a1224ef5a89909b36c8f33731 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Fri, 22 Apr 2016 21:11:55 -0400 Subject: [PATCH 13/86] Return last written location from refresh --- .../org/elasticsearch/index/engine/Engine.java | 4 +++- .../elasticsearch/index/engine/InternalEngine.java | 5 ++++- .../elasticsearch/index/engine/ShadowEngine.java | 4 +++- .../org/elasticsearch/index/translog/Translog.java | 14 ++++++++++++++ .../index/engine/InternalEngineTests.java | 13 +++++++++---- 5 files changed, 33 insertions(+), 7 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/index/engine/Engine.java b/core/src/main/java/org/elasticsearch/index/engine/Engine.java index 69aee4ac68098..4b5af28141615 100644 --- a/core/src/main/java/org/elasticsearch/index/engine/Engine.java +++ b/core/src/main/java/org/elasticsearch/index/engine/Engine.java @@ -611,8 +611,10 @@ public final boolean refreshNeeded() { /** * Synchronously refreshes the engine for new search operations to reflect the latest * changes. + * + * @return the maximum translog location that was made visible by the refresh */ - public abstract void refresh(String source) throws EngineException; + public abstract Translog.Location refresh(String source) throws EngineException; /** * Called when our engine is using too much heap and should move buffered indexed/deleted documents to disk. 
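Returning a location from refresh pays off in the next commit, when refresh listeners need to know which writes a refresh made visible. A minimal sketch of that check from the caller's side, assuming only the Comparable contract on Translog.Location that the patch itself relies on; variable names are illustrative:

    // Sketch: decide whether a previously indexed operation is visible to search after a refresh.
    // 'index' is an Engine.Index that engine.index(index) has already processed.
    Translog.Location writeLocation = index.getTranslogLocation();
    Translog.Location refreshedLocation = engine.refresh("api");  // now returns the maximum location made visible
    boolean visibleToSearch = refreshedLocation == null           // null (ShadowEngine) means everything is visible
            || writeLocation.compareTo(refreshedLocation) <= 0;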
diff --git a/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java b/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java index 9f7bc41add995..5c3e08929583c 100644 --- a/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java +++ b/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java @@ -500,10 +500,12 @@ private void innerDelete(Delete delete) throws IOException { } @Override - public void refresh(String source) throws EngineException { + public Translog.Location refresh(String source) throws EngineException { // we obtain a read lock here, since we don't want a flush to happen while we are refreshing // since it flushes the index as well (though, in terms of concurrency, we are allowed to do it) + Translog.Location location = null; try (ReleasableLock lock = readLock.acquire()) { + location = translog.getLastWriteLocation(); ensureOpen(); searcherManager.maybeRefreshBlocking(); } catch (AlreadyClosedException e) { @@ -522,6 +524,7 @@ public void refresh(String source) throws EngineException { maybePruneDeletedTombstones(); versionMapRefreshPending.set(false); mergeScheduler.refreshConfig(); + return location; } @Override diff --git a/core/src/main/java/org/elasticsearch/index/engine/ShadowEngine.java b/core/src/main/java/org/elasticsearch/index/engine/ShadowEngine.java index c30b2e9bf501f..6cbbbb4fe6a15 100644 --- a/core/src/main/java/org/elasticsearch/index/engine/ShadowEngine.java +++ b/core/src/main/java/org/elasticsearch/index/engine/ShadowEngine.java @@ -182,7 +182,7 @@ public List segments(boolean verbose) { } @Override - public void refresh(String source) throws EngineException { + public Translog.Location refresh(String source) throws EngineException { // we obtain a read lock here, since we don't want a flush to happen while we are refreshing // since it flushes the index as well (though, in terms of concurrency, we are allowed to do it) try (ReleasableLock lock = readLock.acquire()) { @@ -196,6 +196,8 @@ public void refresh(String source) throws EngineException { failEngine("refresh failed", t); throw new RefreshFailedEngineException(shardId, t); } + // Return null here because we don't have a translog. "Everything" is visible. + return null; } @Override diff --git a/core/src/main/java/org/elasticsearch/index/translog/Translog.java b/core/src/main/java/org/elasticsearch/index/translog/Translog.java index 93f9db67b357a..96931784e0cd4 100644 --- a/core/src/main/java/org/elasticsearch/index/translog/Translog.java +++ b/core/src/main/java/org/elasticsearch/index/translog/Translog.java @@ -129,6 +129,10 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC private final AtomicBoolean closed = new AtomicBoolean(); private final TranslogConfig config; private final String translogUUID; + /** + * The last location of any write operation. + */ + private volatile Location lastWriteLocation; /** * Creates a new Translog instance. This method will create a new transaction log unless the given {@link TranslogConfig} has @@ -434,6 +438,7 @@ public Location add(Operation operation) throws IOException { ensureOpen(); Location location = current.add(bytes); assert assertBytesAtLocation(location, bytes); + lastWriteLocation = location; return location; } } catch (AlreadyClosedException | IOException ex) { @@ -447,6 +452,15 @@ public Location add(Operation operation) throws IOException { } } + /** + * The last location that was written to the translog. 
+ */ + public Location getLastWriteLocation() { + try (ReleasableLock lock = readLock.acquire()) { + return lastWriteLocation; + } + } + boolean assertBytesAtLocation(Translog.Location location, BytesReference expectedBytes) throws IOException { // tests can override this ByteBuffer buffer = ByteBuffer.allocate(location.size); diff --git a/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java b/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java index afdcb42ae831b..14c978594f526 100644 --- a/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java +++ b/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java @@ -316,11 +316,16 @@ public void testSegments() throws Exception { // create a doc and refresh ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocumentWithTextField(), B_1, null); - engine.index(new Engine.Index(newUid("1"), doc)); - + Engine.Index index = new Engine.Index(newUid("1"), doc); + engine.index(index); + Translog.Location firstIndexedLocation = index.getTranslogLocation(); ParsedDocument doc2 = testParsedDocument("2", "2", "test", null, -1, -1, testDocumentWithTextField(), B_2, null); - engine.index(new Engine.Index(newUid("2"), doc2)); - engine.refresh("test"); + index = new Engine.Index(newUid("2"), doc2); + engine.index(index); + Translog.Location secondIndexedLocation = index.getTranslogLocation(); + assertThat(secondIndexedLocation, greaterThan(firstIndexedLocation)); + Translog.Location refreshedLocation = engine.refresh("test"); + assertEquals(secondIndexedLocation, refreshedLocation); segments = engine.segments(false); assertThat(segments.size(), equalTo(1)); From 213bebb6ece11b85d17e44af9a54fc2e5e332d39 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Fri, 22 Apr 2016 21:35:52 -0400 Subject: [PATCH 14/86] Add refresh listeners --- .../ReplicatedMutationRequest.java | 15 ++++++ .../elasticsearch/index/IndexSettings.java | 11 ++++ .../elasticsearch/index/shard/IndexShard.java | 51 ++++++++++++++++++- .../index/translog/TranslogTests.java | 12 +++++ 4 files changed, 88 insertions(+), 1 deletion(-) diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/ReplicatedMutationRequest.java b/core/src/main/java/org/elasticsearch/action/support/replication/ReplicatedMutationRequest.java index 68952b90a4ebc..a8927abd1df7c 100644 --- a/core/src/main/java/org/elasticsearch/action/support/replication/ReplicatedMutationRequest.java +++ b/core/src/main/java/org/elasticsearch/action/support/replication/ReplicatedMutationRequest.java @@ -30,6 +30,7 @@ */ public class ReplicatedMutationRequest> extends ReplicationRequest { private boolean refresh; + private boolean blockUntilRefresh; /** * Create an empty request. @@ -59,6 +60,20 @@ public boolean refresh() { return this.refresh; } + /** + * Should this request block until it has been made visible by a refresh? Unlike {@link #refresh(boolean)} this is quite safe to use + * under heavy indexing so long as few total operations use it. A bulk request only counts as a single operation. 
+ */ + @SuppressWarnings("unchecked") + public R setBlockUntilRefresh(boolean blockUntilRefresh) { + this.blockUntilRefresh = blockUntilRefresh; + return (R) this; + } + + public boolean isBlockUntilRefresh() { + return blockUntilRefresh; + } + @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); diff --git a/core/src/main/java/org/elasticsearch/index/IndexSettings.java b/core/src/main/java/org/elasticsearch/index/IndexSettings.java index 31e10e4d7ec2a..e9887020a8cd1 100644 --- a/core/src/main/java/org/elasticsearch/index/IndexSettings.java +++ b/core/src/main/java/org/elasticsearch/index/IndexSettings.java @@ -115,6 +115,8 @@ public final class IndexSettings { public static final Setting INDEX_GC_DELETES_SETTING = Setting.timeSetting("index.gc_deletes", DEFAULT_GC_DELETES, new TimeValue(-1, TimeUnit.MILLISECONDS), Property.Dynamic, Property.IndexScope); + public static final Setting MAX_REFRESH_LISTENERS_PER_SHARD = Setting.intSetting("index.max_refresh_listeners", 100, 0, + Property.Dynamic, Property.IndexScope); private final Index index; private final Version version; @@ -145,6 +147,7 @@ public final class IndexSettings { private volatile int maxResultWindow; private volatile int maxRescoreWindow; private volatile boolean TTLPurgeDisabled; + private volatile int maxRefreshListeners; /** * Returns the default search field for this index. @@ -251,6 +254,7 @@ public IndexSettings(final IndexMetaData indexMetaData, final Settings nodeSetti scopedSettings.addSettingsUpdateConsumer(INDEX_GC_DELETES_SETTING, this::setGCDeletes); scopedSettings.addSettingsUpdateConsumer(INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING, this::setTranslogFlushThresholdSize); scopedSettings.addSettingsUpdateConsumer(INDEX_REFRESH_INTERVAL_SETTING, this::setRefreshInterval); + scopedSettings.addSettingsUpdateConsumer(MAX_REFRESH_LISTENERS_PER_SHARD, this::setMaxRefreshListeners); } private void setTranslogFlushThresholdSize(ByteSizeValue byteSizeValue) { @@ -499,6 +503,13 @@ public T getValue(Setting setting) { return scopedSettings.get(setting); } + public int getMaxRefreshListeners() { + return maxRefreshListeners; + } + + private void setMaxRefreshListeners(int maxRefreshListeners) { + this.maxRefreshListeners = maxRefreshListeners; + } IndexScopedSettings getScopedSettings() { return scopedSettings;} } diff --git a/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java index 8739c2657013d..6c9d7b4e4e8de 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java +++ b/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java @@ -85,7 +85,6 @@ import org.elasticsearch.index.mapper.SourceToParse; import org.elasticsearch.index.mapper.Uid; import org.elasticsearch.index.merge.MergeStats; -import org.elasticsearch.index.percolator.PercolatorFieldMapper; import org.elasticsearch.index.recovery.RecoveryStats; import org.elasticsearch.index.refresh.RefreshStats; import org.elasticsearch.index.search.stats.SearchStats; @@ -115,6 +114,8 @@ import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.EnumSet; +import java.util.Iterator; +import java.util.LinkedList; import java.util.List; import java.util.Map; import java.util.Objects; @@ -124,6 +125,8 @@ import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.atomic.AtomicReference; +import static java.util.Objects.requireNonNull; + public class IndexShard extends AbstractIndexShardComponent { 
private final ThreadPool threadPool; @@ -194,6 +197,10 @@ public class IndexShard extends AbstractIndexShardComponent { * IndexingMemoryController}). */ private final AtomicBoolean active = new AtomicBoolean(); + /** + * Refresh listeners. Uses a LinkedList because we frequently remove items from the front of it. + */ + private final List refreshListeners = new LinkedList<>(); public IndexShard(ShardId shardId, IndexSettings indexSettings, ShardPath path, Store store, IndexCache indexCache, MapperService mapperService, SimilarityService similarityService, IndexFieldDataService indexFieldDataService, @@ -1572,4 +1579,46 @@ protected void delete(Engine engine, Engine.Delete engineDelete) { } } + public void addRefreshListener(Translog.Location location, Runnable action) { + requireNonNull(location, "location cannot be null"); + requireNonNull(action, "action cannot be null"); + + boolean tooManyListeners = false; + synchronized (refreshListeners) { + if (refreshListeners.size() >= indexSettings.getMaxRefreshListeners()) { + tooManyListeners = true; + } else { + refreshListeners.add(new RefreshListener(location, action)); + } + } + if (tooManyListeners) { + refresh("too_many_listeners"); + action.run(); + } + } + + private void callRefreshListeners(Translog.Location refreshedLocation) { + synchronized (refreshListeners) { + for (Iterator itr = refreshListeners.iterator(); itr.hasNext();) { + RefreshListener listener = itr.next(); + if (listener.location.compareTo(refreshedLocation) <= 0) { + itr.remove(); + threadPool.executor(ThreadPool.Names.LISTENER).execute(listener.action); + } + } + } + } + + /** + * Listens for the next refresh that includes this location. + */ + private static class RefreshListener { + private final Translog.Location location; + private final Runnable action; + + public RefreshListener(Translog.Location location, Runnable action) { + this.location = location; + this.action = action; + } + } } diff --git a/core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java b/core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java index 9b6e4670794bf..6f9a302da07d9 100644 --- a/core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java +++ b/core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java @@ -205,18 +205,30 @@ private String randomNonTranslogPatternString(int min, int max) { } public void testRead() throws IOException { + assertNull(translog.getLastWriteLocation()); Translog.Location loc1 = translog.add(new Translog.Index("test", "1", new byte[]{1})); + assertEquals(translog.currentFileGeneration(), translog.getLastWriteLocation().generation); + assertEquals(43, translog.getLastWriteLocation().translogLocation); Translog.Location loc2 = translog.add(new Translog.Index("test", "2", new byte[]{2})); + assertEquals(translog.currentFileGeneration(), translog.getLastWriteLocation().generation); + assertEquals(89, translog.getLastWriteLocation().translogLocation); assertThat(translog.read(loc1).getSource().source.toBytesArray(), equalTo(new BytesArray(new byte[]{1}))); assertThat(translog.read(loc2).getSource().source.toBytesArray(), equalTo(new BytesArray(new byte[]{2}))); translog.sync(); + assertEquals(translog.currentFileGeneration(), translog.getLastWriteLocation().generation); + assertEquals(89, translog.getLastWriteLocation().translogLocation); assertThat(translog.read(loc1).getSource().source.toBytesArray(), equalTo(new BytesArray(new byte[]{1}))); assertThat(translog.read(loc2).getSource().source.toBytesArray(), 
equalTo(new BytesArray(new byte[]{2}))); Translog.Location loc3 = translog.add(new Translog.Index("test", "2", new byte[]{3})); + assertEquals(translog.currentFileGeneration(), translog.getLastWriteLocation().generation); + assertEquals(135, translog.getLastWriteLocation().translogLocation); assertThat(translog.read(loc3).getSource().source.toBytesArray(), equalTo(new BytesArray(new byte[]{3}))); translog.sync(); assertThat(translog.read(loc3).getSource().source.toBytesArray(), equalTo(new BytesArray(new byte[]{3}))); + long lastGeneration = translog.currentFileGeneration(); translog.prepareCommit(); + assertEquals(lastGeneration, translog.getLastWriteLocation().generation); + assertEquals(135, translog.getLastWriteLocation().translogLocation); assertThat(translog.read(loc3).getSource().source.toBytesArray(), equalTo(new BytesArray(new byte[]{3}))); translog.commit(); assertNull(translog.read(loc1)); From a5ffd892d0a352ae7e9757f2640fc2a1fa656bf2 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Mon, 25 Apr 2016 07:44:25 -0400 Subject: [PATCH 15/86] WIP --- .../action/delete/DeleteRequestBuilder.java | 9 +++++++++ .../action/index/IndexRequestBuilder.java | 9 +++++++++ .../replication/ReplicatedMutationRequest.java | 2 ++ .../TransportReplicatedMutationAction.java | 13 ++++++++++--- .../common/settings/IndexScopedSettings.java | 2 +- .../index/IndexRequestBuilderIT.java | 15 +++++++++++++++ 6 files changed, 46 insertions(+), 4 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/action/delete/DeleteRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/delete/DeleteRequestBuilder.java index 0ce907bac1d64..98afa38d6d6f3 100644 --- a/core/src/main/java/org/elasticsearch/action/delete/DeleteRequestBuilder.java +++ b/core/src/main/java/org/elasticsearch/action/delete/DeleteRequestBuilder.java @@ -81,6 +81,15 @@ public DeleteRequestBuilder setRefresh(boolean refresh) { return this; } + /** + * Should this request block until it has been made visible by a refresh? Unlike {@link #refresh(boolean)} this is quite safe to use + * under heavy indexing so long as few total operations use it. A bulk request only counts as a single operation. + */ + public DeleteRequestBuilder setBlockUntilRefresh(boolean blockUntilRefresh) { + request.setBlockUntilRefresh(blockUntilRefresh); + return this; + } + /** * Sets the version, which will cause the delete operation to only be performed if a matching * version exists and no changes happened on the doc since then. diff --git a/core/src/main/java/org/elasticsearch/action/index/IndexRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/index/IndexRequestBuilder.java index 4116755e4eb05..f307e78e8b795 100644 --- a/core/src/main/java/org/elasticsearch/action/index/IndexRequestBuilder.java +++ b/core/src/main/java/org/elasticsearch/action/index/IndexRequestBuilder.java @@ -230,6 +230,15 @@ public IndexRequestBuilder setRefresh(boolean refresh) { return this; } + /** + * Should this request block until it has been made visible by a refresh? Unlike {@link #refresh(boolean)} this is quite safe to use + * under heavy indexing so long as few total operations use it. A bulk request only counts as a single operation. + */ + public IndexRequestBuilder setBlockUntilRefresh(boolean blockUntilRefresh) { + request.setBlockUntilRefresh(blockUntilRefresh); + return this; + } + /** * Sets the version, which will cause the index operation to only be performed if a matching * version exists and no changes happened on the doc since then. 
diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/ReplicatedMutationRequest.java b/core/src/main/java/org/elasticsearch/action/support/replication/ReplicatedMutationRequest.java index a8927abd1df7c..e9ca80c051129 100644 --- a/core/src/main/java/org/elasticsearch/action/support/replication/ReplicatedMutationRequest.java +++ b/core/src/main/java/org/elasticsearch/action/support/replication/ReplicatedMutationRequest.java @@ -78,11 +78,13 @@ public boolean isBlockUntilRefresh() { public void readFrom(StreamInput in) throws IOException { super.readFrom(in); refresh = in.readBoolean(); + blockUntilRefresh = in.readBoolean(); } @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeBoolean(refresh); + out.writeBoolean(blockUntilRefresh); } } diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicatedMutationAction.java b/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicatedMutationAction.java index 723f526bd2731..c779ab37877e3 100644 --- a/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicatedMutationAction.java +++ b/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicatedMutationAction.java @@ -25,7 +25,6 @@ import org.elasticsearch.cluster.action.shard.ShardStateAction; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.shard.IndexShard; @@ -76,7 +75,11 @@ protected Request shardOperationOnPrimary(Request request, ActionListener result = onPrimaryShard(indexService, indexShard, request); processAfterWrite(request.refresh(), indexShard, result.location); - listener.onResponse(result.response); + if (request.isBlockUntilRefresh() && false == request.refresh()) { + indexShard.addRefreshListener(result.location, () -> listener.onResponse(result.response)); + } else { + listener.onResponse(result.response); + } return request; } @@ -87,7 +90,11 @@ protected final void shardOperationOnReplica(Request request, ActionListener listener.onResponse(TransportResponse.Empty.INSTANCE)); + } else { + listener.onResponse(TransportResponse.Empty.INSTANCE); + } } protected final void processAfterWrite(boolean refresh, IndexShard indexShard, Translog.Location location) { diff --git a/core/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java b/core/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java index fabda1b62bb30..84285e6d43dd8 100644 --- a/core/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java +++ b/core/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java @@ -40,7 +40,6 @@ import org.elasticsearch.index.store.FsDirectoryService; import org.elasticsearch.index.store.IndexStore; import org.elasticsearch.index.store.Store; -import org.elasticsearch.index.IndexWarmer; import org.elasticsearch.indices.IndicesRequestCache; import java.util.Arrays; @@ -115,6 +114,7 @@ public final class IndexScopedSettings extends AbstractScopedSettings { IndexSettings.QUERY_STRING_LENIENT_SETTING, IndexSettings.ALLOW_UNMAPPED, IndexSettings.INDEX_CHECK_ON_STARTUP, + IndexSettings.MAX_REFRESH_LISTENERS_PER_SHARD, ShardsLimitAllocationDecider.INDEX_TOTAL_SHARDS_PER_NODE_SETTING, 
IndexSettings.INDEX_GC_DELETES_SETTING, IndicesRequestCache.INDEX_CACHE_REQUEST_ENABLED_SETTING, diff --git a/core/src/test/java/org/elasticsearch/index/IndexRequestBuilderIT.java b/core/src/test/java/org/elasticsearch/index/IndexRequestBuilderIT.java index c41051ec59cfd..cda1519e891fd 100644 --- a/core/src/test/java/org/elasticsearch/index/IndexRequestBuilderIT.java +++ b/core/src/test/java/org/elasticsearch/index/IndexRequestBuilderIT.java @@ -20,9 +20,11 @@ package org.elasticsearch.index; import org.elasticsearch.action.index.IndexRequestBuilder; +import org.elasticsearch.action.index.IndexResponse; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.rest.RestStatus; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.hamcrest.ElasticsearchAssertions; @@ -30,6 +32,8 @@ import java.util.Map; import java.util.concurrent.ExecutionException; +import static org.elasticsearch.index.query.QueryBuilders.matchQuery; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHits; import static org.hamcrest.Matchers.containsString; public class IndexRequestBuilderIT extends ESIntegTestCase { @@ -51,6 +55,17 @@ public void testSetSource() throws InterruptedException, ExecutionException { ElasticsearchAssertions.assertHitCount(searchResponse, builders.length); } + /** + * Setting blockUntilRefresh will cause the request to block until the document is made visible by a refresh. + */ + public void testBlockUntilRefresh() { + IndexResponse index = client().prepareIndex("test", "index", "1").setSource("foo", "bar").setBlockUntilRefresh(true).get(); + assertEquals(RestStatus.CREATED, index.status()); + SearchResponse search = client().prepareSearch("test").setQuery(matchQuery("foo", "bar")).get(); + assertSearchHits(search, "1"); + // TODO tests for delete and bulk and ?update? + } + public void testOddNumberOfSourceObjects() { try { client().prepareIndex("test", "test").setSource("test_field", "foobar", new Object()); From 46c855c9971cb2b748206d2afa6a2d88724be3ba Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Mon, 25 Apr 2016 10:11:10 -0400 Subject: [PATCH 16/86] Move test to own class --- .../index/translog/Translog.java | 1 + .../index/BlockUntilRefreshIT.java | 42 +++++++++++++++++++ .../index/IndexRequestBuilderIT.java | 15 ------- 3 files changed, 43 insertions(+), 15 deletions(-) create mode 100644 core/src/test/java/org/elasticsearch/index/BlockUntilRefreshIT.java diff --git a/core/src/main/java/org/elasticsearch/index/translog/Translog.java b/core/src/main/java/org/elasticsearch/index/translog/Translog.java index 96931784e0cd4..3a11250695909 100644 --- a/core/src/main/java/org/elasticsearch/index/translog/Translog.java +++ b/core/src/main/java/org/elasticsearch/index/translog/Translog.java @@ -132,6 +132,7 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC /** * The last location of any write operation. */ + // NOCOMMIT Decide if we should keep this. We can probably build it synthetically when needed, especially if we're will to relax "size". 
private volatile Location lastWriteLocation; /** diff --git a/core/src/test/java/org/elasticsearch/index/BlockUntilRefreshIT.java b/core/src/test/java/org/elasticsearch/index/BlockUntilRefreshIT.java new file mode 100644 index 0000000000000..94cf270de7b50 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/index/BlockUntilRefreshIT.java @@ -0,0 +1,42 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index; + +import org.elasticsearch.action.index.IndexResponse; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.test.ESIntegTestCase; + +import static org.elasticsearch.index.query.QueryBuilders.matchQuery; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHits; + +public class BlockUntilRefreshIT extends ESIntegTestCase { + /** + * Setting blockUntilRefresh will cause the request to block until the document is made visible by a refresh. + */ + public void testBlockUntilRefresh() { + IndexResponse index = client().prepareIndex("test", "index", "1").setSource("foo", "bar").setBlockUntilRefresh(true).get(); + assertEquals(RestStatus.CREATED, index.status()); + SearchResponse search = client().prepareSearch("test").setQuery(matchQuery("foo", "bar")).get(); + assertSearchHits(search, "1"); + // TODO tests for delete and bulk and ?update? 
+ } + +} diff --git a/core/src/test/java/org/elasticsearch/index/IndexRequestBuilderIT.java b/core/src/test/java/org/elasticsearch/index/IndexRequestBuilderIT.java index cda1519e891fd..c41051ec59cfd 100644 --- a/core/src/test/java/org/elasticsearch/index/IndexRequestBuilderIT.java +++ b/core/src/test/java/org/elasticsearch/index/IndexRequestBuilderIT.java @@ -20,11 +20,9 @@ package org.elasticsearch.index; import org.elasticsearch.action.index.IndexRequestBuilder; -import org.elasticsearch.action.index.IndexResponse; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.index.query.QueryBuilders; -import org.elasticsearch.rest.RestStatus; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.hamcrest.ElasticsearchAssertions; @@ -32,8 +30,6 @@ import java.util.Map; import java.util.concurrent.ExecutionException; -import static org.elasticsearch.index.query.QueryBuilders.matchQuery; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHits; import static org.hamcrest.Matchers.containsString; public class IndexRequestBuilderIT extends ESIntegTestCase { @@ -55,17 +51,6 @@ public void testSetSource() throws InterruptedException, ExecutionException { ElasticsearchAssertions.assertHitCount(searchResponse, builders.length); } - /** - * Setting blockUntilRefresh will cause the request to block until the document is made visible by a refresh. - */ - public void testBlockUntilRefresh() { - IndexResponse index = client().prepareIndex("test", "index", "1").setSource("foo", "bar").setBlockUntilRefresh(true).get(); - assertEquals(RestStatus.CREATED, index.status()); - SearchResponse search = client().prepareSearch("test").setQuery(matchQuery("foo", "bar")).get(); - assertSearchHits(search, "1"); - // TODO tests for delete and bulk and ?update? - } - public void testOddNumberOfSourceObjects() { try { client().prepareIndex("test", "test").setSource("test_field", "foobar", new Object()); From 247cb483c4459dea8e95e0e3bd2e4bf8d452c598 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Mon, 25 Apr 2016 10:29:37 -0400 Subject: [PATCH 17/86] Basic block_until_refresh exposed to java client and basic "is it plugged in" style tests. 
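A minimal usage sketch of what this exposes through the Java client (illustrative only, not part of the patch; it assumes a connected Client named client and reuses the index, type, and field names from BlockUntilRefreshIT below):

    // Index a document and only return once a refresh has made it visible to search.
    IndexResponse indexResponse = client.prepareIndex("test", "index", "1")
            .setSource("foo", "bar")
            .setBlockUntilRefresh(true)
            .get();

    // The same flag on a bulk request; the bulk counts as one listener slot on each
    // shard that it touches (see IndexSettings#MAX_REFRESH_LISTENERS_PER_SHARD).
    BulkResponse bulkResponse = client.prepareBulk()
            .setBlockUntilRefresh(true)
            .add(client.prepareIndex("test", "index", "2").setSource("foo", "baz"))
            .add(client.prepareDelete("test", "index", "1"))
            .get();

Unlike setRefresh(true), this does not force a refresh of its own in the normal case, so it stays cheap under heavy indexing as long as few total operations use it.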
--- .../action/bulk/BulkRequest.java | 21 ++++++++ .../action/bulk/BulkRequestBuilder.java | 11 +++++ .../action/bulk/TransportBulkAction.java | 4 +- .../action/delete/DeleteRequestBuilder.java | 6 ++- .../action/index/IndexRequestBuilder.java | 6 ++- .../ReplicatedMutationRequest.java | 8 ++-- .../TransportReplicatedMutationAction.java | 4 +- .../index/BlockUntilRefreshIT.java | 48 +++++++++++++++---- 8 files changed, 90 insertions(+), 18 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java b/core/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java index f008bf9a4e87b..abe8185218f77 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java @@ -38,6 +38,7 @@ import org.elasticsearch.common.xcontent.XContent; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.VersionType; import java.io.IOException; @@ -63,6 +64,10 @@ public class BulkRequest extends ActionRequest implements Composite protected TimeValue timeout = BulkShardRequest.DEFAULT_TIMEOUT; private WriteConsistencyLevel consistencyLevel = WriteConsistencyLevel.DEFAULT; private boolean refresh = false; + /** + * Should this request block until all of its results are visible for search? + */ + private boolean blockUntilRefresh = false; private long sizeInBytes = 0; @@ -466,6 +471,20 @@ public TimeValue timeout() { return timeout; } + /** + * Should this request block until it has been made visible for search by a refresh? Unlike {@link #refresh(boolean)} this is quite safe + * to use under heavy indexing so long as few total operations use it. See {@link IndexSettings#MAX_REFRESH_LISTENERS_PER_SHARD} for + * the limit. A bulk request counts as one request on each shard that it touches. 
+ */ + public BulkRequest setBlockUntilRefresh(boolean blockUntilRefresh) { + this.blockUntilRefresh = blockUntilRefresh; + return this; + } + + public boolean shouldBlockUntilRefresh() { + return blockUntilRefresh; + } + private int findNextMarker(byte marker, int from, BytesReference data, int length) { for (int i = from; i < length; i++) { if (data.get(i) == marker) { @@ -538,6 +557,7 @@ public void readFrom(StreamInput in) throws IOException { } } refresh = in.readBoolean(); + blockUntilRefresh = in.readBoolean(); timeout = TimeValue.readTimeValue(in); } @@ -557,6 +577,7 @@ public void writeTo(StreamOutput out) throws IOException { request.writeTo(out); } out.writeBoolean(refresh); + out.writeBoolean(blockUntilRefresh); timeout.writeTo(out); } } diff --git a/core/src/main/java/org/elasticsearch/action/bulk/BulkRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/bulk/BulkRequestBuilder.java index 3744055d26cdb..7cebdd0c77310 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/BulkRequestBuilder.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/BulkRequestBuilder.java @@ -30,6 +30,7 @@ import org.elasticsearch.client.ElasticsearchClient; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.index.IndexSettings; /** * A bulk request holds an ordered {@link IndexRequest}s and {@link DeleteRequest}s and allows to executes @@ -142,6 +143,16 @@ public final BulkRequestBuilder setTimeout(String timeout) { return this; } + /** + * Should this request block until it has been made visible for search by a refresh? Unlike {@link #refresh(boolean)} this is quite safe + * to use under heavy indexing so long as few total operations use it. See {@link IndexSettings#MAX_REFRESH_LISTENERS_PER_SHARD} for + * the limit. A bulk request counts as one request on each shard that it touches. + */ + public final BulkRequestBuilder setBlockUntilRefresh(boolean blockUntilRefresh) { + request.setBlockUntilRefresh(blockUntilRefresh); + return this; + } + /** * The number of actions currently in the bulk. 
*/ diff --git a/core/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java b/core/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java index 667e691f6c86e..d097adfeaa005 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java @@ -344,9 +344,11 @@ void executeBulk(Task task, final BulkRequest bulkRequest, final long startTimeN for (Map.Entry> entry : requestsByShard.entrySet()) { final ShardId shardId = entry.getKey(); final List requests = entry.getValue(); - BulkShardRequest bulkShardRequest = new BulkShardRequest(bulkRequest, shardId, bulkRequest.refresh(), requests.toArray(new BulkItemRequest[requests.size()])); + BulkShardRequest bulkShardRequest = new BulkShardRequest(bulkRequest, shardId, bulkRequest.refresh(), + requests.toArray(new BulkItemRequest[requests.size()])); bulkShardRequest.consistencyLevel(bulkRequest.consistencyLevel()); bulkShardRequest.timeout(bulkRequest.timeout()); + bulkShardRequest.setBlockUntilRefresh(bulkRequest.shouldBlockUntilRefresh()); if (task != null) { bulkShardRequest.setParentTask(nodeId, task.getId()); } diff --git a/core/src/main/java/org/elasticsearch/action/delete/DeleteRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/delete/DeleteRequestBuilder.java index 98afa38d6d6f3..4de70e5783164 100644 --- a/core/src/main/java/org/elasticsearch/action/delete/DeleteRequestBuilder.java +++ b/core/src/main/java/org/elasticsearch/action/delete/DeleteRequestBuilder.java @@ -22,6 +22,7 @@ import org.elasticsearch.action.support.replication.ReplicationRequestBuilder; import org.elasticsearch.client.ElasticsearchClient; import org.elasticsearch.common.Nullable; +import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.VersionType; /** @@ -82,8 +83,9 @@ public DeleteRequestBuilder setRefresh(boolean refresh) { } /** - * Should this request block until it has been made visible by a refresh? Unlike {@link #refresh(boolean)} this is quite safe to use - * under heavy indexing so long as few total operations use it. A bulk request only counts as a single operation. + * Should this request block until it has been made visible for search by a refresh? Unlike {@link #refresh(boolean)} this is quite safe + * to use under heavy indexing so long as few total operations use it. See {@link IndexSettings#MAX_REFRESH_LISTENERS_PER_SHARD} for + * the limit. A bulk request counts as one request on each shard that it touches. */ public DeleteRequestBuilder setBlockUntilRefresh(boolean blockUntilRefresh) { request.setBlockUntilRefresh(blockUntilRefresh); diff --git a/core/src/main/java/org/elasticsearch/action/index/IndexRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/index/IndexRequestBuilder.java index f307e78e8b795..c8c25d49d4bd7 100644 --- a/core/src/main/java/org/elasticsearch/action/index/IndexRequestBuilder.java +++ b/core/src/main/java/org/elasticsearch/action/index/IndexRequestBuilder.java @@ -26,6 +26,7 @@ import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.VersionType; import java.util.Map; @@ -231,8 +232,9 @@ public IndexRequestBuilder setRefresh(boolean refresh) { } /** - * Should this request block until it has been made visible by a refresh? 
Unlike {@link #refresh(boolean)} this is quite safe to use - * under heavy indexing so long as few total operations use it. A bulk request only counts as a single operation. + * Should this request block until it has been made visible for search by a refresh? Unlike {@link #refresh(boolean)} this is quite safe + * to use under heavy indexing so long as few total operations use it. See {@link IndexSettings#MAX_REFRESH_LISTENERS_PER_SHARD} for + * the limit. A bulk request counts as one request on each shard that it touches. */ public IndexRequestBuilder setBlockUntilRefresh(boolean blockUntilRefresh) { request.setBlockUntilRefresh(blockUntilRefresh); diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/ReplicatedMutationRequest.java b/core/src/main/java/org/elasticsearch/action/support/replication/ReplicatedMutationRequest.java index e9ca80c051129..1d0c305a9b74a 100644 --- a/core/src/main/java/org/elasticsearch/action/support/replication/ReplicatedMutationRequest.java +++ b/core/src/main/java/org/elasticsearch/action/support/replication/ReplicatedMutationRequest.java @@ -21,6 +21,7 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.shard.ShardId; import java.io.IOException; @@ -61,8 +62,9 @@ public boolean refresh() { } /** - * Should this request block until it has been made visible by a refresh? Unlike {@link #refresh(boolean)} this is quite safe to use - * under heavy indexing so long as few total operations use it. A bulk request only counts as a single operation. + * Should this request block until it has been made visible for search by a refresh? Unlike {@link #refresh(boolean)} this is quite safe + * to use under heavy indexing so long as few total operations use it. See {@link IndexSettings#MAX_REFRESH_LISTENERS_PER_SHARD} for + * the limit. A bulk request counts as one request on each shard that it touches. 
*/ @SuppressWarnings("unchecked") public R setBlockUntilRefresh(boolean blockUntilRefresh) { @@ -70,7 +72,7 @@ public R setBlockUntilRefresh(boolean blockUntilRefresh) { return (R) this; } - public boolean isBlockUntilRefresh() { + public boolean shouldBlockUntilRefresh() { return blockUntilRefresh; } diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicatedMutationAction.java b/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicatedMutationAction.java index c779ab37877e3..3d3a253f6a2f2 100644 --- a/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicatedMutationAction.java +++ b/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicatedMutationAction.java @@ -75,7 +75,7 @@ protected Request shardOperationOnPrimary(Request request, ActionListener result = onPrimaryShard(indexService, indexShard, request); processAfterWrite(request.refresh(), indexShard, result.location); - if (request.isBlockUntilRefresh() && false == request.refresh()) { + if (request.shouldBlockUntilRefresh() && false == request.refresh()) { indexShard.addRefreshListener(result.location, () -> listener.onResponse(result.response)); } else { listener.onResponse(result.response); @@ -90,7 +90,7 @@ protected final void shardOperationOnReplica(Request request, ActionListener listener.onResponse(TransportResponse.Empty.INSTANCE)); } else { listener.onResponse(TransportResponse.Empty.INSTANCE); diff --git a/core/src/test/java/org/elasticsearch/index/BlockUntilRefreshIT.java b/core/src/test/java/org/elasticsearch/index/BlockUntilRefreshIT.java index 94cf270de7b50..061678905ab09 100644 --- a/core/src/test/java/org/elasticsearch/index/BlockUntilRefreshIT.java +++ b/core/src/test/java/org/elasticsearch/index/BlockUntilRefreshIT.java @@ -19,24 +19,56 @@ package org.elasticsearch.index; +import org.elasticsearch.action.bulk.BulkRequestBuilder; import org.elasticsearch.action.index.IndexResponse; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.test.ESIntegTestCase; +import java.util.concurrent.ExecutionException; + import static org.elasticsearch.index.query.QueryBuilders.matchQuery; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoSearchHits; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHits; +/** + * Tests that requests with block_until_refresh set to true will be visible when they return. + */ public class BlockUntilRefreshIT extends ESIntegTestCase { - /** - * Setting blockUntilRefresh will cause the request to block until the document is made visible by a refresh. - */ - public void testBlockUntilRefresh() { + public void testIndex() { IndexResponse index = client().prepareIndex("test", "index", "1").setSource("foo", "bar").setBlockUntilRefresh(true).get(); assertEquals(RestStatus.CREATED, index.status()); - SearchResponse search = client().prepareSearch("test").setQuery(matchQuery("foo", "bar")).get(); - assertSearchHits(search, "1"); - // TODO tests for delete and bulk and ?update? + assertSearchHits(client().prepareSearch("test").setQuery(matchQuery("foo", "bar")).get(), "1"); + // TODO update!!! 
+ } + + public void testDelete() throws InterruptedException, ExecutionException { + // Index normally + indexRandom(true, client().prepareIndex("test", "test", "1").setSource("foo", "bar")); + assertSearchHits(client().prepareSearch("test").setQuery(matchQuery("foo", "bar")).get(), "1"); + + // Now delete with blockUntilRefresh + client().prepareDelete("test", "test", "1").setBlockUntilRefresh(true).get(); + assertNoSearchHits(client().prepareSearch("test").setQuery(matchQuery("foo", "bar")).get()); } + public void testBulk() { + // Index by bulk with block_until_refresh + BulkRequestBuilder bulk = client().prepareBulk().setBlockUntilRefresh(true); + bulk.add(client().prepareIndex("test", "index", "1").setSource("foo", "bar")); + assertNoFailures(bulk.get()); + assertSearchHits(client().prepareSearch("test").setQuery(matchQuery("foo", "bar")).get(), "1"); + + // Update by bult with block_until_refresh + bulk = client().prepareBulk().setBlockUntilRefresh(true); + bulk.add(client().prepareUpdate("test", "index", "1").setDoc("foo", "baz")); + assertNoFailures(bulk.get()); + assertSearchHits(client().prepareSearch("test").setQuery(matchQuery("foo", "baz")).get(), "1"); + + // Update by bult with block_until_refresh + bulk = client().prepareBulk().setBlockUntilRefresh(true); + bulk.add(client().prepareDelete("test", "index", "1")); + assertNoFailures(bulk.get()); + assertNoSearchHits(client().prepareSearch("test").setQuery(matchQuery("foo", "bar")).get()); + } } From 9919758b644fd73895fb88cd6a4909a8387eb2e2 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Mon, 25 Apr 2016 11:00:14 -0400 Subject: [PATCH 18/86] Oh boy that wasn't working --- .../TransportReplicatedMutationAction.java | 31 ++++++++++++++-- .../elasticsearch/index/IndexSettings.java | 1 + .../elasticsearch/index/shard/IndexShard.java | 35 ++++++++++--------- 3 files changed, 49 insertions(+), 18 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicatedMutationAction.java b/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicatedMutationAction.java index 3d3a253f6a2f2..eceb9325cdc0e 100644 --- a/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicatedMutationAction.java +++ b/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicatedMutationAction.java @@ -28,8 +28,10 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.shard.IndexShard; +import org.elasticsearch.index.shard.IndexShard.RefreshListener; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.translog.Translog; +import org.elasticsearch.index.translog.Translog.Location; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportResponse; @@ -76,7 +78,20 @@ protected Request shardOperationOnPrimary(Request request, ActionListener result = onPrimaryShard(indexService, indexShard, request); processAfterWrite(request.refresh(), indexShard, result.location); if (request.shouldBlockUntilRefresh() && false == request.refresh()) { - indexShard.addRefreshListener(result.location, () -> listener.onResponse(result.response)); + indexShard.addRefreshListener(new RefreshListener() { + @Override + public Location location() { + return result.location; + } + + @Override + public void refreshed(boolean forcedRefresh) { + logger.warn("block_until_refresh request ran out of slots 
and forced a refresh: [{}]", request); + // TODO mark the response + listener.onResponse(result.response); + } + + }); } else { listener.onResponse(result.response); } @@ -91,7 +106,19 @@ protected final void shardOperationOnReplica(Request request, ActionListener listener.onResponse(TransportResponse.Empty.INSTANCE)); + indexShard.addRefreshListener(new RefreshListener() { + @Override + public Location location() { + return location; + } + + @Override + public void refreshed(boolean forcedRefresh) { + logger.warn("block_until_refresh request ran out of slots and forced a refresh: [{}]", request); + // TODO mark the response?!? + listener.onResponse(TransportResponse.Empty.INSTANCE); + } + }); } else { listener.onResponse(TransportResponse.Empty.INSTANCE); } diff --git a/core/src/main/java/org/elasticsearch/index/IndexSettings.java b/core/src/main/java/org/elasticsearch/index/IndexSettings.java index e9887020a8cd1..c94a7e62968ea 100644 --- a/core/src/main/java/org/elasticsearch/index/IndexSettings.java +++ b/core/src/main/java/org/elasticsearch/index/IndexSettings.java @@ -232,6 +232,7 @@ public IndexSettings(final IndexMetaData indexMetaData, final Settings nodeSetti maxResultWindow = scopedSettings.get(MAX_RESULT_WINDOW_SETTING); maxRescoreWindow = scopedSettings.get(MAX_RESCORE_WINDOW_SETTING); TTLPurgeDisabled = scopedSettings.get(INDEX_TTL_DISABLE_PURGE_SETTING); +// maxRefreshListeners = scopedSettings.get(MAX_REFRESH_LISTENERS_PER_SHARD); this.mergePolicyConfig = new MergePolicyConfig(logger, this); assert indexNameMatcher.test(indexMetaData.getIndex().getName()); diff --git a/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java index 6c9d7b4e4e8de..40e421f4f2774 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java +++ b/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java @@ -1579,46 +1579,49 @@ protected void delete(Engine engine, Engine.Delete engineDelete) { } } - public void addRefreshListener(Translog.Location location, Runnable action) { - requireNonNull(location, "location cannot be null"); - requireNonNull(action, "action cannot be null"); + public void addRefreshListener(RefreshListener listener) { + requireNonNull(listener, "listener cannot be null"); boolean tooManyListeners = false; synchronized (refreshListeners) { if (refreshListeners.size() >= indexSettings.getMaxRefreshListeners()) { tooManyListeners = true; } else { - refreshListeners.add(new RefreshListener(location, action)); + refreshListeners.add(listener); } } if (tooManyListeners) { refresh("too_many_listeners"); - action.run(); + listener.refreshed(true); } } - private void callRefreshListeners(Translog.Location refreshedLocation) { + void callRefreshListeners(Translog.Location refreshedLocation) { synchronized (refreshListeners) { for (Iterator itr = refreshListeners.iterator(); itr.hasNext();) { RefreshListener listener = itr.next(); - if (listener.location.compareTo(refreshedLocation) <= 0) { + if (listener.location().compareTo(refreshedLocation) <= 0) { itr.remove(); - threadPool.executor(ThreadPool.Names.LISTENER).execute(listener.action); + threadPool.executor(ThreadPool.Names.LISTENER).execute(() -> listener.refreshed(false)); } } } } /** - * Listens for the next refresh that includes this location. + * Called when a refresh includes the location. 
*/ - private static class RefreshListener { - private final Translog.Location location; - private final Runnable action; + public static interface RefreshListener { + /** + * The location to wait for. + */ + Translog.Location location(); - public RefreshListener(Translog.Location location, Runnable action) { - this.location = location; - this.action = action; - } + /** + * Called when the location has been refreshed. + * + * @param forcedRefresh did this request force a refresh because ran out of listener slots? + */ + void refreshed(boolean forcedRefresh); } } From 611cbeeaeb458f4b428bfc43a1ee6652adf4baff Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Mon, 25 Apr 2016 11:01:40 -0400 Subject: [PATCH 19/86] Move ReplicationResponse now it is in the same package as its request --- .../main/java/org/elasticsearch/action/DocWriteResponse.java | 1 + .../action/admin/indices/flush/TransportFlushAction.java | 2 +- .../action/admin/indices/flush/TransportShardFlushAction.java | 2 +- .../action/admin/indices/refresh/TransportRefreshAction.java | 2 +- .../admin/indices/refresh/TransportShardRefreshAction.java | 2 +- .../java/org/elasticsearch/action/bulk/BulkShardResponse.java | 2 +- .../action/support/replication/ReplicationOperation.java | 1 - .../action/{ => support/replication}/ReplicationResponse.java | 4 +++- .../replication/TransportBroadcastReplicationAction.java | 1 - .../replication/TransportReplicatedMutationAction.java | 1 - .../support/replication/TransportReplicationAction.java | 1 - 11 files changed, 9 insertions(+), 10 deletions(-) rename core/src/main/java/org/elasticsearch/action/{ => support/replication}/ReplicationResponse.java (98%) diff --git a/core/src/main/java/org/elasticsearch/action/DocWriteResponse.java b/core/src/main/java/org/elasticsearch/action/DocWriteResponse.java index 4df43b75401f9..a5b1dd2268bbc 100644 --- a/core/src/main/java/org/elasticsearch/action/DocWriteResponse.java +++ b/core/src/main/java/org/elasticsearch/action/DocWriteResponse.java @@ -18,6 +18,7 @@ */ package org.elasticsearch.action; +import org.elasticsearch.action.support.replication.ReplicationResponse; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.StatusToXContent; diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportFlushAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportFlushAction.java index 8bb124d8fc494..a29918b438ef3 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportFlushAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportFlushAction.java @@ -19,9 +19,9 @@ package org.elasticsearch.action.admin.indices.flush; -import org.elasticsearch.action.ReplicationResponse; import org.elasticsearch.action.ShardOperationFailedException; import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.replication.ReplicationResponse; import org.elasticsearch.action.support.replication.TransportBroadcastReplicationAction; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportShardFlushAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportShardFlushAction.java index cd0dbe4e07d15..e8cac9dfc8c40 100644 --- 
a/core/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportShardFlushAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportShardFlushAction.java @@ -20,8 +20,8 @@ package org.elasticsearch.action.admin.indices.flush; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.ReplicationResponse; import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.replication.ReplicationResponse; import org.elasticsearch.action.support.replication.TransportReplicationAction; import org.elasticsearch.cluster.action.shard.ShardStateAction; import org.elasticsearch.cluster.block.ClusterBlockLevel; diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportRefreshAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportRefreshAction.java index 34bf39daabd19..ac64e276778f6 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportRefreshAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportRefreshAction.java @@ -19,10 +19,10 @@ package org.elasticsearch.action.admin.indices.refresh; -import org.elasticsearch.action.ReplicationResponse; import org.elasticsearch.action.ShardOperationFailedException; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.replication.BasicReplicationRequest; +import org.elasticsearch.action.support.replication.ReplicationResponse; import org.elasticsearch.action.support.replication.TransportBroadcastReplicationAction; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportShardRefreshAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportShardRefreshAction.java index 2d68c4935f827..d8d5da34d211f 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportShardRefreshAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportShardRefreshAction.java @@ -20,9 +20,9 @@ package org.elasticsearch.action.admin.indices.refresh; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.ReplicationResponse; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.replication.BasicReplicationRequest; +import org.elasticsearch.action.support.replication.ReplicationResponse; import org.elasticsearch.action.support.replication.TransportReplicationAction; import org.elasticsearch.cluster.action.shard.ShardStateAction; import org.elasticsearch.cluster.block.ClusterBlockLevel; diff --git a/core/src/main/java/org/elasticsearch/action/bulk/BulkShardResponse.java b/core/src/main/java/org/elasticsearch/action/bulk/BulkShardResponse.java index 76c80a9b0640a..045f5ab408b50 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/BulkShardResponse.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/BulkShardResponse.java @@ -19,7 +19,7 @@ package org.elasticsearch.action.bulk; -import org.elasticsearch.action.ReplicationResponse; +import org.elasticsearch.action.support.replication.ReplicationResponse; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.index.shard.ShardId; diff --git 
a/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java b/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java index c614fa228c6ad..90d61bcb29082 100644 --- a/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java +++ b/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java @@ -21,7 +21,6 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.ReplicationResponse; import org.elasticsearch.action.UnavailableShardsException; import org.elasticsearch.action.WriteConsistencyLevel; import org.elasticsearch.action.support.TransportActions; diff --git a/core/src/main/java/org/elasticsearch/action/ReplicationResponse.java b/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationResponse.java similarity index 98% rename from core/src/main/java/org/elasticsearch/action/ReplicationResponse.java rename to core/src/main/java/org/elasticsearch/action/support/replication/ReplicationResponse.java index df2f90b002046..bacdb9bfb2059 100644 --- a/core/src/main/java/org/elasticsearch/action/ReplicationResponse.java +++ b/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationResponse.java @@ -17,10 +17,12 @@ * under the License. */ -package org.elasticsearch.action; +package org.elasticsearch.action.support.replication; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.ShardOperationFailedException; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/TransportBroadcastReplicationAction.java b/core/src/main/java/org/elasticsearch/action/support/replication/TransportBroadcastReplicationAction.java index 25de821e22714..2cab7d7831795 100644 --- a/core/src/main/java/org/elasticsearch/action/support/replication/TransportBroadcastReplicationAction.java +++ b/core/src/main/java/org/elasticsearch/action/support/replication/TransportBroadcastReplicationAction.java @@ -22,7 +22,6 @@ import com.carrotsearch.hppc.cursors.IntObjectCursor; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.ReplicationResponse; import org.elasticsearch.action.ShardOperationFailedException; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.DefaultShardOperationFailedException; diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicatedMutationAction.java b/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicatedMutationAction.java index eceb9325cdc0e..9f0c1749a13ea 100644 --- a/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicatedMutationAction.java +++ b/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicatedMutationAction.java @@ -20,7 +20,6 @@ package org.elasticsearch.action.support.replication; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.ReplicationResponse; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.cluster.action.shard.ShardStateAction; import 
org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java b/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java index 1a01721fc84ff..08c2c15baf319 100644 --- a/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java +++ b/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java @@ -22,7 +22,6 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionListenerResponseHandler; -import org.elasticsearch.action.ReplicationResponse; import org.elasticsearch.action.UnavailableShardsException; import org.elasticsearch.action.WriteConsistencyLevel; import org.elasticsearch.action.support.ActionFilters; From e445cb0cb91ebdbcfdbf566696edb2bf1c84a882 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Mon, 25 Apr 2016 11:03:31 -0400 Subject: [PATCH 20/86] Javadoc --- .../org/elasticsearch/action/bulk/BulkRequestBuilder.java | 6 +++--- .../elasticsearch/action/delete/DeleteRequestBuilder.java | 6 +++--- .../org/elasticsearch/action/index/IndexRequestBuilder.java | 6 +++--- 3 files changed, 9 insertions(+), 9 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/action/bulk/BulkRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/bulk/BulkRequestBuilder.java index 7cebdd0c77310..031f0b506b29c 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/BulkRequestBuilder.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/BulkRequestBuilder.java @@ -144,9 +144,9 @@ public final BulkRequestBuilder setTimeout(String timeout) { } /** - * Should this request block until it has been made visible for search by a refresh? Unlike {@link #refresh(boolean)} this is quite safe - * to use under heavy indexing so long as few total operations use it. See {@link IndexSettings#MAX_REFRESH_LISTENERS_PER_SHARD} for - * the limit. A bulk request counts as one request on each shard that it touches. + * Should this request block until it has been made visible for search by a refresh? Unlike {@link #setRefresh(boolean)} this is quite + * safe to use under heavy indexing so long as few total operations use it. See {@link IndexSettings#MAX_REFRESH_LISTENERS_PER_SHARD} + * for the limit. A bulk request counts as one request on each shard that it touches. Defaults to false. */ public final BulkRequestBuilder setBlockUntilRefresh(boolean blockUntilRefresh) { request.setBlockUntilRefresh(blockUntilRefresh); diff --git a/core/src/main/java/org/elasticsearch/action/delete/DeleteRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/delete/DeleteRequestBuilder.java index 4de70e5783164..1b8de03af518b 100644 --- a/core/src/main/java/org/elasticsearch/action/delete/DeleteRequestBuilder.java +++ b/core/src/main/java/org/elasticsearch/action/delete/DeleteRequestBuilder.java @@ -83,9 +83,9 @@ public DeleteRequestBuilder setRefresh(boolean refresh) { } /** - * Should this request block until it has been made visible for search by a refresh? Unlike {@link #refresh(boolean)} this is quite safe - * to use under heavy indexing so long as few total operations use it. See {@link IndexSettings#MAX_REFRESH_LISTENERS_PER_SHARD} for - * the limit. A bulk request counts as one request on each shard that it touches. + * Should this request block until it has been made visible for search by a refresh? 
Unlike {@link #setRefresh(boolean)} this is quite + * safe to use under heavy indexing so long as few total operations use it. See {@link IndexSettings#MAX_REFRESH_LISTENERS_PER_SHARD} + * for the limit. A bulk request counts as one request on each shard that it touches. Defaults to false. */ public DeleteRequestBuilder setBlockUntilRefresh(boolean blockUntilRefresh) { request.setBlockUntilRefresh(blockUntilRefresh); diff --git a/core/src/main/java/org/elasticsearch/action/index/IndexRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/index/IndexRequestBuilder.java index c8c25d49d4bd7..6c299401e744c 100644 --- a/core/src/main/java/org/elasticsearch/action/index/IndexRequestBuilder.java +++ b/core/src/main/java/org/elasticsearch/action/index/IndexRequestBuilder.java @@ -232,9 +232,9 @@ public IndexRequestBuilder setRefresh(boolean refresh) { } /** - * Should this request block until it has been made visible for search by a refresh? Unlike {@link #refresh(boolean)} this is quite safe - * to use under heavy indexing so long as few total operations use it. See {@link IndexSettings#MAX_REFRESH_LISTENERS_PER_SHARD} for - * the limit. A bulk request counts as one request on each shard that it touches. + * Should this request block until it has been made visible for search by a refresh? Unlike {@link #setRefresh(boolean)} this is quite + * safe to use under heavy indexing so long as few total operations use it. See {@link IndexSettings#MAX_REFRESH_LISTENERS_PER_SHARD} + * for the limit. A bulk request counts as one request on each shard that it touches. Defaults to false. */ public IndexRequestBuilder setBlockUntilRefresh(boolean blockUntilRefresh) { request.setBlockUntilRefresh(blockUntilRefresh); From 2058f4a808762c4588309f21b13b677245832f2c Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Mon, 25 Apr 2016 11:45:55 -0400 Subject: [PATCH 21/86] Pass back information about whether we refreshed --- .../action/DocWriteResponse.java | 22 ++++++++++++++- .../action/bulk/BulkShardResponse.java | 11 ++++++-- .../ReplicatedMutationResponse.java | 27 +++++++++++++++++++ .../TransportReplicatedMutationAction.java | 9 ++++--- .../BroadcastReplicationTests.java | 1 - .../ReplicationOperationTests.java | 2 -- .../TransportReplicationActionTests.java | 2 -- .../elasticsearch/document/ShardInfoIT.java | 2 +- .../index/BlockUntilRefreshIT.java | 25 ++++++++++++----- 9 files changed, 83 insertions(+), 18 deletions(-) create mode 100644 core/src/main/java/org/elasticsearch/action/support/replication/ReplicatedMutationResponse.java diff --git a/core/src/main/java/org/elasticsearch/action/DocWriteResponse.java b/core/src/main/java/org/elasticsearch/action/DocWriteResponse.java index a5b1dd2268bbc..af97f815f6f60 100644 --- a/core/src/main/java/org/elasticsearch/action/DocWriteResponse.java +++ b/core/src/main/java/org/elasticsearch/action/DocWriteResponse.java @@ -18,11 +18,15 @@ */ package org.elasticsearch.action; +import org.elasticsearch.action.support.replication.ReplicatedMutationRequest; +import org.elasticsearch.action.support.replication.ReplicatedMutationResponse; import org.elasticsearch.action.support.replication.ReplicationResponse; +import org.elasticsearch.action.support.replication.ReplicationResponse.ShardInfo; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.StatusToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.index.IndexSettings; 
import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.rest.RestStatus; @@ -31,12 +35,13 @@ /** * A base class for the response of a write operation that involves a single doc */ -public abstract class DocWriteResponse extends ReplicationResponse implements StatusToXContent { +public abstract class DocWriteResponse extends ReplicatedMutationResponse implements StatusToXContent { private ShardId shardId; private String id; private String type; private long version; + private boolean forcedRefresh; public DocWriteResponse(ShardId shardId, String type, String id, long version) { this.shardId = shardId; @@ -85,6 +90,19 @@ public long getVersion() { return this.version; } + /** + * Did this request force a refresh? Requests that set {@link ReplicatedMutationRequest#refresh(boolean)} to true should always return + * true for this. Requests that set {@link ReplicatedMutationRequest#setBlockUntilRefresh(boolean)} to true should only return this if + * they run out of refresh listener slots (see {@link IndexSettings#MAX_REFRESH_LISTENERS_PER_SHARD}). + */ + public boolean forcedRefresh() { + return forcedRefresh; + } + + public void setForcedRefresh(boolean forcedRefresh) { + this.forcedRefresh = forcedRefresh; + } + /** returns the rest status for this response (based on {@link ShardInfo#status()} */ public RestStatus status() { return getShardInfo().status(); @@ -98,6 +116,7 @@ public void readFrom(StreamInput in) throws IOException { type = in.readString(); id = in.readString(); version = in.readZLong(); + forcedRefresh = in.readBoolean(); } @Override @@ -107,6 +126,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeString(type); out.writeString(id); out.writeZLong(version); + out.writeBoolean(forcedRefresh); } static final class Fields { diff --git a/core/src/main/java/org/elasticsearch/action/bulk/BulkShardResponse.java b/core/src/main/java/org/elasticsearch/action/bulk/BulkShardResponse.java index 045f5ab408b50..19493c54113a6 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/BulkShardResponse.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/BulkShardResponse.java @@ -19,7 +19,7 @@ package org.elasticsearch.action.bulk; -import org.elasticsearch.action.support.replication.ReplicationResponse; +import org.elasticsearch.action.support.replication.ReplicatedMutationResponse; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.index.shard.ShardId; @@ -29,7 +29,7 @@ /** * */ -public class BulkShardResponse extends ReplicationResponse { +public class BulkShardResponse extends ReplicatedMutationResponse { private ShardId shardId; private BulkItemResponse[] responses; @@ -50,6 +50,13 @@ public BulkItemResponse[] getResponses() { return responses; } + @Override + public void setForcedRefresh(boolean forcedRefresh) { + for (BulkItemResponse response : responses) { + response.getResponse().setForcedRefresh(forcedRefresh); + } + } + @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/ReplicatedMutationResponse.java b/core/src/main/java/org/elasticsearch/action/support/replication/ReplicatedMutationResponse.java new file mode 100644 index 0000000000000..07234931184d0 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/action/support/replication/ReplicatedMutationResponse.java @@ -0,0 +1,27 @@ +/* + * Licensed to Elasticsearch under one or more 
contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.support.replication; + +/** + * Base class for responses that modify data in some shard like delete, index, and shardBulk. + */ +public abstract class ReplicatedMutationResponse extends ReplicationResponse { + public abstract void setForcedRefresh(boolean forcedRefresh); +} diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicatedMutationAction.java b/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicatedMutationAction.java index 9f0c1749a13ea..bd886b4948cfe 100644 --- a/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicatedMutationAction.java +++ b/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicatedMutationAction.java @@ -43,7 +43,7 @@ */ public abstract class TransportReplicatedMutationAction< Request extends ReplicatedMutationRequest, - Response extends ReplicationResponse + Response extends ReplicatedMutationResponse > extends TransportReplicationAction { protected TransportReplicatedMutationAction(Settings settings, String actionName, TransportService transportService, @@ -76,6 +76,7 @@ protected Request shardOperationOnPrimary(Request request, ActionListener result = onPrimaryShard(indexService, indexShard, request); processAfterWrite(request.refresh(), indexShard, result.location); + result.response.setForcedRefresh(request.refresh()); if (request.shouldBlockUntilRefresh() && false == request.refresh()) { indexShard.addRefreshListener(new RefreshListener() { @Override @@ -86,7 +87,9 @@ public Location location() { @Override public void refreshed(boolean forcedRefresh) { logger.warn("block_until_refresh request ran out of slots and forced a refresh: [{}]", request); - // TODO mark the response + if (forcedRefresh) { + result.response.setForcedRefresh(true); + } listener.onResponse(result.response); } @@ -137,7 +140,7 @@ protected final void processAfterWrite(boolean refresh, IndexShard indexShard, T indexShard.maybeFlush(); } - protected static class WriteResult { + protected static class WriteResult { public final T response; public final Translog.Location location; diff --git a/core/src/test/java/org/elasticsearch/action/support/replication/BroadcastReplicationTests.java b/core/src/test/java/org/elasticsearch/action/support/replication/BroadcastReplicationTests.java index 0bd7f9bf18a75..6d2b741a3f4ca 100644 --- a/core/src/test/java/org/elasticsearch/action/support/replication/BroadcastReplicationTests.java +++ b/core/src/test/java/org/elasticsearch/action/support/replication/BroadcastReplicationTests.java @@ -21,7 +21,6 @@ import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.NoShardAvailableActionException; -import 
org.elasticsearch.action.ReplicationResponse; import org.elasticsearch.action.ShardOperationFailedException; import org.elasticsearch.action.UnavailableShardsException; import org.elasticsearch.action.admin.indices.flush.FlushRequest; diff --git a/core/src/test/java/org/elasticsearch/action/support/replication/ReplicationOperationTests.java b/core/src/test/java/org/elasticsearch/action/support/replication/ReplicationOperationTests.java index c7e65ae4e0ae8..8b0aba2e7cd49 100644 --- a/core/src/test/java/org/elasticsearch/action/support/replication/ReplicationOperationTests.java +++ b/core/src/test/java/org/elasticsearch/action/support/replication/ReplicationOperationTests.java @@ -21,7 +21,6 @@ import org.apache.lucene.index.CorruptIndexException; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.ReplicationResponse; import org.elasticsearch.action.UnavailableShardsException; import org.elasticsearch.action.WriteConsistencyLevel; import org.elasticsearch.action.support.PlainActionFuture; @@ -30,7 +29,6 @@ import org.elasticsearch.cluster.routing.IndexShardRoutingTable; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.ShardRoutingState; -import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.logging.ESLogger; diff --git a/core/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java b/core/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java index 71451d0e8a133..ad3d0f2c967ca 100644 --- a/core/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java +++ b/core/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java @@ -20,11 +20,9 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.ReplicationResponse; import org.elasticsearch.action.UnavailableShardsException; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.PlainActionFuture; -import org.elasticsearch.action.support.TransportAction; import org.elasticsearch.client.transport.NoNodeAvailableException; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.action.shard.ShardStateAction; diff --git a/core/src/test/java/org/elasticsearch/document/ShardInfoIT.java b/core/src/test/java/org/elasticsearch/document/ShardInfoIT.java index 4f28cf19d7bae..765cee3b6e8d6 100644 --- a/core/src/test/java/org/elasticsearch/document/ShardInfoIT.java +++ b/core/src/test/java/org/elasticsearch/document/ShardInfoIT.java @@ -19,7 +19,6 @@ package org.elasticsearch.document; -import org.elasticsearch.action.ReplicationResponse; import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; import org.elasticsearch.action.admin.indices.recovery.RecoveryResponse; import org.elasticsearch.action.bulk.BulkItemResponse; @@ -27,6 +26,7 @@ import org.elasticsearch.action.bulk.BulkResponse; import org.elasticsearch.action.delete.DeleteResponse; import org.elasticsearch.action.index.IndexResponse; +import org.elasticsearch.action.support.replication.ReplicationResponse; import org.elasticsearch.action.update.UpdateResponse; import org.elasticsearch.cluster.ClusterState; import 
org.elasticsearch.cluster.metadata.IndexMetaData; diff --git a/core/src/test/java/org/elasticsearch/index/BlockUntilRefreshIT.java b/core/src/test/java/org/elasticsearch/index/BlockUntilRefreshIT.java index 061678905ab09..ea62bea01bccc 100644 --- a/core/src/test/java/org/elasticsearch/index/BlockUntilRefreshIT.java +++ b/core/src/test/java/org/elasticsearch/index/BlockUntilRefreshIT.java @@ -19,7 +19,10 @@ package org.elasticsearch.index; +import org.elasticsearch.action.bulk.BulkItemResponse; import org.elasticsearch.action.bulk.BulkRequestBuilder; +import org.elasticsearch.action.bulk.BulkResponse; +import org.elasticsearch.action.delete.DeleteResponse; import org.elasticsearch.action.index.IndexResponse; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.test.ESIntegTestCase; @@ -38,6 +41,7 @@ public class BlockUntilRefreshIT extends ESIntegTestCase { public void testIndex() { IndexResponse index = client().prepareIndex("test", "index", "1").setSource("foo", "bar").setBlockUntilRefresh(true).get(); assertEquals(RestStatus.CREATED, index.status()); + assertFalse("request shouldn't have forced a refresh", index.forcedRefresh()); assertSearchHits(client().prepareSearch("test").setQuery(matchQuery("foo", "bar")).get(), "1"); // TODO update!!! } @@ -48,7 +52,9 @@ public void testDelete() throws InterruptedException, ExecutionException { assertSearchHits(client().prepareSearch("test").setQuery(matchQuery("foo", "bar")).get(), "1"); // Now delete with blockUntilRefresh - client().prepareDelete("test", "test", "1").setBlockUntilRefresh(true).get(); + DeleteResponse delete = client().prepareDelete("test", "test", "1").setBlockUntilRefresh(true).get(); + assertTrue("document was deleted", delete.isFound()); + assertFalse("request shouldn't have forced a refresh", delete.forcedRefresh()); assertNoSearchHits(client().prepareSearch("test").setQuery(matchQuery("foo", "bar")).get()); } @@ -56,19 +62,26 @@ public void testBulk() { // Index by bulk with block_until_refresh BulkRequestBuilder bulk = client().prepareBulk().setBlockUntilRefresh(true); bulk.add(client().prepareIndex("test", "index", "1").setSource("foo", "bar")); - assertNoFailures(bulk.get()); + assertBulkSuccess(bulk.get()); assertSearchHits(client().prepareSearch("test").setQuery(matchQuery("foo", "bar")).get(), "1"); - // Update by bult with block_until_refresh + // Update by bulk with block_until_refresh bulk = client().prepareBulk().setBlockUntilRefresh(true); bulk.add(client().prepareUpdate("test", "index", "1").setDoc("foo", "baz")); - assertNoFailures(bulk.get()); + assertBulkSuccess(bulk.get()); assertSearchHits(client().prepareSearch("test").setQuery(matchQuery("foo", "baz")).get(), "1"); - // Update by bult with block_until_refresh + // Update by bulk with block_until_refresh bulk = client().prepareBulk().setBlockUntilRefresh(true); bulk.add(client().prepareDelete("test", "index", "1")); - assertNoFailures(bulk.get()); + assertBulkSuccess(bulk.get()); assertNoSearchHits(client().prepareSearch("test").setQuery(matchQuery("foo", "bar")).get()); } + + private void assertBulkSuccess(BulkResponse response) { + assertNoFailures(response); + for (BulkItemResponse item : response) { + assertFalse("request shouldn't have forced a refresh", item.getResponse().forcedRefresh()); + } + } } From 8d121bf35eb265b8a0aee9710afeb1b054a113d4 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Mon, 25 Apr 2016 15:40:53 -0400 Subject: [PATCH 22/86] Cleanup listener implementation Much more testing too! 
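For reviewers, a rough sketch of the listener contract this cleanup settles on, pieced together from the diffs below (the exact call site and the surrounding variables - indexShard, writeLocation, response, listener - are assumptions, not the final code):

    indexShard.addRefreshListener(new Engine.RefreshListener() {
        @Override
        public Translog.Location location() {
            // The translog location this write produced; the listener fires once a
            // refresh has made everything up to and including this location visible.
            return writeLocation;
        }

        @Override
        public void refreshed(boolean forcedRefresh) {
            // forcedRefresh is true when the shard was out of listener slots
            // (IndexSettings#MAX_REFRESH_LISTENERS_PER_SHARD) and refreshed
            // immediately instead of waiting for the next scheduled refresh.
            if (forcedRefresh) {
                response.setForcedRefresh(true);
            }
            listener.onResponse(response);
        }
    });

The forcedRefresh flag is what DocWriteResponse#forcedRefresh() reports back to callers.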
--- .../resources/checkstyle_suppressions.xml | 1 - .../action/DocWriteResponse.java | 1 + .../ReplicatedMutationResponse.java | 3 + .../replication/ReplicationResponse.java | 6 +- .../TransportReplicatedMutationAction.java | 2 +- .../elasticsearch/index/IndexSettings.java | 2 +- .../elasticsearch/index/engine/Engine.java | 38 +++- .../index/engine/InternalEngine.java | 76 +++++++- .../index/engine/ShadowEngine.java | 10 +- .../elasticsearch/index/shard/IndexShard.java | 63 +----- .../index/engine/InternalEngineTests.java | 180 ++++++++++++++++-- .../index/translog/TranslogTests.java | 1 + 12 files changed, 297 insertions(+), 86 deletions(-) diff --git a/buildSrc/src/main/resources/checkstyle_suppressions.xml b/buildSrc/src/main/resources/checkstyle_suppressions.xml index 9f36857c8b207..66bbce92a321a 100644 --- a/buildSrc/src/main/resources/checkstyle_suppressions.xml +++ b/buildSrc/src/main/resources/checkstyle_suppressions.xml @@ -20,7 +20,6 @@ - diff --git a/core/src/main/java/org/elasticsearch/action/DocWriteResponse.java b/core/src/main/java/org/elasticsearch/action/DocWriteResponse.java index af97f815f6f60..9de862e6eda81 100644 --- a/core/src/main/java/org/elasticsearch/action/DocWriteResponse.java +++ b/core/src/main/java/org/elasticsearch/action/DocWriteResponse.java @@ -99,6 +99,7 @@ public boolean forcedRefresh() { return forcedRefresh; } + @Override public void setForcedRefresh(boolean forcedRefresh) { this.forcedRefresh = forcedRefresh; } diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/ReplicatedMutationResponse.java b/core/src/main/java/org/elasticsearch/action/support/replication/ReplicatedMutationResponse.java index 07234931184d0..8547e632661f0 100644 --- a/core/src/main/java/org/elasticsearch/action/support/replication/ReplicatedMutationResponse.java +++ b/core/src/main/java/org/elasticsearch/action/support/replication/ReplicatedMutationResponse.java @@ -23,5 +23,8 @@ * Base class for responses that modify data in some shard like delete, index, and shardBulk. */ public abstract class ReplicatedMutationResponse extends ReplicationResponse { + /** + * Mark the request as to forced refresh or not. + */ public abstract void setForcedRefresh(boolean forcedRefresh); } diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationResponse.java b/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationResponse.java index bacdb9bfb2059..181e0e0ea746c 100644 --- a/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationResponse.java +++ b/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationResponse.java @@ -81,14 +81,16 @@ public ShardInfo(int total, int successful, Failure... failures) { } /** - * @return the total number of shards the write should go to (replicas and primaries). This includes relocating shards, so this number can be higher than the number of shards. + * @return the total number of shards the write should go to (replicas and primaries). This includes relocating shards, so this + * number can be higher than the number of shards. */ public int getTotal() { return total; } /** - * @return the total number of shards the write succeeded on (replicas and primaries). This includes relocating shards, so this number can be higher than the number of shards. + * @return the total number of shards the write succeeded on (replicas and primaries). This includes relocating shards, so this + * number can be higher than the number of shards. 
*/ public int getSuccessful() { return successful; diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicatedMutationAction.java b/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicatedMutationAction.java index bd886b4948cfe..baa0ee253f1d6 100644 --- a/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicatedMutationAction.java +++ b/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicatedMutationAction.java @@ -26,8 +26,8 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.IndexService; +import org.elasticsearch.index.engine.Engine.RefreshListener; import org.elasticsearch.index.shard.IndexShard; -import org.elasticsearch.index.shard.IndexShard.RefreshListener; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.translog.Translog; import org.elasticsearch.index.translog.Translog.Location; diff --git a/core/src/main/java/org/elasticsearch/index/IndexSettings.java b/core/src/main/java/org/elasticsearch/index/IndexSettings.java index c94a7e62968ea..cb0f6fa447fd3 100644 --- a/core/src/main/java/org/elasticsearch/index/IndexSettings.java +++ b/core/src/main/java/org/elasticsearch/index/IndexSettings.java @@ -232,7 +232,7 @@ public IndexSettings(final IndexMetaData indexMetaData, final Settings nodeSetti maxResultWindow = scopedSettings.get(MAX_RESULT_WINDOW_SETTING); maxRescoreWindow = scopedSettings.get(MAX_RESCORE_WINDOW_SETTING); TTLPurgeDisabled = scopedSettings.get(INDEX_TTL_DISABLE_PURGE_SETTING); -// maxRefreshListeners = scopedSettings.get(MAX_REFRESH_LISTENERS_PER_SHARD); + maxRefreshListeners = scopedSettings.get(MAX_REFRESH_LISTENERS_PER_SHARD); this.mergePolicyConfig = new MergePolicyConfig(logger, this); assert indexNameMatcher.test(indexMetaData.getIndex().getName()); diff --git a/core/src/main/java/org/elasticsearch/index/engine/Engine.java b/core/src/main/java/org/elasticsearch/index/engine/Engine.java index 4b5af28141615..0bb2f9b4fec75 100644 --- a/core/src/main/java/org/elasticsearch/index/engine/Engine.java +++ b/core/src/main/java/org/elasticsearch/index/engine/Engine.java @@ -28,15 +28,12 @@ import org.apache.lucene.index.LeafReader; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.SegmentCommitInfo; -import org.apache.lucene.index.SegmentInfo; import org.apache.lucene.index.SegmentInfos; import org.apache.lucene.index.SegmentReader; import org.apache.lucene.index.SnapshotDeletionPolicy; import org.apache.lucene.index.Term; import org.apache.lucene.search.IndexSearcher; -import org.apache.lucene.search.Query; import org.apache.lucene.search.SearcherManager; -import org.apache.lucene.search.join.BitSetProducer; import org.apache.lucene.store.AlreadyClosedException; import org.apache.lucene.store.Directory; import org.apache.lucene.store.IOContext; @@ -65,7 +62,6 @@ import org.elasticsearch.index.mapper.Uid; import org.elasticsearch.index.merge.MergeStats; import org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.index.shard.TranslogRecoveryPerformer; import org.elasticsearch.index.store.Store; import org.elasticsearch.index.translog.Translog; @@ -74,7 +70,6 @@ import java.io.IOException; import java.nio.file.NoSuchFileException; import java.util.Arrays; -import java.util.Collection; import java.util.Comparator; import java.util.HashMap; import java.util.List; @@ -611,10 +606,14 @@ public final 
boolean refreshNeeded() { /** * Synchronously refreshes the engine for new search operations to reflect the latest * changes. - * - * @return the maximum translog location that was made visible by the refresh */ - public abstract Translog.Location refresh(String source) throws EngineException; + @Nullable + public abstract void refresh(String source) throws EngineException; + + /** + * Add a listener for refreshes. + */ + public abstract void addRefreshListener(RefreshListener listener); /** * Called when our engine is using too much heap and should move buffered indexed/deleted documents to disk. @@ -978,6 +977,9 @@ public static class GetResult implements Releasable { public static final GetResult NOT_EXISTS = new GetResult(false, Versions.NOT_FOUND, null); + /** + * Build a realtime get result from the translog. + */ public GetResult(boolean exists, long version, @Nullable Translog.Source source) { this.source = source; this.exists = exists; @@ -986,6 +988,9 @@ public GetResult(boolean exists, long version, @Nullable Translog.Source source) this.searcher = null; } + /** + * Build a non-realtime get result from the searcher. + */ public GetResult(Searcher searcher, Versions.DocIdAndVersion docIdAndVersion) { this.exists = true; this.source = null; @@ -1165,4 +1170,21 @@ public interface Warmer { * This operation will close the engine if the recovery fails. */ public abstract Engine recoverFromTranslog() throws IOException; + + /** + * Called when a refresh includes the location. + */ + public static interface RefreshListener { + /** + * The location to wait for. + */ + Translog.Location location(); + + /** + * Called when the location has been refreshed. + * + * @param forcedRefresh did this request force a refresh because ran out of listener slots? + */ + void refreshed(boolean forcedRefresh); + } } diff --git a/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java b/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java index 5c3e08929583c..de9982058a863 100644 --- a/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java +++ b/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java @@ -32,6 +32,7 @@ import org.apache.lucene.index.SegmentInfos; import org.apache.lucene.index.Term; import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.ReferenceManager; import org.apache.lucene.search.SearcherFactory; import org.apache.lucene.search.SearcherManager; import org.apache.lucene.store.AlreadyClosedException; @@ -67,6 +68,8 @@ import java.io.IOException; import java.util.Arrays; import java.util.HashMap; +import java.util.Iterator; +import java.util.LinkedList; import java.util.List; import java.util.Map; import java.util.Set; @@ -76,6 +79,8 @@ import java.util.concurrent.locks.ReentrantLock; import java.util.function.Function; +import static java.util.Objects.requireNonNull; + /** * */ @@ -114,6 +119,15 @@ public class InternalEngine extends Engine { private final AtomicInteger throttleRequestCount = new AtomicInteger(); private final EngineConfig.OpenMode openMode; private final AtomicBoolean allowCommits = new AtomicBoolean(true); + /** + * Refresh listeners. Uses a LinkedList because we frequently remove items from the front of it. + */ + // NOCOMMIT visibility + private final List refreshListeners = new LinkedList<>(); + /** + * The translog location that was last made visible by a refresh. This is written to while {@link #refreshListeners} is synchronized. 
+ */ + private volatile Translog.Location lastRefreshedLocation; public InternalEngine(EngineConfig engineConfig) throws EngineException { super(engineConfig); @@ -291,6 +305,7 @@ private SearcherManager createSearcherManager() throws EngineException { searcherManager = new SearcherManager(directoryReader, searcherFactory); lastCommittedSegmentInfos = readLastCommittedSegmentInfos(searcherManager, store); success = true; + searcherManager.addListener(new RefreshListenerCallingRefreshListener()); return searcherManager; } catch (IOException e) { maybeFailEngine("start", e); @@ -500,12 +515,10 @@ private void innerDelete(Delete delete) throws IOException { } @Override - public Translog.Location refresh(String source) throws EngineException { + public void refresh(String source) throws EngineException { // we obtain a read lock here, since we don't want a flush to happen while we are refreshing // since it flushes the index as well (though, in terms of concurrency, we are allowed to do it) - Translog.Location location = null; try (ReleasableLock lock = readLock.acquire()) { - location = translog.getLastWriteLocation(); ensureOpen(); searcherManager.maybeRefreshBlocking(); } catch (AlreadyClosedException e) { @@ -524,7 +537,31 @@ public Translog.Location refresh(String source) throws EngineException { maybePruneDeletedTombstones(); versionMapRefreshPending.set(false); mergeScheduler.refreshConfig(); - return location; + } + + @Override + public void addRefreshListener(RefreshListener listener) { + requireNonNull(listener, "listener cannot be null"); + + synchronized (refreshListeners) { + Translog.Location lastRefresh = lastRefreshedLocation; + if (lastRefresh != null && lastRefresh.compareTo(listener.location()) >= 0) { + // Already refreshed, just call the listener + listener.refreshed(false); + return; + } + if (refreshListeners.size() < engineConfig.getIndexSettings().getMaxRefreshListeners()) { + // We have a free slot so register the listener + refreshListeners.add(listener); + return; + } + /* + * No free slot so force a refresh and call the listener in this thread. Do so outside of the synchronized block so any other + * attempts to add a listener can continue. + */ + } + refresh("too_many_listeners"); + listener.refreshed(true); } @Override @@ -1134,4 +1171,35 @@ public void onSettingsChanged() { public MergeStats getMergeStats() { return mergeScheduler.stats(); } + + /** + * Listens to Lucene's {@linkplain ReferenceManager.RefreshListener} and fires off Elasticsearch's {@linkplain RefreshListener}s. + */ + private class RefreshListenerCallingRefreshListener implements ReferenceManager.RefreshListener { + private Translog.Location currentRefreshLocation; + @Override + public void beforeRefresh() throws IOException { + currentRefreshLocation = translog.getLastWriteLocation(); + } + + @Override + public void afterRefresh(boolean didRefresh) throws IOException { + if ( + false == didRefresh || // We didn't refresh so we shouldn't alert anyone to anything + null == currentRefreshLocation // The translog had an empty last write location at the start of the refresh. 
+ ) { + return; + } + synchronized (refreshListeners) { + for (Iterator itr = refreshListeners.iterator(); itr.hasNext();) { + RefreshListener listener = itr.next(); + if (listener.location().compareTo(currentRefreshLocation) <= 0) { + itr.remove(); + engineConfig.getThreadPool().executor(ThreadPool.Names.LISTENER).execute(() -> listener.refreshed(false)); + } + } + lastRefreshedLocation = currentRefreshLocation; + } + } + } } diff --git a/core/src/main/java/org/elasticsearch/index/engine/ShadowEngine.java b/core/src/main/java/org/elasticsearch/index/engine/ShadowEngine.java index 6cbbbb4fe6a15..bf498929f889e 100644 --- a/core/src/main/java/org/elasticsearch/index/engine/ShadowEngine.java +++ b/core/src/main/java/org/elasticsearch/index/engine/ShadowEngine.java @@ -30,7 +30,6 @@ import org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.ReleasableLock; -import org.elasticsearch.index.shard.TranslogRecoveryPerformer; import org.elasticsearch.index.translog.Translog; import java.io.IOException; @@ -182,7 +181,7 @@ public List segments(boolean verbose) { } @Override - public Translog.Location refresh(String source) throws EngineException { + public void refresh(String source) throws EngineException { // we obtain a read lock here, since we don't want a flush to happen while we are refreshing // since it flushes the index as well (though, in terms of concurrency, we are allowed to do it) try (ReleasableLock lock = readLock.acquire()) { @@ -196,8 +195,11 @@ public Translog.Location refresh(String source) throws EngineException { failEngine("refresh failed", t); throw new RefreshFailedEngineException(shardId, t); } - // Return null here because we don't have a translog. "Everything" is visible. - return null; + } + + @Override + public void addRefreshListener(RefreshListener listener) { + throw new UnsupportedOperationException("Can't listen for a refresh on a shadow engine because it doesn't have a translog"); } @Override diff --git a/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java index 40e421f4f2774..a0aec78538819 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java +++ b/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java @@ -63,6 +63,7 @@ import org.elasticsearch.index.codec.CodecService; import org.elasticsearch.index.engine.CommitStats; import org.elasticsearch.index.engine.Engine; +import org.elasticsearch.index.engine.Engine.RefreshListener; import org.elasticsearch.index.engine.EngineClosedException; import org.elasticsearch.index.engine.EngineConfig; import org.elasticsearch.index.engine.EngineException; @@ -114,8 +115,6 @@ import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.EnumSet; -import java.util.Iterator; -import java.util.LinkedList; import java.util.List; import java.util.Map; import java.util.Objects; @@ -125,8 +124,6 @@ import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.atomic.AtomicReference; -import static java.util.Objects.requireNonNull; - public class IndexShard extends AbstractIndexShardComponent { private final ThreadPool threadPool; @@ -197,10 +194,6 @@ public class IndexShard extends AbstractIndexShardComponent { * IndexingMemoryController}). */ private final AtomicBoolean active = new AtomicBoolean(); - /** - * Refresh listeners. 
Uses a LinkedList because we frequently remove items from the front of it. - */ - private final List refreshListeners = new LinkedList<>(); public IndexShard(ShardId shardId, IndexSettings indexSettings, ShardPath path, Store store, IndexCache indexCache, MapperService mapperService, SimilarityService similarityService, IndexFieldDataService indexFieldDataService, @@ -575,6 +568,7 @@ public Engine.GetResult get(Engine.Get get) { */ public void refresh(String source) { verifyNotClosed(); + if (canIndex()) { long bytes = getEngine().getIndexBufferRAMBytesUsed(); writingBytes.addAndGet(bytes); @@ -1547,6 +1541,13 @@ public boolean isRefreshNeeded() { return getEngine().refreshNeeded(); } + /** + * Add a listener for refreshes. + */ + public void addRefreshListener(RefreshListener listener) { + getEngine().addRefreshListener(listener); + } + private class IndexShardRecoveryPerformer extends TranslogRecoveryPerformer { protected IndexShardRecoveryPerformer(ShardId shardId, MapperService mapperService, ESLogger logger) { @@ -1578,50 +1579,4 @@ protected void delete(Engine engine, Engine.Delete engineDelete) { IndexShard.this.delete(engine, engineDelete); } } - - public void addRefreshListener(RefreshListener listener) { - requireNonNull(listener, "listener cannot be null"); - - boolean tooManyListeners = false; - synchronized (refreshListeners) { - if (refreshListeners.size() >= indexSettings.getMaxRefreshListeners()) { - tooManyListeners = true; - } else { - refreshListeners.add(listener); - } - } - if (tooManyListeners) { - refresh("too_many_listeners"); - listener.refreshed(true); - } - } - - void callRefreshListeners(Translog.Location refreshedLocation) { - synchronized (refreshListeners) { - for (Iterator itr = refreshListeners.iterator(); itr.hasNext();) { - RefreshListener listener = itr.next(); - if (listener.location().compareTo(refreshedLocation) <= 0) { - itr.remove(); - threadPool.executor(ThreadPool.Names.LISTENER).execute(() -> listener.refreshed(false)); - } - } - } - } - - /** - * Called when a refresh includes the location. - */ - public static interface RefreshListener { - /** - * The location to wait for. - */ - Translog.Location location(); - - /** - * Called when the location has been refreshed. - * - * @param forcedRefresh did this request force a refresh because ran out of listener slots? 
- */ - void refreshed(boolean forcedRefresh); - } } diff --git a/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java b/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java index 14c978594f526..fd30f3171b9b7 100644 --- a/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java +++ b/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java @@ -70,7 +70,9 @@ import org.elasticsearch.index.VersionType; import org.elasticsearch.index.analysis.AnalysisService; import org.elasticsearch.index.codec.CodecService; +import org.elasticsearch.index.engine.Engine.RefreshListener; import org.elasticsearch.index.engine.Engine.Searcher; +import org.elasticsearch.index.fieldvisitor.SingleFieldsVisitor; import org.elasticsearch.index.mapper.ContentPath; import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.DocumentMapperForType; @@ -92,6 +94,7 @@ import org.elasticsearch.index.store.DirectoryUtils; import org.elasticsearch.index.store.Store; import org.elasticsearch.index.translog.Translog; +import org.elasticsearch.index.translog.Translog.Location; import org.elasticsearch.index.translog.TranslogConfig; import org.elasticsearch.indices.IndicesModule; import org.elasticsearch.indices.mapper.MapperRegistry; @@ -116,8 +119,10 @@ import java.util.Locale; import java.util.Map; import java.util.concurrent.BrokenBarrierException; +import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.CountDownLatch; import java.util.concurrent.CyclicBarrier; +import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; @@ -308,24 +313,26 @@ public void onFailedEngine(String reason, @Nullable Throwable t) { public void testSegments() throws Exception { try (Store store = createStore(); - Engine engine = createEngine(defaultSettings, store, createTempDir(), NoMergePolicy.INSTANCE)) { + Engine engine = createEngine(defaultSettings, store, createTempDir(), NoMergePolicy.INSTANCE)) { List segments = engine.segments(false); assertThat(segments.isEmpty(), equalTo(true)); assertThat(engine.segmentsStats(false).getCount(), equalTo(0L)); assertThat(engine.segmentsStats(false).getMemoryInBytes(), equalTo(0L)); - // create a doc and refresh + // create two docs and refresh ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocumentWithTextField(), B_1, null); - Engine.Index index = new Engine.Index(newUid("1"), doc); - engine.index(index); - Translog.Location firstIndexedLocation = index.getTranslogLocation(); + Engine.Index first = new Engine.Index(newUid("1"), doc); + engine.index(first); ParsedDocument doc2 = testParsedDocument("2", "2", "test", null, -1, -1, testDocumentWithTextField(), B_2, null); - index = new Engine.Index(newUid("2"), doc2); - engine.index(index); - Translog.Location secondIndexedLocation = index.getTranslogLocation(); - assertThat(secondIndexedLocation, greaterThan(firstIndexedLocation)); - Translog.Location refreshedLocation = engine.refresh("test"); - assertEquals(secondIndexedLocation, refreshedLocation); + Engine.Index second = new Engine.Index(newUid("2"), doc2); + engine.index(second); + assertThat(second.getTranslogLocation(), greaterThan(first.getTranslogLocation())); + DummyRefreshListener refreshListener = new DummyRefreshListener(second.getTranslogLocation()); + engine.addRefreshListener(refreshListener); + 
engine.refresh("test"); + assertBusy(() -> assertNotNull("The listener should be called in the listener threadpool soon after the refresh", + refreshListener.forcedRefresh.get())); + assertFalse("We didn't force a refresh with the index operations!?", refreshListener.forcedRefresh.get()); segments = engine.segments(false); assertThat(segments.size(), equalTo(1)); @@ -2115,4 +2122,155 @@ public void testCurrentTranslogIDisCommitted() throws IOException { } } } + + public void testTooManyRefreshListeners() throws Exception { + try (Store store = createStore(); + Engine engine = createEngine(defaultSettings, store, createTempDir(), NoMergePolicy.INSTANCE)) { + ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocumentWithTextField(), B_1, null); + Engine.Index index = new Engine.Index(newUid("1"), doc); + engine.index(index); + + // Fill the listener slots + List nonForcedListeners = new ArrayList<>(INDEX_SETTINGS.getMaxRefreshListeners()); + for (int i = 0; i < defaultSettings.getMaxRefreshListeners(); i++) { + DummyRefreshListener listener = new DummyRefreshListener(index.getTranslogLocation()); + nonForcedListeners.add(listener); + engine.addRefreshListener(listener); + } + + // We shouldn't have called any of them + for (DummyRefreshListener listener : nonForcedListeners) { + assertNull("Called listener too early!", listener.forcedRefresh.get()); + } + + // Add one more listener which should cause a refresh. In this thread, no less. + DummyRefreshListener forcingListener = new DummyRefreshListener(index.getTranslogLocation()); + engine.addRefreshListener(forcingListener); + assertTrue("Forced listener wasn't forced?", forcingListener.forcedRefresh.get()); + + // That forces all the listeners through. On the listener thread pool so give them some time with assertBusy. + for (DummyRefreshListener listener : nonForcedListeners) { + assertBusy( + () -> assertEquals("Expected listener called with unforced refresh!", Boolean.FALSE, listener.forcedRefresh.get())); + } + } + } + + public void testAddRefreshListenerAfterRefresh() throws Exception { + try (Store store = createStore(); + Engine engine = createEngine(defaultSettings, store, createTempDir(), NoMergePolicy.INSTANCE)) { + ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocumentWithTextField(), B_1, null); + Engine.Index index = new Engine.Index(newUid("1"), doc); + engine.index(index); + engine.refresh("I said so"); + if (randomBoolean()) { + engine.index(new Engine.Index(newUid("1"), doc)); + if (randomBoolean()) { + engine.refresh("I said so"); + } + } + + DummyRefreshListener listener = new DummyRefreshListener(index.getTranslogLocation()); + engine.addRefreshListener(listener); + assertFalse(listener.forcedRefresh.get()); + } + } + + /** + * Uses a whole bunch of threads to index, wait for refresh, and non-realtime get documents to validate that they are visible after + * waiting regardless of what crazy sequence of events causes the refresh listener to fire. 
+ */ + public void testAddRefreshListenerLotsOfThreads() throws Exception { + int threadCount = between(5, defaultSettings.getMaxRefreshListeners() * 2); + long runTime = TimeUnit.SECONDS.toNanos(5); + try (Store store = createStore(); + Engine engine = createEngine(defaultSettings, store, createTempDir(), NoMergePolicy.INSTANCE)) { + AtomicBoolean run = new AtomicBoolean(true); + CopyOnWriteArrayList failures = new CopyOnWriteArrayList<>(); + + // These threads add and block until the refresh makes the change visible and then do a non-realtime get. + Thread[] threads = new Thread[threadCount]; + for (int i = 0; i < threadCount; i++) { + final String id = String.format("%04d", i); + final Term uid = newUid(id); + threads[i] = new Thread(() -> { + int iteration = 0; + while (run.get()) { + try { + iteration++; + Document document = testDocument(); + String value = id + "i" + iteration; + document.add(new TextField("test", value, Field.Store.YES)); + ParsedDocument doc = testParsedDocument(id, id, "test", null, -1, -1, document, B_1, null); + Engine.Index index = new Engine.Index(uid, doc); + boolean created = engine.index(index); + assertEquals(iteration, index.version()); + assertEquals(iteration == 1, created); + + DummyRefreshListener listener = new DummyRefreshListener(index.getTranslogLocation()); + engine.addRefreshListener(listener); + assertBusy(() -> assertNotNull(listener.forcedRefresh.get())); + + Engine.Get get = new Engine.Get(false, uid); + try (Engine.GetResult getResult = engine.get(get)) { + assertTrue("document not found", getResult.exists()); + assertEquals(iteration, getResult.version()); + SingleFieldsVisitor visitor = new SingleFieldsVisitor("test"); + getResult.docIdAndVersion().context.reader().document(getResult.docIdAndVersion().docId, visitor); + assertEquals(Arrays.asList(value), visitor.fields().get("test")); + } + } catch (Throwable t) { + failures.add(new RuntimeException("failure on the [" + iteration + "] iteration of thread [" + id + "]", t)); + } + } + }); + threads[i].start(); + } + long end = System.nanoTime() + runTime; + while (System.nanoTime() < end && failures.isEmpty()) { + Thread.sleep(100); + engine.refresh("because test"); + } + run.set(false); + // Give the threads a second to finish whatever they were doing - refreshing all the time so they'll finish any blocking. + end = System.nanoTime() + TimeUnit.SECONDS.toNanos(1); + while (System.nanoTime() < end) { + Thread.sleep(100); + engine.refresh("because test"); + } + for (Thread thread : threads) { + thread.join(); + } + if (failures.isEmpty()) { + return; + } + RuntimeException e = new RuntimeException("there were failures"); + for (Throwable failure: failures) { + e.addSuppressed(failure); + } + throw e; + } + } + + private static class DummyRefreshListener implements RefreshListener { + private final Translog.Location location; + /** + * When the listener is called this captures it's only argument. 
+ */ + private AtomicReference forcedRefresh = new AtomicReference<>(); + + public DummyRefreshListener(Location location) { + this.location = location; + } + + @Override + public Translog.Location location() { + return location; + } + + @Override + public void refreshed(boolean forcedRefresh) { + this.forcedRefresh.set(forcedRefresh); + } + } } diff --git a/core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java b/core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java index 6f9a302da07d9..6e24abe33daf0 100644 --- a/core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java +++ b/core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java @@ -205,6 +205,7 @@ private String randomNonTranslogPatternString(int min, int max) { } public void testRead() throws IOException { + // NOCOMMIT remove the numbers and replace with greater than assertions assertNull(translog.getLastWriteLocation()); Translog.Location loc1 = translog.add(new Translog.Index("test", "1", new byte[]{1})); assertEquals(translog.currentFileGeneration(), translog.getLastWriteLocation().generation); From 1f36966742f851b7328015151ef6fc8f95299af2 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Mon, 25 Apr 2016 15:46:06 -0400 Subject: [PATCH 23/86] Cleanup translog tests --- .../index/translog/TranslogTests.java | 21 +++++++------------ 1 file changed, 8 insertions(+), 13 deletions(-) diff --git a/core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java b/core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java index 6e24abe33daf0..4a08dd8026bc7 100644 --- a/core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java +++ b/core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java @@ -20,6 +20,7 @@ package org.elasticsearch.index.translog; import com.carrotsearch.randomizedtesting.generators.RandomPicks; + import org.apache.lucene.codecs.CodecUtil; import org.apache.lucene.index.Term; import org.apache.lucene.mockfile.FilterFileChannel; @@ -42,7 +43,6 @@ import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; -import org.elasticsearch.index.Index; import org.elasticsearch.index.VersionType; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.test.ESTestCase; @@ -205,31 +205,26 @@ private String randomNonTranslogPatternString(int min, int max) { } public void testRead() throws IOException { - // NOCOMMIT remove the numbers and replace with greater than assertions assertNull(translog.getLastWriteLocation()); Translog.Location loc1 = translog.add(new Translog.Index("test", "1", new byte[]{1})); - assertEquals(translog.currentFileGeneration(), translog.getLastWriteLocation().generation); - assertEquals(43, translog.getLastWriteLocation().translogLocation); + assertEquals(loc1, translog.getLastWriteLocation()); Translog.Location loc2 = translog.add(new Translog.Index("test", "2", new byte[]{2})); - assertEquals(translog.currentFileGeneration(), translog.getLastWriteLocation().generation); - assertEquals(89, translog.getLastWriteLocation().translogLocation); + assertEquals(loc2, translog.getLastWriteLocation()); + assertThat(loc2, greaterThan(loc1)); assertThat(translog.read(loc1).getSource().source.toBytesArray(), equalTo(new BytesArray(new byte[]{1}))); assertThat(translog.read(loc2).getSource().source.toBytesArray(), equalTo(new BytesArray(new byte[]{2}))); translog.sync(); - 
assertEquals(translog.currentFileGeneration(), translog.getLastWriteLocation().generation); - assertEquals(89, translog.getLastWriteLocation().translogLocation); + assertEquals(loc2, translog.getLastWriteLocation()); assertThat(translog.read(loc1).getSource().source.toBytesArray(), equalTo(new BytesArray(new byte[]{1}))); assertThat(translog.read(loc2).getSource().source.toBytesArray(), equalTo(new BytesArray(new byte[]{2}))); Translog.Location loc3 = translog.add(new Translog.Index("test", "2", new byte[]{3})); - assertEquals(translog.currentFileGeneration(), translog.getLastWriteLocation().generation); - assertEquals(135, translog.getLastWriteLocation().translogLocation); + assertEquals(loc3, translog.getLastWriteLocation()); + assertThat(loc3, greaterThan(loc2)); assertThat(translog.read(loc3).getSource().source.toBytesArray(), equalTo(new BytesArray(new byte[]{3}))); translog.sync(); assertThat(translog.read(loc3).getSource().source.toBytesArray(), equalTo(new BytesArray(new byte[]{3}))); - long lastGeneration = translog.currentFileGeneration(); translog.prepareCommit(); - assertEquals(lastGeneration, translog.getLastWriteLocation().generation); - assertEquals(135, translog.getLastWriteLocation().translogLocation); + assertEquals(loc3, translog.getLastWriteLocation()); assertThat(translog.read(loc3).getSource().source.toBytesArray(), equalTo(new BytesArray(new byte[]{3}))); translog.commit(); assertNull(translog.read(loc1)); From 8a80cc70a76375a7593745884cb987535b37ca80 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Mon, 25 Apr 2016 17:38:24 -0400 Subject: [PATCH 24/86] Support for update --- .../action/update/TransportUpdateAction.java | 7 +- .../action/update/UpdateRequest.java | 23 +++++ .../action/update/UpdateRequestBuilder.java | 11 +++ .../index/engine/InternalEngine.java | 2 +- .../index/BlockUntilRefreshIT.java | 94 ++++++++++++++++++- .../index/engine/InternalEngineTests.java | 2 +- 6 files changed, 131 insertions(+), 8 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/action/update/TransportUpdateAction.java b/core/src/main/java/org/elasticsearch/action/update/TransportUpdateAction.java index 0363ef8fe4312..b3f84077e8b5b 100644 --- a/core/src/main/java/org/elasticsearch/action/update/TransportUpdateAction.java +++ b/core/src/main/java/org/elasticsearch/action/update/TransportUpdateAction.java @@ -26,6 +26,7 @@ import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; import org.elasticsearch.action.admin.indices.create.TransportCreateIndexAction; +import org.elasticsearch.action.delete.DeleteRequest; import org.elasticsearch.action.delete.DeleteResponse; import org.elasticsearch.action.delete.TransportDeleteAction; import org.elasticsearch.action.index.IndexRequest; @@ -175,6 +176,7 @@ protected void shardOperation(final UpdateRequest request, final ActionListener< switch (result.operation()) { case UPSERT: IndexRequest upsertRequest = result.action(); + upsertRequest.setBlockUntilRefresh(request.shouldBlockUntilRefresh()); // we fetch it from the index request so we don't generate the bytes twice, its already done in the index request final BytesReference upsertSourceBytes = upsertRequest.source(); indexAction.execute(upsertRequest, new ActionListener() { @@ -212,6 +214,7 @@ protected void doRun() { break; case INDEX: IndexRequest indexRequest = result.action(); + indexRequest.setBlockUntilRefresh(request.shouldBlockUntilRefresh()); // we fetch it from the index request 
so we don't generate the bytes twice, its already done in the index request final BytesReference indexSourceBytes = indexRequest.source(); indexAction.execute(indexRequest, new ActionListener() { @@ -241,7 +244,9 @@ protected void doRun() { }); break; case DELETE: - deleteAction.execute(result.action(), new ActionListener() { + DeleteRequest deleteRequest = result.action(); + deleteRequest.setBlockUntilRefresh(request.shouldBlockUntilRefresh()); + deleteAction.execute(deleteRequest, new ActionListener() { @Override public void onResponse(DeleteResponse response) { UpdateResponse update = new UpdateResponse(response.getShardInfo(), response.getShardId(), response.getType(), response.getId(), response.getVersion(), false); diff --git a/core/src/main/java/org/elasticsearch/action/update/UpdateRequest.java b/core/src/main/java/org/elasticsearch/action/update/UpdateRequest.java index 31f219fd4c7e1..dbd048ea14f5b 100644 --- a/core/src/main/java/org/elasticsearch/action/update/UpdateRequest.java +++ b/core/src/main/java/org/elasticsearch/action/update/UpdateRequest.java @@ -35,6 +35,7 @@ import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.VersionType; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.script.Script; @@ -85,6 +86,12 @@ public class UpdateRequest extends InstanceShardOperationRequest @Nullable private IndexRequest doc; + /** + * Should this request block until all of its results are visible for search? + */ + private boolean blockUntilRefresh = false; + + public UpdateRequest() { } @@ -718,6 +725,20 @@ public UpdateRequest scriptedUpsert(boolean scriptedUpsert) { return this; } + /** + * Should this request block until it has been made visible for search by a refresh? Unlike {@link #refresh(boolean)} this is quite safe + * to use under heavy indexing so long as few total operations use it. See {@link IndexSettings#MAX_REFRESH_LISTENERS_PER_SHARD} for + * the limit. Defaults to false. 
+ */ + public UpdateRequest setBlockUntilRefresh(boolean blockUntilRefresh) { + this.blockUntilRefresh = blockUntilRefresh; + return this; + } + + public boolean shouldBlockUntilRefresh() { + return blockUntilRefresh; + } + @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); @@ -751,6 +772,7 @@ public void readFrom(StreamInput in) throws IOException { versionType = VersionType.fromValue(in.readByte()); detectNoop = in.readBoolean(); scriptedUpsert = in.readBoolean(); + blockUntilRefresh = in.readBoolean(); } @Override @@ -801,6 +823,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeByte(versionType.getValue()); out.writeBoolean(detectNoop); out.writeBoolean(scriptedUpsert); + out.writeBoolean(blockUntilRefresh); } } diff --git a/core/src/main/java/org/elasticsearch/action/update/UpdateRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/update/UpdateRequestBuilder.java index 30b636f4efc68..b9c36a76cf26c 100644 --- a/core/src/main/java/org/elasticsearch/action/update/UpdateRequestBuilder.java +++ b/core/src/main/java/org/elasticsearch/action/update/UpdateRequestBuilder.java @@ -27,6 +27,7 @@ import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.VersionType; import org.elasticsearch.script.Script; @@ -325,6 +326,16 @@ public UpdateRequestBuilder setScriptedUpsert(boolean scriptedUpsert) { return this; } + /** + * Should this request block until it has been made visible for search by a refresh? Unlike {@link #setRefresh(boolean)} this is quite + * safe to use under heavy indexing so long as few total operations use it. See {@link IndexSettings#MAX_REFRESH_LISTENERS_PER_SHARD} + * for the limit. Defaults to false. + */ + public UpdateRequestBuilder setBlockUntilRefresh(boolean blockUntilRefresh) { + request.setBlockUntilRefresh(blockUntilRefresh); + return this; + } + /** * Set the new ttl of the document as a long. Note that if detectNoop is true (the default) * and the source of the document isn't changed then the ttl update won't take diff --git a/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java b/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java index de9982058a863..aaf83f88c6aa9 100644 --- a/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java +++ b/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java @@ -122,7 +122,7 @@ public class InternalEngine extends Engine { /** * Refresh listeners. Uses a LinkedList because we frequently remove items from the front of it. */ - // NOCOMMIT visibility + // TODO replace this with a LinkedTransferQueue? private final List refreshListeners = new LinkedList<>(); /** * The translog location that was last made visible by a refresh. This is written to while {@link #refreshListeners} is synchronized. 
diff --git a/core/src/test/java/org/elasticsearch/index/BlockUntilRefreshIT.java b/core/src/test/java/org/elasticsearch/index/BlockUntilRefreshIT.java index ea62bea01bccc..6312b5eee6f18 100644 --- a/core/src/test/java/org/elasticsearch/index/BlockUntilRefreshIT.java +++ b/core/src/test/java/org/elasticsearch/index/BlockUntilRefreshIT.java @@ -24,18 +24,29 @@ import org.elasticsearch.action.bulk.BulkResponse; import org.elasticsearch.action.delete.DeleteResponse; import org.elasticsearch.action.index.IndexResponse; +import org.elasticsearch.action.update.UpdateResponse; +import org.elasticsearch.plugins.Plugin; import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.script.ExecutableScript; +import org.elasticsearch.script.NativeScriptFactory; +import org.elasticsearch.script.Script; +import org.elasticsearch.script.ScriptModule; +import org.elasticsearch.script.ScriptService.ScriptType; import org.elasticsearch.test.ESIntegTestCase; +import java.util.Collection; +import java.util.Map; import java.util.concurrent.ExecutionException; +import static java.util.Collections.emptyMap; +import static java.util.Collections.singleton; import static org.elasticsearch.index.query.QueryBuilders.matchQuery; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoSearchHits; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHits; /** - * Tests that requests with block_until_refresh set to true will be visible when they return. + * Tests that requests with block_until_refresh set to true will be visible when they return. */ public class BlockUntilRefreshIT extends ESIntegTestCase { public void testIndex() { @@ -43,7 +54,6 @@ public void testIndex() { assertEquals(RestStatus.CREATED, index.status()); assertFalse("request shouldn't have forced a refresh", index.forcedRefresh()); assertSearchHits(client().prepareSearch("test").setQuery(matchQuery("foo", "bar")).get(), "1"); - // TODO update!!! 
} public void testDelete() throws InterruptedException, ExecutionException { @@ -58,22 +68,47 @@ public void testDelete() throws InterruptedException, ExecutionException { assertNoSearchHits(client().prepareSearch("test").setQuery(matchQuery("foo", "bar")).get()); } + public void testUpdate() throws InterruptedException, ExecutionException { + // Index normally + indexRandom(true, client().prepareIndex("test", "test", "1").setSource("foo", "bar")); + assertSearchHits(client().prepareSearch("test").setQuery(matchQuery("foo", "bar")).get(), "1"); + + // Update with block_until_refresh + UpdateResponse update = client().prepareUpdate("test", "test", "1").setDoc("foo", "baz").setBlockUntilRefresh(true).get(); + assertEquals(2, update.getVersion()); + assertFalse("request shouldn't have forced a refresh", update.forcedRefresh()); + assertSearchHits(client().prepareSearch("test").setQuery(matchQuery("foo", "baz")).get(), "1"); + + // Upsert with block_until_refresh + update = client().prepareUpdate("test", "test", "2").setDocAsUpsert(true).setDoc("foo", "cat").setBlockUntilRefresh(true).get(); + assertEquals(1, update.getVersion()); + assertFalse("request shouldn't have forced a refresh", update.forcedRefresh()); + assertSearchHits(client().prepareSearch("test").setQuery(matchQuery("foo", "cat")).get(), "2"); + + // Update-becomes-delete with block_until_refresh + update = client().prepareUpdate("test", "test", "2").setScript(new Script("delete_plz", ScriptType.INLINE, "native", emptyMap())) + .setBlockUntilRefresh(true).get(); + assertEquals(2, update.getVersion()); + assertFalse("request shouldn't have forced a refresh", update.forcedRefresh()); + assertNoSearchHits(client().prepareSearch("test").setQuery(matchQuery("foo", "cat")).get()); + } + public void testBulk() { // Index by bulk with block_until_refresh BulkRequestBuilder bulk = client().prepareBulk().setBlockUntilRefresh(true); - bulk.add(client().prepareIndex("test", "index", "1").setSource("foo", "bar")); + bulk.add(client().prepareIndex("test", "test", "1").setSource("foo", "bar")); assertBulkSuccess(bulk.get()); assertSearchHits(client().prepareSearch("test").setQuery(matchQuery("foo", "bar")).get(), "1"); // Update by bulk with block_until_refresh bulk = client().prepareBulk().setBlockUntilRefresh(true); - bulk.add(client().prepareUpdate("test", "index", "1").setDoc("foo", "baz")); + bulk.add(client().prepareUpdate("test", "test", "1").setDoc("foo", "baz")); assertBulkSuccess(bulk.get()); assertSearchHits(client().prepareSearch("test").setQuery(matchQuery("foo", "baz")).get(), "1"); // Update by bulk with block_until_refresh bulk = client().prepareBulk().setBlockUntilRefresh(true); - bulk.add(client().prepareDelete("test", "index", "1")); + bulk.add(client().prepareDelete("test", "test", "1")); assertBulkSuccess(bulk.get()); assertNoSearchHits(client().prepareSearch("test").setQuery(matchQuery("foo", "bar")).get()); } @@ -84,4 +119,53 @@ private void assertBulkSuccess(BulkResponse response) { assertFalse("request shouldn't have forced a refresh", item.getResponse().forcedRefresh()); } } + + @Override + protected Collection> nodePlugins() { + return singleton(DeletePlzPlugin.class); + } + + public static class DeletePlzPlugin extends Plugin { + @Override + public String name() { + return "delete_please"; + } + + @Override + public String description() { + return "adds a script that converts any update into a delete for testing"; + } + + public void onModule(ScriptModule scriptModule) { + scriptModule.registerScript("delete_plz", 
DeletePlzFactory.class); + } + } + + public static class DeletePlzFactory implements NativeScriptFactory { + @Override + public ExecutableScript newScript(Map params) { + return new ExecutableScript() { + private Map ctx; + + @Override + @SuppressWarnings("unchecked") // Elasicsearch convention + public void setNextVar(String name, Object value) { + if (name.equals("ctx")) { + ctx = (Map) value; + } + } + + @Override + public Object run() { + ctx.put("op", "delete"); + return null; + } + }; + } + + @Override + public boolean needsScores() { + return false; + } + } } diff --git a/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java b/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java index fd30f3171b9b7..00c939409c192 100644 --- a/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java +++ b/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java @@ -2191,7 +2191,7 @@ public void testAddRefreshListenerLotsOfThreads() throws Exception { // These threads add and block until the refresh makes the change visible and then do a non-realtime get. Thread[] threads = new Thread[threadCount]; for (int i = 0; i < threadCount; i++) { - final String id = String.format("%04d", i); + final String id = String.format(Locale.ROOT, "%04d", i); final Term uid = newUid(id); threads[i] = new Thread(() -> { int iteration = 0; From bcfded11515af5e0b3c3e36f3c2f73f5cd26512e Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Mon, 25 Apr 2016 18:14:20 -0400 Subject: [PATCH 25/86] Replace LinkedList and synchronized with LinkedTransferQueue --- .../index/engine/InternalEngine.java | 78 +++++++++++-------- .../index/BlockUntilRefreshIT.java | 2 + .../index/engine/InternalEngineTests.java | 8 +- 3 files changed, 54 insertions(+), 34 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java b/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java index aaf83f88c6aa9..73dbf54b0c660 100644 --- a/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java +++ b/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java @@ -54,6 +54,7 @@ import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.ReleasableLock; import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.engine.Engine.RefreshListener; import org.elasticsearch.index.mapper.Uid; import org.elasticsearch.index.merge.MergeStats; import org.elasticsearch.index.merge.OnGoingMerge; @@ -69,10 +70,10 @@ import java.util.Arrays; import java.util.HashMap; import java.util.Iterator; -import java.util.LinkedList; import java.util.List; import java.util.Map; import java.util.Set; +import java.util.concurrent.LinkedTransferQueue; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.locks.Lock; @@ -122,10 +123,9 @@ public class InternalEngine extends Engine { /** * Refresh listeners. Uses a LinkedList because we frequently remove items from the front of it. */ - // TODO replace this with a LinkedTransferQueue? - private final List refreshListeners = new LinkedList<>(); + private final LinkedTransferQueue refreshListeners = new LinkedTransferQueue<>(); /** - * The translog location that was last made visible by a refresh. This is written to while {@link #refreshListeners} is synchronized. + * The translog location that was last made visible by a refresh. 
*/ private volatile Translog.Location lastRefreshedLocation; @@ -543,23 +543,22 @@ public void refresh(String source) throws EngineException { public void addRefreshListener(RefreshListener listener) { requireNonNull(listener, "listener cannot be null"); - synchronized (refreshListeners) { - Translog.Location lastRefresh = lastRefreshedLocation; - if (lastRefresh != null && lastRefresh.compareTo(listener.location()) >= 0) { - // Already refreshed, just call the listener - listener.refreshed(false); - return; - } - if (refreshListeners.size() < engineConfig.getIndexSettings().getMaxRefreshListeners()) { - // We have a free slot so register the listener - refreshListeners.add(listener); - return; - } - /* - * No free slot so force a refresh and call the listener in this thread. Do so outside of the synchronized block so any other - * attempts to add a listener can continue. - */ + Translog.Location lastRefresh = lastRefreshedLocation; + if (lastRefresh != null && lastRefresh.compareTo(listener.location()) >= 0) { + // Location already visible, just call the listener + listener.refreshed(false); + return; + } + // NOCOMMIT size is slow here + if (refreshListeners.size() < engineConfig.getIndexSettings().getMaxRefreshListeners()) { + // We have a free slot so register the listener + refreshListeners.add(listener); + return; } + /* + * No free slot so force a refresh and call the listener in this thread. Do so outside of the synchronized block so any other + * attempts to add a listener can continue. + */ refresh("too_many_listeners"); listener.refreshed(true); } @@ -1184,21 +1183,36 @@ public void beforeRefresh() throws IOException { @Override public void afterRefresh(boolean didRefresh) throws IOException { - if ( - false == didRefresh || // We didn't refresh so we shouldn't alert anyone to anything - null == currentRefreshLocation // The translog had an empty last write location at the start of the refresh. - ) { + if (false == didRefresh) { + // We didn't refresh so we shouldn't alert anyone to anything. return; } - synchronized (refreshListeners) { - for (Iterator itr = refreshListeners.iterator(); itr.hasNext();) { - RefreshListener listener = itr.next(); - if (listener.location().compareTo(currentRefreshLocation) <= 0) { - itr.remove(); - engineConfig.getThreadPool().executor(ThreadPool.Names.LISTENER).execute(() -> listener.refreshed(false)); - } + if (null == currentRefreshLocation) { + /* + * The translog had an empty last write location at the start of the refresh so we can't alert anyone to anything. This + * usually happens during recovery. The next refresh cycle out to pick up this refresh. + */ + return; + } + /* + * First set the lastRefreshedLocation so listeners that come in locations before that will just execute inline without messing + * around with refreshListeners at all. + */ + lastRefreshedLocation = currentRefreshLocation; + /* + * Now pop all listeners off the front of refreshListeners that are ready to be called. The listeners won't always be in order + * but they should be pretty close because you don't listen to times super far in the future. This prevents us from having to + * iterate over the whole queue on every refresh at the cost of some requests having to wait an extra cycle if they get stuck + * behind a request that missed the refresh cycle. 
+ */ + Iterator itr = refreshListeners.iterator(); + while (itr.hasNext()) { + RefreshListener listener = itr.next(); + if (listener.location().compareTo(currentRefreshLocation) > 0) { + break; } - lastRefreshedLocation = currentRefreshLocation; + itr.remove(); + engineConfig.getThreadPool().executor(ThreadPool.Names.LISTENER).execute(() -> listener.refreshed(false)); } } } diff --git a/core/src/test/java/org/elasticsearch/index/BlockUntilRefreshIT.java b/core/src/test/java/org/elasticsearch/index/BlockUntilRefreshIT.java index 6312b5eee6f18..3ffcba52288ca 100644 --- a/core/src/test/java/org/elasticsearch/index/BlockUntilRefreshIT.java +++ b/core/src/test/java/org/elasticsearch/index/BlockUntilRefreshIT.java @@ -113,6 +113,8 @@ public void testBulk() { assertNoSearchHits(client().prepareSearch("test").setQuery(matchQuery("foo", "bar")).get()); } + // TODO add a test for -1 refresh_interval. Use an explicit refresh to trigger the listener. It might get triggered before - that is ok + private void assertBulkSuccess(BulkResponse response) { assertNoFailures(response); for (BulkItemResponse item : response) { diff --git a/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java b/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java index 00c939409c192..657d359ed825d 100644 --- a/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java +++ b/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java @@ -2209,7 +2209,10 @@ public void testAddRefreshListenerLotsOfThreads() throws Exception { DummyRefreshListener listener = new DummyRefreshListener(index.getTranslogLocation()); engine.addRefreshListener(listener); - assertBusy(() -> assertNotNull(listener.forcedRefresh.get())); + assertBusy(() -> assertNotNull("listener never called", listener.forcedRefresh.get())); + if (threadCount < defaultSettings.getMaxRefreshListeners()) { + assertFalse(listener.forcedRefresh.get()); + } Engine.Get get = new Engine.Get(false, uid); try (Engine.GetResult getResult = engine.get(get)) { @@ -2270,7 +2273,8 @@ public Translog.Location location() { @Override public void refreshed(boolean forcedRefresh) { - this.forcedRefresh.set(forcedRefresh); + Boolean oldValue = this.forcedRefresh.getAndSet(forcedRefresh); + assertNull("Listener called twice", oldValue); } } } From bd531167fe54f1bde6f6d4ddb0a8de5a7bcc18a2 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Mon, 25 Apr 2016 18:21:02 -0400 Subject: [PATCH 26/86] Don't try and set forced refresh on bulk items without a response NullPointerExceptions are bad. If the entire request fails then the user has worse problems then "did these force a refresh". 
--- .../org/elasticsearch/action/bulk/BulkShardResponse.java | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/core/src/main/java/org/elasticsearch/action/bulk/BulkShardResponse.java b/core/src/main/java/org/elasticsearch/action/bulk/BulkShardResponse.java index 19493c54113a6..8ed77149a0592 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/BulkShardResponse.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/BulkShardResponse.java @@ -19,6 +19,7 @@ package org.elasticsearch.action.bulk; +import org.elasticsearch.action.DocWriteResponse; import org.elasticsearch.action.support.replication.ReplicatedMutationResponse; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -53,7 +54,10 @@ public BulkItemResponse[] getResponses() { @Override public void setForcedRefresh(boolean forcedRefresh) { for (BulkItemResponse response : responses) { - response.getResponse().setForcedRefresh(forcedRefresh); + DocWriteResponse r = response.getResponse(); + if (r != null) { + r.setForcedRefresh(forcedRefresh); + } } } From 8250343240de7e63118c663a230a7a314807a754 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Mon, 25 Apr 2016 19:34:22 -0400 Subject: [PATCH 27/86] Switch to estimated count We don't need a linear time count of the number of listeners - a volatile variable is good enough to guess. It probably undercounts more than it overcounts but it isn't a huge problem. --- .../TransportReplicatedMutationAction.java | 5 ++++- .../elasticsearch/index/engine/InternalEngine.java | 13 +++++++++++-- .../index/engine/InternalEngineTests.java | 6 +++++- 3 files changed, 20 insertions(+), 4 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicatedMutationAction.java b/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicatedMutationAction.java index baa0ee253f1d6..9c8a8d34ce5e8 100644 --- a/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicatedMutationAction.java +++ b/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicatedMutationAction.java @@ -76,7 +76,10 @@ protected Request shardOperationOnPrimary(Request request, ActionListener result = onPrimaryShard(indexService, indexShard, request); processAfterWrite(request.refresh(), indexShard, result.location); - result.response.setForcedRefresh(request.refresh()); + if (request.refresh()) { + // Only setForcedRefresh if it is true because this can touch every item in a bulk request + result.response.setForcedRefresh(true); + } if (request.shouldBlockUntilRefresh() && false == request.refresh()) { indexShard.addRefreshListener(new RefreshListener() { @Override diff --git a/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java b/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java index 73dbf54b0c660..eb807dd757adc 100644 --- a/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java +++ b/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java @@ -124,6 +124,12 @@ public class InternalEngine extends Engine { * Refresh listeners. Uses a LinkedList because we frequently remove items from the front of it. */ private final LinkedTransferQueue refreshListeners = new LinkedTransferQueue<>(); + /** + * The estimated size of refreshListenersEstimatedSize used for triggering refresh when the size gets over + * {@link IndexSettings#MAX_REFRESH_LISTENERS_PER_SHARD}. 
No effort is made to correct for threading issues in the size calculation + * beyond it being volatile. + */ + private volatile int refreshListenersEstimatedSize; /** * The translog location that was last made visible by a refresh. */ @@ -550,9 +556,10 @@ public void addRefreshListener(RefreshListener listener) { return; } // NOCOMMIT size is slow here - if (refreshListeners.size() < engineConfig.getIndexSettings().getMaxRefreshListeners()) { + if (refreshListenersEstimatedSize < engineConfig.getIndexSettings().getMaxRefreshListeners()) { // We have a free slot so register the listener refreshListeners.add(listener); + refreshListenersEstimatedSize++; return; } /* @@ -1209,11 +1216,13 @@ public void afterRefresh(boolean didRefresh) throws IOException { while (itr.hasNext()) { RefreshListener listener = itr.next(); if (listener.location().compareTo(currentRefreshLocation) > 0) { - break; + return; } itr.remove(); + refreshListenersEstimatedSize--; engineConfig.getThreadPool().executor(ThreadPool.Names.LISTENER).execute(() -> listener.refreshed(false)); } + refreshListenersEstimatedSize = 0; } } } diff --git a/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java b/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java index 657d359ed825d..e81112d75eb28 100644 --- a/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java +++ b/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java @@ -127,6 +127,7 @@ import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; +import static java.lang.Math.max; import static java.util.Collections.emptyMap; import static org.elasticsearch.index.engine.Engine.Operation.Origin.PRIMARY; import static org.elasticsearch.index.engine.Engine.Operation.Origin.REPLICA; @@ -174,6 +175,8 @@ public void setUp() throws Exception { .put(IndexSettings.INDEX_GC_DELETES_SETTING, "1h") // make sure this doesn't kick in on us .put(EngineConfig.INDEX_CODEC_SETTING.getKey(), codecName) .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexSettings.MAX_REFRESH_LISTENERS_PER_SHARD, + between(10, 10 * IndexSettings.MAX_REFRESH_LISTENERS_PER_SHARD.get(Settings.EMPTY))) .build()); // TODO randomize more settings threadPool = new ThreadPool(getClass().getName()); store = createStore(); @@ -2181,7 +2184,7 @@ public void testAddRefreshListenerAfterRefresh() throws Exception { * waiting regardless of what crazy sequence of events causes the refresh listener to fire. 
*/ public void testAddRefreshListenerLotsOfThreads() throws Exception { - int threadCount = between(5, defaultSettings.getMaxRefreshListeners() * 2); + int threadCount = between(5, max(50, defaultSettings.getMaxRefreshListeners() * 2)); long runTime = TimeUnit.SECONDS.toNanos(5); try (Store store = createStore(); Engine engine = createEngine(defaultSettings, store, createTempDir(), NoMergePolicy.INSTANCE)) { @@ -2226,6 +2229,7 @@ public void testAddRefreshListenerLotsOfThreads() throws Exception { failures.add(new RuntimeException("failure on the [" + iteration + "] iteration of thread [" + id + "]", t)); } } + logger.info("Finished [{}] iterations", iteration); }); threads[i].start(); } From 0c9b0477085c021f503db775640d25668e02f635 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Mon, 25 Apr 2016 20:30:06 -0400 Subject: [PATCH 28/86] REST --- .../action/DocWriteResponse.java | 3 +- .../action/update/TransportUpdateAction.java | 3 ++ .../rest/action/bulk/RestBulkAction.java | 1 + .../rest/action/delete/RestDeleteAction.java | 1 + .../rest/action/index/RestIndexAction.java | 1 + .../rest/action/update/RestUpdateAction.java | 1 + .../resources/rest-api-spec/api/bulk.json | 5 +++ .../resources/rest-api-spec/api/delete.json | 5 +++ .../resources/rest-api-spec/api/index.json | 5 +++ .../resources/rest-api-spec/api/update.json | 5 +++ .../test/bulk/50_block_until_refresh.yaml | 15 ++++++++ .../rest-api-spec/test/delete/50_refresh.yaml | 1 + .../test/delete/55_block_until_refresh.yaml | 34 ++++++++++++++++++ .../test/index/65_block_until_refresh.yaml | 18 ++++++++++ .../rest-api-spec/test/update/60_refresh.yaml | 1 + .../test/update/65_block_until_refresh.yaml | 36 +++++++++++++++++++ 16 files changed, 134 insertions(+), 1 deletion(-) create mode 100644 rest-api-spec/src/main/resources/rest-api-spec/test/bulk/50_block_until_refresh.yaml create mode 100644 rest-api-spec/src/main/resources/rest-api-spec/test/delete/55_block_until_refresh.yaml create mode 100644 rest-api-spec/src/main/resources/rest-api-spec/test/index/65_block_until_refresh.yaml create mode 100644 rest-api-spec/src/main/resources/rest-api-spec/test/update/65_block_until_refresh.yaml diff --git a/core/src/main/java/org/elasticsearch/action/DocWriteResponse.java b/core/src/main/java/org/elasticsearch/action/DocWriteResponse.java index 9de862e6eda81..fa0053bb647a7 100644 --- a/core/src/main/java/org/elasticsearch/action/DocWriteResponse.java +++ b/core/src/main/java/org/elasticsearch/action/DocWriteResponse.java @@ -143,7 +143,8 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.field(Fields._INDEX, shardId.getIndexName()) .field(Fields._TYPE, type) .field(Fields._ID, id) - .field(Fields._VERSION, version); + .field(Fields._VERSION, version) + .field("forced_refresh", forcedRefresh); shardInfo.toXContent(builder, params); return builder; } diff --git a/core/src/main/java/org/elasticsearch/action/update/TransportUpdateAction.java b/core/src/main/java/org/elasticsearch/action/update/TransportUpdateAction.java index b3f84077e8b5b..1e33cc4c32faf 100644 --- a/core/src/main/java/org/elasticsearch/action/update/TransportUpdateAction.java +++ b/core/src/main/java/org/elasticsearch/action/update/TransportUpdateAction.java @@ -189,6 +189,7 @@ public void onResponse(IndexResponse response) { } else { update.setGetResult(null); } + update.setForcedRefresh(response.forcedRefresh()); listener.onResponse(update); } @@ -222,6 +223,7 @@ protected void doRun() { public void onResponse(IndexResponse response) { 
UpdateResponse update = new UpdateResponse(response.getShardInfo(), response.getShardId(), response.getType(), response.getId(), response.getVersion(), response.isCreated()); update.setGetResult(updateHelper.extractGetResult(request, request.concreteIndex(), response.getVersion(), result.updatedSourceAsMap(), result.updateSourceContentType(), indexSourceBytes)); + update.setForcedRefresh(response.forcedRefresh()); listener.onResponse(update); } @@ -251,6 +253,7 @@ protected void doRun() { public void onResponse(DeleteResponse response) { UpdateResponse update = new UpdateResponse(response.getShardInfo(), response.getShardId(), response.getType(), response.getId(), response.getVersion(), false); update.setGetResult(updateHelper.extractGetResult(request, request.concreteIndex(), response.getVersion(), result.updatedSourceAsMap(), result.updateSourceContentType(), null)); + update.setForcedRefresh(response.forcedRefresh()); listener.onResponse(update); } diff --git a/core/src/main/java/org/elasticsearch/rest/action/bulk/RestBulkAction.java b/core/src/main/java/org/elasticsearch/rest/action/bulk/RestBulkAction.java index 620418eb08727..ccd64eeccd641 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/bulk/RestBulkAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/bulk/RestBulkAction.java @@ -85,6 +85,7 @@ public void handleRequest(final RestRequest request, final RestChannel channel, } bulkRequest.timeout(request.paramAsTime("timeout", BulkShardRequest.DEFAULT_TIMEOUT)); bulkRequest.refresh(request.paramAsBoolean("refresh", bulkRequest.refresh())); + bulkRequest.setBlockUntilRefresh(request.paramAsBoolean("block_until_refresh", bulkRequest.shouldBlockUntilRefresh())); bulkRequest.add(request.content(), defaultIndex, defaultType, defaultRouting, defaultFields, defaultPipeline, null, allowExplicitIndex); client.bulk(bulkRequest, new RestBuilderListener(channel) { diff --git a/core/src/main/java/org/elasticsearch/rest/action/delete/RestDeleteAction.java b/core/src/main/java/org/elasticsearch/rest/action/delete/RestDeleteAction.java index 8e3449344c4a1..0f5b5c971eff7 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/delete/RestDeleteAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/delete/RestDeleteAction.java @@ -52,6 +52,7 @@ public void handleRequest(final RestRequest request, final RestChannel channel, deleteRequest.parent(request.param("parent")); // order is important, set it after routing, so it will set the routing deleteRequest.timeout(request.paramAsTime("timeout", DeleteRequest.DEFAULT_TIMEOUT)); deleteRequest.refresh(request.paramAsBoolean("refresh", deleteRequest.refresh())); + deleteRequest.setBlockUntilRefresh(request.paramAsBoolean("block_until_refresh", deleteRequest.shouldBlockUntilRefresh())); deleteRequest.version(RestActions.parseVersion(request)); deleteRequest.versionType(VersionType.fromString(request.param("version_type"), deleteRequest.versionType())); diff --git a/core/src/main/java/org/elasticsearch/rest/action/index/RestIndexAction.java b/core/src/main/java/org/elasticsearch/rest/action/index/RestIndexAction.java index 26dd1eca78d60..d1a854891df98 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/index/RestIndexAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/index/RestIndexAction.java @@ -81,6 +81,7 @@ public void handleRequest(final RestRequest request, final RestChannel channel, indexRequest.source(request.content()); indexRequest.timeout(request.paramAsTime("timeout", 
IndexRequest.DEFAULT_TIMEOUT)); indexRequest.refresh(request.paramAsBoolean("refresh", indexRequest.refresh())); + indexRequest.setBlockUntilRefresh(request.paramAsBoolean("block_until_refresh", indexRequest.shouldBlockUntilRefresh())); indexRequest.version(RestActions.parseVersion(request)); indexRequest.versionType(VersionType.fromString(request.param("version_type"), indexRequest.versionType())); String sOpType = request.param("op_type"); diff --git a/core/src/main/java/org/elasticsearch/rest/action/update/RestUpdateAction.java b/core/src/main/java/org/elasticsearch/rest/action/update/RestUpdateAction.java index 88f90374523be..7a7879dca6ee3 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/update/RestUpdateAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/update/RestUpdateAction.java @@ -59,6 +59,7 @@ public void handleRequest(final RestRequest request, final RestChannel channel, updateRequest.parent(request.param("parent")); updateRequest.timeout(request.paramAsTime("timeout", updateRequest.timeout())); updateRequest.refresh(request.paramAsBoolean("refresh", updateRequest.refresh())); + updateRequest.setBlockUntilRefresh(request.paramAsBoolean("block_until_refresh", updateRequest.shouldBlockUntilRefresh())); String consistencyLevel = request.param("consistency"); if (consistencyLevel != null) { updateRequest.consistencyLevel(WriteConsistencyLevel.fromString(consistencyLevel)); diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/bulk.json b/rest-api-spec/src/main/resources/rest-api-spec/api/bulk.json index 590054b04a425..3d58c9ec982a8 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/bulk.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/bulk.json @@ -16,6 +16,11 @@ } }, "params": { + "block_until_refresh": { + "type" : "boolean", + "default": false, + "description" : "Do not return from the request until the changes this request makes is visible by search" + }, "consistency": { "type" : "enum", "options" : ["one", "quorum", "all"], diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/delete.json b/rest-api-spec/src/main/resources/rest-api-spec/api/delete.json index be09c0179d4be..3eb81f20fbbc6 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/delete.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/delete.json @@ -23,6 +23,11 @@ } }, "params": { + "block_until_refresh": { + "type" : "boolean", + "default": false, + "description" : "Do not return from the request until the changes this request makes is visible by search" + }, "consistency": { "type" : "enum", "options" : ["one", "quorum", "all"], diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/index.json b/rest-api-spec/src/main/resources/rest-api-spec/api/index.json index 5c13f67c2121d..1d7ab116e8c20 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/index.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/index.json @@ -22,6 +22,11 @@ } }, "params": { + "block_until_refresh": { + "type" : "boolean", + "default": false, + "description" : "Do not return from the request until the changes this request makes is visible by search" + }, "consistency": { "type" : "enum", "options" : ["one", "quorum", "all"], diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/update.json b/rest-api-spec/src/main/resources/rest-api-spec/api/update.json index 20fc3524283e5..2a318040e4e42 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/update.json +++ 
b/rest-api-spec/src/main/resources/rest-api-spec/api/update.json @@ -23,6 +23,11 @@ } }, "params": { + "block_until_refresh": { + "type" : "boolean", + "default": false, + "description" : "Do not return from the request until the changes this request makes is visible by search" + }, "consistency": { "type": "enum", "options": ["one", "quorum", "all"], diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/50_block_until_refresh.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/50_block_until_refresh.yaml new file mode 100644 index 0000000000000..614d3ce6bd4f3 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/50_block_until_refresh.yaml @@ -0,0 +1,15 @@ +--- +"block_until_refresh waits until changes are visible in search": + - do: + bulk: + block_until_refresh: true + body: | + {"index": {"_index": "test_index", "_type": "test_type", "_id": "test_id"}} + {"f1": "v1", "f2": 42} + {"index": {"_index": "test_index", "_type": "test_type", "_id": "test_id2"}} + {"f1": "v2", "f2": 47} + + - do: + count: + index: test_index + - match: {count: 2} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/delete/50_refresh.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/delete/50_refresh.yaml index 4d3f9fe039dd7..6cdd135b154f4 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/delete/50_refresh.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/delete/50_refresh.yaml @@ -31,6 +31,7 @@ id: 3 body: { foo: bar } refresh: 1 + - is_true: forced_refresh - do: search: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/delete/55_block_until_refresh.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/delete/55_block_until_refresh.yaml new file mode 100644 index 0000000000000..fb9453e414656 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/delete/55_block_until_refresh.yaml @@ -0,0 +1,34 @@ +--- +"block_until_refresh waits until changes are visible in search": + - do: + index: + index: test_1 + type: test + id: 1 + body: { foo: bar } + refresh: true + - is_true: forced_refresh + + - do: + search: + index: test_1 + type: test + body: + query: { term: { _id: 1 }} + - match: { hits.total: 1 } + + - do: + delete: + index: test_1 + type: test + id: 1 + block_until_refresh: true + - is_false: forced_refresh + + - do: + search: + index: test_1 + type: test + body: + query: { term: { _id: 1 }} + - match: { hits.total: 0 } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/index/65_block_until_refresh.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/index/65_block_until_refresh.yaml new file mode 100644 index 0000000000000..dc1522dee3464 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/index/65_block_until_refresh.yaml @@ -0,0 +1,18 @@ +--- +"block_until_refresh waits until changes are visible in search": + - do: + index: + index: test_1 + type: test + id: 1 + body: { foo: bar } + block_until_refresh: true + - is_false: forced_refresh + + - do: + search: + index: test_1 + type: test + body: + query: { term: { _id: 1 }} + - match: { hits.total: 1 } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/update/60_refresh.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/update/60_refresh.yaml index 6048292ceabff..5dc952084d781 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/update/60_refresh.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/update/60_refresh.yaml @@ -39,6 +39,7 @@ body: 
doc: { foo: baz } upsert: { foo: bar } + - is_true: forced_refresh - do: search: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/update/65_block_until_refresh.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/update/65_block_until_refresh.yaml new file mode 100644 index 0000000000000..02e246229e59d --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/update/65_block_until_refresh.yaml @@ -0,0 +1,36 @@ +--- +"block_until_refresh waits until changes are visible in search": + - do: + index: + index: test_1 + type: test + id: 1 + body: { foo: bar } + refresh: true + - is_true: forced_refresh + + - do: + search: + index: test_1 + type: test + body: + query: { term: { _id: 1 }} + - match: { hits.total: 1 } + + - do: + update: + index: test_1 + type: test + id: 1 + block_until_refresh: true + body: + doc: { test: asdf } + - is_false: forced_refresh + + - do: + search: + index: test_1 + type: test + body: + query: { match: { test: asdf } } + - match: { hits.total: 1 } From e61b7391f91263a4c4d6107bfbc2a828bbcc805c Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Mon, 25 Apr 2016 22:48:09 -0400 Subject: [PATCH 29/86] Trigger listeners even when there is no refresh Each refresh gives us an opportunity to pick up any listeners we may have left behind. --- .../TransportReplicatedMutationAction.java | 4 +-- .../elasticsearch/index/engine/Engine.java | 2 +- .../index/engine/InternalEngine.java | 11 +++---- .../index/BlockUntilRefreshIT.java | 17 +++++++++- .../index/engine/InternalEngineTests.java | 32 +++++++++++++++++++ 5 files changed, 55 insertions(+), 11 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicatedMutationAction.java b/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicatedMutationAction.java index 9c8a8d34ce5e8..287b45e0c672e 100644 --- a/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicatedMutationAction.java +++ b/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicatedMutationAction.java @@ -89,8 +89,8 @@ public Location location() { @Override public void refreshed(boolean forcedRefresh) { - logger.warn("block_until_refresh request ran out of slots and forced a refresh: [{}]", request); if (forcedRefresh) { + logger.warn("block_until_refresh request ran out of slots and forced a refresh: [{}]", request); result.response.setForcedRefresh(true); } listener.onResponse(result.response); @@ -110,7 +110,7 @@ protected final void shardOperationOnReplica(Request request, ActionListener refreshListeners = new LinkedTransferQueue<>(); /** @@ -548,14 +548,14 @@ public void refresh(String source) throws EngineException { @Override public void addRefreshListener(RefreshListener listener) { requireNonNull(listener, "listener cannot be null"); + Translog.Location listenerLocation = requireNonNull(listener.location(), "listener's location cannot be null"); Translog.Location lastRefresh = lastRefreshedLocation; - if (lastRefresh != null && lastRefresh.compareTo(listener.location()) >= 0) { + if (lastRefresh != null && lastRefresh.compareTo(listenerLocation) >= 0) { // Location already visible, just call the listener listener.refreshed(false); return; } - // NOCOMMIT size is slow here if (refreshListenersEstimatedSize < engineConfig.getIndexSettings().getMaxRefreshListeners()) { // We have a free slot so register the listener refreshListeners.add(listener); @@ -1190,10 +1190,7 @@ public void beforeRefresh() throws 
IOException { @Override public void afterRefresh(boolean didRefresh) throws IOException { - if (false == didRefresh) { - // We didn't refresh so we shouldn't alert anyone to anything. - return; - } + // This intentionally ignores didRefresh so a refresh call over the API can force rechecking the refreshListeners. if (null == currentRefreshLocation) { /* * The translog had an empty last write location at the start of the refresh so we can't alert anyone to anything. This diff --git a/core/src/test/java/org/elasticsearch/index/BlockUntilRefreshIT.java b/core/src/test/java/org/elasticsearch/index/BlockUntilRefreshIT.java index 3ffcba52288ca..8df1bbb1c43ce 100644 --- a/core/src/test/java/org/elasticsearch/index/BlockUntilRefreshIT.java +++ b/core/src/test/java/org/elasticsearch/index/BlockUntilRefreshIT.java @@ -19,6 +19,7 @@ package org.elasticsearch.index; +import org.elasticsearch.action.ListenableActionFuture; import org.elasticsearch.action.bulk.BulkItemResponse; import org.elasticsearch.action.bulk.BulkRequestBuilder; import org.elasticsearch.action.bulk.BulkResponse; @@ -113,7 +114,21 @@ public void testBulk() { assertNoSearchHits(client().prepareSearch("test").setQuery(matchQuery("foo", "bar")).get()); } - // TODO add a test for -1 refresh_interval. Use an explicit refresh to trigger the listener. It might get triggered before - that is ok + /** + * Tests that an explicit request makes block_until_refresh return. It doesn't check that block_until_refresh doesn't return until the + * explicit refresh if the interval is -1 because we don't have that kind of control over refresh. It can happen all on its own. + */ + public void testNoRefreshInterval() throws InterruptedException, ExecutionException { + client().admin().indices().prepareCreate("test").setSettings("index.refresh_interval", -1).get(); + ListenableActionFuture index = client().prepareIndex("test", "index", "1").setSource("foo", "bar") + .setBlockUntilRefresh(true).execute(); + while (false == index.isDone()) { + client().admin().indices().prepareRefresh("test").get(); + } + assertEquals(RestStatus.CREATED, index.get().status()); + assertFalse("request shouldn't have forced a refresh", index.get().forcedRefresh()); + assertSearchHits(client().prepareSearch("test").setQuery(matchQuery("foo", "bar")).get(), "1"); + } private void assertBulkSuccess(BulkResponse response) { assertNoFailures(response); diff --git a/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java b/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java index e81112d75eb28..bb0a3ad7277ea 100644 --- a/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java +++ b/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java @@ -2259,6 +2259,38 @@ public void testAddRefreshListenerLotsOfThreads() throws Exception { } } + /** + * It is possible that {@link Engine#addRefreshListener(RefreshListener)} can miss the call to refresh that made the change visible if + * the refresh happens concurrently with the the add. 
+ */ + public void testAddRefreshListenerConcurrentRefresh() throws Exception { + try (Store store = createStore(); + Engine engine = createEngine(defaultSettings, store, createTempDir(), NoMergePolicy.INSTANCE)) { + AtomicBoolean run = new AtomicBoolean(true); + Thread refresher = new Thread(() -> { + while (run.get()) { + engine.refresh("test"); + } + }); + refresher.start(); + try { + for (int i = 0; i < 100; i++) { + ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocumentWithTextField(), B_1, null); + Engine.Index index = new Engine.Index(newUid("1"), doc); + engine.index(index); + + DummyRefreshListener listener = new DummyRefreshListener(index.getTranslogLocation()); + engine.addRefreshListener(listener); + assertBusy(() -> assertNotNull(listener.forcedRefresh.get())); + assertFalse(listener.forcedRefresh.get()); + } + } finally { + run.set(false); + refresher.join(); + } + } + } + private static class DummyRefreshListener implements RefreshListener { private final Translog.Location location; /** From 6c43be821eaf61141d3ec520f988aad3a96a3941 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Mon, 16 May 2016 14:34:39 -0400 Subject: [PATCH 30/86] Rename refresh setter and getter --- .../org/elasticsearch/action/DocWriteResponse.java | 2 +- .../org/elasticsearch/action/bulk/BulkRequest.java | 6 +++--- .../elasticsearch/action/bulk/BulkShardRequest.java | 4 ++-- .../action/delete/DeleteRequestBuilder.java | 2 +- .../action/index/IndexRequestBuilder.java | 2 +- .../support/replication/ReplicatedMutationRequest.java | 6 +++--- .../replication/TransportReplicatedMutationAction.java | 10 +++++----- .../org/elasticsearch/action/update/UpdateHelper.java | 4 ++-- .../org/elasticsearch/action/update/UpdateRequest.java | 6 +++--- .../action/update/UpdateRequestBuilder.java | 2 +- .../rest/action/delete/RestDeleteAction.java | 2 +- .../rest/action/index/RestIndexAction.java | 2 +- .../rest/action/update/RestUpdateAction.java | 2 +- .../elasticsearch/action/bulk/BulkRequestTests.java | 8 ++++---- .../java/org/elasticsearch/aliases/IndexAliasesIT.java | 8 ++++---- 15 files changed, 33 insertions(+), 33 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/action/DocWriteResponse.java b/core/src/main/java/org/elasticsearch/action/DocWriteResponse.java index fa0053bb647a7..4669f490709d4 100644 --- a/core/src/main/java/org/elasticsearch/action/DocWriteResponse.java +++ b/core/src/main/java/org/elasticsearch/action/DocWriteResponse.java @@ -91,7 +91,7 @@ public long getVersion() { } /** - * Did this request force a refresh? Requests that set {@link ReplicatedMutationRequest#refresh(boolean)} to true should always return + * Did this request force a refresh? Requests that set {@link ReplicatedMutationRequest#setRefresh(boolean)} to true should always return * true for this. Requests that set {@link ReplicatedMutationRequest#setBlockUntilRefresh(boolean)} to true should only return this if * they run out of refresh listener slots (see {@link IndexSettings#MAX_REFRESH_LISTENERS_PER_SHARD}). 
*/ diff --git a/core/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java b/core/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java index abe8185218f77..0e36ef1d7d820 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java @@ -518,9 +518,9 @@ public ActionRequestValidationException validate() { } for (ActionRequest request : requests) { // We first check if refresh has been set - if ((request instanceof DeleteRequest && ((DeleteRequest)request).refresh()) || - (request instanceof UpdateRequest && ((UpdateRequest)request).refresh()) || - (request instanceof IndexRequest && ((IndexRequest)request).refresh())) { + if ((request instanceof DeleteRequest && ((DeleteRequest)request).isRefresh()) || + (request instanceof UpdateRequest && ((UpdateRequest)request).isRefresh()) || + (request instanceof IndexRequest && ((IndexRequest)request).isRefresh())) { validationException = addValidationError("Refresh is not supported on an item request, set the refresh flag on the BulkRequest instead.", validationException); } ActionRequestValidationException ex = request.validate(); diff --git a/core/src/main/java/org/elasticsearch/action/bulk/BulkShardRequest.java b/core/src/main/java/org/elasticsearch/action/bulk/BulkShardRequest.java index 6513f3a46fd3e..41978114e3c11 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/BulkShardRequest.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/BulkShardRequest.java @@ -41,7 +41,7 @@ public BulkShardRequest() { BulkShardRequest(BulkRequest bulkRequest, ShardId shardId, boolean refresh, BulkItemRequest[] items) { super(shardId); this.items = items; - this.refresh(refresh); + this.setRefresh(refresh); } BulkItemRequest[] items() { @@ -89,7 +89,7 @@ public String toString() { // This is included in error messages so we'll try to make it somewhat user friendly. StringBuilder b = new StringBuilder("BulkShardRequest to ["); b.append(index).append("] containing [").append(items.length).append("] requests"); - if (refresh()) { + if (isRefresh()) { b.append(" and a refresh"); } return b.toString(); diff --git a/core/src/main/java/org/elasticsearch/action/delete/DeleteRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/delete/DeleteRequestBuilder.java index 1b8de03af518b..d87dc5bfd54f1 100644 --- a/core/src/main/java/org/elasticsearch/action/delete/DeleteRequestBuilder.java +++ b/core/src/main/java/org/elasticsearch/action/delete/DeleteRequestBuilder.java @@ -78,7 +78,7 @@ public DeleteRequestBuilder setRouting(String routing) { * to false. */ public DeleteRequestBuilder setRefresh(boolean refresh) { - request.refresh(refresh); + request.setRefresh(refresh); return this; } diff --git a/core/src/main/java/org/elasticsearch/action/index/IndexRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/index/IndexRequestBuilder.java index 6c299401e744c..7ee910973ca14 100644 --- a/core/src/main/java/org/elasticsearch/action/index/IndexRequestBuilder.java +++ b/core/src/main/java/org/elasticsearch/action/index/IndexRequestBuilder.java @@ -227,7 +227,7 @@ public IndexRequestBuilder setCreate(boolean create) { * to false. 
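As a hedged usage sketch of the two options these builders expose (assuming a connected org.elasticsearch.client.Client named client, and using setBlockUntilRefresh as the method is named at this point in the series): setRefresh(true) forces a refresh on every shard the request touches, while setBlockUntilRefresh(true) only waits until a refresh makes the write visible.

    import org.elasticsearch.action.index.IndexResponse;
    import org.elasticsearch.client.Client;

    public class RefreshOptionsExample {
        /** Index two documents, one forcing a refresh and one only waiting for visibility. */
        public static void indexBothWays(Client client) {
            // Forces a refresh on the shard; fine for tests, expensive under heavy indexing.
            IndexResponse forced = client.prepareIndex("test", "type", "1")
                    .setSource("foo", "bar")
                    .setRefresh(true)
                    .get();
            assert forced.forcedRefresh();

            // Only waits until a refresh makes the write visible; forcedRefresh() should stay
            // false unless the shard ran out of refresh listener slots and refreshed anyway.
            IndexResponse waited = client.prepareIndex("test", "type", "2")
                    .setSource("foo", "bar")
                    .setBlockUntilRefresh(true)
                    .get();
        }
    }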
*/ public IndexRequestBuilder setRefresh(boolean refresh) { - request.refresh(refresh); + request.setRefresh(refresh); return this; } diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/ReplicatedMutationRequest.java b/core/src/main/java/org/elasticsearch/action/support/replication/ReplicatedMutationRequest.java index 1d0c305a9b74a..28b3f9b432e3b 100644 --- a/core/src/main/java/org/elasticsearch/action/support/replication/ReplicatedMutationRequest.java +++ b/core/src/main/java/org/elasticsearch/action/support/replication/ReplicatedMutationRequest.java @@ -52,17 +52,17 @@ public ReplicatedMutationRequest(ShardId shardId) { * to false. */ @SuppressWarnings("unchecked") - public R refresh(boolean refresh) { + public R setRefresh(boolean refresh) { this.refresh = refresh; return (R) this; } - public boolean refresh() { + public boolean isRefresh() { return this.refresh; } /** - * Should this request block until it has been made visible for search by a refresh? Unlike {@link #refresh(boolean)} this is quite safe + * Should this request block until it has been made visible for search by a refresh? Unlike {@link #setRefresh(boolean)} this is quite safe * to use under heavy indexing so long as few total operations use it. See {@link IndexSettings#MAX_REFRESH_LISTENERS_PER_SHARD} for * the limit. A bulk request counts as one request on each shard that it touches. */ diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicatedMutationAction.java b/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicatedMutationAction.java index 287b45e0c672e..1bcd737cfff25 100644 --- a/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicatedMutationAction.java +++ b/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicatedMutationAction.java @@ -75,12 +75,12 @@ protected Request shardOperationOnPrimary(Request request, ActionListener result = onPrimaryShard(indexService, indexShard, request); - processAfterWrite(request.refresh(), indexShard, result.location); - if (request.refresh()) { + processAfterWrite(request.isRefresh(), indexShard, result.location); + if (request.isRefresh()) { // Only setForcedRefresh if it is true because this can touch every item in a bulk request result.response.setForcedRefresh(true); } - if (request.shouldBlockUntilRefresh() && false == request.refresh()) { + if (request.shouldBlockUntilRefresh() && false == request.isRefresh()) { indexShard.addRefreshListener(new RefreshListener() { @Override public Location location() { @@ -109,8 +109,8 @@ protected final void shardOperationOnReplica(Request request, ActionListenertrue. Defaults * to false. */ - public UpdateRequest refresh(boolean refresh) { + public UpdateRequest setRefresh(boolean refresh) { this.refresh = refresh; return this; } - public boolean refresh() { + public boolean isRefresh() { return this.refresh; } @@ -726,7 +726,7 @@ public UpdateRequest scriptedUpsert(boolean scriptedUpsert) { } /** - * Should this request block until it has been made visible for search by a refresh? Unlike {@link #refresh(boolean)} this is quite safe + * Should this request block until it has been made visible for search by a refresh? Unlike {@link #setRefresh(boolean)} this is quite safe * to use under heavy indexing so long as few total operations use it. See {@link IndexSettings#MAX_REFRESH_LISTENERS_PER_SHARD} for * the limit. Defaults to false. 
*/ diff --git a/core/src/main/java/org/elasticsearch/action/update/UpdateRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/update/UpdateRequestBuilder.java index b9c36a76cf26c..cecb3ffd65d98 100644 --- a/core/src/main/java/org/elasticsearch/action/update/UpdateRequestBuilder.java +++ b/core/src/main/java/org/elasticsearch/action/update/UpdateRequestBuilder.java @@ -129,7 +129,7 @@ public UpdateRequestBuilder setVersionType(VersionType versionType) { * to false. */ public UpdateRequestBuilder setRefresh(boolean refresh) { - request.refresh(refresh); + request.setRefresh(refresh); return this; } diff --git a/core/src/main/java/org/elasticsearch/rest/action/delete/RestDeleteAction.java b/core/src/main/java/org/elasticsearch/rest/action/delete/RestDeleteAction.java index 0f5b5c971eff7..5273713f14a84 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/delete/RestDeleteAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/delete/RestDeleteAction.java @@ -51,7 +51,7 @@ public void handleRequest(final RestRequest request, final RestChannel channel, deleteRequest.routing(request.param("routing")); deleteRequest.parent(request.param("parent")); // order is important, set it after routing, so it will set the routing deleteRequest.timeout(request.paramAsTime("timeout", DeleteRequest.DEFAULT_TIMEOUT)); - deleteRequest.refresh(request.paramAsBoolean("refresh", deleteRequest.refresh())); + deleteRequest.setRefresh(request.paramAsBoolean("refresh", deleteRequest.isRefresh())); deleteRequest.setBlockUntilRefresh(request.paramAsBoolean("block_until_refresh", deleteRequest.shouldBlockUntilRefresh())); deleteRequest.version(RestActions.parseVersion(request)); deleteRequest.versionType(VersionType.fromString(request.param("version_type"), deleteRequest.versionType())); diff --git a/core/src/main/java/org/elasticsearch/rest/action/index/RestIndexAction.java b/core/src/main/java/org/elasticsearch/rest/action/index/RestIndexAction.java index d1a854891df98..887e170a27459 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/index/RestIndexAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/index/RestIndexAction.java @@ -80,7 +80,7 @@ public void handleRequest(final RestRequest request, final RestChannel channel, indexRequest.setPipeline(request.param("pipeline")); indexRequest.source(request.content()); indexRequest.timeout(request.paramAsTime("timeout", IndexRequest.DEFAULT_TIMEOUT)); - indexRequest.refresh(request.paramAsBoolean("refresh", indexRequest.refresh())); + indexRequest.setRefresh(request.paramAsBoolean("refresh", indexRequest.isRefresh())); indexRequest.setBlockUntilRefresh(request.paramAsBoolean("block_until_refresh", indexRequest.shouldBlockUntilRefresh())); indexRequest.version(RestActions.parseVersion(request)); indexRequest.versionType(VersionType.fromString(request.param("version_type"), indexRequest.versionType())); diff --git a/core/src/main/java/org/elasticsearch/rest/action/update/RestUpdateAction.java b/core/src/main/java/org/elasticsearch/rest/action/update/RestUpdateAction.java index 7a7879dca6ee3..7e55c6f0341ae 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/update/RestUpdateAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/update/RestUpdateAction.java @@ -58,7 +58,7 @@ public void handleRequest(final RestRequest request, final RestChannel channel, updateRequest.routing(request.param("routing")); updateRequest.parent(request.param("parent")); updateRequest.timeout(request.paramAsTime("timeout", 
updateRequest.timeout())); - updateRequest.refresh(request.paramAsBoolean("refresh", updateRequest.refresh())); + updateRequest.setRefresh(request.paramAsBoolean("refresh", updateRequest.isRefresh())); updateRequest.setBlockUntilRefresh(request.paramAsBoolean("block_until_refresh", updateRequest.shouldBlockUntilRefresh())); String consistencyLevel = request.param("consistency"); if (consistencyLevel != null) { diff --git a/core/src/test/java/org/elasticsearch/action/bulk/BulkRequestTests.java b/core/src/test/java/org/elasticsearch/action/bulk/BulkRequestTests.java index 30552201dfdf0..425d5ef975a9c 100644 --- a/core/src/test/java/org/elasticsearch/action/bulk/BulkRequestTests.java +++ b/core/src/test/java/org/elasticsearch/action/bulk/BulkRequestTests.java @@ -180,12 +180,12 @@ public void testSimpleBulk10() throws Exception { public void testBulkRequestWithRefresh() throws Exception { BulkRequest bulkRequest = new BulkRequest(); // We force here a "id is missing" validation error - bulkRequest.add(new DeleteRequest("index", "type", null).refresh(true)); + bulkRequest.add(new DeleteRequest("index", "type", null).setRefresh(true)); // We force here a "type is missing" validation error bulkRequest.add(new DeleteRequest("index", null, "id")); - bulkRequest.add(new DeleteRequest("index", "type", "id").refresh(true)); - bulkRequest.add(new UpdateRequest("index", "type", "id").doc("{}").refresh(true)); - bulkRequest.add(new IndexRequest("index", "type", "id").source("{}").refresh(true)); + bulkRequest.add(new DeleteRequest("index", "type", "id").setRefresh(true)); + bulkRequest.add(new UpdateRequest("index", "type", "id").doc("{}").setRefresh(true)); + bulkRequest.add(new IndexRequest("index", "type", "id").source("{}").setRefresh(true)); ActionRequestValidationException validate = bulkRequest.validate(); assertThat(validate, notNullValue()); assertThat(validate.validationErrors(), not(empty())); diff --git a/core/src/test/java/org/elasticsearch/aliases/IndexAliasesIT.java b/core/src/test/java/org/elasticsearch/aliases/IndexAliasesIT.java index 08f8970f1e42d..2c10631e6aa59 100644 --- a/core/src/test/java/org/elasticsearch/aliases/IndexAliasesIT.java +++ b/core/src/test/java/org/elasticsearch/aliases/IndexAliasesIT.java @@ -175,10 +175,10 @@ public void testSearchingFilteringAliasesSingleIndex() throws Exception { assertAcked(admin().indices().prepareAliases().addAlias("test", "tests", termQuery("name", "test"))); logger.info("--> indexing against [test]"); - client().index(indexRequest("test").type("type1").id("1").source(source("1", "foo test")).refresh(true)).actionGet(); - client().index(indexRequest("test").type("type1").id("2").source(source("2", "bar test")).refresh(true)).actionGet(); - client().index(indexRequest("test").type("type1").id("3").source(source("3", "baz test")).refresh(true)).actionGet(); - client().index(indexRequest("test").type("type1").id("4").source(source("4", "something else")).refresh(true)).actionGet(); + client().index(indexRequest("test").type("type1").id("1").source(source("1", "foo test")).setRefresh(true)).actionGet(); + client().index(indexRequest("test").type("type1").id("2").source(source("2", "bar test")).setRefresh(true)).actionGet(); + client().index(indexRequest("test").type("type1").id("3").source(source("3", "baz test")).setRefresh(true)).actionGet(); + client().index(indexRequest("test").type("type1").id("4").source(source("4", "something else")).setRefresh(true)).actionGet(); logger.info("--> checking single filtering alias search"); SearchResponse 
searchResponse = client().prepareSearch("foos").setQuery(QueryBuilders.matchAllQuery()).get(); From b971d6d3301c7522b2e7eb90d5d8dd96a77fa625 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Mon, 16 May 2016 14:41:06 -0400 Subject: [PATCH 31/86] Docs for setForcedRefresh --- .../org/elasticsearch/action/bulk/BulkShardResponse.java | 9 +++++++++ .../support/replication/ReplicatedMutationResponse.java | 8 +++++++- 2 files changed, 16 insertions(+), 1 deletion(-) diff --git a/core/src/main/java/org/elasticsearch/action/bulk/BulkShardResponse.java b/core/src/main/java/org/elasticsearch/action/bulk/BulkShardResponse.java index 8ed77149a0592..b4372958cd5f9 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/BulkShardResponse.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/BulkShardResponse.java @@ -20,9 +20,11 @@ package org.elasticsearch.action.bulk; import org.elasticsearch.action.DocWriteResponse; +import org.elasticsearch.action.support.replication.ReplicatedMutationRequest; import org.elasticsearch.action.support.replication.ReplicatedMutationResponse; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.shard.ShardId; import java.io.IOException; @@ -51,6 +53,13 @@ public BulkItemResponse[] getResponses() { return responses; } + /** + * Each DocWriteResponse already has a location for whether or not it forced a refresh so we just set that information on the response. + * + * Requests that set {@link ReplicatedMutationRequest#setRefresh(boolean)} to true should always set this to true. Requests that set + * {@link ReplicatedMutationRequest#setBlockUntilRefresh(boolean)} to true should only set this to true if they run out of refresh + * listener slots (see {@link IndexSettings#MAX_REFRESH_LISTENERS_PER_SHARD}). + */ @Override public void setForcedRefresh(boolean forcedRefresh) { for (BulkItemResponse response : responses) { diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/ReplicatedMutationResponse.java b/core/src/main/java/org/elasticsearch/action/support/replication/ReplicatedMutationResponse.java index 8547e632661f0..2c5d228848245 100644 --- a/core/src/main/java/org/elasticsearch/action/support/replication/ReplicatedMutationResponse.java +++ b/core/src/main/java/org/elasticsearch/action/support/replication/ReplicatedMutationResponse.java @@ -19,12 +19,18 @@ package org.elasticsearch.action.support.replication; +import org.elasticsearch.index.IndexSettings; + /** * Base class for responses that modify data in some shard like delete, index, and shardBulk. */ public abstract class ReplicatedMutationResponse extends ReplicationResponse { /** - * Mark the request as to forced refresh or not. + * Mark the request with if it was forced to refresh the index. All implementations by default assume that the request didn't force a + * refresh unless set otherwise so it mostly only makes sense to call this with {@code true}. Requests that set + * {@link ReplicatedMutationRequest#setRefresh(boolean)} to true should always set this to true. Requests that set + * {@link ReplicatedMutationRequest#setBlockUntilRefresh(boolean)} to true should only set this to true if they run out of refresh + * listener slots (see {@link IndexSettings#MAX_REFRESH_LISTENERS_PER_SHARD}). 
*/ public abstract void setForcedRefresh(boolean forcedRefresh); } From 066da45b08148b266e4173166662fc1b3f66ed53 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Mon, 16 May 2016 15:54:11 -0400 Subject: [PATCH 32/86] Remove RefreshListener interface Just pass a Translog.Location and a Consumer when registering. --- .../action/DocWriteResponse.java | 6 +-- .../ReplicatedMutationRequest.java | 6 +-- .../TransportReplicatedMutationAction.java | 37 ++++----------- .../elasticsearch/index/engine/Engine.java | 24 +++------- .../index/engine/InternalEngine.java | 24 +++++----- .../index/engine/ShadowEngine.java | 3 +- .../elasticsearch/index/shard/IndexShard.java | 12 +++-- .../index/engine/InternalEngineTests.java | 47 ++++++++----------- 8 files changed, 63 insertions(+), 96 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/action/DocWriteResponse.java b/core/src/main/java/org/elasticsearch/action/DocWriteResponse.java index 4669f490709d4..3c0c6e8250e69 100644 --- a/core/src/main/java/org/elasticsearch/action/DocWriteResponse.java +++ b/core/src/main/java/org/elasticsearch/action/DocWriteResponse.java @@ -91,9 +91,9 @@ public long getVersion() { } /** - * Did this request force a refresh? Requests that set {@link ReplicatedMutationRequest#setRefresh(boolean)} to true should always return - * true for this. Requests that set {@link ReplicatedMutationRequest#setBlockUntilRefresh(boolean)} to true should only return this if - * they run out of refresh listener slots (see {@link IndexSettings#MAX_REFRESH_LISTENERS_PER_SHARD}). + * Did this request force a refresh? Requests that set {@link ReplicatedMutationRequest#setRefresh(boolean)} to true should always + * return true for this. Requests that set {@link ReplicatedMutationRequest#setBlockUntilRefresh(boolean)} to true should only return + * this if they run out of refresh listener slots (see {@link IndexSettings#MAX_REFRESH_LISTENERS_PER_SHARD}). */ public boolean forcedRefresh() { return forcedRefresh; diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/ReplicatedMutationRequest.java b/core/src/main/java/org/elasticsearch/action/support/replication/ReplicatedMutationRequest.java index 28b3f9b432e3b..ed016f314890b 100644 --- a/core/src/main/java/org/elasticsearch/action/support/replication/ReplicatedMutationRequest.java +++ b/core/src/main/java/org/elasticsearch/action/support/replication/ReplicatedMutationRequest.java @@ -62,9 +62,9 @@ public boolean isRefresh() { } /** - * Should this request block until it has been made visible for search by a refresh? Unlike {@link #setRefresh(boolean)} this is quite safe - * to use under heavy indexing so long as few total operations use it. See {@link IndexSettings#MAX_REFRESH_LISTENERS_PER_SHARD} for - * the limit. A bulk request counts as one request on each shard that it touches. + * Should this request block until it has been made visible for search by a refresh? Unlike {@link #setRefresh(boolean)} this is quite + * safe to use under heavy indexing so long as few total operations use it. See {@link IndexSettings#MAX_REFRESH_LISTENERS_PER_SHARD} + * for the limit. A bulk request counts as one request on each shard that it touches. 
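A short sketch of the "one request on each shard" point above, using the setter names from this series (this is illustrative code, not code that appears in the patch): a bulk request consumes at most one refresh listener slot on each shard its documents route to, regardless of how many documents it carries.

    import org.elasticsearch.action.bulk.BulkRequest;
    import org.elasticsearch.action.index.IndexRequest;

    public class BulkBlockUntilRefreshExample {
        public static BulkRequest buildBulk() {
            BulkRequest bulk = new BulkRequest();
            bulk.add(new IndexRequest("test", "type", "1").source("field", "one"));
            bulk.add(new IndexRequest("test", "type", "2").source("field", "two"));
            // The whole bulk takes at most one refresh listener slot on each shard it touches,
            // no matter how many documents it carries.
            bulk.setBlockUntilRefresh(true);
            return bulk;
        }
    }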
*/ @SuppressWarnings("unchecked") public R setBlockUntilRefresh(boolean blockUntilRefresh) { diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicatedMutationAction.java b/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicatedMutationAction.java index 1bcd737cfff25..9bf653c71baf8 100644 --- a/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicatedMutationAction.java +++ b/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicatedMutationAction.java @@ -26,11 +26,9 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.IndexService; -import org.elasticsearch.index.engine.Engine.RefreshListener; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.translog.Translog; -import org.elasticsearch.index.translog.Translog.Location; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportResponse; @@ -81,21 +79,12 @@ protected Request shardOperationOnPrimary(Request request, ActionListener { + if (forcedRefresh) { + logger.warn("block_until_refresh request ran out of slots and forced a refresh: [{}]", request); + result.response.setForcedRefresh(true); } - + listener.onResponse(result.response); }); } else { listener.onResponse(result.response); @@ -111,18 +100,10 @@ protected final void shardOperationOnReplica(Request request, ActionListener { + logger.warn("block_until_refresh request ran out of slots and forced a refresh: [{}]", request); + // TODO mark the response?!? + listener.onResponse(TransportResponse.Empty.INSTANCE); }); } else { listener.onResponse(TransportResponse.Empty.INSTANCE); diff --git a/core/src/main/java/org/elasticsearch/index/engine/Engine.java b/core/src/main/java/org/elasticsearch/index/engine/Engine.java index a672c16f36d9d..396c703ea4ea5 100644 --- a/core/src/main/java/org/elasticsearch/index/engine/Engine.java +++ b/core/src/main/java/org/elasticsearch/index/engine/Engine.java @@ -81,6 +81,7 @@ import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReentrantLock; import java.util.concurrent.locks.ReentrantReadWriteLock; +import java.util.function.Consumer; import java.util.function.Function; /** @@ -612,8 +613,12 @@ public final boolean refreshNeeded() { /** * Add a listener for refreshes. + * + * @param location the location to listen for + * @param listener for the refresh. Called with true if registering the listener ran it out of slots and forced a refresh. Called with + * false otherwise. */ - public abstract void addRefreshListener(RefreshListener listener); + public abstract void addRefreshListener(Translog.Location location, Consumer listener); /** * Called when our engine is using too much heap and should move buffered indexed/deleted documents to disk. @@ -1170,21 +1175,4 @@ public interface Warmer { * This operation will close the engine if the recovery fails. */ public abstract Engine recoverFromTranslog() throws IOException; - - /** - * Called when a refresh includes the location. - */ - public static interface RefreshListener { - /** - * The location to wait for. Must not return null and should be fast. - */ - Translog.Location location(); - - /** - * Called when the location has been refreshed. 
- * - * @param forcedRefresh did this request force a refresh because ran out of listener slots? - */ - void refreshed(boolean forcedRefresh); - } } diff --git a/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java b/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java index 3209e963e5c03..9e95f026fd2ac 100644 --- a/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java +++ b/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java @@ -33,6 +33,7 @@ import org.apache.lucene.index.Term; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.ReferenceManager; +import org.apache.lucene.search.ReferenceManager.RefreshListener; import org.apache.lucene.search.SearcherFactory; import org.apache.lucene.search.SearcherManager; import org.apache.lucene.store.AlreadyClosedException; @@ -44,6 +45,7 @@ import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.cluster.routing.Murmur3HashFunction; import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.lease.Releasable; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.lucene.LoggerInfoStream; @@ -54,7 +56,6 @@ import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.ReleasableLock; import org.elasticsearch.index.IndexSettings; -import org.elasticsearch.index.engine.Engine.RefreshListener; import org.elasticsearch.index.mapper.Uid; import org.elasticsearch.index.merge.MergeStats; import org.elasticsearch.index.merge.OnGoingMerge; @@ -78,6 +79,7 @@ import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReentrantLock; +import java.util.function.Consumer; import java.util.function.Function; import static java.util.Objects.requireNonNull; @@ -123,7 +125,7 @@ public class InternalEngine extends Engine { /** * Refresh listeners. While they are not stored in sorted order they are processed as though they are. */ - private final LinkedTransferQueue refreshListeners = new LinkedTransferQueue<>(); + private final LinkedTransferQueue>> refreshListeners = new LinkedTransferQueue<>(); /** * The estimated size of refreshListenersEstimatedSize used for triggering refresh when the size gets over * {@link IndexSettings#MAX_REFRESH_LISTENERS_PER_SHARD}. 
No effort is made to correct for threading issues in the size calculation @@ -546,19 +548,19 @@ public void refresh(String source) throws EngineException { } @Override - public void addRefreshListener(RefreshListener listener) { + public void addRefreshListener(Translog.Location location, Consumer listener) { requireNonNull(listener, "listener cannot be null"); - Translog.Location listenerLocation = requireNonNull(listener.location(), "listener's location cannot be null"); + Translog.Location listenerLocation = requireNonNull(location, "location cannot be null"); Translog.Location lastRefresh = lastRefreshedLocation; if (lastRefresh != null && lastRefresh.compareTo(listenerLocation) >= 0) { // Location already visible, just call the listener - listener.refreshed(false); + listener.accept(false); return; } if (refreshListenersEstimatedSize < engineConfig.getIndexSettings().getMaxRefreshListeners()) { // We have a free slot so register the listener - refreshListeners.add(listener); + refreshListeners.add(new Tuple<>(location, listener)); refreshListenersEstimatedSize++; return; } @@ -567,7 +569,7 @@ public void addRefreshListener(RefreshListener listener) { * attempts to add a listener can continue. */ refresh("too_many_listeners"); - listener.refreshed(true); + listener.accept(true); } @Override @@ -1209,15 +1211,15 @@ public void afterRefresh(boolean didRefresh) throws IOException { * iterate over the whole queue on every refresh at the cost of some requests having to wait an extra cycle if they get stuck * behind a request that missed the refresh cycle. */ - Iterator itr = refreshListeners.iterator(); + Iterator>> itr = refreshListeners.iterator(); while (itr.hasNext()) { - RefreshListener listener = itr.next(); - if (listener.location().compareTo(currentRefreshLocation) > 0) { + Tuple> listener = itr.next(); + if (listener.v1().compareTo(currentRefreshLocation) > 0) { return; } itr.remove(); refreshListenersEstimatedSize--; - engineConfig.getThreadPool().executor(ThreadPool.Names.LISTENER).execute(() -> listener.refreshed(false)); + engineConfig.getThreadPool().executor(ThreadPool.Names.LISTENER).execute(() -> listener.v2().accept(false)); } refreshListenersEstimatedSize = 0; } diff --git a/core/src/main/java/org/elasticsearch/index/engine/ShadowEngine.java b/core/src/main/java/org/elasticsearch/index/engine/ShadowEngine.java index bf498929f889e..a12066639574d 100644 --- a/core/src/main/java/org/elasticsearch/index/engine/ShadowEngine.java +++ b/core/src/main/java/org/elasticsearch/index/engine/ShadowEngine.java @@ -35,6 +35,7 @@ import java.io.IOException; import java.util.Arrays; import java.util.List; +import java.util.function.Consumer; import java.util.function.Function; /** @@ -198,7 +199,7 @@ public void refresh(String source) throws EngineException { } @Override - public void addRefreshListener(RefreshListener listener) { + public void addRefreshListener(Translog.Location location, Consumer listener) { throw new UnsupportedOperationException("Can't listen for a refresh on a shadow engine because it doesn't have a translog"); } diff --git a/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java index a0aec78538819..67df06f353722 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java +++ b/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java @@ -63,7 +63,6 @@ import org.elasticsearch.index.codec.CodecService; import org.elasticsearch.index.engine.CommitStats; import 
org.elasticsearch.index.engine.Engine; -import org.elasticsearch.index.engine.Engine.RefreshListener; import org.elasticsearch.index.engine.EngineClosedException; import org.elasticsearch.index.engine.EngineConfig; import org.elasticsearch.index.engine.EngineException; @@ -123,6 +122,7 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.atomic.AtomicReference; +import java.util.function.Consumer; public class IndexShard extends AbstractIndexShardComponent { @@ -1542,10 +1542,14 @@ public boolean isRefreshNeeded() { } /** - * Add a listener for refreshes. + * Add a listener for refreshes. + * + * @param location the location to listen for + * @param listener for the refresh. Called with true if registering the listener ran it out of slots and forced a refresh. Called with + * false otherwise. */ - public void addRefreshListener(RefreshListener listener) { - getEngine().addRefreshListener(listener); + public void addRefreshListener(Translog.Location location, Consumer listener) { + getEngine().addRefreshListener(location, listener); } private class IndexShardRecoveryPerformer extends TranslogRecoveryPerformer { diff --git a/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java b/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java index bb0a3ad7277ea..1caaf736efddd 100644 --- a/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java +++ b/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java @@ -43,6 +43,7 @@ import org.apache.lucene.index.TieredMergePolicy; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.MatchAllDocsQuery; +import org.apache.lucene.search.ReferenceManager.RefreshListener; import org.apache.lucene.search.TermQuery; import org.apache.lucene.search.TopDocs; import org.apache.lucene.store.AlreadyClosedException; @@ -70,7 +71,6 @@ import org.elasticsearch.index.VersionType; import org.elasticsearch.index.analysis.AnalysisService; import org.elasticsearch.index.codec.CodecService; -import org.elasticsearch.index.engine.Engine.RefreshListener; import org.elasticsearch.index.engine.Engine.Searcher; import org.elasticsearch.index.fieldvisitor.SingleFieldsVisitor; import org.elasticsearch.index.mapper.ContentPath; @@ -94,7 +94,6 @@ import org.elasticsearch.index.store.DirectoryUtils; import org.elasticsearch.index.store.Store; import org.elasticsearch.index.translog.Translog; -import org.elasticsearch.index.translog.Translog.Location; import org.elasticsearch.index.translog.TranslogConfig; import org.elasticsearch.indices.IndicesModule; import org.elasticsearch.indices.mapper.MapperRegistry; @@ -126,6 +125,7 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; +import java.util.function.Consumer; import static java.lang.Math.max; import static java.util.Collections.emptyMap; @@ -330,8 +330,8 @@ public void testSegments() throws Exception { Engine.Index second = new Engine.Index(newUid("2"), doc2); engine.index(second); assertThat(second.getTranslogLocation(), greaterThan(first.getTranslogLocation())); - DummyRefreshListener refreshListener = new DummyRefreshListener(second.getTranslogLocation()); - engine.addRefreshListener(refreshListener); + DummyRefreshListener refreshListener = new DummyRefreshListener(); + engine.addRefreshListener(second.getTranslogLocation(), refreshListener); 
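The same registration can be written with a lambda, which is the point of switching to Consumer<Boolean>. The following fragment is a hedged sketch in the style of these tests; helpers such as newUid, doc, and logger are assumed to come from the surrounding test class and are not defined here.

    Engine.Index op = new Engine.Index(newUid("3"), doc);
    engine.index(op);
    engine.addRefreshListener(op.getTranslogLocation(), forced -> {
        // `forced` is true only when registering the listener itself forced a refresh
        // because every listener slot was already taken.
        if (forced) {
            logger.warn("registration forced a refresh");
        }
        // At this point the indexed document is visible to search.
    });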
engine.refresh("test"); assertBusy(() -> assertNotNull("The listener should be called in the listener threadpool soon after the refresh", refreshListener.forcedRefresh.get())); @@ -2136,9 +2136,9 @@ public void testTooManyRefreshListeners() throws Exception { // Fill the listener slots List nonForcedListeners = new ArrayList<>(INDEX_SETTINGS.getMaxRefreshListeners()); for (int i = 0; i < defaultSettings.getMaxRefreshListeners(); i++) { - DummyRefreshListener listener = new DummyRefreshListener(index.getTranslogLocation()); + DummyRefreshListener listener = new DummyRefreshListener(); nonForcedListeners.add(listener); - engine.addRefreshListener(listener); + engine.addRefreshListener(index.getTranslogLocation(), listener); } // We shouldn't have called any of them @@ -2147,8 +2147,8 @@ public void testTooManyRefreshListeners() throws Exception { } // Add one more listener which should cause a refresh. In this thread, no less. - DummyRefreshListener forcingListener = new DummyRefreshListener(index.getTranslogLocation()); - engine.addRefreshListener(forcingListener); + DummyRefreshListener forcingListener = new DummyRefreshListener(); + engine.addRefreshListener(index.getTranslogLocation(), forcingListener); assertTrue("Forced listener wasn't forced?", forcingListener.forcedRefresh.get()); // That forces all the listeners through. On the listener thread pool so give them some time with assertBusy. @@ -2173,8 +2173,8 @@ public void testAddRefreshListenerAfterRefresh() throws Exception { } } - DummyRefreshListener listener = new DummyRefreshListener(index.getTranslogLocation()); - engine.addRefreshListener(listener); + DummyRefreshListener listener = new DummyRefreshListener(); + engine.addRefreshListener(index.getTranslogLocation(), listener); assertFalse(listener.forcedRefresh.get()); } } @@ -2210,8 +2210,8 @@ public void testAddRefreshListenerLotsOfThreads() throws Exception { assertEquals(iteration, index.version()); assertEquals(iteration == 1, created); - DummyRefreshListener listener = new DummyRefreshListener(index.getTranslogLocation()); - engine.addRefreshListener(listener); + DummyRefreshListener listener = new DummyRefreshListener(); + engine.addRefreshListener(index.getTranslogLocation(), listener); assertBusy(() -> assertNotNull("listener never called", listener.forcedRefresh.get())); if (threadCount < defaultSettings.getMaxRefreshListeners()) { assertFalse(listener.forcedRefresh.get()); @@ -2260,8 +2260,8 @@ public void testAddRefreshListenerLotsOfThreads() throws Exception { } /** - * It is possible that {@link Engine#addRefreshListener(RefreshListener)} can miss the call to refresh that made the change visible if - * the refresh happens concurrently with the the add. + * It is possible that {@link Engine#addRefreshListener(Translog.Location, Consumer)} can miss the call to refresh that made the change + * visible if the refresh happens concurrently with the the add. 
*/ public void testAddRefreshListenerConcurrentRefresh() throws Exception { try (Store store = createStore(); @@ -2279,8 +2279,8 @@ public void testAddRefreshListenerConcurrentRefresh() throws Exception { Engine.Index index = new Engine.Index(newUid("1"), doc); engine.index(index); - DummyRefreshListener listener = new DummyRefreshListener(index.getTranslogLocation()); - engine.addRefreshListener(listener); + DummyRefreshListener listener = new DummyRefreshListener(); + engine.addRefreshListener(index.getTranslogLocation(), listener); assertBusy(() -> assertNotNull(listener.forcedRefresh.get())); assertFalse(listener.forcedRefresh.get()); } @@ -2291,24 +2291,15 @@ public void testAddRefreshListenerConcurrentRefresh() throws Exception { } } - private static class DummyRefreshListener implements RefreshListener { - private final Translog.Location location; + private static class DummyRefreshListener implements Consumer { /** * When the listener is called this captures it's only argument. */ private AtomicReference forcedRefresh = new AtomicReference<>(); - public DummyRefreshListener(Location location) { - this.location = location; - } - - @Override - public Translog.Location location() { - return location; - } - @Override - public void refreshed(boolean forcedRefresh) { + public void accept(Boolean forcedRefresh) { + assertNotNull(forcedRefresh); Boolean oldValue = this.forcedRefresh.getAndSet(forcedRefresh); assertNull("Listener called twice", oldValue); } From df91cde398eb720143a85a8c6fa19bdc3a74e07d Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Mon, 16 May 2016 16:01:03 -0400 Subject: [PATCH 33/86] unused import --- .../java/org/elasticsearch/index/engine/InternalEngineTests.java | 1 - 1 file changed, 1 deletion(-) diff --git a/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java b/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java index 1caaf736efddd..e36ccd90930b2 100644 --- a/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java +++ b/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java @@ -43,7 +43,6 @@ import org.apache.lucene.index.TieredMergePolicy; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.MatchAllDocsQuery; -import org.apache.lucene.search.ReferenceManager.RefreshListener; import org.apache.lucene.search.TermQuery; import org.apache.lucene.search.TopDocs; import org.apache.lucene.store.AlreadyClosedException; From d8926d5fc1d24b4da8ccff7e0f0907b98c583c41 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Tue, 17 May 2016 11:02:38 -0400 Subject: [PATCH 34/86] Move refresh listeners into IndexShard --- .../elasticsearch/index/engine/Engine.java | 18 ++-- .../index/engine/InternalEngine.java | 93 ------------------ .../index/engine/ShadowEngine.java | 5 - .../elasticsearch/index/shard/IndexShard.java | 94 ++++++++++++++++++- .../index/shard/ShadowIndexShard.java | 7 ++ 5 files changed, 107 insertions(+), 110 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/index/engine/Engine.java b/core/src/main/java/org/elasticsearch/index/engine/Engine.java index 396c703ea4ea5..6da78dc613b24 100644 --- a/core/src/main/java/org/elasticsearch/index/engine/Engine.java +++ b/core/src/main/java/org/elasticsearch/index/engine/Engine.java @@ -33,6 +33,7 @@ import org.apache.lucene.index.SnapshotDeletionPolicy; import org.apache.lucene.index.Term; import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.ReferenceManager.RefreshListener; 
import org.apache.lucene.search.SearcherManager; import org.apache.lucene.store.AlreadyClosedException; import org.apache.lucene.store.Directory; @@ -81,7 +82,6 @@ import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReentrantLock; import java.util.concurrent.locks.ReentrantReadWriteLock; -import java.util.function.Consumer; import java.util.function.Function; /** @@ -604,6 +604,13 @@ public final boolean refreshNeeded() { return false; } + /** + * Register a listener that is called whenever the searcher is refreshed. See {@link SearcherManager#addListener(RefreshListener)}. + */ + public final void registerSearchRefreshListener(RefreshListener listener) { + getSearcherManager().addListener(listener); + } + /** * Synchronously refreshes the engine for new search operations to reflect the latest * changes. @@ -611,15 +618,6 @@ public final boolean refreshNeeded() { @Nullable public abstract void refresh(String source) throws EngineException; - /** - * Add a listener for refreshes. - * - * @param location the location to listen for - * @param listener for the refresh. Called with true if registering the listener ran it out of slots and forced a refresh. Called with - * false otherwise. - */ - public abstract void addRefreshListener(Translog.Location location, Consumer listener); - /** * Called when our engine is using too much heap and should move buffered indexed/deleted documents to disk. */ diff --git a/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java b/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java index 9e95f026fd2ac..9f7bc41add995 100644 --- a/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java +++ b/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java @@ -32,8 +32,6 @@ import org.apache.lucene.index.SegmentInfos; import org.apache.lucene.index.Term; import org.apache.lucene.search.IndexSearcher; -import org.apache.lucene.search.ReferenceManager; -import org.apache.lucene.search.ReferenceManager.RefreshListener; import org.apache.lucene.search.SearcherFactory; import org.apache.lucene.search.SearcherManager; import org.apache.lucene.store.AlreadyClosedException; @@ -45,7 +43,6 @@ import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.cluster.routing.Murmur3HashFunction; import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.lease.Releasable; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.lucene.LoggerInfoStream; @@ -70,20 +67,15 @@ import java.io.IOException; import java.util.Arrays; import java.util.HashMap; -import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Set; -import java.util.concurrent.LinkedTransferQueue; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReentrantLock; -import java.util.function.Consumer; import java.util.function.Function; -import static java.util.Objects.requireNonNull; - /** * */ @@ -122,20 +114,6 @@ public class InternalEngine extends Engine { private final AtomicInteger throttleRequestCount = new AtomicInteger(); private final EngineConfig.OpenMode openMode; private final AtomicBoolean allowCommits = new AtomicBoolean(true); - /** - * Refresh listeners. While they are not stored in sorted order they are processed as though they are. 
- */ - private final LinkedTransferQueue>> refreshListeners = new LinkedTransferQueue<>(); - /** - * The estimated size of refreshListenersEstimatedSize used for triggering refresh when the size gets over - * {@link IndexSettings#MAX_REFRESH_LISTENERS_PER_SHARD}. No effort is made to correct for threading issues in the size calculation - * beyond it being volatile. - */ - private volatile int refreshListenersEstimatedSize; - /** - * The translog location that was last made visible by a refresh. - */ - private volatile Translog.Location lastRefreshedLocation; public InternalEngine(EngineConfig engineConfig) throws EngineException { super(engineConfig); @@ -313,7 +291,6 @@ private SearcherManager createSearcherManager() throws EngineException { searcherManager = new SearcherManager(directoryReader, searcherFactory); lastCommittedSegmentInfos = readLastCommittedSegmentInfos(searcherManager, store); success = true; - searcherManager.addListener(new RefreshListenerCallingRefreshListener()); return searcherManager; } catch (IOException e) { maybeFailEngine("start", e); @@ -547,31 +524,6 @@ public void refresh(String source) throws EngineException { mergeScheduler.refreshConfig(); } - @Override - public void addRefreshListener(Translog.Location location, Consumer listener) { - requireNonNull(listener, "listener cannot be null"); - Translog.Location listenerLocation = requireNonNull(location, "location cannot be null"); - - Translog.Location lastRefresh = lastRefreshedLocation; - if (lastRefresh != null && lastRefresh.compareTo(listenerLocation) >= 0) { - // Location already visible, just call the listener - listener.accept(false); - return; - } - if (refreshListenersEstimatedSize < engineConfig.getIndexSettings().getMaxRefreshListeners()) { - // We have a free slot so register the listener - refreshListeners.add(new Tuple<>(location, listener)); - refreshListenersEstimatedSize++; - return; - } - /* - * No free slot so force a refresh and call the listener in this thread. Do so outside of the synchronized block so any other - * attempts to add a listener can continue. - */ - refresh("too_many_listeners"); - listener.accept(true); - } - @Override public void writeIndexingBuffer() throws EngineException { @@ -1179,49 +1131,4 @@ public void onSettingsChanged() { public MergeStats getMergeStats() { return mergeScheduler.stats(); } - - /** - * Listens to Lucene's {@linkplain ReferenceManager.RefreshListener} and fires off Elasticsearch's {@linkplain RefreshListener}s. - */ - private class RefreshListenerCallingRefreshListener implements ReferenceManager.RefreshListener { - private Translog.Location currentRefreshLocation; - @Override - public void beforeRefresh() throws IOException { - currentRefreshLocation = translog.getLastWriteLocation(); - } - - @Override - public void afterRefresh(boolean didRefresh) throws IOException { - // This intentionally ignores didRefresh so a refresh call over the API can force rechecking the refreshListeners. - if (null == currentRefreshLocation) { - /* - * The translog had an empty last write location at the start of the refresh so we can't alert anyone to anything. This - * usually happens during recovery. The next refresh cycle out to pick up this refresh. - */ - return; - } - /* - * First set the lastRefreshedLocation so listeners that come in locations before that will just execute inline without messing - * around with refreshListeners at all. 
- */ - lastRefreshedLocation = currentRefreshLocation; - /* - * Now pop all listeners off the front of refreshListeners that are ready to be called. The listeners won't always be in order - * but they should be pretty close because you don't listen to times super far in the future. This prevents us from having to - * iterate over the whole queue on every refresh at the cost of some requests having to wait an extra cycle if they get stuck - * behind a request that missed the refresh cycle. - */ - Iterator>> itr = refreshListeners.iterator(); - while (itr.hasNext()) { - Tuple> listener = itr.next(); - if (listener.v1().compareTo(currentRefreshLocation) > 0) { - return; - } - itr.remove(); - refreshListenersEstimatedSize--; - engineConfig.getThreadPool().executor(ThreadPool.Names.LISTENER).execute(() -> listener.v2().accept(false)); - } - refreshListenersEstimatedSize = 0; - } - } } diff --git a/core/src/main/java/org/elasticsearch/index/engine/ShadowEngine.java b/core/src/main/java/org/elasticsearch/index/engine/ShadowEngine.java index a12066639574d..e69e31d88ef7c 100644 --- a/core/src/main/java/org/elasticsearch/index/engine/ShadowEngine.java +++ b/core/src/main/java/org/elasticsearch/index/engine/ShadowEngine.java @@ -198,11 +198,6 @@ public void refresh(String source) throws EngineException { } } - @Override - public void addRefreshListener(Translog.Location location, Consumer listener) { - throw new UnsupportedOperationException("Can't listen for a refresh on a shadow engine because it doesn't have a translog"); - } - @Override public IndexCommit snapshotIndex(boolean flushFirst) throws EngineException { throw new UnsupportedOperationException("Can not take snapshot from a shadow engine"); diff --git a/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java index 67df06f353722..a255db07b9a6f 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java +++ b/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java @@ -27,6 +27,8 @@ import org.apache.lucene.index.Term; import org.apache.lucene.search.Query; import org.apache.lucene.search.QueryCachingPolicy; +import org.apache.lucene.search.ReferenceManager; +import org.apache.lucene.search.ReferenceManager.RefreshListener; import org.apache.lucene.search.UsageTrackingQueryCachingPolicy; import org.apache.lucene.store.AlreadyClosedException; import org.apache.lucene.util.IOUtils; @@ -40,6 +42,7 @@ import org.elasticsearch.cluster.routing.ShardRoutingState; import org.elasticsearch.common.Booleans; import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.lease.Releasable; import org.elasticsearch.common.lease.Releasables; @@ -114,16 +117,20 @@ import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.EnumSet; +import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Objects; import java.util.concurrent.CopyOnWriteArrayList; +import java.util.concurrent.LinkedTransferQueue; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.atomic.AtomicReference; import java.util.function.Consumer; +import static java.util.Objects.requireNonNull; + public class IndexShard extends AbstractIndexShardComponent { private final ThreadPool threadPool; @@ -194,6 +201,20 
@@ public class IndexShard extends AbstractIndexShardComponent { * IndexingMemoryController}). */ private final AtomicBoolean active = new AtomicBoolean(); + /** + * The translog location that was last made visible by a refresh. + */ + private volatile Translog.Location lastRefreshedLocation; + /** + * Refresh listeners. While they are not stored in sorted order they are processed as though they are. + */ + private final LinkedTransferQueue>> refreshListeners = new LinkedTransferQueue<>(); + /** + * The estimated size of refreshListenersEstimatedSize used for triggering refresh when the size gets over + * {@link IndexSettings#MAX_REFRESH_LISTENERS_PER_SHARD}. No effort is made to correct for threading issues in the size calculation + * beyond it being volatile. + */ + private volatile int refreshListenersEstimatedSize; public IndexShard(ShardId shardId, IndexSettings indexSettings, ShardPath path, Store store, IndexCache indexCache, MapperService mapperService, SimilarityService similarityService, IndexFieldDataService indexFieldDataService, @@ -1365,7 +1386,9 @@ private Engine createNewEngine(EngineConfig config) { } protected Engine newEngine(EngineConfig config) { - return engineFactory.newReadWriteEngine(config); + Engine engine = engineFactory.newReadWriteEngine(config); + engine.registerSearchRefreshListener(new RefreshListenerCallingRefreshListener()); + return engine; } /** @@ -1549,7 +1572,27 @@ public boolean isRefreshNeeded() { * false otherwise. */ public void addRefreshListener(Translog.Location location, Consumer listener) { - getEngine().addRefreshListener(location, listener); + requireNonNull(listener, "listener cannot be null"); + Translog.Location listenerLocation = requireNonNull(location, "location cannot be null"); + + Translog.Location lastRefresh = lastRefreshedLocation; + if (lastRefresh != null && lastRefresh.compareTo(listenerLocation) >= 0) { + // Location already visible, just call the listener + listener.accept(false); + return; + } + if (refreshListenersEstimatedSize < indexSettings.getMaxRefreshListeners()) { + // We have a free slot so register the listener + refreshListeners.add(new Tuple<>(location, listener)); + refreshListenersEstimatedSize++; + return; + } + /* + * No free slot so force a refresh and call the listener in this thread. Do so outside of the synchronized block so any other + * attempts to add a listener can continue. + */ + refresh("too_many_listeners"); + listener.accept(true); } private class IndexShardRecoveryPerformer extends TranslogRecoveryPerformer { @@ -1583,4 +1626,51 @@ protected void delete(Engine engine, Engine.Delete engineDelete) { IndexShard.this.delete(engine, engineDelete); } } + + /** + * Listens to Lucene's {@linkplain ReferenceManager.RefreshListener} and fires off listeners added by + * {@linkplain IndexShard#addRefreshListener(Translog.Location, Consumer)}. + */ + private class RefreshListenerCallingRefreshListener implements ReferenceManager.RefreshListener { + private Translog.Location currentRefreshLocation; + + @Override + public void beforeRefresh() throws IOException { + currentRefreshLocation = getEngine().getTranslog().getLastWriteLocation(); + } + + @Override + public void afterRefresh(boolean didRefresh) throws IOException { + // This intentionally ignores didRefresh so a refresh call over the API can force rechecking the refreshListeners. + if (null == currentRefreshLocation) { + /* + * The translog had an empty last write location at the start of the refresh so we can't alert anyone to anything. 
This + * usually happens during recovery. The next refresh cycle out to pick up this refresh. + */ + return; + } + /* + * First set the lastRefreshedLocation so listeners that come in locations before that will just execute inline without messing + * around with refreshListeners at all. + */ + lastRefreshedLocation = currentRefreshLocation; + /* + * Now pop all listeners off the front of refreshListeners that are ready to be called. The listeners won't always be in order + * but they should be pretty close because you don't listen to times super far in the future. This prevents us from having to + * iterate over the whole queue on every refresh at the cost of some requests having to wait an extra cycle if they get stuck + * behind a request that missed the refresh cycle. + */ + Iterator>> itr = refreshListeners.iterator(); + while (itr.hasNext()) { + Tuple> listener = itr.next(); + if (listener.v1().compareTo(currentRefreshLocation) > 0) { + return; + } + itr.remove(); + refreshListenersEstimatedSize--; + threadPool.executor(ThreadPool.Names.LISTENER).execute(() -> listener.v2().accept(false)); + } + refreshListenersEstimatedSize = 0; + } + } } diff --git a/core/src/main/java/org/elasticsearch/index/shard/ShadowIndexShard.java b/core/src/main/java/org/elasticsearch/index/shard/ShadowIndexShard.java index e75973f2ccc14..adc5f92374cf2 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/ShadowIndexShard.java +++ b/core/src/main/java/org/elasticsearch/index/shard/ShadowIndexShard.java @@ -31,12 +31,14 @@ import org.elasticsearch.index.merge.MergeStats; import org.elasticsearch.index.similarity.SimilarityService; import org.elasticsearch.index.store.Store; +import org.elasticsearch.index.translog.Translog; import org.elasticsearch.index.translog.TranslogStats; import org.elasticsearch.threadpool.ThreadPool; import java.io.IOException; import java.util.Collections; import java.util.List; +import java.util.function.Consumer; /** * ShadowIndexShard extends {@link IndexShard} to add file synchronization @@ -100,4 +102,9 @@ public boolean allowsPrimaryPromotion() { public TranslogStats translogStats() { return null; // shadow engine has no translog } + + @Override + public void addRefreshListener(Translog.Location location, Consumer listener) { + throw new UnsupportedOperationException("Can't listen for a refresh on a shadow engine because it doesn't have a translog"); + } } From 179c27c4f829f2c6ded65967652cf85adaf2ae52 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Tue, 17 May 2016 16:35:27 -0400 Subject: [PATCH 35/86] Move refresh listeners into their own class They still live at the IndexShard level but they live on their own in RefreshListeners which interacts with IndexShard using a couple of callbacks and a registration method. This lets us test the listeners without standing up an entire IndexShard. We still test the listeners against an InternalEngine, because the interplay between InternalEngine, Translog, and RefreshListeners is complex and important to get right. 
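To make the callback contract this commit describes concrete, here is a minimal caller-side sketch. It only assumes the IndexShard#addRefreshListener(Translog.Location, Consumer<Boolean>) signature introduced earlier in this series; the RefreshWaiter helper itself is hypothetical illustration, not part of the patch.

    import java.util.concurrent.CountDownLatch;
    import java.util.concurrent.atomic.AtomicBoolean;

    import org.elasticsearch.index.shard.IndexShard;
    import org.elasticsearch.index.translog.Translog;

    /**
     * Hypothetical helper: blocks the current thread until the change written
     * at {@code location} is visible to new searchers on {@code shard}.
     */
    final class RefreshWaiter {
        /**
         * @return true if registering the listener ran the shard out of listener
         *         slots and forced an immediate refresh, false otherwise
         */
        static boolean waitForVisible(IndexShard shard, Translog.Location location) throws InterruptedException {
            CountDownLatch visible = new CountDownLatch(1);
            AtomicBoolean forced = new AtomicBoolean();
            shard.addRefreshListener(location, forcedRefresh -> {
                // Called exactly once: either inline (location already refreshed, or the
                // registration forced a refresh) or later from the LISTENER thread pool
                // once a refresh makes the location visible.
                forced.set(forcedRefresh);
                visible.countDown();
            });
            visible.await();
            return forced.get();
        }
    }

Both IndexShard (which fires listeners on the LISTENER thread pool) and the new RefreshListeners unit tests exercise this same Consumer<Boolean> contract; the helper above just packages it for a blocking caller.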
--- .../elasticsearch/index/shard/IndexShard.java | 97 +----- .../index/shard/RefreshListeners.java | 156 ++++++++++ .../index/engine/InternalEngineTests.java | 184 ----------- .../shard/IndexShardRefreshListenerTests.java | 290 ++++++++++++++++++ 4 files changed, 453 insertions(+), 274 deletions(-) create mode 100644 core/src/main/java/org/elasticsearch/index/shard/RefreshListeners.java create mode 100644 core/src/test/java/org/elasticsearch/index/shard/IndexShardRefreshListenerTests.java diff --git a/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java index a255db07b9a6f..f1da00013e03f 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java +++ b/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java @@ -27,8 +27,6 @@ import org.apache.lucene.index.Term; import org.apache.lucene.search.Query; import org.apache.lucene.search.QueryCachingPolicy; -import org.apache.lucene.search.ReferenceManager; -import org.apache.lucene.search.ReferenceManager.RefreshListener; import org.apache.lucene.search.UsageTrackingQueryCachingPolicy; import org.apache.lucene.store.AlreadyClosedException; import org.apache.lucene.util.IOUtils; @@ -42,7 +40,6 @@ import org.elasticsearch.cluster.routing.ShardRoutingState; import org.elasticsearch.common.Booleans; import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.lease.Releasable; import org.elasticsearch.common.lease.Releasables; @@ -117,20 +114,16 @@ import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.EnumSet; -import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Objects; import java.util.concurrent.CopyOnWriteArrayList; -import java.util.concurrent.LinkedTransferQueue; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.atomic.AtomicReference; import java.util.function.Consumer; -import static java.util.Objects.requireNonNull; - public class IndexShard extends AbstractIndexShardComponent { private final ThreadPool threadPool; @@ -201,20 +194,7 @@ public class IndexShard extends AbstractIndexShardComponent { * IndexingMemoryController}). */ private final AtomicBoolean active = new AtomicBoolean(); - /** - * The translog location that was last made visible by a refresh. - */ - private volatile Translog.Location lastRefreshedLocation; - /** - * Refresh listeners. While they are not stored in sorted order they are processed as though they are. - */ - private final LinkedTransferQueue>> refreshListeners = new LinkedTransferQueue<>(); - /** - * The estimated size of refreshListenersEstimatedSize used for triggering refresh when the size gets over - * {@link IndexSettings#MAX_REFRESH_LISTENERS_PER_SHARD}. No effort is made to correct for threading issues in the size calculation - * beyond it being volatile. 
- */ - private volatile int refreshListenersEstimatedSize; + private final RefreshListeners refreshListeners; public IndexShard(ShardId shardId, IndexSettings indexSettings, ShardPath path, Store store, IndexCache indexCache, MapperService mapperService, SimilarityService similarityService, IndexFieldDataService indexFieldDataService, @@ -265,6 +245,10 @@ public IndexShard(ShardId shardId, IndexSettings indexSettings, ShardPath path, suspendableRefContainer = new SuspendableRefContainer(); searcherWrapper = indexSearcherWrapper; primaryTerm = indexSettings.getIndexMetaData().primaryTerm(shardId.id()); + refreshListeners = new RefreshListeners( + () -> indexSettings.getMaxRefreshListeners(), + () -> refresh("too_many_listeners"), + fire -> threadPool.executor(ThreadPool.Names.LISTENER).execute(fire)); } public Store store() { @@ -1387,7 +1371,7 @@ private Engine createNewEngine(EngineConfig config) { protected Engine newEngine(EngineConfig config) { Engine engine = engineFactory.newReadWriteEngine(config); - engine.registerSearchRefreshListener(new RefreshListenerCallingRefreshListener()); + refreshListeners.listenTo(engine); return engine; } @@ -1572,27 +1556,7 @@ public boolean isRefreshNeeded() { * false otherwise. */ public void addRefreshListener(Translog.Location location, Consumer listener) { - requireNonNull(listener, "listener cannot be null"); - Translog.Location listenerLocation = requireNonNull(location, "location cannot be null"); - - Translog.Location lastRefresh = lastRefreshedLocation; - if (lastRefresh != null && lastRefresh.compareTo(listenerLocation) >= 0) { - // Location already visible, just call the listener - listener.accept(false); - return; - } - if (refreshListenersEstimatedSize < indexSettings.getMaxRefreshListeners()) { - // We have a free slot so register the listener - refreshListeners.add(new Tuple<>(location, listener)); - refreshListenersEstimatedSize++; - return; - } - /* - * No free slot so force a refresh and call the listener in this thread. Do so outside of the synchronized block so any other - * attempts to add a listener can continue. - */ - refresh("too_many_listeners"); - listener.accept(true); + refreshListeners.add(location, listener); } private class IndexShardRecoveryPerformer extends TranslogRecoveryPerformer { @@ -1626,51 +1590,4 @@ protected void delete(Engine engine, Engine.Delete engineDelete) { IndexShard.this.delete(engine, engineDelete); } } - - /** - * Listens to Lucene's {@linkplain ReferenceManager.RefreshListener} and fires off listeners added by - * {@linkplain IndexShard#addRefreshListener(Translog.Location, Consumer)}. - */ - private class RefreshListenerCallingRefreshListener implements ReferenceManager.RefreshListener { - private Translog.Location currentRefreshLocation; - - @Override - public void beforeRefresh() throws IOException { - currentRefreshLocation = getEngine().getTranslog().getLastWriteLocation(); - } - - @Override - public void afterRefresh(boolean didRefresh) throws IOException { - // This intentionally ignores didRefresh so a refresh call over the API can force rechecking the refreshListeners. - if (null == currentRefreshLocation) { - /* - * The translog had an empty last write location at the start of the refresh so we can't alert anyone to anything. This - * usually happens during recovery. The next refresh cycle out to pick up this refresh. 
- */ - return; - } - /* - * First set the lastRefreshedLocation so listeners that come in locations before that will just execute inline without messing - * around with refreshListeners at all. - */ - lastRefreshedLocation = currentRefreshLocation; - /* - * Now pop all listeners off the front of refreshListeners that are ready to be called. The listeners won't always be in order - * but they should be pretty close because you don't listen to times super far in the future. This prevents us from having to - * iterate over the whole queue on every refresh at the cost of some requests having to wait an extra cycle if they get stuck - * behind a request that missed the refresh cycle. - */ - Iterator>> itr = refreshListeners.iterator(); - while (itr.hasNext()) { - Tuple> listener = itr.next(); - if (listener.v1().compareTo(currentRefreshLocation) > 0) { - return; - } - itr.remove(); - refreshListenersEstimatedSize--; - threadPool.executor(ThreadPool.Names.LISTENER).execute(() -> listener.v2().accept(false)); - } - refreshListenersEstimatedSize = 0; - } - } } diff --git a/core/src/main/java/org/elasticsearch/index/shard/RefreshListeners.java b/core/src/main/java/org/elasticsearch/index/shard/RefreshListeners.java new file mode 100644 index 0000000000000..bbeb494c072a2 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/index/shard/RefreshListeners.java @@ -0,0 +1,156 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.shard; + +import org.apache.lucene.search.ReferenceManager; +import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.engine.Engine; +import org.elasticsearch.index.translog.Translog; + +import java.io.IOException; +import java.util.Iterator; +import java.util.concurrent.LinkedTransferQueue; +import java.util.function.Consumer; +import java.util.function.IntSupplier; + +import static java.util.Objects.requireNonNull; + +/** + * Allows for the registration of listeners that are called when a change becomes visible for search. This functionality is exposed from + * {@link IndexShard} but kept here so it can be tested without standing up the entire thing. + */ +class RefreshListeners { + /** + * Refresh listeners. While they are not stored in sorted order they are processed as though they are. + */ + private final LinkedTransferQueue>> refreshListeners = new LinkedTransferQueue<>(); + + private final IntSupplier getMaxRefreshListeners; + private final Runnable forceRefresh; + private final Consumer fireListener; + + /** + * The estimated size of refreshListenersEstimatedSize used for triggering refresh when the size gets over + * {@link IndexSettings#MAX_REFRESH_LISTENERS_PER_SHARD}. 
No effort is made to correct for threading issues in the size calculation + * beyond it being volatile. + */ + private volatile int refreshListenersEstimatedSize; + /** + * The translog location that was last made visible by a refresh. + */ + private volatile Translog.Location lastRefreshedLocation; + + public RefreshListeners(IntSupplier getMaxRefreshListeners, Runnable forceRefresh, Consumer fireListener) { + this.getMaxRefreshListeners = getMaxRefreshListeners; + this.forceRefresh = forceRefresh; + this.fireListener = fireListener; + } + + /** + * Add a listener for refreshes. + * + * @param location the location to listen for + * @param listener for the refresh. Called with true if registering the listener ran it out of slots and forced a refresh. Called with + * false otherwise. + */ + public void add(Translog.Location location, Consumer listener) { + requireNonNull(listener, "listener cannot be null"); + Translog.Location listenerLocation = requireNonNull(location, "location cannot be null"); + + Translog.Location lastRefresh = lastRefreshedLocation; + if (lastRefresh != null && lastRefresh.compareTo(listenerLocation) >= 0) { + // Location already visible, just call the listener + listener.accept(false); + return; + } + if (refreshListenersEstimatedSize < getMaxRefreshListeners.getAsInt()) { + // We have a free slot so register the listener + refreshListeners.add(new Tuple<>(location, listener)); + refreshListenersEstimatedSize++; + return; + } + /* + * No free slot so force a refresh and call the listener in this thread. Do so outside of the synchronized block so any other + * attempts to add a listener can continue. + */ + forceRefresh.run(); + listener.accept(true); + } + + /** + * Start listening to an engine. + */ + public void listenTo(Engine engine) { + engine.registerSearchRefreshListener(new RefreshListenerCallingRefreshListener(engine)); + } + + /** + * Listens to Lucene's {@linkplain ReferenceManager.RefreshListener} and fires off listeners added by + * {@linkplain IndexShard#addRefreshListener(Translog.Location, Consumer)}. + */ + private class RefreshListenerCallingRefreshListener implements ReferenceManager.RefreshListener { + private final Engine engine; + private Translog.Location currentRefreshLocation; + + public RefreshListenerCallingRefreshListener(Engine engine) { + this.engine = engine; + } + + @Override + public void beforeRefresh() throws IOException { + currentRefreshLocation = engine.getTranslog().getLastWriteLocation(); + } + + @Override + public void afterRefresh(boolean didRefresh) throws IOException { + // This intentionally ignores didRefresh so a refresh call over the API can force rechecking the refreshListeners. + if (null == currentRefreshLocation) { + /* + * The translog had an empty last write location at the start of the refresh so we can't alert anyone to anything. This + * usually happens during recovery. The next refresh cycle out to pick up this refresh. + */ + return; + } + /* + * First set the lastRefreshedLocation so listeners that come in locations before that will just execute inline without messing + * around with refreshListeners at all. + */ + lastRefreshedLocation = currentRefreshLocation; + /* + * Now pop all listeners off the front of refreshListeners that are ready to be called. The listeners won't always be in order + * but they should be pretty close because you don't listen to times super far in the future. 
This prevents us from having to + * iterate over the whole queue on every refresh at the cost of some requests having to wait an extra cycle if they get stuck + * behind a request that missed the refresh cycle. + */ + Iterator>> itr = refreshListeners.iterator(); + while (itr.hasNext()) { + Tuple> listener = itr.next(); + if (listener.v1().compareTo(currentRefreshLocation) > 0) { + return; + } + itr.remove(); + refreshListenersEstimatedSize--; + fireListener.accept(() -> listener.v2().accept(false)); + } + refreshListenersEstimatedSize = 0; + } + } +} diff --git a/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java b/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java index e36ccd90930b2..c535d43ea9065 100644 --- a/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java +++ b/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java @@ -329,12 +329,7 @@ public void testSegments() throws Exception { Engine.Index second = new Engine.Index(newUid("2"), doc2); engine.index(second); assertThat(second.getTranslogLocation(), greaterThan(first.getTranslogLocation())); - DummyRefreshListener refreshListener = new DummyRefreshListener(); - engine.addRefreshListener(second.getTranslogLocation(), refreshListener); engine.refresh("test"); - assertBusy(() -> assertNotNull("The listener should be called in the listener threadpool soon after the refresh", - refreshListener.forcedRefresh.get())); - assertFalse("We didn't force a refresh with the index operations!?", refreshListener.forcedRefresh.get()); segments = engine.segments(false); assertThat(segments.size(), equalTo(1)); @@ -2124,183 +2119,4 @@ public void testCurrentTranslogIDisCommitted() throws IOException { } } } - - public void testTooManyRefreshListeners() throws Exception { - try (Store store = createStore(); - Engine engine = createEngine(defaultSettings, store, createTempDir(), NoMergePolicy.INSTANCE)) { - ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocumentWithTextField(), B_1, null); - Engine.Index index = new Engine.Index(newUid("1"), doc); - engine.index(index); - - // Fill the listener slots - List nonForcedListeners = new ArrayList<>(INDEX_SETTINGS.getMaxRefreshListeners()); - for (int i = 0; i < defaultSettings.getMaxRefreshListeners(); i++) { - DummyRefreshListener listener = new DummyRefreshListener(); - nonForcedListeners.add(listener); - engine.addRefreshListener(index.getTranslogLocation(), listener); - } - - // We shouldn't have called any of them - for (DummyRefreshListener listener : nonForcedListeners) { - assertNull("Called listener too early!", listener.forcedRefresh.get()); - } - - // Add one more listener which should cause a refresh. In this thread, no less. - DummyRefreshListener forcingListener = new DummyRefreshListener(); - engine.addRefreshListener(index.getTranslogLocation(), forcingListener); - assertTrue("Forced listener wasn't forced?", forcingListener.forcedRefresh.get()); - - // That forces all the listeners through. On the listener thread pool so give them some time with assertBusy. 
- for (DummyRefreshListener listener : nonForcedListeners) { - assertBusy( - () -> assertEquals("Expected listener called with unforced refresh!", Boolean.FALSE, listener.forcedRefresh.get())); - } - } - } - - public void testAddRefreshListenerAfterRefresh() throws Exception { - try (Store store = createStore(); - Engine engine = createEngine(defaultSettings, store, createTempDir(), NoMergePolicy.INSTANCE)) { - ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocumentWithTextField(), B_1, null); - Engine.Index index = new Engine.Index(newUid("1"), doc); - engine.index(index); - engine.refresh("I said so"); - if (randomBoolean()) { - engine.index(new Engine.Index(newUid("1"), doc)); - if (randomBoolean()) { - engine.refresh("I said so"); - } - } - - DummyRefreshListener listener = new DummyRefreshListener(); - engine.addRefreshListener(index.getTranslogLocation(), listener); - assertFalse(listener.forcedRefresh.get()); - } - } - - /** - * Uses a whole bunch of threads to index, wait for refresh, and non-realtime get documents to validate that they are visible after - * waiting regardless of what crazy sequence of events causes the refresh listener to fire. - */ - public void testAddRefreshListenerLotsOfThreads() throws Exception { - int threadCount = between(5, max(50, defaultSettings.getMaxRefreshListeners() * 2)); - long runTime = TimeUnit.SECONDS.toNanos(5); - try (Store store = createStore(); - Engine engine = createEngine(defaultSettings, store, createTempDir(), NoMergePolicy.INSTANCE)) { - AtomicBoolean run = new AtomicBoolean(true); - CopyOnWriteArrayList failures = new CopyOnWriteArrayList<>(); - - // These threads add and block until the refresh makes the change visible and then do a non-realtime get. - Thread[] threads = new Thread[threadCount]; - for (int i = 0; i < threadCount; i++) { - final String id = String.format(Locale.ROOT, "%04d", i); - final Term uid = newUid(id); - threads[i] = new Thread(() -> { - int iteration = 0; - while (run.get()) { - try { - iteration++; - Document document = testDocument(); - String value = id + "i" + iteration; - document.add(new TextField("test", value, Field.Store.YES)); - ParsedDocument doc = testParsedDocument(id, id, "test", null, -1, -1, document, B_1, null); - Engine.Index index = new Engine.Index(uid, doc); - boolean created = engine.index(index); - assertEquals(iteration, index.version()); - assertEquals(iteration == 1, created); - - DummyRefreshListener listener = new DummyRefreshListener(); - engine.addRefreshListener(index.getTranslogLocation(), listener); - assertBusy(() -> assertNotNull("listener never called", listener.forcedRefresh.get())); - if (threadCount < defaultSettings.getMaxRefreshListeners()) { - assertFalse(listener.forcedRefresh.get()); - } - - Engine.Get get = new Engine.Get(false, uid); - try (Engine.GetResult getResult = engine.get(get)) { - assertTrue("document not found", getResult.exists()); - assertEquals(iteration, getResult.version()); - SingleFieldsVisitor visitor = new SingleFieldsVisitor("test"); - getResult.docIdAndVersion().context.reader().document(getResult.docIdAndVersion().docId, visitor); - assertEquals(Arrays.asList(value), visitor.fields().get("test")); - } - } catch (Throwable t) { - failures.add(new RuntimeException("failure on the [" + iteration + "] iteration of thread [" + id + "]", t)); - } - } - logger.info("Finished [{}] iterations", iteration); - }); - threads[i].start(); - } - long end = System.nanoTime() + runTime; - while (System.nanoTime() < end && 
failures.isEmpty()) { - Thread.sleep(100); - engine.refresh("because test"); - } - run.set(false); - // Give the threads a second to finish whatever they were doing - refreshing all the time so they'll finish any blocking. - end = System.nanoTime() + TimeUnit.SECONDS.toNanos(1); - while (System.nanoTime() < end) { - Thread.sleep(100); - engine.refresh("because test"); - } - for (Thread thread : threads) { - thread.join(); - } - if (failures.isEmpty()) { - return; - } - RuntimeException e = new RuntimeException("there were failures"); - for (Throwable failure: failures) { - e.addSuppressed(failure); - } - throw e; - } - } - - /** - * It is possible that {@link Engine#addRefreshListener(Translog.Location, Consumer)} can miss the call to refresh that made the change - * visible if the refresh happens concurrently with the the add. - */ - public void testAddRefreshListenerConcurrentRefresh() throws Exception { - try (Store store = createStore(); - Engine engine = createEngine(defaultSettings, store, createTempDir(), NoMergePolicy.INSTANCE)) { - AtomicBoolean run = new AtomicBoolean(true); - Thread refresher = new Thread(() -> { - while (run.get()) { - engine.refresh("test"); - } - }); - refresher.start(); - try { - for (int i = 0; i < 100; i++) { - ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocumentWithTextField(), B_1, null); - Engine.Index index = new Engine.Index(newUid("1"), doc); - engine.index(index); - - DummyRefreshListener listener = new DummyRefreshListener(); - engine.addRefreshListener(index.getTranslogLocation(), listener); - assertBusy(() -> assertNotNull(listener.forcedRefresh.get())); - assertFalse(listener.forcedRefresh.get()); - } - } finally { - run.set(false); - refresher.join(); - } - } - } - - private static class DummyRefreshListener implements Consumer { - /** - * When the listener is called this captures it's only argument. - */ - private AtomicReference forcedRefresh = new AtomicReference<>(); - - @Override - public void accept(Boolean forcedRefresh) { - assertNotNull(forcedRefresh); - Boolean oldValue = this.forcedRefresh.getAndSet(forcedRefresh); - assertNull("Listener called twice", oldValue); - } - } } diff --git a/core/src/test/java/org/elasticsearch/index/shard/IndexShardRefreshListenerTests.java b/core/src/test/java/org/elasticsearch/index/shard/IndexShardRefreshListenerTests.java new file mode 100644 index 0000000000000..afe60a005cd6c --- /dev/null +++ b/core/src/test/java/org/elasticsearch/index/shard/IndexShardRefreshListenerTests.java @@ -0,0 +1,290 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.shard; + +import org.apache.lucene.document.Field; +import org.apache.lucene.document.NumericDocValuesField; +import org.apache.lucene.document.TextField; +import org.apache.lucene.index.IndexWriterConfig; +import org.apache.lucene.index.KeepOnlyLastCommitDeletionPolicy; +import org.apache.lucene.index.SnapshotDeletionPolicy; +import org.apache.lucene.index.Term; +import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.store.Directory; +import org.apache.lucene.util.IOUtils; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.lucene.uid.Versions; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.common.util.concurrent.FutureUtils; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.codec.CodecService; +import org.elasticsearch.index.engine.Engine; +import org.elasticsearch.index.engine.EngineConfig; +import org.elasticsearch.index.engine.InternalEngine; +import org.elasticsearch.index.engine.InternalEngineTests.TranslogHandler; +import org.elasticsearch.index.fieldvisitor.SingleFieldsVisitor; +import org.elasticsearch.index.mapper.ParseContext.Document; +import org.elasticsearch.index.mapper.ParsedDocument; +import org.elasticsearch.index.mapper.internal.UidFieldMapper; +import org.elasticsearch.index.store.DirectoryService; +import org.elasticsearch.index.store.Store; +import org.elasticsearch.index.translog.TranslogConfig; +import org.elasticsearch.test.DummyShardLock; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.IndexSettingsModule; +import org.elasticsearch.threadpool.ThreadPool; +import org.junit.After; +import org.junit.Before; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.Locale; +import java.util.concurrent.ScheduledFuture; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicReference; +import java.util.function.Consumer; + +import static org.elasticsearch.common.unit.TimeValue.timeValueMillis; + +/** + * Tests how {@linkplain RefreshListeners} interacts with {@linkplain InternalEngine}. 
+ */ +public class IndexShardRefreshListenerTests extends ESTestCase { + private RefreshListeners listeners; + private Engine engine; + private volatile int maxListeners; + private ThreadPool threadPool; + private Store store; + + @Before + public void setupListeners() throws Exception { + // Setup dependencies of the listeners + maxListeners = randomIntBetween(1, 1000); + listeners = new RefreshListeners( + () -> maxListeners, + () -> engine.refresh("too-many-listeners"), + fire -> fire.run()); + + // Now setup the InternalEngine which is much more complicated because we aren't mocking anything + threadPool = new ThreadPool(getTestName()); + IndexSettings indexSettings = IndexSettingsModule.newIndexSettings("index", Settings.EMPTY); + ShardId shardId = new ShardId(new Index("index", "_na_"), 1); + Directory directory = newDirectory(); + DirectoryService directoryService = new DirectoryService(shardId, indexSettings) { + @Override + public Directory newDirectory() throws IOException { + return directory; + } + + @Override + public long throttleTimeInNanos() { + return 0; + } + }; + store = new Store(shardId, indexSettings, directoryService, new DummyShardLock(shardId)); + IndexWriterConfig iwc = newIndexWriterConfig(); + TranslogConfig translogConfig = new TranslogConfig(shardId, createTempDir("translog"), indexSettings, + BigArrays.NON_RECYCLING_INSTANCE); + Engine.EventListener eventListener = new Engine.EventListener() { + @Override + public void onFailedEngine(String reason, @Nullable Throwable t) { + // we don't need to notify anybody in this test + } + }; + EngineConfig config = new EngineConfig(EngineConfig.OpenMode.CREATE_INDEX_AND_TRANSLOG, shardId, threadPool, indexSettings, null, + store, new SnapshotDeletionPolicy(new KeepOnlyLastCommitDeletionPolicy()), newMergePolicy(), iwc.getAnalyzer(), + iwc.getSimilarity(), new CodecService(null, logger), eventListener, new TranslogHandler(shardId.getIndexName(), logger), + IndexSearcher.getDefaultQueryCache(), IndexSearcher.getDefaultQueryCachingPolicy(), translogConfig, + TimeValue.timeValueMinutes(5)); + engine = new InternalEngine(config); + + // Finally, we can listen to the engine + listeners.listenTo(engine); + } + + @After + public void tearDownListeners() throws Exception { + IOUtils.close(engine, store); + terminate(threadPool); + } + + public void testTooMany() throws Exception { + Engine.Index index = index("1"); + + // Fill the listener slots + List nonForcedListeners = new ArrayList<>(maxListeners); + for (int i = 0; i < maxListeners; i++) { + DummyRefreshListener listener = new DummyRefreshListener(); + nonForcedListeners.add(listener); + listeners.add(index.getTranslogLocation(), listener); + } + + // We shouldn't have called any of them + for (DummyRefreshListener listener : nonForcedListeners) { + assertNull("Called listener too early!", listener.forcedRefresh.get()); + } + + // Add one more listener which should cause a refresh. + DummyRefreshListener forcingListener = new DummyRefreshListener(); + listeners.add(index.getTranslogLocation(), forcingListener); + assertTrue("Forced listener wasn't forced?", forcingListener.forcedRefresh.get()); + + // That forces all the listeners through. On the listener thread pool so give them some time with assertBusy. 
+ for (DummyRefreshListener listener : nonForcedListeners) { + assertEquals("Expected listener called with unforced refresh!", Boolean.FALSE, listener.forcedRefresh.get()); + } + } + + public void testAfterRefresh() throws Exception { + Engine.Index index = index("1"); + engine.refresh("I said so"); + if (randomBoolean()) { + index(randomFrom("1" /* same document */, "2" /* different document */)); + if (randomBoolean()) { + engine.refresh("I said so"); + } + } + + DummyRefreshListener listener = new DummyRefreshListener(); + listeners.add(index.getTranslogLocation(), listener); + assertFalse(listener.forcedRefresh.get()); + } + + /** + * Attempts to add a listener at the same time as a refresh occurs by having a background thread force a refresh as fast as it can while + * adding listeners. This can catch the situation where a refresh happens right as the listener is being added such that the listener + * misses the refresh and has to catch the next one. If the listener wasn't able to properly catch the next one then this would fail. + */ + public void testConcurrentRefresh() throws Exception { + AtomicBoolean run = new AtomicBoolean(true); + Thread refresher = new Thread(() -> { + while (run.get()) { + engine.refresh("test"); + } + }); + refresher.start(); + try { + for (int i = 0; i < 100; i++) { + Engine.Index index = index("1"); + + DummyRefreshListener listener = new DummyRefreshListener(); + listeners.add(index.getTranslogLocation(), listener); + assertBusy(() -> assertNotNull(listener.forcedRefresh.get())); + assertFalse(listener.forcedRefresh.get()); + } + } finally { + run.set(false); + refresher.join(); + } + } + + /** + * Uses a bunch of threads to index, wait for refresh, and non-realtime get documents to validate that they are visible after waiting + * regardless of what crazy sequence of events causes the refresh listener to fire. + */ + public void testLotsOfThreads() throws Exception { + int threadCount = between(3, 10); + maxListeners = between(1, threadCount * 2); + + + // This thread just refreshes every once in a while to cause trouble. + ScheduledFuture refresher = threadPool.scheduleWithFixedDelay(() -> engine.refresh("because test"), timeValueMillis(100)); + + // These threads add and block until the refresh makes the change visible and then do a non-realtime get. 
+ Thread[] indexers = new Thread[threadCount]; + for (int thread = 0; thread < threadCount; thread++) { + final String threadId = String.format(Locale.ROOT, "%04d", thread); + indexers[thread] = new Thread(() -> { + for (int iteration = 1; iteration <= 500; iteration++) { + try { + String testFieldValue = String.format(Locale.ROOT, "%s%04d", threadId, iteration); + Engine.Index index = index(threadId, testFieldValue); + assertEquals(iteration, index.version()); + + DummyRefreshListener listener = new DummyRefreshListener(); + listeners.add(index.getTranslogLocation(), listener); + assertBusy(() -> assertNotNull("listener never called", listener.forcedRefresh.get())); + if (threadCount < maxListeners) { + assertFalse(listener.forcedRefresh.get()); + } + + Engine.Get get = new Engine.Get(false, index.uid()); + try (Engine.GetResult getResult = engine.get(get)) { + assertTrue("document not found", getResult.exists()); + assertEquals(iteration, getResult.version()); + SingleFieldsVisitor visitor = new SingleFieldsVisitor("test"); + getResult.docIdAndVersion().context.reader().document(getResult.docIdAndVersion().docId, visitor); + assertEquals(Arrays.asList(testFieldValue), visitor.fields().get("test")); + } + } catch (Throwable t) { + throw new RuntimeException("failure on the [" + iteration + "] iteration of thread [" + threadId + "]", t); + } + } + }); + indexers[thread].start(); + } + + for (Thread indexer: indexers) { + indexer.join(); + } + FutureUtils.cancel(refresher); + } + + private Engine.Index index(String id) { + return index(id, "test"); + } + + private Engine.Index index(String id, String testFieldValue) { + String type = "test"; + String uid = type + ":" + id; + Document document = new Document(); + document.add(new TextField("test", testFieldValue, Field.Store.YES)); + Field uidField = new Field("_uid", type + ":" + id, UidFieldMapper.Defaults.FIELD_TYPE); + Field versionField = new NumericDocValuesField("_version", Versions.MATCH_ANY); + document.add(uidField); + document.add(versionField); + BytesReference source = new BytesArray(new byte[] { 1 }); + ParsedDocument doc = new ParsedDocument(uidField, versionField, id, type, null, -1, -1, Arrays.asList(document), source, null); + Engine.Index index = new Engine.Index(new Term("_uid", uid), doc); + engine.index(index); + return index; + } + + private static class DummyRefreshListener implements Consumer { + /** + * When the listener is called this captures it's only argument. 
+ */ + private AtomicReference forcedRefresh = new AtomicReference<>(); + + @Override + public void accept(Boolean forcedRefresh) { + assertNotNull(forcedRefresh); + Boolean oldValue = this.forcedRefresh.getAndSet(forcedRefresh); + assertNull("Listener called twice", oldValue); + } + } +} From 88171a8322a424e624d48960fb4c98dd43e4d671 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Tue, 17 May 2016 16:40:57 -0400 Subject: [PATCH 36/86] Rename test --- ...hardRefreshListenerTests.java => RefreshListenersTests.java} | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) rename core/src/test/java/org/elasticsearch/index/shard/{IndexShardRefreshListenerTests.java => RefreshListenersTests.java} (99%) diff --git a/core/src/test/java/org/elasticsearch/index/shard/IndexShardRefreshListenerTests.java b/core/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java similarity index 99% rename from core/src/test/java/org/elasticsearch/index/shard/IndexShardRefreshListenerTests.java rename to core/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java index afe60a005cd6c..f81657b793067 100644 --- a/core/src/test/java/org/elasticsearch/index/shard/IndexShardRefreshListenerTests.java +++ b/core/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java @@ -73,7 +73,7 @@ /** * Tests how {@linkplain RefreshListeners} interacts with {@linkplain InternalEngine}. */ -public class IndexShardRefreshListenerTests extends ESTestCase { +public class RefreshListenersTests extends ESTestCase { private RefreshListeners listeners; private Engine engine; private volatile int maxListeners; From 3322e26211bf681b37132274ee158ae330afc28b Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Tue, 17 May 2016 17:20:02 -0400 Subject: [PATCH 37/86] Increase default maximum number of listeners to 1000 --- core/src/main/java/org/elasticsearch/index/IndexSettings.java | 2 +- .../org/elasticsearch/index/shard/RefreshListenersTests.java | 4 +++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/index/IndexSettings.java b/core/src/main/java/org/elasticsearch/index/IndexSettings.java index cb0f6fa447fd3..cbfe19d1a2697 100644 --- a/core/src/main/java/org/elasticsearch/index/IndexSettings.java +++ b/core/src/main/java/org/elasticsearch/index/IndexSettings.java @@ -115,7 +115,7 @@ public final class IndexSettings { public static final Setting INDEX_GC_DELETES_SETTING = Setting.timeSetting("index.gc_deletes", DEFAULT_GC_DELETES, new TimeValue(-1, TimeUnit.MILLISECONDS), Property.Dynamic, Property.IndexScope); - public static final Setting MAX_REFRESH_LISTENERS_PER_SHARD = Setting.intSetting("index.max_refresh_listeners", 100, 0, + public static final Setting MAX_REFRESH_LISTENERS_PER_SHARD = Setting.intSetting("index.max_refresh_listeners", 1000, 0, Property.Dynamic, Property.IndexScope); private final Index index; diff --git a/core/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java b/core/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java index f81657b793067..852105cb23e6f 100644 --- a/core/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java +++ b/core/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java @@ -87,7 +87,9 @@ public void setupListeners() throws Exception { listeners = new RefreshListeners( () -> maxListeners, () -> engine.refresh("too-many-listeners"), - fire -> fire.run()); + // Immediately run listeners rather than adding them to the listener thread pool like 
IndexShard does to simplify the test. + fire -> fire.run() + ); // Now setup the InternalEngine which is much more complicated because we aren't mocking anything threadPool = new ThreadPool(getTestName()); From 55596ea68b5484490c3637fbad0d95564236478b Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Fri, 20 May 2016 14:40:06 -0400 Subject: [PATCH 38/86] Remove listener from shardOperationOnPrimary Create instead asyncShardOperationOnPrimary which is called after all of the replica operations are started to handle any async operations. --- .../flush/TransportShardFlushAction.java | 10 ++-- .../refresh/TransportShardRefreshAction.java | 23 +++++---- .../replication/ReplicationOperation.java | 48 ++++++++++++------- .../TransportReplicatedMutationAction.java | 14 ++++-- .../TransportReplicationAction.java | 47 ++++++++++++------ .../ReplicationOperationTests.java | 32 ++++++++----- .../TransportReplicationActionTests.java | 26 ++++------ 7 files changed, 124 insertions(+), 76 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportShardFlushAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportShardFlushAction.java index e8cac9dfc8c40..dc41148d3bbc7 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportShardFlushAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportShardFlushAction.java @@ -39,7 +39,7 @@ /** * */ -public class TransportShardFlushAction extends TransportReplicationAction { +public class TransportShardFlushAction extends TransportReplicationAction { public static final String NAME = FlushAction.NAME + "[s]"; @@ -57,12 +57,16 @@ protected ReplicationResponse newResponseInstance() { } @Override - protected ShardFlushRequest shardOperationOnPrimary(ShardFlushRequest shardRequest, ActionListener listener) { + protected Tuple shardOperationOnPrimary(ShardFlushRequest shardRequest) { IndexShard indexShard = indicesService.indexServiceSafe(shardRequest.shardId().getIndex()).getShard(shardRequest.shardId().id()); indexShard.flush(shardRequest.getRequest()); logger.trace("{} flush request executed on primary", indexShard.shardId()); + return new Tuple<>(shardRequest, null); + } + + @Override + protected void asyncShardOperationOnPrimary(Void stash, ShardFlushRequest shardRequest, ActionListener listener) { listener.onResponse(new ReplicationResponse()); - return shardRequest; } @Override diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportShardRefreshAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportShardRefreshAction.java index d8d5da34d211f..d07d338c1803d 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportShardRefreshAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportShardRefreshAction.java @@ -28,6 +28,7 @@ import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.shard.IndexShard; @@ -37,10 +38,8 @@ import org.elasticsearch.transport.TransportResponse; import org.elasticsearch.transport.TransportService; -/** - * - */ -public class TransportShardRefreshAction extends 
TransportReplicationAction { +public class TransportShardRefreshAction + extends TransportReplicationAction { public static final String NAME = RefreshAction.NAME + "[s]"; @@ -48,8 +47,8 @@ public class TransportShardRefreshAction extends TransportReplicationAction listener) { + protected Tuple shardOperationOnPrimary(BasicReplicationRequest shardRequest) { IndexShard indexShard = indicesService.indexServiceSafe(shardRequest.shardId().getIndex()).getShard(shardRequest.shardId().id()); indexShard.refresh("api"); - listener.onResponse(new ReplicationResponse()); logger.trace("{} refresh request executed on primary", indexShard.shardId()); - return shardRequest; + return new Tuple<>(shardRequest, null); + } + + @Override + protected void asyncShardOperationOnPrimary(Void stash, BasicReplicationRequest shardRequest, + ActionListener listener) { + listener.onResponse(new ReplicationResponse()); } @Override diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java b/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java index 90d61bcb29082..b23019a801da8 100644 --- a/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java +++ b/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java @@ -28,6 +28,7 @@ import org.elasticsearch.cluster.routing.IndexRoutingTable; import org.elasticsearch.cluster.routing.IndexShardRoutingTable; import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.index.engine.VersionConflictEngineException; @@ -45,8 +46,12 @@ import java.util.function.Consumer; import java.util.function.Supplier; -public class ReplicationOperation, ReplicaRequest extends ReplicationRequest, - Response extends ReplicationResponse> { +public class ReplicationOperation< + Request extends ReplicationRequest, + AsyncStash, + ReplicaRequest extends ReplicationRequest, + Response extends ReplicationResponse + > { final private ESLogger logger; final private Request request; final private Supplier clusterStateSupplier; @@ -66,7 +71,7 @@ public class ReplicationOperation, R final private AtomicInteger successfulShards = new AtomicInteger(); final private boolean executeOnReplicas; final private boolean checkWriteConsistency; - final private Primary primary; + final private Primary primary; final private Replicas replicasProxy; final private AtomicBoolean finished = new AtomicBoolean(); final protected ActionListener finalResponseListener; @@ -75,7 +80,7 @@ public class ReplicationOperation, R private final List shardReplicaFailures = Collections.synchronizedList(new ArrayList<>()); - ReplicationOperation(Request request, Primary primary, + ReplicationOperation(Request request, Primary primary, ActionListener listener, boolean executeOnReplicas, boolean checkWriteConsistency, Replicas replicas, @@ -102,8 +107,10 @@ void execute() throws Exception { } totalShards.incrementAndGet(); - pendingShards.addAndGet(2); // increase by 2 - one for the primary shard and one for the coordination of replicas - ReplicaRequest replicaRequest = performOnPrimary(primaryRouting, request); + pendingShards.incrementAndGet(); + Tuple primaryResult = primary.perform(request); + ReplicaRequest replicaRequest = primaryResult.v1(); + AsyncStash asyncStash = primaryResult.v2(); assert replicaRequest.primaryTerm() > 
0 : "replicaRequest doesn't have a primary term"; if (logger.isTraceEnabled()) { logger.trace("[{}] op [{}] completed on primary for request [{}]", primaryId, opType, request); @@ -130,17 +137,15 @@ void execute() throws Exception { performOnReplica(shard.buildTargetRelocatingShard(), replicaRequest); } } - // Decrement for the replica coordination - decPendingAndFinishIfNeeded(); - } - - private ReplicaRequest performOnPrimary(final ShardRouting primaryRouting, Request request) throws Exception { - return primary.perform(request, new ActionListener() { + /* + * Wait until after we've started the replica requests before we start any asyn actions on the primary so we don't have a race + * between the replica returning and the primary starting. + */ + primary.performAsync(asyncStash, request, new ActionListener() { @Override public void onResponse(Response response) { finalResponse = response; successfulShards.incrementAndGet(); - // Decrement for the primary decPendingAndFinishIfNeeded(); } @@ -311,8 +316,12 @@ public static boolean isConflictException(Throwable e) { } - interface Primary, ReplicaRequest extends ReplicationRequest, - Response extends ReplicationResponse> { + interface Primary< + Request extends ReplicationRequest, + AsyncStash, + ReplicaRequest extends ReplicationRequest, + Response extends ReplicationResponse + > { /** routing entry for this primary */ ShardRouting routingEntry(); @@ -326,10 +335,15 @@ interface Primary, ReplicaRequest ex * also complete after. Deal with it. * * @param request the request to perform - * @param listener for the request to be completed. * @return the request to send to the repicas */ - ReplicaRequest perform(Request request, ActionListener listener) throws Exception; + Tuple perform(Request request) throws Exception; + + /** + * Start and listen for the completion of any asynchronous actions taken on the primary as part of this request. If there are no + * such actions then this will call the listener directly. 
+ */ + void performAsync(AsyncStash stash, Request request, ActionListener listener) throws Exception; } interface Replicas> { diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicatedMutationAction.java b/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicatedMutationAction.java index 9bf653c71baf8..10d194ac1351b 100644 --- a/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicatedMutationAction.java +++ b/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicatedMutationAction.java @@ -21,9 +21,11 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.replication.TransportReplicatedMutationAction.WriteResult; import org.elasticsearch.cluster.action.shard.ShardStateAction; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.shard.IndexShard; @@ -42,7 +44,7 @@ public abstract class TransportReplicatedMutationAction< Request extends ReplicatedMutationRequest, Response extends ReplicatedMutationResponse - > extends TransportReplicationAction { + > extends TransportReplicationAction, Request, Response> { protected TransportReplicatedMutationAction(Settings settings, String actionName, TransportService transportService, ClusterService clusterService, IndicesService indicesService, ThreadPool threadPool, ShardStateAction shardStateAction, @@ -69,7 +71,7 @@ protected abstract WriteResult onPrimaryShard(IndexService indexServic protected abstract Translog.Location onReplicaShard(Request request, IndexShard indexShard); @Override - protected Request shardOperationOnPrimary(Request request, ActionListener listener) throws Exception { + protected Tuple> shardOperationOnPrimary(Request request) throws Exception { IndexService indexService = indicesService.indexServiceSafe(request.shardId().getIndex()); IndexShard indexShard = indexService.getShard(request.shardId().id()); WriteResult result = onPrimaryShard(indexService, indexShard, request); @@ -78,6 +80,13 @@ protected Request shardOperationOnPrimary(Request request, ActionListener(request, result); + } + + @Override + protected void asyncShardOperationOnPrimary(WriteResult result, Request request, ActionListener listener) { + IndexService indexService = indicesService.indexServiceSafe(request.shardId().getIndex()); + IndexShard indexShard = indexService.getShard(request.shardId().id()); if (request.shouldBlockUntilRefresh() && false == request.isRefresh()) { indexShard.addRefreshListener(result.location, forcedRefresh -> { if (forcedRefresh) { @@ -89,7 +98,6 @@ protected Request shardOperationOnPrimary(Request request, ActionListener, - ReplicaRequest extends ReplicationRequest, - Response extends ReplicationResponse> extends TransportAction { +public abstract class TransportReplicationAction< + Request extends ReplicationRequest, + AsyncStash, + ReplicaRequest extends ReplicationRequest, + Response extends ReplicationResponse + > extends TransportAction { final protected TransportService transportService; final protected ClusterService clusterService; @@ -147,13 +151,22 @@ protected void resolveRequest(MetaData metaData, IndexMetaData indexMetaData, Re } /** - * Primary operation on node with 
primary copy + * Synchronous portion of primary operation on node with primary copy * * @param shardRequest the request to the primary shard - * @param listener called when the operation is complete with the result of the operation, assuming all the replicas succeed - * @return the request to the replicas. + * @return Tuple of the request to send to the replicas and the information needed by the { + * {@link #asyncShardOperationOnPrimary(Object, ReplicationRequest, ActionListener)} to do its job */ - protected abstract ReplicaRequest shardOperationOnPrimary(Request shardRequest, ActionListener listener) throws Exception; + protected abstract Tuple shardOperationOnPrimary(Request shardRequest) throws Exception; + + /** + * Asynchronous portion of primary operation on node with primary copy + * + * @param stash information saved from the synchronous phase of the operation for use in the async phase of the operation + * @param shardRequest the request to the primary shard + * @param listener implementers call this success or failure when the asynchronous operations are complete. + */ + protected abstract void asyncShardOperationOnPrimary(AsyncStash stash, Request shardRequest, ActionListener listener); /** * Replica operation on nodes with replica copies. While this does take a listener it should not return until it has completed any @@ -279,9 +292,8 @@ public void handleException(TransportException exp) { } } - protected ReplicationOperation - createReplicatedOperation(Request request, ActionListener listener, - PrimaryShardReference primaryShardReference, boolean executeOnReplicas) { + protected ReplicationOperation createReplicatedOperation(Request request, + ActionListener listener, PrimaryShardReference primaryShardReference, boolean executeOnReplicas) { return new ReplicationOperation<>(request, primaryShardReference, listener, executeOnReplicas, checkWriteConsistency(), replicasProxy, clusterService::state, logger, actionName ); @@ -736,7 +748,7 @@ protected boolean shouldExecuteReplication(Settings settings) { return IndexMetaData.isIndexUsingShadowReplicas(settings) == false; } - class PrimaryShardReference implements ReplicationOperation.Primary, Releasable { + class PrimaryShardReference implements ReplicationOperation.Primary, Releasable { private final IndexShard indexShard; private final Releasable operationLock; @@ -765,10 +777,15 @@ public void failShard(String reason, Throwable e) { } @Override - public ReplicaRequest perform(Request request, ActionListener listener) throws Exception { - ReplicaRequest replicaRequest = shardOperationOnPrimary(request, listener); - replicaRequest.primaryTerm(indexShard.getPrimaryTerm()); - return replicaRequest; + public Tuple perform(Request request) throws Exception { + Tuple result = shardOperationOnPrimary(request); + result.v1().primaryTerm(indexShard.getPrimaryTerm()); + return result; + } + + @Override + public void performAsync(AsyncStash stash, Request request, ActionListener listener) throws Exception { + asyncShardOperationOnPrimary(stash, request, listener); } @Override diff --git a/core/src/test/java/org/elasticsearch/action/support/replication/ReplicationOperationTests.java b/core/src/test/java/org/elasticsearch/action/support/replication/ReplicationOperationTests.java index 8b0aba2e7cd49..791f5e1fb0e13 100644 --- a/core/src/test/java/org/elasticsearch/action/support/replication/ReplicationOperationTests.java +++ b/core/src/test/java/org/elasticsearch/action/support/replication/ReplicationOperationTests.java @@ -29,6 +29,7 @@ 
import org.elasticsearch.cluster.routing.IndexShardRoutingTable; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.ShardRoutingState; +import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.logging.ESLogger; @@ -231,11 +232,11 @@ private void testClusterStateChangeAfterPrimaryOperation(final ShardId shardId, final ShardRouting primaryShard = state.get().routingTable().shardRoutingTable(shardId).primaryShard(); final TestPrimary primary = new TestPrimary(primaryShard, primaryTerm) { @Override - public Request perform(Request request, ActionListener listener) throws Exception { - Request replicaRequest = super.perform(request, listener); + public Tuple perform(Request request) throws Exception { + Tuple result = super.perform(request); state.set(changedState); logger.debug("--> state after primary operation:\n{}", state.get().prettyPrint()); - return replicaRequest; + return result; } }; @@ -363,13 +364,15 @@ public void readFrom(StreamInput in) throws IOException { static class Response extends ReplicationResponse { } - static class TestPrimary implements ReplicationOperation.Primary { + static class TestPrimary implements ReplicationOperation.Primary { final ShardRouting routing; final long term; + final String stash; TestPrimary(ShardRouting routing, long term) { this.routing = routing; this.term = term; + stash = randomAsciiOfLength(5); } @Override @@ -383,13 +386,18 @@ public void failShard(String message, Throwable throwable) { } @Override - public Request perform(Request request, ActionListener listener) throws Exception { + public Tuple perform(Request request) throws Exception { if (request.processedOnPrimary.compareAndSet(false, true) == false) { fail("processed [" + request + "] twice"); } request.primaryTerm(term); + return new Tuple<>(request, stash); + } + + @Override + public void performAsync(String stash, Request request, ActionListener listener) throws Exception { + assertEquals(this.stash, stash); listener.onResponse(new Response()); - return request; } } @@ -435,15 +443,15 @@ public void failShard(ShardRouting replica, ShardRouting primary, String message } } - class TestReplicationOperation extends ReplicationOperation { - public TestReplicationOperation(Request request, Primary primary, ActionListener listener, - Replicas replicas, Supplier clusterStateSupplier) { + class TestReplicationOperation extends ReplicationOperation { + public TestReplicationOperation(Request request, Primary primary, + ActionListener listener, Replicas replicas, Supplier clusterStateSupplier) { this(request, primary, listener, true, false, replicas, clusterStateSupplier, ReplicationOperationTests.this.logger, "test"); } - public TestReplicationOperation(Request request, Primary primary, ActionListener listener, - boolean executeOnReplicas, boolean checkWriteConsistency, Replicas replicas, - Supplier clusterStateSupplier, ESLogger logger, String opType) { + public TestReplicationOperation(Request request, Primary primary, + ActionListener listener, boolean executeOnReplicas, boolean checkWriteConsistency, Replicas replicas, + Supplier clusterStateSupplier, ESLogger logger, String opType) { super(request, primary, listener, executeOnReplicas, checkWriteConsistency, replicas, clusterStateSupplier, logger, opType); } } diff --git 
a/core/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java b/core/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java index ad3d0f2c967ca..9ff56b067a0a1 100644 --- a/core/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java +++ b/core/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java @@ -42,6 +42,7 @@ import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.lease.Releasable; @@ -476,18 +477,7 @@ public void testPrimaryReference() throws Exception { }; Action.PrimaryShardReference primary = action.new PrimaryShardReference(shard, releasable); final Request request = new Request(); - Request replicaRequest = primary.perform(request, new ActionListener() { - @Override - public void onResponse(Response response) { - // Ok, nothing to do - } - - @Override - public void onFailure(Throwable e) { - // Currently can't even be called. - throw new RuntimeException(e); - } - }); + Request replicaRequest = primary.perform(request).v1(); assertThat(replicaRequest.primaryTerm(), equalTo(primaryTerm)); @@ -759,7 +749,7 @@ public void readFrom(StreamInput in) throws IOException { static class Response extends ReplicationResponse { } - class Action extends TransportReplicationAction { + class Action extends TransportReplicationAction { Action(Settings settings, String actionName, TransportService transportService, ClusterService clusterService, @@ -776,11 +766,15 @@ protected Response newResponseInstance() { } @Override - protected Request shardOperationOnPrimary(Request shardRequest, ActionListener listener) throws Exception { + protected Tuple shardOperationOnPrimary(Request shardRequest) throws Exception { boolean executedBefore = shardRequest.processedOnPrimary.getAndSet(true); assert executedBefore == false : "request has already been executed on the primary"; + return new Tuple<>(shardRequest, null); + } + + @Override + protected void asyncShardOperationOnPrimary(Void stash, Request shardRequest, ActionListener listener) { listener.onResponse(new Response()); - return shardRequest; } @Override @@ -835,7 +829,7 @@ protected Releasable acquireReplicaOperationLock(ShardId shardId, long primaryTe } } - class NoopReplicationOperation extends ReplicationOperation { + class NoopReplicationOperation extends ReplicationOperation { public NoopReplicationOperation(Request request, ActionListener listener) { super(request, null, listener, true, true, null, null, TransportReplicationActionTests.this.logger, "noop"); From 1ff50c2faf56665d221f00a18d9ac88745904bf5 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Fri, 20 May 2016 15:01:53 -0400 Subject: [PATCH 39/86] Remove Translog#lastWriteLocation I wasn't being careful enough with locks so it wasn't right anyway. Instead this builds a synthetic Tranlog.Location when you call getWriteLocation with much more relaxed equality guarantees. Rather than being equal to the last Translog.Location returned it is simply guaranteed to be greater than the last translog returned and less than the next. 
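To make that ordering contract concrete, here is a minimal, self-contained sketch of
the idea (plain Java, not the real Translog API; the Location fields, the comparison
order, and the method names are assumptions for illustration only): the synthetic
location is built from the bytes written so far, so it compares greater than the
location returned by the last write and less than the location of any later write.

    /** Toy model of a synthetic "last write location". Names are illustrative, not the real API. */
    public class SyntheticLocationSketch {
        /** Simplified location: ordered by generation, then offset, then size. */
        static final class Location implements Comparable<Location> {
            final long generation;
            final long offset;
            final int size;

            Location(long generation, long offset, int size) {
                this.generation = generation;
                this.offset = offset;
                this.size = size;
            }

            @Override
            public int compareTo(Location other) {
                if (generation != other.generation) {
                    return Long.compare(generation, other.generation);
                }
                if (offset != other.offset) {
                    return Long.compare(offset, other.offset);
                }
                return Integer.compare(size, other.size);
            }
        }

        private final long generation = 1;
        private long bytesWritten = 0;

        /** Append an operation and return the exact location it was written to. */
        Location add(int sizeInBytes) {
            Location location = new Location(generation, bytesWritten, sizeInBytes);
            bytesWritten += sizeInBytes;
            return location;
        }

        /**
         * Synthesize a location that sorts after everything already written (offset is
         * bytesWritten - 1 with a maximal size) but before whatever add() returns next
         * (the next write's offset will be at least bytesWritten).
         */
        Location lastWriteLocation() {
            return new Location(generation, bytesWritten - 1, Integer.MAX_VALUE);
        }

        public static void main(String[] args) {
            SyntheticLocationSketch translog = new SyntheticLocationSketch();
            Location first = translog.add(10);
            Location synthetic = translog.lastWriteLocation();
            Location second = translog.add(20);
            System.out.println(synthetic.compareTo(first) > 0);  // true: after the last write
            System.out.println(synthetic.compareTo(second) < 0); // true: before the next write
        }
    }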
--- .../index/translog/Translog.java | 11 +++----- .../index/translog/TranslogTests.java | 27 ++++++++++++++----- 2 files changed, 24 insertions(+), 14 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/index/translog/Translog.java b/core/src/main/java/org/elasticsearch/index/translog/Translog.java index 3a11250695909..c85852f3babe5 100644 --- a/core/src/main/java/org/elasticsearch/index/translog/Translog.java +++ b/core/src/main/java/org/elasticsearch/index/translog/Translog.java @@ -129,11 +129,6 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC private final AtomicBoolean closed = new AtomicBoolean(); private final TranslogConfig config; private final String translogUUID; - /** - * The last location of any write operation. - */ - // NOCOMMIT Decide if we should keep this. We can probably build it synthetically when needed, especially if we're will to relax "size". - private volatile Location lastWriteLocation; /** * Creates a new Translog instance. This method will create a new transaction log unless the given {@link TranslogConfig} has @@ -439,7 +434,6 @@ public Location add(Operation operation) throws IOException { ensureOpen(); Location location = current.add(bytes); assert assertBytesAtLocation(location, bytes); - lastWriteLocation = location; return location; } } catch (AlreadyClosedException | IOException ex) { @@ -454,11 +448,12 @@ public Location add(Operation operation) throws IOException { } /** - * The last location that was written to the translog. + * The a {@linkplain Location} that will sort after the {@linkplain Location} returned by the last write but before any locations which + * can be returned by the next write. */ public Location getLastWriteLocation() { try (ReleasableLock lock = readLock.acquire()) { - return lastWriteLocation; + return new Location(current.generation, current.sizeInBytes() - 1, Integer.MAX_VALUE); } } diff --git a/core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java b/core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java index 4a08dd8026bc7..19c2223a15ac0 100644 --- a/core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java +++ b/core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java @@ -45,6 +45,7 @@ import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.index.VersionType; import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.index.translog.Translog.Location; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.IndexSettingsModule; import org.hamcrest.Matchers; @@ -205,26 +206,40 @@ private String randomNonTranslogPatternString(int min, int max) { } public void testRead() throws IOException { - assertNull(translog.getLastWriteLocation()); + Location loc0 = translog.getLastWriteLocation(); + assertNotNull(loc0); + Translog.Location loc1 = translog.add(new Translog.Index("test", "1", new byte[]{1})); - assertEquals(loc1, translog.getLastWriteLocation()); + assertThat(loc1, greaterThan(loc0)); + assertThat(translog.getLastWriteLocation(), greaterThan(loc1)); Translog.Location loc2 = translog.add(new Translog.Index("test", "2", new byte[]{2})); - assertEquals(loc2, translog.getLastWriteLocation()); assertThat(loc2, greaterThan(loc1)); + assertThat(translog.getLastWriteLocation(), greaterThan(loc2)); assertThat(translog.read(loc1).getSource().source.toBytesArray(), equalTo(new BytesArray(new byte[]{1}))); 
assertThat(translog.read(loc2).getSource().source.toBytesArray(), equalTo(new BytesArray(new byte[]{2}))); + + Translog.Location lastLocBeforeSync = translog.getLastWriteLocation(); translog.sync(); - assertEquals(loc2, translog.getLastWriteLocation()); + assertEquals(lastLocBeforeSync, translog.getLastWriteLocation()); assertThat(translog.read(loc1).getSource().source.toBytesArray(), equalTo(new BytesArray(new byte[]{1}))); assertThat(translog.read(loc2).getSource().source.toBytesArray(), equalTo(new BytesArray(new byte[]{2}))); + Translog.Location loc3 = translog.add(new Translog.Index("test", "2", new byte[]{3})); - assertEquals(loc3, translog.getLastWriteLocation()); assertThat(loc3, greaterThan(loc2)); + assertThat(translog.getLastWriteLocation(), greaterThan(loc3)); assertThat(translog.read(loc3).getSource().source.toBytesArray(), equalTo(new BytesArray(new byte[]{3}))); + + lastLocBeforeSync = translog.getLastWriteLocation(); translog.sync(); + assertEquals(lastLocBeforeSync, translog.getLastWriteLocation()); assertThat(translog.read(loc3).getSource().source.toBytesArray(), equalTo(new BytesArray(new byte[]{3}))); translog.prepareCommit(); - assertEquals(loc3, translog.getLastWriteLocation()); + /* + * The commit adds to the lastWriteLocation even though is isn't really a write. This is just an implementation artifact but it can + * safely be ignored because the lastWriteLocation continues to be greater than the Location returned from the last write operation + * and less than the location of the next write operation. + */ + assertThat(translog.getLastWriteLocation(), greaterThan(lastLocBeforeSync)); assertThat(translog.read(loc3).getSource().source.toBytesArray(), equalTo(new BytesArray(new byte[]{3}))); translog.commit(); assertNull(translog.read(loc1)); From 91149e0580233bf79c2273b419fe9374ca746648 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Fri, 20 May 2016 15:17:40 -0400 Subject: [PATCH 40/86] Finally! 
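The change below wraps the primary-failure path in try/finally. As a minimal
standalone sketch of that pattern (class and field names other than
decPendingAndFinishIfNeeded are illustrative, not the actual ReplicationOperation
code): even if recording the failure throws, the pending-operation countdown still
completes, so the overall request cannot hang.

    import java.util.ArrayList;
    import java.util.List;
    import java.util.concurrent.atomic.AtomicInteger;

    /** Sketch of "always finish the countdown" on primary failure; illustrative names. */
    public class DecrementOnFailureSketch {
        private final AtomicInteger pendingShards = new AtomicInteger(1);
        private final List<String> failures = new ArrayList<>();

        void onPrimaryFailure(Throwable primaryException) {
            try {
                // Recording the failure may itself throw (message formatting, logging, ...).
                failures.add("failed to perform operation on primary: " + primaryException.getMessage());
            } finally {
                // Always decrement, otherwise the request would never be completed.
                decPendingAndFinishIfNeeded();
            }
        }

        void decPendingAndFinishIfNeeded() {
            if (pendingShards.decrementAndGet() == 0) {
                System.out.println("finished with " + failures.size() + " failure(s)");
            }
        }

        public static void main(String[] args) {
            new DecrementOnFailureSketch().onPrimaryFailure(new RuntimeException("simulated"));
        }
    }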
--- .../support/replication/ReplicationOperation.java | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java b/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java index b23019a801da8..9d7b993d5437a 100644 --- a/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java +++ b/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java @@ -151,12 +151,15 @@ public void onResponse(Response response) { @Override public void onFailure(Throwable primaryException) { - RestStatus restStatus = ExceptionsHelper.status(primaryException); - shardReplicaFailures.add(new ReplicationResponse.ShardInfo.Failure(primaryRouting.shardId(), primaryRouting.currentNodeId(), - primaryException, restStatus, false)); - String message = String.format(Locale.ROOT, "failed to perform %s on primary %s", opType, primaryRouting); - logger.warn("[{}] {}", primaryException, primaryRouting.shardId(), message); - decPendingAndFinishIfNeeded(); + try { + RestStatus restStatus = ExceptionsHelper.status(primaryException); + shardReplicaFailures.add(new ReplicationResponse.ShardInfo.Failure(primaryRouting.shardId(), primaryRouting.currentNodeId(), + primaryException, restStatus, false)); + String message = String.format(Locale.ROOT, "failed to perform %s on primary %s", opType, primaryRouting); + logger.warn("[{}] {}", primaryException, primaryRouting.shardId(), message); + } finally { + decPendingAndFinishIfNeeded(); + } } }); } From b8cadcef565908b276484f7f5f988fd58b38d8b6 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Fri, 20 May 2016 16:17:20 -0400 Subject: [PATCH 41/86] Docs --- docs/reference/docs.asciidoc | 4 ++ .../docs/block-until-refresh.asciidoc | 42 +++++++++++++++++++ docs/reference/docs/bulk.asciidoc | 10 +++++ docs/reference/docs/delete.asciidoc | 10 +++++ docs/reference/docs/index_.asciidoc | 10 +++++ docs/reference/docs/update.asciidoc | 11 ++++- docs/reference/index-modules.asciidoc | 5 +++ 7 files changed, 91 insertions(+), 1 deletion(-) create mode 100644 docs/reference/docs/block-until-refresh.asciidoc diff --git a/docs/reference/docs.asciidoc b/docs/reference/docs.asciidoc index 465d2e60c7726..295cd95f6dae7 100644 --- a/docs/reference/docs.asciidoc +++ b/docs/reference/docs.asciidoc @@ -15,6 +15,8 @@ This section describes the following CRUD APIs: .Multi-document APIs * <> * <> +* <> +* <> NOTE: All CRUD APIs are single-index APIs. The `index` parameter accepts a single index name, or an `alias` which points to a single index. @@ -40,3 +42,5 @@ include::docs/reindex.asciidoc[] include::docs/termvectors.asciidoc[] include::docs/multi-termvectors.asciidoc[] + +include::docs/block-until-refresh.asciidoc[] diff --git a/docs/reference/docs/block-until-refresh.asciidoc b/docs/reference/docs/block-until-refresh.asciidoc new file mode 100644 index 0000000000000..bae90d6a38cd9 --- /dev/null +++ b/docs/reference/docs/block-until-refresh.asciidoc @@ -0,0 +1,42 @@ +[[block-until-refresh]] +== Block Until Refresh + +The <>, <>, <>, and +<> APIs support setting `block_until_refresh` to `true` which +will cause Elasticsearch to wait for the changes made by the request to become +visible to search before returning. 
The operation that makes the changes
+visible is called a refresh and Elasticsearch automatically performs one on
+every shard that has been modified every `index.refresh_interval`, which
+defaults to one second. That setting is <>.
+
+Refreshes can also be forced with the <> and that will also
+cause any requests that are blocking until refresh to return. Setting
+`refresh` to `true` in any of the APIs that support it will also force a
+refresh, but that refresh typically only applies to the shards affected by the request.
+
+=== Compared to Setting `refresh` to `true`
+
+All of the APIs that support `block_until_refresh` also support `refresh`, but
+`refresh` is a much heavier operation, causing Elasticsearch to immediately
+make all pending changes visible for search on the shards affected by the
+request. Frequently forcing refreshes causes Elasticsearch to make
+many small segments. Searching and creating many small segments is much less
+efficient than searching a single large segment. And small segments must
+eventually be merged into larger segments, causing yet more work.
+
+On the other hand, setting `refresh` causes the request to return as soon as
+the changes can be made visible. Setting `block_until_refresh` slots the
+request into a process that only runs every second by default, and a second
+can be a long time. So the tradeoff is clear: `block_until_refresh` is
+slower but puts less load on Elasticsearch.
+
+=== Forcing a Refresh Anyway
+
+If a `block_until_refresh` request comes in when there are already
+`index.max_refresh_listeners` (defaults to 1000) requests
+blocking until refresh then that request will behave just as though it had
+`refresh` set to `true` on it. It will force a refresh. This keeps the promise
+that when a `block_until_refresh` request returns its changes are visible
+for search while preventing unchecked resource usage for blocked requests. If
+a request forced a refresh because it ran out of listener slots then its
+response will contain `"forced_refresh": true`.
diff --git a/docs/reference/docs/bulk.asciidoc b/docs/reference/docs/bulk.asciidoc
index 8e7045fac12bc..c0119881cb790 100644
--- a/docs/reference/docs/bulk.asciidoc
+++ b/docs/reference/docs/bulk.asciidoc
@@ -174,6 +174,16 @@ expire.
 Setting it to `true` can trigger additional load, and may slow down
 indexing. Due to its costly nature, the `refresh` parameter is set on the bulk
 request level and is not supported on each individual bulk item.
 
+[float]
+[[bulk-block-until-refresh]]
+=== Block Until Refresh
+
+If `refresh` is too heavy for your use case then you can instead set
+`block_until_refresh` to `true` to wait until the operation has been made
+visible for search by a refresh. This is *much* lower overhead than `refresh`
+but the downside is that the request will wait for the next refresh. See
+<> for more details.
+
 [float]
 [[bulk-update]]
 === Update
diff --git a/docs/reference/docs/delete.asciidoc b/docs/reference/docs/delete.asciidoc
index f36ffe7abf9d9..f7d5fcaa22de7 100644
--- a/docs/reference/docs/delete.asciidoc
+++ b/docs/reference/docs/delete.asciidoc
@@ -119,6 +119,16 @@ searchable.
 Setting it to `true` should be done after careful thought and
 verification that this does not cause a heavy load on the system (and slows
 down indexing).
+[float] +[[delete-block-until-refresh]] +=== Block Until Refresh + +If `refresh` is too heavy for your use case then you can instead set +`block_until_refresh` to `true` to wait until the operation has been made +visible for search by a refresh. This is *much* lower overhead than `refresh` +but the downside is that the request will wait for the next refresh. See +<> for more details. + [float] [[delete-timeout]] === Timeout diff --git a/docs/reference/docs/index_.asciidoc b/docs/reference/docs/index_.asciidoc index 27ac85b959542..b04b8b4b80e55 100644 --- a/docs/reference/docs/index_.asciidoc +++ b/docs/reference/docs/index_.asciidoc @@ -362,6 +362,16 @@ poor performance, both from an indexing and a search standpoint. Note, getting a document using the get API is completely realtime and doesn't require a refresh. +[float] +[[index-block-until-refresh]] +=== Block Until Refresh + +If `refresh` is too heavy for your use case then you can instead set +`block_until_refresh` to `true` to wait until the operation has been made +visible for search by a refresh. This is *much* lower overhead than `refresh` +but the downside is that the request will wait for the next refresh. See +<> for more details. + [float] [[index-noop]] === Noop Updates diff --git a/docs/reference/docs/update.asciidoc b/docs/reference/docs/update.asciidoc index 405f9b0494bf0..1a0bdc59434ab 100644 --- a/docs/reference/docs/update.asciidoc +++ b/docs/reference/docs/update.asciidoc @@ -237,7 +237,16 @@ The write consistency of the index/delete operation. Refresh the relevant primary and replica shards (not the whole index) immediately after the operation occurs, so that the updated document appears -in search results immediately. +in search results immediately. This should *ONLY* be done after careful thought +and verification that it does not lead to poor performance, both from an +indexing and a search standpoint. + +`block_until_refresh`:: + +Wait to reply to the request until the primary and replica shards have been +refreshed to make this operation's changes visible for search. Use this if +`refresh` is too inefficient and you can tolerate slower responses. See +<> for more details. `fields`:: diff --git a/docs/reference/index-modules.asciidoc b/docs/reference/index-modules.asciidoc index c7119af416884..e66c55125aeba 100644 --- a/docs/reference/index-modules.asciidoc +++ b/docs/reference/index-modules.asciidoc @@ -136,6 +136,11 @@ specific index module: experimental[] Disables the purge of <> on the current index. +`index.max_refresh_listeners`:: + + Maximum number of refresh listeners available on each shard of the index. + These listeners are used to implement <>. 
+ [float] === Settings in other index modules From 13807ad10b6f5ecd39f98c9f20874f9f352c5bc2 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Fri, 20 May 2016 22:53:15 -0400 Subject: [PATCH 42/86] Move async parts of replica operation outside of the lock --- .../flush/TransportShardFlushAction.java | 10 +- .../refresh/TransportShardRefreshAction.java | 7 +- .../replication/ReplicationOperation.java | 4 +- .../TransportReplicatedMutationAction.java | 21 +++-- .../TransportReplicationAction.java | 92 +++++++++---------- .../elasticsearch/index/shard/IndexShard.java | 2 +- .../TransportReplicationActionTests.java | 10 +- 7 files changed, 71 insertions(+), 75 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportShardFlushAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportShardFlushAction.java index dc41148d3bbc7..b3b0c60bc29a2 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportShardFlushAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportShardFlushAction.java @@ -36,10 +36,8 @@ import org.elasticsearch.transport.TransportResponse; import org.elasticsearch.transport.TransportService; -/** - * - */ -public class TransportShardFlushAction extends TransportReplicationAction { +public class TransportShardFlushAction + extends TransportReplicationAction { public static final String NAME = FlushAction.NAME + "[s]"; @@ -70,11 +68,11 @@ protected void asyncShardOperationOnPrimary(Void stash, ShardFlushRequest shardR } @Override - protected void shardOperationOnReplica(ShardFlushRequest request, ActionListener listener) { + protected Void shardOperationOnReplica(ShardFlushRequest request) { IndexShard indexShard = indicesService.indexServiceSafe(request.shardId().getIndex()).getShard(request.shardId().id()); indexShard.flush(request.getRequest()); logger.trace("{} flush request executed on replica", indexShard.shardId()); - listener.onResponse(TransportResponse.Empty.INSTANCE); + return null; } @Override diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportShardRefreshAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportShardRefreshAction.java index d07d338c1803d..54d946741bdae 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportShardRefreshAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportShardRefreshAction.java @@ -35,11 +35,10 @@ import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.TransportResponse; import org.elasticsearch.transport.TransportService; public class TransportShardRefreshAction - extends TransportReplicationAction { + extends TransportReplicationAction { public static final String NAME = RefreshAction.NAME + "[s]"; @@ -71,12 +70,12 @@ protected void asyncShardOperationOnPrimary(Void stash, BasicReplicationRequest } @Override - protected void shardOperationOnReplica(BasicReplicationRequest request, ActionListener listener) { + protected Void shardOperationOnReplica(BasicReplicationRequest request) { final ShardId shardId = request.shardId(); IndexShard indexShard = indicesService.indexServiceSafe(shardId.getIndex()).getShard(shardId.id()); indexShard.refresh("api"); logger.trace("{} refresh request executed on replica", indexShard.shardId()); - 
listener.onResponse(TransportResponse.Empty.INSTANCE); + return null; } @Override diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java b/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java index 9d7b993d5437a..9aae08bdf23a9 100644 --- a/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java +++ b/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java @@ -153,8 +153,8 @@ public void onResponse(Response response) { public void onFailure(Throwable primaryException) { try { RestStatus restStatus = ExceptionsHelper.status(primaryException); - shardReplicaFailures.add(new ReplicationResponse.ShardInfo.Failure(primaryRouting.shardId(), primaryRouting.currentNodeId(), - primaryException, restStatus, false)); + shardReplicaFailures.add(new ReplicationResponse.ShardInfo.Failure(primaryRouting.shardId(), + primaryRouting.currentNodeId(), primaryException, restStatus, false)); String message = String.format(Locale.ROOT, "failed to perform %s on primary %s", opType, primaryRouting); logger.warn("[{}] {}", primaryException, primaryRouting.shardId(), message); } finally { diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicatedMutationAction.java b/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicatedMutationAction.java index 10d194ac1351b..2160f51eb9407 100644 --- a/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicatedMutationAction.java +++ b/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicatedMutationAction.java @@ -35,6 +35,7 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportResponse; import org.elasticsearch.transport.TransportService; +import org.elasticsearch.transport.TransportResponse.Empty; import java.util.function.Supplier; @@ -44,7 +45,7 @@ public abstract class TransportReplicatedMutationAction< Request extends ReplicatedMutationRequest, Response extends ReplicatedMutationResponse - > extends TransportReplicationAction, Request, Response> { + > extends TransportReplicationAction, Request, Translog.Location, Response> { protected TransportReplicatedMutationAction(Settings settings, String actionName, TransportService transportService, ClusterService clusterService, IndicesService indicesService, ThreadPool threadPool, ShardStateAction shardStateAction, @@ -85,9 +86,9 @@ protected Tuple> shardOperationOnPrimary(Request @Override protected void asyncShardOperationOnPrimary(WriteResult result, Request request, ActionListener listener) { - IndexService indexService = indicesService.indexServiceSafe(request.shardId().getIndex()); - IndexShard indexShard = indexService.getShard(request.shardId().id()); if (request.shouldBlockUntilRefresh() && false == request.isRefresh()) { + IndexService indexService = indicesService.indexServiceSafe(request.shardId().getIndex()); + IndexShard indexShard = indexService.getShard(request.shardId().id()); indexShard.addRefreshListener(result.location, forcedRefresh -> { if (forcedRefresh) { logger.warn("block_until_refresh request ran out of slots and forced a refresh: [{}]", request); @@ -101,16 +102,24 @@ protected void asyncShardOperationOnPrimary(WriteResult result, Reques } @Override - protected final void shardOperationOnReplica(Request request, ActionListener listener) { - final ShardId shardId = request.shardId(); + protected 
final Translog.Location shardOperationOnReplica(Request request) { + ShardId shardId = request.shardId(); IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex()); IndexShard indexShard = indexService.getShard(shardId.id()); Translog.Location location = onReplicaShard(request, indexShard); + // NOCOMMIT should this move into the asyncShardOperationOnReplica? It'd be outside of the lock. processAfterWrite(request.isRefresh(), indexShard, location); + return location; + } + + @Override + protected void asyncShardOperationOnReplica(Translog.Location location, Request request, ActionListener listener) { if (request.shouldBlockUntilRefresh() && false == request.isRefresh() && location != null) { + ShardId shardId = request.shardId(); + IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex()); + IndexShard indexShard = indexService.getShard(shardId.id()); indexShard.addRefreshListener(location, forcedRefresh -> { logger.warn("block_until_refresh request ran out of slots and forced a refresh: [{}]", request); - // TODO mark the response?!? listener.onResponse(TransportResponse.Empty.INSTANCE); }); } else { diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java b/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java index c5c13eb04f0c2..b090aad70eff2 100644 --- a/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java +++ b/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java @@ -69,7 +69,6 @@ import java.io.IOException; import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.atomic.AtomicInteger; import java.util.function.Consumer; import java.util.function.Supplier; @@ -83,8 +82,9 @@ */ public abstract class TransportReplicationAction< Request extends ReplicationRequest, - AsyncStash, + PrimaryAsyncStash, ReplicaRequest extends ReplicationRequest, + ReplicaAsyncStash, Response extends ReplicationResponse > extends TransportAction { @@ -157,7 +157,7 @@ protected void resolveRequest(MetaData metaData, IndexMetaData indexMetaData, Re * @return Tuple of the request to send to the replicas and the information needed by the { * {@link #asyncShardOperationOnPrimary(Object, ReplicationRequest, ActionListener)} to do its job */ - protected abstract Tuple shardOperationOnPrimary(Request shardRequest) throws Exception; + protected abstract Tuple shardOperationOnPrimary(Request shardRequest) throws Exception; /** * Asynchronous portion of primary operation on node with primary copy @@ -166,13 +166,21 @@ protected void resolveRequest(MetaData metaData, IndexMetaData indexMetaData, Re * @param shardRequest the request to the primary shard * @param listener implementers call this success or failure when the asynchronous operations are complete. */ - protected abstract void asyncShardOperationOnPrimary(AsyncStash stash, Request shardRequest, ActionListener listener); + protected abstract void asyncShardOperationOnPrimary(PrimaryAsyncStash stash, Request shardRequest, ActionListener listener); /** - * Replica operation on nodes with replica copies. While this does take a listener it should not return until it has completed any - * operations that it must take under the shard lock. The listener is for waiting for things like index to become visible in search. + * Replica operation on nodes with replica copies. This is done under a replica operation lock. 
*/ - protected abstract void shardOperationOnReplica(ReplicaRequest shardRequest, ActionListener listener); + protected abstract ReplicaAsyncStash shardOperationOnReplica(ReplicaRequest shardRequest); + + /** + * Asynchronous portion of replica operation on nodes with replica copies. Default implementation assumes there *is no* asynchronous + * portion and just immediately calls the listener. This is done outside of the replica operation lock. + */ + protected void asyncShardOperationOnReplica(ReplicaAsyncStash stash, ReplicaRequest shardRequest, + ActionListener listener) { + listener.onResponse(TransportResponse.Empty.INSTANCE); + } /** * True if write consistency should be checked for an implementation @@ -292,7 +300,7 @@ public void handleException(TransportException exp) { } } - protected ReplicationOperation createReplicatedOperation(Request request, + protected ReplicationOperation createReplicatedOperation(Request request, ActionListener listener, PrimaryShardReference primaryShardReference, boolean executeOnReplicas) { return new ReplicationOperation<>(request, primaryShardReference, listener, executeOnReplicas, checkWriteConsistency(), replicasProxy, clusterService::state, logger, actionName @@ -356,10 +364,6 @@ public RetryOnReplicaException(StreamInput in) throws IOException { } private final class AsyncReplicaAction extends AbstractRunnable { - /** - * The number of operations remaining before we can reply. See javadoc for {@link #operationComplete()} more. - */ - private final AtomicInteger operationsUntilReply = new AtomicInteger(2); private final ReplicaRequest request; private final TransportChannel channel; /** @@ -422,45 +426,31 @@ protected void responseWithFailure(Throwable t) { protected void doRun() throws Exception { setPhase(task, "replica"); assert request.shardId() != null : "request shardId must be set"; + ReplicaAsyncStash stash; try (Releasable ignored = acquireReplicaOperationLock(request.shardId(), request.primaryTerm())) { - shardOperationOnReplica(request, new ActionListener() { - @Override - public void onResponse(Empty response) { - operationComplete(); + stash = shardOperationOnReplica(request); + } + setPhase(task, "replica_async"); + asyncShardOperationOnReplica(stash, request, new ActionListener() { + @Override + public void onResponse(Empty response) { + if (logger.isTraceEnabled()) { + logger.trace("action [{}] completed on shard [{}] for request [{}]", transportReplicaAction, request.shardId(), + request); } - - @Override - public void onFailure(Throwable e) { - AsyncReplicaAction.this.onFailure(e); + setPhase(task, "finished"); + try { + channel.sendResponse(TransportResponse.Empty.INSTANCE); + } catch (Exception e) { + onFailure(e); } - }); - } - operationComplete(); - } + } - /** - * Handle a portion of the operation finishing. Called twice: once after the operation returns and the lock is released and once - * after the listener returns. We only reply over the channel when both have finished but we don't know in which order they will - * finish. - * - * The reason we can't reply until both is finished is a bit unclear - but the advantage of doing it this ways is that we never - * ever ever reply while we have the operation lock. And it is just a good idea in general not to do network IO while you have a - * lock. So that is something. 
- */ - private void operationComplete() { - if (operationsUntilReply.decrementAndGet() != 0) { - return; - } - if (logger.isTraceEnabled()) { - logger.trace("action [{}] completed on shard [{}] for request [{}]", transportReplicaAction, request.shardId(), - request); - } - setPhase(task, "finished"); - try { - channel.sendResponse(TransportResponse.Empty.INSTANCE); - } catch (Exception e) { - onFailure(e); - } + @Override + public void onFailure(Throwable e) { + AsyncReplicaAction.this.onFailure(e); + } + }); } } @@ -748,7 +738,7 @@ protected boolean shouldExecuteReplication(Settings settings) { return IndexMetaData.isIndexUsingShadowReplicas(settings) == false; } - class PrimaryShardReference implements ReplicationOperation.Primary, Releasable { + class PrimaryShardReference implements ReplicationOperation.Primary, Releasable { private final IndexShard indexShard; private final Releasable operationLock; @@ -777,14 +767,14 @@ public void failShard(String reason, Throwable e) { } @Override - public Tuple perform(Request request) throws Exception { - Tuple result = shardOperationOnPrimary(request); + public Tuple perform(Request request) throws Exception { + Tuple result = shardOperationOnPrimary(request); result.v1().primaryTerm(indexShard.getPrimaryTerm()); return result; } @Override - public void performAsync(AsyncStash stash, Request request, ActionListener listener) throws Exception { + public void performAsync(PrimaryAsyncStash stash, Request request, ActionListener listener) throws Exception { asyncShardOperationOnPrimary(stash, request, listener); } diff --git a/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java index f1da00013e03f..dff34f6eb57bf 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java +++ b/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java @@ -1427,7 +1427,7 @@ public Releasable acquirePrimaryOperationLock() { } /** - * acquires operation log. If the given primary term is lower then the one in {@link #shardRouting} + * Acquires operation lock. If the given primary term is lower then the one in {@link #shardRouting} * an {@link IllegalArgumentException} is thrown. 
*/ public Releasable acquireReplicaOperationLock(long opPrimaryTerm) { diff --git a/core/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java b/core/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java index 9ff56b067a0a1..343c5fee977ed 100644 --- a/core/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java +++ b/core/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java @@ -668,13 +668,13 @@ public void testReplicasCounter() throws Exception { final ReplicationTask task = maybeTask(); Action action = new Action(Settings.EMPTY, "testActionWithExceptions", transportService, clusterService, threadPool) { @Override - protected void shardOperationOnReplica(Request request, ActionListener listener) { + protected Void shardOperationOnReplica(Request request) { assertIndexShardCounter(1); assertPhase(task, "replica"); if (throwException) { throw new ElasticsearchException("simulated"); } - super.shardOperationOnReplica(request, listener); + return super.shardOperationOnReplica(request); } }; final Action.ReplicaOperationTransportHandler replicaOperationTransportHandler = action.new ReplicaOperationTransportHandler(); @@ -749,7 +749,7 @@ public void readFrom(StreamInput in) throws IOException { static class Response extends ReplicationResponse { } - class Action extends TransportReplicationAction { + class Action extends TransportReplicationAction { Action(Settings settings, String actionName, TransportService transportService, ClusterService clusterService, @@ -778,9 +778,9 @@ protected void asyncShardOperationOnPrimary(Void stash, Request shardRequest, Ac } @Override - protected void shardOperationOnReplica(Request request, ActionListener listener) { + protected Void shardOperationOnReplica(Request request) { request.processedOnReplicas.incrementAndGet(); - listener.onResponse(TransportResponse.Empty.INSTANCE); + return null; } @Override From 87be7eaed09a274cc6a99d1a3da81d2d7bf9dd64 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Mon, 23 May 2016 15:49:13 -0400 Subject: [PATCH 43/86] Revert "Move async parts of replica operation outside of the lock" This reverts commit 13807ad10b6f5ecd39f98c9f20874f9f352c5bc2. 
--- .../flush/TransportShardFlushAction.java | 10 +- .../refresh/TransportShardRefreshAction.java | 7 +- .../replication/ReplicationOperation.java | 4 +- .../TransportReplicatedMutationAction.java | 21 ++--- .../TransportReplicationAction.java | 92 ++++++++++--------- .../elasticsearch/index/shard/IndexShard.java | 2 +- .../TransportReplicationActionTests.java | 10 +- 7 files changed, 75 insertions(+), 71 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportShardFlushAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportShardFlushAction.java index b3b0c60bc29a2..dc41148d3bbc7 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportShardFlushAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportShardFlushAction.java @@ -36,8 +36,10 @@ import org.elasticsearch.transport.TransportResponse; import org.elasticsearch.transport.TransportService; -public class TransportShardFlushAction - extends TransportReplicationAction { +/** + * + */ +public class TransportShardFlushAction extends TransportReplicationAction { public static final String NAME = FlushAction.NAME + "[s]"; @@ -68,11 +70,11 @@ protected void asyncShardOperationOnPrimary(Void stash, ShardFlushRequest shardR } @Override - protected Void shardOperationOnReplica(ShardFlushRequest request) { + protected void shardOperationOnReplica(ShardFlushRequest request, ActionListener listener) { IndexShard indexShard = indicesService.indexServiceSafe(request.shardId().getIndex()).getShard(request.shardId().id()); indexShard.flush(request.getRequest()); logger.trace("{} flush request executed on replica", indexShard.shardId()); - return null; + listener.onResponse(TransportResponse.Empty.INSTANCE); } @Override diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportShardRefreshAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportShardRefreshAction.java index 54d946741bdae..d07d338c1803d 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportShardRefreshAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportShardRefreshAction.java @@ -35,10 +35,11 @@ import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportResponse; import org.elasticsearch.transport.TransportService; public class TransportShardRefreshAction - extends TransportReplicationAction { + extends TransportReplicationAction { public static final String NAME = RefreshAction.NAME + "[s]"; @@ -70,12 +71,12 @@ protected void asyncShardOperationOnPrimary(Void stash, BasicReplicationRequest } @Override - protected Void shardOperationOnReplica(BasicReplicationRequest request) { + protected void shardOperationOnReplica(BasicReplicationRequest request, ActionListener listener) { final ShardId shardId = request.shardId(); IndexShard indexShard = indicesService.indexServiceSafe(shardId.getIndex()).getShard(shardId.id()); indexShard.refresh("api"); logger.trace("{} refresh request executed on replica", indexShard.shardId()); - return null; + listener.onResponse(TransportResponse.Empty.INSTANCE); } @Override diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java 
b/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java index 9aae08bdf23a9..9d7b993d5437a 100644 --- a/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java +++ b/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java @@ -153,8 +153,8 @@ public void onResponse(Response response) { public void onFailure(Throwable primaryException) { try { RestStatus restStatus = ExceptionsHelper.status(primaryException); - shardReplicaFailures.add(new ReplicationResponse.ShardInfo.Failure(primaryRouting.shardId(), - primaryRouting.currentNodeId(), primaryException, restStatus, false)); + shardReplicaFailures.add(new ReplicationResponse.ShardInfo.Failure(primaryRouting.shardId(), primaryRouting.currentNodeId(), + primaryException, restStatus, false)); String message = String.format(Locale.ROOT, "failed to perform %s on primary %s", opType, primaryRouting); logger.warn("[{}] {}", primaryException, primaryRouting.shardId(), message); } finally { diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicatedMutationAction.java b/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicatedMutationAction.java index 2160f51eb9407..10d194ac1351b 100644 --- a/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicatedMutationAction.java +++ b/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicatedMutationAction.java @@ -35,7 +35,6 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportResponse; import org.elasticsearch.transport.TransportService; -import org.elasticsearch.transport.TransportResponse.Empty; import java.util.function.Supplier; @@ -45,7 +44,7 @@ public abstract class TransportReplicatedMutationAction< Request extends ReplicatedMutationRequest, Response extends ReplicatedMutationResponse - > extends TransportReplicationAction, Request, Translog.Location, Response> { + > extends TransportReplicationAction, Request, Response> { protected TransportReplicatedMutationAction(Settings settings, String actionName, TransportService transportService, ClusterService clusterService, IndicesService indicesService, ThreadPool threadPool, ShardStateAction shardStateAction, @@ -86,9 +85,9 @@ protected Tuple> shardOperationOnPrimary(Request @Override protected void asyncShardOperationOnPrimary(WriteResult result, Request request, ActionListener listener) { + IndexService indexService = indicesService.indexServiceSafe(request.shardId().getIndex()); + IndexShard indexShard = indexService.getShard(request.shardId().id()); if (request.shouldBlockUntilRefresh() && false == request.isRefresh()) { - IndexService indexService = indicesService.indexServiceSafe(request.shardId().getIndex()); - IndexShard indexShard = indexService.getShard(request.shardId().id()); indexShard.addRefreshListener(result.location, forcedRefresh -> { if (forcedRefresh) { logger.warn("block_until_refresh request ran out of slots and forced a refresh: [{}]", request); @@ -102,24 +101,16 @@ protected void asyncShardOperationOnPrimary(WriteResult result, Reques } @Override - protected final Translog.Location shardOperationOnReplica(Request request) { - ShardId shardId = request.shardId(); + protected final void shardOperationOnReplica(Request request, ActionListener listener) { + final ShardId shardId = request.shardId(); IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex()); 
IndexShard indexShard = indexService.getShard(shardId.id()); Translog.Location location = onReplicaShard(request, indexShard); - // NOCOMMIT should this move into the asyncShardOperationOnReplica? It'd be outside of the lock. processAfterWrite(request.isRefresh(), indexShard, location); - return location; - } - - @Override - protected void asyncShardOperationOnReplica(Translog.Location location, Request request, ActionListener listener) { if (request.shouldBlockUntilRefresh() && false == request.isRefresh() && location != null) { - ShardId shardId = request.shardId(); - IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex()); - IndexShard indexShard = indexService.getShard(shardId.id()); indexShard.addRefreshListener(location, forcedRefresh -> { logger.warn("block_until_refresh request ran out of slots and forced a refresh: [{}]", request); + // TODO mark the response?!? listener.onResponse(TransportResponse.Empty.INSTANCE); }); } else { diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java b/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java index b090aad70eff2..c5c13eb04f0c2 100644 --- a/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java +++ b/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java @@ -69,6 +69,7 @@ import java.io.IOException; import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; import java.util.function.Consumer; import java.util.function.Supplier; @@ -82,9 +83,8 @@ */ public abstract class TransportReplicationAction< Request extends ReplicationRequest, - PrimaryAsyncStash, + AsyncStash, ReplicaRequest extends ReplicationRequest, - ReplicaAsyncStash, Response extends ReplicationResponse > extends TransportAction { @@ -157,7 +157,7 @@ protected void resolveRequest(MetaData metaData, IndexMetaData indexMetaData, Re * @return Tuple of the request to send to the replicas and the information needed by the { * {@link #asyncShardOperationOnPrimary(Object, ReplicationRequest, ActionListener)} to do its job */ - protected abstract Tuple shardOperationOnPrimary(Request shardRequest) throws Exception; + protected abstract Tuple shardOperationOnPrimary(Request shardRequest) throws Exception; /** * Asynchronous portion of primary operation on node with primary copy @@ -166,21 +166,13 @@ protected void resolveRequest(MetaData metaData, IndexMetaData indexMetaData, Re * @param shardRequest the request to the primary shard * @param listener implementers call this success or failure when the asynchronous operations are complete. */ - protected abstract void asyncShardOperationOnPrimary(PrimaryAsyncStash stash, Request shardRequest, ActionListener listener); + protected abstract void asyncShardOperationOnPrimary(AsyncStash stash, Request shardRequest, ActionListener listener); /** - * Replica operation on nodes with replica copies. This is done under a replica operation lock. + * Replica operation on nodes with replica copies. While this does take a listener it should not return until it has completed any + * operations that it must take under the shard lock. The listener is for waiting for things like index to become visible in search. */ - protected abstract ReplicaAsyncStash shardOperationOnReplica(ReplicaRequest shardRequest); - - /** - * Asynchronous portion of replica operation on nodes with replica copies. 
Default implementation assumes there *is no* asynchronous - * portion and just immediately calls the listener. This is done outside of the replica operation lock. - */ - protected void asyncShardOperationOnReplica(ReplicaAsyncStash stash, ReplicaRequest shardRequest, - ActionListener listener) { - listener.onResponse(TransportResponse.Empty.INSTANCE); - } + protected abstract void shardOperationOnReplica(ReplicaRequest shardRequest, ActionListener listener); /** * True if write consistency should be checked for an implementation @@ -300,7 +292,7 @@ public void handleException(TransportException exp) { } } - protected ReplicationOperation createReplicatedOperation(Request request, + protected ReplicationOperation createReplicatedOperation(Request request, ActionListener listener, PrimaryShardReference primaryShardReference, boolean executeOnReplicas) { return new ReplicationOperation<>(request, primaryShardReference, listener, executeOnReplicas, checkWriteConsistency(), replicasProxy, clusterService::state, logger, actionName @@ -364,6 +356,10 @@ public RetryOnReplicaException(StreamInput in) throws IOException { } private final class AsyncReplicaAction extends AbstractRunnable { + /** + * The number of operations remaining before we can reply. See javadoc for {@link #operationComplete()} more. + */ + private final AtomicInteger operationsUntilReply = new AtomicInteger(2); private final ReplicaRequest request; private final TransportChannel channel; /** @@ -426,31 +422,45 @@ protected void responseWithFailure(Throwable t) { protected void doRun() throws Exception { setPhase(task, "replica"); assert request.shardId() != null : "request shardId must be set"; - ReplicaAsyncStash stash; try (Releasable ignored = acquireReplicaOperationLock(request.shardId(), request.primaryTerm())) { - stash = shardOperationOnReplica(request); - } - setPhase(task, "replica_async"); - asyncShardOperationOnReplica(stash, request, new ActionListener() { - @Override - public void onResponse(Empty response) { - if (logger.isTraceEnabled()) { - logger.trace("action [{}] completed on shard [{}] for request [{}]", transportReplicaAction, request.shardId(), - request); + shardOperationOnReplica(request, new ActionListener() { + @Override + public void onResponse(Empty response) { + operationComplete(); } - setPhase(task, "finished"); - try { - channel.sendResponse(TransportResponse.Empty.INSTANCE); - } catch (Exception e) { - onFailure(e); + + @Override + public void onFailure(Throwable e) { + AsyncReplicaAction.this.onFailure(e); } - } + }); + } + operationComplete(); + } - @Override - public void onFailure(Throwable e) { - AsyncReplicaAction.this.onFailure(e); - } - }); + /** + * Handle a portion of the operation finishing. Called twice: once after the operation returns and the lock is released and once + * after the listener returns. We only reply over the channel when both have finished but we don't know in which order they will + * finish. + * + * The reason we can't reply until both is finished is a bit unclear - but the advantage of doing it this ways is that we never + * ever ever reply while we have the operation lock. And it is just a good idea in general not to do network IO while you have a + * lock. So that is something. 
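The restored comment above describes a two-way countdown: the reply is sent only after both the operation lock has been released and the replica listener has fired, in whichever order those two things happen. A self-contained sketch of that pattern in plain Java:

import java.util.concurrent.atomic.AtomicInteger;

final class ReplyAfterBothSketch {
    private final AtomicInteger operationsUntilReply = new AtomicInteger(2);
    private final Runnable sendResponse;

    ReplyAfterBothSketch(Runnable sendResponse) {
        this.sendResponse = sendResponse;
    }

    /** Called once after the lock is released and once after the listener fires, in either order. */
    void operationComplete() {
        if (operationsUntilReply.decrementAndGet() == 0) {
            sendResponse.run();           // only reached after both completions, never under the lock
        }
    }
}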
+ */ + private void operationComplete() { + if (operationsUntilReply.decrementAndGet() != 0) { + return; + } + if (logger.isTraceEnabled()) { + logger.trace("action [{}] completed on shard [{}] for request [{}]", transportReplicaAction, request.shardId(), + request); + } + setPhase(task, "finished"); + try { + channel.sendResponse(TransportResponse.Empty.INSTANCE); + } catch (Exception e) { + onFailure(e); + } } } @@ -738,7 +748,7 @@ protected boolean shouldExecuteReplication(Settings settings) { return IndexMetaData.isIndexUsingShadowReplicas(settings) == false; } - class PrimaryShardReference implements ReplicationOperation.Primary, Releasable { + class PrimaryShardReference implements ReplicationOperation.Primary, Releasable { private final IndexShard indexShard; private final Releasable operationLock; @@ -767,14 +777,14 @@ public void failShard(String reason, Throwable e) { } @Override - public Tuple perform(Request request) throws Exception { - Tuple result = shardOperationOnPrimary(request); + public Tuple perform(Request request) throws Exception { + Tuple result = shardOperationOnPrimary(request); result.v1().primaryTerm(indexShard.getPrimaryTerm()); return result; } @Override - public void performAsync(PrimaryAsyncStash stash, Request request, ActionListener listener) throws Exception { + public void performAsync(AsyncStash stash, Request request, ActionListener listener) throws Exception { asyncShardOperationOnPrimary(stash, request, listener); } diff --git a/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java index dff34f6eb57bf..f1da00013e03f 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java +++ b/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java @@ -1427,7 +1427,7 @@ public Releasable acquirePrimaryOperationLock() { } /** - * Acquires operation lock. If the given primary term is lower then the one in {@link #shardRouting} + * acquires operation log. If the given primary term is lower then the one in {@link #shardRouting} * an {@link IllegalArgumentException} is thrown. 
*/ public Releasable acquireReplicaOperationLock(long opPrimaryTerm) { diff --git a/core/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java b/core/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java index 343c5fee977ed..9ff56b067a0a1 100644 --- a/core/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java +++ b/core/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java @@ -668,13 +668,13 @@ public void testReplicasCounter() throws Exception { final ReplicationTask task = maybeTask(); Action action = new Action(Settings.EMPTY, "testActionWithExceptions", transportService, clusterService, threadPool) { @Override - protected Void shardOperationOnReplica(Request request) { + protected void shardOperationOnReplica(Request request, ActionListener listener) { assertIndexShardCounter(1); assertPhase(task, "replica"); if (throwException) { throw new ElasticsearchException("simulated"); } - return super.shardOperationOnReplica(request); + super.shardOperationOnReplica(request, listener); } }; final Action.ReplicaOperationTransportHandler replicaOperationTransportHandler = action.new ReplicaOperationTransportHandler(); @@ -749,7 +749,7 @@ public void readFrom(StreamInput in) throws IOException { static class Response extends ReplicationResponse { } - class Action extends TransportReplicationAction { + class Action extends TransportReplicationAction { Action(Settings settings, String actionName, TransportService transportService, ClusterService clusterService, @@ -778,9 +778,9 @@ protected void asyncShardOperationOnPrimary(Void stash, Request shardRequest, Ac } @Override - protected Void shardOperationOnReplica(Request request) { + protected void shardOperationOnReplica(Request request, ActionListener listener) { request.processedOnReplicas.incrementAndGet(); - return null; + listener.onResponse(TransportResponse.Empty.INSTANCE); } @Override From 7056b96ea412f275005b93e3570bcff895859ed5 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Mon, 23 May 2016 15:49:32 -0400 Subject: [PATCH 44/86] Patch from boaz --- .../action/index/TransportIndexAction.java | 71 +++++----- .../ReplicatedMutationRequest.java | 31 ++-- .../replication/ReplicationOperation.java | 104 ++++++-------- .../TransportReplicationAction.java | 132 +++++++++++++++--- .../TransportReplicationActionTests.java | 4 +- 5 files changed, 200 insertions(+), 142 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/action/index/TransportIndexAction.java b/core/src/main/java/org/elasticsearch/action/index/TransportIndexAction.java index ddd48eb58ae04..b2f4cc7500609 100644 --- a/core/src/main/java/org/elasticsearch/action/index/TransportIndexAction.java +++ b/core/src/main/java/org/elasticsearch/action/index/TransportIndexAction.java @@ -27,7 +27,7 @@ import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.AutoCreateIndex; import org.elasticsearch.action.support.replication.ReplicationOperation; -import org.elasticsearch.action.support.replication.TransportReplicatedMutationAction; +import org.elasticsearch.action.support.replication.TransportReplicationAction; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.action.index.MappingUpdatedAction; import org.elasticsearch.cluster.action.shard.ShardStateAction; @@ -44,6 +44,7 @@ import org.elasticsearch.index.mapper.SourceToParse; import 
org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.index.translog.Translog; import org.elasticsearch.index.translog.Translog.Location; import org.elasticsearch.indices.IndexAlreadyExistsException; import org.elasticsearch.indices.IndicesService; @@ -61,7 +62,7 @@ *
  • allowIdGeneration: If the id is set not, should it be generated. Defaults to true. * */ -public class TransportIndexAction extends TransportReplicatedMutationAction { +public class TransportIndexAction extends TransportReplicationAction { private final AutoCreateIndex autoCreateIndex; private final boolean allowIdGeneration; @@ -77,7 +78,7 @@ public TransportIndexAction(Settings settings, TransportService transportService ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, AutoCreateIndex autoCreateIndex) { super(settings, IndexAction.NAME, transportService, clusterService, indicesService, threadPool, shardStateAction, - actionFilters, indexNameExpressionResolver, IndexRequest::new, ThreadPool.Names.INDEX); + actionFilters, indexNameExpressionResolver, IndexRequest::new, IndexResponse::new, ThreadPool.Names.INDEX); this.mappingUpdatedAction = mappingUpdatedAction; this.createIndexAction = createIndexAction; this.autoCreateIndex = autoCreateIndex; @@ -140,9 +141,38 @@ protected IndexResponse newResponseInstance() { } @Override - protected WriteResult onPrimaryShard(IndexService indexService, IndexShard indexShard, IndexRequest request) - throws Exception { - return executeIndexRequestOnPrimary(request, indexShard, mappingUpdatedAction); + protected PrimaryResult shardOperationOnPrimary(IndexRequest request) throws Exception { + IndexService indexService = indicesService.indexServiceSafe(request.shardId().getIndex()); + IndexShard indexShard = indexService.getShard(request.shardId().id()); + Engine.Index operation = prepareIndexOperationOnPrimary(request, indexShard); + Mapping update = operation.parsedDoc().dynamicMappingsUpdate(); + final ShardId shardId = indexShard.shardId(); + if (update != null) { + mappingUpdatedAction.updateMappingOnMaster(shardId.getIndex(), request.type(), update); + operation = prepareIndexOperationOnPrimary(request, indexShard); + update = operation.parsedDoc().dynamicMappingsUpdate(); + if (update != null) { + throw new ReplicationOperation.RetryOnPrimaryException(shardId, + "Dynamic mappings are not available on the node that holds the primary yet"); + } + } + final boolean created = indexShard.index(operation); + + // update the version on request so it will happen on the replicas + final long version = operation.version(); + request.version(version); + request.versionType(request.versionType().versionTypeForReplicationAndRecovery()); + + assert request.versionType().validateVersionForWrites(request.version()); + + final Location location = operation.getTranslogLocation(); + return new WritePrimaryResult( + request, + new IndexResponse(shardId, request.type(), request.id(), request.version(), created), + location, + indexShard.getTranslogDurability() == Translog.Durability.REQUEST && location != null, + request.refreshPolicy(), indexShard + ); } @Override @@ -175,34 +205,5 @@ public static Engine.Index prepareIndexOperationOnPrimary(IndexRequest request, return indexShard.prepareIndexOnPrimary(sourceToParse, request.version(), request.versionType()); } - /** - * Execute the given {@link IndexRequest} on a primary shard, throwing a - * {@link ReplicationOperation.RetryOnPrimaryException} if the operation needs to be re-tried. 
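The new shardOperationOnPrimary above folds the old executeIndexRequestOnPrimary helper into the action, including the dynamic-mapping retry: if the parsed document needs a mapping update, the update is pushed to the master, the operation is prepared again, and a retry exception is thrown if the mapping still has not arrived locally. A simplified sketch of that retry flow (the interface and exception type are stand-ins, not the real classes):

import java.util.function.Consumer;
import java.util.function.Supplier;

final class DynamicMappingRetrySketch {
    interface Operation {
        Object dynamicMappingsUpdate();   // null when no dynamic mapping update is needed
    }

    /** One round-trip to the master for a dynamic mapping update, then a retry if it is still missing. */
    static Operation prepareOnPrimary(Supplier<Operation> prepare, Consumer<Object> updateMappingOnMaster) {
        Operation op = prepare.get();
        Object update = op.dynamicMappingsUpdate();
        if (update != null) {
            updateMappingOnMaster.accept(update);
            op = prepare.get();
            if (op.dynamicMappingsUpdate() != null) {
                // the real code throws ReplicationOperation.RetryOnPrimaryException here
                throw new IllegalStateException("dynamic mappings are not available on the primary yet");
            }
        }
        return op;
    }
}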
- */ - public static WriteResult executeIndexRequestOnPrimary(IndexRequest request, IndexShard indexShard, MappingUpdatedAction mappingUpdatedAction) throws Exception { - Engine.Index operation = prepareIndexOperationOnPrimary(request, indexShard); - Mapping update = operation.parsedDoc().dynamicMappingsUpdate(); - final ShardId shardId = indexShard.shardId(); - if (update != null) { - mappingUpdatedAction.updateMappingOnMaster(shardId.getIndex(), request.type(), update); - operation = prepareIndexOperationOnPrimary(request, indexShard); - update = operation.parsedDoc().dynamicMappingsUpdate(); - if (update != null) { - throw new ReplicationOperation.RetryOnPrimaryException(shardId, - "Dynamic mappings are not available on the node that holds the primary yet"); - } - } - final boolean created = indexShard.index(operation); - - // update the version on request so it will happen on the replicas - final long version = operation.version(); - request.version(version); - request.versionType(request.versionType().versionTypeForReplicationAndRecovery()); - - assert request.versionType().validateVersionForWrites(request.version()); - - return new WriteResult<>(new IndexResponse(shardId, request.type(), request.id(), request.version(), created), operation.getTranslogLocation()); - } - } diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/ReplicatedMutationRequest.java b/core/src/main/java/org/elasticsearch/action/support/replication/ReplicatedMutationRequest.java index ed016f314890b..bd3732b982aa6 100644 --- a/core/src/main/java/org/elasticsearch/action/support/replication/ReplicatedMutationRequest.java +++ b/core/src/main/java/org/elasticsearch/action/support/replication/ReplicatedMutationRequest.java @@ -21,7 +21,6 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.shard.ShardId; import java.io.IOException; @@ -30,8 +29,14 @@ * Base class for requests that modify data in some shard like delete, index, and shardBulk. */ public class ReplicatedMutationRequest> extends ReplicationRequest { - private boolean refresh; - private boolean blockUntilRefresh; + + public enum RefreshPolicy { + NONE, + IMMEDIATE, + WAIT_UNTIL + } + + private RefreshPolicy refreshPolicy = RefreshPolicy.NONE; /** * Create an empty request. @@ -52,29 +57,15 @@ public ReplicatedMutationRequest(ShardId shardId) { * to false. */ @SuppressWarnings("unchecked") - public R setRefresh(boolean refresh) { - this.refresh = refresh; + public R setRefresh(RefreshPolicy refreshPolicy) { + this.refreshPolicy = refreshPolicy; return (R) this; } - public boolean isRefresh() { - return this.refresh; - } + public RefreshPolicy refreshPolicy() { - /** - * Should this request block until it has been made visible for search by a refresh? Unlike {@link #setRefresh(boolean)} this is quite - * safe to use under heavy indexing so long as few total operations use it. See {@link IndexSettings#MAX_REFRESH_LISTENERS_PER_SHARD} - * for the limit. A bulk request counts as one request on each shard that it touches. 
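The RefreshPolicy enum introduced above replaces the two separate booleans (refresh and blockUntilRefresh) with a single three-valued setting. A small sketch of how the old flags would map onto the new values, assuming the obvious correspondence:

final class RefreshPolicySketch {
    enum RefreshPolicy { NONE, IMMEDIATE, WAIT_UNTIL }

    /** Hypothetical translation of the two old boolean flags into the single new policy. */
    static RefreshPolicy fromLegacyFlags(boolean refresh, boolean blockUntilRefresh) {
        if (refresh) {
            return RefreshPolicy.IMMEDIATE;   // force a refresh as part of the write
        }
        if (blockUntilRefresh) {
            return RefreshPolicy.WAIT_UNTIL;  // wait until a refresh makes the write visible
        }
        return RefreshPolicy.NONE;            // default: neither force nor wait
    }
}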
- */ - @SuppressWarnings("unchecked") - public R setBlockUntilRefresh(boolean blockUntilRefresh) { - this.blockUntilRefresh = blockUntilRefresh; - return (R) this; } - public boolean shouldBlockUntilRefresh() { - return blockUntilRefresh; - } @Override public void readFrom(StreamInput in) throws IOException { diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java b/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java index 9d7b993d5437a..490a8dc839cbb 100644 --- a/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java +++ b/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java @@ -28,7 +28,6 @@ import org.elasticsearch.cluster.routing.IndexRoutingTable; import org.elasticsearch.cluster.routing.IndexShardRoutingTable; import org.elasticsearch.cluster.routing.ShardRouting; -import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.index.engine.VersionConflictEngineException; @@ -47,11 +46,9 @@ import java.util.function.Supplier; public class ReplicationOperation< - Request extends ReplicationRequest, - AsyncStash, - ReplicaRequest extends ReplicationRequest, - Response extends ReplicationResponse - > { + Request extends ReplicationRequest, + ReplicaRequest extends ReplicationRequest, + PrimaryResultT extends ReplicationOperation.PrimaryResult> { final private ESLogger logger; final private Request request; final private Supplier clusterStateSupplier; @@ -71,17 +68,17 @@ public class ReplicationOperation< final private AtomicInteger successfulShards = new AtomicInteger(); final private boolean executeOnReplicas; final private boolean checkWriteConsistency; - final private Primary primary; + final private Primary primary; final private Replicas replicasProxy; final private AtomicBoolean finished = new AtomicBoolean(); - final protected ActionListener finalResponseListener; + final protected ActionListener resultListener; - private volatile Response finalResponse = null; + private volatile PrimaryResultT primaryResult = null; private final List shardReplicaFailures = Collections.synchronizedList(new ArrayList<>()); - ReplicationOperation(Request request, Primary primary, - ActionListener listener, + ReplicationOperation(Request request, Primary primary, + ActionListener listener, boolean executeOnReplicas, boolean checkWriteConsistency, Replicas replicas, Supplier clusterStateSupplier, ESLogger logger, String opType) { @@ -89,7 +86,7 @@ public class ReplicationOperation< this.executeOnReplicas = executeOnReplicas; this.replicasProxy = replicas; this.primary = primary; - this.finalResponseListener = listener; + this.resultListener = listener; this.logger = logger; this.request = request; this.clusterStateSupplier = clusterStateSupplier; @@ -108,9 +105,8 @@ void execute() throws Exception { totalShards.incrementAndGet(); pendingShards.incrementAndGet(); - Tuple primaryResult = primary.perform(request); - ReplicaRequest replicaRequest = primaryResult.v1(); - AsyncStash asyncStash = primaryResult.v2(); + primaryResult = primary.perform(request); + ReplicaRequest replicaRequest = primaryResult.replicaRequest(); assert replicaRequest.primaryTerm() > 0 : "replicaRequest doesn't have a primary term"; if (logger.isTraceEnabled()) { logger.trace("[{}] op [{}] completed on primary for request [{}]", primaryId, opType, request); @@ 
-137,31 +133,9 @@ void execute() throws Exception { performOnReplica(shard.buildTargetRelocatingShard(), replicaRequest); } } - /* - * Wait until after we've started the replica requests before we start any asyn actions on the primary so we don't have a race - * between the replica returning and the primary starting. - */ - primary.performAsync(asyncStash, request, new ActionListener() { - @Override - public void onResponse(Response response) { - finalResponse = response; - successfulShards.incrementAndGet(); - decPendingAndFinishIfNeeded(); - } - @Override - public void onFailure(Throwable primaryException) { - try { - RestStatus restStatus = ExceptionsHelper.status(primaryException); - shardReplicaFailures.add(new ReplicationResponse.ShardInfo.Failure(primaryRouting.shardId(), primaryRouting.currentNodeId(), - primaryException, restStatus, false)); - String message = String.format(Locale.ROOT, "failed to perform %s on primary %s", opType, primaryRouting); - logger.warn("[{}] {}", primaryException, primaryRouting.shardId(), message); - } finally { - decPendingAndFinishIfNeeded(); - } - } - }); + successfulShards.incrementAndGet(); + decPendingAndFinishIfNeeded(); } private void performOnReplica(final ShardRouting shard, final ReplicaRequest replicaRequest) { @@ -276,19 +250,19 @@ private void finish() { failuresArray = new ReplicationResponse.ShardInfo.Failure[shardReplicaFailures.size()]; shardReplicaFailures.toArray(failuresArray); } - finalResponse.setShardInfo(new ReplicationResponse.ShardInfo( + primaryResult.setShardInfo(new ReplicationResponse.ShardInfo( totalShards.get(), successfulShards.get(), failuresArray ) ); - finalResponseListener.onResponse(finalResponse); + resultListener.onResponse(primaryResult); } } private void finishAsFailed(Throwable throwable) { if (finished.compareAndSet(false, true)) { - finalResponseListener.onFailure(throwable); + resultListener.onFailure(throwable); } } @@ -320,16 +294,19 @@ public static boolean isConflictException(Throwable e) { interface Primary< - Request extends ReplicationRequest, - AsyncStash, - ReplicaRequest extends ReplicationRequest, - Response extends ReplicationResponse - > { + Request extends ReplicationRequest, + ReplicaRequest extends ReplicationRequest, + PrimaryResultT extends PrimaryResult + > { - /** routing entry for this primary */ + /** + * routing entry for this primary + */ ShardRouting routingEntry(); - /** fail the primary, typically due to the fact that the operation has learned the primary has been demoted by the master */ + /** + * fail the primary, typically due to the fact that the operation has learned the primary has been demoted by the master + */ void failShard(String message, Throwable throwable); /** @@ -340,13 +317,8 @@ interface Primary< * @param request the request to perform * @return the request to send to the repicas */ - Tuple perform(Request request) throws Exception; + PrimaryResultT perform(Request request) throws Exception; - /** - * Start and listen for the completion of any asynchronous actions taken on the primary as part of this request. If there are no - * such actions then this will call the listener directly. 
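With the interface change above, Primary.perform(request) returns a result object instead of a Tuple, and the separate performAsync hook goes away; the result carries the replica request and later receives the shard info before being handed to the listener. A compact sketch of that contract, simplified to a synchronous fan-out with stand-in types:

import java.util.List;
import java.util.function.Consumer;
import java.util.function.Function;

final class ReplicationFlowSketch {
    interface PrimaryResult<ReplicaRequest> {
        ReplicaRequest replicaRequest();
        void setShardInfo(String shardInfo);
    }

    /** Perform on the primary, fan the replica request out, attach shard info, hand the result back. */
    static <Req, RepReq, Result extends PrimaryResult<RepReq>> void execute(
            Req request,
            Function<Req, Result> performOnPrimary,
            List<Consumer<RepReq>> replicas,
            Consumer<Result> resultListener) {
        Result result = performOnPrimary.apply(request);
        RepReq replicaRequest = result.replicaRequest();
        replicas.forEach(replica -> replica.accept(replicaRequest));
        result.setShardInfo("total=" + (replicas.size() + 1));
        resultListener.accept(result);
    }
}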
- */ - void performAsync(AsyncStash stash, Request request, ActionListener listener) throws Exception; } interface Replicas> { @@ -354,19 +326,20 @@ interface Replicas> { /** * performs the the given request on the specified replica * - * @param replica {@link ShardRouting} of the shard this request should be executed on + * @param replica {@link ShardRouting} of the shard this request should be executed on * @param replicaRequest operation to peform - * @param listener a callback to call once the operation has been complicated, either successfully or with an error. + * @param listener a callback to call once the operation has been complicated, either successfully or with an error. */ void performOn(ShardRouting replica, ReplicaRequest replicaRequest, ActionListener listener); /** * Fail the specified shard, removing it from the current set of active shards - * @param replica shard to fail - * @param primary the primary shard that requested the failure - * @param message a (short) description of the reason - * @param throwable the original exception which caused the ReplicationOperation to request the shard to be failed - * @param onSuccess a callback to call when the shard has been successfully removed from the active set. + * + * @param replica shard to fail + * @param primary the primary shard that requested the failure + * @param message a (short) description of the reason + * @param throwable the original exception which caused the ReplicationOperation to request the shard to be failed + * @param onSuccess a callback to call when the shard has been successfully removed from the active set. * @param onPrimaryDemoted a callback to call when the shard can not be failed because the current primary has been demoted * by the master. * @param onIgnoredFailure a callback to call when failing a shard has failed, but it that failure can be safely ignored and the @@ -391,4 +364,11 @@ public RetryOnPrimaryException(StreamInput in) throws IOException { super(in); } } + + interface PrimaryResult> { + + R replicaRequest(); + + void setShardInfo(ReplicationResponse.ShardInfo shardInfo); + } } diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java b/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java index c5c13eb04f0c2..cab8690005bcd 100644 --- a/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java +++ b/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java @@ -40,7 +40,6 @@ import org.elasticsearch.cluster.routing.IndexShardRoutingTable; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.lease.Releasable; import org.elasticsearch.common.settings.Settings; @@ -52,6 +51,7 @@ import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.IndexShardState; import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.index.translog.Translog; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.node.NodeClosedException; import org.elasticsearch.tasks.Task; @@ -83,7 +83,6 @@ */ public abstract class TransportReplicationAction< Request extends ReplicationRequest, - AsyncStash, ReplicaRequest extends ReplicationRequest, Response extends ReplicationResponse > extends TransportAction { 
@@ -155,18 +154,18 @@ protected void resolveRequest(MetaData metaData, IndexMetaData indexMetaData, Re * * @param shardRequest the request to the primary shard * @return Tuple of the request to send to the replicas and the information needed by the { - * {@link #asyncShardOperationOnPrimary(Object, ReplicationRequest, ActionListener)} to do its job + //* {@link #asyncShardOperationOnPrimary(Object, ReplicationRequest, ActionListener)} to do its job */ - protected abstract Tuple shardOperationOnPrimary(Request shardRequest) throws Exception; + protected abstract PrimaryResult shardOperationOnPrimary(Request shardRequest) throws Exception; - /** - * Asynchronous portion of primary operation on node with primary copy - * - * @param stash information saved from the synchronous phase of the operation for use in the async phase of the operation - * @param shardRequest the request to the primary shard - * @param listener implementers call this success or failure when the asynchronous operations are complete. - */ - protected abstract void asyncShardOperationOnPrimary(AsyncStash stash, Request shardRequest, ActionListener listener); +// /** +// * Asynchronous portion of primary operation on node with primary copy +// * +// * @param stash information saved from the synchronous phase of the operation for use in the async phase of the operation +// * @param shardRequest the request to the primary shard +// * @param listener implementers call this success or failure when the asynchronous operations are complete. +// */ +// protected abstract void asyncShardOperationOnPrimary(AsyncStash stash, Request shardRequest, ActionListener listener); /** * Replica operation on nodes with replica copies. While this does take a listener it should not return until it has completed any @@ -282,7 +281,17 @@ public void handleException(TransportException exp) { final IndexMetaData indexMetaData = clusterService.state().getMetaData().index(request.shardId().getIndex()); final boolean executeOnReplicas = (indexMetaData == null) || shouldExecuteReplication(indexMetaData.getSettings()); final ActionListener listener = createResponseListener(channel, replicationTask, primaryShardReference); - createReplicatedOperation(request, listener, primaryShardReference, executeOnReplicas).execute(); + createReplicatedOperation(request, new ActionListener() { + @Override + public void onResponse(PrimaryResult result) { + result.respond(listener); + } + + @Override + public void onFailure(Throwable e) { + listener.onFailure(e); + } + }, primaryShardReference, executeOnReplicas).execute(); success = true; } } finally { @@ -292,8 +301,9 @@ public void handleException(TransportException exp) { } } - protected ReplicationOperation createReplicatedOperation(Request request, - ActionListener listener, PrimaryShardReference primaryShardReference, boolean executeOnReplicas) { + protected ReplicationOperation createReplicatedOperation( + Request request, ActionListener listener, + PrimaryShardReference primaryShardReference, boolean executeOnReplicas) { return new ReplicationOperation<>(request, primaryShardReference, listener, executeOnReplicas, checkWriteConsistency(), replicasProxy, clusterService::state, logger, actionName ); @@ -331,6 +341,82 @@ public void onFailure(Throwable e) { } } + protected class PrimaryResult implements ReplicationOperation.PrimaryResult { + final ReplicaRequest replicaRequest; + final Response finalResponse; + + public PrimaryResult(ReplicaRequest replicaRequest, Response finalResponse) { + this.replicaRequest = 
replicaRequest; + this.finalResponse = finalResponse; + } + + @Override + public ReplicaRequest replicaRequest() { + return replicaRequest; + } + + @Override + public void setShardInfo(ReplicationResponse.ShardInfo shardInfo) { + finalResponse.setShardInfo(shardInfo); + } + + public void respond(ActionListener listener) { + listener.onResponse(finalResponse); + } + } + + protected class WritePrimaryResult extends PrimaryResult { + boolean refreshNeeded; + boolean forcedRefresh; + ActionListener listener = null; + + public WritePrimaryResult(ReplicaRequest replicaRequest, Response finalResponse, + Translog.Location location, + boolean fsyncTranslog, + ReplicatedMutationRequest.RefreshPolicy refreshPolicy, + IndexShard indexShard) { + super(replicaRequest, finalResponse); + switch (refreshPolicy) { + case IMMEDIATE: + indexShard.refresh("bla"); + synchronized (this) { + forcedRefresh = true; + refreshNeeded = false; + } + break; + case WAIT_UNTIL: + refreshNeeded = true; + indexShard.addRefreshListener(location, forcedRefresh -> { + synchronized (WritePrimaryResult.this) { + WritePrimaryResult.this.forcedRefresh = forcedRefresh; + if (forcedRefresh) { + // TODO:: logger.warn("block_until_refresh request ran out of slots and forced a refresh: [{}]", request); + } + refreshNeeded = false; + respondIfNeeded(); + } + }); + + break; + } + if (fsyncTranslog) { + indexShard.sync(location); + } + } + + @Override + public void respond(ActionListener listener) { + this.listener = listener; + respondIfNeeded(); + } + + protected synchronized void respondIfNeeded() { + if (refreshNeeded == false && listener != null) { + super.respond(listener); + } + } + } + class ReplicaOperationTransportHandler implements TransportRequestHandler { @Override public void messageReceived(final ReplicaRequest request, final TransportChannel channel) throws Exception { @@ -748,7 +834,7 @@ protected boolean shouldExecuteReplication(Settings settings) { return IndexMetaData.isIndexUsingShadowReplicas(settings) == false; } - class PrimaryShardReference implements ReplicationOperation.Primary, Releasable { + class PrimaryShardReference implements ReplicationOperation.Primary, Releasable { private final IndexShard indexShard; private final Releasable operationLock; @@ -777,16 +863,16 @@ public void failShard(String reason, Throwable e) { } @Override - public Tuple perform(Request request) throws Exception { - Tuple result = shardOperationOnPrimary(request); - result.v1().primaryTerm(indexShard.getPrimaryTerm()); + public PrimaryResult perform(Request request) throws Exception { + PrimaryResult result = shardOperationOnPrimary(request); + result.replicaRequest().primaryTerm(indexShard.getPrimaryTerm()); return result; } - @Override - public void performAsync(AsyncStash stash, Request request, ActionListener listener) throws Exception { - asyncShardOperationOnPrimary(stash, request, listener); - } +// @Override +// public void performAsync(AsyncStash stash, Request request, ActionListener listener) throws Exception { +// asyncShardOperationOnPrimary(stash, request, listener); +// } @Override public ShardRouting routingEntry() { diff --git a/core/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java b/core/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java index 9ff56b067a0a1..ebfa12f3f6c58 100644 --- a/core/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java +++ 
b/core/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java @@ -624,7 +624,7 @@ public void execute() throws Exception { if (throwExceptionOnRun) { throw new ElasticsearchException("simulated exception, during performOnPrimary"); } else if (respondWithError) { - this.finalResponseListener.onFailure(new ElasticsearchException("simulated exception, as a response")); + this.resultListener.onFailure(new ElasticsearchException("simulated exception, as a response")); } else { super.execute(); } @@ -837,7 +837,7 @@ public NoopReplicationOperation(Request request, ActionListener listen @Override public void execute() throws Exception { - this.finalResponseListener.onResponse(new Response()); + this.resultListener.onResponse(new Response()); } } From 8eebaa89c0a1ee74982fbe0d56d1485ca2ae09db Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Mon, 23 May 2016 20:52:49 -0400 Subject: [PATCH 45/86] Take boaz's changes to their logic conclusion and unbreak important stuff like bulk --- .../action/DocWriteResponse.java | 10 +- .../flush/TransportShardFlushAction.java | 12 +- .../refresh/TransportShardRefreshAction.java | 13 +- .../action/bulk/BulkRequest.java | 53 ++---- .../action/bulk/BulkRequestBuilder.java | 25 +-- .../action/bulk/BulkShardRequest.java | 17 +- .../action/bulk/BulkShardResponse.java | 11 +- .../action/bulk/TransportBulkAction.java | 3 +- .../action/bulk/TransportShardBulkAction.java | 29 +-- .../action/delete/DeleteRequest.java | 4 +- .../action/delete/DeleteRequestBuilder.java | 25 +-- .../action/delete/TransportDeleteAction.java | 12 +- .../action/index/IndexRequest.java | 4 +- .../action/index/IndexRequestBuilder.java | 25 +-- .../action/index/TransportIndexAction.java | 68 +++---- .../action/ingest/IngestActionFilter.java | 2 +- .../action/support/WriteRequest.java | 95 +++++++++ .../action/support/WriteRequestBuilder.java | 49 +++++ ...quest.java => ReplicatedWriteRequest.java} | 42 ++-- ...onse.java => ReplicatedWriteResponse.java} | 9 +- .../replication/ReplicationOperation.java | 15 +- .../TransportReplicatedMutationAction.java | 151 --------------- .../TransportReplicationAction.java | 66 +------ .../replication/TransportWriteAction.java | 180 ++++++++++++++++++ .../action/update/TransportUpdateAction.java | 3 - .../action/update/UpdateHelper.java | 7 +- .../action/update/UpdateRequest.java | 48 ++--- .../action/update/UpdateRequestBuilder.java | 28 +-- .../rest/action/bulk/RestBulkAction.java | 3 +- .../rest/action/delete/RestDeleteAction.java | 3 +- .../rest/action/index/RestIndexAction.java | 3 +- .../rest/action/update/RestUpdateAction.java | 3 +- .../action/bulk/BulkRequestTests.java | 17 +- .../action/bulk/BulkShardRequestTests.java | 7 +- .../ReplicationOperationTests.java | 68 ++++--- .../TransportReplicationActionTests.java | 57 +++--- .../elasticsearch/aliases/IndexAliasesIT.java | 16 +- .../cluster/allocation/ClusterRerouteIT.java | 3 +- .../index/BlockUntilRefreshIT.java | 22 ++- ...lFieldMapperPositionIncrementGapTests.java | 3 +- .../percolator/PercolatorIT.java | 39 ++-- .../elasticsearch/routing/AliasRoutingIT.java | 29 +-- .../routing/SimpleRoutingIT.java | 28 +-- .../search/child/ChildQuerySearchIT.java | 8 +- .../suggest/CompletionSuggestSearch2xIT.java | 25 +-- .../elasticsearch/messy/tests/BulkTests.java | 9 +- 46 files changed, 667 insertions(+), 682 deletions(-) create mode 100644 core/src/main/java/org/elasticsearch/action/support/WriteRequest.java create mode 100644 
core/src/main/java/org/elasticsearch/action/support/WriteRequestBuilder.java rename core/src/main/java/org/elasticsearch/action/support/replication/{ReplicatedMutationRequest.java => ReplicatedWriteRequest.java} (60%) rename core/src/main/java/org/elasticsearch/action/support/replication/{ReplicatedMutationResponse.java => ReplicatedWriteResponse.java} (75%) delete mode 100644 core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicatedMutationAction.java create mode 100644 core/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java diff --git a/core/src/main/java/org/elasticsearch/action/DocWriteResponse.java b/core/src/main/java/org/elasticsearch/action/DocWriteResponse.java index 3c0c6e8250e69..286b4d64e96d4 100644 --- a/core/src/main/java/org/elasticsearch/action/DocWriteResponse.java +++ b/core/src/main/java/org/elasticsearch/action/DocWriteResponse.java @@ -18,8 +18,8 @@ */ package org.elasticsearch.action; -import org.elasticsearch.action.support.replication.ReplicatedMutationRequest; -import org.elasticsearch.action.support.replication.ReplicatedMutationResponse; +import org.elasticsearch.action.support.WriteRequest; +import org.elasticsearch.action.support.replication.ReplicatedWriteResponse; import org.elasticsearch.action.support.replication.ReplicationResponse; import org.elasticsearch.action.support.replication.ReplicationResponse.ShardInfo; import org.elasticsearch.common.io.stream.StreamInput; @@ -35,7 +35,7 @@ /** * A base class for the response of a write operation that involves a single doc */ -public abstract class DocWriteResponse extends ReplicatedMutationResponse implements StatusToXContent { +public abstract class DocWriteResponse extends ReplicationResponse implements ReplicatedWriteResponse, StatusToXContent { private ShardId shardId; private String id; @@ -91,8 +91,8 @@ public long getVersion() { } /** - * Did this request force a refresh? Requests that set {@link ReplicatedMutationRequest#setRefresh(boolean)} to true should always - * return true for this. Requests that set {@link ReplicatedMutationRequest#setBlockUntilRefresh(boolean)} to true should only return + * Did this request force a refresh? Requests that set {@link WriteRequest#setRefresh(boolean)} to true should always + * return true for this. Requests that set {@link WriteRequest#setBlockUntilRefresh(boolean)} to true should only return * this if they run out of refresh listener slots (see {@link IndexSettings#MAX_REFRESH_LISTENERS_PER_SHARD}). 
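The forcedRefresh() javadoc above distinguishes a refresh the caller asked for from one that was forced because the shard ran out of refresh-listener slots. A hypothetical caller-side check, with a simplified response interface standing in for the real one:

final class ForcedRefreshCheckSketch {
    interface DocWriteResponse {
        boolean forcedRefresh();          // simplified stand-in for the real response type
    }

    /** A wait-for-refresh request that reports forcedRefresh == true ran out of listener slots. */
    static void warnIfForced(DocWriteResponse response, boolean requestedWaitUntil) {
        if (requestedWaitUntil && response.forcedRefresh()) {
            System.err.println("request ran out of refresh listener slots and forced a refresh");
        }
    }
}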
*/ public boolean forcedRefresh() { diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportShardFlushAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportShardFlushAction.java index dc41148d3bbc7..516d4aa98d977 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportShardFlushAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportShardFlushAction.java @@ -27,7 +27,6 @@ import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.shard.IndexShard; @@ -39,7 +38,7 @@ /** * */ -public class TransportShardFlushAction extends TransportReplicationAction { +public class TransportShardFlushAction extends TransportReplicationAction { public static final String NAME = FlushAction.NAME + "[s]"; @@ -57,16 +56,11 @@ protected ReplicationResponse newResponseInstance() { } @Override - protected Tuple shardOperationOnPrimary(ShardFlushRequest shardRequest) { + protected PrimaryResult shardOperationOnPrimary(ShardFlushRequest shardRequest) { IndexShard indexShard = indicesService.indexServiceSafe(shardRequest.shardId().getIndex()).getShard(shardRequest.shardId().id()); indexShard.flush(shardRequest.getRequest()); logger.trace("{} flush request executed on primary", indexShard.shardId()); - return new Tuple<>(shardRequest, null); - } - - @Override - protected void asyncShardOperationOnPrimary(Void stash, ShardFlushRequest shardRequest, ActionListener listener) { - listener.onResponse(new ReplicationResponse()); + return new PrimaryResult(shardRequest, new ReplicationResponse()); } @Override diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportShardRefreshAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportShardRefreshAction.java index d07d338c1803d..bb6e2ba17a16c 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportShardRefreshAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportShardRefreshAction.java @@ -28,7 +28,6 @@ import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.shard.IndexShard; @@ -39,7 +38,7 @@ import org.elasticsearch.transport.TransportService; public class TransportShardRefreshAction - extends TransportReplicationAction { + extends TransportReplicationAction { public static final String NAME = RefreshAction.NAME + "[s]"; @@ -57,17 +56,11 @@ protected ReplicationResponse newResponseInstance() { } @Override - protected Tuple shardOperationOnPrimary(BasicReplicationRequest shardRequest) { + protected PrimaryResult shardOperationOnPrimary(BasicReplicationRequest shardRequest) { IndexShard indexShard = indicesService.indexServiceSafe(shardRequest.shardId().getIndex()).getShard(shardRequest.shardId().id()); indexShard.refresh("api"); logger.trace("{} refresh request executed on primary", indexShard.shardId()); - return new 
Tuple<>(shardRequest, null); - } - - @Override - protected void asyncShardOperationOnPrimary(Void stash, BasicReplicationRequest shardRequest, - ActionListener listener) { - listener.onResponse(new ReplicationResponse()); + return new PrimaryResult(shardRequest, new ReplicationResponse()); } @Override diff --git a/core/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java b/core/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java index 0e36ef1d7d820..1770ff89c52ff 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java @@ -26,6 +26,7 @@ import org.elasticsearch.action.WriteConsistencyLevel; import org.elasticsearch.action.delete.DeleteRequest; import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.action.update.UpdateRequest; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Strings; @@ -38,7 +39,6 @@ import org.elasticsearch.common.xcontent.XContent; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.VersionType; import java.io.IOException; @@ -54,7 +54,7 @@ * Note that we only support refresh on the bulk request not per item. * @see org.elasticsearch.client.Client#bulk(BulkRequest) */ -public class BulkRequest extends ActionRequest implements CompositeIndicesRequest { +public class BulkRequest extends ActionRequest implements CompositeIndicesRequest, WriteRequest { private static final int REQUEST_OVERHEAD = 50; @@ -63,11 +63,7 @@ public class BulkRequest extends ActionRequest implements Composite protected TimeValue timeout = BulkShardRequest.DEFAULT_TIMEOUT; private WriteConsistencyLevel consistencyLevel = WriteConsistencyLevel.DEFAULT; - private boolean refresh = false; - /** - * Should this request block until all of its results are visible for search? - */ - private boolean blockUntilRefresh = false; + private RefreshPolicy refreshPolicy = RefreshPolicy.NONE; private long sizeInBytes = 0; @@ -438,18 +434,15 @@ public WriteConsistencyLevel consistencyLevel() { return this.consistencyLevel; } - /** - * Should a refresh be executed post this bulk operation causing the operations to - * be searchable. Note, heavy indexing should not set this to true. Defaults - * to false. - */ - public BulkRequest refresh(boolean refresh) { - this.refresh = refresh; + @Override + public BulkRequest setRefreshPolicy(RefreshPolicy refreshPolicy) { + this.refreshPolicy = refreshPolicy; return this; } - public boolean refresh() { - return this.refresh; + @Override + public RefreshPolicy getRefreshPolicy() { + return refreshPolicy; } /** @@ -471,20 +464,6 @@ public TimeValue timeout() { return timeout; } - /** - * Should this request block until it has been made visible for search by a refresh? Unlike {@link #refresh(boolean)} this is quite safe - * to use under heavy indexing so long as few total operations use it. See {@link IndexSettings#MAX_REFRESH_LISTENERS_PER_SHARD} for - * the limit. A bulk request counts as one request on each shard that it touches. 
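In the BulkRequest changes above, the bulk request owns a single refresh policy for all of its items; per-item refresh settings are rejected at validation time, as the validate() hunk just below shows. A simplified sketch of that per-item check (types are stand-ins):

import java.util.ArrayList;
import java.util.List;

final class BulkRefreshValidationSketch {
    enum RefreshPolicy { NONE, IMMEDIATE, WAIT_UNTIL }

    interface WriteRequest {
        RefreshPolicy getRefreshPolicy();
    }

    /** Collects one validation error per item that tries to set its own refresh policy. */
    static List<String> validate(List<WriteRequest> items) {
        List<String> errors = new ArrayList<>();
        for (WriteRequest item : items) {
            if (item.getRefreshPolicy() != RefreshPolicy.NONE) {
                errors.add("RefreshPolicy is not supported on an item request. Set it on the BulkRequest instead.");
            }
        }
        return errors;
    }
}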
- */ - public BulkRequest setBlockUntilRefresh(boolean blockUntilRefresh) { - this.blockUntilRefresh = blockUntilRefresh; - return this; - } - - public boolean shouldBlockUntilRefresh() { - return blockUntilRefresh; - } - private int findNextMarker(byte marker, int from, BytesReference data, int length) { for (int i = from; i < length; i++) { if (data.get(i) == marker) { @@ -518,10 +497,10 @@ public ActionRequestValidationException validate() { } for (ActionRequest request : requests) { // We first check if refresh has been set - if ((request instanceof DeleteRequest && ((DeleteRequest)request).isRefresh()) || - (request instanceof UpdateRequest && ((UpdateRequest)request).isRefresh()) || - (request instanceof IndexRequest && ((IndexRequest)request).isRefresh())) { - validationException = addValidationError("Refresh is not supported on an item request, set the refresh flag on the BulkRequest instead.", validationException); + if (request instanceof WriteRequest + && ((WriteRequest) request).getRefreshPolicy() != RefreshPolicy.NONE) { + validationException = addValidationError( + "RefreshPolicy is not supported on an item request. Set it on the BulkRequest instead.", validationException); } ActionRequestValidationException ex = request.validate(); if (ex != null) { @@ -556,8 +535,7 @@ public void readFrom(StreamInput in) throws IOException { requests.add(request); } } - refresh = in.readBoolean(); - blockUntilRefresh = in.readBoolean(); + refreshPolicy = RefreshPolicy.readFrom(in); timeout = TimeValue.readTimeValue(in); } @@ -576,8 +554,7 @@ public void writeTo(StreamOutput out) throws IOException { } request.writeTo(out); } - out.writeBoolean(refresh); - out.writeBoolean(blockUntilRefresh); + refreshPolicy.writeTo(out); timeout.writeTo(out); } } diff --git a/core/src/main/java/org/elasticsearch/action/bulk/BulkRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/bulk/BulkRequestBuilder.java index 031f0b506b29c..4f2b7aa702ecf 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/BulkRequestBuilder.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/BulkRequestBuilder.java @@ -25,18 +25,19 @@ import org.elasticsearch.action.delete.DeleteRequestBuilder; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.index.IndexRequestBuilder; +import org.elasticsearch.action.support.WriteRequestBuilder; import org.elasticsearch.action.update.UpdateRequest; import org.elasticsearch.action.update.UpdateRequestBuilder; import org.elasticsearch.client.ElasticsearchClient; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.index.IndexSettings; /** * A bulk request holds an ordered {@link IndexRequest}s and {@link DeleteRequest}s and allows to executes * it in a single batch. */ -public class BulkRequestBuilder extends ActionRequestBuilder { +public class BulkRequestBuilder extends ActionRequestBuilder + implements WriteRequestBuilder { public BulkRequestBuilder(ElasticsearchClient client, BulkAction action) { super(client, action, new BulkRequest()); @@ -117,16 +118,6 @@ public BulkRequestBuilder setConsistencyLevel(WriteConsistencyLevel consistencyL return this; } - /** - * Should a refresh be executed post this bulk operation causing the operations to - * be searchable. Note, heavy indexing should not set this to true. Defaults - * to false. 
- */ - public BulkRequestBuilder setRefresh(boolean refresh) { - request.refresh(refresh); - return this; - } - /** * A timeout to wait if the index operation can't be performed immediately. Defaults to 1m. */ @@ -143,16 +134,6 @@ public final BulkRequestBuilder setTimeout(String timeout) { return this; } - /** - * Should this request block until it has been made visible for search by a refresh? Unlike {@link #setRefresh(boolean)} this is quite - * safe to use under heavy indexing so long as few total operations use it. See {@link IndexSettings#MAX_REFRESH_LISTENERS_PER_SHARD} - * for the limit. A bulk request counts as one request on each shard that it touches. Defaults to false. - */ - public final BulkRequestBuilder setBlockUntilRefresh(boolean blockUntilRefresh) { - request.setBlockUntilRefresh(blockUntilRefresh); - return this; - } - /** * The number of actions currently in the bulk. */ diff --git a/core/src/main/java/org/elasticsearch/action/bulk/BulkShardRequest.java b/core/src/main/java/org/elasticsearch/action/bulk/BulkShardRequest.java index 41978114e3c11..321b7e2a8e505 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/BulkShardRequest.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/BulkShardRequest.java @@ -19,7 +19,7 @@ package org.elasticsearch.action.bulk; -import org.elasticsearch.action.support.replication.ReplicatedMutationRequest; +import org.elasticsearch.action.support.replication.ReplicatedWriteRequest; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.index.shard.ShardId; @@ -31,17 +31,17 @@ /** * */ -public class BulkShardRequest extends ReplicatedMutationRequest { +public class BulkShardRequest extends ReplicatedWriteRequest { private BulkItemRequest[] items; public BulkShardRequest() { } - BulkShardRequest(BulkRequest bulkRequest, ShardId shardId, boolean refresh, BulkItemRequest[] items) { + BulkShardRequest(BulkRequest bulkRequest, ShardId shardId, RefreshPolicy refreshPolicy, BulkItemRequest[] items) { super(shardId); this.items = items; - this.setRefresh(refresh); + setRefreshPolicy(refreshPolicy); } BulkItemRequest[] items() { @@ -89,8 +89,15 @@ public String toString() { // This is included in error messages so we'll try to make it somewhat user friendly. 
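        // Illustrative only (hypothetical index named "test" with three items); the three refresh
        // policies handled by the switch below would render as:
        //   NONE       -> BulkShardRequest to [test] containing [3] requests
        //   IMMEDIATE  -> BulkShardRequest to [test] containing [3] requests and a refresh
        //   WAIT_UNTIL -> BulkShardRequest to [test] containing [3] requests blocking until refresh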
StringBuilder b = new StringBuilder("BulkShardRequest to ["); b.append(index).append("] containing [").append(items.length).append("] requests"); - if (isRefresh()) { + switch (getRefreshPolicy()) { + case IMMEDIATE: b.append(" and a refresh"); + break; + case WAIT_UNTIL: + b.append(" blocking until refresh"); + break; + case NONE: + break; } return b.toString(); } diff --git a/core/src/main/java/org/elasticsearch/action/bulk/BulkShardResponse.java b/core/src/main/java/org/elasticsearch/action/bulk/BulkShardResponse.java index b4372958cd5f9..3b8f0727b2909 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/BulkShardResponse.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/BulkShardResponse.java @@ -20,8 +20,9 @@ package org.elasticsearch.action.bulk; import org.elasticsearch.action.DocWriteResponse; -import org.elasticsearch.action.support.replication.ReplicatedMutationRequest; -import org.elasticsearch.action.support.replication.ReplicatedMutationResponse; +import org.elasticsearch.action.support.WriteRequest; +import org.elasticsearch.action.support.replication.ReplicatedWriteResponse; +import org.elasticsearch.action.support.replication.ReplicationResponse; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.index.IndexSettings; @@ -32,7 +33,7 @@ /** * */ -public class BulkShardResponse extends ReplicatedMutationResponse { +public class BulkShardResponse extends ReplicationResponse implements ReplicatedWriteResponse { private ShardId shardId; private BulkItemResponse[] responses; @@ -56,8 +57,8 @@ public BulkItemResponse[] getResponses() { /** * Each DocWriteResponse already has a location for whether or not it forced a refresh so we just set that information on the response. * - * Requests that set {@link ReplicatedMutationRequest#setRefresh(boolean)} to true should always set this to true. Requests that set - * {@link ReplicatedMutationRequest#setBlockUntilRefresh(boolean)} to true should only set this to true if they run out of refresh + * Requests that set {@link WriteRequest#setRefresh(boolean)} to true should always set this to true. Requests that set + * {@link WriteRequest#setBlockUntilRefresh(boolean)} to true should only set this to true if they run out of refresh * listener slots (see {@link IndexSettings#MAX_REFRESH_LISTENERS_PER_SHARD}). 
*/ @Override diff --git a/core/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java b/core/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java index d097adfeaa005..4cbebd0739a1e 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java @@ -344,11 +344,10 @@ void executeBulk(Task task, final BulkRequest bulkRequest, final long startTimeN for (Map.Entry> entry : requestsByShard.entrySet()) { final ShardId shardId = entry.getKey(); final List requests = entry.getValue(); - BulkShardRequest bulkShardRequest = new BulkShardRequest(bulkRequest, shardId, bulkRequest.refresh(), + BulkShardRequest bulkShardRequest = new BulkShardRequest(bulkRequest, shardId, bulkRequest.getRefreshPolicy(), requests.toArray(new BulkItemRequest[requests.size()])); bulkShardRequest.consistencyLevel(bulkRequest.consistencyLevel()); bulkShardRequest.timeout(bulkRequest.timeout()); - bulkShardRequest.setBlockUntilRefresh(bulkRequest.shouldBlockUntilRefresh()); if (task != null) { bulkShardRequest.setParentTask(nodeId, task.getId()); } diff --git a/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java b/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java index f469efaaf8988..35d670961d6b0 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java @@ -30,7 +30,7 @@ import org.elasticsearch.action.index.TransportIndexAction; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.replication.ReplicationRequest; -import org.elasticsearch.action.support.replication.TransportReplicatedMutationAction; +import org.elasticsearch.action.support.replication.TransportWriteAction; import org.elasticsearch.action.update.UpdateHelper; import org.elasticsearch.action.update.UpdateRequest; import org.elasticsearch.action.update.UpdateResponse; @@ -68,7 +68,7 @@ /** * Performs the index operation. 
*/ -public class TransportShardBulkAction extends TransportReplicatedMutationAction { +public class TransportShardBulkAction extends TransportWriteAction { private final static String OP_TYPE_UPDATE = "update"; private final static String OP_TYPE_DELETE = "delete"; @@ -107,7 +107,9 @@ protected boolean resolveIndex() { } @Override - protected WriteResult onPrimaryShard(IndexService indexService, IndexShard indexShard, BulkShardRequest request) { + protected WriteResult onPrimaryShard(BulkShardRequest request, IndexShard indexShard) throws Exception { + ShardId shardId = request.shardId(); + final IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex()); final IndexMetaData metaData = indexService.getIndexSettings().getIndexMetaData(); long[] preVersions = new long[request.items().length]; @@ -123,7 +125,8 @@ protected WriteResult onPrimaryShard(IndexService indexServic for (int i = 0; i < items.length; i++) { responses[i] = items[i].getPrimaryResponse(); } - return new WriteResult<>(new BulkShardResponse(request.shardId(), responses), location); + BulkShardResponse response = new BulkShardResponse(request.shardId(), responses); + return new WriteResult<>(response, location); } private Translog.Location handleItem(IndexMetaData metaData, BulkShardRequest request, IndexShard indexShard, long[] preVersions, VersionType[] preVersionTypes, Translog.Location location, int requestIndex, BulkItemRequest item) { @@ -150,9 +153,9 @@ private Translog.Location index(IndexMetaData metaData, BulkShardRequest request preVersionTypes[requestIndex] = indexRequest.versionType(); try { WriteResult result = shardIndexOperation(request, indexRequest, metaData, indexShard, true); - location = locationToSync(location, result.location); + location = locationToSync(location, result.getLocation()); // add the response - IndexResponse indexResponse = result.response(); + IndexResponse indexResponse = result.getResponse(); setResponse(item, new BulkItemResponse(item.id(), indexRequest.opType().lowercase(), indexResponse)); } catch (Throwable e) { // rethrow the failure if we are going to retry on primary and let parent failure to handle it @@ -193,8 +196,8 @@ private Translog.Location delete(BulkShardRequest request, IndexShard indexShard try { // add the response final WriteResult writeResult = TransportDeleteAction.executeDeleteRequestOnPrimary(deleteRequest, indexShard); - DeleteResponse deleteResponse = writeResult.response(); - location = locationToSync(location, writeResult.location); + DeleteResponse deleteResponse = writeResult.getResponse(); + location = locationToSync(location, writeResult.getLocation()); setResponse(item, new BulkItemResponse(item.id(), OP_TYPE_DELETE, deleteResponse)); } catch (Throwable e) { // rethrow the failure if we are going to retry on primary and let parent failure to handle it @@ -233,7 +236,7 @@ private Tuple update(IndexMetaData metaData, } if (updateResult.success()) { if (updateResult.writeResult != null) { - location = locationToSync(location, updateResult.writeResult.location); + location = locationToSync(location, updateResult.writeResult.getLocation()); } switch (updateResult.result.operation()) { case UPSERT: @@ -243,7 +246,7 @@ private Tuple update(IndexMetaData metaData, IndexRequest indexRequest = updateResult.request(); BytesReference indexSourceAsBytes = indexRequest.source(); // add the response - IndexResponse indexResponse = result.response(); + IndexResponse indexResponse = result.getResponse(); UpdateResponse updateResponse = new 
UpdateResponse(indexResponse.getShardInfo(), indexResponse.getShardId(), indexResponse.getType(), indexResponse.getId(), indexResponse.getVersion(), indexResponse.isCreated()); if (updateRequest.fields() != null && updateRequest.fields().length > 0) { Tuple> sourceAndContent = XContentHelper.convertToMap(indexSourceAsBytes, true); @@ -255,7 +258,7 @@ private Tuple update(IndexMetaData metaData, case DELETE: @SuppressWarnings("unchecked") WriteResult writeResult = updateResult.writeResult; - DeleteResponse response = writeResult.response(); + DeleteResponse response = writeResult.getResponse(); DeleteRequest deleteRequest = updateResult.request(); updateResponse = new UpdateResponse(response.getShardInfo(), response.getShardId(), response.getType(), response.getId(), response.getVersion(), false); updateResponse.setGetResult(updateHelper.extractGetResult(updateRequest, request.index(), response.getVersion(), updateResult.result.updatedSourceAsMap(), updateResult.result.updateSourceContentType(), null)); @@ -327,8 +330,8 @@ private void setResponse(BulkItemRequest request, BulkItemResponse response) { } } - private WriteResult shardIndexOperation(BulkShardRequest request, IndexRequest indexRequest, IndexMetaData metaData, - IndexShard indexShard, boolean processed) throws Throwable { + private WriteResult shardIndexOperation(BulkShardRequest request, IndexRequest indexRequest, IndexMetaData metaData, + IndexShard indexShard, boolean processed) throws Throwable { MappingMetaData mappingMd = metaData.mappingOrDefault(indexRequest.type()); if (!processed) { diff --git a/core/src/main/java/org/elasticsearch/action/delete/DeleteRequest.java b/core/src/main/java/org/elasticsearch/action/delete/DeleteRequest.java index 7fe997073d85c..bdf09e3e532fd 100644 --- a/core/src/main/java/org/elasticsearch/action/delete/DeleteRequest.java +++ b/core/src/main/java/org/elasticsearch/action/delete/DeleteRequest.java @@ -21,7 +21,7 @@ import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.DocumentRequest; -import org.elasticsearch.action.support.replication.ReplicatedMutationRequest; +import org.elasticsearch.action.support.replication.ReplicatedWriteRequest; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -43,7 +43,7 @@ * @see org.elasticsearch.client.Client#delete(DeleteRequest) * @see org.elasticsearch.client.Requests#deleteRequest(String) */ -public class DeleteRequest extends ReplicatedMutationRequest implements DocumentRequest { +public class DeleteRequest extends ReplicatedWriteRequest implements DocumentRequest { private String type; private String id; diff --git a/core/src/main/java/org/elasticsearch/action/delete/DeleteRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/delete/DeleteRequestBuilder.java index d87dc5bfd54f1..b9b0f95f8de90 100644 --- a/core/src/main/java/org/elasticsearch/action/delete/DeleteRequestBuilder.java +++ b/core/src/main/java/org/elasticsearch/action/delete/DeleteRequestBuilder.java @@ -19,16 +19,17 @@ package org.elasticsearch.action.delete; +import org.elasticsearch.action.support.WriteRequestBuilder; import org.elasticsearch.action.support.replication.ReplicationRequestBuilder; import org.elasticsearch.client.ElasticsearchClient; import org.elasticsearch.common.Nullable; -import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.VersionType; /** * A delete document action request builder. 
*/ -public class DeleteRequestBuilder extends ReplicationRequestBuilder { +public class DeleteRequestBuilder extends ReplicationRequestBuilder + implements WriteRequestBuilder { public DeleteRequestBuilder(ElasticsearchClient client, DeleteAction action) { super(client, action, new DeleteRequest()); @@ -72,26 +73,6 @@ public DeleteRequestBuilder setRouting(String routing) { return this; } - /** - * Should a refresh be executed post this index operation causing the operation to - * be searchable. Note, heavy indexing should not set this to true. Defaults - * to false. - */ - public DeleteRequestBuilder setRefresh(boolean refresh) { - request.setRefresh(refresh); - return this; - } - - /** - * Should this request block until it has been made visible for search by a refresh? Unlike {@link #setRefresh(boolean)} this is quite - * safe to use under heavy indexing so long as few total operations use it. See {@link IndexSettings#MAX_REFRESH_LISTENERS_PER_SHARD} - * for the limit. A bulk request counts as one request on each shard that it touches. Defaults to false. - */ - public DeleteRequestBuilder setBlockUntilRefresh(boolean blockUntilRefresh) { - request.setBlockUntilRefresh(blockUntilRefresh); - return this; - } - /** * Sets the version, which will cause the delete operation to only be performed if a matching * version exists and no changes happened on the doc since then. diff --git a/core/src/main/java/org/elasticsearch/action/delete/TransportDeleteAction.java b/core/src/main/java/org/elasticsearch/action/delete/TransportDeleteAction.java index 89ded46c0158e..beced23c338a8 100644 --- a/core/src/main/java/org/elasticsearch/action/delete/TransportDeleteAction.java +++ b/core/src/main/java/org/elasticsearch/action/delete/TransportDeleteAction.java @@ -27,7 +27,7 @@ import org.elasticsearch.action.admin.indices.create.TransportCreateIndexAction; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.AutoCreateIndex; -import org.elasticsearch.action.support.replication.TransportReplicatedMutationAction; +import org.elasticsearch.action.support.replication.TransportWriteAction; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.action.shard.ShardStateAction; import org.elasticsearch.cluster.metadata.IndexMetaData; @@ -36,7 +36,6 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.index.IndexService; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.ShardId; @@ -50,7 +49,7 @@ /** * Performs the delete operation. 
*/ -public class TransportDeleteAction extends TransportReplicatedMutationAction { +public class TransportDeleteAction extends TransportWriteAction { private final AutoCreateIndex autoCreateIndex; private final TransportCreateIndexAction createIndexAction; @@ -119,7 +118,7 @@ protected DeleteResponse newResponseInstance() { } @Override - protected WriteResult onPrimaryShard(IndexService indexService, IndexShard indexShard, DeleteRequest request) { + protected WriteResult onPrimaryShard(DeleteRequest request, IndexShard indexShard) { return executeDeleteRequestOnPrimary(request, indexShard); } @@ -136,9 +135,8 @@ public static WriteResult executeDeleteRequestOnPrimary(DeleteRe request.version(delete.version()); assert request.versionType().validateVersionForWrites(request.version()); - return new WriteResult<>( - new DeleteResponse(indexShard.shardId(), request.type(), request.id(), delete.version(), delete.found()), - delete.getTranslogLocation()); + DeleteResponse response = new DeleteResponse(indexShard.shardId(), request.type(), request.id(), delete.version(), delete.found()); + return new WriteResult<>(response, delete.getTranslogLocation()); } public static Engine.Delete executeDeleteRequestOnReplica(DeleteRequest request, IndexShard indexShard) { diff --git a/core/src/main/java/org/elasticsearch/action/index/IndexRequest.java b/core/src/main/java/org/elasticsearch/action/index/IndexRequest.java index ffd71eacd0b6a..bc1e631e559ff 100644 --- a/core/src/main/java/org/elasticsearch/action/index/IndexRequest.java +++ b/core/src/main/java/org/elasticsearch/action/index/IndexRequest.java @@ -24,7 +24,7 @@ import org.elasticsearch.action.DocumentRequest; import org.elasticsearch.action.RoutingMissingException; import org.elasticsearch.action.TimestampParsingException; -import org.elasticsearch.action.support.replication.ReplicatedMutationRequest; +import org.elasticsearch.action.support.replication.ReplicatedWriteRequest; import org.elasticsearch.client.Requests; import org.elasticsearch.cluster.metadata.MappingMetaData; import org.elasticsearch.cluster.metadata.MetaData; @@ -67,7 +67,7 @@ * @see org.elasticsearch.client.Requests#indexRequest(String) * @see org.elasticsearch.client.Client#index(IndexRequest) */ -public class IndexRequest extends ReplicatedMutationRequest implements DocumentRequest { +public class IndexRequest extends ReplicatedWriteRequest implements DocumentRequest { /** * Operation type controls if the type of the index operation. diff --git a/core/src/main/java/org/elasticsearch/action/index/IndexRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/index/IndexRequestBuilder.java index 7ee910973ca14..20587bf0ea99d 100644 --- a/core/src/main/java/org/elasticsearch/action/index/IndexRequestBuilder.java +++ b/core/src/main/java/org/elasticsearch/action/index/IndexRequestBuilder.java @@ -19,6 +19,7 @@ package org.elasticsearch.action.index; +import org.elasticsearch.action.support.WriteRequestBuilder; import org.elasticsearch.action.support.replication.ReplicationRequestBuilder; import org.elasticsearch.client.ElasticsearchClient; import org.elasticsearch.common.Nullable; @@ -26,7 +27,6 @@ import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentType; -import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.VersionType; import java.util.Map; @@ -34,7 +34,8 @@ /** * An index document action request builder. 
*/ -public class IndexRequestBuilder extends ReplicationRequestBuilder { +public class IndexRequestBuilder extends ReplicationRequestBuilder + implements WriteRequestBuilder { public IndexRequestBuilder(ElasticsearchClient client, IndexAction action) { super(client, action, new IndexRequest()); @@ -221,26 +222,6 @@ public IndexRequestBuilder setCreate(boolean create) { return this; } - /** - * Should a refresh be executed post this index operation causing the operation to - * be searchable. Note, heavy indexing should not set this to true. Defaults - * to false. - */ - public IndexRequestBuilder setRefresh(boolean refresh) { - request.setRefresh(refresh); - return this; - } - - /** - * Should this request block until it has been made visible for search by a refresh? Unlike {@link #setRefresh(boolean)} this is quite - * safe to use under heavy indexing so long as few total operations use it. See {@link IndexSettings#MAX_REFRESH_LISTENERS_PER_SHARD} - * for the limit. A bulk request counts as one request on each shard that it touches. Defaults to false. - */ - public IndexRequestBuilder setBlockUntilRefresh(boolean blockUntilRefresh) { - request.setBlockUntilRefresh(blockUntilRefresh); - return this; - } - /** * Sets the version, which will cause the index operation to only be performed if a matching * version exists and no changes happened on the doc since then. diff --git a/core/src/main/java/org/elasticsearch/action/index/TransportIndexAction.java b/core/src/main/java/org/elasticsearch/action/index/TransportIndexAction.java index b2f4cc7500609..6c46f78ba1184 100644 --- a/core/src/main/java/org/elasticsearch/action/index/TransportIndexAction.java +++ b/core/src/main/java/org/elasticsearch/action/index/TransportIndexAction.java @@ -27,7 +27,7 @@ import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.AutoCreateIndex; import org.elasticsearch.action.support.replication.ReplicationOperation; -import org.elasticsearch.action.support.replication.TransportReplicationAction; +import org.elasticsearch.action.support.replication.TransportWriteAction; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.action.index.MappingUpdatedAction; import org.elasticsearch.cluster.action.shard.ShardStateAction; @@ -38,13 +38,11 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.index.IndexService; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.mapper.Mapping; import org.elasticsearch.index.mapper.SourceToParse; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.index.translog.Translog; import org.elasticsearch.index.translog.Translog.Location; import org.elasticsearch.indices.IndexAlreadyExistsException; import org.elasticsearch.indices.IndicesService; @@ -62,7 +60,7 @@ *
  • allowIdGeneration: If the id is set not, should it be generated. Defaults to true. * */ -public class TransportIndexAction extends TransportReplicationAction { +public class TransportIndexAction extends TransportWriteAction { private final AutoCreateIndex autoCreateIndex; private final boolean allowIdGeneration; @@ -78,7 +76,7 @@ public TransportIndexAction(Settings settings, TransportService transportService ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, AutoCreateIndex autoCreateIndex) { super(settings, IndexAction.NAME, transportService, clusterService, indicesService, threadPool, shardStateAction, - actionFilters, indexNameExpressionResolver, IndexRequest::new, IndexResponse::new, ThreadPool.Names.INDEX); + actionFilters, indexNameExpressionResolver, IndexRequest::new, ThreadPool.Names.INDEX); this.mappingUpdatedAction = mappingUpdatedAction; this.createIndexAction = createIndexAction; this.autoCreateIndex = autoCreateIndex; @@ -141,38 +139,8 @@ protected IndexResponse newResponseInstance() { } @Override - protected PrimaryResult shardOperationOnPrimary(IndexRequest request) throws Exception { - IndexService indexService = indicesService.indexServiceSafe(request.shardId().getIndex()); - IndexShard indexShard = indexService.getShard(request.shardId().id()); - Engine.Index operation = prepareIndexOperationOnPrimary(request, indexShard); - Mapping update = operation.parsedDoc().dynamicMappingsUpdate(); - final ShardId shardId = indexShard.shardId(); - if (update != null) { - mappingUpdatedAction.updateMappingOnMaster(shardId.getIndex(), request.type(), update); - operation = prepareIndexOperationOnPrimary(request, indexShard); - update = operation.parsedDoc().dynamicMappingsUpdate(); - if (update != null) { - throw new ReplicationOperation.RetryOnPrimaryException(shardId, - "Dynamic mappings are not available on the node that holds the primary yet"); - } - } - final boolean created = indexShard.index(operation); - - // update the version on request so it will happen on the replicas - final long version = operation.version(); - request.version(version); - request.versionType(request.versionType().versionTypeForReplicationAndRecovery()); - - assert request.versionType().validateVersionForWrites(request.version()); - - final Location location = operation.getTranslogLocation(); - return new WritePrimaryResult( - request, - new IndexResponse(shardId, request.type(), request.id(), request.version(), created), - location, - indexShard.getTranslogDurability() == Translog.Durability.REQUEST && location != null, - request.refreshPolicy(), indexShard - ); + protected WriteResult onPrimaryShard(IndexRequest request, IndexShard indexShard) throws Exception { + return executeIndexRequestOnPrimary(request, indexShard, mappingUpdatedAction); } @Override @@ -205,5 +173,31 @@ public static Engine.Index prepareIndexOperationOnPrimary(IndexRequest request, return indexShard.prepareIndexOnPrimary(sourceToParse, request.version(), request.versionType()); } + public static WriteResult executeIndexRequestOnPrimary(IndexRequest request, IndexShard indexShard, + MappingUpdatedAction mappingUpdatedAction) throws Exception { + Engine.Index operation = prepareIndexOperationOnPrimary(request, indexShard); + Mapping update = operation.parsedDoc().dynamicMappingsUpdate(); + final ShardId shardId = indexShard.shardId(); + if (update != null) { + mappingUpdatedAction.updateMappingOnMaster(shardId.getIndex(), request.type(), update); + operation = 
prepareIndexOperationOnPrimary(request, indexShard); + update = operation.parsedDoc().dynamicMappingsUpdate(); + if (update != null) { + throw new ReplicationOperation.RetryOnPrimaryException(shardId, + "Dynamic mappings are not available on the node that holds the primary yet"); + } + } + final boolean created = indexShard.index(operation); + + // update the version on request so it will happen on the replicas + final long version = operation.version(); + request.version(version); + request.versionType(request.versionType().versionTypeForReplicationAndRecovery()); + + assert request.versionType().validateVersionForWrites(request.version()); + + IndexResponse response = new IndexResponse(shardId, request.type(), request.id(), request.version(), created); + return new WriteResult<>(response, operation.getTranslogLocation()); + } } diff --git a/core/src/main/java/org/elasticsearch/action/ingest/IngestActionFilter.java b/core/src/main/java/org/elasticsearch/action/ingest/IngestActionFilter.java index 1eb9337c814b0..850cac040dd00 100644 --- a/core/src/main/java/org/elasticsearch/action/ingest/IngestActionFilter.java +++ b/core/src/main/java/org/elasticsearch/action/ingest/IngestActionFilter.java @@ -162,7 +162,7 @@ BulkRequest getBulkRequest() { return bulkRequest; } else { BulkRequest modifiedBulkRequest = new BulkRequest(); - modifiedBulkRequest.refresh(bulkRequest.refresh()); + modifiedBulkRequest.setRefreshPolicy(bulkRequest.getRefreshPolicy()); modifiedBulkRequest.consistencyLevel(bulkRequest.consistencyLevel()); modifiedBulkRequest.timeout(bulkRequest.timeout()); diff --git a/core/src/main/java/org/elasticsearch/action/support/WriteRequest.java b/core/src/main/java/org/elasticsearch/action/support/WriteRequest.java new file mode 100644 index 0000000000000..58cf38d68a82a --- /dev/null +++ b/core/src/main/java/org/elasticsearch/action/support/WriteRequest.java @@ -0,0 +1,95 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.support; + +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; + +import java.io.IOException; + +/** + * Base class for requests that modify data in some shard like delete, index, and shardBulk. + */ +public interface WriteRequest> { + /** + * Should this request trigger a refresh ({@linkplain RefreshPolicy#IMMEDIATE}), wait for a refresh ( + * {@linkplain RefreshPolicy#WAIT_UNTIL}), or proceed ignore refreshes entirely ({@linkplain RefreshPolicy#NONE}, the default). + */ + R setRefreshPolicy(RefreshPolicy refreshPolicy); + + /** + * Parse the refresh policy from a string, only modifying it if the string is non null. Convenient to use with request parsing. 
+ */ + @SuppressWarnings("unchecked") + default R setRefreshPolicy(String refreshPolicy) { + if (refreshPolicy != null) { + setRefreshPolicy(RefreshPolicy.parse(refreshPolicy)); + } + return (R) this; + } + + /** + * Should this request trigger a refresh ({@linkplain RefreshPolicy#IMMEDIATE}), wait for a refresh ( + * {@linkplain RefreshPolicy#WAIT_UNTIL}), or proceed ignore refreshes entirely ({@linkplain RefreshPolicy#NONE}, the default). + */ + RefreshPolicy getRefreshPolicy(); + + enum RefreshPolicy implements Writeable { + /** + * Don't refresh after this request. The default. + */ + NONE, + /** + * Force a refresh as part of this request. This refresh policy does not scale for high indexing or search throughput but is useful + * to present a consistent view to for indices with very low traffic. And it is wonderful for tests! + */ + IMMEDIATE, + /** + * Leave this request open until a refresh has made the contents of this request visible to search. This refresh policy is + * compatible with high indexing and search throughput but it causes the request to wait to reply until a refresh occurs. + */ + WAIT_UNTIL; + + /** + * Parse the string representation of a refresh policy, usually from a request parameter. + */ + public static RefreshPolicy parse(String string) { + switch (string) { + case "false": + return NONE; + case "true": + return IMMEDIATE; + case "wait_for": + return WAIT_UNTIL; + } + throw new IllegalArgumentException("Unknown value for refresh: [" + string + "]"); + } + + public static RefreshPolicy readFrom(StreamInput in) throws IOException { + return RefreshPolicy.values()[in.readByte()]; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeByte((byte) ordinal()); + } + } +} diff --git a/core/src/main/java/org/elasticsearch/action/support/WriteRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/support/WriteRequestBuilder.java new file mode 100644 index 0000000000000..225d84560debb --- /dev/null +++ b/core/src/main/java/org/elasticsearch/action/support/WriteRequestBuilder.java @@ -0,0 +1,49 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.support; + +import org.elasticsearch.action.support.WriteRequest.RefreshPolicy; + +public interface WriteRequestBuilder> { + WriteRequest request(); + + /** + * Should this request trigger a refresh ({@linkplain RefreshPolicy#IMMEDIATE}), wait for a refresh ( + * {@linkplain RefreshPolicy#WAIT_UNTIL}), or proceed ignore refreshes entirely ({@linkplain RefreshPolicy#NONE}, the default). 
+ */ + @SuppressWarnings("unchecked") + default B setRefreshPolicy(RefreshPolicy refreshPolicy) { + request().setRefreshPolicy(refreshPolicy); + return (B) this; + } + + /** + * If set to true then this request will force an immediate refresh. Backwards compatibility layer for Elasticsearch's old + * {@code setRefresh} calls. + * + * @deprecated use setRefreshPolicy instead + */ + @Deprecated + @SuppressWarnings("unchecked") + default B setRefresh(boolean refresh) { + request().setRefreshPolicy(refresh ? RefreshPolicy.IMMEDIATE : RefreshPolicy.NONE); + return (B) this; + } +} diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/ReplicatedMutationRequest.java b/core/src/main/java/org/elasticsearch/action/support/replication/ReplicatedWriteRequest.java similarity index 60% rename from core/src/main/java/org/elasticsearch/action/support/replication/ReplicatedMutationRequest.java rename to core/src/main/java/org/elasticsearch/action/support/replication/ReplicatedWriteRequest.java index bd3732b982aa6..4942eb348ec6c 100644 --- a/core/src/main/java/org/elasticsearch/action/support/replication/ReplicatedMutationRequest.java +++ b/core/src/main/java/org/elasticsearch/action/support/replication/ReplicatedWriteRequest.java @@ -19,65 +19,47 @@ package org.elasticsearch.action.support.replication; +import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.index.shard.ShardId; import java.io.IOException; -/** - * Base class for requests that modify data in some shard like delete, index, and shardBulk. - */ -public class ReplicatedMutationRequest> extends ReplicationRequest { - - public enum RefreshPolicy { - NONE, - IMMEDIATE, - WAIT_UNTIL - } - +public abstract class ReplicatedWriteRequest> extends ReplicationRequest implements WriteRequest { private RefreshPolicy refreshPolicy = RefreshPolicy.NONE; /** - * Create an empty request. + * Constructor for deserialization. */ - public ReplicatedMutationRequest() { + public ReplicatedWriteRequest() { } - /** - * Creates a new request with resolved shard id. - */ - public ReplicatedMutationRequest(ShardId shardId) { + public ReplicatedWriteRequest(ShardId shardId) { super(shardId); } - /** - * Should a refresh be executed post this index operation causing the operation to - * be searchable. Note, heavy indexing should not set this to true. Defaults - * to false. 
- */ + @Override @SuppressWarnings("unchecked") - public R setRefresh(RefreshPolicy refreshPolicy) { + public R setRefreshPolicy(RefreshPolicy refreshPolicy) { this.refreshPolicy = refreshPolicy; return (R) this; } - public RefreshPolicy refreshPolicy() { - + @Override + public RefreshPolicy getRefreshPolicy() { + return refreshPolicy; } - @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); - refresh = in.readBoolean(); - blockUntilRefresh = in.readBoolean(); + refreshPolicy = RefreshPolicy.readFrom(in); } @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - out.writeBoolean(refresh); - out.writeBoolean(blockUntilRefresh); + refreshPolicy.writeTo(out); } } diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/ReplicatedMutationResponse.java b/core/src/main/java/org/elasticsearch/action/support/replication/ReplicatedWriteResponse.java similarity index 75% rename from core/src/main/java/org/elasticsearch/action/support/replication/ReplicatedMutationResponse.java rename to core/src/main/java/org/elasticsearch/action/support/replication/ReplicatedWriteResponse.java index 2c5d228848245..d89fe7963cef3 100644 --- a/core/src/main/java/org/elasticsearch/action/support/replication/ReplicatedMutationResponse.java +++ b/core/src/main/java/org/elasticsearch/action/support/replication/ReplicatedWriteResponse.java @@ -19,17 +19,18 @@ package org.elasticsearch.action.support.replication; +import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.index.IndexSettings; /** - * Base class for responses that modify data in some shard like delete, index, and shardBulk. + * Interface for responses that modify data in some shard like delete, index, and shardBulk. */ -public abstract class ReplicatedMutationResponse extends ReplicationResponse { +public interface ReplicatedWriteResponse { /** * Mark the request with if it was forced to refresh the index. All implementations by default assume that the request didn't force a * refresh unless set otherwise so it mostly only makes sense to call this with {@code true}. Requests that set - * {@link ReplicatedMutationRequest#setRefresh(boolean)} to true should always set this to true. Requests that set - * {@link ReplicatedMutationRequest#setBlockUntilRefresh(boolean)} to true should only set this to true if they run out of refresh + * {@link WriteRequest#setRefresh(boolean)} to true should always set this to true. Requests that set + * {@link WriteRequest#setBlockUntilRefresh(boolean)} to true should only set this to true if they run out of refresh * listener slots (see {@link IndexSettings#MAX_REFRESH_LISTENERS_PER_SHARD}). 
*/ public abstract void setForcedRefresh(boolean forcedRefresh); diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java b/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java index 490a8dc839cbb..f5781b75a60e7 100644 --- a/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java +++ b/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java @@ -46,9 +46,10 @@ import java.util.function.Supplier; public class ReplicationOperation< - Request extends ReplicationRequest, - ReplicaRequest extends ReplicationRequest, - PrimaryResultT extends ReplicationOperation.PrimaryResult> { + Request extends ReplicationRequest, + ReplicaRequest extends ReplicationRequest, + PrimaryResultT extends ReplicationOperation.PrimaryResult + > { final private ESLogger logger; final private Request request; final private Supplier clusterStateSupplier; @@ -294,10 +295,10 @@ public static boolean isConflictException(Throwable e) { interface Primary< - Request extends ReplicationRequest, - ReplicaRequest extends ReplicationRequest, - PrimaryResultT extends PrimaryResult - > { + Request extends ReplicationRequest, + ReplicaRequest extends ReplicationRequest, + PrimaryResultT extends PrimaryResult + > { /** * routing entry for this primary diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicatedMutationAction.java b/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicatedMutationAction.java deleted file mode 100644 index 10d194ac1351b..0000000000000 --- a/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicatedMutationAction.java +++ /dev/null @@ -1,151 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.action.support.replication; - -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.support.ActionFilters; -import org.elasticsearch.action.support.replication.TransportReplicatedMutationAction.WriteResult; -import org.elasticsearch.cluster.action.shard.ShardStateAction; -import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; -import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.collect.Tuple; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.index.IndexService; -import org.elasticsearch.index.shard.IndexShard; -import org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.index.translog.Translog; -import org.elasticsearch.indices.IndicesService; -import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.TransportResponse; -import org.elasticsearch.transport.TransportService; - -import java.util.function.Supplier; - -/** - * Base class for transport actions that modify data in some shard like index, delete, and shardBulk. - */ -public abstract class TransportReplicatedMutationAction< - Request extends ReplicatedMutationRequest, - Response extends ReplicatedMutationResponse - > extends TransportReplicationAction, Request, Response> { - - protected TransportReplicatedMutationAction(Settings settings, String actionName, TransportService transportService, - ClusterService clusterService, IndicesService indicesService, ThreadPool threadPool, ShardStateAction shardStateAction, - ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, Supplier request, - String executor) { - super(settings, actionName, transportService, clusterService, indicesService, threadPool, shardStateAction, actionFilters, - indexNameExpressionResolver, request, request, executor); - } - - /** - * Called with a reference to the primary shard. - * - * @return the result of the write - basically just the response to send back and the translog location of the {@linkplain IndexShard} - * after the write was completed - */ - protected abstract WriteResult onPrimaryShard(IndexService indexService, IndexShard indexShard, Request request) - throws Exception; - - /** - * Called once per replica with a reference to the {@linkplain IndexShard} to modify. 
- * - * @return the translog location of the {@linkplain IndexShard} after the write was completed - */ - protected abstract Translog.Location onReplicaShard(Request request, IndexShard indexShard); - - @Override - protected Tuple> shardOperationOnPrimary(Request request) throws Exception { - IndexService indexService = indicesService.indexServiceSafe(request.shardId().getIndex()); - IndexShard indexShard = indexService.getShard(request.shardId().id()); - WriteResult result = onPrimaryShard(indexService, indexShard, request); - processAfterWrite(request.isRefresh(), indexShard, result.location); - if (request.isRefresh()) { - // Only setForcedRefresh if it is true because this can touch every item in a bulk request - result.response.setForcedRefresh(true); - } - return new Tuple<>(request, result); - } - - @Override - protected void asyncShardOperationOnPrimary(WriteResult result, Request request, ActionListener listener) { - IndexService indexService = indicesService.indexServiceSafe(request.shardId().getIndex()); - IndexShard indexShard = indexService.getShard(request.shardId().id()); - if (request.shouldBlockUntilRefresh() && false == request.isRefresh()) { - indexShard.addRefreshListener(result.location, forcedRefresh -> { - if (forcedRefresh) { - logger.warn("block_until_refresh request ran out of slots and forced a refresh: [{}]", request); - result.response.setForcedRefresh(true); - } - listener.onResponse(result.response); - }); - } else { - listener.onResponse(result.response); - } - } - - @Override - protected final void shardOperationOnReplica(Request request, ActionListener listener) { - final ShardId shardId = request.shardId(); - IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex()); - IndexShard indexShard = indexService.getShard(shardId.id()); - Translog.Location location = onReplicaShard(request, indexShard); - processAfterWrite(request.isRefresh(), indexShard, location); - if (request.shouldBlockUntilRefresh() && false == request.isRefresh() && location != null) { - indexShard.addRefreshListener(location, forcedRefresh -> { - logger.warn("block_until_refresh request ran out of slots and forced a refresh: [{}]", request); - // TODO mark the response?!? 
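        // (The wait-for-refresh handling deleted here is reintroduced further down in
        // TransportWriteAction#shardOperationOnReplica, driven by RefreshPolicy.WAIT_UNTIL
        // rather than the old refresh/blockUntilRefresh booleans.)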
- listener.onResponse(TransportResponse.Empty.INSTANCE); - }); - } else { - listener.onResponse(TransportResponse.Empty.INSTANCE); - } - } - - protected final void processAfterWrite(boolean refresh, IndexShard indexShard, Translog.Location location) { - if (refresh) { - try { - indexShard.refresh("refresh_flag_index"); - } catch (Throwable e) { - // ignore - } - } - if (indexShard.getTranslogDurability() == Translog.Durability.REQUEST && location != null) { - indexShard.sync(location); - } - indexShard.maybeFlush(); - } - - protected static class WriteResult { - public final T response; - public final Translog.Location location; - - public WriteResult(T response, Translog.Location location) { - this.response = response; - this.location = location; - } - - public T response() { - // this sets total, pending and failed to 0 and this is ok, because we will embed this into the replica - // request and not use it - response.setShardInfo(new ReplicationResponse.ShardInfo()); - return response; - } - } -} diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java b/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java index cab8690005bcd..4b650046252c0 100644 --- a/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java +++ b/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java @@ -51,7 +51,6 @@ import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.IndexShardState; import org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.index.translog.Translog; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.node.NodeClosedException; import org.elasticsearch.tasks.Task; @@ -158,15 +157,6 @@ protected void resolveRequest(MetaData metaData, IndexMetaData indexMetaData, Re */ protected abstract PrimaryResult shardOperationOnPrimary(Request shardRequest) throws Exception; -// /** -// * Asynchronous portion of primary operation on node with primary copy -// * -// * @param stash information saved from the synchronous phase of the operation for use in the async phase of the operation -// * @param shardRequest the request to the primary shard -// * @param listener implementers call this success or failure when the asynchronous operations are complete. -// */ -// protected abstract void asyncShardOperationOnPrimary(AsyncStash stash, Request shardRequest, ActionListener listener); - /** * Replica operation on nodes with replica copies. While this does take a listener it should not return until it has completed any * operations that it must take under the shard lock. The listener is for waiting for things like index to become visible in search. 
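With the commented-out asyncShardOperationOnPrimary hook removed, a subclass now does all of its primary-side work in one synchronous method and hands back a PrimaryResult. A minimal sketch under that contract (modeled on the flush action earlier in this patch; indicesService comes from the surrounding base class):

    @Override
    protected PrimaryResult shardOperationOnPrimary(ShardFlushRequest shardRequest) {
        IndexShard indexShard = indicesService.indexServiceSafe(shardRequest.shardId().getIndex())
                .getShard(shardRequest.shardId().id());
        indexShard.flush(shardRequest.getRequest());
        // The request is forwarded to the replicas as-is; the response needs no extra state.
        return new PrimaryResult(shardRequest, new ReplicationResponse());
    }
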
@@ -302,8 +292,8 @@ public void onFailure(Throwable e) {
         }
 
         protected ReplicationOperation createReplicatedOperation(
-                Request request, ActionListener listener,
-                PrimaryShardReference primaryShardReference, boolean executeOnReplicas) {
+                Request request, ActionListener listener,
+                PrimaryShardReference primaryShardReference, boolean executeOnReplicas) {
             return new ReplicationOperation<>(request, primaryShardReference, listener,
                 executeOnReplicas, checkWriteConsistency(), replicasProxy, clusterService::state, logger, actionName
             );
@@ -365,58 +355,6 @@ public void respond(ActionListener listener) {
         }
     }
 
-    protected class WritePrimaryResult extends PrimaryResult {
-        boolean refreshNeeded;
-        boolean forcedRefresh;
-        ActionListener listener = null;
-
-        public WritePrimaryResult(ReplicaRequest replicaRequest, Response finalResponse,
-                Translog.Location location,
-                boolean fsyncTranslog,
-                ReplicatedMutationRequest.RefreshPolicy refreshPolicy,
-                IndexShard indexShard) {
-            super(replicaRequest, finalResponse);
-            switch (refreshPolicy) {
-            case IMMEDIATE:
-                indexShard.refresh("bla");
-                synchronized (this) {
-                    forcedRefresh = true;
-                    refreshNeeded = false;
-                }
-                break;
-            case WAIT_UNTIL:
-                refreshNeeded = true;
-                indexShard.addRefreshListener(location, forcedRefresh -> {
-                    synchronized (WritePrimaryResult.this) {
-                        WritePrimaryResult.this.forcedRefresh = forcedRefresh;
-                        if (forcedRefresh) {
-                            // TODO:: logger.warn("block_until_refresh request ran out of slots and forced a refresh: [{}]", request);
-                        }
-                        refreshNeeded = false;
-                        respondIfNeeded();
-                    }
-                });
-
-                break;
-            }
-            if (fsyncTranslog) {
-                indexShard.sync(location);
-            }
-        }
-
-        @Override
-        public void respond(ActionListener listener) {
-            this.listener = listener;
-            respondIfNeeded();
-        }
-
-        protected synchronized void respondIfNeeded() {
-            if (refreshNeeded == false && listener != null) {
-                super.respond(listener);
-            }
-        }
-    }
-
     class ReplicaOperationTransportHandler implements TransportRequestHandler {
         @Override
         public void messageReceived(final ReplicaRequest request, final TransportChannel channel) throws Exception {
diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java b/core/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java
new file mode 100644
index 0000000000000..e765fca3e4e01
--- /dev/null
+++ b/core/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java
@@ -0,0 +1,180 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.support.replication;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.support.ActionFilters;
+import org.elasticsearch.cluster.action.shard.ShardStateAction;
+import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
+import org.elasticsearch.cluster.service.ClusterService;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.IndexService;
+import org.elasticsearch.index.shard.IndexShard;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.index.translog.Translog;
+import org.elasticsearch.index.translog.Translog.Location;
+import org.elasticsearch.indices.IndicesService;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.TransportResponse;
+import org.elasticsearch.transport.TransportService;
+
+import java.util.function.Supplier;
+
+/**
+ * Base class for transport actions that modify data in some shard like index, delete, and shardBulk.
+ */
+public abstract class TransportWriteAction<
+            Request extends ReplicatedWriteRequest,
+            Response extends ReplicationResponse & ReplicatedWriteResponse
+        > extends TransportReplicationAction {
+
+    protected TransportWriteAction(Settings settings, String actionName, TransportService transportService,
+            ClusterService clusterService, IndicesService indicesService, ThreadPool threadPool, ShardStateAction shardStateAction,
+            ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, Supplier request,
+            String executor) {
+        super(settings, actionName, transportService, clusterService, indicesService, threadPool, shardStateAction, actionFilters,
+                indexNameExpressionResolver, request, request, executor);
+    }
+
+    /**
+     * Called on the primary with a reference to the {@linkplain IndexShard} to modify.
+     */
+    protected abstract WriteResult onPrimaryShard(Request request, IndexShard indexShard) throws Exception;
+
+    /**
+     * Called once per replica with a reference to the {@linkplain IndexShard} to modify.
+     *
+     * @return the translog location of the {@linkplain IndexShard} after the write was completed
+     */
+    protected abstract Translog.Location onReplicaShard(Request request, IndexShard indexShard);
+
+    @Override
+    protected final WritePrimaryResult shardOperationOnPrimary(Request request) throws Exception {
+        final ShardId shardId = request.shardId();
+        IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex());
+        IndexShard indexShard = indexService.getShard(shardId.id());
+        WriteResult result = onPrimaryShard(request, indexShard);
+        return new WritePrimaryResult(request, result.getResponse(), result.getLocation(), indexShard);
+    }
+
+    @Override
+    protected final void shardOperationOnReplica(Request request, ActionListener listener) {
+        final ShardId shardId = request.shardId();
+        IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex());
+        IndexShard indexShard = indexService.getShard(shardId.id());
+        Translog.Location location = onReplicaShard(request, indexShard);
+        // NOCOMMIT deduplicate with the WritePrimaryResult
+        boolean forked = false;
+        switch (request.getRefreshPolicy()) {
+        case IMMEDIATE:
+            indexShard.refresh("refresh_flag_index");
+            break;
+        case WAIT_UNTIL:
+            forked = true;
+            indexShard.addRefreshListener(location, forcedRefresh -> {
+                if (forcedRefresh) {
+                    logger.warn("block_until_refresh request ran out of slots and forced a refresh: [{}]", request);
+                }
+                listener.onResponse(null);
+            });
+            break;
+        case NONE:
+            break;
+        }
+        boolean fsyncTranslog = indexShard.getTranslogDurability() == Translog.Durability.REQUEST && location != null;
+        if (fsyncTranslog) {
+            indexShard.sync(location);
+        }
+        indexShard.maybeFlush();
+        if (false == forked) {
+            listener.onResponse(null);
+        }
+    }
+
+    public static class WriteResult {
+        private final Response response;
+        private final Translog.Location location;
+
+        public WriteResult(Response response, Location location) {
+            this.response = response;
+            this.location = location;
+        }
+
+        public Response getResponse() {
+            return response;
+        }
+
+        public Translog.Location getLocation() {
+            return location;
+        }
+    }
+
+    protected class WritePrimaryResult extends PrimaryResult {
+        boolean refreshNeeded;
+        volatile ActionListener listener = null;
+
+        public WritePrimaryResult(Request request, Response finalResponse,
+                Translog.Location location,
+                IndexShard indexShard) {
+            super(request, finalResponse);
+            switch (request.getRefreshPolicy()) {
+            case IMMEDIATE:
+                indexShard.refresh("refresh_flag_index");
+                finalResponse.setForcedRefresh(true);
+                synchronized (this) {
+                    refreshNeeded = false;
+                }
+                break;
+            case WAIT_UNTIL:
+                refreshNeeded = true;
+                indexShard.addRefreshListener(location, forcedRefresh -> {
+                    synchronized (WritePrimaryResult.this) {
+                        if (forcedRefresh) {
+                            finalResponse.setForcedRefresh(true);
+                            logger.warn("block_until_refresh request ran out of slots and forced a refresh: [{}]", request);
+                        }
+                        refreshNeeded = false;
+                        respondIfNeeded();
+                    }
+                });
+                break;
+            case NONE:
+                break;
+            }
+            boolean fsyncTranslog = indexShard.getTranslogDurability() == Translog.Durability.REQUEST && location != null;
+            if (fsyncTranslog) {
+                indexShard.sync(location);
+            }
+            indexShard.maybeFlush();
+        }
+
+        @Override
+        public void respond(ActionListener listener) {
+            this.listener = listener;
+            respondIfNeeded();
+        }
+
+        protected synchronized void respondIfNeeded() {
+            if (refreshNeeded == false && listener != null) {
+                super.respond(listener);
+            }
+        }
+    }
+}
diff --git 
a/core/src/main/java/org/elasticsearch/action/update/TransportUpdateAction.java b/core/src/main/java/org/elasticsearch/action/update/TransportUpdateAction.java index 1e33cc4c32faf..ca55a63c1d606 100644 --- a/core/src/main/java/org/elasticsearch/action/update/TransportUpdateAction.java +++ b/core/src/main/java/org/elasticsearch/action/update/TransportUpdateAction.java @@ -176,7 +176,6 @@ protected void shardOperation(final UpdateRequest request, final ActionListener< switch (result.operation()) { case UPSERT: IndexRequest upsertRequest = result.action(); - upsertRequest.setBlockUntilRefresh(request.shouldBlockUntilRefresh()); // we fetch it from the index request so we don't generate the bytes twice, its already done in the index request final BytesReference upsertSourceBytes = upsertRequest.source(); indexAction.execute(upsertRequest, new ActionListener() { @@ -215,7 +214,6 @@ protected void doRun() { break; case INDEX: IndexRequest indexRequest = result.action(); - indexRequest.setBlockUntilRefresh(request.shouldBlockUntilRefresh()); // we fetch it from the index request so we don't generate the bytes twice, its already done in the index request final BytesReference indexSourceBytes = indexRequest.source(); indexAction.execute(indexRequest, new ActionListener() { @@ -247,7 +245,6 @@ protected void doRun() { break; case DELETE: DeleteRequest deleteRequest = result.action(); - deleteRequest.setBlockUntilRefresh(request.shouldBlockUntilRefresh()); deleteAction.execute(deleteRequest, new ActionListener() { @Override public void onResponse(DeleteResponse response) { diff --git a/core/src/main/java/org/elasticsearch/action/update/UpdateHelper.java b/core/src/main/java/org/elasticsearch/action/update/UpdateHelper.java index fc8af371d3a95..0c9c1c67978a6 100644 --- a/core/src/main/java/org/elasticsearch/action/update/UpdateHelper.java +++ b/core/src/main/java/org/elasticsearch/action/update/UpdateHelper.java @@ -131,7 +131,7 @@ protected Result prepare(ShardId shardId, UpdateRequest request, final GetResult // it has to be a "create!" 
.create(true) .ttl(ttl) - .setRefresh(request.isRefresh()) + .setRefreshPolicy(request.getRefreshPolicy()) .routing(request.routing()) .parent(request.parent()) .consistencyLevel(request.consistencyLevel()); @@ -229,12 +229,13 @@ protected Result prepare(ShardId shardId, UpdateRequest request, final GetResult .version(updateVersion).versionType(request.versionType()) .consistencyLevel(request.consistencyLevel()) .timestamp(timestamp).ttl(ttl) - .setRefresh(request.isRefresh()); + .setRefreshPolicy(request.getRefreshPolicy()); return new Result(indexRequest, Operation.INDEX, updatedSourceAsMap, updateSourceContentType); } else if ("delete".equals(operation)) { DeleteRequest deleteRequest = Requests.deleteRequest(request.index()).type(request.type()).id(request.id()).routing(routing).parent(parent) .version(updateVersion).versionType(request.versionType()) - .consistencyLevel(request.consistencyLevel()); + .consistencyLevel(request.consistencyLevel()) + .setRefreshPolicy(request.getRefreshPolicy()); return new Result(deleteRequest, Operation.DELETE, updatedSourceAsMap, updateSourceContentType); } else if ("none".equals(operation)) { UpdateResponse update = new UpdateResponse(shardId, getResult.getType(), getResult.getId(), getResult.getVersion(), false); diff --git a/core/src/main/java/org/elasticsearch/action/update/UpdateRequest.java b/core/src/main/java/org/elasticsearch/action/update/UpdateRequest.java index e1a33b34fb63b..e0846c1ce5dcb 100644 --- a/core/src/main/java/org/elasticsearch/action/update/UpdateRequest.java +++ b/core/src/main/java/org/elasticsearch/action/update/UpdateRequest.java @@ -23,6 +23,7 @@ import org.elasticsearch.action.DocumentRequest; import org.elasticsearch.action.WriteConsistencyLevel; import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.action.support.single.instance.InstanceShardOperationRequest; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.ParseFieldMatcher; @@ -35,7 +36,6 @@ import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; -import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.VersionType; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.script.Script; @@ -54,7 +54,8 @@ /** */ -public class UpdateRequest extends InstanceShardOperationRequest implements DocumentRequest { +public class UpdateRequest extends InstanceShardOperationRequest + implements DocumentRequest, WriteRequest { private String type; private String id; @@ -73,7 +74,7 @@ public class UpdateRequest extends InstanceShardOperationRequest private VersionType versionType = VersionType.INTERNAL; private int retryOnConflict = 0; - private boolean refresh = false; + private RefreshPolicy refreshPolicy = RefreshPolicy.NONE; private WriteConsistencyLevel consistencyLevel = WriteConsistencyLevel.DEFAULT; @@ -86,12 +87,6 @@ public class UpdateRequest extends InstanceShardOperationRequest @Nullable private IndexRequest doc; - /** - * Should this request block until all of its results are visible for search? - */ - private boolean blockUntilRefresh = false; - - public UpdateRequest() { } @@ -429,18 +424,15 @@ public VersionType versionType() { return this.versionType; } - /** - * Should a refresh be executed post this update operation causing the operation to - * be searchable. Note, heavy indexing should not set this to true. 
Defaults - * to false. - */ - public UpdateRequest setRefresh(boolean refresh) { - this.refresh = refresh; + @Override + public UpdateRequest setRefreshPolicy(RefreshPolicy refreshPolicy) { + this.refreshPolicy = refreshPolicy; return this; } - public boolean isRefresh() { - return this.refresh; + @Override + public RefreshPolicy getRefreshPolicy() { + return refreshPolicy; } public WriteConsistencyLevel consistencyLevel() { @@ -725,20 +717,6 @@ public UpdateRequest scriptedUpsert(boolean scriptedUpsert) { return this; } - /** - * Should this request block until it has been made visible for search by a refresh? Unlike {@link #setRefresh(boolean)} this is quite safe - * to use under heavy indexing so long as few total operations use it. See {@link IndexSettings#MAX_REFRESH_LISTENERS_PER_SHARD} for - * the limit. Defaults to false. - */ - public UpdateRequest setBlockUntilRefresh(boolean blockUntilRefresh) { - this.blockUntilRefresh = blockUntilRefresh; - return this; - } - - public boolean shouldBlockUntilRefresh() { - return blockUntilRefresh; - } - @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); @@ -751,7 +729,7 @@ public void readFrom(StreamInput in) throws IOException { script = new Script(in); } retryOnConflict = in.readVInt(); - refresh = in.readBoolean(); + refreshPolicy = RefreshPolicy.readFrom(in); if (in.readBoolean()) { doc = new IndexRequest(); doc.readFrom(in); @@ -772,7 +750,6 @@ public void readFrom(StreamInput in) throws IOException { versionType = VersionType.fromValue(in.readByte()); detectNoop = in.readBoolean(); scriptedUpsert = in.readBoolean(); - blockUntilRefresh = in.readBoolean(); } @Override @@ -789,7 +766,7 @@ public void writeTo(StreamOutput out) throws IOException { script.writeTo(out); } out.writeVInt(retryOnConflict); - out.writeBoolean(refresh); + refreshPolicy.writeTo(out); if (doc == null) { out.writeBoolean(false); } else { @@ -823,7 +800,6 @@ public void writeTo(StreamOutput out) throws IOException { out.writeByte(versionType.getValue()); out.writeBoolean(detectNoop); out.writeBoolean(scriptedUpsert); - out.writeBoolean(blockUntilRefresh); } } diff --git a/core/src/main/java/org/elasticsearch/action/update/UpdateRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/update/UpdateRequestBuilder.java index cecb3ffd65d98..403f4265fcdf4 100644 --- a/core/src/main/java/org/elasticsearch/action/update/UpdateRequestBuilder.java +++ b/core/src/main/java/org/elasticsearch/action/update/UpdateRequestBuilder.java @@ -21,21 +21,20 @@ import org.elasticsearch.action.WriteConsistencyLevel; import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.support.WriteRequestBuilder; import org.elasticsearch.action.support.single.instance.InstanceShardOperationRequestBuilder; import org.elasticsearch.client.ElasticsearchClient; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentType; -import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.VersionType; import org.elasticsearch.script.Script; import java.util.Map; -/** - */ -public class UpdateRequestBuilder extends InstanceShardOperationRequestBuilder { +public class UpdateRequestBuilder extends InstanceShardOperationRequestBuilder + implements WriteRequestBuilder { public UpdateRequestBuilder(ElasticsearchClient client, UpdateAction action) { super(client, action, new 
UpdateRequest()); @@ -122,17 +121,6 @@ public UpdateRequestBuilder setVersionType(VersionType versionType) { return this; } - - /** - * Should a refresh be executed post this update operation causing the operation to - * be searchable. Note, heavy indexing should not set this to true. Defaults - * to false. - */ - public UpdateRequestBuilder setRefresh(boolean refresh) { - request.setRefresh(refresh); - return this; - } - /** * Sets the consistency level of write. Defaults to {@link org.elasticsearch.action.WriteConsistencyLevel#DEFAULT} */ @@ -326,16 +314,6 @@ public UpdateRequestBuilder setScriptedUpsert(boolean scriptedUpsert) { return this; } - /** - * Should this request block until it has been made visible for search by a refresh? Unlike {@link #setRefresh(boolean)} this is quite - * safe to use under heavy indexing so long as few total operations use it. See {@link IndexSettings#MAX_REFRESH_LISTENERS_PER_SHARD} - * for the limit. Defaults to false. - */ - public UpdateRequestBuilder setBlockUntilRefresh(boolean blockUntilRefresh) { - request.setBlockUntilRefresh(blockUntilRefresh); - return this; - } - /** * Set the new ttl of the document as a long. Note that if detectNoop is true (the default) * and the source of the document isn't changed then the ttl update won't take diff --git a/core/src/main/java/org/elasticsearch/rest/action/bulk/RestBulkAction.java b/core/src/main/java/org/elasticsearch/rest/action/bulk/RestBulkAction.java index ccd64eeccd641..d9dbb21e80445 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/bulk/RestBulkAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/bulk/RestBulkAction.java @@ -84,8 +84,7 @@ public void handleRequest(final RestRequest request, final RestChannel channel, bulkRequest.consistencyLevel(WriteConsistencyLevel.fromString(consistencyLevel)); } bulkRequest.timeout(request.paramAsTime("timeout", BulkShardRequest.DEFAULT_TIMEOUT)); - bulkRequest.refresh(request.paramAsBoolean("refresh", bulkRequest.refresh())); - bulkRequest.setBlockUntilRefresh(request.paramAsBoolean("block_until_refresh", bulkRequest.shouldBlockUntilRefresh())); + bulkRequest.setRefreshPolicy(request.param("refresh")); bulkRequest.add(request.content(), defaultIndex, defaultType, defaultRouting, defaultFields, defaultPipeline, null, allowExplicitIndex); client.bulk(bulkRequest, new RestBuilderListener(channel) { diff --git a/core/src/main/java/org/elasticsearch/rest/action/delete/RestDeleteAction.java b/core/src/main/java/org/elasticsearch/rest/action/delete/RestDeleteAction.java index 5273713f14a84..29316893504fe 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/delete/RestDeleteAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/delete/RestDeleteAction.java @@ -51,8 +51,7 @@ public void handleRequest(final RestRequest request, final RestChannel channel, deleteRequest.routing(request.param("routing")); deleteRequest.parent(request.param("parent")); // order is important, set it after routing, so it will set the routing deleteRequest.timeout(request.paramAsTime("timeout", DeleteRequest.DEFAULT_TIMEOUT)); - deleteRequest.setRefresh(request.paramAsBoolean("refresh", deleteRequest.isRefresh())); - deleteRequest.setBlockUntilRefresh(request.paramAsBoolean("block_until_refresh", deleteRequest.shouldBlockUntilRefresh())); + deleteRequest.setRefreshPolicy(request.param("refresh")); deleteRequest.version(RestActions.parseVersion(request)); deleteRequest.versionType(VersionType.fromString(request.param("version_type"), 
deleteRequest.versionType())); diff --git a/core/src/main/java/org/elasticsearch/rest/action/index/RestIndexAction.java b/core/src/main/java/org/elasticsearch/rest/action/index/RestIndexAction.java index 887e170a27459..f807e68088a2a 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/index/RestIndexAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/index/RestIndexAction.java @@ -80,8 +80,7 @@ public void handleRequest(final RestRequest request, final RestChannel channel, indexRequest.setPipeline(request.param("pipeline")); indexRequest.source(request.content()); indexRequest.timeout(request.paramAsTime("timeout", IndexRequest.DEFAULT_TIMEOUT)); - indexRequest.setRefresh(request.paramAsBoolean("refresh", indexRequest.isRefresh())); - indexRequest.setBlockUntilRefresh(request.paramAsBoolean("block_until_refresh", indexRequest.shouldBlockUntilRefresh())); + indexRequest.setRefreshPolicy(request.param("refresh")); indexRequest.version(RestActions.parseVersion(request)); indexRequest.versionType(VersionType.fromString(request.param("version_type"), indexRequest.versionType())); String sOpType = request.param("op_type"); diff --git a/core/src/main/java/org/elasticsearch/rest/action/update/RestUpdateAction.java b/core/src/main/java/org/elasticsearch/rest/action/update/RestUpdateAction.java index 7e55c6f0341ae..bdea4e33e6d30 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/update/RestUpdateAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/update/RestUpdateAction.java @@ -58,8 +58,7 @@ public void handleRequest(final RestRequest request, final RestChannel channel, updateRequest.routing(request.param("routing")); updateRequest.parent(request.param("parent")); updateRequest.timeout(request.paramAsTime("timeout", updateRequest.timeout())); - updateRequest.setRefresh(request.paramAsBoolean("refresh", updateRequest.isRefresh())); - updateRequest.setBlockUntilRefresh(request.paramAsBoolean("block_until_refresh", updateRequest.shouldBlockUntilRefresh())); + updateRequest.setRefreshPolicy(request.param("refresh")); String consistencyLevel = request.param("consistency"); if (consistencyLevel != null) { updateRequest.consistencyLevel(WriteConsistencyLevel.fromString(consistencyLevel)); diff --git a/core/src/test/java/org/elasticsearch/action/bulk/BulkRequestTests.java b/core/src/test/java/org/elasticsearch/action/bulk/BulkRequestTests.java index 425d5ef975a9c..a8ad7aaa62d3b 100644 --- a/core/src/test/java/org/elasticsearch/action/bulk/BulkRequestTests.java +++ b/core/src/test/java/org/elasticsearch/action/bulk/BulkRequestTests.java @@ -24,6 +24,7 @@ import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.delete.DeleteRequest; import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.support.WriteRequest.RefreshPolicy; import org.elasticsearch.action.update.UpdateRequest; import org.elasticsearch.client.Requests; import org.elasticsearch.common.Strings; @@ -180,22 +181,22 @@ public void testSimpleBulk10() throws Exception { public void testBulkRequestWithRefresh() throws Exception { BulkRequest bulkRequest = new BulkRequest(); // We force here a "id is missing" validation error - bulkRequest.add(new DeleteRequest("index", "type", null).setRefresh(true)); + bulkRequest.add(new DeleteRequest("index", "type", null).setRefreshPolicy(RefreshPolicy.IMMEDIATE)); // We force here a "type is missing" validation error bulkRequest.add(new DeleteRequest("index", null, "id")); - bulkRequest.add(new 
DeleteRequest("index", "type", "id").setRefresh(true)); - bulkRequest.add(new UpdateRequest("index", "type", "id").doc("{}").setRefresh(true)); - bulkRequest.add(new IndexRequest("index", "type", "id").source("{}").setRefresh(true)); + bulkRequest.add(new DeleteRequest("index", "type", "id").setRefreshPolicy(RefreshPolicy.IMMEDIATE)); + bulkRequest.add(new UpdateRequest("index", "type", "id").doc("{}").setRefreshPolicy(RefreshPolicy.IMMEDIATE)); + bulkRequest.add(new IndexRequest("index", "type", "id").source("{}").setRefreshPolicy(RefreshPolicy.IMMEDIATE)); ActionRequestValidationException validate = bulkRequest.validate(); assertThat(validate, notNullValue()); assertThat(validate.validationErrors(), not(empty())); assertThat(validate.validationErrors(), contains( - "Refresh is not supported on an item request, set the refresh flag on the BulkRequest instead.", + "RefreshPolicy is not supported on an item request. Set it on the BulkRequest instead.", "id is missing", "type is missing", - "Refresh is not supported on an item request, set the refresh flag on the BulkRequest instead.", - "Refresh is not supported on an item request, set the refresh flag on the BulkRequest instead.", - "Refresh is not supported on an item request, set the refresh flag on the BulkRequest instead.")); + "RefreshPolicy is not supported on an item request. Set it on the BulkRequest instead.", + "RefreshPolicy is not supported on an item request. Set it on the BulkRequest instead.", + "RefreshPolicy is not supported on an item request. Set it on the BulkRequest instead.")); } // issue 15120 diff --git a/core/src/test/java/org/elasticsearch/action/bulk/BulkShardRequestTests.java b/core/src/test/java/org/elasticsearch/action/bulk/BulkShardRequestTests.java index ff1a24d690014..b26d2531ff0f3 100644 --- a/core/src/test/java/org/elasticsearch/action/bulk/BulkShardRequestTests.java +++ b/core/src/test/java/org/elasticsearch/action/bulk/BulkShardRequestTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.action.bulk; +import org.elasticsearch.action.support.WriteRequest.RefreshPolicy; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.test.ESTestCase; @@ -28,9 +29,11 @@ public class BulkShardRequestTests extends ESTestCase { public void testToString() { String index = randomSimpleString(random(), 10); int count = between(1, 100); - BulkShardRequest r = new BulkShardRequest(null, new ShardId(index, "ignored", 0), false, new BulkItemRequest[count]); + BulkShardRequest r = new BulkShardRequest(null, new ShardId(index, "ignored", 0), RefreshPolicy.NONE, new BulkItemRequest[count]); assertEquals("BulkShardRequest to [" + index + "] containing [" + count + "] requests", r.toString()); - r = new BulkShardRequest(null, new ShardId(index, "ignored", 0), true, new BulkItemRequest[count]); + r = new BulkShardRequest(null, new ShardId(index, "ignored", 0), RefreshPolicy.IMMEDIATE, new BulkItemRequest[count]); assertEquals("BulkShardRequest to [" + index + "] containing [" + count + "] requests and a refresh", r.toString()); + r = new BulkShardRequest(null, new ShardId(index, "ignored", 0), RefreshPolicy.WAIT_UNTIL, new BulkItemRequest[count]); + assertEquals("BulkShardRequest to [" + index + "] containing [" + count + "] requests blocking until refresh", r.toString()); } } diff --git a/core/src/test/java/org/elasticsearch/action/support/replication/ReplicationOperationTests.java b/core/src/test/java/org/elasticsearch/action/support/replication/ReplicationOperationTests.java index 791f5e1fb0e13..55e2a9d3cf28b 
100644 --- a/core/src/test/java/org/elasticsearch/action/support/replication/ReplicationOperationTests.java +++ b/core/src/test/java/org/elasticsearch/action/support/replication/ReplicationOperationTests.java @@ -24,12 +24,12 @@ import org.elasticsearch.action.UnavailableShardsException; import org.elasticsearch.action.WriteConsistencyLevel; import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.action.support.replication.ReplicationResponse.ShardInfo; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.routing.IndexShardRoutingTable; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.ShardRoutingState; -import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.logging.ESLogger; @@ -101,7 +101,7 @@ public void testReplication() throws Exception { } Request request = new Request(shardId); - PlainActionFuture listener = new PlainActionFuture<>(); + PlainActionFuture listener = new PlainActionFuture<>(); final ClusterState finalState = state; final TestReplicaProxy replicasProxy = new TestReplicaProxy(expectedFailures); final TestReplicationOperation op = new TestReplicationOperation(request, @@ -113,7 +113,7 @@ public void testReplication() throws Exception { assertThat(request.processedOnReplicas, equalTo(expectedReplicas)); assertThat(replicasProxy.failedReplicas, equalTo(expectedFailedShards)); assertTrue("listener is not marked as done", listener.isDone()); - Response.ShardInfo shardInfo = listener.actionGet().getShardInfo(); + ShardInfo shardInfo = listener.actionGet().getShardInfo(); assertThat(shardInfo.getFailed(), equalTo(expectedFailedShards.size())); assertThat(shardInfo.getFailures(), arrayWithSize(expectedFailedShards.size())); assertThat(shardInfo.getSuccessful(), equalTo(1 + expectedReplicas.size() - expectedFailures.size())); @@ -134,7 +134,7 @@ public void testReplicationWithShadowIndex() throws Exception { final ShardRouting primaryShard = indexShardRoutingTable.primaryShard(); Request request = new Request(shardId); - PlainActionFuture listener = new PlainActionFuture<>(); + PlainActionFuture listener = new PlainActionFuture<>(); final TestReplicationOperation op = new TestReplicationOperation(request, new TestPrimary(primaryShard, primaryTerm), listener, false, false, new TestReplicaProxy(), () -> state, logger, "test"); @@ -142,7 +142,7 @@ public void testReplicationWithShadowIndex() throws Exception { assertThat("request was not processed on primary", request.processedOnPrimary.get(), equalTo(true)); assertThat(request.processedOnReplicas, equalTo(Collections.emptySet())); assertTrue("listener is not marked as done", listener.isDone()); - Response.ShardInfo shardInfo = listener.actionGet().getShardInfo(); + ShardInfo shardInfo = listener.actionGet().getShardInfo(); assertThat(shardInfo.getFailed(), equalTo(0)); assertThat(shardInfo.getFailures(), arrayWithSize(0)); assertThat(shardInfo.getSuccessful(), equalTo(1)); @@ -171,7 +171,7 @@ public void testDemotedPrimary() throws Exception { expectedFailures.put(failedReplica, new CorruptIndexException("simulated", (String) null)); Request request = new Request(shardId); - PlainActionFuture listener = new PlainActionFuture<>(); + PlainActionFuture listener = new PlainActionFuture<>(); final ClusterState finalState = state; final 
TestReplicaProxy replicasProxy = new TestReplicaProxy(expectedFailures) { @Override @@ -232,8 +232,8 @@ private void testClusterStateChangeAfterPrimaryOperation(final ShardId shardId, final ShardRouting primaryShard = state.get().routingTable().shardRoutingTable(shardId).primaryShard(); final TestPrimary primary = new TestPrimary(primaryShard, primaryTerm) { @Override - public Tuple perform(Request request) throws Exception { - Tuple result = super.perform(request); + public Result perform(Request request) throws Exception { + Result result = super.perform(request); state.set(changedState); logger.debug("--> state after primary operation:\n{}", state.get().prettyPrint()); return result; @@ -241,7 +241,7 @@ public Tuple perform(Request request) throws Exception { }; Request request = new Request(shardId); - PlainActionFuture listener = new PlainActionFuture<>(); + PlainActionFuture listener = new PlainActionFuture<>(); final TestReplicationOperation op = new TestReplicationOperation(request, primary, listener, new TestReplicaProxy(), state::get); op.execute(); @@ -295,7 +295,7 @@ public void testWriteConsistency() throws Exception { state.prettyPrint()); final long primaryTerm = state.metaData().index(index).primaryTerm(shardId.id()); final IndexShardRoutingTable shardRoutingTable = state.routingTable().index(index).shard(shardId.id()); - PlainActionFuture listener = new PlainActionFuture<>(); + PlainActionFuture listener = new PlainActionFuture<>(); final ShardRouting primaryShard = shardRoutingTable.primaryShard(); final TestReplicationOperation op = new TestReplicationOperation(request, new TestPrimary(primaryShard, primaryTerm), @@ -361,18 +361,13 @@ public void readFrom(StreamInput in) throws IOException { } } - static class Response extends ReplicationResponse { - } - - static class TestPrimary implements ReplicationOperation.Primary { + static class TestPrimary implements ReplicationOperation.Primary { final ShardRouting routing; final long term; - final String stash; TestPrimary(ShardRouting routing, long term) { this.routing = routing; this.term = term; - stash = randomAsciiOfLength(5); } @Override @@ -386,18 +381,35 @@ public void failShard(String message, Throwable throwable) { } @Override - public Tuple perform(Request request) throws Exception { + public Result perform(Request request) throws Exception { if (request.processedOnPrimary.compareAndSet(false, true) == false) { fail("processed [" + request + "] twice"); } request.primaryTerm(term); - return new Tuple<>(request, stash); + return new Result(request); } - @Override - public void performAsync(String stash, Request request, ActionListener listener) throws Exception { - assertEquals(this.stash, stash); - listener.onResponse(new Response()); + static class Result implements ReplicationOperation.PrimaryResult { + private final Request request; + private ShardInfo shardInfo; + + public Result(Request request) { + this.request = request; + } + + @Override + public Request replicaRequest() { + return request; + } + + @Override + public void setShardInfo(ShardInfo shardInfo) { + this.shardInfo = shardInfo; + } + + public ShardInfo getShardInfo() { + return shardInfo; + } } } @@ -443,15 +455,15 @@ public void failShard(ShardRouting replica, ShardRouting primary, String message } } - class TestReplicationOperation extends ReplicationOperation { - public TestReplicationOperation(Request request, Primary primary, - ActionListener listener, Replicas replicas, Supplier clusterStateSupplier) { + class TestReplicationOperation extends 
ReplicationOperation { + public TestReplicationOperation(Request request, Primary primary, + ActionListener listener, Replicas replicas, Supplier clusterStateSupplier) { this(request, primary, listener, true, false, replicas, clusterStateSupplier, ReplicationOperationTests.this.logger, "test"); } - public TestReplicationOperation(Request request, Primary primary, - ActionListener listener, boolean executeOnReplicas, boolean checkWriteConsistency, Replicas replicas, - Supplier clusterStateSupplier, ESLogger logger, String opType) { + public TestReplicationOperation(Request request, Primary primary, + ActionListener listener, boolean executeOnReplicas, boolean checkWriteConsistency, + Replicas replicas, Supplier clusterStateSupplier, ESLogger logger, String opType) { super(request, primary, listener, executeOnReplicas, checkWriteConsistency, replicas, clusterStateSupplier, logger, opType); } } diff --git a/core/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java b/core/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java index ebfa12f3f6c58..08c01e718c6e1 100644 --- a/core/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java +++ b/core/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java @@ -42,7 +42,6 @@ import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.lease.Releasable; @@ -154,7 +153,7 @@ public void testBlocks() throws ExecutionException, InterruptedException { ClusterBlocks.Builder block = ClusterBlocks.builder() .addGlobalBlock(new ClusterBlock(1, "non retryable", false, true, RestStatus.SERVICE_UNAVAILABLE, ClusterBlockLevel.ALL)); setState(clusterService, ClusterState.builder(clusterService.state()).blocks(block)); - TransportReplicationAction.ReroutePhase reroutePhase = action.new ReroutePhase(task, request, listener); + Action.ReroutePhase reroutePhase = action.new ReroutePhase(task, request, listener); reroutePhase.run(); assertListenerThrows("primary phase should fail operation", listener, ClusterBlockException.class); assertPhase(task, "failed"); @@ -198,7 +197,7 @@ public void testNotStartedPrimary() throws InterruptedException, ExecutionExcept Request request = new Request(shardId).timeout("1ms"); PlainActionFuture listener = new PlainActionFuture<>(); - TransportReplicationAction.ReroutePhase reroutePhase = action.new ReroutePhase(task, request, listener); + Action.ReroutePhase reroutePhase = action.new ReroutePhase(task, request, listener); reroutePhase.run(); assertListenerThrows("unassigned primary didn't cause a timeout", listener, UnavailableShardsException.class); assertPhase(task, "failed"); @@ -244,7 +243,7 @@ public void testNoRerouteOnStaleClusterState() throws InterruptedException, Exec Request request = new Request(shardId).timeout("1ms").routedBasedOnClusterVersion(clusterService.state().version() + 1); PlainActionFuture listener = new PlainActionFuture<>(); - TransportReplicationAction.ReroutePhase reroutePhase = action.new ReroutePhase(null, request, listener); + Action.ReroutePhase reroutePhase = action.new ReroutePhase(null, request, listener); reroutePhase.run(); 
assertListenerThrows("cluster state too old didn't cause a timeout", listener, UnavailableShardsException.class); @@ -284,7 +283,7 @@ public void testUnknownIndexOrShardOnReroute() throws InterruptedException { PlainActionFuture listener = new PlainActionFuture<>(); ReplicationTask task = maybeTask(); - TransportReplicationAction.ReroutePhase reroutePhase = action.new ReroutePhase(task, request, listener); + Action.ReroutePhase reroutePhase = action.new ReroutePhase(task, request, listener); reroutePhase.run(); assertListenerThrows("must throw index not found exception", listener, IndexNotFoundException.class); assertPhase(task, "failed"); @@ -311,7 +310,7 @@ public void testStalePrimaryShardOnReroute() throws InterruptedException { PlainActionFuture listener = new PlainActionFuture<>(); ReplicationTask task = maybeTask(); - TransportReplicationAction.ReroutePhase reroutePhase = action.new ReroutePhase(task, request, listener); + Action.ReroutePhase reroutePhase = action.new ReroutePhase(task, request, listener); reroutePhase.run(); CapturingTransport.CapturedRequest[] capturedRequests = transport.getCapturedRequestsAndClear(); assertThat(capturedRequests, arrayWithSize(1)); @@ -363,7 +362,7 @@ public void testRoutePhaseExecutesRequest() { Request request = new Request(shardId); PlainActionFuture listener = new PlainActionFuture<>(); - TransportReplicationAction.ReroutePhase reroutePhase = action.new ReroutePhase(task, request, listener); + Action.ReroutePhase reroutePhase = action.new ReroutePhase(task, request, listener); reroutePhase.run(); assertThat(request.shardId(), equalTo(shardId)); logger.info("--> primary is assigned to [{}], checking request forwarded", primaryNodeId); @@ -392,9 +391,9 @@ public void testPrimaryPhaseExecutesOrDelegatesRequestToRelocationTarget() throw AtomicBoolean executed = new AtomicBoolean(); Action.PrimaryOperationTransportHandler primaryPhase = action.new PrimaryOperationTransportHandler() { @Override - protected ReplicationOperation createReplicatedOperation(Request request, ActionListener actionListener, - Action.PrimaryShardReference primaryShardReference, - boolean executeOnReplicas) { + protected ReplicationOperation createReplicatedOperation(Request request, + ActionListener actionListener, Action.PrimaryShardReference primaryShardReference, + boolean executeOnReplicas) { return new NoopReplicationOperation(request, actionListener) { public void execute() throws Exception { assertPhase(task, "primary"); @@ -447,9 +446,9 @@ public void testPrimaryPhaseExecutesDelegatedRequestOnRelocationTarget() throws AtomicBoolean executed = new AtomicBoolean(); Action.PrimaryOperationTransportHandler primaryPhase = action.new PrimaryOperationTransportHandler() { @Override - protected ReplicationOperation createReplicatedOperation(Request request, ActionListener actionListener, - Action.PrimaryShardReference primaryShardReference, - boolean executeOnReplicas) { + protected ReplicationOperation createReplicatedOperation(Request request, + ActionListener actionListener, Action.PrimaryShardReference primaryShardReference, + boolean executeOnReplicas) { return new NoopReplicationOperation(request, actionListener) { public void execute() throws Exception { assertPhase(task, "primary"); @@ -477,7 +476,7 @@ public void testPrimaryReference() throws Exception { }; Action.PrimaryShardReference primary = action.new PrimaryShardReference(shard, releasable); final Request request = new Request(); - Request replicaRequest = primary.perform(request).v1(); + Request 
replicaRequest = primary.perform(request).replicaRequest; assertThat(replicaRequest.primaryTerm(), equalTo(primaryTerm)); @@ -582,9 +581,9 @@ public void testShadowIndexDisablesReplication() throws Exception { setState(clusterService, state); Action.PrimaryOperationTransportHandler primaryPhase = action.new PrimaryOperationTransportHandler() { @Override - protected ReplicationOperation createReplicatedOperation(Request request, ActionListener actionListener, - Action.PrimaryShardReference primaryShardReference, - boolean executeOnReplicas) { + protected ReplicationOperation createReplicatedOperation(Request request, + ActionListener actionListener, Action.PrimaryShardReference primaryShardReference, + boolean executeOnReplicas) { assertFalse(executeOnReplicas); return new NoopReplicationOperation(request, actionListener); } @@ -609,9 +608,9 @@ public void testCounterOnPrimary() throws Exception { Action.PrimaryOperationTransportHandler primaryPhase = action.new PrimaryOperationTransportHandler() { @Override - protected ReplicationOperation createReplicatedOperation(Request request, ActionListener listener, - Action.PrimaryShardReference primaryShardReference, - boolean executeOnReplicas) { + protected ReplicationOperation createReplicatedOperation(Request request, + ActionListener listener, Action.PrimaryShardReference primaryShardReference, + boolean executeOnReplicas) { assertIndexShardCounter(1); if (throwExceptionOnCreation) { throw new ElasticsearchException("simulated exception, during createReplicatedOperation"); @@ -749,7 +748,7 @@ public void readFrom(StreamInput in) throws IOException { static class Response extends ReplicationResponse { } - class Action extends TransportReplicationAction { + class Action extends TransportReplicationAction { Action(Settings settings, String actionName, TransportService transportService, ClusterService clusterService, @@ -766,15 +765,10 @@ protected Response newResponseInstance() { } @Override - protected Tuple shardOperationOnPrimary(Request shardRequest) throws Exception { + protected PrimaryResult shardOperationOnPrimary(Request shardRequest) throws Exception { boolean executedBefore = shardRequest.processedOnPrimary.getAndSet(true); assert executedBefore == false : "request has already been executed on the primary"; - return new Tuple<>(shardRequest, null); - } - - @Override - protected void asyncShardOperationOnPrimary(Void stash, Request shardRequest, ActionListener listener) { - listener.onResponse(new Response()); + return new PrimaryResult(shardRequest, new Response()); } @Override @@ -829,15 +823,14 @@ protected Releasable acquireReplicaOperationLock(ShardId shardId, long primaryTe } } - class NoopReplicationOperation extends ReplicationOperation { - - public NoopReplicationOperation(Request request, ActionListener listener) { + class NoopReplicationOperation extends ReplicationOperation { + public NoopReplicationOperation(Request request, ActionListener listener) { super(request, null, listener, true, true, null, null, TransportReplicationActionTests.this.logger, "noop"); } @Override public void execute() throws Exception { - this.resultListener.onResponse(new Response()); + this.resultListener.onResponse(action.new PrimaryResult(null, new Response())); } } diff --git a/core/src/test/java/org/elasticsearch/aliases/IndexAliasesIT.java b/core/src/test/java/org/elasticsearch/aliases/IndexAliasesIT.java index 2c10631e6aa59..724cd9860a13a 100644 --- a/core/src/test/java/org/elasticsearch/aliases/IndexAliasesIT.java +++ 
b/core/src/test/java/org/elasticsearch/aliases/IndexAliasesIT.java @@ -29,6 +29,7 @@ import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder; import org.elasticsearch.action.index.IndexResponse; import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.support.WriteRequest.RefreshPolicy; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.AliasAction; import org.elasticsearch.cluster.metadata.AliasMetaData; @@ -141,7 +142,7 @@ public void testFilteringAliases() throws Exception { ensureGreen(); logger.info("--> aliasing index [test] with [alias1] and filter [user:kimchy]"); - QueryBuilder filter = termQuery("user", "kimchy"); + QueryBuilder filter = termQuery("user", "kimchy"); assertAcked(admin().indices().prepareAliases().addAlias("test", "alias1", filter)); // For now just making sure that filter was stored with the alias @@ -175,10 +176,15 @@ public void testSearchingFilteringAliasesSingleIndex() throws Exception { assertAcked(admin().indices().prepareAliases().addAlias("test", "tests", termQuery("name", "test"))); logger.info("--> indexing against [test]"); - client().index(indexRequest("test").type("type1").id("1").source(source("1", "foo test")).setRefresh(true)).actionGet(); - client().index(indexRequest("test").type("type1").id("2").source(source("2", "bar test")).setRefresh(true)).actionGet(); - client().index(indexRequest("test").type("type1").id("3").source(source("3", "baz test")).setRefresh(true)).actionGet(); - client().index(indexRequest("test").type("type1").id("4").source(source("4", "something else")).setRefresh(true)).actionGet(); + client().index(indexRequest("test").type("type1").id("1").source(source("1", "foo test")).setRefreshPolicy(RefreshPolicy.IMMEDIATE)) + .actionGet(); + client().index(indexRequest("test").type("type1").id("2").source(source("2", "bar test")).setRefreshPolicy(RefreshPolicy.IMMEDIATE)) + .actionGet(); + client().index(indexRequest("test").type("type1").id("3").source(source("3", "baz test")).setRefreshPolicy(RefreshPolicy.IMMEDIATE)) + .actionGet(); + client().index( + indexRequest("test").type("type1").id("4").source(source("4", "something else")).setRefreshPolicy(RefreshPolicy.IMMEDIATE)) + .actionGet(); logger.info("--> checking single filtering alias search"); SearchResponse searchResponse = client().prepareSearch("foos").setQuery(QueryBuilders.matchAllQuery()).get(); diff --git a/core/src/test/java/org/elasticsearch/cluster/allocation/ClusterRerouteIT.java b/core/src/test/java/org/elasticsearch/cluster/allocation/ClusterRerouteIT.java index a6bafb912a29b..2b8a04b4d1e5e 100644 --- a/core/src/test/java/org/elasticsearch/cluster/allocation/ClusterRerouteIT.java +++ b/core/src/test/java/org/elasticsearch/cluster/allocation/ClusterRerouteIT.java @@ -22,6 +22,7 @@ import org.apache.lucene.util.IOUtils; import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteResponse; +import org.elasticsearch.action.support.WriteRequest.RefreshPolicy; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.health.ClusterHealthStatus; import org.elasticsearch.cluster.node.DiscoveryNode; @@ -225,7 +226,7 @@ private void rerouteWithAllocateLocalGateway(Settings commonSettings) throws Exc assertThat(state.getRoutingNodes().unassigned().size(), equalTo(1)); assertThat(state.getRoutingNodes().node(state.nodes().resolveNode(node_1).getId()).get(0).state(), 
equalTo(ShardRoutingState.STARTED)); - client().prepareIndex("test", "type", "1").setSource("field", "value").setRefresh(true).execute().actionGet(); + client().prepareIndex("test", "type", "1").setSource("field", "value").setRefreshPolicy(RefreshPolicy.IMMEDIATE).get(); final Index index = resolveIndex("test"); logger.info("--> closing all nodes"); diff --git a/core/src/test/java/org/elasticsearch/index/BlockUntilRefreshIT.java b/core/src/test/java/org/elasticsearch/index/BlockUntilRefreshIT.java index 8df1bbb1c43ce..92a475421182e 100644 --- a/core/src/test/java/org/elasticsearch/index/BlockUntilRefreshIT.java +++ b/core/src/test/java/org/elasticsearch/index/BlockUntilRefreshIT.java @@ -25,6 +25,7 @@ import org.elasticsearch.action.bulk.BulkResponse; import org.elasticsearch.action.delete.DeleteResponse; import org.elasticsearch.action.index.IndexResponse; +import org.elasticsearch.action.support.WriteRequest.RefreshPolicy; import org.elasticsearch.action.update.UpdateResponse; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.rest.RestStatus; @@ -51,7 +52,8 @@ */ public class BlockUntilRefreshIT extends ESIntegTestCase { public void testIndex() { - IndexResponse index = client().prepareIndex("test", "index", "1").setSource("foo", "bar").setBlockUntilRefresh(true).get(); + IndexResponse index = client().prepareIndex("test", "index", "1").setSource("foo", "bar").setRefreshPolicy(RefreshPolicy.WAIT_UNTIL) + .get(); assertEquals(RestStatus.CREATED, index.status()); assertFalse("request shouldn't have forced a refresh", index.forcedRefresh()); assertSearchHits(client().prepareSearch("test").setQuery(matchQuery("foo", "bar")).get(), "1"); @@ -63,7 +65,7 @@ public void testDelete() throws InterruptedException, ExecutionException { assertSearchHits(client().prepareSearch("test").setQuery(matchQuery("foo", "bar")).get(), "1"); // Now delete with blockUntilRefresh - DeleteResponse delete = client().prepareDelete("test", "test", "1").setBlockUntilRefresh(true).get(); + DeleteResponse delete = client().prepareDelete("test", "test", "1").setRefreshPolicy(RefreshPolicy.WAIT_UNTIL).get(); assertTrue("document was deleted", delete.isFound()); assertFalse("request shouldn't have forced a refresh", delete.forcedRefresh()); assertNoSearchHits(client().prepareSearch("test").setQuery(matchQuery("foo", "bar")).get()); @@ -75,20 +77,22 @@ public void testUpdate() throws InterruptedException, ExecutionException { assertSearchHits(client().prepareSearch("test").setQuery(matchQuery("foo", "bar")).get(), "1"); // Update with block_until_refresh - UpdateResponse update = client().prepareUpdate("test", "test", "1").setDoc("foo", "baz").setBlockUntilRefresh(true).get(); + UpdateResponse update = client().prepareUpdate("test", "test", "1").setDoc("foo", "baz").setRefreshPolicy(RefreshPolicy.WAIT_UNTIL) + .get(); assertEquals(2, update.getVersion()); assertFalse("request shouldn't have forced a refresh", update.forcedRefresh()); assertSearchHits(client().prepareSearch("test").setQuery(matchQuery("foo", "baz")).get(), "1"); // Upsert with block_until_refresh - update = client().prepareUpdate("test", "test", "2").setDocAsUpsert(true).setDoc("foo", "cat").setBlockUntilRefresh(true).get(); + update = client().prepareUpdate("test", "test", "2").setDocAsUpsert(true).setDoc("foo", "cat") + .setRefreshPolicy(RefreshPolicy.WAIT_UNTIL).get(); assertEquals(1, update.getVersion()); assertFalse("request shouldn't have forced a refresh", update.forcedRefresh()); 
assertSearchHits(client().prepareSearch("test").setQuery(matchQuery("foo", "cat")).get(), "2"); // Update-becomes-delete with block_until_refresh update = client().prepareUpdate("test", "test", "2").setScript(new Script("delete_plz", ScriptType.INLINE, "native", emptyMap())) - .setBlockUntilRefresh(true).get(); + .setRefreshPolicy(RefreshPolicy.WAIT_UNTIL).get(); assertEquals(2, update.getVersion()); assertFalse("request shouldn't have forced a refresh", update.forcedRefresh()); assertNoSearchHits(client().prepareSearch("test").setQuery(matchQuery("foo", "cat")).get()); @@ -96,19 +100,19 @@ public void testUpdate() throws InterruptedException, ExecutionException { public void testBulk() { // Index by bulk with block_until_refresh - BulkRequestBuilder bulk = client().prepareBulk().setBlockUntilRefresh(true); + BulkRequestBuilder bulk = client().prepareBulk().setRefreshPolicy(RefreshPolicy.WAIT_UNTIL); bulk.add(client().prepareIndex("test", "test", "1").setSource("foo", "bar")); assertBulkSuccess(bulk.get()); assertSearchHits(client().prepareSearch("test").setQuery(matchQuery("foo", "bar")).get(), "1"); // Update by bulk with block_until_refresh - bulk = client().prepareBulk().setBlockUntilRefresh(true); + bulk = client().prepareBulk().setRefreshPolicy(RefreshPolicy.WAIT_UNTIL); bulk.add(client().prepareUpdate("test", "test", "1").setDoc("foo", "baz")); assertBulkSuccess(bulk.get()); assertSearchHits(client().prepareSearch("test").setQuery(matchQuery("foo", "baz")).get(), "1"); // Update by bulk with block_until_refresh - bulk = client().prepareBulk().setBlockUntilRefresh(true); + bulk = client().prepareBulk().setRefreshPolicy(RefreshPolicy.WAIT_UNTIL); bulk.add(client().prepareDelete("test", "test", "1")); assertBulkSuccess(bulk.get()); assertNoSearchHits(client().prepareSearch("test").setQuery(matchQuery("foo", "bar")).get()); @@ -121,7 +125,7 @@ public void testBulk() { public void testNoRefreshInterval() throws InterruptedException, ExecutionException { client().admin().indices().prepareCreate("test").setSettings("index.refresh_interval", -1).get(); ListenableActionFuture index = client().prepareIndex("test", "index", "1").setSource("foo", "bar") - .setBlockUntilRefresh(true).execute(); + .setRefreshPolicy(RefreshPolicy.WAIT_UNTIL).execute(); while (false == index.isDone()) { client().admin().indices().prepareRefresh("test").get(); } diff --git a/core/src/test/java/org/elasticsearch/index/mapper/all/AllFieldMapperPositionIncrementGapTests.java b/core/src/test/java/org/elasticsearch/index/mapper/all/AllFieldMapperPositionIncrementGapTests.java index 702c9b85da427..7b106863341f4 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/all/AllFieldMapperPositionIncrementGapTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/all/AllFieldMapperPositionIncrementGapTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.index.mapper.all; +import org.elasticsearch.action.support.WriteRequest.RefreshPolicy; import org.elasticsearch.client.Client; import org.elasticsearch.index.query.MatchPhraseQueryBuilder; import org.elasticsearch.plugins.Plugin; @@ -87,7 +88,7 @@ public static void assertGapIsZero(Client client, String indexName, String type) private static void testGap(Client client, String indexName, String type, int positionIncrementGap) throws IOException { client.prepareIndex(indexName, type, "position_gap_test") - .setSource("string1", "one", "string2", "two three").setRefresh(true).get(); + .setSource("string1", "one", "string2", "two 
three").setRefreshPolicy(RefreshPolicy.IMMEDIATE).get(); // Baseline - phrase query finds matches in the same field value assertHitCount(client.prepareSearch(indexName) diff --git a/core/src/test/java/org/elasticsearch/percolator/PercolatorIT.java b/core/src/test/java/org/elasticsearch/percolator/PercolatorIT.java index 052a891010478..829e8bcb2b627 100644 --- a/core/src/test/java/org/elasticsearch/percolator/PercolatorIT.java +++ b/core/src/test/java/org/elasticsearch/percolator/PercolatorIT.java @@ -29,6 +29,7 @@ import org.elasticsearch.action.percolate.PercolateSourceBuilder; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.action.support.WriteRequest.RefreshPolicy; import org.elasticsearch.client.Requests; import org.elasticsearch.common.geo.builders.ShapeBuilders; import org.elasticsearch.common.lucene.search.function.CombineFunction; @@ -286,8 +287,8 @@ public void storePercolateQueriesOnRecreatedIndex() throws Exception { .field("color", "blue") .field("query", termQuery("field1", "value1")) .endObject()) - .setRefresh(true) - .execute().actionGet(); + .setRefreshPolicy(RefreshPolicy.IMMEDIATE) + .get(); cluster().wipeIndices("test"); createIndex("test"); @@ -300,8 +301,8 @@ public void storePercolateQueriesOnRecreatedIndex() throws Exception { .field("color", "blue") .field("query", termQuery("field1", "value1")) .endObject()) - .setRefresh(true) - .execute().actionGet(); + .setRefreshPolicy(RefreshPolicy.IMMEDIATE) + .get(); } // see #2814 @@ -330,8 +331,8 @@ public void testPercolateCustomAnalyzer() throws Exception { .field("source", "productizer") .field("query", QueryBuilders.constantScoreQuery(QueryBuilders.queryStringQuery("filingcategory:s"))) .endObject()) - .setRefresh(true) - .execute().actionGet(); + .setRefreshPolicy(RefreshPolicy.IMMEDIATE) + .get(); refresh(); PercolateResponse percolate = client().preparePercolate() @@ -409,8 +410,8 @@ public void testMultiplePercolators() throws Exception { .field("color", "blue") .field("query", termQuery("field1", "value1")) .endObject()) - .setRefresh(true) - .execute().actionGet(); + .setRefreshPolicy(RefreshPolicy.IMMEDIATE) + .get(); logger.info("--> register a query 2"); client().prepareIndex(INDEX_NAME, TYPE_NAME, "bubu") @@ -418,8 +419,8 @@ public void testMultiplePercolators() throws Exception { .field("color", "green") .field("query", termQuery("field1", "value2")) .endObject()) - .setRefresh(true) - .execute().actionGet(); + .setRefreshPolicy(RefreshPolicy.IMMEDIATE) + .get(); PercolateResponse percolate = client().preparePercolate() .setIndices(INDEX_NAME).setDocumentType("type1") @@ -453,8 +454,8 @@ public void testDynamicAddingRemovingQueries() throws Exception { .field("color", "blue") .field("query", termQuery("field1", "value1")) .endObject()) - .setRefresh(true) - .execute().actionGet(); + .setRefreshPolicy(RefreshPolicy.IMMEDIATE) + .get(); PercolateResponse percolate = client().preparePercolate() .setIndices(INDEX_NAME).setDocumentType("type1") @@ -470,8 +471,8 @@ public void testDynamicAddingRemovingQueries() throws Exception { .field("color", "green") .field("query", termQuery("field1", "value2")) .endObject()) - .setRefresh(true) - .execute().actionGet(); + .setRefreshPolicy(RefreshPolicy.IMMEDIATE) + .get(); percolate = client().preparePercolate() .setIndices(INDEX_NAME).setDocumentType("type1") @@ -487,8 +488,8 @@ public void testDynamicAddingRemovingQueries() throws Exception { .field("color", "red") .field("query", 
termQuery("field1", "value2")) .endObject()) - .setRefresh(true) - .execute().actionGet(); + .setRefreshPolicy(RefreshPolicy.IMMEDIATE) + .get(); PercolateSourceBuilder sourceBuilder = new PercolateSourceBuilder() .setDoc(docBuilder().setDoc(jsonBuilder().startObject().field("field1", "value2").endObject())) @@ -502,7 +503,7 @@ public void testDynamicAddingRemovingQueries() throws Exception { assertThat(convertFromTextArray(percolate.getMatches(), INDEX_NAME), arrayContaining("susu")); logger.info("--> deleting query 1"); - client().prepareDelete(INDEX_NAME, TYPE_NAME, "kuku").setRefresh(true).execute().actionGet(); + client().prepareDelete(INDEX_NAME, TYPE_NAME, "kuku").setRefreshPolicy(RefreshPolicy.IMMEDIATE).get(); percolate = client().preparePercolate() .setIndices(INDEX_NAME).setDocumentType("type1") @@ -1485,8 +1486,8 @@ public void testPercolateNonMatchingConstantScoreQuery() throws Exception { .must(QueryBuilders.queryStringQuery("root")) .must(QueryBuilders.termQuery("message", "tree")))) .endObject()) - .setRefresh(true) - .execute().actionGet(); + .setRefreshPolicy(RefreshPolicy.IMMEDIATE) + .get(); refresh(); PercolateResponse percolate = client().preparePercolate() diff --git a/core/src/test/java/org/elasticsearch/routing/AliasRoutingIT.java b/core/src/test/java/org/elasticsearch/routing/AliasRoutingIT.java index 6a4de4b9ff238..6a4d965706a25 100644 --- a/core/src/test/java/org/elasticsearch/routing/AliasRoutingIT.java +++ b/core/src/test/java/org/elasticsearch/routing/AliasRoutingIT.java @@ -21,6 +21,7 @@ import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.SearchType; +import org.elasticsearch.action.support.WriteRequest.RefreshPolicy; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.test.ESIntegTestCase; @@ -45,7 +46,7 @@ public void testAliasCrudRouting() throws Exception { assertAcked(admin().indices().prepareAliases().addAliasAction(newAddAliasAction("test", "alias0").routing("0"))); logger.info("--> indexing with id [1], and routing [0] using alias"); - client().prepareIndex("alias0", "type1", "1").setSource("field", "value1").setRefresh(true).execute().actionGet(); + client().prepareIndex("alias0", "type1", "1").setSource("field", "value1").setRefreshPolicy(RefreshPolicy.IMMEDIATE).get(); logger.info("--> verifying get with no routing, should not find anything"); for (int i = 0; i < 5; i++) { assertThat(client().prepareGet("test", "type1", "1").execute().actionGet().isExists(), equalTo(false)); @@ -72,7 +73,7 @@ public void testAliasCrudRouting() throws Exception { logger.info("--> deleting with no routing, should not delete anything"); - client().prepareDelete("test", "type1", "1").setRefresh(true).execute().actionGet(); + client().prepareDelete("test", "type1", "1").setRefreshPolicy(RefreshPolicy.IMMEDIATE).get(); for (int i = 0; i < 5; i++) { assertThat(client().prepareGet("test", "type1", "1").execute().actionGet().isExists(), equalTo(false)); assertThat(client().prepareGet("test", "type1", "1").setRouting("0").execute().actionGet().isExists(), equalTo(true)); @@ -80,7 +81,7 @@ public void testAliasCrudRouting() throws Exception { } logger.info("--> deleting with routing alias, should delete"); - client().prepareDelete("alias0", "type1", "1").setRefresh(true).execute().actionGet(); + client().prepareDelete("alias0", "type1", "1").setRefreshPolicy(RefreshPolicy.IMMEDIATE).get(); for (int i = 0; i < 5; i++) { 
assertThat(client().prepareGet("test", "type1", "1").execute().actionGet().isExists(), equalTo(false)); assertThat(client().prepareGet("test", "type1", "1").setRouting("0").execute().actionGet().isExists(), equalTo(false)); @@ -88,7 +89,7 @@ public void testAliasCrudRouting() throws Exception { } logger.info("--> indexing with id [1], and routing [0] using alias"); - client().prepareIndex("alias0", "type1", "1").setSource("field", "value1").setRefresh(true).execute().actionGet(); + client().prepareIndex("alias0", "type1", "1").setSource("field", "value1").setRefreshPolicy(RefreshPolicy.IMMEDIATE).get(); logger.info("--> verifying get with no routing, should not find anything"); for (int i = 0; i < 5; i++) { assertThat(client().prepareGet("test", "type1", "1").execute().actionGet().isExists(), equalTo(false)); @@ -110,7 +111,7 @@ public void testAliasSearchRouting() throws Exception { .addAliasAction(newAddAliasAction("test", "alias01").searchRouting("0,1"))); logger.info("--> indexing with id [1], and routing [0] using alias"); - client().prepareIndex("alias0", "type1", "1").setSource("field", "value1").setRefresh(true).execute().actionGet(); + client().prepareIndex("alias0", "type1", "1").setSource("field", "value1").setRefreshPolicy(RefreshPolicy.IMMEDIATE).get(); logger.info("--> verifying get with no routing, should not find anything"); for (int i = 0; i < 5; i++) { assertThat(client().prepareGet("test", "type1", "1").execute().actionGet().isExists(), equalTo(false)); @@ -142,7 +143,7 @@ public void testAliasSearchRouting() throws Exception { } logger.info("--> indexing with id [2], and routing [1] using alias"); - client().prepareIndex("alias1", "type1", "2").setSource("field", "value1").setRefresh(true).execute().actionGet(); + client().prepareIndex("alias1", "type1", "2").setSource("field", "value1").setRefreshPolicy(RefreshPolicy.IMMEDIATE).get(); logger.info("--> search with no routing, should fine two"); for (int i = 0; i < 5; i++) { @@ -207,7 +208,7 @@ public void testAliasSearchRoutingWithTwoIndices() throws Exception { .addAliasAction(newAddAliasAction("test-b", "alias-ab").searchRouting("1"))); ensureGreen(); // wait for events again to make sure we got the aliases on all nodes logger.info("--> indexing with id [1], and routing [0] using alias to test-a"); - client().prepareIndex("alias-a0", "type1", "1").setSource("field", "value1").setRefresh(true).execute().actionGet(); + client().prepareIndex("alias-a0", "type1", "1").setSource("field", "value1").setRefreshPolicy(RefreshPolicy.IMMEDIATE).get(); logger.info("--> verifying get with no routing, should not find anything"); for (int i = 0; i < 5; i++) { assertThat(client().prepareGet("test-a", "type1", "1").execute().actionGet().isExists(), equalTo(false)); @@ -218,7 +219,7 @@ public void testAliasSearchRoutingWithTwoIndices() throws Exception { } logger.info("--> indexing with id [0], and routing [1] using alias to test-b"); - client().prepareIndex("alias-b1", "type1", "1").setSource("field", "value1").setRefresh(true).execute().actionGet(); + client().prepareIndex("alias-b1", "type1", "1").setSource("field", "value1").setRefreshPolicy(RefreshPolicy.IMMEDIATE).get(); logger.info("--> verifying get with no routing, should not find anything"); for (int i = 0; i < 5; i++) { assertThat(client().prepareGet("test-a", "type1", "1").execute().actionGet().isExists(), equalTo(false)); @@ -261,9 +262,9 @@ public void testAliasSearchRoutingWithConcreteAndAliasedIndices_issue2682() thro .addAliasAction(newAddAliasAction("index", 
"index_1").routing("1"))); logger.info("--> indexing on index_1 which is an alias for index with routing [1]"); - client().prepareIndex("index_1", "type1", "1").setSource("field", "value1").setRefresh(true).execute().actionGet(); + client().prepareIndex("index_1", "type1", "1").setSource("field", "value1").setRefreshPolicy(RefreshPolicy.IMMEDIATE).get(); logger.info("--> indexing on index_2 which is a concrete index"); - client().prepareIndex("index_2", "type2", "2").setSource("field", "value2").setRefresh(true).execute().actionGet(); + client().prepareIndex("index_2", "type2", "2").setSource("field", "value2").setRefreshPolicy(RefreshPolicy.IMMEDIATE).get(); logger.info("--> search all on index_* should find two"); @@ -286,9 +287,9 @@ public void testAliasSearchRoutingWithConcreteAndAliasedIndices_issue3268() thro .addAliasAction(newAddAliasAction("index", "index_1").routing("1"))); logger.info("--> indexing on index_1 which is an alias for index with routing [1]"); - client().prepareIndex("index_1", "type1", "1").setSource("field", "value1").setRefresh(true).execute().actionGet(); + client().prepareIndex("index_1", "type1", "1").setSource("field", "value1").setRefreshPolicy(RefreshPolicy.IMMEDIATE).get(); logger.info("--> indexing on index_2 which is a concrete index"); - client().prepareIndex("index_2", "type2", "2").setSource("field", "value2").setRefresh(true).execute().actionGet(); + client().prepareIndex("index_2", "type2", "2").setSource("field", "value2").setRefreshPolicy(RefreshPolicy.IMMEDIATE).get(); SearchResponse searchResponse = client().prepareSearch("index_*").setSearchType(SearchType.QUERY_THEN_FETCH).setSize(1).setQuery(QueryBuilders.matchAllQuery()).execute().actionGet(); @@ -307,7 +308,7 @@ public void testIndexingAliasesOverTime() throws Exception { .addAliasAction(newAddAliasAction("test", "alias").routing("3"))); logger.info("--> indexing with id [0], and routing [3]"); - client().prepareIndex("alias", "type1", "0").setSource("field", "value1").setRefresh(true).execute().actionGet(); + client().prepareIndex("alias", "type1", "0").setSource("field", "value1").setRefreshPolicy(RefreshPolicy.IMMEDIATE).get(); logger.info("--> verifying get with no routing, should not find anything"); logger.info("--> verifying get and search with routing, should find"); @@ -332,7 +333,7 @@ public void testIndexingAliasesOverTime() throws Exception { .addAliasAction(newAddAliasAction("test", "alias").searchRouting("3,4").indexRouting("4"))); logger.info("--> indexing with id [1], and routing [4]"); - client().prepareIndex("alias", "type1", "1").setSource("field", "value2").setRefresh(true).execute().actionGet(); + client().prepareIndex("alias", "type1", "1").setSource("field", "value2").setRefreshPolicy(RefreshPolicy.IMMEDIATE).get(); logger.info("--> verifying get with no routing, should not find anything"); logger.info("--> verifying get and search with routing, should find"); diff --git a/core/src/test/java/org/elasticsearch/routing/SimpleRoutingIT.java b/core/src/test/java/org/elasticsearch/routing/SimpleRoutingIT.java index 03e6cbf9ef10b..d8cf1e7b5ec61 100644 --- a/core/src/test/java/org/elasticsearch/routing/SimpleRoutingIT.java +++ b/core/src/test/java/org/elasticsearch/routing/SimpleRoutingIT.java @@ -28,6 +28,7 @@ import org.elasticsearch.action.get.GetResponse; import org.elasticsearch.action.get.MultiGetRequest; import org.elasticsearch.action.get.MultiGetResponse; +import org.elasticsearch.action.support.WriteRequest.RefreshPolicy; import 
org.elasticsearch.action.termvectors.MultiTermVectorsResponse; import org.elasticsearch.action.termvectors.TermVectorsRequest; import org.elasticsearch.action.termvectors.TermVectorsResponse; @@ -56,7 +57,8 @@ public void testSimpleCrudRouting() throws Exception { ensureGreen(); logger.info("--> indexing with id [1], and routing [0]"); - client().prepareIndex("test", "type1", "1").setRouting("0").setSource("field", "value1").setRefresh(true).execute().actionGet(); + client().prepareIndex("test", "type1", "1").setRouting("0").setSource("field", "value1").setRefreshPolicy(RefreshPolicy.IMMEDIATE) + .get(); logger.info("--> verifying get with no routing, should not find anything"); for (int i = 0; i < 5; i++) { assertThat(client().prepareGet("test", "type1", "1").execute().actionGet().isExists(), equalTo(false)); @@ -67,21 +69,22 @@ public void testSimpleCrudRouting() throws Exception { } logger.info("--> deleting with no routing, should not delete anything"); - client().prepareDelete("test", "type1", "1").setRefresh(true).execute().actionGet(); + client().prepareDelete("test", "type1", "1").setRefreshPolicy(RefreshPolicy.IMMEDIATE).get(); for (int i = 0; i < 5; i++) { assertThat(client().prepareGet("test", "type1", "1").execute().actionGet().isExists(), equalTo(false)); assertThat(client().prepareGet("test", "type1", "1").setRouting("0").execute().actionGet().isExists(), equalTo(true)); } logger.info("--> deleting with routing, should delete"); - client().prepareDelete("test", "type1", "1").setRouting("0").setRefresh(true).execute().actionGet(); + client().prepareDelete("test", "type1", "1").setRouting("0").setRefreshPolicy(RefreshPolicy.IMMEDIATE).get(); for (int i = 0; i < 5; i++) { assertThat(client().prepareGet("test", "type1", "1").execute().actionGet().isExists(), equalTo(false)); assertThat(client().prepareGet("test", "type1", "1").setRouting("0").execute().actionGet().isExists(), equalTo(false)); } logger.info("--> indexing with id [1], and routing [0]"); - client().prepareIndex("test", "type1", "1").setRouting("0").setSource("field", "value1").setRefresh(true).execute().actionGet(); + client().prepareIndex("test", "type1", "1").setRouting("0").setSource("field", "value1").setRefreshPolicy(RefreshPolicy.IMMEDIATE) + .get(); logger.info("--> verifying get with no routing, should not find anything"); for (int i = 0; i < 5; i++) { assertThat(client().prepareGet("test", "type1", "1").execute().actionGet().isExists(), equalTo(false)); @@ -97,7 +100,8 @@ public void testSimpleSearchRouting() { ensureGreen(); logger.info("--> indexing with id [1], and routing [0]"); - client().prepareIndex("test", "type1", "1").setRouting("0").setSource("field", "value1").setRefresh(true).execute().actionGet(); + client().prepareIndex("test", "type1", "1").setRouting("0").setSource("field", "value1").setRefreshPolicy(RefreshPolicy.IMMEDIATE) + .get(); logger.info("--> verifying get with no routing, should not find anything"); for (int i = 0; i < 5; i++) { assertThat(client().prepareGet("test", "type1", "1").execute().actionGet().isExists(), equalTo(false)); @@ -125,7 +129,7 @@ public void testSimpleSearchRouting() { } logger.info("--> indexing with id [2], and routing [1]"); - client().prepareIndex("test", "type1", "2").setRouting("1").setSource("field", "value1").setRefresh(true).execute().actionGet(); + client().prepareIndex("test", "type1", "2").setRouting("1").setSource("field", "value1").setRefreshPolicy(RefreshPolicy.IMMEDIATE).get(); logger.info("--> search with no routing, should fine two"); for 
(int i = 0; i < 5; i++) { @@ -165,12 +169,13 @@ public void testRequiredRoutingCrudApis() throws Exception { ensureGreen(); logger.info("--> indexing with id [1], and routing [0]"); - client().prepareIndex(indexOrAlias(), "type1", "1").setRouting("0").setSource("field", "value1").setRefresh(true).execute().actionGet(); + client().prepareIndex(indexOrAlias(), "type1", "1").setRouting("0").setSource("field", "value1") + .setRefreshPolicy(RefreshPolicy.IMMEDIATE).get(); logger.info("--> verifying get with no routing, should fail"); logger.info("--> indexing with id [1], with no routing, should fail"); try { - client().prepareIndex(indexOrAlias(), "type1", "1").setSource("field", "value1").setRefresh(true).execute().actionGet(); + client().prepareIndex(indexOrAlias(), "type1", "1").setSource("field", "value1").get(); fail("index with missing routing when routing is required should fail"); } catch (ElasticsearchException e) { assertThat(e.unwrapCause(), instanceOf(RoutingMissingException.class)); @@ -183,7 +188,7 @@ public void testRequiredRoutingCrudApis() throws Exception { logger.info("--> deleting with no routing, should fail"); try { - client().prepareDelete(indexOrAlias(), "type1", "1").setRefresh(true).execute().actionGet(); + client().prepareDelete(indexOrAlias(), "type1", "1").get(); fail("delete with missing routing when routing is required should fail"); } catch (ElasticsearchException e) { assertThat(e.unwrapCause(), instanceOf(RoutingMissingException.class)); @@ -223,7 +228,7 @@ public void testRequiredRoutingCrudApis() throws Exception { assertThat(getResponse.getSourceAsMap().get("field"), equalTo("value2")); } - client().prepareDelete(indexOrAlias(), "type1", "1").setRouting("0").setRefresh(true).execute().actionGet(); + client().prepareDelete(indexOrAlias(), "type1", "1").setRouting("0").setRefreshPolicy(RefreshPolicy.IMMEDIATE).get(); for (int i = 0; i < 5; i++) { try { @@ -320,7 +325,8 @@ public void testRequiredRoutingMappingVariousAPIs() throws Exception { logger.info("--> indexing with id [1], and routing [0]"); client().prepareIndex(indexOrAlias(), "type1", "1").setRouting("0").setSource("field", "value1").get(); logger.info("--> indexing with id [2], and routing [0]"); - client().prepareIndex(indexOrAlias(), "type1", "2").setRouting("0").setSource("field", "value2").setRefresh(true).get(); + client().prepareIndex(indexOrAlias(), "type1", "2").setRouting("0").setSource("field", "value2") + .setRefreshPolicy(RefreshPolicy.IMMEDIATE).get(); logger.info("--> verifying get with id [1] with routing [0], should succeed"); assertThat(client().prepareGet(indexOrAlias(), "type1", "1").setRouting("0").execute().actionGet().isExists(), equalTo(true)); diff --git a/core/src/test/java/org/elasticsearch/search/child/ChildQuerySearchIT.java b/core/src/test/java/org/elasticsearch/search/child/ChildQuerySearchIT.java index 1a258abc9cc7f..80b6100c1d7cc 100644 --- a/core/src/test/java/org/elasticsearch/search/child/ChildQuerySearchIT.java +++ b/core/src/test/java/org/elasticsearch/search/child/ChildQuerySearchIT.java @@ -26,6 +26,7 @@ import org.elasticsearch.action.search.SearchPhaseExecutionException; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.SearchType; +import org.elasticsearch.action.support.WriteRequest.RefreshPolicy; import org.elasticsearch.common.lucene.search.function.CombineFunction; import org.elasticsearch.common.lucene.search.function.FiltersFunctionScoreQuery; import org.elasticsearch.common.settings.Settings; @@ -754,10 
+755,11 @@ public void testParentChildQueriesCanHandleNoRelevantTypesInIndex() throws Excep assertNoFailures(response); assertThat(response.getHits().totalHits(), equalTo(0L)); - client().prepareIndex("test", "child1").setSource(jsonBuilder().startObject().field("text", "value").endObject()).setRefresh(true) - .get(); + client().prepareIndex("test", "child1").setSource(jsonBuilder().startObject().field("text", "value").endObject()) + .setRefreshPolicy(RefreshPolicy.IMMEDIATE).get(); - response = client().prepareSearch("test").setQuery(QueryBuilders.hasChildQuery("child", matchQuery("text", "value"), ScoreMode.None)).get(); + response = client().prepareSearch("test") + .setQuery(QueryBuilders.hasChildQuery("child", matchQuery("text", "value"), ScoreMode.None)).get(); assertNoFailures(response); assertThat(response.getHits().totalHits(), equalTo(0L)); diff --git a/core/src/test/java/org/elasticsearch/search/suggest/CompletionSuggestSearch2xIT.java b/core/src/test/java/org/elasticsearch/search/suggest/CompletionSuggestSearch2xIT.java index d09c8f172df00..29f31a023cc27 100644 --- a/core/src/test/java/org/elasticsearch/search/suggest/CompletionSuggestSearch2xIT.java +++ b/core/src/test/java/org/elasticsearch/search/suggest/CompletionSuggestSearch2xIT.java @@ -32,6 +32,7 @@ import org.elasticsearch.action.percolate.PercolateResponse; import org.elasticsearch.action.search.SearchPhaseExecutionException; import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.support.WriteRequest.RefreshPolicy; import org.elasticsearch.client.Requests; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.settings.Settings; @@ -509,7 +510,7 @@ public void testThatUpgradeToMultiFieldTypeWorks() throws Exception { .setSettings(Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, PRE2X_VERSION.id)) .addMapping(TYPE, mapping)); client().prepareIndex(INDEX, TYPE, "1") - .setRefresh(true) + .setRefreshPolicy(RefreshPolicy.IMMEDIATE) .setSource(jsonBuilder().startObject().field(FIELD, "Foo Fighters").endObject()).get(); ensureGreen(INDEX); @@ -532,7 +533,7 @@ public void testThatUpgradeToMultiFieldTypeWorks() throws Exception { ).execute().actionGet(); assertSuggestions(suggestResponse, "suggs"); - client().prepareIndex(INDEX, TYPE, "1").setRefresh(true) + client().prepareIndex(INDEX, TYPE, "1").setRefreshPolicy(RefreshPolicy.IMMEDIATE) .setSource(jsonBuilder().startObject().field(FIELD, "Foo Fighters").endObject()).get(); ensureGreen(INDEX); @@ -558,7 +559,7 @@ public void testThatUpgradeToMultiFieldsWorks() throws Exception { .addMapping(TYPE, mapping) .setSettings(Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, PRE2X_VERSION.id))); client().prepareIndex(INDEX, TYPE, "1") - .setRefresh(true) + .setRefreshPolicy(RefreshPolicy.IMMEDIATE) .setSource(jsonBuilder().startObject().field(FIELD, "Foo Fighters").endObject()).get(); ensureGreen(INDEX); @@ -581,7 +582,7 @@ public void testThatUpgradeToMultiFieldsWorks() throws Exception { ).execute().actionGet(); assertSuggestions(suggestResponse, "suggs"); - client().prepareIndex(INDEX, TYPE, "1").setRefresh(true) + client().prepareIndex(INDEX, TYPE, "1").setRefreshPolicy(RefreshPolicy.IMMEDIATE) .setSource(jsonBuilder().startObject().field(FIELD, "Foo Fighters").endObject()).get(); ensureGreen(INDEX); @@ -767,10 +768,10 @@ public void testThatStatsAreWorking() throws Exception { assertThat(putMappingResponse.isAcknowledged(), is(true)); // Index two entities - client().prepareIndex(INDEX, 
TYPE, "1").setRefresh(true) + client().prepareIndex(INDEX, TYPE, "1").setRefreshPolicy(RefreshPolicy.IMMEDIATE) .setSource(jsonBuilder().startObject().field(FIELD, "Foo Fighters").field(otherField, "WHATEVER").endObject()) .get(); - client().prepareIndex(INDEX, TYPE, "2").setRefresh(true) + client().prepareIndex(INDEX, TYPE, "2").setRefreshPolicy(RefreshPolicy.IMMEDIATE) .setSource(jsonBuilder().startObject().field(FIELD, "Bar Fighters").field(otherField, "WHATEVER2").endObject()) .get(); @@ -1076,7 +1077,7 @@ public void testMaxFieldLength() throws IOException { .startArray("input").value(str).endArray() .field("output", "foobar") .endObject().endObject() - ).setRefresh(true).get(); + ).setRefreshPolicy(RefreshPolicy.IMMEDIATE).get(); // need to flush and refresh, because we keep changing the same document // we have to make sure that segments without any live documents are deleted flushAndRefresh(); @@ -1110,7 +1111,7 @@ public void testVeryLongInput() throws IOException { .startArray("input").value(longString).endArray() .field("output", "foobar") .endObject().endObject() - ).setRefresh(true).get(); + ).setRefreshPolicy(RefreshPolicy.IMMEDIATE).get(); } @@ -1132,7 +1133,7 @@ public void testReservedChars() throws IOException { .startArray("input").value(string).endArray() .field("output", "foobar") .endObject().endObject() - ).setRefresh(true).get(); + ).setRefreshPolicy(RefreshPolicy.IMMEDIATE).get(); fail("expected MapperParsingException"); } catch (MapperParsingException expected) {} } @@ -1152,7 +1153,7 @@ public void testIssue5930() throws IOException { .startObject() .field(FIELD, string) .endObject() - ).setRefresh(true).get(); + ).setRefreshPolicy(RefreshPolicy.IMMEDIATE).get(); try { client().prepareSearch(INDEX).addAggregation(AggregationBuilders.terms("suggest_agg").field(FIELD) @@ -1184,11 +1185,11 @@ public void testIndexingUnrelatedNullValue() throws Exception { ensureGreen(); client().prepareIndex(INDEX, TYPE, "1").setSource(FIELD, "strings make me happy", FIELD + "_1", "nulls make me sad") - .setRefresh(true).get(); + .setRefreshPolicy(RefreshPolicy.IMMEDIATE).get(); try { client().prepareIndex(INDEX, TYPE, "2").setSource(FIELD, null, FIELD + "_1", "nulls make me sad") - .setRefresh(true).get(); + .setRefreshPolicy(RefreshPolicy.IMMEDIATE).get(); fail("Expected MapperParsingException for null value"); } catch (MapperParsingException e) { // make sure that the exception has the name of the field causing the error diff --git a/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/BulkTests.java b/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/BulkTests.java index dd9daa75e7665..13868566eac17 100644 --- a/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/BulkTests.java +++ b/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/BulkTests.java @@ -29,6 +29,7 @@ import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.index.IndexResponse; import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.support.WriteRequest.RefreshPolicy; import org.elasticsearch.action.update.UpdateRequest; import org.elasticsearch.action.update.UpdateRequestBuilder; import org.elasticsearch.action.update.UpdateResponse; @@ -584,7 +585,7 @@ public void testThatFailedUpdateRequestReturnsCorrectType() throws Exception { .add(new IndexRequest("test", "type", "4").source("{ \"title\" : \"Great Title of doc 4\" }")) .add(new IndexRequest("test", "type", "5").source("{ \"title\" : \"Great Title of 
doc 5\" }")) .add(new IndexRequest("test", "type", "6").source("{ \"title\" : \"Great Title of doc 6\" }")) - .setRefresh(true) + .setRefreshPolicy(RefreshPolicy.IMMEDIATE) .get(); assertNoFailures(indexBulkItemResponse); @@ -622,7 +623,7 @@ public void testThatMissingIndexDoesNotAbortFullBulkRequest() throws Exception{ .add(new IndexRequest("bulkindex2", "index2_type").source("text", "hallo2")) .add(new UpdateRequest("bulkindex2", "index2_type", "2").doc("foo", "bar")) .add(new DeleteRequest("bulkindex2", "index2_type", "3")) - .refresh(true); + .setRefreshPolicy(RefreshPolicy.IMMEDIATE); client().bulk(bulkRequest).get(); SearchResponse searchResponse = client().prepareSearch("bulkindex*").get(); @@ -643,10 +644,10 @@ public void testFailedRequestsOnClosedIndex() throws Exception { client().prepareIndex("bulkindex1", "index1_type", "1").setSource("text", "test").get(); assertAcked(client().admin().indices().prepareClose("bulkindex1")); - BulkRequest bulkRequest = new BulkRequest(); + BulkRequest bulkRequest = new BulkRequest().setRefreshPolicy(RefreshPolicy.IMMEDIATE); bulkRequest.add(new IndexRequest("bulkindex1", "index1_type", "1").source("text", "hallo1")) .add(new UpdateRequest("bulkindex1", "index1_type", "1").doc("foo", "bar")) - .add(new DeleteRequest("bulkindex1", "index1_type", "1")).refresh(true); + .add(new DeleteRequest("bulkindex1", "index1_type", "1")); BulkResponse bulkResponse = client().bulk(bulkRequest).get(); assertThat(bulkResponse.hasFailures(), is(true)); From dc28951d02973fc03b4d51913b5f96de14b75607 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Mon, 23 May 2016 21:09:20 -0400 Subject: [PATCH 46/86] Javadocs and compromises --- .../org/elasticsearch/action/DocWriteResponse.java | 8 ++++---- .../action/bulk/BulkShardResponse.java | 13 ++++--------- .../action/support/WriteRequestBuilder.java | 4 +--- .../replication/ReplicatedWriteResponse.java | 9 ++++----- .../replication/TransportReplicationAction.java | 2 -- 5 files changed, 13 insertions(+), 23 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/action/DocWriteResponse.java b/core/src/main/java/org/elasticsearch/action/DocWriteResponse.java index 286b4d64e96d4..1ab1d872c4828 100644 --- a/core/src/main/java/org/elasticsearch/action/DocWriteResponse.java +++ b/core/src/main/java/org/elasticsearch/action/DocWriteResponse.java @@ -19,9 +19,9 @@ package org.elasticsearch.action; import org.elasticsearch.action.support.WriteRequest; +import org.elasticsearch.action.support.WriteRequest.RefreshPolicy; import org.elasticsearch.action.support.replication.ReplicatedWriteResponse; import org.elasticsearch.action.support.replication.ReplicationResponse; -import org.elasticsearch.action.support.replication.ReplicationResponse.ShardInfo; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.StatusToXContent; @@ -91,9 +91,9 @@ public long getVersion() { } /** - * Did this request force a refresh? Requests that set {@link WriteRequest#setRefresh(boolean)} to true should always - * return true for this. Requests that set {@link WriteRequest#setBlockUntilRefresh(boolean)} to true should only return - * this if they run out of refresh listener slots (see {@link IndexSettings#MAX_REFRESH_LISTENERS_PER_SHARD}). + * Did this request force a refresh? Requests that set {@link WriteRequest#setRefreshPolicy(RefreshPolicy)} to + * {@link RefreshPolicy#IMMEDIATE} will always return true for this. 
Requests that set it to {@link RefreshPolicy#WAIT_UNTIL} will + * only return true here if they run out of refresh listener slots (see {@link IndexSettings#MAX_REFRESH_LISTENERS_PER_SHARD}). */ public boolean forcedRefresh() { return forcedRefresh; diff --git a/core/src/main/java/org/elasticsearch/action/bulk/BulkShardResponse.java b/core/src/main/java/org/elasticsearch/action/bulk/BulkShardResponse.java index 3b8f0727b2909..6b6461001312f 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/BulkShardResponse.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/BulkShardResponse.java @@ -20,12 +20,10 @@ package org.elasticsearch.action.bulk; import org.elasticsearch.action.DocWriteResponse; -import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.action.support.replication.ReplicatedWriteResponse; import org.elasticsearch.action.support.replication.ReplicationResponse; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.shard.ShardId; import java.io.IOException; @@ -54,15 +52,12 @@ public BulkItemResponse[] getResponses() { return responses; } - /** - * Each DocWriteResponse already has a location for whether or not it forced a refresh so we just set that information on the response. - * - * Requests that set {@link WriteRequest#setRefresh(boolean)} to true should always set this to true. Requests that set - * {@link WriteRequest#setBlockUntilRefresh(boolean)} to true should only set this to true if they run out of refresh - * listener slots (see {@link IndexSettings#MAX_REFRESH_LISTENERS_PER_SHARD}). - */ @Override public void setForcedRefresh(boolean forcedRefresh) { + /* + * Each DocWriteResponse already has a location for whether or not it forced a refresh so we just set that information on the + * response. + */ for (BulkItemResponse response : responses) { DocWriteResponse r = response.getResponse(); if (r != null) { diff --git a/core/src/main/java/org/elasticsearch/action/support/WriteRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/support/WriteRequestBuilder.java index 225d84560debb..19ba4dc9f0765 100644 --- a/core/src/main/java/org/elasticsearch/action/support/WriteRequestBuilder.java +++ b/core/src/main/java/org/elasticsearch/action/support/WriteRequestBuilder.java @@ -37,10 +37,8 @@ default B setRefreshPolicy(RefreshPolicy refreshPolicy) { /** * If set to true then this request will force an immediate refresh. Backwards compatibility layer for Elasticsearch's old * {@code setRefresh} calls. - * - * @deprecated use setRefreshPolicy instead */ - @Deprecated + // NOCOMMIT deprecate or just remove this @SuppressWarnings("unchecked") default B setRefresh(boolean refresh) { request().setRefreshPolicy(refresh ? 
RefreshPolicy.IMMEDIATE : RefreshPolicy.NONE); diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/ReplicatedWriteResponse.java b/core/src/main/java/org/elasticsearch/action/support/replication/ReplicatedWriteResponse.java index d89fe7963cef3..b928058421ec5 100644 --- a/core/src/main/java/org/elasticsearch/action/support/replication/ReplicatedWriteResponse.java +++ b/core/src/main/java/org/elasticsearch/action/support/replication/ReplicatedWriteResponse.java @@ -20,6 +20,7 @@ package org.elasticsearch.action.support.replication; import org.elasticsearch.action.support.WriteRequest; +import org.elasticsearch.action.support.WriteRequest.RefreshPolicy; import org.elasticsearch.index.IndexSettings; /** @@ -27,11 +28,9 @@ */ public interface ReplicatedWriteResponse { /** - * Mark the request with if it was forced to refresh the index. All implementations by default assume that the request didn't force a - * refresh unless set otherwise so it mostly only makes sense to call this with {@code true}. Requests that set - * {@link WriteRequest#setRefresh(boolean)} to true should always set this to true. Requests that set - * {@link WriteRequest#setBlockUntilRefresh(boolean)} to true should only set this to true if they run out of refresh - * listener slots (see {@link IndexSettings#MAX_REFRESH_LISTENERS_PER_SHARD}). + * Mark the response as having forced a refresh? Requests that set {@link WriteRequest#setRefreshPolicy(RefreshPolicy)} to + * {@link RefreshPolicy#IMMEDIATE} should always mark this as true. Requests that set it to {@link RefreshPolicy#WAIT_UNTIL} will only + * set this to true if they run out of refresh listener slots (see {@link IndexSettings#MAX_REFRESH_LISTENERS_PER_SHARD}). */ public abstract void setForcedRefresh(boolean forcedRefresh); } diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java b/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java index 4b650046252c0..ed31ee4115585 100644 --- a/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java +++ b/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java @@ -152,8 +152,6 @@ protected void resolveRequest(MetaData metaData, IndexMetaData indexMetaData, Re * Synchronous portion of primary operation on node with primary copy * * @param shardRequest the request to the primary shard - * @return Tuple of the request to send to the replicas and the information needed by the { - //* {@link #asyncShardOperationOnPrimary(Object, ReplicationRequest, ActionListener)} to do its job */ protected abstract PrimaryResult shardOperationOnPrimary(Request shardRequest) throws Exception; From 15d948a348089bb2937eec5ac4e96f3ec67dbe32 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Mon, 23 May 2016 22:17:59 -0400 Subject: [PATCH 47/86] Better.... 
--- .../replication/TransportWriteAction.java | 65 +++++++++++-------- .../index/BlockUntilRefreshIT.java | 9 ++- 2 files changed, 45 insertions(+), 29 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java b/core/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java index e765fca3e4e01..32644bc3dffca 100644 --- a/core/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java +++ b/core/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java @@ -21,9 +21,11 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.replication.ReplicationResponse.ShardInfo; import org.elasticsearch.cluster.action.shard.ShardStateAction; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.Nullable; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.shard.IndexShard; @@ -61,7 +63,7 @@ protected TransportWriteAction(Settings settings, String actionName, TransportSe /** * Called once per replica with a reference to the {@linkplain IndexShard} to modify. * - * @return the translog location of the {@linkplain IndexShard} after the write was completed + * @return the translog location of the {@linkplain IndexShard} after the write was completed or null if no write occurred */ protected abstract Translog.Location onReplicaShard(Request request, IndexShard indexShard); @@ -87,13 +89,15 @@ protected final void shardOperationOnReplica(Request request, ActionListener { - if (forcedRefresh) { - logger.warn("block_until_refresh request ran out of slots and forced a refresh: [{}]", request); - } - listener.onResponse(null); - }); + if (location != null) { + forked = true; + indexShard.addRefreshListener(location, forcedRefresh -> { + if (forcedRefresh) { + logger.warn("block_until_refresh request ran out of slots and forced a refresh: [{}]", request); + } + listener.onResponse(null); + }); + } break; case NONE: break; @@ -112,9 +116,12 @@ public static class WriteResult { private final Response response; private final Translog.Location location; - public WriteResult(Response response, Location location) { + public WriteResult(Response response, @Nullable Location location) { this.response = response; this.location = location; + // Set the ShardInfo to 0 so we can safely send it to the replicas + // NOCOMMIT this seems wrong + response.setShardInfo(new ShardInfo()); } public Response getResponse() { @@ -127,33 +134,32 @@ public Translog.Location getLocation() { } protected class WritePrimaryResult extends PrimaryResult { - boolean refreshNeeded; + volatile boolean refreshPending; volatile ActionListener listener = null; public WritePrimaryResult(Request request, Response finalResponse, - Translog.Location location, + @Nullable Translog.Location location, IndexShard indexShard) { super(request, finalResponse); switch (request.getRefreshPolicy()) { case IMMEDIATE: indexShard.refresh("refresh_flag_index"); finalResponse.setForcedRefresh(true); - synchronized (this) { - refreshNeeded = false; - } break; case WAIT_UNTIL: - refreshNeeded = true; - indexShard.addRefreshListener(location, forcedRefresh -> { - synchronized (WritePrimaryResult.this) { - if (forcedRefresh) { - finalResponse.setForcedRefresh(true); - 
logger.warn("block_until_refresh request ran out of slots and forced a refresh: [{}]", request); + if (location != null) { + refreshPending = true; + indexShard.addRefreshListener(location, forcedRefresh -> { + synchronized (WritePrimaryResult.this) { + if (forcedRefresh) { + finalResponse.setForcedRefresh(true); + logger.warn("block_until_refresh request ran out of slots and forced a refresh: [{}]", request); + } + refreshPending = false; + respondIfPossible(); } - refreshNeeded = false; - respondIfNeeded(); - } - }); + }); + } break; case NONE: break; @@ -166,13 +172,16 @@ public WritePrimaryResult(Request request, Response finalResponse, } @Override - public void respond(ActionListener listener) { + public synchronized void respond(ActionListener listener) { this.listener = listener; - respondIfNeeded(); + respondIfPossible(); } - protected synchronized void respondIfNeeded() { - if (refreshNeeded == false && listener != null) { + /** + * Respond if the refresh has occurred and the listener is ready. Always called while synchronized on {@code this}. + */ + protected void respondIfPossible() { + if (refreshPending == false && listener != null) { super.respond(listener); } } diff --git a/core/src/test/java/org/elasticsearch/index/BlockUntilRefreshIT.java b/core/src/test/java/org/elasticsearch/index/BlockUntilRefreshIT.java index 92a475421182e..ebcdb4071e522 100644 --- a/core/src/test/java/org/elasticsearch/index/BlockUntilRefreshIT.java +++ b/core/src/test/java/org/elasticsearch/index/BlockUntilRefreshIT.java @@ -111,11 +111,18 @@ public void testBulk() { assertBulkSuccess(bulk.get()); assertSearchHits(client().prepareSearch("test").setQuery(matchQuery("foo", "baz")).get(), "1"); - // Update by bulk with block_until_refresh + // Delete by bulk with block_until_refresh bulk = client().prepareBulk().setRefreshPolicy(RefreshPolicy.WAIT_UNTIL); bulk.add(client().prepareDelete("test", "test", "1")); assertBulkSuccess(bulk.get()); assertNoSearchHits(client().prepareSearch("test").setQuery(matchQuery("foo", "bar")).get()); + // NOCOMMIT figure out why this sort of noop doesn't trigger +// +// // Update makes a noop +// bulk = client().prepareBulk().setRefreshPolicy(RefreshPolicy.WAIT_UNTIL); +// bulk.add(client().prepareDelete("test", "test", "1")); +// assertBulkSuccess(bulk.get()); +// assertNoSearchHits(client().prepareSearch("test").setQuery(matchQuery("foo", "bar")).get()); } /** From 4d8bf5d4a70dcc56150c8d8d14165cd23d308b3c Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Mon, 23 May 2016 22:20:42 -0400 Subject: [PATCH 48/86] explain --- .../action/support/replication/TransportWriteAction.java | 3 +++ 1 file changed, 3 insertions(+) diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java b/core/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java index 32644bc3dffca..e5a944766bd65 100644 --- a/core/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java +++ b/core/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java @@ -112,6 +112,9 @@ protected final void shardOperationOnReplica(Request request, ActionListener { private final Response response; private final Translog.Location location; From 957e9b77007c32ee75dde152c6622bab065d5993 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Tue, 24 May 2016 07:34:13 -0400 Subject: [PATCH 49/86] /Consumer/Executor/ --- .../elasticsearch/index/shard/RefreshListeners.java | 11 ++++++----- 1 file changed, 6 
insertions(+), 5 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/index/shard/RefreshListeners.java b/core/src/main/java/org/elasticsearch/index/shard/RefreshListeners.java index bbeb494c072a2..42b608bd4dd19 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/RefreshListeners.java +++ b/core/src/main/java/org/elasticsearch/index/shard/RefreshListeners.java @@ -27,6 +27,7 @@ import java.io.IOException; import java.util.Iterator; +import java.util.concurrent.Executor; import java.util.concurrent.LinkedTransferQueue; import java.util.function.Consumer; import java.util.function.IntSupplier; @@ -37,7 +38,7 @@ * Allows for the registration of listeners that are called when a change becomes visible for search. This functionality is exposed from * {@link IndexShard} but kept here so it can be tested without standing up the entire thing. */ -class RefreshListeners { +final class RefreshListeners { /** * Refresh listeners. While they are not stored in sorted order they are processed as though they are. */ @@ -45,7 +46,7 @@ class RefreshListeners { private final IntSupplier getMaxRefreshListeners; private final Runnable forceRefresh; - private final Consumer fireListener; + private final Executor listenerExecutor; /** * The estimated size of refreshListenersEstimatedSize used for triggering refresh when the size gets over @@ -58,10 +59,10 @@ class RefreshListeners { */ private volatile Translog.Location lastRefreshedLocation; - public RefreshListeners(IntSupplier getMaxRefreshListeners, Runnable forceRefresh, Consumer fireListener) { + public RefreshListeners(IntSupplier getMaxRefreshListeners, Runnable forceRefresh, Executor listenerExecutor) { this.getMaxRefreshListeners = getMaxRefreshListeners; this.forceRefresh = forceRefresh; - this.fireListener = fireListener; + this.listenerExecutor = listenerExecutor; } /** @@ -148,7 +149,7 @@ public void afterRefresh(boolean didRefresh) throws IOException { } itr.remove(); refreshListenersEstimatedSize--; - fireListener.accept(() -> listener.v2().accept(false)); + listenerExecutor.execute(() -> listener.v2().accept(false)); } refreshListenersEstimatedSize = 0; } From 7da36a4ceed2ccf7955138c3b005237fa41efcb4 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Tue, 24 May 2016 07:46:38 -0400 Subject: [PATCH 50/86] More cleanup for RefreshListeners --- .../elasticsearch/index/shard/IndexShard.java | 2 +- .../index/shard/RefreshListeners.java | 24 ++++++++++--------- .../index/shard/RefreshListenersTests.java | 10 ++++---- 3 files changed, 19 insertions(+), 17 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java index f1da00013e03f..096e2cec60e48 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java +++ b/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java @@ -1556,7 +1556,7 @@ public boolean isRefreshNeeded() { * false otherwise. 
*/ public void addRefreshListener(Translog.Location location, Consumer listener) { - refreshListeners.add(location, listener); + refreshListeners.addOrNotify(location, listener); } private class IndexShardRecoveryPerformer extends TranslogRecoveryPerformer { diff --git a/core/src/main/java/org/elasticsearch/index/shard/RefreshListeners.java b/core/src/main/java/org/elasticsearch/index/shard/RefreshListeners.java index 42b608bd4dd19..ff8d23f5a164e 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/RefreshListeners.java +++ b/core/src/main/java/org/elasticsearch/index/shard/RefreshListeners.java @@ -66,13 +66,14 @@ public RefreshListeners(IntSupplier getMaxRefreshListeners, Runnable forceRefres } /** - * Add a listener for refreshes. + * Add a listener for refreshes, calling it immediately if the location is already visible. If this runs out of listener slots then it + * forces a refresh and calls the listener immediately as well. * * @param location the location to listen for * @param listener for the refresh. Called with true if registering the listener ran it out of slots and forced a refresh. Called with * false otherwise. */ - public void add(Translog.Location location, Consumer listener) { + public void addOrNotify(Translog.Location location, Consumer listener) { requireNonNull(listener, "listener cannot be null"); Translog.Location listenerLocation = requireNonNull(location, "location cannot be null"); @@ -88,10 +89,7 @@ public void add(Translog.Location location, Consumer listener) { refreshListenersEstimatedSize++; return; } - /* - * No free slot so force a refresh and call the listener in this thread. Do so outside of the synchronized block so any other - * attempts to add a listener can continue. - */ + // No free slot so force a refresh and call the listener in this thread forceRefresh.run(); listener.accept(true); } @@ -100,7 +98,7 @@ public void add(Translog.Location location, Consumer listener) { * Start listening to an engine. */ public void listenTo(Engine engine) { - engine.registerSearchRefreshListener(new RefreshListenerCallingRefreshListener(engine)); + engine.registerSearchRefreshListener(new RefreshListenerCallingRefreshListener(engine.getTranslog())); } /** @@ -108,16 +106,20 @@ public void listenTo(Engine engine) { * {@linkplain IndexShard#addRefreshListener(Translog.Location, Consumer)}. */ private class RefreshListenerCallingRefreshListener implements ReferenceManager.RefreshListener { - private final Engine engine; + private final Translog translog; + /** + * Snapshot of the translog location before the current refresh if there is a refresh going on, or null. Doesn't have to be volatile + * because it is only used by the refreshing thread.
+ */ private Translog.Location currentRefreshLocation; - public RefreshListenerCallingRefreshListener(Engine engine) { - this.engine = engine; + public RefreshListenerCallingRefreshListener(Translog translog) { + this.translog = translog; } @Override public void beforeRefresh() throws IOException { - currentRefreshLocation = engine.getTranslog().getLastWriteLocation(); + currentRefreshLocation = translog.getLastWriteLocation(); } @Override diff --git a/core/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java b/core/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java index 852105cb23e6f..08a45eccd2ba4 100644 --- a/core/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java +++ b/core/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java @@ -142,7 +142,7 @@ public void testTooMany() throws Exception { for (int i = 0; i < maxListeners; i++) { DummyRefreshListener listener = new DummyRefreshListener(); nonForcedListeners.add(listener); - listeners.add(index.getTranslogLocation(), listener); + listeners.addOrNotify(index.getTranslogLocation(), listener); } // We shouldn't have called any of them @@ -152,7 +152,7 @@ public void testTooMany() throws Exception { // Add one more listener which should cause a refresh. DummyRefreshListener forcingListener = new DummyRefreshListener(); - listeners.add(index.getTranslogLocation(), forcingListener); + listeners.addOrNotify(index.getTranslogLocation(), forcingListener); assertTrue("Forced listener wasn't forced?", forcingListener.forcedRefresh.get()); // That forces all the listeners through. On the listener thread pool so give them some time with assertBusy. @@ -172,7 +172,7 @@ public void testAfterRefresh() throws Exception { } DummyRefreshListener listener = new DummyRefreshListener(); - listeners.add(index.getTranslogLocation(), listener); + listeners.addOrNotify(index.getTranslogLocation(), listener); assertFalse(listener.forcedRefresh.get()); } @@ -194,7 +194,7 @@ public void testConcurrentRefresh() throws Exception { Engine.Index index = index("1"); DummyRefreshListener listener = new DummyRefreshListener(); - listeners.add(index.getTranslogLocation(), listener); + listeners.addOrNotify(index.getTranslogLocation(), listener); assertBusy(() -> assertNotNull(listener.forcedRefresh.get())); assertFalse(listener.forcedRefresh.get()); } @@ -228,7 +228,7 @@ public void testLotsOfThreads() throws Exception { assertEquals(iteration, index.version()); DummyRefreshListener listener = new DummyRefreshListener(); - listeners.add(index.getTranslogLocation(), listener); + listeners.addOrNotify(index.getTranslogLocation(), listener); assertBusy(() -> assertNotNull("listener never called", listener.forcedRefresh.get())); if (threadCount < maxListeners) { assertFalse(listener.forcedRefresh.get()); From 1ec71eea0f4e1228ae1497d982307be818ef4b65 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Tue, 24 May 2016 08:01:14 -0400 Subject: [PATCH 51/86] s/LinkedTransferQueue/ArrayList/ --- .../index/shard/RefreshListeners.java | 47 +++++++++---------- .../index/shard/RefreshListenersTests.java | 5 +- 2 files changed, 23 insertions(+), 29 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/index/shard/RefreshListeners.java b/core/src/main/java/org/elasticsearch/index/shard/RefreshListeners.java index ff8d23f5a164e..240b032674c23 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/RefreshListeners.java +++ b/core/src/main/java/org/elasticsearch/index/shard/RefreshListeners.java @@ 
-21,14 +21,13 @@ import org.apache.lucene.search.ReferenceManager; import org.elasticsearch.common.collect.Tuple; -import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.translog.Translog; import java.io.IOException; -import java.util.Iterator; +import java.util.ArrayList; +import java.util.List; import java.util.concurrent.Executor; -import java.util.concurrent.LinkedTransferQueue; import java.util.function.Consumer; import java.util.function.IntSupplier; @@ -39,21 +38,14 @@ * {@link IndexShard} but kept here so it can be tested without standing up the entire thing. */ final class RefreshListeners { - /** - * Refresh listeners. While they are not stored in sorted order they are processed as though they are. - */ - private final LinkedTransferQueue>> refreshListeners = new LinkedTransferQueue<>(); - private final IntSupplier getMaxRefreshListeners; private final Runnable forceRefresh; private final Executor listenerExecutor; /** - * The estimated size of refreshListenersEstimatedSize used for triggering refresh when the size gets over - * {@link IndexSettings#MAX_REFRESH_LISTENERS_PER_SHARD}. No effort is made to correct for threading issues in the size calculation - * beyond it being volatile. + * List of refresh listeners. Built new any time any entries are removed from it. Always modified while synchronized on {@code this}. */ - private volatile int refreshListenersEstimatedSize; + private volatile List>> refreshListeners = new ArrayList<>(); /** * The translog location that was last made visible by a refresh. */ @@ -83,11 +75,12 @@ public void addOrNotify(Translog.Location location, Consumer listener) listener.accept(false); return; } - if (refreshListenersEstimatedSize < getMaxRefreshListeners.getAsInt()) { - // We have a free slot so register the listener - refreshListeners.add(new Tuple<>(location, listener)); - refreshListenersEstimatedSize++; - return; + synchronized (this) { + if (refreshListeners.size() < getMaxRefreshListeners.getAsInt()) { + // We have a free slot so register the listener + refreshListeners.add(new Tuple<>(location, listener)); + return; + } } // No free slot so force a refresh and call the listener in this thread forceRefresh.run(); @@ -143,17 +136,19 @@ public void afterRefresh(boolean didRefresh) throws IOException { * iterate over the whole queue on every refresh at the cost of some requests having to wait an extra cycle if they get stuck * behind a request that missed the refresh cycle. 
*/ - Iterator>> itr = refreshListeners.iterator(); - while (itr.hasNext()) { - Tuple> listener = itr.next(); - if (listener.v1().compareTo(currentRefreshLocation) > 0) { - return; + List>> newRefreshListeners = new ArrayList<>(); + synchronized (this) { + for (Tuple> tuple : refreshListeners) { + Translog.Location location = tuple.v1(); + if (location.compareTo(currentRefreshLocation) > 0) { + newRefreshListeners.add(tuple); + } else { + Consumer listener = tuple.v2(); + listenerExecutor.execute(() -> listener.accept(false)); + } } - itr.remove(); - refreshListenersEstimatedSize--; - listenerExecutor.execute(() -> listener.v2().accept(false)); + refreshListeners = newRefreshListeners; } - refreshListenersEstimatedSize = 0; } } } diff --git a/core/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java b/core/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java index 08a45eccd2ba4..2b780e7bc4ee2 100644 --- a/core/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java +++ b/core/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java @@ -88,7 +88,7 @@ public void setupListeners() throws Exception { () -> maxListeners, () -> engine.refresh("too-many-listeners"), // Immediately run listeners rather than adding them to the listener thread pool like IndexShard does to simplify the test. - fire -> fire.run() + toExecute -> toExecute.run() ); // Now setup the InternalEngine which is much more complicated because we aren't mocking anything @@ -212,7 +212,6 @@ public void testLotsOfThreads() throws Exception { int threadCount = between(3, 10); maxListeners = between(1, threadCount * 2); - // This thread just refreshes every once in a while to cause trouble. ScheduledFuture refresher = threadPool.scheduleWithFixedDelay(() -> engine.refresh("because test"), timeValueMillis(100)); @@ -221,7 +220,7 @@ public void testLotsOfThreads() throws Exception { for (int thread = 0; thread < threadCount; thread++) { final String threadId = String.format(Locale.ROOT, "%04d", thread); indexers[thread] = new Thread(() -> { - for (int iteration = 1; iteration <= 500; iteration++) { + for (int iteration = 1; iteration <= 50; iteration++) { try { String testFieldValue = String.format(Locale.ROOT, "%s%04d", threadId, iteration); Engine.Index index = index(threadId, testFieldValue); From 5d8eecd0d904b497844b4c81c46477bd6178ed3a Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Tue, 24 May 2016 08:58:47 -0400 Subject: [PATCH 52/86] Remove funky synchronization in AsyncReplicaAction --- .../flush/TransportShardFlushAction.java | 6 +- .../refresh/TransportShardRefreshAction.java | 6 +- .../TransportReplicationAction.java | 79 ++++++++----------- .../replication/TransportWriteAction.java | 79 +++++++++++-------- .../elasticsearch/index/IndexSettings.java | 9 +++ .../index/engine/ShadowEngine.java | 1 - .../elasticsearch/index/shard/IndexShard.java | 2 +- .../TransportReplicationActionTests.java | 8 +- 8 files changed, 101 insertions(+), 89 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportShardFlushAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportShardFlushAction.java index 516d4aa98d977..82fb6d70ca441 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportShardFlushAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportShardFlushAction.java @@ -19,7 +19,6 @@ package org.elasticsearch.action.admin.indices.flush; 
-import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.replication.ReplicationResponse; import org.elasticsearch.action.support.replication.TransportReplicationAction; @@ -32,7 +31,6 @@ import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.TransportResponse; import org.elasticsearch.transport.TransportService; /** @@ -64,11 +62,11 @@ protected PrimaryResult shardOperationOnPrimary(ShardFlushRequest shardRequest) } @Override - protected void shardOperationOnReplica(ShardFlushRequest request, ActionListener listener) { + protected ReplicaResult shardOperationOnReplica(ShardFlushRequest request) { IndexShard indexShard = indicesService.indexServiceSafe(request.shardId().getIndex()).getShard(request.shardId().id()); indexShard.flush(request.getRequest()); logger.trace("{} flush request executed on replica", indexShard.shardId()); - listener.onResponse(TransportResponse.Empty.INSTANCE); + return new ReplicaResult(); } @Override diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportShardRefreshAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportShardRefreshAction.java index bb6e2ba17a16c..d7d0c289953a4 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportShardRefreshAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportShardRefreshAction.java @@ -19,7 +19,6 @@ package org.elasticsearch.action.admin.indices.refresh; -import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.replication.BasicReplicationRequest; import org.elasticsearch.action.support.replication.ReplicationResponse; @@ -34,7 +33,6 @@ import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.TransportResponse; import org.elasticsearch.transport.TransportService; public class TransportShardRefreshAction @@ -64,12 +62,12 @@ protected PrimaryResult shardOperationOnPrimary(BasicReplicationRequest shardReq } @Override - protected void shardOperationOnReplica(BasicReplicationRequest request, ActionListener listener) { + protected ReplicaResult shardOperationOnReplica(BasicReplicationRequest request) { final ShardId shardId = request.shardId(); IndexShard indexShard = indicesService.indexServiceSafe(shardId.getIndex()).getShard(shardId.id()); indexShard.refresh("api"); logger.trace("{} refresh request executed on replica", indexShard.shardId()); - listener.onResponse(TransportResponse.Empty.INSTANCE); + return new ReplicaResult(); } @Override diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java b/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java index ed31ee4115585..6cffee514dfc0 100644 --- a/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java +++ b/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java @@ -68,7 +68,6 @@ import java.io.IOException; import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.atomic.AtomicInteger; import java.util.function.Consumer; import 
java.util.function.Supplier; @@ -149,17 +148,17 @@ protected void resolveRequest(MetaData metaData, IndexMetaData indexMetaData, Re } /** - * Synchronous portion of primary operation on node with primary copy + * Primary operation on node with primary copy. * * @param shardRequest the request to the primary shard */ protected abstract PrimaryResult shardOperationOnPrimary(Request shardRequest) throws Exception; /** - * Replica operation on nodes with replica copies. While this does take a listener it should not return until it has completed any - * operations that it must take under the shard lock. The listener is for waiting for things like index to become visible in search. + * Synchronous replica operation on nodes with replica copies. This is done under the lock form + * {@link #acquireReplicaOperationLock(ShardId, long)}. */ - protected abstract void shardOperationOnReplica(ReplicaRequest shardRequest, ActionListener listener); + protected abstract ReplicaResult shardOperationOnReplica(ReplicaRequest shardRequest); /** * True if write consistency should be checked for an implementation @@ -353,6 +352,17 @@ public void respond(ActionListener listener) { } } + protected class ReplicaResult { + /** + * Public constructor so subclasses can call it. + */ + public ReplicaResult() {} + + public void respond(ActionListener listener) { + listener.onResponse(TransportResponse.Empty.INSTANCE); + } + } + class ReplicaOperationTransportHandler implements TransportRequestHandler { @Override public void messageReceived(final ReplicaRequest request, final TransportChannel channel) throws Exception { @@ -378,10 +388,6 @@ public RetryOnReplicaException(StreamInput in) throws IOException { } private final class AsyncReplicaAction extends AbstractRunnable { - /** - * The number of operations remaining before we can reply. See javadoc for {@link #operationComplete()} more. - */ - private final AtomicInteger operationsUntilReply = new AtomicInteger(2); private final ReplicaRequest request; private final TransportChannel channel; /** @@ -444,45 +450,30 @@ protected void responseWithFailure(Throwable t) { protected void doRun() throws Exception { setPhase(task, "replica"); assert request.shardId() != null : "request shardId must be set"; + ReplicaResult result; try (Releasable ignored = acquireReplicaOperationLock(request.shardId(), request.primaryTerm())) { - shardOperationOnReplica(request, new ActionListener() { - @Override - public void onResponse(Empty response) { - operationComplete(); + result = shardOperationOnReplica(request); + } + result.respond(new ActionListener() { + @Override + public void onResponse(Empty response) { + if (logger.isTraceEnabled()) { + logger.trace("action [{}] completed on shard [{}] for request [{}]", transportReplicaAction, request.shardId(), + request); } - - @Override - public void onFailure(Throwable e) { - AsyncReplicaAction.this.onFailure(e); + setPhase(task, "finished"); + try { + channel.sendResponse(response); + } catch (Exception e) { + onFailure(e); } - }); - } - operationComplete(); - } + } - /** - * Handle a portion of the operation finishing. Called twice: once after the operation returns and the lock is released and once - * after the listener returns. We only reply over the channel when both have finished but we don't know in which order they will - * finish. - * - * The reason we can't reply until both is finished is a bit unclear - but the advantage of doing it this ways is that we never - * ever ever reply while we have the operation lock. 
And it is just a good idea in general not to do network IO while you have a - * lock. So that is something. - */ - private void operationComplete() { - if (operationsUntilReply.decrementAndGet() != 0) { - return; - } - if (logger.isTraceEnabled()) { - logger.trace("action [{}] completed on shard [{}] for request [{}]", transportReplicaAction, request.shardId(), - request); - } - setPhase(task, "finished"); - try { - channel.sendResponse(TransportResponse.Empty.INSTANCE); - } catch (Exception e) { - onFailure(e); - } + @Override + public void onFailure(Throwable e) { + AsyncReplicaAction.this.onFailure(e); + } + }); } } diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java b/core/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java index e5a944766bd65..d585140ff5f2d 100644 --- a/core/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java +++ b/core/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java @@ -34,7 +34,7 @@ import org.elasticsearch.index.translog.Translog.Location; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.TransportResponse; +import org.elasticsearch.transport.TransportResponse.Empty; import org.elasticsearch.transport.TransportService; import java.util.function.Supplier; @@ -77,39 +77,12 @@ protected final WritePrimaryResult shardOperationOnPrimary(Request request) thro } @Override - protected final void shardOperationOnReplica(Request request, ActionListener listener) { + protected final WriteReplicaResult shardOperationOnReplica(Request request) { final ShardId shardId = request.shardId(); IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex()); IndexShard indexShard = indexService.getShard(shardId.id()); Translog.Location location = onReplicaShard(request, indexShard); - // NOCOMMIT deduplicate with the WritePrimaryResult - boolean forked = false; - switch (request.getRefreshPolicy()) { - case IMMEDIATE: - indexShard.refresh("refresh_flag_index"); - break; - case WAIT_UNTIL: - if (location != null) { - forked = true; - indexShard.addRefreshListener(location, forcedRefresh -> { - if (forcedRefresh) { - logger.warn("block_until_refresh request ran out of slots and forced a refresh: [{}]", request); - } - listener.onResponse(null); - }); - } - break; - case NONE: - break; - } - boolean fsyncTranslog = indexShard.getTranslogDurability() == Translog.Durability.REQUEST && location != null; - if (fsyncTranslog) { - indexShard.sync(location); - } - indexShard.maybeFlush(); - if (false == forked) { - listener.onResponse(null); - } + return new WriteReplicaResult(indexShard, request, location); } /** @@ -136,7 +109,7 @@ public Translog.Location getLocation() { } } - protected class WritePrimaryResult extends PrimaryResult { + private class WritePrimaryResult extends PrimaryResult { volatile boolean refreshPending; volatile ActionListener listener = null; @@ -189,4 +162,48 @@ protected void respondIfPossible() { } } } + + private class WriteReplicaResult extends ReplicaResult { + private final IndexShard indexShard; + private final ReplicatedWriteRequest request; + private final Translog.Location location; + + public WriteReplicaResult(IndexShard indexShard, ReplicatedWriteRequest request, Location location) { + this.indexShard = indexShard; + this.request = request; + this.location = location; + } + + @Override + public void 
respond(ActionListener listener) { + // NOCOMMIT deduplicate with the WritePrimaryResult + boolean forked = false; + switch (request.getRefreshPolicy()) { + case IMMEDIATE: + indexShard.refresh("refresh_flag_index"); + break; + case WAIT_UNTIL: + if (location != null) { + forked = true; + indexShard.addRefreshListener(location, forcedRefresh -> { + if (forcedRefresh) { + logger.warn("block_until_refresh request ran out of slots and forced a refresh: [{}]", request); + } + listener.onResponse(null); + }); + } + break; + case NONE: + break; + } + boolean fsyncTranslog = indexShard.getTranslogDurability() == Translog.Durability.REQUEST && location != null; + if (fsyncTranslog) { + indexShard.sync(location); + } + indexShard.maybeFlush(); + if (false == forked) { + listener.onResponse(null); + } + } + } } diff --git a/core/src/main/java/org/elasticsearch/index/IndexSettings.java b/core/src/main/java/org/elasticsearch/index/IndexSettings.java index cbfe19d1a2697..e5174ea192125 100644 --- a/core/src/main/java/org/elasticsearch/index/IndexSettings.java +++ b/core/src/main/java/org/elasticsearch/index/IndexSettings.java @@ -115,6 +115,9 @@ public final class IndexSettings { public static final Setting INDEX_GC_DELETES_SETTING = Setting.timeSetting("index.gc_deletes", DEFAULT_GC_DELETES, new TimeValue(-1, TimeUnit.MILLISECONDS), Property.Dynamic, Property.IndexScope); + /** + * The maximum number of refresh listeners allowed on this shard. + */ public static final Setting MAX_REFRESH_LISTENERS_PER_SHARD = Setting.intSetting("index.max_refresh_listeners", 1000, 0, Property.Dynamic, Property.IndexScope); @@ -147,6 +150,9 @@ public final class IndexSettings { private volatile int maxResultWindow; private volatile int maxRescoreWindow; private volatile boolean TTLPurgeDisabled; + /** + * The maximum number of refresh listeners allowed on this shard. + */ private volatile int maxRefreshListeners; /** @@ -504,6 +510,9 @@ public T getValue(Setting setting) { return scopedSettings.get(setting); } + /** + * The maximum number of refresh listeners allowed on this shard.
+ */ public int getMaxRefreshListeners() { return maxRefreshListeners; } diff --git a/core/src/main/java/org/elasticsearch/index/engine/ShadowEngine.java b/core/src/main/java/org/elasticsearch/index/engine/ShadowEngine.java index e69e31d88ef7c..9e544b568a603 100644 --- a/core/src/main/java/org/elasticsearch/index/engine/ShadowEngine.java +++ b/core/src/main/java/org/elasticsearch/index/engine/ShadowEngine.java @@ -35,7 +35,6 @@ import java.io.IOException; import java.util.Arrays; import java.util.List; -import java.util.function.Consumer; import java.util.function.Function; /** diff --git a/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java index 096e2cec60e48..7268d711b58c1 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java +++ b/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java @@ -246,7 +246,7 @@ public IndexShard(ShardId shardId, IndexSettings indexSettings, ShardPath path, searcherWrapper = indexSearcherWrapper; primaryTerm = indexSettings.getIndexMetaData().primaryTerm(shardId.id()); refreshListeners = new RefreshListeners( - () -> indexSettings.getMaxRefreshListeners(), + indexSettings::getMaxRefreshListeners, () -> refresh("too_many_listeners"), fire -> threadPool.executor(ThreadPool.Names.LISTENER).execute(fire)); } diff --git a/core/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java b/core/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java index 08c01e718c6e1..224c020270f0e 100644 --- a/core/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java +++ b/core/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java @@ -667,13 +667,13 @@ public void testReplicasCounter() throws Exception { final ReplicationTask task = maybeTask(); Action action = new Action(Settings.EMPTY, "testActionWithExceptions", transportService, clusterService, threadPool) { @Override - protected void shardOperationOnReplica(Request request, ActionListener listener) { + protected ReplicaResult shardOperationOnReplica(Request request) { assertIndexShardCounter(1); assertPhase(task, "replica"); if (throwException) { throw new ElasticsearchException("simulated"); } - super.shardOperationOnReplica(request, listener); + return new ReplicaResult(); } }; final Action.ReplicaOperationTransportHandler replicaOperationTransportHandler = action.new ReplicaOperationTransportHandler(); @@ -772,9 +772,9 @@ protected PrimaryResult shardOperationOnPrimary(Request shardRequest) throws Exc } @Override - protected void shardOperationOnReplica(Request request, ActionListener listener) { + protected ReplicaResult shardOperationOnReplica(Request request) { request.processedOnReplicas.incrementAndGet(); - listener.onResponse(TransportResponse.Empty.INSTANCE); + return new ReplicaResult(); } @Override From da1e765678890a02d61d8a29aa433274beb5e00c Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Tue, 24 May 2016 09:26:35 -0400 Subject: [PATCH 53/86] Reply with non-null Also move the fsync and flush to before the refresh listener stuff. 
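For illustration, the replica-side ordering this aims for, sketched with hypothetical stand-in types rather than the real IndexShard API: fsync the translog when durability is REQUEST, check for a flush, then handle the refresh policy, and always answer the listener with a real response.

    // Hypothetical sketch only; ShardOps, Durability and RefreshPolicy are stand-ins, not Elasticsearch classes.
    class ReplicaWriteOrderingSketch {
        enum RefreshPolicy { NONE, IMMEDIATE, WAIT_UNTIL }
        enum Durability { ASYNC, REQUEST }

        interface ShardOps {
            Durability translogDurability();
            void syncTranslog();                          // fsync up to the last write location
            void maybeFlush();                            // flush if the translog has grown too large
            void refresh(String reason);
            void addRefreshListener(Runnable onVisible);  // fires once the write is searchable
        }

        static void afterReplicaWrite(ShardOps shard, RefreshPolicy policy, boolean hasLocation, Runnable respond) {
            // fsync and flush happen first, before any refresh handling
            if (hasLocation && shard.translogDurability() == Durability.REQUEST) {
                shard.syncTranslog();
            }
            shard.maybeFlush();
            switch (policy) {
                case IMMEDIATE:
                    shard.refresh("refresh_flag_index");
                    respond.run();
                    break;
                case WAIT_UNTIL:
                    if (hasLocation) {
                        shard.addRefreshListener(respond); // reply only once the change is visible
                    } else {
                        respond.run();
                    }
                    break;
                default:
                    respond.run();
            }
        }
    }

Here respond stands in for sending TransportResponse.Empty.INSTANCE back to the listener instead of null.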
--- .../replication/TransportWriteAction.java | 28 +++++++++---------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java b/core/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java index d585140ff5f2d..29101c5d87b1b 100644 --- a/core/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java +++ b/core/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java @@ -34,7 +34,7 @@ import org.elasticsearch.index.translog.Translog.Location; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.TransportResponse.Empty; +import org.elasticsearch.transport.TransportResponse; import org.elasticsearch.transport.TransportService; import java.util.function.Supplier; @@ -117,6 +117,11 @@ public WritePrimaryResult(Request request, Response finalResponse, @Nullable Translog.Location location, IndexShard indexShard) { super(request, finalResponse); + boolean fsyncTranslog = indexShard.getTranslogDurability() == Translog.Durability.REQUEST && location != null; + if (fsyncTranslog) { + indexShard.sync(location); + } + indexShard.maybeFlush(); switch (request.getRefreshPolicy()) { case IMMEDIATE: indexShard.refresh("refresh_flag_index"); @@ -140,11 +145,6 @@ public WritePrimaryResult(Request request, Response finalResponse, case NONE: break; } - boolean fsyncTranslog = indexShard.getTranslogDurability() == Translog.Durability.REQUEST && location != null; - if (fsyncTranslog) { - indexShard.sync(location); - } - indexShard.maybeFlush(); } @Override @@ -175,8 +175,13 @@ public WriteReplicaResult(IndexShard indexShard, ReplicatedWriteRequest reque } @Override - public void respond(ActionListener listener) { + public void respond(ActionListener listener) { // NOCOMMIT deduplicate with the WritePrimaryResult + boolean fsyncTranslog = indexShard.getTranslogDurability() == Translog.Durability.REQUEST && location != null; + if (fsyncTranslog) { + indexShard.sync(location); + } + indexShard.maybeFlush(); boolean forked = false; switch (request.getRefreshPolicy()) { case IMMEDIATE: @@ -189,20 +194,15 @@ public void respond(ActionListener listener) { if (forcedRefresh) { logger.warn("block_until_refresh request ran out of slots and forced a refresh: [{}]", request); } - listener.onResponse(null); + listener.onResponse(TransportResponse.Empty.INSTANCE); }); } break; case NONE: break; } - boolean fsyncTranslog = indexShard.getTranslogDurability() == Translog.Durability.REQUEST && location != null; - if (fsyncTranslog) { - indexShard.sync(location); - } - indexShard.maybeFlush(); if (false == forked) { - listener.onResponse(null); + listener.onResponse(TransportResponse.Empty.INSTANCE); } } } From 04343a22647f19304d9dc716b3fac9b183227f63 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Tue, 24 May 2016 09:37:52 -0400 Subject: [PATCH 54/86] Javadoc --- .../test/java/org/elasticsearch/index/BlockUntilRefreshIT.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/src/test/java/org/elasticsearch/index/BlockUntilRefreshIT.java b/core/src/test/java/org/elasticsearch/index/BlockUntilRefreshIT.java index ebcdb4071e522..d4b567bb843be 100644 --- a/core/src/test/java/org/elasticsearch/index/BlockUntilRefreshIT.java +++ b/core/src/test/java/org/elasticsearch/index/BlockUntilRefreshIT.java @@ -48,7 +48,7 @@ import static 
org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHits; /** - * Tests that requests with block_until_refresh set to true will be visible when they return. + * Tests that requests with RefreshPolicy.WAIT_UNTIL will be visible when they return. */ public class BlockUntilRefreshIT extends ESIntegTestCase { public void testIndex() { From b2704b8a39382953f8f91a9743e894ee289f7514 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Tue, 24 May 2016 09:37:58 -0400 Subject: [PATCH 55/86] Remove unused imports Maybe I added them? --- .../org/elasticsearch/index/engine/InternalEngineTests.java | 5 ----- 1 file changed, 5 deletions(-) diff --git a/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java b/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java index c535d43ea9065..4a4a73df85175 100644 --- a/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java +++ b/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java @@ -71,7 +71,6 @@ import org.elasticsearch.index.analysis.AnalysisService; import org.elasticsearch.index.codec.CodecService; import org.elasticsearch.index.engine.Engine.Searcher; -import org.elasticsearch.index.fieldvisitor.SingleFieldsVisitor; import org.elasticsearch.index.mapper.ContentPath; import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.DocumentMapperForType; @@ -117,16 +116,12 @@ import java.util.Locale; import java.util.Map; import java.util.concurrent.BrokenBarrierException; -import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.CountDownLatch; import java.util.concurrent.CyclicBarrier; -import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; -import java.util.function.Consumer; -import static java.lang.Math.max; import static java.util.Collections.emptyMap; import static org.elasticsearch.index.engine.Engine.Operation.Origin.PRIMARY; import static org.elasticsearch.index.engine.Engine.Operation.Origin.REPLICA; From 0d49d9c5720dadfb67da3fa760397bf6d874601c Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Tue, 24 May 2016 10:46:18 -0400 Subject: [PATCH 56/86] Flip relationship between RefreshListeners and Engine Now RefreshListeners comes to Engine from EngineConfig. 
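The shape of the new wiring, as a toy sketch with made-up names (ToyConfig and ToyEngine are not the real EngineConfig or InternalEngine): the config collects the listeners before the engine exists, and the engine registers them in its constructor instead of being "listened to" afterwards.

    import java.util.ArrayList;
    import java.util.List;

    // Toy sketch only: listener registration flows config -> engine at construction time.
    class EngineWiringSketch {
        interface CreationListener { void created(ToyEngine engine); }
        interface RefreshListener { void afterRefresh(); }

        static class ToyConfig {
            final List<CreationListener> creationListeners = new ArrayList<>();
            final List<RefreshListener> refreshListeners = new ArrayList<>();
            void addCreationListener(CreationListener l) { creationListeners.add(l); }
            void addRefreshListener(RefreshListener l) { refreshListeners.add(l); }
        }

        static class ToyEngine {
            private final List<RefreshListener> refreshListeners;
            ToyEngine(ToyConfig config) {
                this.refreshListeners = new ArrayList<>(config.refreshListeners);
                for (CreationListener l : config.creationListeners) {
                    l.created(this);                 // replaces the old "listenTo(engine)" direction
                }
            }
            void refresh() {
                for (RefreshListener l : refreshListeners) {
                    l.afterRefresh();
                }
            }
        }

        public static void main(String[] args) {
            ToyConfig config = new ToyConfig();
            config.addCreationListener(engine -> System.out.println("engine created"));
            config.addRefreshListener(() -> System.out.println("refresh finished"));
            new ToyEngine(config).refresh();
        }
    }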
--- .../elasticsearch/index/engine/Engine.java | 8 -- .../index/engine/EngineConfig.java | 46 ++++++++- .../index/engine/InternalEngine.java | 7 ++ .../index/engine/ShadowEngine.java | 7 ++ .../elasticsearch/index/shard/IndexShard.java | 10 +- .../index/shard/RefreshListeners.java | 96 +++++++++---------- .../index/shard/ShadowIndexShard.java | 5 + .../index/BlockUntilRefreshIT.java | 7 ++ .../index/engine/InternalEngineTests.java | 45 +++++++++ .../index/engine/ShadowEngineTests.java | 42 ++++++++ .../index/shard/RefreshListenersTests.java | 5 +- 11 files changed, 209 insertions(+), 69 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/index/engine/Engine.java b/core/src/main/java/org/elasticsearch/index/engine/Engine.java index 6da78dc613b24..944c235aa2356 100644 --- a/core/src/main/java/org/elasticsearch/index/engine/Engine.java +++ b/core/src/main/java/org/elasticsearch/index/engine/Engine.java @@ -33,7 +33,6 @@ import org.apache.lucene.index.SnapshotDeletionPolicy; import org.apache.lucene.index.Term; import org.apache.lucene.search.IndexSearcher; -import org.apache.lucene.search.ReferenceManager.RefreshListener; import org.apache.lucene.search.SearcherManager; import org.apache.lucene.store.AlreadyClosedException; import org.apache.lucene.store.Directory; @@ -604,13 +603,6 @@ public final boolean refreshNeeded() { return false; } - /** - * Register a listener that is called whenever the searcher is refreshed. See {@link SearcherManager#addListener(RefreshListener)}. - */ - public final void registerSearchRefreshListener(RefreshListener listener) { - getSearcherManager().addListener(listener); - } - /** * Synchronously refreshes the engine for new search operations to reflect the latest * changes. diff --git a/core/src/main/java/org/elasticsearch/index/engine/EngineConfig.java b/core/src/main/java/org/elasticsearch/index/engine/EngineConfig.java index 8a56feff70f52..a499886a1215f 100644 --- a/core/src/main/java/org/elasticsearch/index/engine/EngineConfig.java +++ b/core/src/main/java/org/elasticsearch/index/engine/EngineConfig.java @@ -24,10 +24,10 @@ import org.apache.lucene.index.SnapshotDeletionPolicy; import org.apache.lucene.search.QueryCache; import org.apache.lucene.search.QueryCachingPolicy; +import org.apache.lucene.search.ReferenceManager; import org.apache.lucene.search.similarities.Similarity; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; @@ -40,7 +40,10 @@ import org.elasticsearch.indices.IndexingMemoryController; import org.elasticsearch.threadpool.ThreadPool; -import java.util.function.Function; +import java.util.ArrayList; +import java.util.List; + +import static java.util.Collections.unmodifiableList; /* * Holds all the configuration that is used to create an {@link Engine}. @@ -66,6 +69,14 @@ public final class EngineConfig { private final Engine.EventListener eventListener; private final QueryCache queryCache; private final QueryCachingPolicy queryCachingPolicy; + /** + * List of listeners for the engine to be created. + */ + private final List creationListeners = new ArrayList<>(); + /** + * List of listeners for the searcher being refreshed. + */ + private final List refreshListeners = new ArrayList<>(); /** * Index setting to change the low level lucene codec used for writing new segments. 
@@ -289,6 +300,34 @@ public OpenMode getOpenMode() { return openMode; } + /** + * Add a listener for engine creation. + */ + public void addEngineCreationListener(EngineCreationListener listener) { + creationListeners.add(listener); + } + + /** + * List of listeners for this engine. + */ + public List getEngineCreationListeners() { + return unmodifiableList(creationListeners); + } + + /** + * Add a refresh listener that will be registered with the index on creation. + */ + public void addRefreshListener(ReferenceManager.RefreshListener listener) { + refreshListeners.add(listener); + } + + /** + * List of refresh listeners for the searcher's refresh cycle. + */ + public List getRefreshListeners() { + return unmodifiableList(refreshListeners); + } + /** * Engine open mode defines how the engine should be opened or in other words what the engine should expect * to recover from. We either create a brand new engine with a new index and translog or we recover from an existing index. @@ -303,4 +342,7 @@ public enum OpenMode { OPEN_INDEX_AND_TRANSLOG; } + public interface EngineCreationListener { + void engineCreated(Engine engine); + } } diff --git a/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java b/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java index 9f7bc41add995..474f41d4536aa 100644 --- a/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java +++ b/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java @@ -32,6 +32,7 @@ import org.apache.lucene.index.SegmentInfos; import org.apache.lucene.index.Term; import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.ReferenceManager; import org.apache.lucene.search.SearcherFactory; import org.apache.lucene.search.SearcherManager; import org.apache.lucene.store.AlreadyClosedException; @@ -168,6 +169,12 @@ public InternalEngine(EngineConfig engineConfig) throws EngineException { } } } + for (EngineConfig.EngineCreationListener listener : engineConfig.getEngineCreationListeners()) { + listener.engineCreated(this); + } + for (ReferenceManager.RefreshListener listener : engineConfig.getRefreshListeners()) { + searcherManager.addListener(listener); + } logger.trace("created new InternalEngine"); } diff --git a/core/src/main/java/org/elasticsearch/index/engine/ShadowEngine.java b/core/src/main/java/org/elasticsearch/index/engine/ShadowEngine.java index 9e544b568a603..6933e58e64243 100644 --- a/core/src/main/java/org/elasticsearch/index/engine/ShadowEngine.java +++ b/core/src/main/java/org/elasticsearch/index/engine/ShadowEngine.java @@ -22,6 +22,7 @@ import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexCommit; import org.apache.lucene.index.SegmentInfos; +import org.apache.lucene.search.ReferenceManager; import org.apache.lucene.search.SearcherFactory; import org.apache.lucene.search.SearcherManager; import org.apache.lucene.store.AlreadyClosedException; @@ -98,6 +99,12 @@ public ShadowEngine(EngineConfig engineConfig) { } catch (IOException ex) { throw new EngineCreationFailureException(shardId, "failed to open index reader", ex); } + for (EngineConfig.EngineCreationListener listener : engineConfig.getEngineCreationListeners()) { + listener.engineCreated(this); + } + for (ReferenceManager.RefreshListener listener : engineConfig.getRefreshListeners()) { + searcherManager.addListener(listener); + } logger.trace("created new ShadowEngine"); } diff --git a/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java 
b/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java index 7268d711b58c1..9a7cf10a7fe61 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java +++ b/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java @@ -920,6 +920,7 @@ private void internalPerformTranslogRecovery(boolean skipTranslogRecovery, boole // we disable deletes since we allow for operations to be executed against the shard while recovering // but we need to make sure we don't loose deletes until we are done recovering config.setEnableGcDeletes(false); + setupRefreshListeners(config); Engine newEngine = createNewEngine(config); verifyNotClosed(); if (openMode == EngineConfig.OpenMode.OPEN_INDEX_AND_TRANSLOG) { @@ -1370,9 +1371,12 @@ private Engine createNewEngine(EngineConfig config) { } protected Engine newEngine(EngineConfig config) { - Engine engine = engineFactory.newReadWriteEngine(config); - refreshListeners.listenTo(engine); - return engine; + return engineFactory.newReadWriteEngine(config); + } + + protected void setupRefreshListeners(EngineConfig config) { + config.addEngineCreationListener(refreshListeners); + config.addRefreshListener(refreshListeners); } /** diff --git a/core/src/main/java/org/elasticsearch/index/shard/RefreshListeners.java b/core/src/main/java/org/elasticsearch/index/shard/RefreshListeners.java index 240b032674c23..096042b54dedc 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/RefreshListeners.java +++ b/core/src/main/java/org/elasticsearch/index/shard/RefreshListeners.java @@ -22,6 +22,7 @@ import org.apache.lucene.search.ReferenceManager; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.index.engine.Engine; +import org.elasticsearch.index.engine.EngineConfig.EngineCreationListener; import org.elasticsearch.index.translog.Translog; import java.io.IOException; @@ -37,7 +38,7 @@ * Allows for the registration of listeners that are called when a change becomes visible for search. This functionality is exposed from * {@link IndexShard} but kept here so it can be tested without standing up the entire thing. */ -final class RefreshListeners { +final class RefreshListeners implements EngineCreationListener, ReferenceManager.RefreshListener { private final IntSupplier getMaxRefreshListeners; private final Runnable forceRefresh; private final Executor listenerExecutor; @@ -87,68 +88,57 @@ public void addOrNotify(Translog.Location location, Consumer listener) listener.accept(true); } - /** - * Start listening to an engine. - */ - public void listenTo(Engine engine) { - engine.registerSearchRefreshListener(new RefreshListenerCallingRefreshListener(engine.getTranslog())); + @Override + public void engineCreated(Engine engine) { + translog = engine.getTranslog(); } + // Implementation of ReferenceManager.RefreshListener that adapts Lucene's RefreshListener into Elasticsearch's refresh listeners. + private Translog translog; /** - * Listens to Lucene's {@linkplain ReferenceManager.RefreshListener} and fires off listeners added by - * {@linkplain IndexShard#addRefreshListener(Translog.Location, Consumer)}. + * Snapshot of the translog location before the current refresh if there is a refresh going on or null. Doesn't have to be volatile + * because when it is used by the refreshing thread. 
*/ - private class RefreshListenerCallingRefreshListener implements ReferenceManager.RefreshListener { - private final Translog translog; - /** - * Snapshot of the translog location before the current refresh if there is a refresh going on or null. Doesn't have to be volatile - * because when it is used by the refreshing thread. - */ - private Translog.Location currentRefreshLocation; - - public RefreshListenerCallingRefreshListener(Translog translog) { - this.translog = translog; - } + private Translog.Location currentRefreshLocation; - @Override - public void beforeRefresh() throws IOException { - currentRefreshLocation = translog.getLastWriteLocation(); - } + @Override + public void beforeRefresh() throws IOException { + currentRefreshLocation = translog.getLastWriteLocation(); + } - @Override - public void afterRefresh(boolean didRefresh) throws IOException { - // This intentionally ignores didRefresh so a refresh call over the API can force rechecking the refreshListeners. - if (null == currentRefreshLocation) { - /* - * The translog had an empty last write location at the start of the refresh so we can't alert anyone to anything. This - * usually happens during recovery. The next refresh cycle out to pick up this refresh. - */ - return; - } - /* - * First set the lastRefreshedLocation so listeners that come in locations before that will just execute inline without messing - * around with refreshListeners at all. - */ - lastRefreshedLocation = currentRefreshLocation; + @Override + public void afterRefresh(boolean didRefresh) throws IOException { + // This intentionally ignores didRefresh so a refresh call over the API can force rechecking the refreshListeners. + if (null == currentRefreshLocation) { /* - * Now pop all listeners off the front of refreshListeners that are ready to be called. The listeners won't always be in order - * but they should be pretty close because you don't listen to times super far in the future. This prevents us from having to - * iterate over the whole queue on every refresh at the cost of some requests having to wait an extra cycle if they get stuck - * behind a request that missed the refresh cycle. + * The translog had an empty last write location at the start of the refresh so we can't alert anyone to anything. This + * usually happens during recovery. The next refresh cycle out to pick up this refresh. */ - List>> newRefreshListeners = new ArrayList<>(); - synchronized (this) { - for (Tuple> tuple : refreshListeners) { - Translog.Location location = tuple.v1(); - if (location.compareTo(currentRefreshLocation) > 0) { - newRefreshListeners.add(tuple); - } else { - Consumer listener = tuple.v2(); - listenerExecutor.execute(() -> listener.accept(false)); - } + return; + } + /* + * First set the lastRefreshedLocation so listeners that come in locations before that will just execute inline without messing + * around with refreshListeners at all. + */ + lastRefreshedLocation = currentRefreshLocation; + /* + * Now pop all listeners off the front of refreshListeners that are ready to be called. The listeners won't always be in order + * but they should be pretty close because you don't listen to times super far in the future. This prevents us from having to + * iterate over the whole queue on every refresh at the cost of some requests having to wait an extra cycle if they get stuck + * behind a request that missed the refresh cycle. 
+ */ + List>> newRefreshListeners = new ArrayList<>(); + synchronized (this) { + for (Tuple> tuple : refreshListeners) { + Translog.Location location = tuple.v1(); + if (location.compareTo(currentRefreshLocation) > 0) { + newRefreshListeners.add(tuple); + } else { + Consumer listener = tuple.v2(); + listenerExecutor.execute(() -> listener.accept(false)); } - refreshListeners = newRefreshListeners; } + refreshListeners = newRefreshListeners; } } } diff --git a/core/src/main/java/org/elasticsearch/index/shard/ShadowIndexShard.java b/core/src/main/java/org/elasticsearch/index/shard/ShadowIndexShard.java index adc5f92374cf2..9e939f9cf9cd4 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/ShadowIndexShard.java +++ b/core/src/main/java/org/elasticsearch/index/shard/ShadowIndexShard.java @@ -88,6 +88,11 @@ protected Engine newEngine(EngineConfig config) { return engineFactory.newReadOnlyEngine(config); } + @Override + protected void setupRefreshListeners(EngineConfig config) { + // Intentionally not setting them up because the shadow replica doesn't have a Translog so it can't support RefreshListeners. + } + @Override public boolean shouldFlush() { // we don't need to flush since we don't write - all dominated by the primary diff --git a/core/src/test/java/org/elasticsearch/index/BlockUntilRefreshIT.java b/core/src/test/java/org/elasticsearch/index/BlockUntilRefreshIT.java index d4b567bb843be..91a807d23d414 100644 --- a/core/src/test/java/org/elasticsearch/index/BlockUntilRefreshIT.java +++ b/core/src/test/java/org/elasticsearch/index/BlockUntilRefreshIT.java @@ -27,6 +27,7 @@ import org.elasticsearch.action.index.IndexResponse; import org.elasticsearch.action.support.WriteRequest.RefreshPolicy; import org.elasticsearch.action.update.UpdateResponse; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.script.ExecutableScript; @@ -51,6 +52,12 @@ * Tests that requests with RefreshPolicy.WAIT_UNTIL will be visible when they return. */ public class BlockUntilRefreshIT extends ESIntegTestCase { + @Override + public Settings indexSettings() { + // Use a shorter refresh interval to speed up the tests. We'll be waiting on this interval several times. 
+ return Settings.builder().put(super.indexSettings()).put("index.refresh_interval", "100ms").build(); + } + public void testIndex() { IndexResponse index = client().prepareIndex("test", "index", "1").setSource("foo", "bar").setRefreshPolicy(RefreshPolicy.WAIT_UNTIL) .get(); diff --git a/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java b/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java index 4a4a73df85175..60366533acbfc 100644 --- a/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java +++ b/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java @@ -43,6 +43,7 @@ import org.apache.lucene.index.TieredMergePolicy; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.MatchAllDocsQuery; +import org.apache.lucene.search.ReferenceManager; import org.apache.lucene.search.TermQuery; import org.apache.lucene.search.TopDocs; import org.apache.lucene.store.AlreadyClosedException; @@ -2114,4 +2115,48 @@ public void testCurrentTranslogIDisCommitted() throws IOException { } } } + + public void testCreationListener() throws IOException { + AtomicReference listener = new AtomicReference<>(); + try (Store store = createStore()) { + EngineConfig config = config(defaultSettings, store, createTempDir(), newMergePolicy()); + config.addEngineCreationListener(listener::set); + try (Engine created = new InternalEngine(config)) { + assertSame(created, listener.get()); + } + } + } + + public void testRefreshListener() throws IOException { + class TestRefreshListener implements ReferenceManager.RefreshListener { + boolean beforeRefreshed = false; + boolean afterRefreshed = false; + + @Override + public void beforeRefresh() throws IOException { + assertFalse(beforeRefreshed); + assertFalse(afterRefreshed); + beforeRefreshed = true; + } + + @Override + public void afterRefresh(boolean didRefresh) throws IOException { + assertTrue(beforeRefreshed); + assertFalse(afterRefreshed); + afterRefreshed = true; + } + } + TestRefreshListener listener = new TestRefreshListener(); + try (Store store = createStore()) { + EngineConfig config = config(defaultSettings, store, createTempDir(), newMergePolicy()); + config.addRefreshListener(listener); + try (Engine engine = new InternalEngine(config)) { + assertFalse(listener.beforeRefreshed); + assertFalse(listener.afterRefreshed); + engine.refresh("test"); + assertTrue(listener.beforeRefreshed); + assertTrue(listener.afterRefreshed); + } + } + } } diff --git a/core/src/test/java/org/elasticsearch/index/engine/ShadowEngineTests.java b/core/src/test/java/org/elasticsearch/index/engine/ShadowEngineTests.java index 6695fb471f6ae..b171f09c3210f 100644 --- a/core/src/test/java/org/elasticsearch/index/engine/ShadowEngineTests.java +++ b/core/src/test/java/org/elasticsearch/index/engine/ShadowEngineTests.java @@ -32,6 +32,7 @@ import org.apache.lucene.index.SnapshotDeletionPolicy; import org.apache.lucene.index.Term; import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.ReferenceManager; import org.apache.lucene.search.TermQuery; import org.apache.lucene.store.Directory; import org.apache.lucene.store.MockDirectoryWrapper; @@ -74,6 +75,7 @@ import java.util.List; import java.util.concurrent.CountDownLatch; import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicReference; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; @@ -980,4 +982,44 @@ public void testNoTranslog() 
{ // all good } } + + public void testCreationListener() throws IOException { + AtomicReference listener = new AtomicReference<>(); + EngineConfig config = config(defaultSettings, store, createTempDir(), newMergePolicy()); + config.addEngineCreationListener(listener::set); + try (Engine created = new ShadowEngine(config)) { + assertSame(created, listener.get()); + } + } + + public void testRefreshListener() throws IOException { + class TestRefreshListener implements ReferenceManager.RefreshListener { + boolean beforeRefreshed = false; + boolean afterRefreshed = false; + + @Override + public void beforeRefresh() throws IOException { + assertFalse(beforeRefreshed); + assertFalse(afterRefreshed); + beforeRefreshed = true; + } + + @Override + public void afterRefresh(boolean didRefresh) throws IOException { + assertTrue(beforeRefreshed); + assertFalse(afterRefreshed); + afterRefreshed = true; + } + } + TestRefreshListener listener = new TestRefreshListener(); + EngineConfig config = config(defaultSettings, store, createTempDir(), newMergePolicy()); + config.addRefreshListener(listener); + try (Engine engine = new ShadowEngine(config)) { + assertFalse(listener.beforeRefreshed); + assertFalse(listener.afterRefreshed); + engine.refresh("test"); + assertTrue(listener.beforeRefreshed); + assertTrue(listener.afterRefreshed); + } + } } diff --git a/core/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java b/core/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java index 2b780e7bc4ee2..79c4dbfc48d45 100644 --- a/core/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java +++ b/core/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java @@ -122,10 +122,9 @@ store, new SnapshotDeletionPolicy(new KeepOnlyLastCommitDeletionPolicy()), newMe iwc.getSimilarity(), new CodecService(null, logger), eventListener, new TranslogHandler(shardId.getIndexName(), logger), IndexSearcher.getDefaultQueryCache(), IndexSearcher.getDefaultQueryCachingPolicy(), translogConfig, TimeValue.timeValueMinutes(5)); + config.addEngineCreationListener(listeners); + config.addRefreshListener(listeners); engine = new InternalEngine(config); - - // Finally, we can listen to the engine - listeners.listenTo(engine); } @After From 87ab6e60ca5ba945bf0fba84784b2bbe53506abf Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Tue, 31 May 2016 11:28:30 -0400 Subject: [PATCH 57/86] Shorten lock time in RefreshListeners Also use null to represent no listeners rather than an empty list. This saves allocating a new ArrayList every refresh cycle on every index. --- .../index/shard/RefreshListeners.java | 62 ++++++++++++++----- .../index/shard/RefreshListenersTests.java | 2 +- 2 files changed, 46 insertions(+), 18 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/index/shard/RefreshListeners.java b/core/src/main/java/org/elasticsearch/index/shard/RefreshListeners.java index 096042b54dedc..d9bafa6035bd2 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/RefreshListeners.java +++ b/core/src/main/java/org/elasticsearch/index/shard/RefreshListeners.java @@ -44,9 +44,11 @@ final class RefreshListeners implements EngineCreationListener, ReferenceManager private final Executor listenerExecutor; /** - * List of refresh listeners. Built new any time any entries are removed from it. Always modified while synchronized on {@code this}. + * List of refresh listeners. Defaults to null and built on demand because most refresh cycles won't need it. 
Entries are never removed + * from it, rather, it is nulled and rebuilt when needed again. The (hopefully) rare entries that didn't make the current refresh cycle + * are just added back to the new list. Both the reference and the contents are always modified while synchronized on {@code this}. */ - private volatile List>> refreshListeners = new ArrayList<>(); + private volatile List>> refreshListeners = null; /** * The translog location that was last made visible by a refresh. */ @@ -68,15 +70,18 @@ public RefreshListeners(IntSupplier getMaxRefreshListeners, Runnable forceRefres */ public void addOrNotify(Translog.Location location, Consumer listener) { requireNonNull(listener, "listener cannot be null"); - Translog.Location listenerLocation = requireNonNull(location, "location cannot be null"); + requireNonNull(location, "location cannot be null"); Translog.Location lastRefresh = lastRefreshedLocation; - if (lastRefresh != null && lastRefresh.compareTo(listenerLocation) >= 0) { + if (lastRefresh != null && lastRefresh.compareTo(location) >= 0) { // Location already visible, just call the listener listener.accept(false); return; } synchronized (this) { + if (refreshListeners == null) { + refreshListeners = new ArrayList<>(); + } if (refreshListeners.size() < getMaxRefreshListeners.getAsInt()) { // We have a free slot so register the listener refreshListeners.add(new Tuple<>(location, listener)); @@ -122,23 +127,46 @@ public void afterRefresh(boolean didRefresh) throws IOException { */ lastRefreshedLocation = currentRefreshLocation; /* - * Now pop all listeners off the front of refreshListeners that are ready to be called. The listeners won't always be in order - * but they should be pretty close because you don't listen to times super far in the future. This prevents us from having to - * iterate over the whole queue on every refresh at the cost of some requests having to wait an extra cycle if they get stuck - * behind a request that missed the refresh cycle. + * Grab the current refresh listeners and replace them with a new list while synchronized. Any listeners that come in after this + * won't be in the list we iterate over and very likely won't be candidates for refresh anyway because we've already moved the + * lastRefreshedLocation. */ - List>> newRefreshListeners = new ArrayList<>(); + List>> candidates; synchronized (this) { - for (Tuple> tuple : refreshListeners) { - Translog.Location location = tuple.v1(); - if (location.compareTo(currentRefreshLocation) > 0) { - newRefreshListeners.add(tuple); - } else { - Consumer listener = tuple.v2(); - listenerExecutor.execute(() -> listener.accept(false)); + candidates = refreshListeners; + // No listeners to check so just bail early + if (candidates == null) { + return; + } + refreshListeners = null; + } + /* + * Iterate the list of listeners, preserving the ones that we couldn't fire in a new list. We expect to fire most of them so this + * copying should be minimial. Much less overhead than removing all of the fired ones from the list. + */ + List>> preservedListeners = null; + for (Tuple> tuple : candidates) { + Translog.Location location = tuple.v1(); + Consumer listener = tuple.v2(); + if (location.compareTo(currentRefreshLocation) <= 0) { + listenerExecutor.execute(() -> listener.accept(false)); + } else { + if (preservedListeners == null) { + preservedListeners = new ArrayList<>(); + } + preservedListeners.add(tuple); + } + } + /* + * Now add any preserved listeners back to the running list of refresh listeners. 
We'll try them next time. + */ + if (preservedListeners != null) { + synchronized (this) { + if (refreshListeners == null) { + refreshListeners = new ArrayList<>(); } + refreshListeners.addAll(preservedListeners); } - refreshListeners = newRefreshListeners; } } } diff --git a/core/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java b/core/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java index 79c4dbfc48d45..78161e5879e1f 100644 --- a/core/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java +++ b/core/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java @@ -154,7 +154,7 @@ public void testTooMany() throws Exception { listeners.addOrNotify(index.getTranslogLocation(), forcingListener); assertTrue("Forced listener wasn't forced?", forcingListener.forcedRefresh.get()); - // That forces all the listeners through. On the listener thread pool so give them some time with assertBusy. + // That forces all the listeners through. It would be on the listener ThreadPool but we've made all of those execute immediately. for (DummyRefreshListener listener : nonForcedListeners) { assertEquals("Expected listener called with unforced refresh!", Boolean.FALSE, listener.forcedRefresh.get()); } From 2f579f89b4867a880396f2e7fcffc508449ff2de Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Tue, 31 May 2016 12:19:05 -0400 Subject: [PATCH 58/86] Clean up registration of RefreshListeners --- .../index/engine/EngineConfig.java | 56 +++------------ .../index/engine/InternalEngine.java | 9 +-- .../index/engine/ShadowEngine.java | 10 +-- .../elasticsearch/index/shard/IndexShard.java | 28 +++++--- .../index/shard/RefreshListeners.java | 12 ++-- .../index/shard/ShadowIndexShard.java | 5 +- .../index/engine/InternalEngineTests.java | 68 ++++--------------- .../index/engine/ShadowEngineTests.java | 65 +++++------------- .../index/shard/RefreshListenersTests.java | 4 +- 9 files changed, 74 insertions(+), 183 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/index/engine/EngineConfig.java b/core/src/main/java/org/elasticsearch/index/engine/EngineConfig.java index a499886a1215f..13408408e7ec2 100644 --- a/core/src/main/java/org/elasticsearch/index/engine/EngineConfig.java +++ b/core/src/main/java/org/elasticsearch/index/engine/EngineConfig.java @@ -24,8 +24,8 @@ import org.apache.lucene.index.SnapshotDeletionPolicy; import org.apache.lucene.search.QueryCache; import org.apache.lucene.search.QueryCachingPolicy; -import org.apache.lucene.search.ReferenceManager; import org.apache.lucene.search.similarities.Similarity; +import org.elasticsearch.common.Nullable; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.unit.ByteSizeUnit; @@ -33,6 +33,7 @@ import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.codec.CodecService; +import org.elasticsearch.index.shard.RefreshListeners; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.shard.TranslogRecoveryPerformer; import org.elasticsearch.index.store.Store; @@ -40,11 +41,6 @@ import org.elasticsearch.indices.IndexingMemoryController; import org.elasticsearch.threadpool.ThreadPool; -import java.util.ArrayList; -import java.util.List; - -import static java.util.Collections.unmodifiableList; - /* * Holds all the configuration that is used to create an {@link Engine}. 
* Once {@link Engine} has been created with this object, changes to this @@ -69,14 +65,8 @@ public final class EngineConfig { private final Engine.EventListener eventListener; private final QueryCache queryCache; private final QueryCachingPolicy queryCachingPolicy; - /** - * List of listeners for the engine to be created. - */ - private final List creationListeners = new ArrayList<>(); - /** - * List of listeners for the searcher being refreshed. - */ - private final List refreshListeners = new ArrayList<>(); + @Nullable + private final RefreshListeners refreshListeners; /** * Index setting to change the low level lucene codec used for writing new segments. @@ -110,7 +100,7 @@ public EngineConfig(OpenMode openMode, ShardId shardId, ThreadPool threadPool, MergePolicy mergePolicy,Analyzer analyzer, Similarity similarity, CodecService codecService, Engine.EventListener eventListener, TranslogRecoveryPerformer translogRecoveryPerformer, QueryCache queryCache, QueryCachingPolicy queryCachingPolicy, - TranslogConfig translogConfig, TimeValue flushMergesAfter) { + TranslogConfig translogConfig, TimeValue flushMergesAfter, RefreshListeners refreshListeners) { if (openMode == null) { throw new IllegalArgumentException("openMode must not be null"); } @@ -136,6 +126,7 @@ public EngineConfig(OpenMode openMode, ShardId shardId, ThreadPool threadPool, this.translogConfig = translogConfig; this.flushMergesAfter = flushMergesAfter; this.openMode = openMode; + this.refreshListeners = refreshListeners; } /** @@ -300,34 +291,6 @@ public OpenMode getOpenMode() { return openMode; } - /** - * Add a listener for engine creation. - */ - public void addEngineCreationListener(EngineCreationListener listener) { - creationListeners.add(listener); - } - - /** - * List of listeners for this engine. - */ - public List getEngineCreationListeners() { - return unmodifiableList(creationListeners); - } - - /** - * Add a refresh listener that will be registered with the index on creation. - */ - public void addRefreshListener(ReferenceManager.RefreshListener listener) { - refreshListeners.add(listener); - } - - /** - * List of refresh listeners for the searcher's refresh cycle. - */ - public List getRefreshListeners() { - return unmodifiableList(refreshListeners); - } - /** * Engine open mode defines how the engine should be opened or in other words what the engine should expect * to recover from. We either create a brand new engine with a new index and translog or we recover from an existing index. @@ -342,7 +305,10 @@ public enum OpenMode { OPEN_INDEX_AND_TRANSLOG; } - public interface EngineCreationListener { - void engineCreated(Engine engine); + /** + * {@linkplain RefreshListeners} instance to configure. 
+ */ + public RefreshListeners getRefreshListeners() { + return refreshListeners; } } diff --git a/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java b/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java index 474f41d4536aa..05fe848cc122a 100644 --- a/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java +++ b/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java @@ -32,7 +32,6 @@ import org.apache.lucene.index.SegmentInfos; import org.apache.lucene.index.Term; import org.apache.lucene.search.IndexSearcher; -import org.apache.lucene.search.ReferenceManager; import org.apache.lucene.search.SearcherFactory; import org.apache.lucene.search.SearcherManager; import org.apache.lucene.store.AlreadyClosedException; @@ -169,12 +168,8 @@ public InternalEngine(EngineConfig engineConfig) throws EngineException { } } } - for (EngineConfig.EngineCreationListener listener : engineConfig.getEngineCreationListeners()) { - listener.engineCreated(this); - } - for (ReferenceManager.RefreshListener listener : engineConfig.getRefreshListeners()) { - searcherManager.addListener(listener); - } + searcherManager.addListener(engineConfig.getRefreshListeners()); + engineConfig.getRefreshListeners().setTranslog(translog); logger.trace("created new InternalEngine"); } diff --git a/core/src/main/java/org/elasticsearch/index/engine/ShadowEngine.java b/core/src/main/java/org/elasticsearch/index/engine/ShadowEngine.java index 6933e58e64243..0a55803a5ecb2 100644 --- a/core/src/main/java/org/elasticsearch/index/engine/ShadowEngine.java +++ b/core/src/main/java/org/elasticsearch/index/engine/ShadowEngine.java @@ -22,7 +22,6 @@ import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexCommit; import org.apache.lucene.index.SegmentInfos; -import org.apache.lucene.search.ReferenceManager; import org.apache.lucene.search.SearcherFactory; import org.apache.lucene.search.SearcherManager; import org.apache.lucene.store.AlreadyClosedException; @@ -68,6 +67,9 @@ public class ShadowEngine extends Engine { public ShadowEngine(EngineConfig engineConfig) { super(engineConfig); + if (engineConfig.getRefreshListeners() != null) { + throw new IllegalArgumentException("ShadowEngine doesn't support RefreshListeners"); + } SearcherFactory searcherFactory = new EngineSearcherFactory(engineConfig); final long nonexistentRetryTime = engineConfig.getIndexSettings().getSettings() .getAsTime(NONEXISTENT_INDEX_RETRY_WAIT, DEFAULT_NONEXISTENT_INDEX_RETRY_WAIT) @@ -99,12 +101,6 @@ public ShadowEngine(EngineConfig engineConfig) { } catch (IOException ex) { throw new EngineCreationFailureException(shardId, "failed to open index reader", ex); } - for (EngineConfig.EngineCreationListener listener : engineConfig.getEngineCreationListeners()) { - listener.engineCreated(this); - } - for (ReferenceManager.RefreshListener listener : engineConfig.getRefreshListeners()) { - searcherManager.addListener(listener); - } logger.trace("created new ShadowEngine"); } diff --git a/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java index 9a7cf10a7fe61..d5dabd8245bab 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java +++ b/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java @@ -194,6 +194,11 @@ public class IndexShard extends AbstractIndexShardComponent { * IndexingMemoryController}). 
*/ private final AtomicBoolean active = new AtomicBoolean(); + /** + * Allows for the registration of listeners that are called when a change becomes visible for search. This is nullable because + * {@linkplain ShadowIndexShard} doesn't support this. + */ + @Nullable private final RefreshListeners refreshListeners; public IndexShard(ShardId shardId, IndexSettings indexSettings, ShardPath path, Store store, IndexCache indexCache, @@ -245,10 +250,7 @@ public IndexShard(ShardId shardId, IndexSettings indexSettings, ShardPath path, suspendableRefContainer = new SuspendableRefContainer(); searcherWrapper = indexSearcherWrapper; primaryTerm = indexSettings.getIndexMetaData().primaryTerm(shardId.id()); - refreshListeners = new RefreshListeners( - indexSettings::getMaxRefreshListeners, - () -> refresh("too_many_listeners"), - fire -> threadPool.executor(ThreadPool.Names.LISTENER).execute(fire)); + refreshListeners = buildRefreshListeners(); } public Store store() { @@ -920,7 +922,6 @@ private void internalPerformTranslogRecovery(boolean skipTranslogRecovery, boole // we disable deletes since we allow for operations to be executed against the shard while recovering // but we need to make sure we don't loose deletes until we are done recovering config.setEnableGcDeletes(false); - setupRefreshListeners(config); Engine newEngine = createNewEngine(config); verifyNotClosed(); if (openMode == EngineConfig.OpenMode.OPEN_INDEX_AND_TRANSLOG) { @@ -1374,11 +1375,6 @@ protected Engine newEngine(EngineConfig config) { return engineFactory.newReadWriteEngine(config); } - protected void setupRefreshListeners(EngineConfig config) { - config.addEngineCreationListener(refreshListeners); - config.addRefreshListener(refreshListeners); - } - /** * Returns true iff this shard allows primary promotion, otherwise false */ @@ -1421,7 +1417,7 @@ private final EngineConfig newEngineConfig(EngineConfig.OpenMode openMode, Trans return new EngineConfig(openMode, shardId, threadPool, indexSettings, warmer, store, deletionPolicy, indexSettings.getMergePolicy(), mapperService.indexAnalyzer(), similarityService.similarity(mapperService), codecService, shardEventListener, translogRecoveryPerformer, indexCache.query(), cachingPolicy, translogConfig, - IndexingMemoryController.SHARD_INACTIVE_TIME_SETTING.get(indexSettings.getSettings())); + IndexingMemoryController.SHARD_INACTIVE_TIME_SETTING.get(indexSettings.getSettings()), refreshListeners); } public Releasable acquirePrimaryOperationLock() { @@ -1517,6 +1513,16 @@ public void onAfter() { return false; } + /** + * Build {@linkplain RefreshListeners} for this shard. Protected so {@linkplain ShadowIndexShard} can override it to return null. 
+ */ + protected RefreshListeners buildRefreshListeners() { + return new RefreshListeners( + indexSettings::getMaxRefreshListeners, + () -> refresh("too_many_listeners"), + threadPool.executor(ThreadPool.Names.LISTENER)::execute); + } + /** * Simple struct encapsulating a shard failure * diff --git a/core/src/main/java/org/elasticsearch/index/shard/RefreshListeners.java b/core/src/main/java/org/elasticsearch/index/shard/RefreshListeners.java index d9bafa6035bd2..b6bd3961694ed 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/RefreshListeners.java +++ b/core/src/main/java/org/elasticsearch/index/shard/RefreshListeners.java @@ -21,8 +21,6 @@ import org.apache.lucene.search.ReferenceManager; import org.elasticsearch.common.collect.Tuple; -import org.elasticsearch.index.engine.Engine; -import org.elasticsearch.index.engine.EngineConfig.EngineCreationListener; import org.elasticsearch.index.translog.Translog; import java.io.IOException; @@ -38,7 +36,7 @@ * Allows for the registration of listeners that are called when a change becomes visible for search. This functionality is exposed from * {@link IndexShard} but kept here so it can be tested without standing up the entire thing. */ -final class RefreshListeners implements EngineCreationListener, ReferenceManager.RefreshListener { +public final class RefreshListeners implements ReferenceManager.RefreshListener { private final IntSupplier getMaxRefreshListeners; private final Runnable forceRefresh; private final Executor listenerExecutor; @@ -93,9 +91,11 @@ public void addOrNotify(Translog.Location location, Consumer listener) listener.accept(true); } - @Override - public void engineCreated(Engine engine) { - translog = engine.getTranslog(); + /** + * Setup the translog used to find the last refreshed location. + */ + public void setTranslog(Translog translog) { + this.translog = translog; } // Implementation of ReferenceManager.RefreshListener that adapts Lucene's RefreshListener into Elasticsearch's refresh listeners. diff --git a/core/src/main/java/org/elasticsearch/index/shard/ShadowIndexShard.java b/core/src/main/java/org/elasticsearch/index/shard/ShadowIndexShard.java index 9e939f9cf9cd4..c05b66ee242dd 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/ShadowIndexShard.java +++ b/core/src/main/java/org/elasticsearch/index/shard/ShadowIndexShard.java @@ -89,8 +89,9 @@ protected Engine newEngine(EngineConfig config) { } @Override - protected void setupRefreshListeners(EngineConfig config) { - // Intentionally not setting them up because the shadow replica doesn't have a Translog so it can't support RefreshListeners. + protected RefreshListeners buildRefreshListeners() { + // ShadowEngine doesn't have a translog so it shouldn't try to support RefreshListeners. 
+ return null; } @Override diff --git a/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java b/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java index 60366533acbfc..18784ba29aa56 100644 --- a/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java +++ b/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java @@ -43,7 +43,6 @@ import org.apache.lucene.index.TieredMergePolicy; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.MatchAllDocsQuery; -import org.apache.lucene.search.ReferenceManager; import org.apache.lucene.search.TermQuery; import org.apache.lucene.search.TopDocs; import org.apache.lucene.store.AlreadyClosedException; @@ -85,6 +84,7 @@ import org.elasticsearch.index.mapper.internal.UidFieldMapper; import org.elasticsearch.index.mapper.object.RootObjectMapper; import org.elasticsearch.index.shard.IndexSearcherWrapper; +import org.elasticsearch.index.shard.RefreshListeners; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.shard.ShardUtils; import org.elasticsearch.index.shard.TranslogRecoveryPerformer; @@ -202,7 +202,7 @@ public EngineConfig copy(EngineConfig config, EngineConfig.OpenMode openMode) { return new EngineConfig(openMode, config.getShardId(), config.getThreadPool(), config.getIndexSettings(), config.getWarmer(), config.getStore(), config.getDeletionPolicy(), config.getMergePolicy(), config.getAnalyzer(), config.getSimilarity(), new CodecService(null, logger), config.getEventListener(), config.getTranslogRecoveryPerformer(), config.getQueryCache(), - config.getQueryCachingPolicy(), config.getTranslogConfig(), config.getFlushMergesAfter()); + config.getQueryCachingPolicy(), config.getTranslogConfig(), config.getFlushMergesAfter(), config.getRefreshListeners()); } @Override @@ -293,14 +293,17 @@ public EngineConfig config(IndexSettings indexSettings, Store store, Path transl } catch (IOException e) { throw new ElasticsearchException("can't find index?", e); } - EngineConfig config = new EngineConfig(openMode, shardId, threadPool, indexSettings - , null, store, createSnapshotDeletionPolicy(), mergePolicy, - iwc.getAnalyzer(), iwc.getSimilarity(), new CodecService(null, logger), new Engine.EventListener() { + Engine.EventListener listener = new Engine.EventListener() { @Override public void onFailedEngine(String reason, @Nullable Throwable t) { // we don't need to notify anybody in this test } - }, new TranslogHandler(shardId.getIndexName(), logger), IndexSearcher.getDefaultQueryCache(), IndexSearcher.getDefaultQueryCachingPolicy(), translogConfig, TimeValue.timeValueMinutes(5)); + }; + RefreshListeners refreshListeners = new RefreshListeners(() -> 1, () -> {throw new UnsupportedOperationException();}, r -> r.run()); + EngineConfig config = new EngineConfig(openMode, shardId, threadPool, indexSettings, null, store, createSnapshotDeletionPolicy(), + mergePolicy, iwc.getAnalyzer(), iwc.getSimilarity(), new CodecService(null, logger), listener, + new TranslogHandler(shardId.getIndexName(), logger), IndexSearcher.getDefaultQueryCache(), + IndexSearcher.getDefaultQueryCachingPolicy(), translogConfig, TimeValue.timeValueMinutes(5), refreshListeners); return config; } @@ -1997,10 +2000,11 @@ public void testRecoverFromForeignTranslog() throws IOException { /* create a TranslogConfig that has been created with a different UUID */ TranslogConfig translogConfig = new TranslogConfig(shardId, translog.location(), config.getIndexSettings(), 
BigArrays.NON_RECYCLING_INSTANCE); - EngineConfig brokenConfig = new EngineConfig(EngineConfig.OpenMode.OPEN_INDEX_AND_TRANSLOG, shardId, threadPool, config.getIndexSettings() - , null, store, createSnapshotDeletionPolicy(), newMergePolicy(), - config.getAnalyzer(), config.getSimilarity(), new CodecService(null, logger), config.getEventListener() - , config.getTranslogRecoveryPerformer(), IndexSearcher.getDefaultQueryCache(), IndexSearcher.getDefaultQueryCachingPolicy(), translogConfig, TimeValue.timeValueMinutes(5)); + EngineConfig brokenConfig = new EngineConfig(EngineConfig.OpenMode.OPEN_INDEX_AND_TRANSLOG, shardId, threadPool, + config.getIndexSettings(), null, store, createSnapshotDeletionPolicy(), newMergePolicy(), config.getAnalyzer(), + config.getSimilarity(), new CodecService(null, logger), config.getEventListener(), config.getTranslogRecoveryPerformer(), + IndexSearcher.getDefaultQueryCache(), IndexSearcher.getDefaultQueryCachingPolicy(), translogConfig, + TimeValue.timeValueMinutes(5), config.getRefreshListeners()); try { InternalEngine internalEngine = new InternalEngine(brokenConfig); @@ -2115,48 +2119,4 @@ public void testCurrentTranslogIDisCommitted() throws IOException { } } } - - public void testCreationListener() throws IOException { - AtomicReference listener = new AtomicReference<>(); - try (Store store = createStore()) { - EngineConfig config = config(defaultSettings, store, createTempDir(), newMergePolicy()); - config.addEngineCreationListener(listener::set); - try (Engine created = new InternalEngine(config)) { - assertSame(created, listener.get()); - } - } - } - - public void testRefreshListener() throws IOException { - class TestRefreshListener implements ReferenceManager.RefreshListener { - boolean beforeRefreshed = false; - boolean afterRefreshed = false; - - @Override - public void beforeRefresh() throws IOException { - assertFalse(beforeRefreshed); - assertFalse(afterRefreshed); - beforeRefreshed = true; - } - - @Override - public void afterRefresh(boolean didRefresh) throws IOException { - assertTrue(beforeRefreshed); - assertFalse(afterRefreshed); - afterRefreshed = true; - } - } - TestRefreshListener listener = new TestRefreshListener(); - try (Store store = createStore()) { - EngineConfig config = config(defaultSettings, store, createTempDir(), newMergePolicy()); - config.addRefreshListener(listener); - try (Engine engine = new InternalEngine(config)) { - assertFalse(listener.beforeRefreshed); - assertFalse(listener.afterRefreshed); - engine.refresh("test"); - assertTrue(listener.beforeRefreshed); - assertTrue(listener.afterRefreshed); - } - } - } } diff --git a/core/src/test/java/org/elasticsearch/index/engine/ShadowEngineTests.java b/core/src/test/java/org/elasticsearch/index/engine/ShadowEngineTests.java index b171f09c3210f..ad74844ddba4a 100644 --- a/core/src/test/java/org/elasticsearch/index/engine/ShadowEngineTests.java +++ b/core/src/test/java/org/elasticsearch/index/engine/ShadowEngineTests.java @@ -32,7 +32,6 @@ import org.apache.lucene.index.SnapshotDeletionPolicy; import org.apache.lucene.index.Term; import org.apache.lucene.search.IndexSearcher; -import org.apache.lucene.search.ReferenceManager; import org.apache.lucene.search.TermQuery; import org.apache.lucene.store.Directory; import org.apache.lucene.store.MockDirectoryWrapper; @@ -54,6 +53,7 @@ import org.elasticsearch.index.mapper.ParsedDocument; import org.elasticsearch.index.mapper.internal.SourceFieldMapper; import org.elasticsearch.index.mapper.internal.UidFieldMapper; +import 
org.elasticsearch.index.shard.RefreshListeners; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.shard.ShardUtils; import org.elasticsearch.index.store.DirectoryService; @@ -75,7 +75,6 @@ import java.util.List; import java.util.concurrent.CountDownLatch; import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.atomic.AtomicReference; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; @@ -211,7 +210,7 @@ protected InternalEngine createInternalEngine(Store store, Path translogPath) { } protected ShadowEngine createShadowEngine(IndexSettings indexSettings, Store store) { - return new ShadowEngine(config(indexSettings, store, null, null)); + return new ShadowEngine(config(indexSettings, store, null, null, null)); } protected InternalEngine createInternalEngine(IndexSettings indexSettings, Store store, Path translogPath) { @@ -219,11 +218,12 @@ protected InternalEngine createInternalEngine(IndexSettings indexSettings, Store } protected InternalEngine createInternalEngine(IndexSettings indexSettings, Store store, Path translogPath, MergePolicy mergePolicy) { - EngineConfig config = config(indexSettings, store, translogPath, mergePolicy); + EngineConfig config = config(indexSettings, store, translogPath, mergePolicy, new RefreshListeners(null, null, null)); return new InternalEngine(config); } - public EngineConfig config(IndexSettings indexSettings, Store store, Path translogPath, MergePolicy mergePolicy) { + public EngineConfig config(IndexSettings indexSettings, Store store, Path translogPath, MergePolicy mergePolicy, + RefreshListeners refreshListeners) { IndexWriterConfig iwc = newIndexWriterConfig(); final EngineConfig.OpenMode openMode; try { @@ -235,14 +235,17 @@ public EngineConfig config(IndexSettings indexSettings, Store store, Path transl } catch (IOException e) { throw new ElasticsearchException("can't find index?", e); } - TranslogConfig translogConfig = new TranslogConfig(shardId, translogPath, indexSettings, BigArrays.NON_RECYCLING_INSTANCE); - EngineConfig config = new EngineConfig(openMode, shardId, threadPool, indexSettings - , null, store, createSnapshotDeletionPolicy(), mergePolicy, - iwc.getAnalyzer(), iwc.getSimilarity() , new CodecService(null, logger), new Engine.EventListener() { + Engine.EventListener eventListener = new Engine.EventListener() { @Override public void onFailedEngine(String reason, @Nullable Throwable t) { // we don't need to notify anybody in this test - }}, null, IndexSearcher.getDefaultQueryCache(), IndexSearcher.getDefaultQueryCachingPolicy(), translogConfig, TimeValue.timeValueMinutes(5)); + } + }; + TranslogConfig translogConfig = new TranslogConfig(shardId, translogPath, indexSettings, BigArrays.NON_RECYCLING_INSTANCE); + EngineConfig config = new EngineConfig(openMode, shardId, threadPool, indexSettings, null, store, createSnapshotDeletionPolicy(), + mergePolicy, iwc.getAnalyzer(), iwc.getSimilarity(), new CodecService(null, logger), eventListener, null, + IndexSearcher.getDefaultQueryCache(), IndexSearcher.getDefaultQueryCachingPolicy(), translogConfig, + TimeValue.timeValueMinutes(5), refreshListeners); return config; } @@ -983,43 +986,9 @@ public void testNoTranslog() { } } - public void testCreationListener() throws IOException { - AtomicReference listener = new AtomicReference<>(); - EngineConfig config = config(defaultSettings, store, createTempDir(), newMergePolicy()); - config.addEngineCreationListener(listener::set); - try (Engine created = new 
ShadowEngine(config)) { - assertSame(created, listener.get()); - } - } - - public void testRefreshListener() throws IOException { - class TestRefreshListener implements ReferenceManager.RefreshListener { - boolean beforeRefreshed = false; - boolean afterRefreshed = false; - - @Override - public void beforeRefresh() throws IOException { - assertFalse(beforeRefreshed); - assertFalse(afterRefreshed); - beforeRefreshed = true; - } - - @Override - public void afterRefresh(boolean didRefresh) throws IOException { - assertTrue(beforeRefreshed); - assertFalse(afterRefreshed); - afterRefreshed = true; - } - } - TestRefreshListener listener = new TestRefreshListener(); - EngineConfig config = config(defaultSettings, store, createTempDir(), newMergePolicy()); - config.addRefreshListener(listener); - try (Engine engine = new ShadowEngine(config)) { - assertFalse(listener.beforeRefreshed); - assertFalse(listener.afterRefreshed); - engine.refresh("test"); - assertTrue(listener.beforeRefreshed); - assertTrue(listener.afterRefreshed); - } + public void testRefreshListenersFails() throws IOException { + EngineConfig config = config(defaultSettings, store, createTempDir(), newMergePolicy(), new RefreshListeners(null, null, null)); + Exception e = expectThrows(IllegalArgumentException.class, () -> new ShadowEngine(config)); + assertEquals("ShadowEngine doesn't support RefreshListeners", e.getMessage()); } } diff --git a/core/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java b/core/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java index 78161e5879e1f..9f7d10720a41b 100644 --- a/core/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java +++ b/core/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java @@ -121,9 +121,7 @@ public void onFailedEngine(String reason, @Nullable Throwable t) { store, new SnapshotDeletionPolicy(new KeepOnlyLastCommitDeletionPolicy()), newMergePolicy(), iwc.getAnalyzer(), iwc.getSimilarity(), new CodecService(null, logger), eventListener, new TranslogHandler(shardId.getIndexName(), logger), IndexSearcher.getDefaultQueryCache(), IndexSearcher.getDefaultQueryCachingPolicy(), translogConfig, - TimeValue.timeValueMinutes(5)); - config.addEngineCreationListener(listeners); - config.addRefreshListener(listeners); + TimeValue.timeValueMinutes(5), listeners); engine = new InternalEngine(config); } From bb2739202e084703baf02cfa58f09517598cf14e Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Tue, 31 May 2016 13:08:08 -0400 Subject: [PATCH 59/86] Remove duplication in WritePrimaryResult and WriteReplicaResult --- .../replication/TransportWriteAction.java | 106 +++++++++--------- 1 file changed, 52 insertions(+), 54 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java b/core/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java index 29101c5d87b1b..fded7f3614bc1 100644 --- a/core/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java +++ b/core/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java @@ -37,6 +37,7 @@ import org.elasticsearch.transport.TransportResponse; import org.elasticsearch.transport.TransportService; +import java.util.function.Consumer; import java.util.function.Supplier; /** @@ -117,34 +118,16 @@ public WritePrimaryResult(Request request, Response finalResponse, @Nullable Translog.Location location, IndexShard indexShard) { super(request, 
finalResponse); - boolean fsyncTranslog = indexShard.getTranslogDurability() == Translog.Durability.REQUEST && location != null; - if (fsyncTranslog) { - indexShard.sync(location); - } - indexShard.maybeFlush(); - switch (request.getRefreshPolicy()) { - case IMMEDIATE: - indexShard.refresh("refresh_flag_index"); - finalResponse.setForcedRefresh(true); - break; - case WAIT_UNTIL: - if (location != null) { - refreshPending = true; - indexShard.addRefreshListener(location, forcedRefresh -> { - synchronized (WritePrimaryResult.this) { - if (forcedRefresh) { - finalResponse.setForcedRefresh(true); - logger.warn("block_until_refresh request ran out of slots and forced a refresh: [{}]", request); - } - refreshPending = false; - respondIfPossible(); - } - }); + refreshPending = finishWrite(indexShard, request, location, forcedRefresh -> { + synchronized (WritePrimaryResult.this) { + if (forcedRefresh) { + finalResponse.setForcedRefresh(true); + logger.warn("block_until_refresh request ran out of slots and forced a refresh: [{}]", request); + } + refreshPending = false; + respondIfPossible(); } - break; - case NONE: - break; - } + }); } @Override @@ -166,9 +149,9 @@ protected void respondIfPossible() { private class WriteReplicaResult extends ReplicaResult { private final IndexShard indexShard; private final ReplicatedWriteRequest request; - private final Translog.Location location; + private final Translog.Location location; - public WriteReplicaResult(IndexShard indexShard, ReplicatedWriteRequest request, Location location) { + public WriteReplicaResult(IndexShard indexShard, ReplicatedWriteRequest request, Translog.Location location) { this.indexShard = indexShard; this.request = request; this.location = location; @@ -176,34 +159,49 @@ public WriteReplicaResult(IndexShard indexShard, ReplicatedWriteRequest reque @Override public void respond(ActionListener listener) { - // NOCOMMIT deduplicate with the WritePrimaryResult - boolean fsyncTranslog = indexShard.getTranslogDurability() == Translog.Durability.REQUEST && location != null; - if (fsyncTranslog) { - indexShard.sync(location); - } - indexShard.maybeFlush(); - boolean forked = false; - switch (request.getRefreshPolicy()) { - case IMMEDIATE: - indexShard.refresh("refresh_flag_index"); - break; - case WAIT_UNTIL: - if (location != null) { - forked = true; - indexShard.addRefreshListener(location, forcedRefresh -> { - if (forcedRefresh) { - logger.warn("block_until_refresh request ran out of slots and forced a refresh: [{}]", request); - } - listener.onResponse(TransportResponse.Empty.INSTANCE); - }); + boolean refreshPending = finishWrite(indexShard, request, location, forcedRefresh -> { + if (forcedRefresh) { + logger.warn("block_until_refresh request ran out of slots and forced a refresh: [{}]", request); } - break; - case NONE: - break; - } - if (false == forked) { listener.onResponse(TransportResponse.Empty.INSTANCE); + }); + if (false == refreshPending) { + listener.onResponse(TransportResponse.Empty.INSTANCE); + } + } + } + + /** + * Finish up the write by syncing the translog, flushing, and refreshing or waiting for a refresh. Called on both the primary and the + * replica. + * + * @param refreshListener used to signal that a refresh has made this change visible (see + * {@link IndexShard#addRefreshListener(Location, Consumer)}). Only called if this method returns true. Otherwise you shouldn't + * wait for it because it'll never be called. 
+ * @return true if this request should wait for the refreshListener to be called or false if this method only took synchronous actions + * and won't call the listener at all + */ + private static boolean finishWrite(IndexShard indexShard, ReplicatedWriteRequest request, Translog.Location location, + Consumer refreshListener) { + boolean fsyncTranslog = indexShard.getTranslogDurability() == Translog.Durability.REQUEST && location != null; + if (fsyncTranslog) { + indexShard.sync(location); + } + indexShard.maybeFlush(); + boolean async = false; + switch (request.getRefreshPolicy()) { + case IMMEDIATE: + indexShard.refresh("refresh_flag_index"); + break; + case WAIT_UNTIL: + if (location != null) { + async = true; + indexShard.addRefreshListener(location, refreshListener); } + break; + case NONE: + break; } + return async; } } From 43ce50a1de250a9e073a2ca6cbf55c1b4c74b11b Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Tue, 31 May 2016 14:02:56 -0400 Subject: [PATCH 60/86] Delay translog sync and flush until after refresh The sync might have occurred for us during the refresh so we have less work to do. Maybe. --- .../replication/TransportWriteAction.java | 24 ++++++++++++------- 1 file changed, 16 insertions(+), 8 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java b/core/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java index fded7f3614bc1..5caa5ba994564 100644 --- a/core/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java +++ b/core/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java @@ -184,24 +184,32 @@ public void respond(ActionListener listener) { private static boolean finishWrite(IndexShard indexShard, ReplicatedWriteRequest request, Translog.Location location, Consumer refreshListener) { boolean fsyncTranslog = indexShard.getTranslogDurability() == Translog.Durability.REQUEST && location != null; - if (fsyncTranslog) { - indexShard.sync(location); - } - indexShard.maybeFlush(); - boolean async = false; + boolean refreshPending = false; switch (request.getRefreshPolicy()) { case IMMEDIATE: indexShard.refresh("refresh_flag_index"); break; case WAIT_UNTIL: if (location != null) { - async = true; - indexShard.addRefreshListener(location, refreshListener); + refreshPending = true; + indexShard.addRefreshListener(location, forcedRefresh -> { + if (fsyncTranslog) { + indexShard.sync(location); + } + indexShard.maybeFlush(); + refreshListener.accept(forcedRefresh); + }); } break; case NONE: break; } - return async; + if (false == refreshPending) { + if (fsyncTranslog) { + indexShard.sync(location); + } + indexShard.maybeFlush(); + } + return refreshPending; } } From 5797d1b1c4d233c0db918c0d08c21731ddccd05e Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Tue, 31 May 2016 15:02:34 -0400 Subject: [PATCH 61/86] Fix forced_refresh flag It wasn't being set. 
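For orientation only (this text is commentary, not part of the patch): a minimal caller-side sketch of how the refresh policy that this flag reports on is requested through the Java API introduced earlier in this series. The index, type, and field names are made up for illustration.

    import org.elasticsearch.action.index.IndexRequest;
    import org.elasticsearch.action.support.WriteRequest.RefreshPolicy;

    // Hypothetical example: build an index request that waits for a refresh to
    // make the write visible to search before the response is sent.
    static IndexRequest waitForRefreshExample() {
        IndexRequest request = new IndexRequest("test_1", "test", "1")
                .source("foo", "bar");
        // WAIT_UNTIL waits for a refresh, IMMEDIATE forces one, and NONE (the
        // default) takes no refresh related action. A WAIT_UNTIL request that
        // runs out of listener slots forces a refresh and its response carries
        // "forced_refresh": true, which is the flag this commit fixes.
        return request.setRefreshPolicy(RefreshPolicy.WAIT_UNTIL);
    }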
--- .../replication/TransportWriteAction.java | 148 +++++++++++------- .../resources/rest-api-spec/api/bulk.json | 10 +- .../resources/rest-api-spec/api/delete.json | 10 +- .../resources/rest-api-spec/api/index.json | 10 +- .../resources/rest-api-spec/api/update.json | 10 +- ..._refresh.yaml => 50_wait_for_refresh.yaml} | 4 +- .../rest-api-spec/test/create/60_refresh.yaml | 3 +- .../rest-api-spec/test/delete/50_refresh.yaml | 41 ++++- .../test/delete/55_block_until_refresh.yaml | 34 ---- .../rest-api-spec/test/index/60_refresh.yaml | 22 ++- .../test/index/65_block_until_refresh.yaml | 18 --- .../rest-api-spec/test/update/60_refresh.yaml | 39 ++++- .../test/update/65_block_until_refresh.yaml | 36 ----- 13 files changed, 204 insertions(+), 181 deletions(-) rename rest-api-spec/src/main/resources/rest-api-spec/test/bulk/{50_block_until_refresh.yaml => 50_wait_for_refresh.yaml} (77%) delete mode 100644 rest-api-spec/src/main/resources/rest-api-spec/test/delete/55_block_until_refresh.yaml delete mode 100644 rest-api-spec/src/main/resources/rest-api-spec/test/index/65_block_until_refresh.yaml delete mode 100644 rest-api-spec/src/main/resources/rest-api-spec/test/update/65_block_until_refresh.yaml diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java b/core/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java index 5caa5ba994564..2abb781b48f33 100644 --- a/core/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java +++ b/core/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java @@ -21,6 +21,7 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.WriteRequest.RefreshPolicy; import org.elasticsearch.action.support.replication.ReplicationResponse.ShardInfo; import org.elasticsearch.cluster.action.shard.ShardStateAction; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; @@ -37,7 +38,6 @@ import org.elasticsearch.transport.TransportResponse; import org.elasticsearch.transport.TransportService; -import java.util.function.Consumer; import java.util.function.Supplier; /** @@ -110,24 +110,17 @@ public Translog.Location getLocation() { } } - private class WritePrimaryResult extends PrimaryResult { - volatile boolean refreshPending; + private class WritePrimaryResult extends PrimaryResult implements RespondingWriteResult { + private final ReplicatedWriteRequest request; + volatile boolean finishedWrite; volatile ActionListener listener = null; public WritePrimaryResult(Request request, Response finalResponse, @Nullable Translog.Location location, IndexShard indexShard) { super(request, finalResponse); - refreshPending = finishWrite(indexShard, request, location, forcedRefresh -> { - synchronized (WritePrimaryResult.this) { - if (forcedRefresh) { - finalResponse.setForcedRefresh(true); - logger.warn("block_until_refresh request ran out of slots and forced a refresh: [{}]", request); - } - refreshPending = false; - respondIfPossible(); - } - }); + this.request = request; + finishWrite(indexShard, request, location); } @Override @@ -140,16 +133,33 @@ public synchronized void respond(ActionListener listener) { * Respond if the refresh has occurred and the listener is ready. Always called while synchronized on {@code this}. 
*/ protected void respondIfPossible() { - if (refreshPending == false && listener != null) { + if (finishedWrite && listener != null) { super.respond(listener); } } + + @Override + public void forcedRefresh() { + finalResponse.setForcedRefresh(true); + } + + @Override + public void waitForRefreshForcedRefresh() { + logger.warn("block_until_refresh request ran out of slots and forced a refresh: [{}]", request); + } + + @Override + public synchronized void finished() { + finishedWrite = true; + respondIfPossible(); + } } - private class WriteReplicaResult extends ReplicaResult { + private class WriteReplicaResult extends ReplicaResult implements RespondingWriteResult { private final IndexShard indexShard; private final ReplicatedWriteRequest request; private final Translog.Location location; + private ActionListener listener; public WriteReplicaResult(IndexShard indexShard, ReplicatedWriteRequest request, Translog.Location location) { this.indexShard = indexShard; @@ -159,57 +169,81 @@ public WriteReplicaResult(IndexShard indexShard, ReplicatedWriteRequest reque @Override public void respond(ActionListener listener) { - boolean refreshPending = finishWrite(indexShard, request, location, forcedRefresh -> { - if (forcedRefresh) { - logger.warn("block_until_refresh request ran out of slots and forced a refresh: [{}]", request); - } - listener.onResponse(TransportResponse.Empty.INSTANCE); - }); - if (false == refreshPending) { - listener.onResponse(TransportResponse.Empty.INSTANCE); - } + this.listener = listener; + finishWrite(indexShard, request, location); + } + + @Override + public void forcedRefresh() { + // We don't have anywhere to store this so we just throw it on the floor for now. + } + + @Override + public void waitForRefreshForcedRefresh() { + logger.warn("block_until_refresh request ran out of slots and forced a refresh: [{}]", request); + } + + @Override + public void finished() { + listener.onResponse(TransportResponse.Empty.INSTANCE); } } - /** - * Finish up the write by syncing the translog, flushing, and refreshing or waiting for a refresh. Called on both the primary and the - * replica. - * - * @param refreshListener used to signal that a refresh has made this change visible (see - * {@link IndexShard#addRefreshListener(Location, Consumer)}). Only called if this method returns true. Otherwise you shouldn't - * wait for it because it'll never be called. - * @return true if this request should wait for the refreshListener to be called or false if this method only took synchronous actions - * and won't call the listener at all - */ - private static boolean finishWrite(IndexShard indexShard, ReplicatedWriteRequest request, Translog.Location location, - Consumer refreshListener) { - boolean fsyncTranslog = indexShard.getTranslogDurability() == Translog.Durability.REQUEST && location != null; - boolean refreshPending = false; - switch (request.getRefreshPolicy()) { - case IMMEDIATE: - indexShard.refresh("refresh_flag_index"); - break; - case WAIT_UNTIL: - if (location != null) { - refreshPending = true; - indexShard.addRefreshListener(location, forcedRefresh -> { - if (fsyncTranslog) { - indexShard.sync(location); - } - indexShard.maybeFlush(); - refreshListener.accept(forcedRefresh); - }); + private interface RespondingWriteResult { + /** + * Finish up the write by syncing the translog, flushing, and refreshing or waiting for a refresh. Called on both the primary and + * the replica. 
+ */ + default void finishWrite(IndexShard indexShard, ReplicatedWriteRequest request, Translog.Location location) { + boolean refreshPending = false; + switch (request.getRefreshPolicy()) { + case IMMEDIATE: + indexShard.refresh("refresh_flag_index"); + forcedRefresh(); + break; + case WAIT_UNTIL: + if (location != null) { + refreshPending = true; + indexShard.addRefreshListener(location, forcedRefresh -> { + if (forcedRefresh) { + forcedRefresh(); + waitForRefreshForcedRefresh(); + } + finish(indexShard, location); + }); + } + break; + case NONE: + break; + } + if (false == refreshPending) { + finish(indexShard, location); } - break; - case NONE: - break; } - if (false == refreshPending) { + + default void finish(IndexShard indexShard, Translog.Location location) { + boolean fsyncTranslog = indexShard.getTranslogDurability() == Translog.Durability.REQUEST && location != null; if (fsyncTranslog) { indexShard.sync(location); } indexShard.maybeFlush(); + finished(); } - return refreshPending; + + /** + * Called either when the request forces a refresh via {@link RefreshPolicy#IMMEDIATE} or when we run out of listeners slots while + * attempting to honor {@link RefreshPolicy#WAIT_UNTIL}. + */ + void forcedRefresh(); + + /** + * Called when we run out of listeners slots while attempting to honor {@link RefreshPolicy#WAIT_UNTIL}. + */ + void waitForRefreshForcedRefresh(); + + /** + * Called when we are finished waiting for a refresh. This is never called if we don't wait for a refresh. + */ + void finished(); } } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/bulk.json b/rest-api-spec/src/main/resources/rest-api-spec/api/bulk.json index 3d58c9ec982a8..a75daf35204e3 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/bulk.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/bulk.json @@ -16,19 +16,15 @@ } }, "params": { - "block_until_refresh": { - "type" : "boolean", - "default": false, - "description" : "Do not return from the request until the changes this request makes is visible by search" - }, "consistency": { "type" : "enum", "options" : ["one", "quorum", "all"], "description" : "Explicit write consistency setting for the operation" }, "refresh": { - "type" : "boolean", - "description" : "Refresh the index after performing the operation" + "type" : "enum", + "options": ["true", "false", "wait_for"], + "description" : "If `true` then refresh the effected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` (the default) then do nothing with refreshes." 
}, "routing": { "type" : "string", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/delete.json b/rest-api-spec/src/main/resources/rest-api-spec/api/delete.json index 3eb81f20fbbc6..5bb0e3fed4c50 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/delete.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/delete.json @@ -23,11 +23,6 @@ } }, "params": { - "block_until_refresh": { - "type" : "boolean", - "default": false, - "description" : "Do not return from the request until the changes this request makes is visible by search" - }, "consistency": { "type" : "enum", "options" : ["one", "quorum", "all"], @@ -38,8 +33,9 @@ "description" : "ID of parent document" }, "refresh": { - "type" : "boolean", - "description" : "Refresh the index after performing the operation" + "type" : "enum", + "options": ["true", "false", "wait_for"], + "description" : "If `true` then refresh the effected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` (the default) then do nothing with refreshes." }, "routing": { "type" : "string", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/index.json b/rest-api-spec/src/main/resources/rest-api-spec/api/index.json index 1d7ab116e8c20..b7f7eeb9ef531 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/index.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/index.json @@ -22,11 +22,6 @@ } }, "params": { - "block_until_refresh": { - "type" : "boolean", - "default": false, - "description" : "Do not return from the request until the changes this request makes is visible by search" - }, "consistency": { "type" : "enum", "options" : ["one", "quorum", "all"], @@ -43,8 +38,9 @@ "description" : "ID of the parent document" }, "refresh": { - "type" : "boolean", - "description" : "Refresh the index after performing the operation" + "type" : "enum", + "options": ["true", "false", "wait_for"], + "description" : "If `true` then refresh the effected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` (the default) then do nothing with refreshes." }, "routing": { "type" : "string", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/update.json b/rest-api-spec/src/main/resources/rest-api-spec/api/update.json index 2a318040e4e42..4a3f134301d89 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/update.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/update.json @@ -23,11 +23,6 @@ } }, "params": { - "block_until_refresh": { - "type" : "boolean", - "default": false, - "description" : "Do not return from the request until the changes this request makes is visible by search" - }, "consistency": { "type": "enum", "options": ["one", "quorum", "all"], @@ -46,8 +41,9 @@ "description": "ID of the parent document. Is is only used for routing and when for the upsert request" }, "refresh": { - "type": "boolean", - "description": "Refresh the index after performing the operation" + "type" : "enum", + "options": ["true", "false", "wait_for"], + "description" : "If `true` then refresh the effected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` (the default) then do nothing with refreshes." 
}, "retry_on_conflict": { "type": "number", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/50_block_until_refresh.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/50_wait_for_refresh.yaml similarity index 77% rename from rest-api-spec/src/main/resources/rest-api-spec/test/bulk/50_block_until_refresh.yaml rename to rest-api-spec/src/main/resources/rest-api-spec/test/bulk/50_wait_for_refresh.yaml index 614d3ce6bd4f3..e0382c740f4f5 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/50_block_until_refresh.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/50_wait_for_refresh.yaml @@ -1,8 +1,8 @@ --- -"block_until_refresh waits until changes are visible in search": +"refresh=wait_for waits until changes are visible in search": - do: bulk: - block_until_refresh: true + refresh: wait_for body: | {"index": {"_index": "test_index", "_type": "test_type", "_id": "test_id"}} {"f1": "v1", "f2": 42} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/create/60_refresh.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/create/60_refresh.yaml index 99bfbc3cff62e..e8770a7a48f70 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/create/60_refresh.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/create/60_refresh.yaml @@ -33,8 +33,9 @@ index: test_1 type: test id: 2 - refresh: 1 + refresh: true body: { foo: bar } + - is_true: forced_refresh - do: search: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/delete/50_refresh.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/delete/50_refresh.yaml index 6cdd135b154f4..ff6a63bbe0ae5 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/delete/50_refresh.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/delete/50_refresh.yaml @@ -19,7 +19,7 @@ type: test id: 1 body: { foo: bar } - refresh: 1 + refresh: true # If you wonder why this document get 3 as an id instead of 2, it is because the # current routing algorithm would route 1 and 2 to the same shard while we need @@ -30,7 +30,7 @@ type: test id: 3 body: { foo: bar } - refresh: 1 + refresh: true - is_true: forced_refresh - do: @@ -62,7 +62,7 @@ index: test_1 type: test id: 3 - refresh: 1 + refresh: true # If a replica shard where doc 1 is located gets initialized at this point, doc 1 # won't be found by the following search as the shard gets automatically refreshed @@ -76,3 +76,38 @@ query: { terms: { _id: [1,3] }} - match: { hits.total: 1 } + +--- +"refresh=wait_for waits until changes are visible in search": + - do: + index: + index: test_1 + type: test + id: 1 + body: { foo: bar } + refresh: true + - is_true: forced_refresh + + - do: + search: + index: test_1 + type: test + body: + query: { term: { _id: 1 }} + - match: { hits.total: 1 } + + - do: + delete: + index: test_1 + type: test + id: 1 + refresh: wait_for + - is_false: forced_refresh + + - do: + search: + index: test_1 + type: test + body: + query: { term: { _id: 1 }} + - match: { hits.total: 0 } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/delete/55_block_until_refresh.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/delete/55_block_until_refresh.yaml deleted file mode 100644 index fb9453e414656..0000000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/delete/55_block_until_refresh.yaml +++ /dev/null @@ -1,34 +0,0 @@ ---- -"block_until_refresh waits until changes are visible in search": - - do: - index: - index: test_1 - type: test - id: 1 - 
body: { foo: bar } - refresh: true - - is_true: forced_refresh - - - do: - search: - index: test_1 - type: test - body: - query: { term: { _id: 1 }} - - match: { hits.total: 1 } - - - do: - delete: - index: test_1 - type: test - id: 1 - block_until_refresh: true - - is_false: forced_refresh - - - do: - search: - index: test_1 - type: test - body: - query: { term: { _id: 1 }} - - match: { hits.total: 0 } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/index/60_refresh.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/index/60_refresh.yaml index af6ea59766fc0..1f5953876d8ea 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/index/60_refresh.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/index/60_refresh.yaml @@ -33,8 +33,9 @@ index: test_1 type: test id: 2 - refresh: 1 + refresh: true body: { foo: bar } + - is_true: forced_refresh - do: search: @@ -44,3 +45,22 @@ query: { term: { _id: 2 }} - match: { hits.total: 1 } + +--- +"refresh=wait_for waits until changes are visible in search": + - do: + index: + index: test_1 + type: test + id: 1 + body: { foo: bar } + refresh: wait_for + - is_false: forced_refresh + + - do: + search: + index: test_1 + type: test + body: + query: { term: { _id: 1 }} + - match: { hits.total: 1 } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/index/65_block_until_refresh.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/index/65_block_until_refresh.yaml deleted file mode 100644 index dc1522dee3464..0000000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/index/65_block_until_refresh.yaml +++ /dev/null @@ -1,18 +0,0 @@ ---- -"block_until_refresh waits until changes are visible in search": - - do: - index: - index: test_1 - type: test - id: 1 - body: { foo: bar } - block_until_refresh: true - - is_false: forced_refresh - - - do: - search: - index: test_1 - type: test - body: - query: { term: { _id: 1 }} - - match: { hits.total: 1 } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/update/60_refresh.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/update/60_refresh.yaml index 5dc952084d781..31a880c7ab66a 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/update/60_refresh.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/update/60_refresh.yaml @@ -35,7 +35,7 @@ index: test_1 type: test id: 2 - refresh: 1 + refresh: true body: doc: { foo: baz } upsert: { foo: bar } @@ -49,3 +49,40 @@ query: { term: { _id: 2 }} - match: { hits.total: 1 } + +--- +"refresh=wait_for waits until changes are visible in search": + - do: + index: + index: test_1 + type: test + id: 1 + body: { foo: bar } + refresh: true + - is_true: forced_refresh + + - do: + search: + index: test_1 + type: test + body: + query: { term: { _id: 1 }} + - match: { hits.total: 1 } + + - do: + update: + index: test_1 + type: test + id: 1 + refresh: wait_for + body: + doc: { test: asdf } + - is_false: forced_refresh + + - do: + search: + index: test_1 + type: test + body: + query: { match: { test: asdf } } + - match: { hits.total: 1 } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/update/65_block_until_refresh.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/update/65_block_until_refresh.yaml deleted file mode 100644 index 02e246229e59d..0000000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/update/65_block_until_refresh.yaml +++ /dev/null @@ -1,36 +0,0 @@ ---- -"block_until_refresh waits until changes are visible in search": - - 
do: - index: - index: test_1 - type: test - id: 1 - body: { foo: bar } - refresh: true - - is_true: forced_refresh - - - do: - search: - index: test_1 - type: test - body: - query: { term: { _id: 1 }} - - match: { hits.total: 1 } - - - do: - update: - index: test_1 - type: test - id: 1 - block_until_refresh: true - body: - doc: { test: asdf } - - is_false: forced_refresh - - - do: - search: - index: test_1 - type: test - body: - query: { match: { test: asdf } } - - match: { hits.total: 1 } From fb16d2fc7016c1e8e1621d481e8781c7ef43326c Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Tue, 31 May 2016 16:14:48 -0400 Subject: [PATCH 62/86] Rewrite refresh docs --- docs/reference/docs.asciidoc | 2 +- .../docs/block-until-refresh.asciidoc | 42 ----------- docs/reference/docs/bulk.asciidoc | 18 +---- docs/reference/docs/delete.asciidoc | 16 +--- docs/reference/docs/index_.asciidoc | 19 +---- docs/reference/docs/refresh.asciidoc | 75 +++++++++++++++++++ docs/reference/docs/update.asciidoc | 14 +--- docs/reference/index-modules.asciidoc | 2 +- 8 files changed, 85 insertions(+), 103 deletions(-) delete mode 100644 docs/reference/docs/block-until-refresh.asciidoc create mode 100644 docs/reference/docs/refresh.asciidoc diff --git a/docs/reference/docs.asciidoc b/docs/reference/docs.asciidoc index 295cd95f6dae7..4e0136c05787c 100644 --- a/docs/reference/docs.asciidoc +++ b/docs/reference/docs.asciidoc @@ -43,4 +43,4 @@ include::docs/termvectors.asciidoc[] include::docs/multi-termvectors.asciidoc[] -include::docs/block-until-refresh.asciidoc[] +include::docs/refresh.asciidoc[] diff --git a/docs/reference/docs/block-until-refresh.asciidoc b/docs/reference/docs/block-until-refresh.asciidoc deleted file mode 100644 index bae90d6a38cd9..0000000000000 --- a/docs/reference/docs/block-until-refresh.asciidoc +++ /dev/null @@ -1,42 +0,0 @@ -[[block-until-refresh]] -== Block Until Refresh - -The <>, <>, <>, and -<> APIs support setting `block_until_refresh` to `true` which -will cause Elasticsearch to wait for the changes made by the request to become -visible to search before returning. The operation that makes the changes -visible is called a refresh and Elasticsearch automatically performs one on -every shard that has been modified every `index.refresh_interval` which -defaults to one second. That setting is <>. - -Refreshes can also be forced with the <> and that will also -cause any requests that blocking-until-refresh to return. Setting `refresh` to -`true` in any of the APIs that support it will also force a refresh but that -refresh typically is only on the shards affected by the request. - -=== Compared to Setting `refresh` to `true` - -All of the APIs that support `block_until_refresh` also support `refresh`, but -`refresh` is a much heavier operation, causing Elasticsearch to immediately -make all pending changes visible for search on the shards affected by the -request immediately. Frequently forcing refreshes causes Elasticsearch to make -many small segments. Searching and creating many small segments is much less -efficient than searching a single large segment. And small segments must -eventually be merged into larger segments, causing yet more work. - -On the other hand, setting `refresh` causes the request to be returned as fast -as the segment can be made visible. Setting `block_until_refresh` slots the -request into process that will be called by default every second and a second -is a long time sometimes. 
So the tradeoff is clear: `block_until_refresh` is -slower but puts less load on Elasticsearch. - -=== Forced a Refresh Anyway - -If a `block_until_refresh` request comes in when there are already -`index.max_refresh_listeners` (defaults to 1000) requests -blocking-until-refresh then that request will behave just as though it had -`refresh` set to `true` on it. It will force a refresh. This keeps the promise -that when a `block_until_refresh` request returns that its changes are visible -for search while preventing unchecked resource usage for blocked requests. If -a request forced a refresh because it ran out of listener slots then its -response will contain `"forced_refresh": true`. diff --git a/docs/reference/docs/bulk.asciidoc b/docs/reference/docs/bulk.asciidoc index c0119881cb790..58684d8025092 100644 --- a/docs/reference/docs/bulk.asciidoc +++ b/docs/reference/docs/bulk.asciidoc @@ -167,22 +167,8 @@ are the same). [[bulk-refresh]] === Refresh -The `refresh` parameter can be set to `true` in order to refresh the relevant -primary and replica shards immediately after the bulk operation has occurred -and make it searchable, instead of waiting for the normal refresh interval to -expire. Setting it to `true` can trigger additional load, and may slow down -indexing. Due to its costly nature, the `refresh` parameter is set on the bulk request level -and is not supported on each individual bulk item. - -[float] -[[bulk-block-until-refresh]] -=== Block Until Refresh - -If `refresh` is too heavy for your use case then you can instead set -`block_until_refresh` to `true` to wait until the operation has been made -visible for search by a refresh. This is *much* lower overhead than `refresh` -but the downside is that the request will wait for the next refresh. See -<> for more details. +Control when the changes made by this request are visible to search. See +<>. [float] [[bulk-update]] diff --git a/docs/reference/docs/delete.asciidoc b/docs/reference/docs/delete.asciidoc index f7d5fcaa22de7..6db08a97a443c 100644 --- a/docs/reference/docs/delete.asciidoc +++ b/docs/reference/docs/delete.asciidoc @@ -113,21 +113,9 @@ is the same). [[delete-refresh]] === Refresh -The `refresh` parameter can be set to `true` in order to refresh the relevant -primary and replica shards after the delete operation has occurred and make it -searchable. Setting it to `true` should be done after careful thought and -verification that this does not cause a heavy load on the system (and slows -down indexing). +Control when the changes made by this request are visible to search. See +<>. -[float] -[[delete-block-until-refresh]] -=== Block Until Refresh - -If `refresh` is too heavy for your use case then you can instead set -`block_until_refresh` to `true` to wait until the operation has been made -visible for search by a refresh. This is *much* lower overhead than `refresh` -but the downside is that the request will wait for the next refresh. See -<> for more details. [float] [[delete-timeout]] diff --git a/docs/reference/docs/index_.asciidoc b/docs/reference/docs/index_.asciidoc index b04b8b4b80e55..f997f271dfd9e 100644 --- a/docs/reference/docs/index_.asciidoc +++ b/docs/reference/docs/index_.asciidoc @@ -354,23 +354,8 @@ replication group have indexed the document (sync replication). [[index-refresh]] === Refresh -To refresh the shard (not the whole index) immediately after the operation -occurs, so that the document appears in search results immediately, the -`refresh` parameter can be set to `true`. 
Setting this option to `true` should -*ONLY* be done after careful thought and verification that it does not lead to -poor performance, both from an indexing and a search standpoint. Note, getting -a document using the get API is completely realtime and doesn't require a -refresh. - -[float] -[[index-block-until-refresh]] -=== Block Until Refresh - -If `refresh` is too heavy for your use case then you can instead set -`block_until_refresh` to `true` to wait until the operation has been made -visible for search by a refresh. This is *much* lower overhead than `refresh` -but the downside is that the request will wait for the next refresh. See -<> for more details. +Control when the changes made by this request are visible to search. See +<>. [float] [[index-noop]] diff --git a/docs/reference/docs/refresh.asciidoc b/docs/reference/docs/refresh.asciidoc new file mode 100644 index 0000000000000..e85751ba4f706 --- /dev/null +++ b/docs/reference/docs/refresh.asciidoc @@ -0,0 +1,75 @@ +[[docs-refresh]] +== `?refresh` + +The <>, <>, <>, and +<> APIs support setting `refresh` to control when changes made +by this request are made visible to search. These are the allowed values: + +`true`:: + +Refresh the relevant primary and replica shards (not the whole index) +immediately after the operation occurs, so that the updated document appears +in search results immediately. This should *ONLY* be done after careful thought +and verification that it does not lead to poor performance, both from an +indexing and a search standpoint. + +`wait_for`:: + +Wait for the changes made by the request to be made visible by a refresh before +replying. This doesn't force an immediate refresh, rather, it waits for a +refresh happen. Elasticsearch automatically refreshes shards that have changed +every `index.refresh_interval` which defaults to one second. That setting is +<>. The <> API will also +cause the request to return, as will setting `refresh` to `true` on any of the +APIs that support it. + +`false` (the default):: + +Take no refresh related actions. The changes made by this request will be made +visible at some point after the request returns. + +=== Choosing which setting to use + +Unless you have a good reason to wait for the change to become visible always +use `refresh=false`, or, because that is the default, just leave the `refresh` +parameter out of the URL. That is the simplest and fastest choice. + +If you absolutely must have the changes made by a request visible synchronously +with the request then you must get to pick between putting more load on +Elasticsearch (`true`) and waiting longer for the response (`wait_for`). Here +are a few points that should inform that decision: + +* The more changes being made to the index the more work `wait_for` saves +compared to `true`. In the case that the index is only changed once every +`index.refresh_interval` then it saves no work. +* `true` creates less efficient indexes constructs (tiny segments) that must +later be merged into more efficient index constructs (larger segments). Meaning +that the cost of `true` is payed at index time to create the tiny segment, at +search time to search the tiny segment, and at merge time to make the larger +segments. +* Never start multiple `refresh=wait_for` requests in a row. Instead batch them +into a single bulk request with `refresh=wait_for` and Elasticsearch will start +them all in parallel and return only when they have all finished. 
+* If the refresh interval is set to `-1`, disabling the automatic refreshes, +then requests with `refresh=wait_for` will wait indefinitely until some action +causes a refresh. Conversely, setting `index.refresh_interval` to something +shorter than the default like `200ms` will make `refresh=wait_for` come back +faster, but it'll still generate inefficient segments. +* `refresh=wait_for` only affects the request that it is on, but, by forcing a +refresh immediately, `refresh=true` will affect other ongoing request. In +general, if you have a running system you don't wish to disturb then +`refresh=wait_for` is a smaller modification. + +=== `refresh=wait_for` Can Force a Refresh + +If a `refresh=wait_for` request comes in when there are already +`index.max_refresh_listeners` (defaults to 1000) requests waiting for a refresh +on that shard then that request will behave just as though it had `refresh` set +to `true` instead: it will force a refresh. This keeps the promise that when a +`refresh=wait_for` request returns that its changes are visible for search +while preventing unchecked resource usage for blocked requests. If a request +forced a refresh because it ran out of listener slots then its response will +contain `"forced_refresh": true`. + +Bulk requests only take up one slot on each shard that they touch no matter how +many times they modify the shard. diff --git a/docs/reference/docs/update.asciidoc b/docs/reference/docs/update.asciidoc index 1a0bdc59434ab..39261c5d21f22 100644 --- a/docs/reference/docs/update.asciidoc +++ b/docs/reference/docs/update.asciidoc @@ -235,18 +235,8 @@ The write consistency of the index/delete operation. `refresh`:: -Refresh the relevant primary and replica shards (not the whole index) -immediately after the operation occurs, so that the updated document appears -in search results immediately. This should *ONLY* be done after careful thought -and verification that it does not lead to poor performance, both from an -indexing and a search standpoint. - -`block_until_refresh`:: - -Wait to reply to the request until the primary and replica shards have been -refreshed to make this operation's changes visible for search. Use this if -`refresh` is too inefficient and you can tolerate slower responses. See -<> for more details. +Control when the changes made by this request are visible to search. See +<>. `fields`:: diff --git a/docs/reference/index-modules.asciidoc b/docs/reference/index-modules.asciidoc index e66c55125aeba..3cc95e5af9a3e 100644 --- a/docs/reference/index-modules.asciidoc +++ b/docs/reference/index-modules.asciidoc @@ -139,7 +139,7 @@ specific index module: `index.max_refresh_listeners`:: Maximum number of refresh listeners available on each shard of the index. - These listeners are used to implement <>. + These listeners are used to implement <>. 
[float] From 8453fc4f7850f6a02fb5971c17a942a3e3fd9f7b Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Tue, 31 May 2016 17:26:48 -0400 Subject: [PATCH 63/86] Javadoc --- .../org/elasticsearch/action/bulk/BulkRequest.java | 10 +++++++--- .../elasticsearch/action/support/WriteRequest.java | 13 +++++++++++-- .../support/replication/ReplicatedWriteRequest.java | 6 ++++++ .../support/replication/ReplicationRequest.java | 5 ++++- 4 files changed, 28 insertions(+), 6 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java b/core/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java index 1770ff89c52ff..ef62290a0f348 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java @@ -58,6 +58,11 @@ public class BulkRequest extends ActionRequest implements Composite private static final int REQUEST_OVERHEAD = 50; + /** + * Requests that are part of this request. It is only possible to add things that are both {@link ActionRequest}s and + * {@link WriteRequest}s to this but java doesn't support syntax to declare that everything in the array has both types so we declare + * the one with the least casts. + */ final List> requests = new ArrayList<>(); List payloads = null; @@ -477,7 +482,7 @@ private int findNextMarker(byte marker, int from, BytesReference data, int lengt * @return Whether this bulk request contains index request with an ingest pipeline enabled. */ public boolean hasIndexRequestsWithPipelines() { - for (ActionRequest actionRequest : requests) { + for (ActionRequest actionRequest : requests) { if (actionRequest instanceof IndexRequest) { IndexRequest indexRequest = (IndexRequest) actionRequest; if (Strings.hasText(indexRequest.getPipeline())) { @@ -497,8 +502,7 @@ public ActionRequestValidationException validate() { } for (ActionRequest request : requests) { // We first check if refresh has been set - if (request instanceof WriteRequest - && ((WriteRequest) request).getRefreshPolicy() != RefreshPolicy.NONE) { + if (((WriteRequest) request).getRefreshPolicy() != RefreshPolicy.NONE) { validationException = addValidationError( "RefreshPolicy is not supported on an item request. Set it on the BulkRequest instead.", validationException); } diff --git a/core/src/main/java/org/elasticsearch/action/support/WriteRequest.java b/core/src/main/java/org/elasticsearch/action/support/WriteRequest.java index 58cf38d68a82a..a4cfe9dc80027 100644 --- a/core/src/main/java/org/elasticsearch/action/support/WriteRequest.java +++ b/core/src/main/java/org/elasticsearch/action/support/WriteRequest.java @@ -19,16 +19,23 @@ package org.elasticsearch.action.support; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.bulk.BulkRequest; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.support.replication.ReplicatedWriteRequest; +import org.elasticsearch.action.update.UpdateRequest; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Streamable; import org.elasticsearch.common.io.stream.Writeable; import java.io.IOException; /** - * Base class for requests that modify data in some shard like delete, index, and shardBulk. + * Interface implemented by requests that modify the documents in an index like {@link IndexRequest}, {@link UpdateRequest}, and + * {@link BulkRequest}. 
Rather than implement this directly most implementers should extend {@link ReplicatedWriteRequest}. */ -public interface WriteRequest> { +public interface WriteRequest> extends Streamable { /** * Should this request trigger a refresh ({@linkplain RefreshPolicy#IMMEDIATE}), wait for a refresh ( * {@linkplain RefreshPolicy#WAIT_UNTIL}), or proceed ignore refreshes entirely ({@linkplain RefreshPolicy#NONE}, the default). @@ -52,6 +59,8 @@ default R setRefreshPolicy(String refreshPolicy) { */ RefreshPolicy getRefreshPolicy(); + ActionRequestValidationException validate(); + enum RefreshPolicy implements Writeable { /** * Don't refresh after this request. The default. diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/ReplicatedWriteRequest.java b/core/src/main/java/org/elasticsearch/action/support/replication/ReplicatedWriteRequest.java index 4942eb348ec6c..9fd6b5d10899c 100644 --- a/core/src/main/java/org/elasticsearch/action/support/replication/ReplicatedWriteRequest.java +++ b/core/src/main/java/org/elasticsearch/action/support/replication/ReplicatedWriteRequest.java @@ -19,6 +19,8 @@ package org.elasticsearch.action.support.replication; +import org.elasticsearch.action.delete.DeleteRequest; +import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -26,6 +28,10 @@ import java.io.IOException; +/** + * Requests that are both {@linkplain ReplicationRequests} (run on a shard's primary first, then the replica) and {@linkplain WriteRequest} + * (modify documents on a shard), for example {@link BulkShardRequest}, {@link IndexRequest}, and {@link DeleteRequest}. + */ public abstract class ReplicatedWriteRequest> extends ReplicationRequest implements WriteRequest { private RefreshPolicy refreshPolicy = RefreshPolicy.NONE; diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationRequest.java b/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationRequest.java index 5bd858234ff6a..44c420598b54e 100644 --- a/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationRequest.java +++ b/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationRequest.java @@ -23,6 +23,8 @@ import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.IndicesRequest; import org.elasticsearch.action.WriteConsistencyLevel; +import org.elasticsearch.action.admin.indices.refresh.TransportShardRefreshAction; +import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.io.stream.StreamInput; @@ -38,7 +40,8 @@ import static org.elasticsearch.action.ValidateActions.addValidationError; /** - * + * Requests that are run on a particular replica, first on the primary and then on the replicas like {@link IndexRequest} or + * {@link TransportShardRefreshAction}. 
*/ public abstract class ReplicationRequest> extends ActionRequest implements IndicesRequest { From d2123b1cabf29bce8ff561d4a4c1c1d5b42bccad Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Tue, 31 May 2016 17:28:09 -0400 Subject: [PATCH 64/86] Make more stuff final --- .../action/support/replication/ReplicationOperation.java | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java b/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java index f5781b75a60e7..8442e70525783 100644 --- a/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java +++ b/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java @@ -96,8 +96,8 @@ public class ReplicationOperation< void execute() throws Exception { final String writeConsistencyFailure = checkWriteConsistency ? checkWriteConsistency() : null; - ShardRouting primaryRouting = primary.routingEntry(); - ShardId primaryId = primaryRouting.shardId(); + final ShardRouting primaryRouting = primary.routingEntry(); + final ShardId primaryId = primaryRouting.shardId(); if (writeConsistencyFailure != null) { finishAsFailed(new UnavailableShardsException(primaryId, "{} Timeout: [{}], request: [{}]", writeConsistencyFailure, request.timeout(), request)); @@ -107,7 +107,7 @@ void execute() throws Exception { totalShards.incrementAndGet(); pendingShards.incrementAndGet(); primaryResult = primary.perform(request); - ReplicaRequest replicaRequest = primaryResult.replicaRequest(); + final ReplicaRequest replicaRequest = primaryResult.replicaRequest(); assert replicaRequest.primaryTerm() > 0 : "replicaRequest doesn't have a primary term"; if (logger.isTraceEnabled()) { logger.trace("[{}] op [{}] completed on primary for request [{}]", primaryId, opType, request); @@ -116,7 +116,7 @@ void execute() throws Exception { // we have to make sure that every operation indexed into the primary after recovery start will also be replicated // to the recovery target. If we use an old cluster state, we may miss a relocation that has started since then. 
// If the index gets deleted after primary operation, we skip replication - List shards = getShards(primaryId, clusterStateSupplier.get()); + final List shards = getShards(primaryId, clusterStateSupplier.get()); final String localNodeId = primary.routingEntry().currentNodeId(); for (final ShardRouting shard : shards) { if (executeOnReplicas == false || shard.unassigned()) { From 058481ad72019c0492b03a7a4ac32a48673697d3 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Tue, 31 May 2016 17:33:42 -0400 Subject: [PATCH 65/86] Fix javadoc links --- .../action/support/replication/ReplicatedWriteRequest.java | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/ReplicatedWriteRequest.java b/core/src/main/java/org/elasticsearch/action/support/replication/ReplicatedWriteRequest.java index 9fd6b5d10899c..fa02dac9e1e2d 100644 --- a/core/src/main/java/org/elasticsearch/action/support/replication/ReplicatedWriteRequest.java +++ b/core/src/main/java/org/elasticsearch/action/support/replication/ReplicatedWriteRequest.java @@ -19,6 +19,7 @@ package org.elasticsearch.action.support.replication; +import org.elasticsearch.action.bulk.BulkShardRequest; import org.elasticsearch.action.delete.DeleteRequest; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.support.WriteRequest; @@ -29,7 +30,7 @@ import java.io.IOException; /** - * Requests that are both {@linkplain ReplicationRequests} (run on a shard's primary first, then the replica) and {@linkplain WriteRequest} + * Requests that are both {@linkplain ReplicationRequest}s (run on a shard's primary first, then the replica) and {@linkplain WriteRequest} * (modify documents on a shard), for example {@link BulkShardRequest}, {@link IndexRequest}, and {@link DeleteRequest}. */ public abstract class ReplicatedWriteRequest> extends ReplicationRequest implements WriteRequest { From 2b771f8dabd488e056cfdc9989608d18264ddfb0 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Tue, 31 May 2016 17:39:46 -0400 Subject: [PATCH 66/86] Pull listener out into an inner class with javadoc and stuff --- .../TransportReplicationAction.java | 46 +++++++++---------- 1 file changed, 23 insertions(+), 23 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java b/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java index 6cffee514dfc0..e911341d25603 100644 --- a/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java +++ b/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java @@ -454,26 +454,31 @@ protected void doRun() throws Exception { try (Releasable ignored = acquireReplicaOperationLock(request.shardId(), request.primaryTerm())) { result = shardOperationOnReplica(request); } - result.respond(new ActionListener() { - @Override - public void onResponse(Empty response) { - if (logger.isTraceEnabled()) { - logger.trace("action [{}] completed on shard [{}] for request [{}]", transportReplicaAction, request.shardId(), - request); - } - setPhase(task, "finished"); - try { - channel.sendResponse(response); - } catch (Exception e) { - onFailure(e); - } - } + result.respond(new ResponseListener()); + } - @Override - public void onFailure(Throwable e) { - AsyncReplicaAction.this.onFailure(e); + /** + * Listens for the response on the replica and sends the response back to the primary. 
+ */ + private class ResponseListener implements ActionListener { + @Override + public void onResponse(Empty response) { + if (logger.isTraceEnabled()) { + logger.trace("action [{}] completed on shard [{}] for request [{}]", transportReplicaAction, request.shardId(), + request); } - }); + setPhase(task, "finished"); + try { + channel.sendResponse(response); + } catch (Exception e) { + onFailure(e); + } + } + + @Override + public void onFailure(Throwable e) { + responseWithFailure(e); + } } } @@ -796,11 +801,6 @@ public PrimaryResult perform(Request request) throws Exception { return result; } -// @Override -// public void performAsync(AsyncStash stash, Request request, ActionListener listener) throws Exception { -// asyncShardOperationOnPrimary(stash, request, listener); -// } - @Override public ShardRouting routingEntry() { return indexShard.routingEntry(); From 74be1480d6e44af2b354ff9ea47c234d4870b6c2 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Tue, 31 May 2016 18:02:03 -0400 Subject: [PATCH 67/86] Move funny ShardInfo hack for bulk into bulk This should make it easier to understand because it is closer to where it matters.... --- .../elasticsearch/action/bulk/TransportShardBulkAction.java | 4 ++++ .../action/support/replication/TransportWriteAction.java | 4 ---- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java b/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java index 35d670961d6b0..4ad1136e668c1 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java @@ -31,6 +31,7 @@ import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.replication.ReplicationRequest; import org.elasticsearch.action.support.replication.TransportWriteAction; +import org.elasticsearch.action.support.replication.ReplicationResponse.ShardInfo; import org.elasticsearch.action.update.UpdateHelper; import org.elasticsearch.action.update.UpdateRequest; import org.elasticsearch.action.update.UpdateResponse; @@ -327,6 +328,9 @@ private void setResponse(BulkItemRequest request, BulkItemResponse response) { request.setPrimaryResponse(response); if (response.isFailed()) { request.setIgnoreOnReplica(); + } else { + // Set the ShardInfo to 0 so we can safely send it to the replicas. We won't use it in the real response though. 
+ response.getResponse().setShardInfo(new ShardInfo()); } } diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java b/core/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java index 2abb781b48f33..55195d2434b1a 100644 --- a/core/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java +++ b/core/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java @@ -22,7 +22,6 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.WriteRequest.RefreshPolicy; -import org.elasticsearch.action.support.replication.ReplicationResponse.ShardInfo; import org.elasticsearch.cluster.action.shard.ShardStateAction; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; @@ -96,9 +95,6 @@ public static class WriteResult { public WriteResult(Response response, @Nullable Location location) { this.response = response; this.location = location; - // Set the ShardInfo to 0 so we can safely send it to the replicas - // NOCOMMIT this seems wrong - response.setShardInfo(new ShardInfo()); } public Response getResponse() { From 6bb4e5c75e850f4a42518f06fbc955f7ec76d245 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Wed, 1 Jun 2016 13:17:44 -0400 Subject: [PATCH 68/86] Support null RefreshListeners in InternalEngine Just skip using it. --- .../java/org/elasticsearch/index/engine/InternalEngine.java | 6 ++++-- .../org/elasticsearch/index/engine/InternalEngineTests.java | 4 +--- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java b/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java index 05fe848cc122a..7d0ef356acf64 100644 --- a/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java +++ b/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java @@ -157,6 +157,10 @@ public InternalEngine(EngineConfig engineConfig) throws EngineException { this.versionMap.setManager(searcherManager); // don't allow commits until we are done with recovering allowCommits.compareAndSet(true, openMode != EngineConfig.OpenMode.OPEN_INDEX_AND_TRANSLOG); + if (engineConfig.getRefreshListeners() != null) { + searcherManager.addListener(engineConfig.getRefreshListeners()); + engineConfig.getRefreshListeners().setTranslog(translog); + } success = true; } finally { if (success == false) { @@ -168,8 +172,6 @@ public InternalEngine(EngineConfig engineConfig) throws EngineException { } } } - searcherManager.addListener(engineConfig.getRefreshListeners()); - engineConfig.getRefreshListeners().setTranslog(translog); logger.trace("created new InternalEngine"); } diff --git a/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java b/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java index 18784ba29aa56..c3bb61ab4a298 100644 --- a/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java +++ b/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java @@ -84,7 +84,6 @@ import org.elasticsearch.index.mapper.internal.UidFieldMapper; import org.elasticsearch.index.mapper.object.RootObjectMapper; import org.elasticsearch.index.shard.IndexSearcherWrapper; -import org.elasticsearch.index.shard.RefreshListeners; import org.elasticsearch.index.shard.ShardId; import 
org.elasticsearch.index.shard.ShardUtils; import org.elasticsearch.index.shard.TranslogRecoveryPerformer; @@ -299,11 +298,10 @@ public void onFailedEngine(String reason, @Nullable Throwable t) { // we don't need to notify anybody in this test } }; - RefreshListeners refreshListeners = new RefreshListeners(() -> 1, () -> {throw new UnsupportedOperationException();}, r -> r.run()); EngineConfig config = new EngineConfig(openMode, shardId, threadPool, indexSettings, null, store, createSnapshotDeletionPolicy(), mergePolicy, iwc.getAnalyzer(), iwc.getSimilarity(), new CodecService(null, logger), listener, new TranslogHandler(shardId.getIndexName(), logger), IndexSearcher.getDefaultQueryCache(), - IndexSearcher.getDefaultQueryCachingPolicy(), translogConfig, TimeValue.timeValueMinutes(5), refreshListeners); + IndexSearcher.getDefaultQueryCachingPolicy(), translogConfig, TimeValue.timeValueMinutes(5), null); return config; } From 19606ec3bbe612095df45eba734c5b7eb2709c01 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Wed, 1 Jun 2016 14:09:52 -0400 Subject: [PATCH 69/86] Assert translog ordering --- .../java/org/elasticsearch/index/shard/RefreshListeners.java | 1 + 1 file changed, 1 insertion(+) diff --git a/core/src/main/java/org/elasticsearch/index/shard/RefreshListeners.java b/core/src/main/java/org/elasticsearch/index/shard/RefreshListeners.java index b6bd3961694ed..aecb027f76b8e 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/RefreshListeners.java +++ b/core/src/main/java/org/elasticsearch/index/shard/RefreshListeners.java @@ -125,6 +125,7 @@ public void afterRefresh(boolean didRefresh) throws IOException { * First set the lastRefreshedLocation so listeners that come in locations before that will just execute inline without messing * around with refreshListeners at all. */ + assert lastRefreshedLocation == null || currentRefreshLocation.compareTo(lastRefreshedLocation) >= 0; lastRefreshedLocation = currentRefreshLocation; /* * Grab the current refresh listeners and replace them with a new list while synchronized. Any listeners that come in after this From 4ffb7c0e954343cc1c04b3d7be2ebad66d3a016b Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Wed, 1 Jun 2016 14:27:39 -0400 Subject: [PATCH 70/86] Fire all refresh listeners in a single thread Rather than queueing a runnable each. 
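
A minimal, self-contained sketch of the batching pattern applied here (hypothetical class and method names; the real change is in RefreshListeners below): collect the listeners that are ready into one list and fire them all from a single task on the listener executor, rather than submitting one runnable per listener.

    import java.util.ArrayList;
    import java.util.List;
    import java.util.concurrent.Executor;
    import java.util.function.Consumer;

    // Hypothetical stand-alone example of firing a batch of refresh listeners in one task.
    class BatchedListenerFiringSketch {
        private final Executor listenerExecutor;

        BatchedListenerFiringSketch(Executor listenerExecutor) {
            this.listenerExecutor = listenerExecutor;
        }

        void fireAll(List<Consumer<Boolean>> listenersToFire) {
            // Single executor task for the whole batch instead of one per listener.
            listenerExecutor.execute(() -> {
                for (Consumer<Boolean> listener : listenersToFire) {
                    try {
                        listener.accept(false); // false: this refresh was not forced
                    } catch (Exception e) {
                        // keep firing the remaining listeners; the real code logs the failure
                    }
                }
            });
        }

        public static void main(String[] args) {
            BatchedListenerFiringSketch sketch = new BatchedListenerFiringSketch(Runnable::run);
            List<Consumer<Boolean>> listeners = new ArrayList<>();
            listeners.add(forced -> System.out.println("listener 1 fired, forced=" + forced));
            listeners.add(forced -> System.out.println("listener 2 fired, forced=" + forced));
            sketch.fireAll(listeners);
        }
    }

One executor task per refresh keeps the listener thread pool from being flooded when many requests are waiting on the same refresh.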
--- .../elasticsearch/index/shard/IndexShard.java | 3 ++- .../index/shard/RefreshListeners.java | 18 ++++++++++++++++-- .../index/engine/ShadowEngineTests.java | 5 +++-- .../index/shard/RefreshListenersTests.java | 3 ++- 4 files changed, 23 insertions(+), 6 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java index d5dabd8245bab..26765075b26f7 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java +++ b/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java @@ -1520,7 +1520,8 @@ protected RefreshListeners buildRefreshListeners() { return new RefreshListeners( indexSettings::getMaxRefreshListeners, () -> refresh("too_many_listeners"), - threadPool.executor(ThreadPool.Names.LISTENER)::execute); + threadPool.executor(ThreadPool.Names.LISTENER)::execute, + logger); } /** diff --git a/core/src/main/java/org/elasticsearch/index/shard/RefreshListeners.java b/core/src/main/java/org/elasticsearch/index/shard/RefreshListeners.java index aecb027f76b8e..db33c3f974f71 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/RefreshListeners.java +++ b/core/src/main/java/org/elasticsearch/index/shard/RefreshListeners.java @@ -21,6 +21,7 @@ import org.apache.lucene.search.ReferenceManager; import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.index.translog.Translog; import java.io.IOException; @@ -40,6 +41,7 @@ public final class RefreshListeners implements ReferenceManager.RefreshListener private final IntSupplier getMaxRefreshListeners; private final Runnable forceRefresh; private final Executor listenerExecutor; + private final ESLogger logger; /** * List of refresh listeners. Defaults to null and built on demand because most refresh cycles won't need it. Entries are never removed @@ -52,10 +54,11 @@ public final class RefreshListeners implements ReferenceManager.RefreshListener */ private volatile Translog.Location lastRefreshedLocation; - public RefreshListeners(IntSupplier getMaxRefreshListeners, Runnable forceRefresh, Executor listenerExecutor) { + public RefreshListeners(IntSupplier getMaxRefreshListeners, Runnable forceRefresh, Executor listenerExecutor, ESLogger logger) { this.getMaxRefreshListeners = getMaxRefreshListeners; this.forceRefresh = forceRefresh; this.listenerExecutor = listenerExecutor; + this.logger = logger; } /** @@ -145,12 +148,13 @@ public void afterRefresh(boolean didRefresh) throws IOException { * Iterate the list of listeners, preserving the ones that we couldn't fire in a new list. We expect to fire most of them so this * copying should be minimial. Much less overhead than removing all of the fired ones from the list. 
*/ + List> listenersToFire = new ArrayList<>(); List>> preservedListeners = null; for (Tuple> tuple : candidates) { Translog.Location location = tuple.v1(); Consumer listener = tuple.v2(); if (location.compareTo(currentRefreshLocation) <= 0) { - listenerExecutor.execute(() -> listener.accept(false)); + listenersToFire.add(listener); } else { if (preservedListeners == null) { preservedListeners = new ArrayList<>(); @@ -169,5 +173,15 @@ public void afterRefresh(boolean didRefresh) throws IOException { refreshListeners.addAll(preservedListeners); } } + // Lastly, fire the listeners that are ready on the listener thread pool + listenerExecutor.execute(() -> { + for (Consumer listener : listenersToFire) { + try { + listener.accept(false); + } catch (Throwable t) { + logger.warn("Error firing refresh listener", t); + } + } + }); } } diff --git a/core/src/test/java/org/elasticsearch/index/engine/ShadowEngineTests.java b/core/src/test/java/org/elasticsearch/index/engine/ShadowEngineTests.java index ad74844ddba4a..3d40bb8a8ac5c 100644 --- a/core/src/test/java/org/elasticsearch/index/engine/ShadowEngineTests.java +++ b/core/src/test/java/org/elasticsearch/index/engine/ShadowEngineTests.java @@ -218,7 +218,7 @@ protected InternalEngine createInternalEngine(IndexSettings indexSettings, Store } protected InternalEngine createInternalEngine(IndexSettings indexSettings, Store store, Path translogPath, MergePolicy mergePolicy) { - EngineConfig config = config(indexSettings, store, translogPath, mergePolicy, new RefreshListeners(null, null, null)); + EngineConfig config = config(indexSettings, store, translogPath, mergePolicy, null); return new InternalEngine(config); } @@ -987,7 +987,8 @@ public void testNoTranslog() { } public void testRefreshListenersFails() throws IOException { - EngineConfig config = config(defaultSettings, store, createTempDir(), newMergePolicy(), new RefreshListeners(null, null, null)); + EngineConfig config = config(defaultSettings, store, createTempDir(), newMergePolicy(), + new RefreshListeners(null, null, null, logger)); Exception e = expectThrows(IllegalArgumentException.class, () -> new ShadowEngine(config)); assertEquals("ShadowEngine doesn't support RefreshListeners", e.getMessage()); } diff --git a/core/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java b/core/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java index 9f7d10720a41b..cc8be3ffefa0e 100644 --- a/core/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java +++ b/core/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java @@ -88,7 +88,8 @@ public void setupListeners() throws Exception { () -> maxListeners, () -> engine.refresh("too-many-listeners"), // Immediately run listeners rather than adding them to the listener thread pool like IndexShard does to simplify the test. 
- toExecute -> toExecute.run() + Runnable::run, + logger ); // Now setup the InternalEngine which is much more complicated because we aren't mocking anything From d523b5702b60c7ba309fb0dcf3cd3a4798f11960 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Wed, 1 Jun 2016 14:34:01 -0400 Subject: [PATCH 71/86] Explain Integer.MAX_VALUE --- .../main/java/org/elasticsearch/index/translog/Translog.java | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/core/src/main/java/org/elasticsearch/index/translog/Translog.java b/core/src/main/java/org/elasticsearch/index/translog/Translog.java index c85852f3babe5..3b7dfd26186bb 100644 --- a/core/src/main/java/org/elasticsearch/index/translog/Translog.java +++ b/core/src/main/java/org/elasticsearch/index/translog/Translog.java @@ -453,6 +453,11 @@ public Location add(Operation operation) throws IOException { */ public Location getLastWriteLocation() { try (ReleasableLock lock = readLock.acquire()) { + /* + * We use position = current - 1 and size = Integer.MAX_VALUE here instead of position current and size = 0 for two reasons: + * 1. Translog.Location's compareTo doesn't actually pay attention to size even though it's equals method does. + * 2. It feels more right to return a *position* that is before the next write's position rather than rely on the size. + */ return new Location(current.generation, current.sizeInBytes() - 1, Integer.MAX_VALUE); } } From 30f972bdaeaaa0de6fe67746cdb8628aa86f5a8c Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Wed, 1 Jun 2016 17:39:05 -0400 Subject: [PATCH 72/86] Handle hanging documents If a document is added to the index during a refresh we weren't properly firing its refresh listener. This happened because the way we detect whether a refresh makes something visible or not is imperfect. It is ok because it always errs on the side of thinking that something isn't yet visible. So when a document arrives during a refresh the refresh listeners won't think it made it into a refresh when, often, it does. The way we work around this is by telling Elasticsearch that it ought to trigger a refresh if there are any pending refresh listeners even if there aren't pending documents to update. Lucene short circuits the refresh so it doesn't take that much effort, but the refresh listeners still get the signal that a refresh has come in and they still pick up the change and notify the listener. This means that the time that a listener can wait is actually slightly longer than the refresh interval. --- .../elasticsearch/index/shard/IndexShard.java | 5 +- .../index/shard/RefreshListeners.java | 65 ++++++++++++------- .../index/translog/TranslogWriter.java | 2 + ...RefreshIT.java => WaitUntilRefreshIT.java} | 43 +++++++----- .../index/shard/RefreshListenersTests.java | 3 + 5 files changed, 78 insertions(+), 40 deletions(-) rename core/src/test/java/org/elasticsearch/index/{BlockUntilRefreshIT.java => WaitUntilRefreshIT.java} (88%) diff --git a/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java index 26765075b26f7..dd41b447de983 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java +++ b/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java @@ -1549,14 +1549,15 @@ EngineFactory getEngineFactory() { } /** - * Returns true iff one or more changes to the engine are not visible to via the current searcher. 
+ * Returns true iff one or more changes to the engine are not visible to via the current searcher *or* there are pending + * refresh listeners. * Otherwise false. * * @throws EngineClosedException if the engine is already closed * @throws AlreadyClosedException if the internal indexwriter in the engine is already closed */ public boolean isRefreshNeeded() { - return getEngine().refreshNeeded(); + return getEngine().refreshNeeded() || (refreshListeners != null && refreshListeners.refreshNeeded()); } /** diff --git a/core/src/main/java/org/elasticsearch/index/shard/RefreshListeners.java b/core/src/main/java/org/elasticsearch/index/shard/RefreshListeners.java index db33c3f974f71..ab3e334714af0 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/RefreshListeners.java +++ b/core/src/main/java/org/elasticsearch/index/shard/RefreshListeners.java @@ -73,8 +73,7 @@ public void addOrNotify(Translog.Location location, Consumer listener) requireNonNull(listener, "listener cannot be null"); requireNonNull(location, "location cannot be null"); - Translog.Location lastRefresh = lastRefreshedLocation; - if (lastRefresh != null && lastRefresh.compareTo(location) >= 0) { + if (lastRefreshedLocation != null && lastRefreshedLocation.compareTo(location) >= 0) { // Location already visible, just call the listener listener.accept(false); return; @@ -94,6 +93,14 @@ public void addOrNotify(Translog.Location location, Consumer listener) listener.accept(true); } + /** + * Returns true if there are pending listeners. + */ + public boolean refreshNeeded() { + // No need to synchronize here because we're doing a single volatile read + return refreshListeners != null; + } + /** * Setup the translog used to find the last refreshed location. */ @@ -116,7 +123,11 @@ public void beforeRefresh() throws IOException { @Override public void afterRefresh(boolean didRefresh) throws IOException { - // This intentionally ignores didRefresh so a refresh call over the API can force rechecking the refreshListeners. + /* + * We intentionally ignore didRefresh here because our timing is a little off. It'd be a useful flag if we knew everything that made + * it into the refresh, but the way we snapshot the translog position before the refresh, things can sneak into the refresh that we + * don't know about. + */ if (null == currentRefreshLocation) { /* * The translog had an empty last write location at the start of the refresh so we can't alert anyone to anything. This @@ -124,15 +135,19 @@ public void afterRefresh(boolean didRefresh) throws IOException { */ return; } + // First check if we've actually moved forward. If not then just bail immediately. + assert lastRefreshedLocation == null || currentRefreshLocation.compareTo(lastRefreshedLocation) >= 0; + if (lastRefreshedLocation != null && currentRefreshLocation.compareTo(lastRefreshedLocation) == 0) { + return; + } /* - * First set the lastRefreshedLocation so listeners that come in locations before that will just execute inline without messing - * around with refreshListeners at all. + * Set the lastRefreshedLocation so listeners that come in for locations before that will just execute inline without messing + * around with refreshListeners or synchronizing at all. */ - assert lastRefreshedLocation == null || currentRefreshLocation.compareTo(lastRefreshedLocation) >= 0; lastRefreshedLocation = currentRefreshLocation; /* - * Grab the current refresh listeners and replace them with a new list while synchronized. 
Any listeners that come in after this - * won't be in the list we iterate over and very likely won't be candidates for refresh anyway because we've already moved the + * Grab the current refresh listeners and replace them with null while synchronized. Any listeners that come in after this won't be + * in the list we iterate over and very likely won't be candidates for refresh anyway because we've already moved the * lastRefreshedLocation. */ List>> candidates; @@ -144,16 +159,16 @@ public void afterRefresh(boolean didRefresh) throws IOException { } refreshListeners = null; } - /* - * Iterate the list of listeners, preserving the ones that we couldn't fire in a new list. We expect to fire most of them so this - * copying should be minimial. Much less overhead than removing all of the fired ones from the list. - */ - List> listenersToFire = new ArrayList<>(); + // Iterate the list of listeners, copying the listeners to fire to one list and those to preserve to another list. + List> listenersToFire = null; List>> preservedListeners = null; for (Tuple> tuple : candidates) { Translog.Location location = tuple.v1(); Consumer listener = tuple.v2(); if (location.compareTo(currentRefreshLocation) <= 0) { + if (listenersToFire == null) { + listenersToFire = new ArrayList<>(); + } listenersToFire.add(listener); } else { if (preservedListeners == null) { @@ -163,7 +178,10 @@ public void afterRefresh(boolean didRefresh) throws IOException { } } /* - * Now add any preserved listeners back to the running list of refresh listeners. We'll try them next time. + * Now add any preserved listeners back to the running list of refresh listeners while under lock. We'll try them next time. While + * we were iterating the list of listeners new listeners could have come in. That means that adding all of our preserved listeners + * might push our list of listeners above the maximum number of slots allowed. This seems unlikely because we expect few listeners + * to be preserved. And the next listener while we're full will trigger a refresh anyway. 
*/ if (preservedListeners != null) { synchronized (this) { @@ -174,14 +192,17 @@ public void afterRefresh(boolean didRefresh) throws IOException { } } // Lastly, fire the listeners that are ready on the listener thread pool - listenerExecutor.execute(() -> { - for (Consumer listener : listenersToFire) { - try { - listener.accept(false); - } catch (Throwable t) { - logger.warn("Error firing refresh listener", t); + if (listenersToFire != null) { + final List> finalListenersToFire = listenersToFire; + listenerExecutor.execute(() -> { + for (Consumer listener : finalListenersToFire) { + try { + listener.accept(false); + } catch (Throwable t) { + logger.warn("Error firing refresh listener", t); + } } - } - }); + }); + } } } diff --git a/core/src/main/java/org/elasticsearch/index/translog/TranslogWriter.java b/core/src/main/java/org/elasticsearch/index/translog/TranslogWriter.java index e215669761c6a..ae99d2cf1c357 100644 --- a/core/src/main/java/org/elasticsearch/index/translog/TranslogWriter.java +++ b/core/src/main/java/org/elasticsearch/index/translog/TranslogWriter.java @@ -26,6 +26,7 @@ import org.apache.lucene.util.IOUtils; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.Channels; +import org.elasticsearch.common.logging.ESLoggerFactory; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.index.shard.ShardId; @@ -136,6 +137,7 @@ public synchronized Translog.Location add(BytesReference data) throws IOExceptio } totalOffset += data.length(); operationCounter++; + ESLoggerFactory.getLogger("TMP").error("Advanced position from [{}] to [{}]", offset, totalOffset); return new Translog.Location(generation, offset, data.length()); } diff --git a/core/src/test/java/org/elasticsearch/index/BlockUntilRefreshIT.java b/core/src/test/java/org/elasticsearch/index/WaitUntilRefreshIT.java similarity index 88% rename from core/src/test/java/org/elasticsearch/index/BlockUntilRefreshIT.java rename to core/src/test/java/org/elasticsearch/index/WaitUntilRefreshIT.java index 91a807d23d414..b2cb2d9681830 100644 --- a/core/src/test/java/org/elasticsearch/index/BlockUntilRefreshIT.java +++ b/core/src/test/java/org/elasticsearch/index/WaitUntilRefreshIT.java @@ -27,6 +27,7 @@ import org.elasticsearch.action.index.IndexResponse; import org.elasticsearch.action.support.WriteRequest.RefreshPolicy; import org.elasticsearch.action.update.UpdateResponse; +import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.rest.RestStatus; @@ -36,6 +37,7 @@ import org.elasticsearch.script.ScriptModule; import org.elasticsearch.script.ScriptService.ScriptType; import org.elasticsearch.test.ESIntegTestCase; +import org.junit.Before; import java.util.Collection; import java.util.Map; @@ -43,6 +45,7 @@ import static java.util.Collections.emptyMap; import static java.util.Collections.singleton; +import static java.util.Collections.singletonMap; import static org.elasticsearch.index.query.QueryBuilders.matchQuery; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoSearchHits; @@ -51,11 +54,21 @@ /** * Tests that requests with RefreshPolicy.WAIT_UNTIL will be visible when they return. 
*/ -public class BlockUntilRefreshIT extends ESIntegTestCase { +public class WaitUntilRefreshIT extends ESIntegTestCase { + @Override + protected Settings nodeSettings(int nodeOrdinal) { + return Settings.builder().put(super.nodeSettings(nodeOrdinal)).put(NetworkModule.HTTP_ENABLED.getKey(), true).build(); + } + @Override public Settings indexSettings() { // Use a shorter refresh interval to speed up the tests. We'll be waiting on this interval several times. - return Settings.builder().put(super.indexSettings()).put("index.refresh_interval", "100ms").build(); + return Settings.builder().put(super.indexSettings()).put("index.refresh_interval", "40ms").build(); + } + + @Before + public void createTestIndex() { + createIndex("test"); } public void testIndex() { @@ -83,21 +96,21 @@ public void testUpdate() throws InterruptedException, ExecutionException { indexRandom(true, client().prepareIndex("test", "test", "1").setSource("foo", "bar")); assertSearchHits(client().prepareSearch("test").setQuery(matchQuery("foo", "bar")).get(), "1"); - // Update with block_until_refresh + // Update with RefreshPolicy.WAIT_UNTIL UpdateResponse update = client().prepareUpdate("test", "test", "1").setDoc("foo", "baz").setRefreshPolicy(RefreshPolicy.WAIT_UNTIL) .get(); assertEquals(2, update.getVersion()); assertFalse("request shouldn't have forced a refresh", update.forcedRefresh()); assertSearchHits(client().prepareSearch("test").setQuery(matchQuery("foo", "baz")).get(), "1"); - // Upsert with block_until_refresh + // Upsert with RefreshPolicy.WAIT_UNTIL update = client().prepareUpdate("test", "test", "2").setDocAsUpsert(true).setDoc("foo", "cat") .setRefreshPolicy(RefreshPolicy.WAIT_UNTIL).get(); assertEquals(1, update.getVersion()); assertFalse("request shouldn't have forced a refresh", update.forcedRefresh()); assertSearchHits(client().prepareSearch("test").setQuery(matchQuery("foo", "cat")).get(), "2"); - // Update-becomes-delete with block_until_refresh + // Update-becomes-delete with RefreshPolicy.WAIT_UNTIL update = client().prepareUpdate("test", "test", "2").setScript(new Script("delete_plz", ScriptType.INLINE, "native", emptyMap())) .setRefreshPolicy(RefreshPolicy.WAIT_UNTIL).get(); assertEquals(2, update.getVersion()); @@ -106,30 +119,28 @@ public void testUpdate() throws InterruptedException, ExecutionException { } public void testBulk() { - // Index by bulk with block_until_refresh + // Index by bulk with RefreshPolicy.WAIT_UNTIL BulkRequestBuilder bulk = client().prepareBulk().setRefreshPolicy(RefreshPolicy.WAIT_UNTIL); bulk.add(client().prepareIndex("test", "test", "1").setSource("foo", "bar")); assertBulkSuccess(bulk.get()); assertSearchHits(client().prepareSearch("test").setQuery(matchQuery("foo", "bar")).get(), "1"); - // Update by bulk with block_until_refresh + // Update by bulk with RefreshPolicy.WAIT_UNTIL bulk = client().prepareBulk().setRefreshPolicy(RefreshPolicy.WAIT_UNTIL); bulk.add(client().prepareUpdate("test", "test", "1").setDoc("foo", "baz")); assertBulkSuccess(bulk.get()); assertSearchHits(client().prepareSearch("test").setQuery(matchQuery("foo", "baz")).get(), "1"); - // Delete by bulk with block_until_refresh + // Delete by bulk with RefreshPolicy.WAIT_UNTIL bulk = client().prepareBulk().setRefreshPolicy(RefreshPolicy.WAIT_UNTIL); bulk.add(client().prepareDelete("test", "test", "1")); assertBulkSuccess(bulk.get()); assertNoSearchHits(client().prepareSearch("test").setQuery(matchQuery("foo", "bar")).get()); - // NOCOMMIT figure out why this sort of noop doesn't trigger -// -// 
// Update makes a noop -// bulk = client().prepareBulk().setRefreshPolicy(RefreshPolicy.WAIT_UNTIL); -// bulk.add(client().prepareDelete("test", "test", "1")); -// assertBulkSuccess(bulk.get()); -// assertNoSearchHits(client().prepareSearch("test").setQuery(matchQuery("foo", "bar")).get()); + + // Update makes a noop + bulk = client().prepareBulk().setRefreshPolicy(RefreshPolicy.WAIT_UNTIL); + bulk.add(client().prepareDelete("test", "test", "1")); + assertBulkSuccess(bulk.get()); } /** @@ -137,7 +148,7 @@ public void testBulk() { * explicit refresh if the interval is -1 because we don't have that kind of control over refresh. It can happen all on its own. */ public void testNoRefreshInterval() throws InterruptedException, ExecutionException { - client().admin().indices().prepareCreate("test").setSettings("index.refresh_interval", -1).get(); + client().admin().indices().prepareUpdateSettings("test").setSettings(singletonMap("index.refresh_interval", -1)).get(); ListenableActionFuture index = client().prepareIndex("test", "index", "1").setSource("foo", "bar") .setRefreshPolicy(RefreshPolicy.WAIT_UNTIL).execute(); while (false == index.isDone()) { diff --git a/core/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java b/core/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java index cc8be3ffefa0e..a7862592954a0 100644 --- a/core/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java +++ b/core/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java @@ -133,6 +133,7 @@ public void tearDownListeners() throws Exception { } public void testTooMany() throws Exception { + assertFalse(listeners.refreshNeeded()); Engine.Index index = index("1"); // Fill the listener slots @@ -141,6 +142,7 @@ public void testTooMany() throws Exception { DummyRefreshListener listener = new DummyRefreshListener(); nonForcedListeners.add(listener); listeners.addOrNotify(index.getTranslogLocation(), listener); + assertTrue(listeners.refreshNeeded()); } // We shouldn't have called any of them @@ -157,6 +159,7 @@ public void testTooMany() throws Exception { for (DummyRefreshListener listener : nonForcedListeners) { assertEquals("Expected listener called with unforced refresh!", Boolean.FALSE, listener.forcedRefresh.get()); } + assertFalse(listeners.refreshNeeded()); } public void testAfterRefresh() throws Exception { From b74cf3fe778352b140355afcaa08d3d4412d749d Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Wed, 1 Jun 2016 18:27:52 -0400 Subject: [PATCH 73/86] Preserve `?refresh` behavior `?refresh` means the same things as `?refresh=true`. 
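
A tiny self-contained sketch (hypothetical enum name) of the mapping this restores in the change below: the empty string, i.e. a bare `?refresh` with no value, parses the same as `true`.

    // Hypothetical stand-alone illustration of the refresh parameter parsing.
    enum RefreshPolicySketch {
        NONE, IMMEDIATE, WAIT_UNTIL;

        static RefreshPolicySketch parse(String value) {
            switch (value) {
                case "false":
                    return NONE;
                case "":        // bare ?refresh with no value reads as "refresh now"
                case "true":
                    return IMMEDIATE;
                case "wait_for":
                    return WAIT_UNTIL;
                default:
                    throw new IllegalArgumentException("Unknown value for refresh: [" + value + "].");
            }
        }

        public static void main(String[] args) {
            System.out.println(parse(""));         // IMMEDIATE
            System.out.println(parse("true"));     // IMMEDIATE
            System.out.println(parse("wait_for")); // WAIT_UNTIL
        }
    }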
--- .../action/support/WriteRequest.java | 7 ++- docs/reference/docs/refresh.asciidoc | 36 +++++++++++++- .../rest-api-spec/test/bulk/50_refresh.yaml | 48 +++++++++++++++++++ .../test/bulk/50_wait_for_refresh.yaml | 15 ------ .../rest-api-spec/test/create/60_refresh.yaml | 39 +++++++++++++++ .../rest-api-spec/test/delete/50_refresh.yaml | 34 +++++++++++++ .../rest-api-spec/test/index/60_refresh.yaml | 20 ++++++++ .../rest-api-spec/test/update/60_refresh.yaml | 30 ++++++++++++ 8 files changed, 212 insertions(+), 17 deletions(-) create mode 100644 rest-api-spec/src/main/resources/rest-api-spec/test/bulk/50_refresh.yaml delete mode 100644 rest-api-spec/src/main/resources/rest-api-spec/test/bulk/50_wait_for_refresh.yaml diff --git a/core/src/main/java/org/elasticsearch/action/support/WriteRequest.java b/core/src/main/java/org/elasticsearch/action/support/WriteRequest.java index a4cfe9dc80027..6379a4fb259c3 100644 --- a/core/src/main/java/org/elasticsearch/action/support/WriteRequest.java +++ b/core/src/main/java/org/elasticsearch/action/support/WriteRequest.java @@ -84,12 +84,17 @@ public static RefreshPolicy parse(String string) { switch (string) { case "false": return NONE; + /* + * Empty string is IMMEDIATE because that makes "POST /test/test/1?refresh" perform a refresh which reads well and is what folks + * are used to. + */ + case "": case "true": return IMMEDIATE; case "wait_for": return WAIT_UNTIL; } - throw new IllegalArgumentException("Unknown value for refresh: [" + string + "]"); + throw new IllegalArgumentException("Unknown value for refresh: [" + string + "]."); } public static RefreshPolicy readFrom(StreamInput in) throws IOException { diff --git a/docs/reference/docs/refresh.asciidoc b/docs/reference/docs/refresh.asciidoc index e85751ba4f706..cbe2d44b5af8d 100644 --- a/docs/reference/docs/refresh.asciidoc +++ b/docs/reference/docs/refresh.asciidoc @@ -5,7 +5,7 @@ The <>, <>, <>, and <> APIs support setting `refresh` to control when changes made by this request are made visible to search. These are the allowed values: -`true`:: +Empty string or `true`:: Refresh the relevant primary and replica shards (not the whole index) immediately after the operation occurs, so that the updated document appears @@ -73,3 +73,37 @@ contain `"forced_refresh": true`. Bulk requests only take up one slot on each shard that they touch no matter how many times they modify the shard. 
+ +=== Examples + +These will create a document and immediately refresh the index so it is visible: + +[source,json] +-------------------------------------------------- +PUT /test/test/1?refresh +{"test": "test"} +PUT /test/test/2?refresh=true +{"test": "test"} +-------------------------------------------------- +// CONSOLE + +These will create a document without doing anything to make it visible for +search: + +[source,json] +-------------------------------------------------- +PUT /test/test/3 +{"test": "test"} +PUT /test/test/4?refresh=true +{"test": "test"} +-------------------------------------------------- +// CONSOLE + +This will create a document and wait for it to become visible for search: + +[source,json] +-------------------------------------------------- +PUT /test/test/4?refresh=wait_for +{"test": "test"} +-------------------------------------------------- +// CONSOLE diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/50_refresh.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/50_refresh.yaml new file mode 100644 index 0000000000000..4906975bfab15 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/50_refresh.yaml @@ -0,0 +1,48 @@ +--- +"refresh=true immediately makes changes are visible in search": + - do: + bulk: + refresh: true + body: | + {"index": {"_index": "test_index", "_type": "test_type", "_id": "test_id"}} + {"f1": "v1", "f2": 42} + {"index": {"_index": "test_index", "_type": "test_type", "_id": "test_id2"}} + {"f1": "v2", "f2": 47} + + - do: + count: + index: test_index + - match: {count: 2} + +--- +"refresh=empty string immediately makes changes are visible in search": + - do: + bulk: + refresh: "" + body: | + {"index": {"_index": "test_index", "_type": "test_type", "_id": "test_id"}} + {"f1": "v1", "f2": 42} + {"index": {"_index": "test_index", "_type": "test_type", "_id": "test_id2"}} + {"f1": "v2", "f2": 47} + + - do: + count: + index: test_index + - match: {count: 2} + + +--- +"refresh=wait_for waits until changes are visible in search": + - do: + bulk: + refresh: wait_for + body: | + {"index": {"_index": "test_index", "_type": "test_type", "_id": "test_id"}} + {"f1": "v1", "f2": 42} + {"index": {"_index": "test_index", "_type": "test_type", "_id": "test_id2"}} + {"f1": "v2", "f2": 47} + + - do: + count: + index: test_index + - match: {count: 2} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/50_wait_for_refresh.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/50_wait_for_refresh.yaml deleted file mode 100644 index e0382c740f4f5..0000000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/50_wait_for_refresh.yaml +++ /dev/null @@ -1,15 +0,0 @@ ---- -"refresh=wait_for waits until changes are visible in search": - - do: - bulk: - refresh: wait_for - body: | - {"index": {"_index": "test_index", "_type": "test_type", "_id": "test_id"}} - {"f1": "v1", "f2": 42} - {"index": {"_index": "test_index", "_type": "test_type", "_id": "test_id2"}} - {"f1": "v2", "f2": 47} - - - do: - count: - index: test_index - - match: {count: 2} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/create/60_refresh.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/create/60_refresh.yaml index e8770a7a48f70..90dc28bcfc083 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/create/60_refresh.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/create/60_refresh.yaml @@ -45,3 +45,42 @@ query: { term: { _id: 2 }} - match: { hits.total: 1 
} + +--- +"When refresh url parameter is an empty string that means \"refresh immediately\"": + - do: + create: + index: test_1 + type: test + id: 1 + refresh: "" + body: { foo: bar } + - is_true: forced_refresh + + - do: + search: + index: test_1 + type: test + body: + query: { term: { _id: 1 }} + + - match: { hits.total: 1 } + +--- +"refresh=wait_for waits until changes are visible in search": + - do: + index: + index: test_1 + type: test + id: 1 + body: { foo: bar } + refresh: wait_for + - is_false: forced_refresh + + - do: + search: + index: test_1 + type: test + body: + query: { term: { _id: 1 }} + - match: { hits.total: 1 } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/delete/50_refresh.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/delete/50_refresh.yaml index ff6a63bbe0ae5..9ea6bc033decd 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/delete/50_refresh.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/delete/50_refresh.yaml @@ -77,6 +77,40 @@ - match: { hits.total: 1 } +--- +"When refresh url parameter is an empty string that means \"refresh immediately\"": + - do: + index: + index: test_1 + type: test + id: 1 + body: { foo: bar } + refresh: true + - is_true: forced_refresh + + - do: + search: + index: test_1 + type: test + body: + query: { term: { _id: 1 }} + - match: { hits.total: 1 } + + - do: + delete: + index: test_1 + type: test + id: 1 + refresh: "" + + - do: + search: + index: test_1 + type: test + body: + query: { term: { _id: 1 }} + - match: { hits.total: 0 } + --- "refresh=wait_for waits until changes are visible in search": - do: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/index/60_refresh.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/index/60_refresh.yaml index 1f5953876d8ea..4ee2641143279 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/index/60_refresh.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/index/60_refresh.yaml @@ -46,6 +46,26 @@ - match: { hits.total: 1 } +--- +"When refresh url parameter is an empty string that means \"refresh immediately\"": + - do: + index: + index: test_1 + type: test + id: 1 + refresh: "" + body: { foo: bar } + - is_true: forced_refresh + + - do: + search: + index: test_1 + type: test + body: + query: { term: { _id: 1 }} + + - match: { hits.total: 1 } + --- "refresh=wait_for waits until changes are visible in search": - do: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/update/60_refresh.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/update/60_refresh.yaml index 31a880c7ab66a..8c0e7e66c9740 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/update/60_refresh.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/update/60_refresh.yaml @@ -50,6 +50,36 @@ - match: { hits.total: 1 } +--- +"When refresh url parameter is an empty string that means \"refresh immediately\"": + - do: + index: + index: test_1 + type: test + id: 1 + refresh: true + body: { foo: bar } + - is_true: forced_refresh + + - do: + update: + index: test_1 + type: test + id: 1 + refresh: "" + body: + doc: {cat: dog} + - is_true: forced_refresh + + - do: + search: + index: test_1 + type: test + body: + query: { term: { cat: dog }} + + - match: { hits.total: 1 } + --- "refresh=wait_for waits until changes are visible in search": - do: From 788164b898a6ee2878a273961230122b7386c3c9 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Thu, 2 Jun 2016 10:01:01 -0400 Subject: [PATCH 74/86] 
S/ReplicatedWriteResponse/WriteResponse/ Now it lines up with WriteRequest. --- .../org/elasticsearch/action/DocWriteResponse.java | 4 ++-- .../elasticsearch/action/bulk/BulkShardResponse.java | 4 ++-- ...plicatedWriteResponse.java => WriteResponse.java} | 12 ++++++++---- .../support/replication/TransportWriteAction.java | 3 ++- 4 files changed, 14 insertions(+), 9 deletions(-) rename core/src/main/java/org/elasticsearch/action/support/{replication/ReplicatedWriteResponse.java => WriteResponse.java} (71%) diff --git a/core/src/main/java/org/elasticsearch/action/DocWriteResponse.java b/core/src/main/java/org/elasticsearch/action/DocWriteResponse.java index 1ab1d872c4828..0925c7441446a 100644 --- a/core/src/main/java/org/elasticsearch/action/DocWriteResponse.java +++ b/core/src/main/java/org/elasticsearch/action/DocWriteResponse.java @@ -19,8 +19,8 @@ package org.elasticsearch.action; import org.elasticsearch.action.support.WriteRequest; +import org.elasticsearch.action.support.WriteResponse; import org.elasticsearch.action.support.WriteRequest.RefreshPolicy; -import org.elasticsearch.action.support.replication.ReplicatedWriteResponse; import org.elasticsearch.action.support.replication.ReplicationResponse; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -35,7 +35,7 @@ /** * A base class for the response of a write operation that involves a single doc */ -public abstract class DocWriteResponse extends ReplicationResponse implements ReplicatedWriteResponse, StatusToXContent { +public abstract class DocWriteResponse extends ReplicationResponse implements WriteResponse, StatusToXContent { private ShardId shardId; private String id; diff --git a/core/src/main/java/org/elasticsearch/action/bulk/BulkShardResponse.java b/core/src/main/java/org/elasticsearch/action/bulk/BulkShardResponse.java index 6b6461001312f..22260181bb175 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/BulkShardResponse.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/BulkShardResponse.java @@ -20,7 +20,7 @@ package org.elasticsearch.action.bulk; import org.elasticsearch.action.DocWriteResponse; -import org.elasticsearch.action.support.replication.ReplicatedWriteResponse; +import org.elasticsearch.action.support.WriteResponse; import org.elasticsearch.action.support.replication.ReplicationResponse; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -31,7 +31,7 @@ /** * */ -public class BulkShardResponse extends ReplicationResponse implements ReplicatedWriteResponse { +public class BulkShardResponse extends ReplicationResponse implements WriteResponse { private ShardId shardId; private BulkItemResponse[] responses; diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/ReplicatedWriteResponse.java b/core/src/main/java/org/elasticsearch/action/support/WriteResponse.java similarity index 71% rename from core/src/main/java/org/elasticsearch/action/support/replication/ReplicatedWriteResponse.java rename to core/src/main/java/org/elasticsearch/action/support/WriteResponse.java index b928058421ec5..07f5ea695d924 100644 --- a/core/src/main/java/org/elasticsearch/action/support/replication/ReplicatedWriteResponse.java +++ b/core/src/main/java/org/elasticsearch/action/support/WriteResponse.java @@ -17,16 +17,20 @@ * under the License. 
*/ -package org.elasticsearch.action.support.replication; +package org.elasticsearch.action.support; -import org.elasticsearch.action.support.WriteRequest; +import org.elasticsearch.action.DocWriteResponse; +import org.elasticsearch.action.bulk.BulkResponse; +import org.elasticsearch.action.index.IndexResponse; import org.elasticsearch.action.support.WriteRequest.RefreshPolicy; +import org.elasticsearch.action.update.UpdateResponse; import org.elasticsearch.index.IndexSettings; /** - * Interface for responses that modify data in some shard like delete, index, and shardBulk. + * Interface implemented by responses for actions that modify the documents in an index like {@link IndexResponse}, {@link UpdateResponse}, + * and {@link BulkResponse}. Rather than implement this directly most implementers should extend {@link DocWriteResponse}. */ -public interface ReplicatedWriteResponse { +public interface WriteResponse { /** * Mark the response as having forced a refresh? Requests that set {@link WriteRequest#setRefreshPolicy(RefreshPolicy)} to * {@link RefreshPolicy#IMMEDIATE} should always mark this as true. Requests that set it to {@link RefreshPolicy#WAIT_UNTIL} will only diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java b/core/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java index 55195d2434b1a..e68b36c4a4513 100644 --- a/core/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java +++ b/core/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java @@ -21,6 +21,7 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.WriteResponse; import org.elasticsearch.action.support.WriteRequest.RefreshPolicy; import org.elasticsearch.cluster.action.shard.ShardStateAction; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; @@ -44,7 +45,7 @@ */ public abstract class TransportWriteAction< Request extends ReplicatedWriteRequest, - Response extends ReplicationResponse & ReplicatedWriteResponse + Response extends ReplicationResponse & WriteResponse > extends TransportReplicationAction { protected TransportWriteAction(Settings settings, String actionName, TransportService transportService, From 00d09a9caa638b6f90f4896b5502dd98d8fad56e Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Thu, 2 Jun 2016 10:08:28 -0400 Subject: [PATCH 75/86] Improve comment --- .../action/support/replication/TransportWriteAction.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java b/core/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java index e68b36c4a4513..9714c07330b7f 100644 --- a/core/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java +++ b/core/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java @@ -172,7 +172,7 @@ public void respond(ActionListener listener) { @Override public void forcedRefresh() { - // We don't have anywhere to store this so we just throw it on the floor for now. + // It'd be nice to mark this in the response but logging when wait for refresh forced it is OK. 
} @Override From aeb1be3f2c501990b33fb1f8230d496035f498ef Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Thu, 2 Jun 2016 10:12:27 -0400 Subject: [PATCH 76/86] Remove checkstyle suppression It is fixed --- buildSrc/src/main/resources/checkstyle_suppressions.xml | 1 - 1 file changed, 1 deletion(-) diff --git a/buildSrc/src/main/resources/checkstyle_suppressions.xml b/buildSrc/src/main/resources/checkstyle_suppressions.xml index 66bbce92a321a..0046d6bef9b1a 100644 --- a/buildSrc/src/main/resources/checkstyle_suppressions.xml +++ b/buildSrc/src/main/resources/checkstyle_suppressions.xml @@ -107,7 +107,6 @@ - From 0cd67b947f58867e704a1f0e66928a6fb5a11f11 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Thu, 2 Jun 2016 10:26:23 -0400 Subject: [PATCH 77/86] Deprecate setRefresh(boolean) Users should use `setRefresh(RefreshPolicy)` instead. --- .../org/elasticsearch/gradle/BuildPlugin.groovy | 2 +- .../action/support/WriteRequestBuilder.java | 11 +++++++---- 2 files changed, 8 insertions(+), 5 deletions(-) diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy index 096c445934161..36986544a97ad 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy @@ -347,7 +347,7 @@ class BuildPlugin implements Plugin { * -serial because we don't use java serialization. */ // don't even think about passing args with -J-xxx, oracle will ask you to submit a bug report :) - options.compilerArgs << '-Werror' << '-Xlint:all,-path,-serial' << '-Xdoclint:all' << '-Xdoclint:-missing' + options.compilerArgs << '-Werror' << '-Xlint:all,-path,-serial,-deprecation' << '-Xdoclint:all' << '-Xdoclint:-missing' // compile with compact 3 profile by default // NOTE: this is just a compile time check: does not replace testing with a compact3 JRE if (project.compactProfile != 'full') { diff --git a/core/src/main/java/org/elasticsearch/action/support/WriteRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/support/WriteRequestBuilder.java index 19ba4dc9f0765..a87fd04345213 100644 --- a/core/src/main/java/org/elasticsearch/action/support/WriteRequestBuilder.java +++ b/core/src/main/java/org/elasticsearch/action/support/WriteRequestBuilder.java @@ -19,6 +19,7 @@ package org.elasticsearch.action.support; +import org.elasticsearch.Version; import org.elasticsearch.action.support.WriteRequest.RefreshPolicy; public interface WriteRequestBuilder> { @@ -37,11 +38,13 @@ default B setRefreshPolicy(RefreshPolicy refreshPolicy) { /** * If set to true then this request will force an immediate refresh. Backwards compatibility layer for Elasticsearch's old * {@code setRefresh} calls. + * + * @deprecated use {@link #setRefreshPolicy(RefreshPolicy)} with {@link RefreshPolicy#IMMEDIATE} or {@link RefreshPolicy#NONE} instead. + * Will be removed in 6.0. */ - // NOCOMMIT deprecate or just remove this - @SuppressWarnings("unchecked") + @Deprecated default B setRefresh(boolean refresh) { - request().setRefreshPolicy(refresh ? RefreshPolicy.IMMEDIATE : RefreshPolicy.NONE); - return (B) this; + assert Version.CURRENT.major < 6 : "Remove setRefresh(boolean) in 6.0"; + return setRefreshPolicy(refresh ? 
RefreshPolicy.IMMEDIATE : RefreshPolicy.NONE); } } From 522ecb59d39b3c9e8df0d3b8df34b9e7aeaf0ce9 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Thu, 2 Jun 2016 10:30:18 -0400 Subject: [PATCH 78/86] Document deprecation --- docs/reference/migration/migrate_5_0/java.asciidoc | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/docs/reference/migration/migrate_5_0/java.asciidoc b/docs/reference/migration/migrate_5_0/java.asciidoc index d2bdf5528ac97..6fda4c138210e 100644 --- a/docs/reference/migration/migrate_5_0/java.asciidoc +++ b/docs/reference/migration/migrate_5_0/java.asciidoc @@ -298,3 +298,8 @@ The `setQuery(BytesReference)` method have been removed in favor of using `setQu Removed the `getMemoryAvailable` method from `OsStats`, which could be previously accessed calling `clusterStatsResponse.getNodesStats().getOs().getMemoryAvailable()`. + +=== setRefresh(boolean) has been deprecated + +`setRefresh(boolean)` has been deprecated in favor of `setRefreshPolicy(RefreshPolicy)` because there +are now three options. It will be removed in 5.0. From 9e63ad6de52d0b28f0b6d7203721baf1ebf6f56b Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Thu, 2 Jun 2016 13:21:27 -0400 Subject: [PATCH 79/86] Test for TransportWriteAction --- .../replication/TransportWriteAction.java | 39 +++- .../TransportWriteActionTests.java | 187 ++++++++++++++++++ 2 files changed, 217 insertions(+), 9 deletions(-) create mode 100644 core/src/test/java/org/elasticsearch/action/support/replication/TransportWriteActionTests.java diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java b/core/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java index 9714c07330b7f..559f439e9e99f 100644 --- a/core/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java +++ b/core/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java @@ -70,22 +70,27 @@ protected TransportWriteAction(Settings settings, String actionName, TransportSe @Override protected final WritePrimaryResult shardOperationOnPrimary(Request request) throws Exception { - final ShardId shardId = request.shardId(); - IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex()); - IndexShard indexShard = indexService.getShard(shardId.id()); + IndexShard indexShard = indexShard(request); WriteResult result = onPrimaryShard(request, indexShard); return new WritePrimaryResult(request, result.getResponse(), result.getLocation(), indexShard); } @Override protected final WriteReplicaResult shardOperationOnReplica(Request request) { - final ShardId shardId = request.shardId(); - IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex()); - IndexShard indexShard = indexService.getShard(shardId.id()); + IndexShard indexShard = indexShard(request); Translog.Location location = onReplicaShard(request, indexShard); return new WriteReplicaResult(indexShard, request, location); } + /** + * Fetch the IndexShard for the request. Protected so it can be mocked in tests. + */ + protected IndexShard indexShard(Request request) { + final ShardId shardId = request.shardId(); + IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex()); + return indexService.getShard(shardId.id()); + } + /** * Simple result from a write action. Write actions have static method to return these so they can integrate with bulk. 
*/ @@ -107,7 +112,10 @@ public Translog.Location getLocation() { } } - private class WritePrimaryResult extends PrimaryResult implements RespondingWriteResult { + /** + * Result of taking the action on the primary. + */ + private class WritePrimaryResult extends PrimaryResult implements RespondingWriteResult { private final ReplicatedWriteRequest request; volatile boolean finishedWrite; volatile ActionListener listener = null; @@ -117,6 +125,10 @@ public WritePrimaryResult(Request request, Response finalResponse, IndexShard indexShard) { super(request, finalResponse); this.request = request; + /* + * We call this before replication because this might wait for a refresh and that can take a while. This way we wait for the + * refresh in parallel on the primary and on the replica. + */ finishWrite(indexShard, request, location); } @@ -152,7 +164,10 @@ public synchronized void finished() { } } - private class WriteReplicaResult extends ReplicaResult implements RespondingWriteResult { + /** + * Result of taking the action on the replica. + */ + private class WriteReplicaResult extends ReplicaResult implements RespondingWriteResult { private final IndexShard indexShard; private final ReplicatedWriteRequest request; private final Translog.Location location; @@ -186,7 +201,13 @@ public void finished() { } } - private interface RespondingWriteResult { + /** + * Duplicate code shared between WritePrimaryResult and WriteReplicaResult. Implemented as an interface because it lets us instantiate a + * few fewer classes during this process. Package private for testing. + */ + interface RespondingWriteResult { + void respond(ActionListener listener); + /** * Finish up the write by syncing the translog, flushing, and refreshing or waiting for a refresh. Called on both the primary and * the replica. diff --git a/core/src/test/java/org/elasticsearch/action/support/replication/TransportWriteActionTests.java b/core/src/test/java/org/elasticsearch/action/support/replication/TransportWriteActionTests.java new file mode 100644 index 0000000000000..3a0aa68eabdf9 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/action/support/replication/TransportWriteActionTests.java @@ -0,0 +1,187 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.action.support.replication; + +import java.util.HashSet; +import java.util.function.BiConsumer; +import java.util.function.Consumer; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.WriteRequest.RefreshPolicy; +import org.elasticsearch.action.support.WriteResponse; +import org.elasticsearch.action.support.replication.TransportWriteAction.RespondingWriteResult; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.shard.IndexShard; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.index.translog.Translog; +import org.elasticsearch.index.translog.Translog.Location; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.junit.Before; +import org.mockito.ArgumentCaptor; + +import static org.mockito.Matchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.verify; + +public class TransportWriteActionTests extends ESTestCase { + private IndexShard indexShard; + private Translog.Location location; + + @Before + public void initCommonMocks() { + indexShard = mock(IndexShard.class); + location = mock(Translog.Location.class); + } + + public void testPrimaryNoRefreshCall() throws Exception { + noRefreshCall(TestAction::shardOperationOnPrimary, r -> assertFalse(r.forcedRefresh)); + } + + public void testReplicaNoRefreshCall() throws Exception { + noRefreshCall(TestAction::shardOperationOnReplica, r -> {}); + } + + private void noRefreshCall(ThrowingBiFunction> action, Consumer resultChecker) + throws Exception { + TestRequest request = new TestRequest(); + request.setRefreshPolicy(RefreshPolicy.NONE); // The default, but we'll set it anyway just to be explicit + RespondingWriteResult result = action.apply(new TestAction(), request); + CapturingActionListener listener = new CapturingActionListener<>(); + result.respond(listener); + assertNotNull(listener.response); + verify(indexShard, never()).refresh(any()); + verify(indexShard, never()).addRefreshListener(any(), any()); + } + + public void testPrimaryImmediateRefresh() throws Exception { + immediateRefresh(TestAction::shardOperationOnPrimary, r -> assertTrue(r.forcedRefresh)); + } + + public void testReplicaImmediateRefresh() throws Exception { + immediateRefresh(TestAction::shardOperationOnReplica, r -> {}); + } + + private void immediateRefresh(ThrowingBiFunction> action, + Consumer resultChecker) throws Exception { + TestRequest request = new TestRequest(); + request.setRefreshPolicy(RefreshPolicy.IMMEDIATE); + RespondingWriteResult result = action.apply(new TestAction(), request); + CapturingActionListener listener = new CapturingActionListener<>(); + result.respond(listener); + assertNotNull(listener.response); + resultChecker.accept(listener.response); + verify(indexShard).refresh("refresh_flag_index"); + verify(indexShard, never()).addRefreshListener(any(), any()); + } + + public void testPrimaryWaitForRefresh() throws Exception { + waitForRefresh(TestAction::shardOperationOnPrimary, (r, forcedRefresh) -> assertEquals(forcedRefresh, r.forcedRefresh)); + } + + public void testReplicaWaitForRefresh() throws Exception { + waitForRefresh(TestAction::shardOperationOnReplica, (r, forcedRefresh) -> {}); + } + + private 
void waitForRefresh(ThrowingBiFunction> action, + BiConsumer resultChecker) throws Exception { + TestRequest request = new TestRequest(); + request.setRefreshPolicy(RefreshPolicy.WAIT_UNTIL); + RespondingWriteResult result = action.apply(new TestAction(), request); + CapturingActionListener listener = new CapturingActionListener<>(); + result.respond(listener); + assertNull(listener.response); // Haven't reallresponded yet + + @SuppressWarnings({ "unchecked", "rawtypes" }) + ArgumentCaptor> refreshListener = ArgumentCaptor.forClass((Class) Consumer.class); + verify(indexShard, never()).refresh(any()); + verify(indexShard).addRefreshListener(any(), refreshListener.capture()); + + // Now we can fire the listener manually and we'll get a response + boolean forcedRefresh = randomBoolean(); + refreshListener.getValue().accept(forcedRefresh); + assertNotNull(listener.response); + resultChecker.accept(listener.response, forcedRefresh); + } + + private class TestAction extends TransportWriteAction { + protected TestAction() { + super(Settings.EMPTY, "test", mock(TransportService.class), null, null, null, null, new ActionFilters(new HashSet<>()), + new IndexNameExpressionResolver(Settings.EMPTY), TestRequest::new, ThreadPool.Names.SAME); + } + + @Override + protected IndexShard indexShard(TestRequest request) { + return indexShard; + } + + @Override + protected WriteResult onPrimaryShard(TestRequest request, IndexShard indexShard) throws Exception { + return new WriteResult<>(new TestResponse(), location); + } + + @Override + protected Location onReplicaShard(TestRequest request, IndexShard indexShard) { + return location; + } + + @Override + protected TestResponse newResponseInstance() { + return new TestResponse(); + } + } + + private static class TestRequest extends ReplicatedWriteRequest { + public TestRequest() { + setShardId(new ShardId("test", "test", 1)); + } + } + + private static class TestResponse extends ReplicationResponse implements WriteResponse { + boolean forcedRefresh; + + @Override + public void setForcedRefresh(boolean forcedRefresh) { + this.forcedRefresh = forcedRefresh; + } + } + + private static class CapturingActionListener implements ActionListener { + private R response; + + @Override + public void onResponse(R response) { + this.response = response; + } + + @Override + public void onFailure(Throwable e) { + throw new RuntimeException(e); + } + } + + private interface ThrowingBiFunction { + R apply(A a, B b) throws Exception; + } +} From 9c9a1deb002c5bebb2a997c89fa12b3d7978e02e Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Thu, 2 Jun 2016 14:09:14 -0400 Subject: [PATCH 80/86] Breaking changes notes --- docs/reference/migration/migrate_5_0/docs.asciidoc | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/docs/reference/migration/migrate_5_0/docs.asciidoc b/docs/reference/migration/migrate_5_0/docs.asciidoc index 85e4e901e5cb7..9149eed6142b5 100644 --- a/docs/reference/migration/migrate_5_0/docs.asciidoc +++ b/docs/reference/migration/migrate_5_0/docs.asciidoc @@ -1,6 +1,16 @@ [[breaking_50_document_api_changes]] === Document API changes +==== `?refresh` no longer supports truthy and falsy values +The `?refresh` request parameter used to accept any value other than `false`, +`0`, `off`, and `no` to mean "make the changes from this request visible for +search immediately." Now it only accepts `?refresh` and `?refresh=true` to +mean that. You can set it to `?refresh=false` and the request will take no +refresh-related action. 
The same is true if you leave `refresh` off of the +url entirely. If you add `?refresh=wait_for` Elasticsearch will wait for the +changes to become visible before replying to the request but won't take any +immediate refresh related action. See <>. + ==== Reindex and Update By Query Before 5.0.0 `_reindex` and `_update_by_query` only retried bulk failures so they used the following response format: From 03975ac056e44954eb0a371149d410dcf303e212 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Thu, 2 Jun 2016 14:20:11 -0400 Subject: [PATCH 81/86] Cleanup after merge from master --- .../java/org/elasticsearch/index/translog/TranslogWriter.java | 1 - .../src/test/java/org/elasticsearch/aliases/IndexAliasesIT.java | 2 +- .../org/elasticsearch/index/shard/RefreshListenersTests.java | 2 +- 3 files changed, 2 insertions(+), 3 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/index/translog/TranslogWriter.java b/core/src/main/java/org/elasticsearch/index/translog/TranslogWriter.java index 6f7dd14634c59..b2c0cc88cf909 100644 --- a/core/src/main/java/org/elasticsearch/index/translog/TranslogWriter.java +++ b/core/src/main/java/org/elasticsearch/index/translog/TranslogWriter.java @@ -138,7 +138,6 @@ public synchronized Translog.Location add(BytesReference data) throws IOExceptio } totalOffset += data.length(); operationCounter++; - ESLoggerFactory.getLogger("TMP").error("Advanced position from [{}] to [{}]", offset, totalOffset); return new Translog.Location(generation, offset, data.length()); } diff --git a/core/src/test/java/org/elasticsearch/aliases/IndexAliasesIT.java b/core/src/test/java/org/elasticsearch/aliases/IndexAliasesIT.java index 724cd9860a13a..2a16625d03726 100644 --- a/core/src/test/java/org/elasticsearch/aliases/IndexAliasesIT.java +++ b/core/src/test/java/org/elasticsearch/aliases/IndexAliasesIT.java @@ -142,7 +142,7 @@ public void testFilteringAliases() throws Exception { ensureGreen(); logger.info("--> aliasing index [test] with [alias1] and filter [user:kimchy]"); - QueryBuilder filter = termQuery("user", "kimchy"); + QueryBuilder filter = termQuery("user", "kimchy"); assertAcked(admin().indices().prepareAliases().addAlias("test", "alias1", filter)); // For now just making sure that filter was stored with the alias diff --git a/core/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java b/core/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java index a7862592954a0..5c83e6805ab28 100644 --- a/core/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java +++ b/core/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java @@ -270,7 +270,7 @@ private Engine.Index index(String id, String testFieldValue) { document.add(uidField); document.add(versionField); BytesReference source = new BytesArray(new byte[] { 1 }); - ParsedDocument doc = new ParsedDocument(uidField, versionField, id, type, null, -1, -1, Arrays.asList(document), source, null); + ParsedDocument doc = new ParsedDocument(versionField, id, type, null, -1, -1, Arrays.asList(document), source, null); Engine.Index index = new Engine.Index(new Term("_uid", uid), doc); engine.index(index); return index; From c2bc36524fda119fd0514415127e8901d94409c8 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Thu, 2 Jun 2016 14:46:27 -0400 Subject: [PATCH 82/86] Fix docs After updating to master we are actually testing them. 
--- docs/reference/docs/index_.asciidoc | 6 ++++-- docs/reference/docs/refresh.asciidoc | 6 +++--- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/docs/reference/docs/index_.asciidoc b/docs/reference/docs/index_.asciidoc index 64e8ca9ee9b2e..aa62b65292e5c 100644 --- a/docs/reference/docs/index_.asciidoc +++ b/docs/reference/docs/index_.asciidoc @@ -30,7 +30,8 @@ The result of the above index operation is: "_type" : "tweet", "_id" : "1", "_version" : 1, - "created" : true + "created" : true, + "forced_refresh": false } -------------------------------------------------- // TESTRESPONSE[s/"successful" : 2/"successful" : 1/] @@ -221,7 +222,8 @@ The result of the above index operation is: "_type" : "tweet", "_id" : "6a8ca01c-7896-48e9-81cc-9f70661fcb32", "_version" : 1, - "created" : true + "created" : true, + "forced_refresh": false } -------------------------------------------------- // TESTRESPONSE[s/6a8ca01c-7896-48e9-81cc-9f70661fcb32/$body._id/ s/"successful" : 2/"successful" : 1/] diff --git a/docs/reference/docs/refresh.asciidoc b/docs/reference/docs/refresh.asciidoc index cbe2d44b5af8d..3e5153341c8ae 100644 --- a/docs/reference/docs/refresh.asciidoc +++ b/docs/reference/docs/refresh.asciidoc @@ -78,7 +78,7 @@ many times they modify the shard. These will create a document and immediately refresh the index so it is visible: -[source,json] +[source,js] -------------------------------------------------- PUT /test/test/1?refresh {"test": "test"} @@ -90,7 +90,7 @@ PUT /test/test/2?refresh=true These will create a document without doing anything to make it visible for search: -[source,json] +[source,js] -------------------------------------------------- PUT /test/test/3 {"test": "test"} @@ -101,7 +101,7 @@ PUT /test/test/4?refresh=true This will create a document and wait for it to become visible for search: -[source,json] +[source,js] -------------------------------------------------- PUT /test/test/4?refresh=wait_for {"test": "test"} From 9b49a480ca9587a0a16ebe941662849f38289644 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Mon, 6 Jun 2016 08:25:38 -0400 Subject: [PATCH 83/86] Patch from boaz --- .../replication/TransportWriteAction.java | 133 ++++++------------ .../TransportWriteActionTests.java | 55 ++++---- 2 files changed, 73 insertions(+), 115 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java b/core/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java index 559f439e9e99f..6861224e6a5b1 100644 --- a/core/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java +++ b/core/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java @@ -21,12 +21,13 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.action.support.WriteResponse; -import org.elasticsearch.action.support.WriteRequest.RefreshPolicy; import org.elasticsearch.cluster.action.shard.ShardStateAction; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.shard.IndexShard; @@ -38,6 +39,7 @@ import org.elasticsearch.transport.TransportResponse; 
import org.elasticsearch.transport.TransportService; +import java.util.function.Consumer; import java.util.function.Supplier; /** @@ -115,21 +117,19 @@ public Translog.Location getLocation() { /** * Result of taking the action on the primary. */ - private class WritePrimaryResult extends PrimaryResult implements RespondingWriteResult { - private final ReplicatedWriteRequest request; - volatile boolean finishedWrite; - volatile ActionListener listener = null; + class WritePrimaryResult extends PrimaryResult { + boolean finishedAsyncActions; + ActionListener listener = null; public WritePrimaryResult(Request request, Response finalResponse, @Nullable Translog.Location location, IndexShard indexShard) { super(request, finalResponse); - this.request = request; /* * We call this before replication because this might wait for a refresh and that can take a while. This way we wait for the * refresh in parallel on the primary and on the replica. */ - finishWrite(indexShard, request, location); + postWriteActions(indexShard, request, location, this::onFinishedAsyncActions, logger); } @Override @@ -142,24 +142,14 @@ public synchronized void respond(ActionListener listener) { * Respond if the refresh has occurred and the listener is ready. Always called while synchronized on {@code this}. */ protected void respondIfPossible() { - if (finishedWrite && listener != null) { + if (finishedAsyncActions && listener != null) { super.respond(listener); } } - @Override - public void forcedRefresh() { - finalResponse.setForcedRefresh(true); - } - - @Override - public void waitForRefreshForcedRefresh() { - logger.warn("block_until_refresh request ran out of slots and forced a refresh: [{}]", request); - } - - @Override - public synchronized void finished() { - finishedWrite = true; + public synchronized void onFinishedAsyncActions(boolean forcedRefresh) { + finalResponse.setForcedRefresh(forcedRefresh); + finishedAsyncActions = true; respondIfPossible(); } } @@ -167,101 +157,66 @@ public synchronized void finished() { /** * Result of taking the action on the replica. */ - private class WriteReplicaResult extends ReplicaResult implements RespondingWriteResult { - private final IndexShard indexShard; - private final ReplicatedWriteRequest request; - private final Translog.Location location; + class WriteReplicaResult extends ReplicaResult { + boolean finishedAsyncActions; private ActionListener listener; public WriteReplicaResult(IndexShard indexShard, ReplicatedWriteRequest request, Translog.Location location) { - this.indexShard = indexShard; - this.request = request; - this.location = location; + postWriteActions(indexShard, request, location, this::onFinishedAsyncActions, logger); } @Override public void respond(ActionListener listener) { this.listener = listener; - finishWrite(indexShard, request, location); - } - - @Override - public void forcedRefresh() { - // It'd be nice to mark this in the response but logging when wait for refresh forced it is OK. + respondIfPossible(); } - @Override - public void waitForRefreshForcedRefresh() { - logger.warn("block_until_refresh request ran out of slots and forced a refresh: [{}]", request); + /** + * Respond if the refresh has occurred and the listener is ready. Always called while synchronized on {@code this}. 
+ */ + protected void respondIfPossible() { + if (finishedAsyncActions && listener != null) { + super.respond(listener); + } } - @Override - public void finished() { - listener.onResponse(TransportResponse.Empty.INSTANCE); + public synchronized void onFinishedAsyncActions(boolean forcedRefresh) { + finishedAsyncActions = true; + respondIfPossible(); } } - /** - * Duplicate code shared between WritePrimaryResult and WriteReplicaResult. Implemented as an interface because it lets us instantiate a - * few fewer classes during this process. Package private for testing. - */ - interface RespondingWriteResult { - void respond(ActionListener listener); - - /** - * Finish up the write by syncing the translog, flushing, and refreshing or waiting for a refresh. Called on both the primary and - * the replica. - */ - default void finishWrite(IndexShard indexShard, ReplicatedWriteRequest request, Translog.Location location) { - boolean refreshPending = false; - switch (request.getRefreshPolicy()) { + static void postWriteActions(final IndexShard indexShard, + final WriteRequest request, + @Nullable final Translog.Location location, + final Consumer onDone, + final ESLogger logger) { + boolean pendingOps = false; + boolean immediateRefresh = false; + switch (request.getRefreshPolicy()) { case IMMEDIATE: indexShard.refresh("refresh_flag_index"); - forcedRefresh(); + immediateRefresh = true; break; case WAIT_UNTIL: if (location != null) { - refreshPending = true; + pendingOps = true; indexShard.addRefreshListener(location, forcedRefresh -> { - if (forcedRefresh) { - forcedRefresh(); - waitForRefreshForcedRefresh(); - } - finish(indexShard, location); + logger.warn("block_until_refresh request ran out of slots and forced a refresh: [{}]", request); + onDone.accept(forcedRefresh); }); } break; case NONE: break; - } - if (false == refreshPending) { - finish(indexShard, location); - } } - - default void finish(IndexShard indexShard, Translog.Location location) { - boolean fsyncTranslog = indexShard.getTranslogDurability() == Translog.Durability.REQUEST && location != null; - if (fsyncTranslog) { - indexShard.sync(location); - } - indexShard.maybeFlush(); - finished(); + boolean fsyncTranslog = indexShard.getTranslogDurability() == Translog.Durability.REQUEST && location != null; + if (fsyncTranslog) { + indexShard.sync(location); + } + indexShard.maybeFlush(); + if (pendingOps == false) { + onDone.accept(immediateRefresh); } - - /** - * Called either when the request forces a refresh via {@link RefreshPolicy#IMMEDIATE} or when we run out of listeners slots while - * attempting to honor {@link RefreshPolicy#WAIT_UNTIL}. - */ - void forcedRefresh(); - - /** - * Called when we run out of listeners slots while attempting to honor {@link RefreshPolicy#WAIT_UNTIL}. - */ - void waitForRefreshForcedRefresh(); - - /** - * Called when we are finished waiting for a refresh. This is never called if we don't wait for a refresh. 
- */ - void finished(); } } diff --git a/core/src/test/java/org/elasticsearch/action/support/replication/TransportWriteActionTests.java b/core/src/test/java/org/elasticsearch/action/support/replication/TransportWriteActionTests.java index 3a0aa68eabdf9..7b312959631a7 100644 --- a/core/src/test/java/org/elasticsearch/action/support/replication/TransportWriteActionTests.java +++ b/core/src/test/java/org/elasticsearch/action/support/replication/TransportWriteActionTests.java @@ -19,15 +19,10 @@ package org.elasticsearch.action.support.replication; -import java.util.HashSet; -import java.util.function.BiConsumer; -import java.util.function.Consumer; - import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.WriteRequest.RefreshPolicy; import org.elasticsearch.action.support.WriteResponse; -import org.elasticsearch.action.support.replication.TransportWriteAction.RespondingWriteResult; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.shard.IndexShard; @@ -40,6 +35,10 @@ import org.junit.Before; import org.mockito.ArgumentCaptor; +import java.util.HashSet; +import java.util.function.BiConsumer; +import java.util.function.Consumer; + import static org.mockito.Matchers.any; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.never; @@ -56,61 +55,65 @@ public void initCommonMocks() { } public void testPrimaryNoRefreshCall() throws Exception { - noRefreshCall(TestAction::shardOperationOnPrimary, r -> assertFalse(r.forcedRefresh)); + noRefreshCall(TestAction::shardOperationOnPrimary, TestAction.WritePrimaryResult::respond); } public void testReplicaNoRefreshCall() throws Exception { - noRefreshCall(TestAction::shardOperationOnReplica, r -> {}); + noRefreshCall(TestAction::shardOperationOnReplica, TestAction.WriteReplicaResult::respond); } - private void noRefreshCall(ThrowingBiFunction> action, Consumer resultChecker) + private void noRefreshCall(ThrowingBiFunction action, + BiConsumer> responder) throws Exception { TestRequest request = new TestRequest(); request.setRefreshPolicy(RefreshPolicy.NONE); // The default, but we'll set it anyway just to be explicit - RespondingWriteResult result = action.apply(new TestAction(), request); - CapturingActionListener listener = new CapturingActionListener<>(); - result.respond(listener); + Result result = action.apply(new TestAction(), request); + CapturingActionListener listener = new CapturingActionListener<>(); + responder.accept(result, listener); assertNotNull(listener.response); verify(indexShard, never()).refresh(any()); verify(indexShard, never()).addRefreshListener(any(), any()); } public void testPrimaryImmediateRefresh() throws Exception { - immediateRefresh(TestAction::shardOperationOnPrimary, r -> assertTrue(r.forcedRefresh)); + immediateRefresh(TestAction::shardOperationOnPrimary, TestAction.WritePrimaryResult::respond, r -> assertTrue(r.forcedRefresh)); } public void testReplicaImmediateRefresh() throws Exception { - immediateRefresh(TestAction::shardOperationOnReplica, r -> {}); + immediateRefresh(TestAction::shardOperationOnReplica, TestAction.WriteReplicaResult::respond, r -> {}); } - private void immediateRefresh(ThrowingBiFunction> action, - Consumer resultChecker) throws Exception { + private void immediateRefresh(ThrowingBiFunction action, + BiConsumer> responder, + Consumer responseChecker) throws Exception { TestRequest request 
= new TestRequest(); request.setRefreshPolicy(RefreshPolicy.IMMEDIATE); - RespondingWriteResult result = action.apply(new TestAction(), request); - CapturingActionListener listener = new CapturingActionListener<>(); - result.respond(listener); + Result result = action.apply(new TestAction(), request); + CapturingActionListener listener = new CapturingActionListener<>(); + responder.accept(result, listener); assertNotNull(listener.response); - resultChecker.accept(listener.response); + responseChecker.accept(listener.response); verify(indexShard).refresh("refresh_flag_index"); verify(indexShard, never()).addRefreshListener(any(), any()); } public void testPrimaryWaitForRefresh() throws Exception { - waitForRefresh(TestAction::shardOperationOnPrimary, (r, forcedRefresh) -> assertEquals(forcedRefresh, r.forcedRefresh)); + waitForRefresh(TestAction::shardOperationOnPrimary, TestAction.WritePrimaryResult::respond, + (r, forcedRefresh) -> assertEquals(forcedRefresh, r.forcedRefresh)); } public void testReplicaWaitForRefresh() throws Exception { - waitForRefresh(TestAction::shardOperationOnReplica, (r, forcedRefresh) -> {}); + waitForRefresh(TestAction::shardOperationOnReplica, TestAction.WriteReplicaResult::respond, (r, forcedRefresh) -> {}); } - private void waitForRefresh(ThrowingBiFunction> action, - BiConsumer resultChecker) throws Exception { + private void waitForRefresh(ThrowingBiFunction action, + BiConsumer> responder, + BiConsumer resultChecker) throws Exception { TestRequest request = new TestRequest(); request.setRefreshPolicy(RefreshPolicy.WAIT_UNTIL); - RespondingWriteResult result = action.apply(new TestAction(), request); - CapturingActionListener listener = new CapturingActionListener<>(); - result.respond(listener); + Result result = action.apply(new TestAction(), request); + CapturingActionListener listener = new CapturingActionListener<>(); + responder.accept(result, listener); assertNull(listener.response); // Haven't reallresponded yet @SuppressWarnings({ "unchecked", "rawtypes" }) From 777e23a6592c75db0081a53458cc760f4db69507 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Mon, 6 Jun 2016 09:29:35 -0400 Subject: [PATCH 84/86] Replace static method that takes consumer with delegate class that takes an interface Same number of allocations, much less code duplication. --- .../replication/TransportWriteAction.java | 139 ++++++++++-------- 1 file changed, 78 insertions(+), 61 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java b/core/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java index 6861224e6a5b1..3ac4547dff042 100644 --- a/core/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java +++ b/core/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java @@ -39,7 +39,6 @@ import org.elasticsearch.transport.TransportResponse; import org.elasticsearch.transport.TransportService; -import java.util.function.Consumer; import java.util.function.Supplier; /** @@ -117,106 +116,124 @@ public Translog.Location getLocation() { /** * Result of taking the action on the primary. 
*/ - class WritePrimaryResult extends PrimaryResult { - boolean finishedAsyncActions; - ActionListener listener = null; + class WritePrimaryResult extends PrimaryResult implements RespondingWriteResult.RespondAfterAsyncAction { + private final RespondingWriteResult result; public WritePrimaryResult(Request request, Response finalResponse, - @Nullable Translog.Location location, - IndexShard indexShard) { + @Nullable Translog.Location location, + IndexShard indexShard) { super(request, finalResponse); /* - * We call this before replication because this might wait for a refresh and that can take a while. This way we wait for the - * refresh in parallel on the primary and on the replica. + * This *starts* async actions before replication. We do that because this might wait for a refresh and that can take a while. + * This way we wait for the refresh in parallel on the primary and on the replica. */ - postWriteActions(indexShard, request, location, this::onFinishedAsyncActions, logger); + result = new RespondingWriteResult<>(this, indexShard, request, location, logger); } @Override - public synchronized void respond(ActionListener listener) { - this.listener = listener; - respondIfPossible(); + public void respond(ActionListener listener) { + result.setListenerAndRespondIfPossible(listener); } - /** - * Respond if the refresh has occurred and the listener is ready. Always called while synchronized on {@code this}. - */ - protected void respondIfPossible() { - if (finishedAsyncActions && listener != null) { - super.respond(listener); + @Override + public void respondAfterAsyncAction(ActionListener listener) { + if (result.forcedRefresh) { + finalResponse.setForcedRefresh(result.forcedRefresh); } - } - - public synchronized void onFinishedAsyncActions(boolean forcedRefresh) { - finalResponse.setForcedRefresh(forcedRefresh); - finishedAsyncActions = true; - respondIfPossible(); + super.respond(listener); } } /** * Result of taking the action on the replica. */ - class WriteReplicaResult extends ReplicaResult { - boolean finishedAsyncActions; - private ActionListener listener; + class WriteReplicaResult extends ReplicaResult implements RespondingWriteResult.RespondAfterAsyncAction { + private final RespondingWriteResult result; public WriteReplicaResult(IndexShard indexShard, ReplicatedWriteRequest request, Translog.Location location) { - postWriteActions(indexShard, request, location, this::onFinishedAsyncActions, logger); + result = new RespondingWriteResult<>(this, indexShard, request, location, logger); } @Override public void respond(ActionListener listener) { - this.listener = listener; - respondIfPossible(); + result.setListenerAndRespondIfPossible(listener); } - /** - * Respond if the refresh has occurred and the listener is ready. Always called while synchronized on {@code this}. 
- */ - protected void respondIfPossible() { - if (finishedAsyncActions && listener != null) { - super.respond(listener); - } + @Override + public void respondAfterAsyncAction(ActionListener listener) { + super.respond(listener); } + } - public synchronized void onFinishedAsyncActions(boolean forcedRefresh) { - finishedAsyncActions = true; - respondIfPossible(); + private static class RespondingWriteResult { + private interface RespondAfterAsyncAction { + void respondAfterAsyncAction(ActionListener listener); } - } + private final RespondAfterAsyncAction respond; + boolean forcedRefresh; - static void postWriteActions(final IndexShard indexShard, - final WriteRequest request, - @Nullable final Translog.Location location, - final Consumer onDone, - final ESLogger logger) { - boolean pendingOps = false; - boolean immediateRefresh = false; - switch (request.getRefreshPolicy()) { + /** + * Are we waiting for async actions? We only respond when true and listener is set. Set to true either in the constructor or while + * synchronized on {@code this}. + */ + private boolean finishedAsyncActions; + /** + * The listener to respond with. We respond when both finishedAsyncActions is true and the listener has been set. This is only + * modified while synchronized on {@code this}. + */ + private ActionListener listener; + + public RespondingWriteResult(RespondAfterAsyncAction respond, final IndexShard indexShard, + final WriteRequest request, + @Nullable final Translog.Location location, + final ESLogger logger) { + this.respond = respond; + boolean pendingOps = false; + switch (request.getRefreshPolicy()) { case IMMEDIATE: indexShard.refresh("refresh_flag_index"); - immediateRefresh = true; + forcedRefresh = true; break; case WAIT_UNTIL: if (location != null) { - pendingOps = true; - indexShard.addRefreshListener(location, forcedRefresh -> { - logger.warn("block_until_refresh request ran out of slots and forced a refresh: [{}]", request); - onDone.accept(forcedRefresh); - }); + pendingOps = true; + indexShard.addRefreshListener(location, forcedRefresh -> { + logger.warn("block_until_refresh request ran out of slots and forced a refresh: [{}]", request); + RespondingWriteResult.this.forcedRefresh = forcedRefresh; + onFinishedAsyncActions(); + }); } break; case NONE: break; + } + boolean fsyncTranslog = indexShard.getTranslogDurability() == Translog.Durability.REQUEST && location != null; + if (fsyncTranslog) { + indexShard.sync(location); + } + indexShard.maybeFlush(); + if (pendingOps == false) { + onFinishedAsyncActions(); + } } - boolean fsyncTranslog = indexShard.getTranslogDurability() == Translog.Durability.REQUEST && location != null; - if (fsyncTranslog) { - indexShard.sync(location); + + public synchronized void setListenerAndRespondIfPossible(ActionListener listener) { + this.listener = listener; + respondIfPossible(); } - indexShard.maybeFlush(); - if (pendingOps == false) { - onDone.accept(immediateRefresh); + + /** + * Respond if the refresh has occurred and the listener is ready. Always called while synchronized on {@code this}. 
+ */ + private void respondIfPossible() { + if (finishedAsyncActions && listener != null) { + respond.respondAfterAsyncAction(listener); + } + } + + private synchronized void onFinishedAsyncActions() { + finishedAsyncActions = true; + respondIfPossible(); } } } From 31f7861a85b457fb7378a6f27fa0a0c171538f68 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Mon, 6 Jun 2016 10:07:55 -0400 Subject: [PATCH 85/86] Revert "Replace static method that takes consumer with delegate class that takes an interface" This reverts commit 777e23a6592c75db0081a53458cc760f4db69507. --- .../replication/TransportWriteAction.java | 139 ++++++++---------- 1 file changed, 61 insertions(+), 78 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java b/core/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java index 3ac4547dff042..6861224e6a5b1 100644 --- a/core/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java +++ b/core/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java @@ -39,6 +39,7 @@ import org.elasticsearch.transport.TransportResponse; import org.elasticsearch.transport.TransportService; +import java.util.function.Consumer; import java.util.function.Supplier; /** @@ -116,124 +117,106 @@ public Translog.Location getLocation() { /** * Result of taking the action on the primary. */ - class WritePrimaryResult extends PrimaryResult implements RespondingWriteResult.RespondAfterAsyncAction { - private final RespondingWriteResult result; + class WritePrimaryResult extends PrimaryResult { + boolean finishedAsyncActions; + ActionListener listener = null; public WritePrimaryResult(Request request, Response finalResponse, - @Nullable Translog.Location location, - IndexShard indexShard) { + @Nullable Translog.Location location, + IndexShard indexShard) { super(request, finalResponse); /* - * This *starts* async actions before replication. We do that because this might wait for a refresh and that can take a while. - * This way we wait for the refresh in parallel on the primary and on the replica. + * We call this before replication because this might wait for a refresh and that can take a while. This way we wait for the + * refresh in parallel on the primary and on the replica. */ - result = new RespondingWriteResult<>(this, indexShard, request, location, logger); + postWriteActions(indexShard, request, location, this::onFinishedAsyncActions, logger); } @Override - public void respond(ActionListener listener) { - result.setListenerAndRespondIfPossible(listener); + public synchronized void respond(ActionListener listener) { + this.listener = listener; + respondIfPossible(); } - @Override - public void respondAfterAsyncAction(ActionListener listener) { - if (result.forcedRefresh) { - finalResponse.setForcedRefresh(result.forcedRefresh); + /** + * Respond if the refresh has occurred and the listener is ready. Always called while synchronized on {@code this}. + */ + protected void respondIfPossible() { + if (finishedAsyncActions && listener != null) { + super.respond(listener); } - super.respond(listener); + } + + public synchronized void onFinishedAsyncActions(boolean forcedRefresh) { + finalResponse.setForcedRefresh(forcedRefresh); + finishedAsyncActions = true; + respondIfPossible(); } } /** * Result of taking the action on the replica. 
*/ - class WriteReplicaResult extends ReplicaResult implements RespondingWriteResult.RespondAfterAsyncAction { - private final RespondingWriteResult result; + class WriteReplicaResult extends ReplicaResult { + boolean finishedAsyncActions; + private ActionListener listener; public WriteReplicaResult(IndexShard indexShard, ReplicatedWriteRequest request, Translog.Location location) { - result = new RespondingWriteResult<>(this, indexShard, request, location, logger); + postWriteActions(indexShard, request, location, this::onFinishedAsyncActions, logger); } @Override public void respond(ActionListener listener) { - result.setListenerAndRespondIfPossible(listener); + this.listener = listener; + respondIfPossible(); } - @Override - public void respondAfterAsyncAction(ActionListener listener) { - super.respond(listener); + /** + * Respond if the refresh has occurred and the listener is ready. Always called while synchronized on {@code this}. + */ + protected void respondIfPossible() { + if (finishedAsyncActions && listener != null) { + super.respond(listener); + } } - } - private static class RespondingWriteResult { - private interface RespondAfterAsyncAction { - void respondAfterAsyncAction(ActionListener listener); + public synchronized void onFinishedAsyncActions(boolean forcedRefresh) { + finishedAsyncActions = true; + respondIfPossible(); } - private final RespondAfterAsyncAction respond; - boolean forcedRefresh; + } - /** - * Are we waiting for async actions? We only respond when true and listener is set. Set to true either in the constructor or while - * synchronized on {@code this}. - */ - private boolean finishedAsyncActions; - /** - * The listener to respond with. We respond when both finishedAsyncActions is true and the listener has been set. This is only - * modified while synchronized on {@code this}. 
- */ - private ActionListener listener; - - public RespondingWriteResult(RespondAfterAsyncAction respond, final IndexShard indexShard, - final WriteRequest request, - @Nullable final Translog.Location location, - final ESLogger logger) { - this.respond = respond; - boolean pendingOps = false; - switch (request.getRefreshPolicy()) { + static void postWriteActions(final IndexShard indexShard, + final WriteRequest request, + @Nullable final Translog.Location location, + final Consumer onDone, + final ESLogger logger) { + boolean pendingOps = false; + boolean immediateRefresh = false; + switch (request.getRefreshPolicy()) { case IMMEDIATE: indexShard.refresh("refresh_flag_index"); - forcedRefresh = true; + immediateRefresh = true; break; case WAIT_UNTIL: if (location != null) { - pendingOps = true; - indexShard.addRefreshListener(location, forcedRefresh -> { - logger.warn("block_until_refresh request ran out of slots and forced a refresh: [{}]", request); - RespondingWriteResult.this.forcedRefresh = forcedRefresh; - onFinishedAsyncActions(); - }); + pendingOps = true; + indexShard.addRefreshListener(location, forcedRefresh -> { + logger.warn("block_until_refresh request ran out of slots and forced a refresh: [{}]", request); + onDone.accept(forcedRefresh); + }); } break; case NONE: break; - } - boolean fsyncTranslog = indexShard.getTranslogDurability() == Translog.Durability.REQUEST && location != null; - if (fsyncTranslog) { - indexShard.sync(location); - } - indexShard.maybeFlush(); - if (pendingOps == false) { - onFinishedAsyncActions(); - } } - - public synchronized void setListenerAndRespondIfPossible(ActionListener listener) { - this.listener = listener; - respondIfPossible(); + boolean fsyncTranslog = indexShard.getTranslogDurability() == Translog.Durability.REQUEST && location != null; + if (fsyncTranslog) { + indexShard.sync(location); } - - /** - * Respond if the refresh has occurred and the listener is ready. Always called while synchronized on {@code this}. - */ - private void respondIfPossible() { - if (finishedAsyncActions && listener != null) { - respond.respondAfterAsyncAction(listener); - } - } - - private synchronized void onFinishedAsyncActions() { - finishedAsyncActions = true; - respondIfPossible(); + indexShard.maybeFlush(); + if (pendingOps == false) { + onDone.accept(immediateRefresh); } } } From 59a753b89109828d2b8f0de05cb104fc663cf95e Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Mon, 6 Jun 2016 10:18:23 -0400 Subject: [PATCH 86/86] Replace a method reference with implementing an interface Saves a single allocation and forces more commonality between the WriteResults. --- .../replication/TransportWriteAction.java | 25 +++++++++++-------- 1 file changed, 15 insertions(+), 10 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java b/core/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java index 6861224e6a5b1..e50ad7f130634 100644 --- a/core/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java +++ b/core/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java @@ -39,7 +39,6 @@ import org.elasticsearch.transport.TransportResponse; import org.elasticsearch.transport.TransportService; -import java.util.function.Consumer; import java.util.function.Supplier; /** @@ -117,7 +116,7 @@ public Translog.Location getLocation() { /** * Result of taking the action on the primary. 
*/ - class WritePrimaryResult extends PrimaryResult { + class WritePrimaryResult extends PrimaryResult implements RespondingWriteResult { boolean finishedAsyncActions; ActionListener listener = null; @@ -129,7 +128,7 @@ public WritePrimaryResult(Request request, Response finalResponse, * We call this before replication because this might wait for a refresh and that can take a while. This way we wait for the * refresh in parallel on the primary and on the replica. */ - postWriteActions(indexShard, request, location, this::onFinishedAsyncActions, logger); + postWriteActions(indexShard, request, location, this, logger); } @Override @@ -147,7 +146,8 @@ protected void respondIfPossible() { } } - public synchronized void onFinishedAsyncActions(boolean forcedRefresh) { + @Override + public synchronized void respondAfterAsyncAction(boolean forcedRefresh) { finalResponse.setForcedRefresh(forcedRefresh); finishedAsyncActions = true; respondIfPossible(); @@ -157,12 +157,12 @@ public synchronized void onFinishedAsyncActions(boolean forcedRefresh) { /** * Result of taking the action on the replica. */ - class WriteReplicaResult extends ReplicaResult { + class WriteReplicaResult extends ReplicaResult implements RespondingWriteResult { boolean finishedAsyncActions; private ActionListener listener; public WriteReplicaResult(IndexShard indexShard, ReplicatedWriteRequest request, Translog.Location location) { - postWriteActions(indexShard, request, location, this::onFinishedAsyncActions, logger); + postWriteActions(indexShard, request, location, this, logger); } @Override @@ -180,16 +180,21 @@ protected void respondIfPossible() { } } - public synchronized void onFinishedAsyncActions(boolean forcedRefresh) { + @Override + public synchronized void respondAfterAsyncAction(boolean forcedRefresh) { finishedAsyncActions = true; respondIfPossible(); } } + private interface RespondingWriteResult { + void respondAfterAsyncAction(boolean forcedRefresh); + } + static void postWriteActions(final IndexShard indexShard, final WriteRequest request, @Nullable final Translog.Location location, - final Consumer onDone, + final RespondingWriteResult respond, final ESLogger logger) { boolean pendingOps = false; boolean immediateRefresh = false; @@ -203,7 +208,7 @@ static void postWriteActions(final IndexShard indexShard, pendingOps = true; indexShard.addRefreshListener(location, forcedRefresh -> { logger.warn("block_until_refresh request ran out of slots and forced a refresh: [{}]", request); - onDone.accept(forcedRefresh); + respond.respondAfterAsyncAction(forcedRefresh); }); } break; @@ -216,7 +221,7 @@ static void postWriteActions(final IndexShard indexShard, } indexShard.maybeFlush(); if (pendingOps == false) { - onDone.accept(immediateRefresh); + respond.respondAfterAsyncAction(immediateRefresh); } } }
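
Taken together, these patches replace the old boolean `refresh` flag with a `RefreshPolicy` (`NONE`, `IMMEDIATE`, `WAIT_UNTIL`) on write requests and report in the response whether a refresh was forced. Below is a minimal client-side sketch of the new API; it assumes that `IndexRequestBuilder` picks up the new `WriteRequestBuilder` interface introduced here, and the index, type, and field names are illustrative rather than taken from these diffs.

[source,java]
----
import org.elasticsearch.action.support.WriteRequest.RefreshPolicy;
import org.elasticsearch.client.Client;

public class RefreshPolicyExample {
    void indexWithRefreshPolicies(Client client) {
        // Deprecated style: boolean flag, equivalent to RefreshPolicy.IMMEDIATE when true.
        client.prepareIndex("test", "tweet", "1")
                .setSource("user", "kimchy")
                .setRefresh(true)
                .get();

        // New style: force an immediate refresh so the document is searchable right away.
        client.prepareIndex("test", "tweet", "1")
                .setSource("user", "kimchy")
                .setRefreshPolicy(RefreshPolicy.IMMEDIATE)
                .get();

        // New option: do not force a refresh, just wait until one makes this write visible.
        // The REST response carries "forced_refresh": true only if the refresh listener
        // slots ran out and the shard had to refresh anyway.
        client.prepareIndex("test", "tweet", "2")
                .setSource("user", "kimchy")
                .setRefreshPolicy(RefreshPolicy.WAIT_UNTIL)
                .get();
    }
}
----

The `WAIT_UNTIL` case is the one exercised by the `addRefreshListener` code paths tested above: the response is held back until the refresh listener fires, and only a forced refresh marks `forced_refresh` in the response.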