Avoid background sync on relocated primary (#40800)
There were some test failures caused by the background retention lease sync running on a relocated
primary. This commit fixes the situation that triggered the assertion and reactivates the failing test.

Closes #40731
ywelsch committed Apr 3, 2019
1 parent b100f04 commit b1c6f8e
Showing 3 changed files with 32 additions and 3 deletions.
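
Before the diff, here is a minimal sketch (hypothetical class and method names, not the actual Elasticsearch code) of the pattern the fix applies: a periodic background task checks whether the primary has already handed off to its relocation target and, if so, skips its work instead of tripping the assertion that forbids retention lease syncs on a relocated primary.

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;

public class BackgroundSyncSketch {

    // Stand-in for IndexShard#isRelocatedPrimary(): flipped once the primary
    // context has been handed off to the relocation target.
    private final AtomicBoolean relocated = new AtomicBoolean(false);

    private final ScheduledExecutorService scheduler =
        Executors.newSingleThreadScheduledExecutor();

    // Stand-in for the periodic retention lease background sync.
    public void start(long intervalMillis) {
        scheduler.scheduleAtFixedRate(() -> {
            if (relocated.get()) {
                // The fix in this commit: after relocation the sync becomes a
                // no-op rather than running (and asserting) on the old primary.
                return;
            }
            syncRetentionLeases();
        }, intervalMillis, intervalMillis, TimeUnit.MILLISECONDS);
    }

    public void markRelocated() {
        relocated.set(true);
    }

    private void syncRetentionLeases() {
        // In the real code this replicates the shard's retention leases to the replicas.
    }
}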
@@ -121,7 +121,6 @@ protected void doRun() throws Exception {
         return future;
     }
 
-    @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/40731")
     public void testRecoveryWithConcurrentIndexing() throws Exception {
         final String index = "recovery_with_concurrent_indexing";
         Response response = client().performRequest(new Request("GET", "_nodes"));
@@ -830,7 +830,11 @@ private void sync(final Consumer<IndexShard> sync, final String source) {
                 case STARTED:
                     try {
                         shard.runUnderPrimaryPermit(
-                            () -> sync.accept(shard),
+                            () -> {
+                                if (shard.isRelocatedPrimary() == false) {
+                                    sync.accept(shard);
+                                }
+                            },
                             e -> {
                                 if (e instanceof AlreadyClosedException == false
                                     && e instanceof IndexShardClosedException == false) {
@@ -30,20 +30,28 @@
 import org.elasticsearch.cluster.routing.ShardRouting;
 import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.settings.Setting;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.index.IndexService;
 import org.elasticsearch.index.IndexSettings;
 import org.elasticsearch.index.shard.DocsStats;
 import org.elasticsearch.index.shard.ShardId;
 import org.elasticsearch.index.translog.Translog;
+import org.elasticsearch.plugins.Plugin;
 import org.elasticsearch.search.sort.SortOrder;
 import org.elasticsearch.test.BackgroundIndexer;
 import org.elasticsearch.test.ESIntegTestCase;
 import org.elasticsearch.test.junit.annotations.TestLogging;
 
 import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.List;
 import java.util.Set;
 import java.util.concurrent.TimeUnit;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
 
 import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
 import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
@@ -58,6 +66,23 @@
 public class RecoveryWhileUnderLoadIT extends ESIntegTestCase {
     private final Logger logger = LogManager.getLogger(RecoveryWhileUnderLoadIT.class);
 
+    public static final class RetentionLeaseSyncIntervalSettingPlugin extends Plugin {
+
+        @Override
+        public List<Setting<?>> getSettings() {
+            return Collections.singletonList(IndexService.RETENTION_LEASE_SYNC_INTERVAL_SETTING);
+        }
+
+    }
+
+    @Override
+    protected Collection<Class<? extends Plugin>> nodePlugins() {
+        return Stream.concat(
+                super.nodePlugins().stream(),
+                Stream.of(RetentionLeaseSyncIntervalSettingPlugin.class))
+            .collect(Collectors.toList());
+    }
+
     public void testRecoverWhileUnderLoadAllocateReplicasTest() throws Exception {
         logger.info("--> creating test index ...");
         int numberOfShards = numberOfShards();
@@ -260,7 +285,8 @@ public void testRecoverWhileRelocating() throws Exception {
         assertAcked(prepareCreate("test", 3, Settings.builder()
             .put(SETTING_NUMBER_OF_SHARDS, numShards)
             .put(SETTING_NUMBER_OF_REPLICAS, numReplicas)
-            .put(IndexSettings.INDEX_TRANSLOG_DURABILITY_SETTING.getKey(), Translog.Durability.ASYNC)));
+            .put(IndexSettings.INDEX_TRANSLOG_DURABILITY_SETTING.getKey(), Translog.Durability.ASYNC)
+            .put(IndexService.RETENTION_LEASE_SYNC_INTERVAL_SETTING.getKey(), randomFrom("100ms", "1s", "5s", "30s", "60s"))));
 
         final int numDocs = scaledRandomIntBetween(200, 9999);
 
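A note on the test change above: the new plugin exists only to register IndexService.RETENTION_LEASE_SYNC_INTERVAL_SETTING so it can be applied to the test index, and the index is then created with a randomized interval so background retention lease syncs are likely to fire while shards relocate. A condensed sketch of that pattern (the plugin class and helper method names here are hypothetical; the setting constant and Plugin#getSettings come from the diff above):

import java.util.Collections;
import java.util.List;

import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.plugins.Plugin;

// Registers the retention lease sync interval as a recognized index setting
// so a test can tune how often the background sync runs.
public class SyncIntervalSettingPlugin extends Plugin {

    @Override
    public List<Setting<?>> getSettings() {
        return Collections.singletonList(IndexService.RETENTION_LEASE_SYNC_INTERVAL_SETTING);
    }

    // With the plugin installed on the test nodes, index settings can then
    // include a short sync interval, for example 100ms.
    static Settings shortSyncInterval() {
        return Settings.builder()
            .put(IndexService.RETENTION_LEASE_SYNC_INTERVAL_SETTING.getKey(), "100ms")
            .build();
    }
}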
