Skip to content

Commit

Permalink
Remove S3 Eventual Consistency Related Tests (#74015) (#74043)
Browse files Browse the repository at this point in the history
S3 list, update, and related operations are strongly consistent now, so there is no need to keep these tests around any longer.
  • Loading branch information
original-brownbear committed Jun 13, 2021
1 parent 17ae565 commit 67610e8
Show file tree
Hide file tree
Showing 5 changed files with 30 additions and 799 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -8,19 +8,13 @@
package org.elasticsearch.repositories.s3;

import org.elasticsearch.action.support.master.AcknowledgedResponse;
import org.elasticsearch.common.blobstore.BlobMetadata;
import org.elasticsearch.common.blobstore.BlobPath;
import org.elasticsearch.common.settings.MockSecureSettings;
import org.elasticsearch.common.settings.SecureSettings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.repositories.AbstractThirdPartyRepositoryTestCase;
import org.elasticsearch.repositories.blobstore.BlobStoreRepository;

import java.util.Collection;
import java.util.Map;
import java.util.concurrent.Executor;
import java.util.concurrent.TimeUnit;

import static org.hamcrest.Matchers.blankOrNullString;
import static org.hamcrest.Matchers.equalTo;
Expand Down Expand Up @@ -67,39 +61,4 @@ protected void createRepository(String repoName) {
.setSettings(settings).get();
assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));
}

@Override
protected boolean assertCorruptionVisible(BlobStoreRepository repo, Executor genericExec) throws Exception {
    // S3 listings are only eventually consistent; poll for up to ten minutes until
    // the corruption introduced by the test becomes visible via list operations.
    assertBusy(() -> {
        assertTrue(super.assertCorruptionVisible(repo, genericExec));
    }, 10, TimeUnit.MINUTES);
    return true;
}

@Override
protected void assertConsistentRepository(BlobStoreRepository repo, Executor executor) throws Exception {
    // Retry for up to ten minutes: the S3 list operations backing this check are
    // only eventually consistent and may briefly report stale contents.
    assertBusy(() -> {
        super.assertConsistentRepository(repo, executor);
    }, 10, TimeUnit.MINUTES);
}

// Overrides the base-class assertion (it delegates via super below); the
// @Override annotation was missing here while every sibling override in this
// class carries it — added for consistency and compiler checking.
@Override
protected void assertBlobsByPrefix(BlobPath path, String prefix, Map<String, BlobMetadata> blobs) throws Exception {
    // AWS S3 is eventually consistent so we retry for 10 minutes assuming a list operation will never take longer than that
    // to become consistent.
    assertBusy(() -> super.assertBlobsByPrefix(path, prefix, blobs), 10L, TimeUnit.MINUTES);
}

@Override
protected void assertChildren(BlobPath path, Collection<String> children) throws Exception {
    // S3 listings may lag behind writes (eventual consistency), so allow up to
    // ten minutes for the expected child paths to show up.
    assertBusy(() -> {
        super.assertChildren(path, children);
    }, 10, TimeUnit.MINUTES);
}

@Override
protected void assertDeleted(BlobPath path, String name) throws Exception {
    // Deletions may not be reflected in S3 listings immediately; keep checking
    // for up to ten minutes before failing the assertion.
    assertBusy(() -> {
        super.assertDeleted(path, name);
    }, 10, TimeUnit.MINUTES);
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -121,7 +121,6 @@
import org.elasticsearch.cluster.service.FakeThreadPoolMasterService;
import org.elasticsearch.cluster.service.MasterService;
import org.elasticsearch.core.CheckedConsumer;
import org.elasticsearch.core.Nullable;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.network.NetworkModule;
import org.elasticsearch.common.settings.ClusterSettings;
Expand Down Expand Up @@ -171,7 +170,6 @@
import org.elasticsearch.search.SearchService;
import org.elasticsearch.search.builder.SearchSourceBuilder;
import org.elasticsearch.search.fetch.FetchPhase;
import org.elasticsearch.snapshots.mockstore.MockEventuallyConsistentRepository;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.disruption.DisruptableMockTransport;
import org.elasticsearch.threadpool.ThreadPool;
Expand Down Expand Up @@ -228,19 +226,9 @@ public class SnapshotResiliencyTests extends ESTestCase {

private Path tempDir;

/**
* Context shared by all the node's {@link Repository} instances if the eventually consistent blobstore is to be used.
* {@code null} if not using the eventually consistent blobstore.
*/
@Nullable
private MockEventuallyConsistentRepository.Context blobStoreContext;

@Before
public void createServices() {
    tempDir = createTempDir();
    // Roughly half the runs exercise the eventually consistent mock blob store;
    // a null context means the plain fs-backed repository is used instead.
    blobStoreContext = randomBoolean() ? new MockEventuallyConsistentRepository.Context() : null;
    final Settings nodeSettings = Settings.builder().put(NODE_NAME_SETTING.getKey(), "shared").build();
    deterministicTaskQueue = new DeterministicTaskQueue(nodeSettings, random());
}

Expand Down Expand Up @@ -272,9 +260,6 @@ public void verifyReposThenStopServices() {

runUntil(cleanedUp::get, TimeUnit.MINUTES.toMillis(1L));

if (blobStoreContext != null) {
blobStoreContext.forceConsistent();
}
BlobStoreTestUtil.assertConsistency(
(BlobStoreRepository) testClusterNodes.randomMasterNodeSafe().repositoriesService.repository("repo"),
Runnable::run
Expand Down Expand Up @@ -1698,11 +1683,27 @@ protected NamedWriteableRegistry writeableRegistry() {
emptySet()
);
final IndexNameExpressionResolver indexNameExpressionResolver = TestIndexNameExpressionResolver.newInstance();
bigArrays = new BigArrays(new PageCacheRecycler(settings), null, "test");
repositoriesService = new RepositoriesService(
settings,
clusterService,
transportService,
Collections.singletonMap(FsRepository.TYPE, getRepoFactory(environment)),
Collections.singletonMap(
FsRepository.TYPE,
metadata -> new FsRepository(
metadata,
environment,
xContentRegistry(),
clusterService,
bigArrays,
recoverySettings
) {
@Override
protected void assertSnapshotOrGenericThread() {
// eliminate thread name check as we create repo in the test thread
}
}
),
emptyMap(),
threadPool
);
Expand Down Expand Up @@ -1734,7 +1735,6 @@ protected NamedWriteableRegistry writeableRegistry() {
settings,
IndexScopedSettings.BUILT_IN_INDEX_SETTINGS
);
bigArrays = new BigArrays(new PageCacheRecycler(settings), null, "test");
final MapperRegistry mapperRegistry = new IndicesModule(Collections.emptyList()).getMapperRegistry();
indicesService = new IndicesService(
settings,
Expand Down Expand Up @@ -2081,34 +2081,6 @@ protected NamedWriteableRegistry writeableRegistry() {
);
}

/**
 * Builds the repository factory for a test node. When {@code blobStoreContext}
 * was initialized (half the runs, see {@code createServices}) the eventually
 * consistent mock repository is used; otherwise a plain fs repository.
 */
private Repository.Factory getRepoFactory(Environment environment) {
    if (blobStoreContext != null) {
        return metadata -> new MockEventuallyConsistentRepository(
            metadata,
            xContentRegistry(),
            clusterService,
            recoverySettings,
            blobStoreContext,
            random()
        );
    }
    return metadata -> new FsRepository(
        metadata,
        environment,
        xContentRegistry(),
        clusterService,
        bigArrays,
        recoverySettings
    ) {
        @Override
        protected void assertSnapshotOrGenericThread() {
            // repositories are created on the test thread, so skip the thread-name check
        }
    };
}

public void restart() {
testClusterNodes.disconnectNode(this);
final ClusterState oldState = this.clusterService.state();
Expand Down

0 comments on commit 67610e8

Please sign in to comment.