Write next cluster state fully on all failures (#73631)

Today we do not set the `LucenePersistedState#writeNextStateFully` flag
on all failures, notably on an `OutOfMemoryError`. Since we don't exit
immediately on an OOME we may have failed part-way through writing a
full state but still proceed with another apparently-incremental write.

With this commit we ensure `LucenePersistedState#writeNextStateFully`
remains set after any failure and is only cleared once a write has
completed successfully.
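
In other words, the writer now assumes the next write must be a full one before it attempts an incremental write, and drops that assumption only after the write returns normally. The following is a minimal illustrative sketch of that ordering, not the real `GatewayMetaState` code; the class name and the two write methods are placeholders standing in for `LucenePersistedState` and the `PersistedClusterStateService.Writer` calls shown in the diff below:

    import java.io.IOException;
    import java.io.UncheckedIOException;

    class PersistedStateSketch {
        private boolean writeNextStateFully;
        private long currentTerm;

        void setCurrentTerm(long newTerm) {
            try {
                if (writeNextStateFully) {
                    writeFullState(newTerm);
                } else {
                    // Set pessimistically before the incremental write: if the call below
                    // throws anything at all (IOException, OutOfMemoryError, ...), the flag
                    // stays set and the next write starts from a full state again.
                    writeNextStateFully = true;
                    writeIncrementalTermUpdate(newTerm);
                }
            } catch (IOException e) {
                throw new UncheckedIOException(e);
            }
            writeNextStateFully = false; // reached only if the write above succeeded
            currentTerm = newTerm;
        }

        // Hypothetical stand-ins for PersistedClusterStateService.Writer#writeFullStateAndCommit
        // and #writeIncrementalTermUpdateAndCommit.
        private void writeFullState(long term) throws IOException {}

        private void writeIncrementalTermUpdate(long term) throws IOException {}
    }

The previous approach set the flag inside a `catch (Exception e)` handler (`handleExceptionOnWrite`), which never runs for an `Error` such as an OOME; setting the flag up front and clearing it after the fact covers every kind of failure without a catch-all.
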
DaveCTurner committed Jun 2, 2021
1 parent 51f6e7d commit b532f13
Showing 2 changed files with 94 additions and 13 deletions.
@@ -462,7 +462,7 @@ static class LucenePersistedState implements PersistedState {

         // As the close method can be concurrently called to the other PersistedState methods, this class has extra protection in place.
         private final AtomicReference<PersistedClusterStateService.Writer> persistenceWriter = new AtomicReference<>();
-        boolean writeNextStateFully;
+        private boolean writeNextStateFully;

         LucenePersistedState(PersistedClusterStateService persistedClusterStateService, long currentTerm, ClusterState lastAcceptedState)
             throws IOException {

@@ -505,13 +505,15 @@ public void setCurrentTerm(long currentTerm) {
             try {
                 if (writeNextStateFully) {
                     getWriterSafe().writeFullStateAndCommit(currentTerm, lastAcceptedState);
-                    writeNextStateFully = false;
                 } else {
+                    writeNextStateFully = true; // in case of failure; this flag is cleared on success
                     getWriterSafe().writeIncrementalTermUpdateAndCommit(currentTerm, lastAcceptedState.version());
                 }
-            } catch (Exception e) {
-                handleExceptionOnWrite(e);
+            } catch (IOException e) {
+                throw new ElasticsearchException(e);
             }

+            writeNextStateFully = false;
             this.currentTerm = currentTerm;
         }

@@ -520,8 +522,8 @@ public void setLastAcceptedState(ClusterState clusterState) {
             try {
                 if (writeNextStateFully) {
                     getWriterSafe().writeFullStateAndCommit(currentTerm, clusterState);
-                    writeNextStateFully = false;
                 } else {
+                    writeNextStateFully = true; // in case of failure; this flag is cleared on success
                     if (clusterState.term() != lastAcceptedState.term()) {
                         assert clusterState.term() > lastAcceptedState.term() : clusterState.term() + " vs " + lastAcceptedState.term();
                         // In a new currentTerm, we cannot compare the persisted metadata's lastAcceptedVersion to those in the new state,

@@ -532,10 +534,11 @@ public void setLastAcceptedState(ClusterState clusterState) {
                         getWriterSafe().writeIncrementalStateAndCommit(currentTerm, lastAcceptedState, clusterState);
                     }
                 }
-            } catch (Exception e) {
-                handleExceptionOnWrite(e);
+            } catch (IOException e) {
+                throw new ElasticsearchException(e);
             }

+            writeNextStateFully = false;
             lastAcceptedState = clusterState;
         }

@@ -562,11 +565,6 @@ private PersistedClusterStateService.Writer getWriterSafe() {
             }
         }

-        private void handleExceptionOnWrite(Exception e) {
-            writeNextStateFully = true;
-            throw ExceptionsHelper.convertToRuntime(e);
-        }
-
         @Override
         public void close() throws IOException {
             IOUtils.close(persistenceWriter.getAndSet(null));

@@ -49,12 +49,14 @@
 import java.util.Collections;
 import java.util.List;
 import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicReference;

 import static org.elasticsearch.test.NodeRoles.nonMasterNode;
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.instanceOf;
 import static org.hamcrest.Matchers.not;
+import static org.mockito.Matchers.anyLong;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;

@@ -296,6 +298,8 @@ public void testStatePersistedOnLoad() throws IOException {
                 new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), () -> 0L);
         final ClusterState state = createClusterState(randomNonNegativeLong(),
             Metadata.builder().clusterUUID(randomAlphaOfLength(10)).build());
+
+        //noinspection EmptyTryBlock
         try (GatewayMetaState.LucenePersistedState ignored = new GatewayMetaState.LucenePersistedState(
             persistedClusterStateService, 42L, state)) {

@@ -470,7 +474,7 @@ Directory createDirectory(Path path) {
                 wrapper.setRandomIOExceptionRateOnOpen(ioExceptionRate.get());
             }

-            for (int i = 0; i < randomIntBetween(1, 5); i++) {
+            for (int i = between(1, 5); 0 <= i; i--) {
                 if (randomBoolean()) {
                     final long version = randomNonNegativeLong();
                     final String indexName = randomAlphaOfLength(10);

@@ -521,10 +525,89 @@ Directory createDirectory(Path path) {
             }
         }

+    public void testStatePersistenceWithFatalError() throws IOException {
+        final AtomicBoolean throwError = new AtomicBoolean();
+        final BigArrays realBigArrays = getBigArrays();
+        final BigArrays mockBigArrays = mock(BigArrays.class);
+        when(mockBigArrays.newByteArray(anyLong())).thenAnswer(invocationOnMock ->
+            {
+                if (throwError.get() && randomBoolean()) {
+                    throw new TestError();
+                }
+                return realBigArrays.newByteArray((Long) invocationOnMock.getArguments()[0]);
+            });
+
+        final PersistedClusterStateService persistedClusterStateService =
+            new PersistedClusterStateService(nodeEnvironment, xContentRegistry(), mockBigArrays,
+                new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), () -> 0L);
+        ClusterState state = createClusterState(randomNonNegativeLong(),
+            Metadata.builder().clusterUUID(randomAlphaOfLength(10)).build());
+        long currentTerm = 42L;
+        try (GatewayMetaState.LucenePersistedState persistedState = new GatewayMetaState.LucenePersistedState(
+            persistedClusterStateService, currentTerm, state)) {
+
+            throwError.set(false);
+
+            for (int i = between(1, 5); 0 <= i; i--) {
+                if (randomBoolean()) {
+                    final ClusterState newState = createClusterState(
+                        randomNonNegativeLong(),
+                        Metadata.builder()
+                            .clusterUUID(randomAlphaOfLength(10))
+                            .coordinationMetadata(CoordinationMetadata.builder().term(currentTerm).build())
+                            .build());
+                    try {
+                        persistedState.setLastAcceptedState(newState);
+                        state = newState;
+                    } catch (TestError e) {
+                        // ok
+                    }
+                } else {
+                    final long newTerm = currentTerm + 1;
+                    try {
+                        persistedState.setCurrentTerm(newTerm);
+                        currentTerm = newTerm;
+                    } catch (TestError e) {
+                        // ok
+                    }
+                }
+            }
+
+            assertEquals(state, persistedState.getLastAcceptedState());
+            assertEquals(currentTerm, persistedState.getCurrentTerm());
+        }
+
+        nodeEnvironment.close();
+
+        for (Path path : nodeEnvironment.nodeDataPaths()) {
+            Settings settings = Settings.builder()
+                .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toAbsolutePath())
+                .put(Environment.PATH_DATA_SETTING.getKey(), path.getParent().getParent().toString()).build();
+            try (NodeEnvironment nodeEnvironment = new NodeEnvironment(settings, TestEnvironment.newEnvironment(settings))) {
+                final PersistedClusterStateService newPersistedClusterStateService =
+                    new PersistedClusterStateService(nodeEnvironment, xContentRegistry(), getBigArrays(),
+                        new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), () -> 0L);
+                final PersistedClusterStateService.OnDiskState onDiskState = newPersistedClusterStateService.loadBestOnDiskState();
+                assertFalse(onDiskState.empty());
+                assertThat(onDiskState.currentTerm, equalTo(currentTerm));
+                assertClusterStateEqual(state,
+                    ClusterState.builder(ClusterName.DEFAULT)
+                        .version(onDiskState.lastAcceptedVersion)
+                        .metadata(onDiskState.metadata).build());
+            }
+        }
+    }
+
     private static BigArrays getBigArrays() {
         return usually()
             ? BigArrays.NON_RECYCLING_INSTANCE
             : new MockBigArrays(new MockPageCacheRecycler(Settings.EMPTY), new NoneCircuitBreakerService());
     }

+    private static final class TestError extends Error {
+        TestError() {
+            super("test error");
+        }
+    }
+
 }
