Skip to content

Commit

Permalink
Fixing tests
Browse files Browse the repository at this point in the history
Signed-off-by: Shourya Dutta Biswas <114977491+shourya035@users.noreply.github.com>
  • Loading branch information
shourya035 committed Sep 4, 2023
1 parent 054a5c1 commit b3262a5
Show file tree
Hide file tree
Showing 2 changed files with 29 additions and 13 deletions.
Expand Up @@ -199,6 +199,7 @@ private Map<String, long[]> assertAndCapturePrimaryTerms(Map<String, long[]> pre
return result;
}

// RemoteStore: Reducing number of docs being ingested to speed up test
public void testSingleNodeNoFlush() throws Exception {
internalCluster().startNode();

Expand Down Expand Up @@ -228,18 +229,19 @@ public void testSingleNodeNoFlush() throws Exception {

if (indexToAllShards) {
// insert enough docs so all shards will have a doc
value1Docs = randomIntBetween(numberOfShards * 10, numberOfShards * 20);
value2Docs = randomIntBetween(numberOfShards * 10, numberOfShards * 20);
value1Docs = randomIntBetween(numberOfShards * 2, numberOfShards * 5);
value2Docs = randomIntBetween(numberOfShards * 2, numberOfShards * 5);

} else {
// insert two docs; some shards will not have anything
value1Docs = 1;
value2Docs = 1;
}

for (int i = 0; i < 1 + randomInt(10); i++) {
int toIndex = Math.max(value1Docs, value2Docs);
logger.info("About to index " + toIndex + " documents");
int toIndex = Math.max(value1Docs, value2Docs);
int multiplier = 1 + randomInt(5);
logger.info("About to index " + toIndex * multiplier + " documents");
for (int i = 0; i < multiplier; i++) {
for (int id = 0; id < toIndex; id++) {
if (id < value1Docs) {
index(
Expand Down
Expand Up @@ -52,7 +52,6 @@
import org.opensearch.cluster.ClusterName;
import org.opensearch.cluster.ClusterState;
import org.opensearch.cluster.action.index.MappingUpdatedAction;
import org.opensearch.cluster.block.ClusterBlockLevel;
import org.opensearch.cluster.coordination.ClusterBootstrapService;
import org.opensearch.cluster.coordination.NoClusterManagerBlockService;
import org.opensearch.cluster.metadata.IndexMetadata;
Expand Down Expand Up @@ -1325,13 +1324,7 @@ public synchronized void validateClusterFormed() {
/* Adding check to ensure that the repository checks are only performed when the cluster state has been recovered.
Useful for test cases which deliberately block cluster state recovery through gateway.xxxx cluster settings
*/
if (!cs.blocks().hasGlobalBlock(GatewayService.STATE_NOT_RECOVERED_BLOCK) && cs.nodes()
.getNodes()
.values()
.stream()
.findFirst()
.get()
.isRemoteStoreNode()) {
if (!gatewaySettingsBlockingStateRecovery(cs) && cs.nodes().getNodes().values().stream().findFirst().get().isRemoteStoreNode()) {
RepositoriesMetadata repositoriesMetadata = cs.metadata().custom(RepositoriesMetadata.TYPE);
assertTrue(repositoriesMetadata != null && !repositoriesMetadata.repositories().isEmpty());
}
Expand All @@ -1344,6 +1337,27 @@ public synchronized void validateClusterFormed() {
}
}

/**
 * Determines whether cluster state recovery is currently being held back by the
 * {@code gateway.recover_after_nodes} setting rather than by a genuine formation problem.
 *
 * @param cs the current cluster state
 * @return {@code true} if the {@link GatewayService#STATE_NOT_RECOVERED_BLOCK} is present
 *         and the current node count is below the highest {@code gateway.recover_after_nodes}
 *         value configured on any node; {@code false} otherwise
 */
private boolean gatewaySettingsBlockingStateRecovery(ClusterState cs) {
    // If the state-not-recovered global block is absent, recovery already happened;
    // no need to inspect per-node settings.
    if (cs.blocks().hasGlobalBlock(GatewayService.STATE_NOT_RECOVERED_BLOCK) == false) {
        return false;
    }

    // Find the maximum 'gateway.recover_after_nodes' configured across all nodes.
    // Use the typed setting accessor instead of parsing the raw string ourselves.
    int recoverAfterNodes = -1;
    for (NodeAndClient nodeAndClient : nodes.values()) {
        Settings nodeSettings = nodeAndClient.node.settings();
        if (nodeSettings.hasValue(GatewayService.RECOVER_AFTER_NODES_SETTING.getKey())) {
            recoverAfterNodes = Math.max(recoverAfterNodes, GatewayService.RECOVER_AFTER_NODES_SETTING.get(nodeSettings));
        }
    }

    // Recovery is deliberately blocked only when the setting is configured somewhere
    // and the cluster has not yet reached the required node count.
    return recoverAfterNodes != -1 && nodes.size() < recoverAfterNodes;
}

@Override
public synchronized void afterTest() {
wipePendingDataDirectories();
Expand Down

0 comments on commit b3262a5

Please sign in to comment.