diff --git a/gradle/documentation/render-javadoc.gradle b/gradle/documentation/render-javadoc.gradle
index 2c942e16732..26a1bc8da3a 100644
--- a/gradle/documentation/render-javadoc.gradle
+++ b/gradle/documentation/render-javadoc.gradle
@@ -151,8 +151,25 @@ configure(project(":solr:test-framework")) {
"org.apache.solr.analysis",
"org.apache.solr.cloud",
"org.apache.solr.core",
+ "org.apache.solr.handler",
"org.apache.solr.handler.component",
"org.apache.solr.update.processor",
+ "org.apache.solr.util",
+ "org.apache.solr.search.similarities",
+ "org.apache.solr.search.function",
+ "org.apache.solr.search.facet",
+ "org.apache.solr.schema"
+ ]
+ }
+}
+
+configure(project(":solr:modules:hdfs")) {
+ project.tasks.withType(RenderJavadocTask) {
+ // TODO: clean up split packages
+ javadocMissingIgnore = [
+ "org.apache.solr.core",
+ "org.apache.solr.core.backup.repository",
+ "org.apache.solr.update",
"org.apache.solr.util"
]
}
diff --git a/gradle/validation/forbidden-apis.gradle b/gradle/validation/forbidden-apis.gradle
index 2a79a4d6fcf..0ffb3a3a272 100644
--- a/gradle/validation/forbidden-apis.gradle
+++ b/gradle/validation/forbidden-apis.gradle
@@ -130,6 +130,11 @@ allprojects { prj ->
forbiddenApisMain.exclude("**/jmh_generated/**")
}
+ // Disable checks on code copied from Hadoop
+ if (prj.path == ":solr:modules:hdfs") {
+ forbiddenApisTest.exclude("**/org/apache/hadoop/**")
+ }
+
// We rely on resolved configurations to compute the relevant set of rule
// files for forbiddenApis. Since we don't want to resolve these configurations until
// the task is executed, we can't really use them as task inputs properly. This is a
@@ -141,4 +146,4 @@ allprojects { prj ->
task.inputs.dir(file(resources))
}
})
-}
\ No newline at end of file
+}
diff --git a/gradle/validation/rat-sources.gradle b/gradle/validation/rat-sources.gradle
index 9652c4e3715..80d90a45087 100644
--- a/gradle/validation/rat-sources.gradle
+++ b/gradle/validation/rat-sources.gradle
@@ -102,6 +102,12 @@ allprojects {
exclude "src/test-files/META-INF/services/*"
break
+ case ":solr:modules:hdfs":
+ exclude "src/test-files/**/*.aff"
+ exclude "src/test-files/**/*.dic"
+ exclude "src/test-files/**/*.incl"
+ break
+
case ":solr:modules:langid":
exclude "**/langdetect-profiles/*"
break
diff --git a/gradle/validation/spotless.gradle b/gradle/validation/spotless.gradle
index 395d86a9040..0f6f5b03d42 100644
--- a/gradle/validation/spotless.gradle
+++ b/gradle/validation/spotless.gradle
@@ -49,6 +49,7 @@ configure(project(":solr").subprojects) { prj ->
case ":solr:modules:clustering":
case ":solr:modules:extraction":
case ":solr:modules:gcs-repository":
+ case ":solr:modules:hdfs":
case ":solr:modules:langid":
case ":solr:modules:ltr":
case ":solr:modules:scripting":
diff --git a/gradle/validation/validate-source-patterns.gradle b/gradle/validation/validate-source-patterns.gradle
index d1744c17f39..3dc22f50b0d 100644
--- a/gradle/validation/validate-source-patterns.gradle
+++ b/gradle/validation/validate-source-patterns.gradle
@@ -108,7 +108,6 @@ allprojects {
configure(project(':solr:core')) {
project.tasks.withType(ValidateSourcePatternsTask) {
sourceFiles.exclude 'src/**/CheckLoggingConfiguration.java'
- sourceFiles.exclude 'src/test/org/apache/hadoop/**'
}
}
diff --git a/settings.gradle b/settings.gradle
index 66e7610653b..6a78dd40018 100644
--- a/settings.gradle
+++ b/settings.gradle
@@ -39,6 +39,7 @@ include "solr:modules:s3-repository"
include "solr:modules:scripting"
include "solr:modules:ltr"
include "solr:modules:gcs-repository"
+include "solr:modules:hdfs"
include "solr:webapp"
include "solr:benchmark"
include "solr:test-framework"
diff --git a/solr/core/build.gradle b/solr/core/build.gradle
index c96aa52efdd..99105f451c9 100644
--- a/solr/core/build.gradle
+++ b/solr/core/build.gradle
@@ -66,7 +66,6 @@ dependencies {
implementation 'commons-io:commons-io'
implementation 'com.carrotsearch:hppc'
implementation 'org.apache.commons:commons-collections4'
- runtimeOnly 'commons-collections:commons-collections' // for Hadoop and...?
implementation('com.github.ben-manes.caffeine:caffeine') { transitive = false }
@@ -136,27 +135,17 @@ dependencies {
implementation ('org.apache.calcite:calcite-linq4j') { transitive = false }
implementation ('org.apache.calcite.avatica:avatica-core') { transitive = false }
- // Hadoop; general stuff
- implementation ('org.apache.hadoop:hadoop-common') { transitive = false } // too many to ignore
- implementation ('org.apache.hadoop:hadoop-annotations')
- runtimeOnly 'org.apache.htrace:htrace-core4' // note: removed in Hadoop 3.3.2
- runtimeOnly "org.apache.commons:commons-configuration2"
- testImplementation ('org.apache.hadoop:hadoop-common::tests') { transitive = false }
- // Hadoop Test hacks for Java Security Manager -- SOLR-14033
- testImplementation 'org.apache.commons:commons-compress'
- testImplementation ('com.sun.jersey:jersey-servlet') { transitive = false }
-
- // HDFS
- implementation ('org.apache.hadoop:hadoop-hdfs-client') { transitive = false }
- testImplementation ('org.apache.hadoop:hadoop-hdfs') { transitive = false }
- testImplementation ('org.apache.hadoop:hadoop-hdfs::tests') { transitive = false }
- testImplementation 'org.apache.logging.log4j:log4j-1.2-api'
-
// Hadoop auth framework
+ implementation ('org.apache.hadoop:hadoop-annotations')
implementation ('org.apache.hadoop:hadoop-auth') { transitive = false }
- runtimeOnly 'com.google.re2j:re2j' // transitive of hadoop-common; used by Kerberos auth
- runtimeOnly ('org.apache.kerby:kerb-core')
- runtimeOnly ('org.apache.kerby:kerb-util')
+ implementation ('org.apache.hadoop:hadoop-common') { transitive = false }
+ // transitive of hadoop-common; used by Kerberos auth
+ runtimeOnly 'commons-collections:commons-collections'
+ runtimeOnly 'com.google.re2j:re2j'
+ runtimeOnly 'org.apache.commons:commons-configuration2'
+ runtimeOnly 'org.apache.htrace:htrace-core4' // note: removed in Hadoop 3.3.2
+ runtimeOnly 'org.apache.kerby:kerb-core'
+ runtimeOnly 'org.apache.kerby:kerb-util'
// Hadoop MiniKdc Dependencies (for Kerberos auth tests)
testImplementation ('org.apache.hadoop:hadoop-minikdc', {
diff --git a/solr/core/src/java/org/apache/solr/core/DirectoryFactory.java b/solr/core/src/java/org/apache/solr/core/DirectoryFactory.java
index 2ccc5923ec2..ea278514907 100644
--- a/solr/core/src/java/org/apache/solr/core/DirectoryFactory.java
+++ b/solr/core/src/java/org/apache/solr/core/DirectoryFactory.java
@@ -36,6 +36,7 @@
import org.apache.lucene.store.LockFactory;
import org.apache.solr.common.SolrException;
import org.apache.solr.core.CachingDirectoryFactory.CloseListener;
+import org.apache.solr.update.UpdateLog;
import org.apache.solr.util.plugin.NamedListInitializedPlugin;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -67,7 +68,6 @@ public enum DirContext {DEFAULT, META_DATA}
public final static String LOCK_TYPE_NATIVE = "native";
public final static String LOCK_TYPE_SINGLE = "single";
public final static String LOCK_TYPE_NONE = "none";
- public final static String LOCK_TYPE_HDFS = "hdfs";
protected volatile CoreContainer coreContainer;
@@ -430,4 +430,12 @@ static DirectoryFactory loadDirectoryFactory(SolrConfig config, CoreContainer cc
}
return dirFactory;
}
+
+ /**
+ * Returns a default Update Log instance. UpdateHandler will invoke this method if the
+ * solrconfig / plugin info does not specify any ulog class.
+ */
+ public UpdateLog newDefaultUpdateLog() {
+ return new UpdateLog();
+ }
}
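
For reference, a minimal sketch of how the `newDefaultUpdateLog()` hook introduced above is meant to be used: any `DirectoryFactory` subclass can now supply its own default transaction log. The `MyDirectoryFactory` class and package below are hypothetical, for illustration only; the real override shipped by this PR is the `HdfsDirectoryFactory` change further down.

```java
package com.example.solr; // hypothetical package, illustration only

import org.apache.solr.core.StandardDirectoryFactory;
import org.apache.solr.update.UpdateLog;

/** Sketch: a custom DirectoryFactory supplying its own default UpdateLog. */
public class MyDirectoryFactory extends StandardDirectoryFactory {
  @Override
  public UpdateLog newDefaultUpdateLog() {
    // UpdateHandler calls this when the <updateLog> element in solrconfig.xml
    // does not name a class; return whatever UpdateLog suits this directory impl.
    return new UpdateLog();
  }
}
```
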
diff --git a/solr/core/src/java/org/apache/solr/update/UpdateHandler.java b/solr/core/src/java/org/apache/solr/update/UpdateHandler.java
index ade92401370..f7a62b51512 100644
--- a/solr/core/src/java/org/apache/solr/update/UpdateHandler.java
+++ b/solr/core/src/java/org/apache/solr/update/UpdateHandler.java
@@ -115,12 +115,12 @@ public UpdateHandler(SolrCore core, UpdateLog updateLog) {
boolean skipUpdateLog = core.getCoreDescriptor().getCloudDescriptor() != null && !core.getCoreDescriptor().getCloudDescriptor().requiresTransactionLog();
if (updateLog == null && ulogPluginInfo != null && ulogPluginInfo.isEnabled() && !skipUpdateLog) {
DirectoryFactory dirFactory = core.getDirectoryFactory();
- if (dirFactory instanceof HdfsDirectoryFactory) {
- ulog = new HdfsUpdateLog(((HdfsDirectoryFactory)dirFactory).getConfDir());
- } else {
- ulog = ulogPluginInfo.className == null ? new UpdateLog():
- core.getResourceLoader().newInstance(ulogPluginInfo, UpdateLog.class, true);
- }
+
+ // if the update log class is not defined in the plugin info / solrconfig.xml
+ // (like an <updateLog> element without a class attribute)
+ // we fall back to the one which is the default for the given directory factory
+ ulog = ulogPluginInfo.className == null ? dirFactory.newDefaultUpdateLog() :
+ core.getResourceLoader().newInstance(ulogPluginInfo, UpdateLog.class, true);
if (!core.isReloaded() && !dirFactory.isPersistent()) {
ulog.clearLog(core, ulogPluginInfo);
diff --git a/solr/core/src/test-files/core-site.xml b/solr/core/src/test-files/core-site.xml
index df15d529354..66718b71127 100644
--- a/solr/core/src/test-files/core-site.xml
+++ b/solr/core/src/test-files/core-site.xml
@@ -18,6 +18,6 @@
<name>hadoop.security.group.mapping</name>
- <value>org.apache.solr.cloud.hdfs.FakeGroupMapping</value>
+ <value>org.apache.solr.security.hadoop.HadoopAuthFakeGroupMapping</value>
diff --git a/solr/core/src/test/org/apache/solr/TestSolrCoreProperties.java b/solr/core/src/test/org/apache/solr/TestSolrCoreProperties.java
index 9a044ff3433..5387f04ce23 100644
--- a/solr/core/src/test/org/apache/solr/TestSolrCoreProperties.java
+++ b/solr/core/src/test/org/apache/solr/TestSolrCoreProperties.java
@@ -76,7 +76,7 @@ public static void beforeTest() throws Exception {
Properties nodeProperties = new Properties();
// this sets the property for jetty starting SolrDispatchFilter
- if (System.getProperty("solr.data.dir") == null && System.getProperty("solr.hdfs.home") == null) {
+ if (System.getProperty("solr.data.dir") == null) {
nodeProperties.setProperty("solr.data.dir", createTempDir().toFile().getCanonicalPath());
}
jetty = new JettySolrRunner(homeDir.getAbsolutePath(), nodeProperties, buildJettyConfig("/solr"));
diff --git a/solr/core/src/test/org/apache/solr/cloud/BasicDistributedZk2Test.java b/solr/core/src/test/org/apache/solr/cloud/BasicDistributedZk2Test.java
index c3a30d8e47f..f6b96a98f10 100644
--- a/solr/core/src/test/org/apache/solr/cloud/BasicDistributedZk2Test.java
+++ b/solr/core/src/test/org/apache/solr/cloud/BasicDistributedZk2Test.java
@@ -16,444 +16,22 @@
*/
package org.apache.solr.cloud;
-import java.nio.file.Files;
-import java.nio.file.Path;
-import java.util.concurrent.TimeUnit;
-
-import org.apache.lucene.mockfile.FilterPath;
import org.apache.solr.SolrTestCaseJ4.SuppressSSL;
-import org.apache.solr.client.solrj.SolrClient;
-import org.apache.solr.client.solrj.SolrQuery;
-import org.apache.solr.client.solrj.SolrServerException;
-import org.apache.solr.client.solrj.impl.HttpSolrClient;
-import org.apache.solr.client.solrj.request.CollectionAdminRequest;
-import org.apache.solr.client.solrj.request.QueryRequest;
-import org.apache.solr.client.solrj.request.UpdateRequest;
-import org.apache.solr.client.solrj.response.QueryResponse;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.SolrInputDocument;
-import org.apache.solr.common.cloud.ZkNodeProps;
-import org.apache.solr.common.cloud.ZkStateReader;
-import org.apache.solr.common.params.CommonParams;
-import org.apache.solr.common.params.ModifiableSolrParams;
-import org.apache.solr.handler.BackupStatusChecker;
-import org.apache.solr.handler.ReplicationHandler;
import org.junit.Test;
/**
* This test simply does a bunch of basic things in solrcloud mode and asserts things
* work as expected.
+ * Implementation moved to AbstractBasicDistributedZk2TestBase as it is used by HDFS contrib module tests.
*/
@SuppressSSL(bugUrl = "https://issues.apache.org/jira/browse/SOLR-5776")
-public class BasicDistributedZk2Test extends AbstractFullDistribZkTestBase {
- private static final String SHARD2 = "shard2";
- private static final String SHARD1 = "shard1";
- private static final String ONE_NODE_COLLECTION = "onenodecollection";
- private final boolean onlyLeaderIndexes = random().nextBoolean();
-
-
- public BasicDistributedZk2Test() {
- super();
- // we need DVs on point fields to compute stats & facets
- if (Boolean.getBoolean(NUMERIC_POINTS_SYSPROP)) System.setProperty(NUMERIC_DOCVALUES_SYSPROP,"true");
-
- sliceCount = 2;
- }
+public class BasicDistributedZk2Test extends AbstractBasicDistributedZk2TestBase {
- @Override
- protected boolean useTlogReplicas() {
- return false; // TODO: tlog replicas makes commits take way to long due to what is likely a bug and it's TestInjection use
- }
-
@Test
+ @Override
@ShardsFixed(num = 4)
public void test() throws Exception {
- boolean testFinished = false;
- try {
- handle.clear();
- handle.put("timestamp", SKIPVAL);
-
- testNodeWithoutCollectionForwarding();
-
- indexr(id, 1, i1, 100, tlong, 100, t1,
- "now is the time for all good men", "foo_f", 1.414f, "foo_b", "true",
- "foo_d", 1.414d);
-
- commit();
-
- // make sure we are in a steady state...
- waitForRecoveriesToFinish(false);
-
- assertDocCounts(false);
-
- indexAbunchOfDocs();
-
- // check again
- waitForRecoveriesToFinish(false);
-
- commit();
-
- assertDocCounts(VERBOSE);
- checkQueries();
-
- assertDocCounts(VERBOSE);
-
- query("q", "*:*", "sort", "n_tl1 desc");
-
- brindDownShardIndexSomeDocsAndRecover();
-
- query("q", "*:*", "sort", "n_tl1 desc");
-
- // test adding another replica to a shard - it should do a
- // recovery/replication to pick up the index from the leader
- addNewReplica();
-
- long docId = testUpdateAndDelete();
-
- // index a bad doc...
- expectThrows(SolrException.class, () -> indexr(t1, "a doc with no id"));
-
- // TODO: bring this to its own method?
- // try indexing to a leader that has no replicas up
- ZkStateReader zkStateReader = cloudClient.getZkStateReader();
- ZkNodeProps leaderProps = zkStateReader.getLeaderRetry(
- DEFAULT_COLLECTION, SHARD2);
-
- String nodeName = leaderProps.getStr(ZkStateReader.NODE_NAME_PROP);
- chaosMonkey.stopShardExcept(SHARD2, nodeName);
-
- SolrClient client = getClient(nodeName);
-
- index_specific(client, "id", docId + 1, t1, "what happens here?");
-
- // expire a session...
- CloudJettyRunner cloudJetty = shardToJetty.get(SHARD1).get(0);
- chaosMonkey.expireSession(cloudJetty.jetty);
-
- indexr("id", docId + 1, t1, "slip this doc in");
-
- waitForRecoveriesToFinish(false);
-
- checkShardConsistency(SHARD1);
- checkShardConsistency(SHARD2);
-
- testFinished = true;
- } finally {
- if (!testFinished) {
- printLayoutOnTearDown = true;
- }
- }
-
- }
-
- private void testNodeWithoutCollectionForwarding() throws Exception {
- assertEquals(0, CollectionAdminRequest
- .createCollection(ONE_NODE_COLLECTION, "conf1", 1, 1)
- .setCreateNodeSet("")
- .process(cloudClient).getStatus());
- assertTrue(CollectionAdminRequest
- .addReplicaToShard(ONE_NODE_COLLECTION, "shard1")
- .setCoreName(ONE_NODE_COLLECTION + "core")
- .process(cloudClient).isSuccess());
-
- waitForCollection(cloudClient.getZkStateReader(), ONE_NODE_COLLECTION, 1);
- waitForRecoveriesToFinish(ONE_NODE_COLLECTION, cloudClient.getZkStateReader(), false);
-
- cloudClient.getZkStateReader().getLeaderRetry(ONE_NODE_COLLECTION, SHARD1, 30000);
-
- int docs = 2;
- for (SolrClient client : clients) {
- final String clientUrl = getBaseUrl((HttpSolrClient) client);
- addAndQueryDocs(clientUrl, docs);
- docs += 2;
- }
- }
-
- // 2 docs added every call
- private void addAndQueryDocs(final String baseUrl, int docs)
- throws Exception {
-
- SolrQuery query = new SolrQuery("*:*");
-
- try (HttpSolrClient qclient = getHttpSolrClient(baseUrl + "/onenodecollection" + "core")) {
-
- // it might take a moment for the proxy node to see us in their cloud state
- waitForNon403or404or503(qclient);
-
- // add a doc
- SolrInputDocument doc = new SolrInputDocument();
- doc.addField("id", docs);
- qclient.add(doc);
- qclient.commit();
-
-
- QueryResponse results = qclient.query(query);
- assertEquals(docs - 1, results.getResults().getNumFound());
- }
-
- try (HttpSolrClient qclient = getHttpSolrClient(baseUrl + "/onenodecollection")) {
- QueryResponse results = qclient.query(query);
- assertEquals(docs - 1, results.getResults().getNumFound());
-
- SolrInputDocument doc = new SolrInputDocument();
- doc.addField("id", docs + 1);
- qclient.add(doc);
- qclient.commit();
-
- query = new SolrQuery("*:*");
- query.set("rows", 0);
- results = qclient.query(query);
- assertEquals(docs, results.getResults().getNumFound());
- }
- }
-
- private long testUpdateAndDelete() throws Exception {
- long docId = 99999999L;
- indexr("id", docId, t1, "originalcontent");
-
- commit();
-
- ModifiableSolrParams params = new ModifiableSolrParams();
- params.add("q", t1 + ":originalcontent");
- QueryResponse results = clients.get(0).query(params);
- assertEquals(1, results.getResults().getNumFound());
-
- // update doc
- indexr("id", docId, t1, "updatedcontent");
-
- commit();
-
- results = clients.get(0).query(params);
- assertEquals(0, results.getResults().getNumFound());
-
- params.set("q", t1 + ":updatedcontent");
-
- results = clients.get(0).query(params);
- assertEquals(1, results.getResults().getNumFound());
-
- UpdateRequest uReq = new UpdateRequest();
- // uReq.setParam(UpdateParams.UPDATE_CHAIN, DISTRIB_UPDATE_CHAIN);
- uReq.deleteById(Long.toString(docId)).process(clients.get(0));
-
- commit();
-
- results = clients.get(0).query(params);
- assertEquals(0, results.getResults().getNumFound());
- return docId;
- }
-
- private void brindDownShardIndexSomeDocsAndRecover() throws Exception {
- SolrQuery query = new SolrQuery("*:*");
- query.set("distrib", false);
-
- commit();
-
- long deadShardCount = shardToJetty.get(SHARD2).get(0).client.solrClient
- .query(query).getResults().getNumFound();
-
- query("q", "*:*", "sort", "n_tl1 desc");
-
- int oldLiveNodes = cloudClient.getZkStateReader().getZkClient().getChildren(ZkStateReader.LIVE_NODES_ZKNODE, null, true).size();
-
- assertEquals(5, oldLiveNodes);
-
- // kill a shard
- CloudJettyRunner deadShard = chaosMonkey.stopShard(SHARD1, 0);
-
- // ensure shard is dead
- expectThrows(SolrServerException.class,
- "This server should be down and this update should have failed",
- () -> index_specific(deadShard.client.solrClient, id, 999, i1, 107, t1, "specific doc!")
- );
-
- commit();
-
- query("q", "*:*", "sort", "n_tl1 desc");
-
- // long cloudClientDocs = cloudClient.query(new
- // SolrQuery("*:*")).getResults().getNumFound();
- // System.out.println("clouddocs:" + cloudClientDocs);
-
- // try to index to a living shard at shard2
-
-
- long numFound1 = cloudClient.query(new SolrQuery("*:*")).getResults().getNumFound();
-
- cloudClient.getZkStateReader().getLeaderRetry(DEFAULT_COLLECTION, SHARD1, 60000);
-
- try {
- index_specific(shardToJetty.get(SHARD1).get(1).client.solrClient, id, 1000, i1, 108, t1,
- "specific doc!");
- } catch (Exception e) {
- // wait and try again
- Thread.sleep(4000);
- index_specific(shardToJetty.get(SHARD1).get(1).client.solrClient, id, 1000, i1, 108, t1,
- "specific doc!");
- }
-
- commit();
-
- checkShardConsistency(true, false);
-
- query("q", "*:*", "sort", "n_tl1 desc");
-
-
- cloudClient.setDefaultCollection(DEFAULT_COLLECTION);
-
- long numFound2 = cloudClient.query(new SolrQuery("*:*")).getResults().getNumFound();
-
- assertEquals(numFound1 + 1, numFound2);
-
- SolrInputDocument doc = new SolrInputDocument();
- doc.addField("id", 1001);
-
- controlClient.add(doc);
-
- // try adding a doc with CloudSolrServer
- UpdateRequest ureq = new UpdateRequest();
- ureq.add(doc);
- // ureq.setParam("update.chain", DISTRIB_UPDATE_CHAIN);
-
- try {
- ureq.process(cloudClient);
- } catch(SolrServerException e){
- // try again
- Thread.sleep(3500);
- ureq.process(cloudClient);
- }
-
- commit();
-
- query("q", "*:*", "sort", "n_tl1 desc");
-
- long numFound3 = cloudClient.query(new SolrQuery("*:*")).getResults().getNumFound();
-
- // lets just check that the one doc since last commit made it in...
- assertEquals(numFound2 + 1, numFound3);
-
- // test debugging
- testDebugQueries();
-
- if (VERBOSE) {
- System.err.println(controlClient.query(new SolrQuery("*:*")).getResults()
- .getNumFound());
-
- for (SolrClient client : clients) {
- try {
- SolrQuery q = new SolrQuery("*:*");
- q.set("distrib", false);
- System.err.println(client.query(q).getResults()
- .getNumFound());
- } catch (Exception e) {
-
- }
- }
- }
- // TODO: This test currently fails because debug info is obtained only
- // on shards with matches.
- // query("q","matchesnothing","fl","*,score", "debugQuery", "true");
-
- // this should trigger a recovery phase on deadShard
- deadShard.jetty.start();
-
- // make sure we have published we are recovering
- Thread.sleep(1500);
-
- waitForRecoveriesToFinish(false);
-
- deadShardCount = shardToJetty.get(SHARD1).get(0).client.solrClient
- .query(query).getResults().getNumFound();
- // if we properly recovered, we should now have the couple missing docs that
- // came in while shard was down
- checkShardConsistency(true, false);
-
-
- // recover over 100 docs so we do more than just peer sync (replicate recovery)
- chaosMonkey.stopJetty(deadShard);
-
- for (int i = 0; i < 226; i++) {
- doc = new SolrInputDocument();
- doc.addField("id", 2000 + i);
- controlClient.add(doc);
- ureq = new UpdateRequest();
- ureq.add(doc);
- // ureq.setParam("update.chain", DISTRIB_UPDATE_CHAIN);
- ureq.process(cloudClient);
- }
- commit();
-
- Thread.sleep(1500);
-
- deadShard.jetty.start();
-
- // make sure we have published we are recovering
- Thread.sleep(1500);
-
- waitForThingsToLevelOut(1, TimeUnit.MINUTES);
-
- Thread.sleep(500);
-
- waitForRecoveriesToFinish(false);
-
- checkShardConsistency(true, false);
-
- // try a backup command
- try(final HttpSolrClient client = getHttpSolrClient((String) shardToJetty.get(SHARD2).get(0).info.get("base_url"))) {
- final String backupName = "the_backup";
- ModifiableSolrParams params = new ModifiableSolrParams();
- params.set("qt", ReplicationHandler.PATH);
- params.set("command", "backup");
- params.set("name", backupName);
- final Path location = FilterPath.unwrap(createTempDir()).toRealPath();
- // Allow non-standard location outside SOLR_HOME
- jettys.forEach(j -> j.getCoreContainer().getAllowPaths().add(location));
- params.set("location", location.toString());
-
- QueryRequest request = new QueryRequest(params);
- client.request(request, DEFAULT_TEST_COLLECTION_NAME);
-
-
- final BackupStatusChecker backupStatus
- = new BackupStatusChecker(client, "/" + DEFAULT_TEST_COLLECTION_NAME + "/replication");
- final String backupDirName = backupStatus.waitForBackupSuccess(backupName, 30);
- assertTrue("Backup dir does not exist: " + backupDirName,
- Files.exists(location.resolve(backupDirName)));
- }
-
- }
-
- private void addNewReplica() throws Exception {
-
- waitForRecoveriesToFinish(false);
-
- // new server should be part of first shard
- // how many docs are on the new shard?
- for (CloudJettyRunner cjetty : shardToJetty.get(SHARD1)) {
- if (VERBOSE) System.err.println("shard1 total:"
- + cjetty.client.solrClient.query(new SolrQuery("*:*")).getResults().getNumFound());
- }
- for (CloudJettyRunner cjetty : shardToJetty.get(SHARD2)) {
- if (VERBOSE) System.err.println("shard2 total:"
- + cjetty.client.solrClient.query(new SolrQuery("*:*")).getResults().getNumFound());
- }
-
- checkShardConsistency(SHARD1);
- checkShardConsistency(SHARD2);
-
- assertDocCounts(VERBOSE);
- }
-
- private void testDebugQueries() throws Exception {
- handle.put("explain", SKIPVAL);
- handle.put("debug", UNORDERED);
- handle.put("time", SKIPVAL);
- handle.put("track", SKIP);
- query("q", "now their fox sat had put", "fl", "*,score",
- CommonParams.DEBUG_QUERY, "true");
- query("q", "id_i1:[1 TO 5]", CommonParams.DEBUG_QUERY, "true");
- query("q", "id_i1:[1 TO 5]", CommonParams.DEBUG, CommonParams.TIMING);
- query("q", "id_i1:[1 TO 5]", CommonParams.DEBUG, CommonParams.RESULTS);
- query("q", "id_i1:[1 TO 5]", CommonParams.DEBUG, CommonParams.QUERY);
+ super.test();
}
}
diff --git a/solr/core/src/test/org/apache/solr/cloud/BasicDistributedZkTest.java b/solr/core/src/test/org/apache/solr/cloud/BasicDistributedZkTest.java
index 6a14e1d4db2..6afabf39070 100644
--- a/solr/core/src/test/org/apache/solr/cloud/BasicDistributedZkTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/BasicDistributedZkTest.java
@@ -16,1339 +16,26 @@
*/
package org.apache.solr.cloud;
-import java.io.IOException;
-import java.lang.invoke.MethodHandles;
-import java.net.URL;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.concurrent.Callable;
-import java.util.concurrent.CompletionService;
-import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.ExecutorCompletionService;
-import java.util.concurrent.Future;
-import java.util.concurrent.SynchronousQueue;
-import java.util.concurrent.ThreadPoolExecutor;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.TimeoutException;
-import java.util.concurrent.atomic.AtomicInteger;
-import java.util.concurrent.atomic.AtomicLong;
-import java.util.concurrent.atomic.AtomicReference;
-
-import org.apache.lucene.util.IOUtils;
import org.apache.lucene.util.LuceneTestCase.Slow;
-import org.apache.solr.JSONTestUtil;
import org.apache.solr.SolrTestCaseJ4.SuppressSSL;
-import org.apache.solr.client.solrj.SolrClient;
-import org.apache.solr.client.solrj.SolrQuery;
-import org.apache.solr.client.solrj.SolrServerException;
-import org.apache.solr.client.solrj.embedded.JettySolrRunner;
-import org.apache.solr.client.solrj.impl.HttpSolrClient;
-import org.apache.solr.client.solrj.request.AbstractUpdateRequest;
-import org.apache.solr.client.solrj.request.CollectionAdminRequest;
-import org.apache.solr.client.solrj.request.CoreAdminRequest.Create;
-import org.apache.solr.client.solrj.request.CoreAdminRequest.Unload;
-import org.apache.solr.client.solrj.request.QueryRequest;
-import org.apache.solr.client.solrj.request.StreamingUpdateRequest;
-import org.apache.solr.client.solrj.request.UpdateRequest;
-import org.apache.solr.client.solrj.response.CollectionAdminResponse;
-import org.apache.solr.client.solrj.response.FacetField;
-import org.apache.solr.client.solrj.response.Group;
-import org.apache.solr.client.solrj.response.GroupCommand;
-import org.apache.solr.client.solrj.response.GroupResponse;
-import org.apache.solr.client.solrj.response.QueryResponse;
-import org.apache.solr.client.solrj.response.UpdateResponse;
-import org.apache.solr.cloud.api.collections.CollectionHandlingUtils;
-import org.apache.solr.common.SolrDocument;
-import org.apache.solr.common.SolrDocumentList;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.SolrInputDocument;
-import org.apache.solr.common.cloud.ClusterState;
-import org.apache.solr.common.cloud.DocCollection;
-import org.apache.solr.common.cloud.Replica;
-import org.apache.solr.common.cloud.Slice;
-import org.apache.solr.common.cloud.ZkCoreNodeProps;
-import org.apache.solr.common.cloud.ZkNodeProps;
-import org.apache.solr.common.cloud.ZkStateReader;
-import org.apache.solr.common.params.CollectionParams.CollectionAction;
-import org.apache.solr.common.params.CommonParams;
-import org.apache.solr.common.params.ModifiableSolrParams;
-import org.apache.solr.common.params.UpdateParams;
-import org.apache.solr.common.util.ExecutorUtil;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.common.util.SolrNamedThreadFactory;
-import org.apache.solr.util.TestInjection;
-import org.apache.solr.util.TestInjection.Hook;
-import org.junit.BeforeClass;
import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
/**
* This test simply does a bunch of basic things in solrcloud mode and asserts things
* work as expected.
+ * Implementation moved to AbstractBasicDistributedZkTestBase as it is used by many HDFS contrib tests.
*/
@Slow
@SuppressSSL(bugUrl = "https://issues.apache.org/jira/browse/SOLR-5776")
-public class BasicDistributedZkTest extends AbstractFullDistribZkTestBase {
-
- private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
- private static final String DEFAULT_COLLECTION = "collection1";
-
- private final boolean onlyLeaderIndexes = random().nextBoolean();
-
- String t1="a_t";
- String i1="a_i1";
- String tlong = "other_tl1";
- String tsort="t_sortable";
-
- String oddField="oddField_s";
- String missingField="ignore_exception__missing_but_valid_field_t";
-
- private Map<String, List<SolrClient>> otherCollectionClients = new HashMap<>();
-
- private String oneInstanceCollection = "oneInstanceCollection";
- private String oneInstanceCollection2 = "oneInstanceCollection2";
-
- private AtomicInteger nodeCounter = new AtomicInteger();
-
- CompletionService
*/
public class DirectoryFactoriesTest extends SolrTestCaseJ4 {
-
- // TODO: what do we need to setup to be able to test HdfsDirectoryFactory?
public static final List<Class<? extends DirectoryFactory>> ALL_CLASSES
= Arrays.asList(MMapDirectoryFactory.class,
MockDirectoryFactory.class,
diff --git a/solr/core/src/test/org/apache/solr/core/DirectoryFactoryTest.java b/solr/core/src/test/org/apache/solr/core/DirectoryFactoryTest.java
index f614bc7ae3c..57d51c8996a 100755
--- a/solr/core/src/test/org/apache/solr/core/DirectoryFactoryTest.java
+++ b/solr/core/src/test/org/apache/solr/core/DirectoryFactoryTest.java
@@ -64,7 +64,6 @@ public void testLockTypesUnchanged() throws Exception {
assertEquals("native", DirectoryFactory.LOCK_TYPE_NATIVE);
assertEquals("single", DirectoryFactory.LOCK_TYPE_SINGLE);
assertEquals("none", DirectoryFactory.LOCK_TYPE_NONE);
- assertEquals("hdfs", DirectoryFactory.LOCK_TYPE_HDFS);
}
@Test
diff --git a/solr/core/src/test/org/apache/solr/core/TestBackupRepositoryFactory.java b/solr/core/src/test/org/apache/solr/core/TestBackupRepositoryFactory.java
index 307a9b75743..fde8709d386 100644
--- a/solr/core/src/test/org/apache/solr/core/TestBackupRepositoryFactory.java
+++ b/solr/core/src/test/org/apache/solr/core/TestBackupRepositoryFactory.java
@@ -28,7 +28,6 @@
import org.apache.solr.common.params.CoreAdminParams;
import org.apache.solr.core.backup.repository.BackupRepository;
import org.apache.solr.core.backup.repository.BackupRepositoryFactory;
-import org.apache.solr.core.backup.repository.HdfsBackupRepository;
import org.apache.solr.core.backup.repository.LocalFileSystemRepository;
import org.apache.solr.schema.FieldType;
import org.junit.AfterClass;
@@ -122,7 +121,7 @@ public void testNonExistantBackupRepository() {
@Test
public void testRepositoryConfig() {
- PluginInfo[] plugins = new PluginInfo[2];
+ PluginInfo[] plugins = new PluginInfo[1];
{
Map<String, String> attrs = new HashMap<>();
@@ -132,13 +131,7 @@ public void testRepositoryConfig() {
attrs.put("location", "/tmp");
plugins[0] = new PluginInfo("repository", attrs);
}
- {
- Map<String, String> attrs = new HashMap<>();
- attrs.put(CoreAdminParams.NAME, "boom");
- attrs.put(FieldType.CLASS_NAME, HdfsBackupRepository.class.getName());
- attrs.put("location", "/tmp");
- plugins[1] = new PluginInfo("repository", attrs);
- }
+
Collections.shuffle(Arrays.asList(plugins), random());
BackupRepositoryFactory f = new BackupRepositoryFactory(plugins);
diff --git a/solr/core/src/test/org/apache/solr/handler/TestRestoreCore.java b/solr/core/src/test/org/apache/solr/handler/TestRestoreCore.java
index 4edee45f1bd..1d48de02373 100644
--- a/solr/core/src/test/org/apache/solr/handler/TestRestoreCore.java
+++ b/solr/core/src/test/org/apache/solr/handler/TestRestoreCore.java
@@ -18,18 +18,14 @@
import java.io.File;
import java.io.IOException;
-import java.io.InputStream;
-import java.net.URL;
+
import java.net.URLEncoder;
import java.nio.file.DirectoryStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.Properties;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
-import org.apache.commons.io.IOUtils;
import org.apache.lucene.index.IndexFileNames;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.TestUtil;
@@ -174,7 +170,7 @@ public void testSimpleRestore() throws Exception {
TestReplicationHandlerBackup.runBackupCommand(leaderJetty, ReplicationHandler.CMD_RESTORE, params);
- while (!fetchRestoreStatus(baseUrl, DEFAULT_TEST_CORENAME)) {
+ while (!TestRestoreCoreUtil.fetchRestoreStatus(baseUrl, DEFAULT_TEST_CORENAME)) {
Thread.sleep(1000);
}
@@ -223,7 +219,7 @@ public void testFailedRestore() throws Exception {
expectThrows(AssertionError.class, () -> {
for (int i = 0; i < 10; i++) {
// this will throw an assertion once we get what we expect
- fetchRestoreStatus(baseUrl, DEFAULT_TEST_CORENAME);
+ TestRestoreCoreUtil.fetchRestoreStatus(baseUrl, DEFAULT_TEST_CORENAME);
Thread.sleep(50);
}
// if we never got an assertion let expectThrows complain
@@ -237,29 +233,5 @@ public void testFailedRestore() throws Exception {
}
- public static boolean fetchRestoreStatus (String baseUrl, String coreName) throws IOException {
- String leaderUrl = baseUrl + "/" + coreName +
- ReplicationHandler.PATH + "?wt=xml&command=" + ReplicationHandler.CMD_RESTORE_STATUS;
- final Pattern pException = Pattern.compile("<str name=\"exception\">(.*?)</str>");
- InputStream stream = null;
- try {
- URL url = new URL(leaderUrl);
- stream = url.openStream();
- String response = IOUtils.toString(stream, "UTF-8");
- Matcher matcher = pException.matcher(response);
- if(matcher.find()) {
- fail("Failed to complete restore action with exception " + matcher.group(1));
- }
- if(response.contains("<str name=\"status\">success</str>")) {
- return true;
- } else if (response.contains("<str name=\"status\">failed</str>")){
- fail("Restore Failed");
- }
- stream.close();
- } finally {
- IOUtils.closeQuietly(stream);
- }
- return false;
- }
}
diff --git a/solr/core/src/test/org/apache/solr/security/hadoop/HadoopAuthFakeGroupMapping.java b/solr/core/src/test/org/apache/solr/security/hadoop/HadoopAuthFakeGroupMapping.java
new file mode 100644
index 00000000000..b77a7232d14
--- /dev/null
+++ b/solr/core/src/test/org/apache/solr/security/hadoop/HadoopAuthFakeGroupMapping.java
@@ -0,0 +1,40 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.security.hadoop;
+
+import java.util.Collections;
+import java.util.List;
+
+import org.apache.hadoop.security.GroupMappingServiceProvider;
+
+/**
+ * Fake mapping for Hadoop to prevent falling back to Shell group provider
+ */
+public class HadoopAuthFakeGroupMapping implements GroupMappingServiceProvider {
+ @Override
+ public List<String> getGroups(String user) {
+ return Collections.singletonList("supergroup");
+ }
+
+ @Override
+ public void cacheGroupsRefresh() {
+ }
+
+ @Override
+ public void cacheGroupsAdd(List<String> groups) {
+ }
+}
diff --git a/solr/core/src/test/org/apache/solr/security/hadoop/HadoopTestUtil.java b/solr/core/src/test/org/apache/solr/security/hadoop/HadoopTestUtil.java
new file mode 100644
index 00000000000..f8a7222f942
--- /dev/null
+++ b/solr/core/src/test/org/apache/solr/security/hadoop/HadoopTestUtil.java
@@ -0,0 +1,58 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.solr.security.hadoop;
+
+import java.lang.invoke.MethodHandles;
+import org.apache.hadoop.io.nativeio.NativeIO;
+
+import org.apache.lucene.util.Constants;
+import org.apache.solr.SolrTestCase;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class HadoopTestUtil {
+ private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+
+ public static void checkAssumptions() {
+ ensureHadoopHomeNotSet();
+ checkHadoopWindows();
+ }
+
+ /**
+ * If Hadoop home is set via environment variable HADOOP_HOME or Java system property
+ * hadoop.home.dir, the behavior of the tests is undefined. Ensure that these are not set
+ * before starting. It is not possible to easily unset environment variables, so it is
+ * better to bail out early instead of trying to test.
+ */
+ protected static void ensureHadoopHomeNotSet() {
+ if (System.getenv("HADOOP_HOME") != null) {
+ SolrTestCase.fail("Ensure that HADOOP_HOME environment variable is not set.");
+ }
+ if (System.getProperty("hadoop.home.dir") != null) {
+ SolrTestCase.fail("Ensure that \"hadoop.home.dir\" Java property is not set.");
+ }
+ }
+
+ /**
+ * Hadoop integration tests fail on Windows without Hadoop NativeIO
+ */
+ protected static void checkHadoopWindows() {
+ SolrTestCase.assumeTrue("Hadoop does not work on Windows without Hadoop NativeIO",
+ !Constants.WINDOWS || NativeIO.isAvailable());
+ }
+}
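
As a usage note, `HadoopTestUtil.checkAssumptions()` is intended to be called from `@BeforeClass` before any cluster setup, as the Hadoop-auth tests below do. A minimal, hypothetical skeleton (the class name `MyHadoopAuthTest` is invented for illustration), assuming the standard `SolrCloudTestCase` pattern:

```java
package org.apache.solr.security.hadoop; // hypothetical test, illustration only

import org.apache.solr.cloud.SolrCloudTestCase;
import org.junit.BeforeClass;

public class MyHadoopAuthTest extends SolrCloudTestCase {
  @BeforeClass
  public static void setupClass() throws Exception {
    // fail fast if HADOOP_HOME / hadoop.home.dir is set; skip on Windows without NativeIO
    HadoopTestUtil.checkAssumptions();
    configureCluster(1).configure(); // normal cluster setup continues from here
  }
}
```
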
diff --git a/solr/core/src/test/org/apache/solr/security/hadoop/TestDelegationWithHadoopAuth.java b/solr/core/src/test/org/apache/solr/security/hadoop/TestDelegationWithHadoopAuth.java
index c685a7b2bc3..86737bbb3f3 100644
--- a/solr/core/src/test/org/apache/solr/security/hadoop/TestDelegationWithHadoopAuth.java
+++ b/solr/core/src/test/org/apache/solr/security/hadoop/TestDelegationWithHadoopAuth.java
@@ -36,7 +36,6 @@
import org.apache.solr.client.solrj.response.CollectionAdminResponse;
import org.apache.solr.client.solrj.response.DelegationTokenResponse;
import org.apache.solr.cloud.SolrCloudTestCase;
-import org.apache.solr.cloud.hdfs.HdfsTestUtil;
import org.apache.solr.common.SolrException.ErrorCode;
import org.apache.solr.common.cloud.SolrZkClient;
import org.apache.solr.common.params.ModifiableSolrParams;
@@ -54,7 +53,7 @@ public class TestDelegationWithHadoopAuth extends SolrCloudTestCase {
@BeforeClass
public static void setupClass() throws Exception {
- HdfsTestUtil.checkAssumptions();
+ HadoopTestUtil.checkAssumptions();
configureCluster(NUM_SERVERS)// nodes
.withSecurityJson(TEST_PATH().resolve("security").resolve("hadoop_simple_auth_with_delegation.json"))
diff --git a/solr/core/src/test/org/apache/solr/security/hadoop/TestImpersonationWithHadoopAuth.java b/solr/core/src/test/org/apache/solr/security/hadoop/TestImpersonationWithHadoopAuth.java
index c4902800649..049d756b522 100644
--- a/solr/core/src/test/org/apache/solr/security/hadoop/TestImpersonationWithHadoopAuth.java
+++ b/solr/core/src/test/org/apache/solr/security/hadoop/TestImpersonationWithHadoopAuth.java
@@ -29,7 +29,6 @@
import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.client.solrj.request.CollectionAdminRequest;
import org.apache.solr.cloud.SolrCloudTestCase;
-import org.apache.solr.cloud.hdfs.HdfsTestUtil;
import org.apache.solr.common.params.ModifiableSolrParams;
import org.apache.solr.common.util.Utils;
import org.apache.solr.security.HadoopAuthPlugin;
@@ -51,7 +50,7 @@ public class TestImpersonationWithHadoopAuth extends SolrCloudTestCase {
@SuppressWarnings("unchecked")
@BeforeClass
public static void setupClass() throws Exception {
- HdfsTestUtil.checkAssumptions();
+ HadoopTestUtil.checkAssumptions();
InetAddress loopback = InetAddress.getLoopbackAddress();
Path securityJsonPath = TEST_PATH().resolve("security").resolve("hadoop_simple_auth_with_delegation.json");
diff --git a/solr/core/src/test/org/apache/solr/security/hadoop/TestSolrCloudWithHadoopAuthPlugin.java b/solr/core/src/test/org/apache/solr/security/hadoop/TestSolrCloudWithHadoopAuthPlugin.java
index 36520be199c..063f3dcadf2 100644
--- a/solr/core/src/test/org/apache/solr/security/hadoop/TestSolrCloudWithHadoopAuthPlugin.java
+++ b/solr/core/src/test/org/apache/solr/security/hadoop/TestSolrCloudWithHadoopAuthPlugin.java
@@ -23,7 +23,6 @@
import org.apache.solr.cloud.AbstractDistribZkTestBase;
import org.apache.solr.cloud.KerberosTestServices;
import org.apache.solr.cloud.SolrCloudAuthTestCase;
-import org.apache.solr.cloud.hdfs.HdfsTestUtil;
import org.apache.solr.common.SolrInputDocument;
import org.junit.AfterClass;
import org.junit.BeforeClass;
@@ -37,7 +36,7 @@ public class TestSolrCloudWithHadoopAuthPlugin extends SolrCloudAuthTestCase {
@BeforeClass
public static void setupClass() throws Exception {
- HdfsTestUtil.checkAssumptions();
+ HadoopTestUtil.checkAssumptions();
kerberosTestServices = KerberosUtils.setupMiniKdc(createTempDir());
diff --git a/solr/core/src/test/org/apache/solr/security/hadoop/TestZkAclsWithHadoopAuth.java b/solr/core/src/test/org/apache/solr/security/hadoop/TestZkAclsWithHadoopAuth.java
index d2f91221201..26e2a6a8711 100644
--- a/solr/core/src/test/org/apache/solr/security/hadoop/TestZkAclsWithHadoopAuth.java
+++ b/solr/core/src/test/org/apache/solr/security/hadoop/TestZkAclsWithHadoopAuth.java
@@ -29,7 +29,6 @@
import org.apache.solr.cloud.MiniSolrCloudCluster;
import org.apache.solr.cloud.SolrCloudTestCase;
-import org.apache.solr.cloud.hdfs.HdfsTestUtil;
import org.apache.solr.common.cloud.SecurityAwareZkACLProvider;
import org.apache.solr.common.cloud.SolrZkClient;
import org.apache.solr.common.cloud.VMParamsAllAndReadonlyDigestZkACLProvider;
@@ -55,7 +54,7 @@ public class TestZkAclsWithHadoopAuth extends SolrCloudTestCase {
@BeforeClass
public static void setupClass() throws Exception {
- HdfsTestUtil.checkAssumptions();
+ HadoopTestUtil.checkAssumptions();
System.setProperty(SolrZkClient.ZK_ACL_PROVIDER_CLASS_NAME_VM_PARAM_NAME,
VMParamsAllAndReadonlyDigestZkACLProvider.class.getName());
diff --git a/solr/core/src/test/org/apache/solr/update/SolrIndexConfigTest.java b/solr/core/src/test/org/apache/solr/update/SolrIndexConfigTest.java
index 93bfdf1d89d..4a40abcd888 100644
--- a/solr/core/src/test/org/apache/solr/update/SolrIndexConfigTest.java
+++ b/solr/core/src/test/org/apache/solr/update/SolrIndexConfigTest.java
@@ -198,8 +198,7 @@ public void testToMap() throws Exception {
assertTrue(DirectoryFactory.LOCK_TYPE_SIMPLE.equals(lockType) ||
DirectoryFactory.LOCK_TYPE_NATIVE.equals(lockType) ||
DirectoryFactory.LOCK_TYPE_SINGLE.equals(lockType) ||
- DirectoryFactory.LOCK_TYPE_NONE.equals(lockType) ||
- DirectoryFactory.LOCK_TYPE_HDFS.equals(lockType));
+ DirectoryFactory.LOCK_TYPE_NONE.equals(lockType));
}
++mSizeExpected; assertTrue(m.get("infoStreamEnabled") instanceof Boolean);
diff --git a/solr/licenses/checker-qual-3.10.0.jar.sha1 b/solr/licenses/checker-qual-3.10.0.jar.sha1
deleted file mode 100644
index 30969364cbe..00000000000
--- a/solr/licenses/checker-qual-3.10.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-710fd6abff4b26b40dc0917050dc4c67efcf60b6
diff --git a/solr/licenses/checker-qual-3.19.0.jar.sha1 b/solr/licenses/checker-qual-3.19.0.jar.sha1
new file mode 100644
index 00000000000..fb782cef1ce
--- /dev/null
+++ b/solr/licenses/checker-qual-3.19.0.jar.sha1
@@ -0,0 +1 @@
+838b42bb6f7f73315167b359d24649845cef1c48
diff --git a/solr/modules/hdfs/README.md b/solr/modules/hdfs/README.md
new file mode 100644
index 00000000000..86fcb6be74f
--- /dev/null
+++ b/solr/modules/hdfs/README.md
@@ -0,0 +1,29 @@
+Apache Solr HDFS Module
+===============================
+
+Introduction
+------------
+This module implements support for the Hadoop Distributed File System (HDFS) in Apache Solr.
+
+Building
+--------
+The HDFS module uses the same Gradle build as the core Solr components.
+
+To build the module, you can use
+
+```
+./gradlew :solr:modules:hdfs:assemble
+```
+
+The resulting module JAR will be placed in the `build/libs` directory, for example:
+`solr/modules/hdfs/build/libs/solr-hdfs-9.0.0-SNAPSHOT.jar`
+
+To execute the module tests:
+
+```
+./gradlew :solr:modules:hdfs:test
+```
+
+Usage
+-----
+Please refer to the 'Running Solr on HDFS' section of the reference guide: https://solr.apache.org/guide/running-solr-on-hdfs.html
diff --git a/solr/modules/hdfs/build.gradle b/solr/modules/hdfs/build.gradle
new file mode 100644
index 00000000000..515d238205f
--- /dev/null
+++ b/solr/modules/hdfs/build.gradle
@@ -0,0 +1,99 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+apply plugin: 'java-library'
+
+description = 'HDFS Contrib Module'
+
+dependencies {
+ configurations.all {
+ exclude group: 'log4j', module: 'log4j'
+ exclude group: 'commons-logging', module: 'commons-logging'
+ exclude group: 'org.slf4j', module: 'slf4j-log4j12'
+ exclude group: 'org.apache.yetus', module: 'audience-annotations'
+ exclude group: 'org.codehaus.mojo', module: 'animal-sniffer-annotations'
+ // be conservative on what's added here. Affects *all* configs, including internal ones.
+ }
+
+ implementation project(':solr:core')
+
+ // Hadoop dependencies
+ implementation ('org.apache.hadoop:hadoop-annotations') { transitive = false }
+ implementation ('org.apache.hadoop:hadoop-auth') { transitive = false }
+ implementation ('org.apache.hadoop:hadoop-common') { transitive = false }
+ // transitive of hadoop-common; used by HDFS
+ runtimeOnly 'commons-collections:commons-collections'
+ runtimeOnly 'com.google.re2j:re2j'
+ runtimeOnly 'org.apache.commons:commons-configuration2'
+ runtimeOnly 'org.apache.htrace:htrace-core4' // note: removed in Hadoop 3.3.2
+ runtimeOnly 'org.apache.kerby:kerb-core'
+ runtimeOnly 'org.apache.kerby:kerb-util'
+ implementation ('org.apache.hadoop:hadoop-hdfs-client') { transitive = false }
+
+ // Guava implements the VisibleForTesting annotations
+ implementation ('com.google.guava:guava') { transitive = false }
+
+ // Caffeine cache to implement HDFS block caching
+ implementation ('com.github.ben-manes.caffeine:caffeine')
+
+ // Many HDFS tests are using/subclassing test framework classes
+ testImplementation project(':solr:solrj')
+ testImplementation project(':solr:test-framework')
+
+ // hadoop dependencies for tests
+ testImplementation ('org.apache.hadoop:hadoop-hdfs') { transitive = false }
+ testImplementation ('org.apache.hadoop:hadoop-common::tests') { transitive = false }
+ testImplementation ('org.apache.hadoop:hadoop-hdfs::tests') { transitive = false }
+ testImplementation ('org.apache.hadoop:hadoop-minikdc') { transitive = false }
+
+ testImplementation 'org.slf4j:jcl-over-slf4j'
+ testImplementation 'org.apache.logging.log4j:log4j-1.2-api'
+
+ // classes like solr.ICUCollationField, used by NNFailoverTest for example.
+ testImplementation project(':solr:modules:analysis-extras')
+
+ // required for instantiating a Zookeeper server in tests or embedded mode
+ runtimeOnly ('org.xerial.snappy:snappy-java')
+
+ // commons packages needed by test classes
+ testImplementation('commons-io:commons-io') { transitive = false }
+
+ // used by the hadoop-specific test framework classes
+ testImplementation ('org.apache.commons:commons-compress') { transitive = false }
+ testImplementation ('org.apache.commons:commons-collections4') { transitive = false }
+ testImplementation ('org.apache.commons:commons-lang3') { transitive = false }
+ testImplementation ('com.sun.jersey:jersey-servlet') { transitive = false }
+
+ // Zookeeper dependency - some tests like HdfsCloudBackupRestore need this
+ implementation ('org.apache.zookeeper:zookeeper')
+}
+
+
+// Copy all the test resource files from core to the build/resources/test directory
+// of the HDFS module so we can avoid duplication of the test resource files like
+// schemas and SolrConfigs. This can be improved later by making the test classes
+// load the resources from core directories directly.
+task copySolrCoreTestResources(type: Copy) {
+ from(project(':solr:core').sourceSets.test.resources.srcDirs) {
+ exclude '**/*.java'
+ }
+ into sourceSets.test.output.resourcesDir
+}
+
+tasks.processTestResources.configure {
+ dependsOn copySolrCoreTestResources
+}
diff --git a/solr/core/src/java/org/apache/solr/core/HdfsDirectoryFactory.java b/solr/modules/hdfs/src/java/org/apache/solr/core/HdfsDirectoryFactory.java
similarity index 98%
rename from solr/core/src/java/org/apache/solr/core/HdfsDirectoryFactory.java
rename to solr/modules/hdfs/src/java/org/apache/solr/core/HdfsDirectoryFactory.java
index 6b7c105b094..b88eb3a733c 100644
--- a/solr/core/src/java/org/apache/solr/core/HdfsDirectoryFactory.java
+++ b/solr/modules/hdfs/src/java/org/apache/solr/core/HdfsDirectoryFactory.java
@@ -64,6 +64,8 @@
import org.apache.solr.store.hdfs.HdfsDirectory;
import org.apache.solr.store.hdfs.HdfsLocalityReporter;
import org.apache.solr.store.hdfs.HdfsLockFactory;
+import org.apache.solr.update.HdfsUpdateLog;
+import org.apache.solr.update.UpdateLog;
import org.apache.solr.util.HdfsUtil;
import org.apache.solr.util.plugin.SolrCoreAware;
import org.slf4j.Logger;
@@ -71,13 +73,11 @@
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION;
-/**
- * @deprecated since 8.6
- */
-@Deprecated
public class HdfsDirectoryFactory extends CachingDirectoryFactory implements SolrCoreAware, SolrMetricProducer {
private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
+
+ public final static String LOCK_TYPE_HDFS = "hdfs";
+
public static final String BLOCKCACHE_SLAB_COUNT = "solr.hdfs.blockcache.slab.count";
public static final String BLOCKCACHE_DIRECT_MEMORY_ALLOCATION = "solr.hdfs.blockcache.direct.memory.allocation";
public static final String BLOCKCACHE_ENABLED = "solr.hdfs.blockcache.enabled";
@@ -102,6 +102,8 @@ public class HdfsDirectoryFactory extends CachingDirectoryFactory implements Sol
public static final String CACHE_MERGES = "solr.hdfs.blockcache.cachemerges";
public static final String CACHE_READONCE = "solr.hdfs.blockcache.cachereadonce";
+
+ public static final String HDFS_UPDATE_LOG_CLASS_NAME = "solr.HdfsUpdateLog";
private SolrParams params;
@@ -182,12 +184,12 @@ public void init(NamedList<?> args) {
@Override
protected LockFactory createLockFactory(String rawLockType) throws IOException {
if (null == rawLockType) {
- rawLockType = DirectoryFactory.LOCK_TYPE_HDFS;
+ rawLockType = LOCK_TYPE_HDFS;
log.warn("No lockType configured, assuming '{}'.", rawLockType);
}
final String lockType = rawLockType.toLowerCase(Locale.ROOT).trim();
switch (lockType) {
- case DirectoryFactory.LOCK_TYPE_HDFS:
+ case LOCK_TYPE_HDFS:
return HdfsLockFactory.INSTANCE;
case DirectoryFactory.LOCK_TYPE_SINGLE:
return new SingleInstanceLockFactory();
@@ -633,4 +635,10 @@ public void move(Directory fromDir, Directory toDir, String fileName, IOContext
super.move(fromDir, toDir, fileName, ioContext);
}
+
+ @Override
+ public UpdateLog newDefaultUpdateLog() {
+ return new HdfsUpdateLog(getConfDir());
+ }
+
}
diff --git a/solr/core/src/java/org/apache/solr/core/backup/repository/HdfsBackupRepository.java b/solr/modules/hdfs/src/java/org/apache/solr/core/backup/repository/HdfsBackupRepository.java
similarity index 98%
rename from solr/core/src/java/org/apache/solr/core/backup/repository/HdfsBackupRepository.java
rename to solr/modules/hdfs/src/java/org/apache/solr/core/backup/repository/HdfsBackupRepository.java
index 632bca91e94..a1f9aebb48d 100644
--- a/solr/core/src/java/org/apache/solr/core/backup/repository/HdfsBackupRepository.java
+++ b/solr/modules/hdfs/src/java/org/apache/solr/core/backup/repository/HdfsBackupRepository.java
@@ -46,10 +46,6 @@
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-/**
- * @deprecated since 8.6
- */
-@Deprecated
public class HdfsBackupRepository implements BackupRepository {
private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
@@ -67,8 +63,6 @@ public class HdfsBackupRepository implements BackupRepository {
public void init(NamedList<?> args) {
this.config = args;
- log.warn("HDFS support in Solr has been deprecated as of 8.6. See SOLR-14021 for details.");
-
// Configure the size of the buffer used for copying index files to/from HDFS, if specified.
if (args.get(HDFS_COPY_BUFFER_SIZE_PARAM) != null) {
this.copyBufferSize = (Integer)args.get(HDFS_COPY_BUFFER_SIZE_PARAM);
diff --git a/solr/core/src/java/org/apache/solr/index/hdfs/CheckHdfsIndex.java b/solr/modules/hdfs/src/java/org/apache/solr/index/hdfs/CheckHdfsIndex.java
similarity index 100%
rename from solr/core/src/java/org/apache/solr/index/hdfs/CheckHdfsIndex.java
rename to solr/modules/hdfs/src/java/org/apache/solr/index/hdfs/CheckHdfsIndex.java
diff --git a/solr/core/src/java/org/apache/solr/index/hdfs/package-info.java b/solr/modules/hdfs/src/java/org/apache/solr/index/hdfs/package-info.java
similarity index 100%
rename from solr/core/src/java/org/apache/solr/index/hdfs/package-info.java
rename to solr/modules/hdfs/src/java/org/apache/solr/index/hdfs/package-info.java
diff --git a/solr/core/src/java/org/apache/solr/store/blockcache/BlockCache.java b/solr/modules/hdfs/src/java/org/apache/solr/store/blockcache/BlockCache.java
similarity index 100%
rename from solr/core/src/java/org/apache/solr/store/blockcache/BlockCache.java
rename to solr/modules/hdfs/src/java/org/apache/solr/store/blockcache/BlockCache.java
diff --git a/solr/core/src/java/org/apache/solr/store/blockcache/BlockCacheKey.java b/solr/modules/hdfs/src/java/org/apache/solr/store/blockcache/BlockCacheKey.java
similarity index 100%
rename from solr/core/src/java/org/apache/solr/store/blockcache/BlockCacheKey.java
rename to solr/modules/hdfs/src/java/org/apache/solr/store/blockcache/BlockCacheKey.java
diff --git a/solr/core/src/java/org/apache/solr/store/blockcache/BlockCacheLocation.java b/solr/modules/hdfs/src/java/org/apache/solr/store/blockcache/BlockCacheLocation.java
similarity index 100%
rename from solr/core/src/java/org/apache/solr/store/blockcache/BlockCacheLocation.java
rename to solr/modules/hdfs/src/java/org/apache/solr/store/blockcache/BlockCacheLocation.java
diff --git a/solr/core/src/java/org/apache/solr/store/blockcache/BlockDirectory.java b/solr/modules/hdfs/src/java/org/apache/solr/store/blockcache/BlockDirectory.java
similarity index 100%
rename from solr/core/src/java/org/apache/solr/store/blockcache/BlockDirectory.java
rename to solr/modules/hdfs/src/java/org/apache/solr/store/blockcache/BlockDirectory.java
diff --git a/solr/core/src/java/org/apache/solr/store/blockcache/BlockDirectoryCache.java b/solr/modules/hdfs/src/java/org/apache/solr/store/blockcache/BlockDirectoryCache.java
similarity index 100%
rename from solr/core/src/java/org/apache/solr/store/blockcache/BlockDirectoryCache.java
rename to solr/modules/hdfs/src/java/org/apache/solr/store/blockcache/BlockDirectoryCache.java
diff --git a/solr/core/src/java/org/apache/solr/store/blockcache/BlockLocks.java b/solr/modules/hdfs/src/java/org/apache/solr/store/blockcache/BlockLocks.java
similarity index 100%
rename from solr/core/src/java/org/apache/solr/store/blockcache/BlockLocks.java
rename to solr/modules/hdfs/src/java/org/apache/solr/store/blockcache/BlockLocks.java
diff --git a/solr/core/src/java/org/apache/solr/store/blockcache/BufferStore.java b/solr/modules/hdfs/src/java/org/apache/solr/store/blockcache/BufferStore.java
similarity index 100%
rename from solr/core/src/java/org/apache/solr/store/blockcache/BufferStore.java
rename to solr/modules/hdfs/src/java/org/apache/solr/store/blockcache/BufferStore.java
diff --git a/solr/core/src/java/org/apache/solr/store/blockcache/Cache.java b/solr/modules/hdfs/src/java/org/apache/solr/store/blockcache/Cache.java
similarity index 100%
rename from solr/core/src/java/org/apache/solr/store/blockcache/Cache.java
rename to solr/modules/hdfs/src/java/org/apache/solr/store/blockcache/Cache.java
diff --git a/solr/core/src/java/org/apache/solr/store/blockcache/CachedIndexOutput.java b/solr/modules/hdfs/src/java/org/apache/solr/store/blockcache/CachedIndexOutput.java
similarity index 100%
rename from solr/core/src/java/org/apache/solr/store/blockcache/CachedIndexOutput.java
rename to solr/modules/hdfs/src/java/org/apache/solr/store/blockcache/CachedIndexOutput.java
diff --git a/solr/core/src/java/org/apache/solr/store/blockcache/CustomBufferedIndexInput.java b/solr/modules/hdfs/src/java/org/apache/solr/store/blockcache/CustomBufferedIndexInput.java
similarity index 100%
rename from solr/core/src/java/org/apache/solr/store/blockcache/CustomBufferedIndexInput.java
rename to solr/modules/hdfs/src/java/org/apache/solr/store/blockcache/CustomBufferedIndexInput.java
diff --git a/solr/core/src/java/org/apache/solr/store/blockcache/Metrics.java b/solr/modules/hdfs/src/java/org/apache/solr/store/blockcache/Metrics.java
similarity index 100%
rename from solr/core/src/java/org/apache/solr/store/blockcache/Metrics.java
rename to solr/modules/hdfs/src/java/org/apache/solr/store/blockcache/Metrics.java
diff --git a/solr/core/src/java/org/apache/solr/store/blockcache/ReusedBufferedIndexOutput.java b/solr/modules/hdfs/src/java/org/apache/solr/store/blockcache/ReusedBufferedIndexOutput.java
similarity index 100%
rename from solr/core/src/java/org/apache/solr/store/blockcache/ReusedBufferedIndexOutput.java
rename to solr/modules/hdfs/src/java/org/apache/solr/store/blockcache/ReusedBufferedIndexOutput.java
diff --git a/solr/core/src/java/org/apache/solr/store/blockcache/Store.java b/solr/modules/hdfs/src/java/org/apache/solr/store/blockcache/Store.java
similarity index 100%
rename from solr/core/src/java/org/apache/solr/store/blockcache/Store.java
rename to solr/modules/hdfs/src/java/org/apache/solr/store/blockcache/Store.java
diff --git a/solr/core/src/java/org/apache/solr/store/blockcache/package-info.java b/solr/modules/hdfs/src/java/org/apache/solr/store/blockcache/package-info.java
similarity index 100%
rename from solr/core/src/java/org/apache/solr/store/blockcache/package-info.java
rename to solr/modules/hdfs/src/java/org/apache/solr/store/blockcache/package-info.java
diff --git a/solr/core/src/java/org/apache/solr/store/hdfs/HdfsDirectory.java b/solr/modules/hdfs/src/java/org/apache/solr/store/hdfs/HdfsDirectory.java
similarity index 98%
rename from solr/core/src/java/org/apache/solr/store/hdfs/HdfsDirectory.java
rename to solr/modules/hdfs/src/java/org/apache/solr/store/hdfs/HdfsDirectory.java
index af8a5400277..6efe2cde562 100644
--- a/solr/core/src/java/org/apache/solr/store/hdfs/HdfsDirectory.java
+++ b/solr/modules/hdfs/src/java/org/apache/solr/store/hdfs/HdfsDirectory.java
@@ -44,11 +44,6 @@
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-/**
- * @deprecated since 8.6
- */
-
-@Deprecated
public class HdfsDirectory extends BaseDirectory {
private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
public static final int DEFAULT_BUFFER_SIZE = 4096;
@@ -102,7 +97,6 @@ public HdfsDirectory(Path hdfsDirPath, LockFactory lockFactory, Configuration co
org.apache.solr.common.util.IOUtils.closeQuietly(fileSystem);
throw new RuntimeException("Problem creating directory: " + hdfsDirPath, e);
}
- log.warn("HDFS support in Solr has been deprecated as of 8.6. See SOLR-14021 for details.");
}
@Override
diff --git a/solr/core/src/java/org/apache/solr/store/hdfs/HdfsFileWriter.java b/solr/modules/hdfs/src/java/org/apache/solr/store/hdfs/HdfsFileWriter.java
similarity index 98%
rename from solr/core/src/java/org/apache/solr/store/hdfs/HdfsFileWriter.java
rename to solr/modules/hdfs/src/java/org/apache/solr/store/hdfs/HdfsFileWriter.java
index e8c9db30630..4099d958e6c 100644
--- a/solr/core/src/java/org/apache/solr/store/hdfs/HdfsFileWriter.java
+++ b/solr/modules/hdfs/src/java/org/apache/solr/store/hdfs/HdfsFileWriter.java
@@ -30,9 +30,7 @@
/**
* @lucene.experimental
- * @deprecated since 8.6
*/
-@Deprecated
public class HdfsFileWriter extends OutputStreamIndexOutput {
public static final String HDFS_SYNC_BLOCK = "solr.hdfs.sync.block";
diff --git a/solr/core/src/java/org/apache/solr/store/hdfs/HdfsLocalityReporter.java b/solr/modules/hdfs/src/java/org/apache/solr/store/hdfs/HdfsLocalityReporter.java
similarity index 99%
rename from solr/core/src/java/org/apache/solr/store/hdfs/HdfsLocalityReporter.java
rename to solr/modules/hdfs/src/java/org/apache/solr/store/hdfs/HdfsLocalityReporter.java
index fe19d120a55..c95396d60bc 100644
--- a/solr/core/src/java/org/apache/solr/store/hdfs/HdfsLocalityReporter.java
+++ b/solr/modules/hdfs/src/java/org/apache/solr/store/hdfs/HdfsLocalityReporter.java
@@ -35,10 +35,6 @@
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-/**
- * @deprecated since 8.6
- */
-@Deprecated
public class HdfsLocalityReporter implements SolrInfoBean {
public static final String LOCALITY_BYTES_TOTAL = "locality.bytes.total";
public static final String LOCALITY_BYTES_LOCAL = "locality.bytes.local";
diff --git a/solr/core/src/java/org/apache/solr/store/hdfs/HdfsLockFactory.java b/solr/modules/hdfs/src/java/org/apache/solr/store/hdfs/HdfsLockFactory.java
similarity index 99%
rename from solr/core/src/java/org/apache/solr/store/hdfs/HdfsLockFactory.java
rename to solr/modules/hdfs/src/java/org/apache/solr/store/hdfs/HdfsLockFactory.java
index 0b4e2e33c9c..c62b5f4a4aa 100644
--- a/solr/core/src/java/org/apache/solr/store/hdfs/HdfsLockFactory.java
+++ b/solr/modules/hdfs/src/java/org/apache/solr/store/hdfs/HdfsLockFactory.java
@@ -34,10 +34,6 @@
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-/**
- * @deprecated since 8.6
- */
-@Deprecated
public class HdfsLockFactory extends LockFactory {
private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
diff --git a/solr/core/src/java/org/apache/solr/store/hdfs/package-info.java b/solr/modules/hdfs/src/java/org/apache/solr/store/hdfs/package-info.java
similarity index 100%
rename from solr/core/src/java/org/apache/solr/store/hdfs/package-info.java
rename to solr/modules/hdfs/src/java/org/apache/solr/store/hdfs/package-info.java
diff --git a/solr/core/src/java/org/apache/solr/update/HdfsTransactionLog.java b/solr/modules/hdfs/src/java/org/apache/solr/update/HdfsTransactionLog.java
similarity index 99%
rename from solr/core/src/java/org/apache/solr/update/HdfsTransactionLog.java
rename to solr/modules/hdfs/src/java/org/apache/solr/update/HdfsTransactionLog.java
index 8982b6fe323..977c451f938 100644
--- a/solr/core/src/java/org/apache/solr/update/HdfsTransactionLog.java
+++ b/solr/modules/hdfs/src/java/org/apache/solr/update/HdfsTransactionLog.java
@@ -57,9 +57,7 @@
* This would also allow to not log document data for requests with commit=true
* in them (since we know that if the request succeeds, all docs will be committed)
*
- * @deprecated since 8.6
*/
-@Deprecated
public class HdfsTransactionLog extends TransactionLog {
private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
private static boolean debug = log.isDebugEnabled();
diff --git a/solr/core/src/java/org/apache/solr/update/HdfsUpdateLog.java b/solr/modules/hdfs/src/java/org/apache/solr/update/HdfsUpdateLog.java
similarity index 99%
rename from solr/core/src/java/org/apache/solr/update/HdfsUpdateLog.java
rename to solr/modules/hdfs/src/java/org/apache/solr/update/HdfsUpdateLog.java
index 5dbd53d504f..0a83b425bdd 100644
--- a/solr/core/src/java/org/apache/solr/update/HdfsUpdateLog.java
+++ b/solr/modules/hdfs/src/java/org/apache/solr/update/HdfsUpdateLog.java
@@ -44,9 +44,7 @@
/**
* @lucene.experimental
- * @deprecated since 8.6
*/
-@Deprecated
public class HdfsUpdateLog extends UpdateLog {
private final Object fsLock = new Object();
diff --git a/solr/core/src/java/org/apache/solr/util/FSHDFSUtils.java b/solr/modules/hdfs/src/java/org/apache/solr/util/FSHDFSUtils.java
similarity index 100%
rename from solr/core/src/java/org/apache/solr/util/FSHDFSUtils.java
rename to solr/modules/hdfs/src/java/org/apache/solr/util/FSHDFSUtils.java
diff --git a/solr/core/src/java/org/apache/solr/util/HdfsUtil.java b/solr/modules/hdfs/src/java/org/apache/solr/util/HdfsUtil.java
similarity index 100%
rename from solr/core/src/java/org/apache/solr/util/HdfsUtil.java
rename to solr/modules/hdfs/src/java/org/apache/solr/util/HdfsUtil.java
diff --git a/solr/modules/hdfs/src/test-files/core-site.xml b/solr/modules/hdfs/src/test-files/core-site.xml
new file mode 100644
index 00000000000..c949ff35224
--- /dev/null
+++ b/solr/modules/hdfs/src/test-files/core-site.xml
@@ -0,0 +1,23 @@
+<?xml version="1.0"?>
+<configuration>
+  <property>
+    <name>hadoop.security.group.mapping</name>
+    <value>org.apache.solr.cloud.hdfs.HdfsFakeGroupMapping</value>
+  </property>
+</configuration>
diff --git a/solr/core/src/test/org/apache/hadoop/fs/FileUtil.java b/solr/modules/hdfs/src/test/org/apache/hadoop/fs/FileUtil.java
similarity index 99%
rename from solr/core/src/test/org/apache/hadoop/fs/FileUtil.java
rename to solr/modules/hdfs/src/test/org/apache/hadoop/fs/FileUtil.java
index 5045b18c102..e21a5b92c2e 100644
--- a/solr/core/src/test/org/apache/hadoop/fs/FileUtil.java
+++ b/solr/modules/hdfs/src/test/org/apache/hadoop/fs/FileUtil.java
@@ -77,7 +77,7 @@ public class FileUtil {
public static final Object SOLR_HACK_FOR_CLASS_VERIFICATION = new Object();
// Apparently the Hadoop code expects upper-case LOG, so...
- private static final Logger LOG = LoggerFactory.getLogger(FileUtil.class); //nowarn
+ private static final Logger LOG = LoggerFactory.getLogger(FileUtil.class); // nowarn_valid_logger
/* The error code is defined in winutils to indicate insufficient
* privilege to create symbolic links. This value need to keep in
diff --git a/solr/core/src/test/org/apache/hadoop/fs/HardLink.java b/solr/modules/hdfs/src/test/org/apache/hadoop/fs/HardLink.java
similarity index 100%
rename from solr/core/src/test/org/apache/hadoop/fs/HardLink.java
rename to solr/modules/hdfs/src/test/org/apache/hadoop/fs/HardLink.java
diff --git a/solr/core/src/test/org/apache/hadoop/fs/RawLocalFileSystem.java b/solr/modules/hdfs/src/test/org/apache/hadoop/fs/RawLocalFileSystem.java
similarity index 100%
rename from solr/core/src/test/org/apache/hadoop/fs/RawLocalFileSystem.java
rename to solr/modules/hdfs/src/test/org/apache/hadoop/fs/RawLocalFileSystem.java
diff --git a/solr/core/src/test/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java b/solr/modules/hdfs/src/test/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java
similarity index 99%
rename from solr/core/src/test/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java
rename to solr/modules/hdfs/src/test/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java
index c75ef8df72d..26c3d3c6f76 100644
--- a/solr/core/src/test/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java
+++ b/solr/modules/hdfs/src/test/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java
@@ -82,7 +82,7 @@ public class BlockPoolSlice {
public static final Object SOLR_HACK_FOR_CLASS_VERIFICATION = new Object();
// Apparently the Hadoop code expects upper-case LOG, so...
- static final Logger LOG = LoggerFactory.getLogger(BlockPoolSlice.class); //nowarn
+ static final Logger LOG = LoggerFactory.getLogger(BlockPoolSlice.class); // nowarn_valid_logger
private final String bpid;
private final FsVolumeImpl volume; // volume to which this BlockPool belongs to
diff --git a/solr/core/src/test/org/apache/hadoop/hdfs/server/namenode/NameNodeResourceChecker.java b/solr/modules/hdfs/src/test/org/apache/hadoop/hdfs/server/namenode/NameNodeResourceChecker.java
similarity index 100%
rename from solr/core/src/test/org/apache/hadoop/hdfs/server/namenode/NameNodeResourceChecker.java
rename to solr/modules/hdfs/src/test/org/apache/hadoop/hdfs/server/namenode/NameNodeResourceChecker.java
diff --git a/solr/core/src/test/org/apache/hadoop/http/HttpServer2.java b/solr/modules/hdfs/src/test/org/apache/hadoop/http/HttpServer2.java
similarity index 99%
rename from solr/core/src/test/org/apache/hadoop/http/HttpServer2.java
rename to solr/modules/hdfs/src/test/org/apache/hadoop/http/HttpServer2.java
index 3dcbb9f10b4..d9da3f5491d 100644
--- a/solr/core/src/test/org/apache/hadoop/http/HttpServer2.java
+++ b/solr/modules/hdfs/src/test/org/apache/hadoop/http/HttpServer2.java
@@ -119,7 +119,7 @@ public final class HttpServer2 implements FilterContainer {
public static final Object SOLR_HACK_FOR_CLASS_VERIFICATION = new Object();
// Apparently the Hadoop code expects upper-case LOG, so...
- public static final Logger LOG = LoggerFactory.getLogger(HttpServer2.class); //nowarn
+ public static final Logger LOG = LoggerFactory.getLogger(HttpServer2.class); // nowarn_valid_logger
public static final String HTTP_SCHEME = "http";
public static final String HTTPS_SCHEME = "https";
diff --git a/solr/core/src/test/org/apache/hadoop/package-info.java b/solr/modules/hdfs/src/test/org/apache/hadoop/package-info.java
similarity index 100%
rename from solr/core/src/test/org/apache/hadoop/package-info.java
rename to solr/modules/hdfs/src/test/org/apache/hadoop/package-info.java
diff --git a/solr/core/src/test/org/apache/hadoop/util/DiskChecker.java b/solr/modules/hdfs/src/test/org/apache/hadoop/util/DiskChecker.java
similarity index 99%
rename from solr/core/src/test/org/apache/hadoop/util/DiskChecker.java
rename to solr/modules/hdfs/src/test/org/apache/hadoop/util/DiskChecker.java
index 6699ce13108..98127b057fb 100644
--- a/solr/core/src/test/org/apache/hadoop/util/DiskChecker.java
+++ b/solr/modules/hdfs/src/test/org/apache/hadoop/util/DiskChecker.java
@@ -45,7 +45,7 @@
public class DiskChecker {
public static final Object SOLR_HACK_FOR_CLASS_VERIFICATION = new Object();
- public static final Logger log = LoggerFactory.getLogger(DiskChecker.class);
+ public static final Logger log = LoggerFactory.getLogger(DiskChecker.class); // nowarn_valid_logger
public static class DiskErrorException extends IOException {
public DiskErrorException(String msg) {
diff --git a/solr/core/src/test/org/apache/solr/cloud/MoveReplicaHDFSFailoverTest.java b/solr/modules/hdfs/src/test/org/apache/solr/cloud/MoveReplicaHDFSFailoverTest.java
similarity index 100%
rename from solr/core/src/test/org/apache/solr/cloud/MoveReplicaHDFSFailoverTest.java
rename to solr/modules/hdfs/src/test/org/apache/solr/cloud/MoveReplicaHDFSFailoverTest.java
diff --git a/solr/core/src/test/org/apache/solr/cloud/MoveReplicaHDFSTest.java b/solr/modules/hdfs/src/test/org/apache/solr/cloud/MoveReplicaHDFSTest.java
similarity index 90%
rename from solr/core/src/test/org/apache/solr/cloud/MoveReplicaHDFSTest.java
rename to solr/modules/hdfs/src/test/org/apache/solr/cloud/MoveReplicaHDFSTest.java
index 726a8060be6..aee051e425d 100644
--- a/solr/core/src/test/org/apache/solr/cloud/MoveReplicaHDFSTest.java
+++ b/solr/modules/hdfs/src/test/org/apache/solr/cloud/MoveReplicaHDFSTest.java
@@ -20,6 +20,7 @@
import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters;
import com.carrotsearch.randomizedtesting.annotations.TimeoutSuite;
import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.LuceneTestCase.AwaitsFix;
import org.apache.lucene.util.LuceneTestCase.Slow;
import org.apache.lucene.util.QuickPatchThreadsFilter;
@@ -40,12 +41,12 @@
})
@TimeoutSuite(millis = TimeUnits.HOUR)
@AwaitsFix(bugUrl="https://issues.apache.org/jira/browse/SOLR-13924")
-public class MoveReplicaHDFSTest extends MoveReplicaTest {
+public class MoveReplicaHDFSTest extends AbstractMoveReplicaTestBase {
private static MiniDFSCluster dfsCluster;
@BeforeClass
public static void setupClass() throws Exception {
- dfsCluster = HdfsTestUtil.setupClass(createTempDir().toFile().getAbsolutePath());
+ dfsCluster = HdfsTestUtil.setupClass(LuceneTestCase.createTempDir().toFile().getAbsolutePath());
}
@AfterClass
@@ -65,7 +66,7 @@ protected String getConfigSet() {
@Test
public void testNormalMove() throws Exception {
inPlaceMove = false;
- test();
+ super.test();
}
@Test
@@ -78,13 +79,9 @@ public void testNormalMove() throws Exception {
@BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // 14-Oct-2018
public void testNormalFailedMove() throws Exception {
inPlaceMove = false;
- testFailedMove();
- }
-
- @Test
- @AwaitsFix(bugUrl="https://issues.apache.org/jira/browse/SOLR-12080") // added 03-Oct-2018
- public void testFailedMove() throws Exception {
super.testFailedMove();
}
+
+
}
diff --git a/solr/core/src/test/org/apache/solr/cloud/SharedFSAutoReplicaFailoverTest.java b/solr/modules/hdfs/src/test/org/apache/solr/cloud/SharedFSAutoReplicaFailoverTest.java
similarity index 100%
rename from solr/core/src/test/org/apache/solr/cloud/SharedFSAutoReplicaFailoverTest.java
rename to solr/modules/hdfs/src/test/org/apache/solr/cloud/SharedFSAutoReplicaFailoverTest.java
diff --git a/solr/core/src/test/org/apache/solr/cloud/api/collections/HdfsCloudIncrementalBackupTest.java b/solr/modules/hdfs/src/test/org/apache/solr/cloud/api/collections/HdfsCloudIncrementalBackupTest.java
similarity index 100%
rename from solr/core/src/test/org/apache/solr/cloud/api/collections/HdfsCloudIncrementalBackupTest.java
rename to solr/modules/hdfs/src/test/org/apache/solr/cloud/api/collections/HdfsCloudIncrementalBackupTest.java
diff --git a/solr/core/src/test/org/apache/solr/cloud/api/collections/HdfsCollectionsAPIDistributedZkTest.java b/solr/modules/hdfs/src/test/org/apache/solr/cloud/api/collections/HdfsCollectionsAPIDistributedZkTest.java
similarity index 95%
rename from solr/core/src/test/org/apache/solr/cloud/api/collections/HdfsCollectionsAPIDistributedZkTest.java
rename to solr/modules/hdfs/src/test/org/apache/solr/cloud/api/collections/HdfsCollectionsAPIDistributedZkTest.java
index 614fd8b7b1c..e7992c248a8 100644
--- a/solr/core/src/test/org/apache/solr/cloud/api/collections/HdfsCollectionsAPIDistributedZkTest.java
+++ b/solr/modules/hdfs/src/test/org/apache/solr/cloud/api/collections/HdfsCollectionsAPIDistributedZkTest.java
@@ -37,7 +37,7 @@
BadHdfsThreadsFilter.class // hdfs currently leaks thread(s)
})
@TimeoutSuite(millis = TimeUnits.HOUR)
-public class HdfsCollectionsAPIDistributedZkTest extends CollectionsAPIDistributedZkTest {
+public class HdfsCollectionsAPIDistributedZkTest extends AbstractCollectionsAPIDistributedZkTestBase {
private static MiniDFSCluster dfsCluster;
@BeforeClass
@@ -54,7 +54,6 @@ public static void teardownClass() throws Exception {
}
}
- @Override
protected String getConfigSet() {
return "cloud-hdfs";
}
diff --git a/solr/core/src/test/org/apache/solr/cloud/api/collections/TestHdfsCloudBackupRestore.java b/solr/modules/hdfs/src/test/org/apache/solr/cloud/api/collections/TestHdfsCloudBackupRestore.java
similarity index 100%
rename from solr/core/src/test/org/apache/solr/cloud/api/collections/TestHdfsCloudBackupRestore.java
rename to solr/modules/hdfs/src/test/org/apache/solr/cloud/api/collections/TestHdfsCloudBackupRestore.java
diff --git a/solr/core/src/test/org/apache/solr/cloud/hdfs/HDFSCollectionsAPITest.java b/solr/modules/hdfs/src/test/org/apache/solr/cloud/hdfs/HDFSCollectionsAPITest.java
similarity index 100%
rename from solr/core/src/test/org/apache/solr/cloud/hdfs/HDFSCollectionsAPITest.java
rename to solr/modules/hdfs/src/test/org/apache/solr/cloud/hdfs/HDFSCollectionsAPITest.java
diff --git a/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsBasicDistributedZk2Test.java b/solr/modules/hdfs/src/test/org/apache/solr/cloud/hdfs/HdfsBasicDistributedZk2Test.java
similarity index 93%
rename from solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsBasicDistributedZk2Test.java
rename to solr/modules/hdfs/src/test/org/apache/solr/cloud/hdfs/HdfsBasicDistributedZk2Test.java
index 0e3d5bdd92c..59fd45c6f73 100644
--- a/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsBasicDistributedZk2Test.java
+++ b/solr/modules/hdfs/src/test/org/apache/solr/cloud/hdfs/HdfsBasicDistributedZk2Test.java
@@ -23,7 +23,7 @@
import org.apache.lucene.util.QuickPatchThreadsFilter;
import org.apache.lucene.util.LuceneTestCase.Slow;
import org.apache.solr.SolrIgnoredThreadsFilter;
-import org.apache.solr.cloud.BasicDistributedZk2Test;
+import org.apache.solr.cloud.AbstractBasicDistributedZk2TestBase;
import org.apache.solr.util.BadHdfsThreadsFilter;
import org.junit.AfterClass;
import org.junit.BeforeClass;
@@ -31,6 +31,7 @@
import com.carrotsearch.randomizedtesting.annotations.Nightly;
import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters;
+
@Slow
@Nightly
@ThreadLeakFilters(defaultFilters = true, filters = {
@@ -39,7 +40,7 @@
BadHdfsThreadsFilter.class // hdfs currently leaks thread(s)
})
@ThreadLeakLingering(linger = 10)
-public class HdfsBasicDistributedZk2Test extends BasicDistributedZk2Test {
+public class HdfsBasicDistributedZk2Test extends AbstractBasicDistributedZk2TestBase {
private static MiniDFSCluster dfsCluster;
@BeforeClass
@@ -60,4 +61,5 @@ public static void teardownClass() throws Exception {
protected String getDataDir(String dataDir) throws IOException {
return HdfsTestUtil.getDataDir(dfsCluster, dataDir);
}
+
}
diff --git a/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsBasicDistributedZkTest.java b/solr/modules/hdfs/src/test/org/apache/solr/cloud/hdfs/HdfsBasicDistributedZkTest.java
similarity index 88%
rename from solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsBasicDistributedZkTest.java
rename to solr/modules/hdfs/src/test/org/apache/solr/cloud/hdfs/HdfsBasicDistributedZkTest.java
index 606402aa4d3..74571da0422 100644
--- a/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsBasicDistributedZkTest.java
+++ b/solr/modules/hdfs/src/test/org/apache/solr/cloud/hdfs/HdfsBasicDistributedZkTest.java
@@ -23,10 +23,11 @@
import org.apache.lucene.util.QuickPatchThreadsFilter;
import org.apache.lucene.util.LuceneTestCase.Slow;
import org.apache.solr.SolrIgnoredThreadsFilter;
-import org.apache.solr.cloud.BasicDistributedZkTest;
+import org.apache.solr.cloud.AbstractBasicDistributedZkTestBase;
import org.apache.solr.util.BadHdfsThreadsFilter;
import org.junit.AfterClass;
import org.junit.BeforeClass;
+import org.junit.Test;
import com.carrotsearch.randomizedtesting.annotations.Nightly;
import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters;
@@ -39,9 +40,9 @@
BadHdfsThreadsFilter.class // hdfs currently leaks thread(s)
})
@ThreadLeakLingering(linger = 10)
-public class HdfsBasicDistributedZkTest extends BasicDistributedZkTest {
+public class HdfsBasicDistributedZkTest extends AbstractBasicDistributedZkTestBase {
private static MiniDFSCluster dfsCluster;
-
+
@BeforeClass
public static void setupClass() throws Exception {
dfsCluster = HdfsTestUtil.setupClass(createTempDir().toFile().getAbsolutePath());
@@ -60,9 +61,16 @@ public static void teardownClass() throws Exception {
dfsCluster = null;
}
}
-
+
@Override
protected String getDataDir(String dataDir) throws IOException {
return HdfsTestUtil.getDataDir(dfsCluster, dataDir);
}
+
+ @Test
+ @Override
+ @ShardsFixed(num = 4)
+ public void test() throws Exception {
+ super.test();
+ }
}
diff --git a/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsChaosMonkeyNothingIsSafeTest.java b/solr/modules/hdfs/src/test/org/apache/solr/cloud/hdfs/HdfsChaosMonkeyNothingIsSafeTest.java
similarity index 93%
rename from solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsChaosMonkeyNothingIsSafeTest.java
rename to solr/modules/hdfs/src/test/org/apache/solr/cloud/hdfs/HdfsChaosMonkeyNothingIsSafeTest.java
index 160890430dd..0850d6a2c0e 100644
--- a/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsChaosMonkeyNothingIsSafeTest.java
+++ b/solr/modules/hdfs/src/test/org/apache/solr/cloud/hdfs/HdfsChaosMonkeyNothingIsSafeTest.java
@@ -23,7 +23,7 @@
import org.apache.lucene.util.QuickPatchThreadsFilter;
import org.apache.lucene.util.LuceneTestCase.Slow;
import org.apache.solr.SolrIgnoredThreadsFilter;
-import org.apache.solr.cloud.ChaosMonkeyNothingIsSafeTest;
+import org.apache.solr.cloud.AbstractChaosMonkeyNothingIsSafeTestBase;
import org.apache.solr.util.BadHdfsThreadsFilter;
import org.junit.AfterClass;
import org.junit.BeforeClass;
@@ -39,9 +39,9 @@
BadHdfsThreadsFilter.class // hdfs currently leaks thread(s)
})
@ThreadLeakLingering(linger = 10)
-public class HdfsChaosMonkeyNothingIsSafeTest extends ChaosMonkeyNothingIsSafeTest {
+public class HdfsChaosMonkeyNothingIsSafeTest extends AbstractChaosMonkeyNothingIsSafeTestBase {
private static MiniDFSCluster dfsCluster;
-
+
@BeforeClass
public static void setupClass() throws Exception {
System.setProperty("solr.hdfs.blockcache.global", "true"); // always use global cache, this test can create a lot of directories
@@ -57,15 +57,15 @@ public static void teardownClass() throws Exception {
System.clearProperty("solr.hdfs.blockcache.global");
}
}
-
+
@Override
public void distribSetUp() throws Exception {
super.distribSetUp();
-
+
// super class may hard code directory
useFactory("org.apache.solr.core.HdfsDirectoryFactory");
}
-
+
@Override
protected String getDataDir(String dataDir) throws IOException {
return HdfsTestUtil.getDataDir(dfsCluster, dataDir);
diff --git a/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsChaosMonkeySafeLeaderTest.java b/solr/modules/hdfs/src/test/org/apache/solr/cloud/hdfs/HdfsChaosMonkeySafeLeaderTest.java
similarity index 88%
rename from solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsChaosMonkeySafeLeaderTest.java
rename to solr/modules/hdfs/src/test/org/apache/solr/cloud/hdfs/HdfsChaosMonkeySafeLeaderTest.java
index a92914b218b..40e4c012bbb 100644
--- a/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsChaosMonkeySafeLeaderTest.java
+++ b/solr/modules/hdfs/src/test/org/apache/solr/cloud/hdfs/HdfsChaosMonkeySafeLeaderTest.java
@@ -23,7 +23,7 @@
import org.apache.lucene.util.QuickPatchThreadsFilter;
import org.apache.lucene.util.LuceneTestCase.Slow;
import org.apache.solr.SolrIgnoredThreadsFilter;
-import org.apache.solr.cloud.ChaosMonkeySafeLeaderTest;
+import org.apache.solr.cloud.AbstractChaosMonkeySafeLeaderTestBase;
import org.apache.solr.util.BadHdfsThreadsFilter;
import org.junit.AfterClass;
import org.junit.BeforeClass;
@@ -31,6 +31,7 @@
import com.carrotsearch.randomizedtesting.annotations.Nightly;
import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters;
+
@Slow
@Nightly
@ThreadLeakFilters(defaultFilters = true, filters = {
@@ -39,7 +40,8 @@
BadHdfsThreadsFilter.class // hdfs currently leaks thread(s)
})
@ThreadLeakLingering(linger = 10)
-public class HdfsChaosMonkeySafeLeaderTest extends ChaosMonkeySafeLeaderTest {
+public class HdfsChaosMonkeySafeLeaderTest extends AbstractChaosMonkeySafeLeaderTestBase {
+ private static final String DIRECTORY_FACTORY = "org.apache.solr.core.HdfsDirectoryFactory";
private static MiniDFSCluster dfsCluster;
@BeforeClass
@@ -70,4 +72,8 @@ public void distribSetUp() throws Exception {
protected String getDataDir(String dataDir) throws IOException {
return HdfsTestUtil.getDataDir(dfsCluster, dataDir);
}
+
+ protected String getDirectoryFactory() {
+ return DIRECTORY_FACTORY;
+ }
}
diff --git a/solr/core/src/test/org/apache/solr/cloud/hdfs/FakeGroupMapping.java b/solr/modules/hdfs/src/test/org/apache/solr/cloud/hdfs/HdfsFakeGroupMapping.java
similarity index 94%
rename from solr/core/src/test/org/apache/solr/cloud/hdfs/FakeGroupMapping.java
rename to solr/modules/hdfs/src/test/org/apache/solr/cloud/hdfs/HdfsFakeGroupMapping.java
index fae1b00f7d0..2865d8174f2 100644
--- a/solr/core/src/test/org/apache/solr/cloud/hdfs/FakeGroupMapping.java
+++ b/solr/modules/hdfs/src/test/org/apache/solr/cloud/hdfs/HdfsFakeGroupMapping.java
@@ -16,15 +16,15 @@
*/
package org.apache.solr.cloud.hdfs;
+import org.apache.hadoop.security.GroupMappingServiceProvider;
+
import java.util.Collections;
import java.util.List;
-import org.apache.hadoop.security.GroupMappingServiceProvider;
-
/**
* Fake mapping for Hadoop to prevent falling back to Shell group provider
*/
-public class FakeGroupMapping implements GroupMappingServiceProvider {
+public class HdfsFakeGroupMapping implements GroupMappingServiceProvider {
@Override
public List<String> getGroups(String user) {
return Collections.singletonList("supergroup");
diff --git a/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsNNFailoverTest.java b/solr/modules/hdfs/src/test/org/apache/solr/cloud/hdfs/HdfsNNFailoverTest.java
similarity index 94%
rename from solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsNNFailoverTest.java
rename to solr/modules/hdfs/src/test/org/apache/solr/cloud/hdfs/HdfsNNFailoverTest.java
index 30a25eafde7..35a8472ed73 100644
--- a/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsNNFailoverTest.java
+++ b/solr/modules/hdfs/src/test/org/apache/solr/cloud/hdfs/HdfsNNFailoverTest.java
@@ -23,7 +23,7 @@
import org.apache.lucene.util.LuceneTestCase.Slow;
import org.apache.lucene.util.QuickPatchThreadsFilter;
import org.apache.solr.SolrIgnoredThreadsFilter;
-import org.apache.solr.cloud.BasicDistributedZkTest;
+import org.apache.solr.cloud.AbstractBasicDistributedZkTestBase;
import org.apache.solr.util.BadHdfsThreadsFilter;
import org.junit.AfterClass;
import org.junit.BeforeClass;
@@ -38,7 +38,7 @@
BadHdfsThreadsFilter.class // hdfs currently leaks thread(s)
})
@ThreadLeakLingering(linger = 40)
-public class HdfsNNFailoverTest extends BasicDistributedZkTest {
+public class HdfsNNFailoverTest extends AbstractBasicDistributedZkTestBase {
private static final String COLLECTION = "collection";
private static MiniDFSCluster dfsCluster;
@@ -72,6 +72,7 @@ protected String getSolrXml() {
}
@Test
+ @Override
public void test() throws Exception {
createCollection(COLLECTION, "conf1", 1, 1);
diff --git a/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsRecoverLeaseTest.java b/solr/modules/hdfs/src/test/org/apache/solr/cloud/hdfs/HdfsRecoverLeaseTest.java
similarity index 100%
rename from solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsRecoverLeaseTest.java
rename to solr/modules/hdfs/src/test/org/apache/solr/cloud/hdfs/HdfsRecoverLeaseTest.java
diff --git a/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsRecoveryZkTest.java b/solr/modules/hdfs/src/test/org/apache/solr/cloud/hdfs/HdfsRecoveryZkTest.java
similarity index 90%
rename from solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsRecoveryZkTest.java
rename to solr/modules/hdfs/src/test/org/apache/solr/cloud/hdfs/HdfsRecoveryZkTest.java
index 5c9c506b370..252f392ec9f 100644
--- a/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsRecoveryZkTest.java
+++ b/solr/modules/hdfs/src/test/org/apache/solr/cloud/hdfs/HdfsRecoveryZkTest.java
@@ -19,24 +19,26 @@
import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters;
import com.carrotsearch.randomizedtesting.annotations.ThreadLeakLingering;
import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.LuceneTestCase.Nightly;
import org.apache.lucene.util.LuceneTestCase.Slow;
import org.apache.lucene.util.QuickPatchThreadsFilter;
import org.apache.solr.SolrIgnoredThreadsFilter;
-import org.apache.solr.cloud.RecoveryZkTest;
+import org.apache.solr.cloud.AbstractRecoveryZkTestBase;
import org.apache.solr.util.BadHdfsThreadsFilter;
import org.junit.AfterClass;
import org.junit.BeforeClass;
@Slow
@Nightly
+@LuceneTestCase.AwaitsFix(bugUrl = "SOLR-15405")
@ThreadLeakFilters(defaultFilters = true, filters = {
SolrIgnoredThreadsFilter.class,
QuickPatchThreadsFilter.class,
BadHdfsThreadsFilter.class // hdfs currently leaks thread(s)
})
@ThreadLeakLingering(linger = 10)
-public class HdfsRecoveryZkTest extends RecoveryZkTest {
+public class HdfsRecoveryZkTest extends AbstractRecoveryZkTestBase {
private static MiniDFSCluster dfsCluster;
@BeforeClass
@@ -58,4 +60,5 @@ public static void teardownClass() throws Exception {
}
}
}
+
}
diff --git a/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsRestartWhileUpdatingTest.java b/solr/modules/hdfs/src/test/org/apache/solr/cloud/hdfs/HdfsRestartWhileUpdatingTest.java
similarity index 88%
rename from solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsRestartWhileUpdatingTest.java
rename to solr/modules/hdfs/src/test/org/apache/solr/cloud/hdfs/HdfsRestartWhileUpdatingTest.java
index 8f16b770295..d90db0bd4d7 100644
--- a/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsRestartWhileUpdatingTest.java
+++ b/solr/modules/hdfs/src/test/org/apache/solr/cloud/hdfs/HdfsRestartWhileUpdatingTest.java
@@ -21,7 +21,7 @@
import org.apache.lucene.util.QuickPatchThreadsFilter;
import org.apache.lucene.util.LuceneTestCase.Slow;
import org.apache.solr.SolrIgnoredThreadsFilter;
-import org.apache.solr.cloud.RestartWhileUpdatingTest;
+import org.apache.solr.cloud.AbstractRestartWhileUpdatingTestBase;
import org.apache.solr.util.BadHdfsThreadsFilter;
import org.junit.AfterClass;
import org.junit.BeforeClass;
@@ -37,11 +37,14 @@
BadHdfsThreadsFilter.class // hdfs currently leaks thread(s)
})
@ThreadLeakLingering(linger = 10)
-public class HdfsRestartWhileUpdatingTest extends RestartWhileUpdatingTest {
+public class HdfsRestartWhileUpdatingTest extends AbstractRestartWhileUpdatingTestBase {
private static MiniDFSCluster dfsCluster;
public HdfsRestartWhileUpdatingTest() throws Exception {
super();
+
+ // The constructor runs after setupClass so reset the DirectoryFactory
+ useFactory("org.apache.solr.core.HdfsDirectoryFactory");
}
@BeforeClass
diff --git a/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsSyncSliceTest.java b/solr/modules/hdfs/src/test/org/apache/solr/cloud/hdfs/HdfsSyncSliceTest.java
similarity index 94%
rename from solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsSyncSliceTest.java
rename to solr/modules/hdfs/src/test/org/apache/solr/cloud/hdfs/HdfsSyncSliceTest.java
index 82ff90d9dc2..68f4ee0137b 100644
--- a/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsSyncSliceTest.java
+++ b/solr/modules/hdfs/src/test/org/apache/solr/cloud/hdfs/HdfsSyncSliceTest.java
@@ -23,7 +23,7 @@
import org.apache.lucene.util.QuickPatchThreadsFilter;
import org.apache.lucene.util.LuceneTestCase.Slow;
import org.apache.solr.SolrIgnoredThreadsFilter;
-import org.apache.solr.cloud.SyncSliceTest;
+import org.apache.solr.cloud.AbstractSyncSliceTestBase;
import org.apache.solr.util.BadHdfsThreadsFilter;
import org.junit.AfterClass;
import org.junit.BeforeClass;
@@ -39,7 +39,7 @@
BadHdfsThreadsFilter.class // hdfs currently leaks thread(s)
})
@ThreadLeakLingering(linger = 10)
-public class HdfsSyncSliceTest extends SyncSliceTest {
+public class HdfsSyncSliceTest extends AbstractSyncSliceTestBase {
private static MiniDFSCluster dfsCluster;
@BeforeClass
diff --git a/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsTestUtil.java b/solr/modules/hdfs/src/test/org/apache/solr/cloud/hdfs/HdfsTestUtil.java
similarity index 93%
rename from solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsTestUtil.java
rename to solr/modules/hdfs/src/test/org/apache/solr/cloud/hdfs/HdfsTestUtil.java
index 1babbe295ef..1f640e6538d 100644
--- a/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsTestUtil.java
+++ b/solr/modules/hdfs/src/test/org/apache/solr/cloud/hdfs/HdfsTestUtil.java
@@ -52,17 +52,14 @@
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.util.DiskChecker;
import org.apache.lucene.util.Constants;
-import org.apache.lucene.util.LuceneTestCase;
import org.apache.solr.SolrTestCaseJ4;
import org.apache.solr.common.util.IOUtils;
import org.apache.solr.common.util.SuppressForbidden;
-import org.apache.solr.core.DirectoryFactory;
+import org.apache.solr.core.HdfsDirectoryFactory;
import org.apache.solr.util.HdfsUtil;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import static org.apache.lucene.util.LuceneTestCase.random;
-
public class HdfsTestUtil {
private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
@@ -103,10 +100,10 @@ public static void checkAssumptions() {
*/
private static void ensureHadoopHomeNotSet() {
if (System.getenv("HADOOP_HOME") != null) {
- LuceneTestCase.fail("Ensure that HADOOP_HOME environment variable is not set.");
+ SolrTestCaseJ4.fail("Ensure that HADOOP_HOME environment variable is not set.");
}
if (System.getProperty("hadoop.home.dir") != null) {
- LuceneTestCase.fail("Ensure that \"hadoop.home.dir\" Java property is not set.");
+ SolrTestCaseJ4.fail("Ensure that \"hadoop.home.dir\" Java property is not set.");
}
}
@@ -114,7 +111,7 @@ private static void ensureHadoopHomeNotSet() {
* Hadoop integration tests fail on Windows without Hadoop NativeIO
*/
private static void checkHadoopWindows() {
- LuceneTestCase.assumeTrue("Hadoop does not work on Windows without Hadoop NativeIO",
+ SolrTestCaseJ4.assumeTrue("Hadoop does not work on Windows without Hadoop NativeIO",
!Constants.WINDOWS || NativeIO.isAvailable());
}
@@ -139,10 +136,10 @@ private static void checkOverriddenHadoopClasses() {
for (Class<?> clazz : modifiedHadoopClasses) {
try {
- LuceneTestCase.assertNotNull("Field on " + clazz.getCanonicalName() + " should not have been null",
+ SolrTestCaseJ4.assertNotNull("Field on " + clazz.getCanonicalName() + " should not have been null",
clazz.getField(SOLR_HACK_FOR_CLASS_VERIFICATION_FIELD));
} catch (NoSuchFieldException e) {
- LuceneTestCase.fail("Expected to load Solr modified Hadoop class " + clazz.getCanonicalName() +
+ SolrTestCaseJ4.fail("Expected to load Solr modified Hadoop class " + clazz.getCanonicalName() +
" , but it was not found.");
}
}
@@ -156,7 +153,7 @@ private static void checkFastDateFormat() {
try {
FastDateFormat.getInstance().format(System.currentTimeMillis());
} catch (ArrayIndexOutOfBoundsException e) {
- LuceneTestCase.assumeNoException("commons-lang3 FastDateFormat doesn't work with " +
+ SolrTestCaseJ4.assumeNoException("commons-lang3 FastDateFormat doesn't work with " +
Locale.getDefault().toLanguageTag(), e);
}
}
@@ -166,7 +163,7 @@ private static void checkFastDateFormat() {
*/
private static void checkGeneratedIdMatches() {
// This is basically how Namenode generates fsimage ids and checks that the fsimage filename matches
- LuceneTestCase.assumeTrue("Check that generated id matches regex",
+ SolrTestCaseJ4.assumeTrue("Check that generated id matches regex",
Pattern.matches("(\\d+)", String.format(Locale.getDefault(),"%019d", 0)));
}
@@ -186,12 +183,13 @@ public static MiniDFSCluster setupClass(String dir, boolean safeModeTesting, boo
System.setProperty("test.build.data", dir + File.separator + "hdfs" + File.separator + "build");
System.setProperty("test.cache.data", dir + File.separator + "hdfs" + File.separator + "cache");
- System.setProperty("solr.lock.type", DirectoryFactory.LOCK_TYPE_HDFS);
+ System.setProperty("solr.lock.type", HdfsDirectoryFactory.LOCK_TYPE_HDFS);
+ System.setProperty(SolrTestCaseJ4.UPDATELOG_SYSPROP, HdfsDirectoryFactory.HDFS_UPDATE_LOG_CLASS_NAME);
// test-files/solr/solr.xml sets this to be 15000. This isn't long enough for HDFS in some cases.
System.setProperty("socketTimeout", "90000");
- String blockcacheGlobal = System.getProperty("solr.hdfs.blockcache.global", Boolean.toString(random().nextBoolean()));
+ String blockcacheGlobal = System.getProperty("solr.hdfs.blockcache.global", Boolean.toString(SolrTestCaseJ4.random().nextBoolean()));
System.setProperty("solr.hdfs.blockcache.global", blockcacheGlobal);
// Limit memory usage for HDFS tests
if(Boolean.parseBoolean(blockcacheGlobal)) {
@@ -216,11 +214,11 @@ public static MiniDFSCluster setupClass(String dir, boolean safeModeTesting, boo
if (haTesting) dfsCluster.transitionToActive(0);
- int rndMode = random().nextInt(3);
+ int rndMode = SolrTestCaseJ4.random().nextInt(3);
if (safeModeTesting && rndMode == 1) {
NameNodeAdapter.enterSafeMode(dfsCluster.getNameNode(), false);
- int rnd = random().nextInt(10000);
+ int rnd = SolrTestCaseJ4.random().nextInt(10000);
Timer timer = new Timer();
synchronized (TIMERS_LOCK) {
if (timers == null) {
@@ -236,7 +234,7 @@ public void run() {
}
}, rnd);
} else if (haTesting && rndMode == 2) {
- int rnd = random().nextInt(30000);
+ int rnd = SolrTestCaseJ4.random().nextInt(30000);
Timer timer = new Timer();
synchronized (TIMERS_LOCK) {
if (timers == null) {
@@ -342,6 +340,8 @@ public static void teardownClass(MiniDFSCluster dfsCluster) throws Exception {
System.clearProperty("solr.lock.type");
+ System.clearProperty(SolrTestCaseJ4.UPDATELOG_SYSPROP);
+
// Clear "solr.hdfs." system properties
Enumeration<?> propertyNames = System.getProperties().propertyNames();
while(propertyNames.hasMoreElements()) {
diff --git a/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsThreadLeakTest.java b/solr/modules/hdfs/src/test/org/apache/solr/cloud/hdfs/HdfsThreadLeakTest.java
similarity index 100%
rename from solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsThreadLeakTest.java
rename to solr/modules/hdfs/src/test/org/apache/solr/cloud/hdfs/HdfsThreadLeakTest.java
diff --git a/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsTlogReplayBufferedWhileIndexingTest.java b/solr/modules/hdfs/src/test/org/apache/solr/cloud/hdfs/HdfsTlogReplayBufferedWhileIndexingTest.java
similarity index 91%
rename from solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsTlogReplayBufferedWhileIndexingTest.java
rename to solr/modules/hdfs/src/test/org/apache/solr/cloud/hdfs/HdfsTlogReplayBufferedWhileIndexingTest.java
index 278c30432f7..4a25cf1e4cf 100644
--- a/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsTlogReplayBufferedWhileIndexingTest.java
+++ b/solr/modules/hdfs/src/test/org/apache/solr/cloud/hdfs/HdfsTlogReplayBufferedWhileIndexingTest.java
@@ -21,7 +21,7 @@
import org.apache.lucene.util.QuickPatchThreadsFilter;
import org.apache.lucene.util.LuceneTestCase.Slow;
import org.apache.solr.SolrIgnoredThreadsFilter;
-import org.apache.solr.cloud.TlogReplayBufferedWhileIndexingTest;
+import org.apache.solr.cloud.AbstractTlogReplayBufferedWhileIndexingTestBase;
import org.apache.solr.util.BadHdfsThreadsFilter;
import org.junit.AfterClass;
import org.junit.BeforeClass;
@@ -37,7 +37,7 @@
BadHdfsThreadsFilter.class // hdfs currently leaks thread(s)
})
@ThreadLeakLingering(linger = 10)
-public class HdfsTlogReplayBufferedWhileIndexingTest extends TlogReplayBufferedWhileIndexingTest {
+public class HdfsTlogReplayBufferedWhileIndexingTest extends AbstractTlogReplayBufferedWhileIndexingTestBase {
private static MiniDFSCluster dfsCluster;
public HdfsTlogReplayBufferedWhileIndexingTest() throws Exception {
@@ -48,7 +48,7 @@ public HdfsTlogReplayBufferedWhileIndexingTest() throws Exception {
public static void setupClass() throws Exception {
dfsCluster = HdfsTestUtil.setupClass(createTempDir().toFile().getAbsolutePath());
}
-
+
@AfterClass
public static void teardownClass() throws Exception {
try {
@@ -57,7 +57,7 @@ public static void teardownClass() throws Exception {
dfsCluster = null;
}
}
-
+
@Override
protected String getDataDir(String dataDir) {
return HdfsTestUtil.getDataDir(dfsCluster, dataDir);
diff --git a/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsUnloadDistributedZkTest.java b/solr/modules/hdfs/src/test/org/apache/solr/cloud/hdfs/HdfsUnloadDistributedZkTest.java
similarity index 93%
rename from solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsUnloadDistributedZkTest.java
rename to solr/modules/hdfs/src/test/org/apache/solr/cloud/hdfs/HdfsUnloadDistributedZkTest.java
index 5e1f73903c8..1711ed17c65 100644
--- a/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsUnloadDistributedZkTest.java
+++ b/solr/modules/hdfs/src/test/org/apache/solr/cloud/hdfs/HdfsUnloadDistributedZkTest.java
@@ -21,7 +21,7 @@
import org.apache.lucene.util.QuickPatchThreadsFilter;
import org.apache.lucene.util.LuceneTestCase.Slow;
import org.apache.solr.SolrIgnoredThreadsFilter;
-import org.apache.solr.cloud.UnloadDistributedZkTest;
+import org.apache.solr.cloud.AbstractUnloadDistributedZkTestBase;
import org.apache.solr.util.BadHdfsThreadsFilter;
import org.junit.AfterClass;
import org.junit.BeforeClass;
@@ -37,7 +37,7 @@
BadHdfsThreadsFilter.class // hdfs currently leaks thread(s)
})
@ThreadLeakLingering(linger = 10)
-public class HdfsUnloadDistributedZkTest extends UnloadDistributedZkTest {
+public class HdfsUnloadDistributedZkTest extends AbstractUnloadDistributedZkTestBase {
private static MiniDFSCluster dfsCluster;
@BeforeClass
diff --git a/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsWriteToMultipleCollectionsTest.java b/solr/modules/hdfs/src/test/org/apache/solr/cloud/hdfs/HdfsWriteToMultipleCollectionsTest.java
similarity index 97%
rename from solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsWriteToMultipleCollectionsTest.java
rename to solr/modules/hdfs/src/test/org/apache/solr/cloud/hdfs/HdfsWriteToMultipleCollectionsTest.java
index a4ed792f2f8..22b31d96697 100644
--- a/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsWriteToMultipleCollectionsTest.java
+++ b/solr/modules/hdfs/src/test/org/apache/solr/cloud/hdfs/HdfsWriteToMultipleCollectionsTest.java
@@ -39,7 +39,7 @@
import org.apache.solr.client.solrj.SolrQuery;
import org.apache.solr.client.solrj.embedded.JettySolrRunner;
import org.apache.solr.client.solrj.impl.CloudSolrClient;
-import org.apache.solr.cloud.BasicDistributedZkTest;
+import org.apache.solr.cloud.AbstractBasicDistributedZkTestBase;
import org.apache.solr.cloud.StoppableIndexingThread;
import org.apache.solr.core.CoreContainer;
import org.apache.solr.core.DirectoryFactory;
@@ -63,7 +63,7 @@
BadHdfsThreadsFilter.class // hdfs currently leaks thread(s)
})
@ThreadLeakLingering(linger = 10)
-public class HdfsWriteToMultipleCollectionsTest extends BasicDistributedZkTest {
+public class HdfsWriteToMultipleCollectionsTest extends AbstractBasicDistributedZkTestBase {
private static final String ACOLLECTION = "acollection";
private static MiniDFSCluster dfsCluster;
@@ -99,6 +99,7 @@ protected String getSolrXml() {
}
@Test
+ @Override
public void test() throws Exception {
int docCount = random().nextInt(1313) + 1;
int cnt = random().nextInt(4) + 1;
diff --git a/solr/core/src/test/org/apache/solr/cloud/hdfs/StressHdfsTest.java b/solr/modules/hdfs/src/test/org/apache/solr/cloud/hdfs/StressHdfsTest.java
similarity index 98%
rename from solr/core/src/test/org/apache/solr/cloud/hdfs/StressHdfsTest.java
rename to solr/modules/hdfs/src/test/org/apache/solr/cloud/hdfs/StressHdfsTest.java
index dd0193bc4e6..d3cb41b994d 100644
--- a/solr/core/src/test/org/apache/solr/cloud/hdfs/StressHdfsTest.java
+++ b/solr/modules/hdfs/src/test/org/apache/solr/cloud/hdfs/StressHdfsTest.java
@@ -32,7 +32,7 @@
import org.apache.solr.client.solrj.SolrQuery;
import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.client.solrj.request.QueryRequest;
-import org.apache.solr.cloud.BasicDistributedZkTest;
+import org.apache.solr.cloud.AbstractBasicDistributedZkTestBase;
import org.apache.solr.common.cloud.ClusterState;
import org.apache.solr.common.cloud.DocCollection;
import org.apache.solr.common.cloud.Replica;
@@ -64,7 +64,7 @@
BadHdfsThreadsFilter.class // hdfs currently leaks thread(s)
})
@ThreadLeakLingering(linger = 10)
-public class StressHdfsTest extends BasicDistributedZkTest {
+public class StressHdfsTest extends AbstractBasicDistributedZkTestBase {
private static final String DELETE_DATA_DIR_COLLECTION = "delete_data_dir";
private static MiniDFSCluster dfsCluster;
@@ -101,6 +101,7 @@ protected String getSolrXml() {
}
@Test
+ @Override
public void test() throws Exception {
randomlyEnableAutoSoftCommit();
diff --git a/solr/core/src/test/org/apache/solr/core/HdfsDirectoryFactoryTest.java b/solr/modules/hdfs/src/test/org/apache/solr/core/HdfsDirectoryFactoryTest.java
similarity index 100%
rename from solr/core/src/test/org/apache/solr/core/HdfsDirectoryFactoryTest.java
rename to solr/modules/hdfs/src/test/org/apache/solr/core/HdfsDirectoryFactoryTest.java
diff --git a/solr/core/src/test/org/apache/solr/core/backup/repository/HdfsBackupRepositoryIntegrationTest.java b/solr/modules/hdfs/src/test/org/apache/solr/core/backup/repository/HdfsBackupRepositoryIntegrationTest.java
similarity index 100%
rename from solr/core/src/test/org/apache/solr/core/backup/repository/HdfsBackupRepositoryIntegrationTest.java
rename to solr/modules/hdfs/src/test/org/apache/solr/core/backup/repository/HdfsBackupRepositoryIntegrationTest.java
diff --git a/solr/core/src/test/org/apache/solr/core/backup/repository/HdfsBackupRepositoryTest.java b/solr/modules/hdfs/src/test/org/apache/solr/core/backup/repository/HdfsBackupRepositoryTest.java
similarity index 100%
rename from solr/core/src/test/org/apache/solr/core/backup/repository/HdfsBackupRepositoryTest.java
rename to solr/modules/hdfs/src/test/org/apache/solr/core/backup/repository/HdfsBackupRepositoryTest.java
diff --git a/solr/core/src/test/org/apache/solr/handler/TestHdfsBackupRestoreCore.java b/solr/modules/hdfs/src/test/org/apache/solr/handler/TestHdfsBackupRestoreCore.java
similarity index 99%
rename from solr/core/src/test/org/apache/solr/handler/TestHdfsBackupRestoreCore.java
rename to solr/modules/hdfs/src/test/org/apache/solr/handler/TestHdfsBackupRestoreCore.java
index b7a70a56412..a19d87c2b24 100644
--- a/solr/core/src/test/org/apache/solr/handler/TestHdfsBackupRestoreCore.java
+++ b/solr/modules/hdfs/src/test/org/apache/solr/handler/TestHdfsBackupRestoreCore.java
@@ -234,7 +234,7 @@ public void test() throws Exception {
log.info("Running Restore via replication handler");
// Snapshooter prefixes "snapshot." to the backup name.
BackupRestoreUtils.runReplicationHandlerCommand(baseUrl, coreName, ReplicationHandler.CMD_RESTORE, "hdfs", backupName);
- while (!TestRestoreCore.fetchRestoreStatus(baseUrl, coreName)) {
+ while (!TestRestoreCoreUtil.fetchRestoreStatus(baseUrl, coreName)) {
Thread.sleep(1000);
}
} else {
diff --git a/solr/core/src/test/org/apache/solr/index/hdfs/CheckHdfsIndexTest.java b/solr/modules/hdfs/src/test/org/apache/solr/index/hdfs/CheckHdfsIndexTest.java
similarity index 100%
rename from solr/core/src/test/org/apache/solr/index/hdfs/CheckHdfsIndexTest.java
rename to solr/modules/hdfs/src/test/org/apache/solr/index/hdfs/CheckHdfsIndexTest.java
diff --git a/solr/core/src/test/org/apache/solr/search/TestRecoveryHdfs.java b/solr/modules/hdfs/src/test/org/apache/solr/search/TestRecoveryHdfs.java
similarity index 100%
rename from solr/core/src/test/org/apache/solr/search/TestRecoveryHdfs.java
rename to solr/modules/hdfs/src/test/org/apache/solr/search/TestRecoveryHdfs.java
diff --git a/solr/core/src/test/org/apache/solr/store/blockcache/BlockCacheTest.java b/solr/modules/hdfs/src/test/org/apache/solr/store/blockcache/BlockCacheTest.java
similarity index 100%
rename from solr/core/src/test/org/apache/solr/store/blockcache/BlockCacheTest.java
rename to solr/modules/hdfs/src/test/org/apache/solr/store/blockcache/BlockCacheTest.java
diff --git a/solr/core/src/test/org/apache/solr/store/blockcache/BlockDirectoryTest.java b/solr/modules/hdfs/src/test/org/apache/solr/store/blockcache/BlockDirectoryTest.java
similarity index 99%
rename from solr/core/src/test/org/apache/solr/store/blockcache/BlockDirectoryTest.java
rename to solr/modules/hdfs/src/test/org/apache/solr/store/blockcache/BlockDirectoryTest.java
index d6ddea64f17..7bcb445940f 100644
--- a/solr/core/src/test/org/apache/solr/store/blockcache/BlockDirectoryTest.java
+++ b/solr/modules/hdfs/src/test/org/apache/solr/store/blockcache/BlockDirectoryTest.java
@@ -125,7 +125,7 @@ public void setUp() throws Exception {
}
random = random();
}
-
+
@After
public void tearDown() throws Exception {
super.tearDown();
@@ -151,7 +151,7 @@ public void testEOF() throws IOException {
private void testEof(String name, Directory directory, long length) throws IOException {
IndexInput input = directory.openInput(name, new IOContext());
try {
- input.seek(length);
+ input.seek(length);
try {
input.readByte();
fail("should throw eof");
@@ -209,14 +209,14 @@ private void assertInputsEquals(String name, Directory fsDir, Directory hdfs) th
byte[] hdfsBuf = new byte[fsBuf.length];
int offset = random.nextInt(fsBuf.length);
int length = random.nextInt(fsBuf.length - offset);
-
+
int pos;
if (fileLength == 0) {
pos = 0;
} else {
pos = random.nextInt(fileLength - length);
}
-
+
fsInput.seek(pos);
fsInput.readBytes(fsBuf, offset, length);
hdfsInput.seek(pos);
diff --git a/solr/core/src/test/org/apache/solr/store/blockcache/BufferStoreTest.java b/solr/modules/hdfs/src/test/org/apache/solr/store/blockcache/BufferStoreTest.java
similarity index 99%
rename from solr/core/src/test/org/apache/solr/store/blockcache/BufferStoreTest.java
rename to solr/modules/hdfs/src/test/org/apache/solr/store/blockcache/BufferStoreTest.java
index 4de783dd81b..f822f3f2df1 100644
--- a/solr/core/src/test/org/apache/solr/store/blockcache/BufferStoreTest.java
+++ b/solr/modules/hdfs/src/test/org/apache/solr/store/blockcache/BufferStoreTest.java
@@ -53,7 +53,7 @@ public void setup() {
public void clearBufferStores() {
BufferStore.clearBufferStores();
}
-
+
@Test
public void testBufferTakePut() {
byte[] b1 = store.takeBuffer(blockSize);
diff --git a/solr/core/src/test/org/apache/solr/store/hdfs/HdfsDirectoryTest.java b/solr/modules/hdfs/src/test/org/apache/solr/store/hdfs/HdfsDirectoryTest.java
similarity index 100%
rename from solr/core/src/test/org/apache/solr/store/hdfs/HdfsDirectoryTest.java
rename to solr/modules/hdfs/src/test/org/apache/solr/store/hdfs/HdfsDirectoryTest.java
diff --git a/solr/core/src/test/org/apache/solr/store/hdfs/HdfsLockFactoryTest.java b/solr/modules/hdfs/src/test/org/apache/solr/store/hdfs/HdfsLockFactoryTest.java
similarity index 100%
rename from solr/core/src/test/org/apache/solr/store/hdfs/HdfsLockFactoryTest.java
rename to solr/modules/hdfs/src/test/org/apache/solr/store/hdfs/HdfsLockFactoryTest.java
diff --git a/solr/core/src/test/org/apache/solr/update/TestHdfsUpdateLog.java b/solr/modules/hdfs/src/test/org/apache/solr/update/TestHdfsUpdateLog.java
similarity index 100%
rename from solr/core/src/test/org/apache/solr/update/TestHdfsUpdateLog.java
rename to solr/modules/hdfs/src/test/org/apache/solr/update/TestHdfsUpdateLog.java
diff --git a/solr/packaging/build.gradle b/solr/packaging/build.gradle
index e6cc9103d82..40006f0003b 100644
--- a/solr/packaging/build.gradle
+++ b/solr/packaging/build.gradle
@@ -51,6 +51,7 @@ dependencies {
":solr:modules:extraction",
":solr:modules:clustering",
":solr:modules:gcs-repository",
+ ":solr:modules:hdfs",
":solr:modules:jaegertracer-configurator",
":solr:modules:langid",
":solr:modules:ltr",
diff --git a/solr/solr-ref-guide/src/backup-restore.adoc b/solr/solr-ref-guide/src/backup-restore.adoc
index 22d0ccfb0ca..9a5117c0ac4 100644
--- a/solr/solr-ref-guide/src/backup-restore.adoc
+++ b/solr/solr-ref-guide/src/backup-restore.adoc
@@ -401,9 +401,7 @@ An example configuration using this property can be found below.
=== HdfsBackupRepository
-Stores and retrieves backup files from HDFS directories.
-
-WARNING: HdfsBackupRepository is deprecated and may be removed or relocated in a subsequent version of Solr.
+Stores and retrieves backup files from HDFS directories. This plugin must first be <> before it can be used.
HdfsBackupRepository accepts the following configuration options:
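For orientation, a minimal sketch of a repository definition that uses this class in `solrconfig.xml`; the host, port, and paths below are placeholders rather than values taken from this change:

[source,xml]
----
<backup>
  <!-- Hypothetical repository entry; adjust name and locations for your cluster -->
  <repository name="hdfs" class="org.apache.solr.core.backup.repository.HdfsBackupRepository" default="false">
    <!-- Base HDFS directory that backup locations are resolved against (placeholder URI) -->
    <str name="solr.hdfs.home">hdfs://namenode:8020/solr/backups</str>
    <!-- Optional: directory containing core-site.xml/hdfs-site.xml, if Solr needs them -->
    <str name="solr.hdfs.confdir">/etc/hadoop/conf</str>
  </repository>
</backup>
----

A backup request can then select this repository by name, e.g. with `repository=hdfs` on the Collections API call.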
diff --git a/solr/solr-ref-guide/src/index-location-format.adoc b/solr/solr-ref-guide/src/index-location-format.adoc
index 7e0d6cd0936..311e015cf5a 100644
--- a/solr/solr-ref-guide/src/index-location-format.adoc
+++ b/solr/solr-ref-guide/src/index-location-format.adoc
@@ -60,6 +60,6 @@ Use this DirectoryFactory to store your index in RAM.
[NOTE]
====
-If you are using Hadoop and would like to store your indexes in HDFS, you should use the {solr-javadocs}/core/org/apache/solr/core/HdfsDirectoryFactory.html[`solr.HdfsDirectoryFactory`] instead of either of the above implementations.
+If you are using Hadoop and would like to store your indexes in HDFS, you should use the {solr-javadocs}/modules/hdfs/org/apache/solr/core/HdfsDirectoryFactory.html[`solr.HdfsDirectoryFactory`] instead of either of the above implementations.
For more details, see the section <>.
====
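To make the note above concrete, a minimal sketch of the corresponding `directoryFactory` entry in `solrconfig.xml`, with a placeholder HDFS URI:

[source,xml]
----
<directoryFactory name="DirectoryFactory" class="solr.HdfsDirectoryFactory">
  <!-- Placeholder namenode address and path; point this at your own HDFS location -->
  <str name="solr.hdfs.home">hdfs://namenode:8020/solr</str>
</directoryFactory>
----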
diff --git a/solr/solr-ref-guide/src/major-changes-in-solr-9.adoc b/solr/solr-ref-guide/src/major-changes-in-solr-9.adoc
index 66f842d7a10..a08b7c1a6ce 100644
--- a/solr/solr-ref-guide/src/major-changes-in-solr-9.adoc
+++ b/solr/solr-ref-guide/src/major-changes-in-solr-9.adoc
@@ -107,6 +107,9 @@ The `response` field is now a map, containing information about the backup (`sta
* SOLR-15884: In Backup request responses, the `response` key now uses a map to return information instead of a list.
This is only applicable for users returning information in JSON format, which is the default behavior.
+* SOLR-14660: HDFS storage support has been moved to a module. Existing Solr configurations need no HDFS-related
+changes; however, the module needs to be installed. Please refer to the module's README.
+
== New Features & Enhancements
* Replica placement plugins
@@ -243,6 +246,7 @@ Instead, these libraries will be included with all other module dependencies in
* SOLR-15954: The prometheus-exporter is no longer packaged as a Solr module. It can be found under `solr/prometheus-exporter/`.
+
== Deprecations & Removed Features
The following list of features have been permanently removed from Solr:
@@ -269,8 +273,6 @@ Users have to modify their existing configurations to use CaffeineCache instead.
* CDCR
-* Storing indexes and backups in HDFS
-
* Solr's blob store
** SOLR-14654: plugins cannot be loaded using "runtimeLib=true" option. Use the package manager to use and load plugins
diff --git a/solr/solr-ref-guide/src/solr-on-hdfs.adoc b/solr/solr-ref-guide/src/solr-on-hdfs.adoc
index 131d62ba1c9..7dc0513a0b4 100644
--- a/solr/solr-ref-guide/src/solr-on-hdfs.adoc
+++ b/solr/solr-ref-guide/src/solr-on-hdfs.adoc
@@ -16,10 +16,8 @@
// specific language governing permissions and limitations
// under the License.
-WARNING: Storing indexes in HDFS is deprecated and may be be removed in 9.0.
-This functionality may be moved to a 3rd-party plugin in the future.
-Solr has support for writing and reading its index and transaction log files to the HDFS distributed filesystem.
+The Solr HDFS module adds support for writing and reading Solr's index and transaction log files to and from the HDFS distributed filesystem. This plugin must first be <> before it can be used. The module currently needs to be installed as a lib; it is planned to evolve into a Solr package in a later release.
This does not use Hadoop MapReduce to process Solr data, rather it only uses the HDFS filesystem for index and transaction log file storage.
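+
+As a sketch of what "installed as a lib" can mean in practice (the path below assumes the module's jars ship under `modules/hdfs/lib` of the Solr distribution; adjust it to your layout), the jars can be pulled in with a `<lib/>` directive in `solrconfig.xml`:
+
+[source,xml]
+----
+<lib dir="${solr.install.dir:../../../..}/modules/hdfs/lib" regex=".*\.jar" />
+----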
@@ -245,6 +243,19 @@ s|Required |Default: none
+
The Kerberos principal that Solr should use to authenticate to secure Hadoop; the format of a typical Kerberos V5 principal is: `primary/instance@realm`.
+== Update Log settings
+When using HDFS to store Solr indexes, it is recommended to also store the transaction logs on HDFS. This can be done by using the `solr.HdfsUpdateLog` update log handler class.
+In `solrconfig.xml` the update log handler class is often defined either through a variable reference or by specifying the class directly, for example:
+
+[source,xml]
+----
+<updateLog class="${solr.ulog:solr.UpdateLog}">
+  <str name="dir">${solr.ulog.dir:}</str>
+</updateLog>
+----
+
+When specifying a class explicitly like this, make sure the correct class name is used.
+When no class name is specified, Solr automatically picks the correct update log handler class, `solr.HdfsUpdateLog`, for collections that are configured to use the `HdfsDirectoryFactory`.
+
+
== Example solrconfig.xml for HDFS
Here is a sample `solrconfig.xml` configuration for storing Solr indexes on HDFS:
diff --git a/solr/test-framework/src/java/org/apache/solr/cloud/AbstractBasicDistributedZk2TestBase.java b/solr/test-framework/src/java/org/apache/solr/cloud/AbstractBasicDistributedZk2TestBase.java
new file mode 100644
index 00000000000..77540ae2bac
--- /dev/null
+++ b/solr/test-framework/src/java/org/apache/solr/cloud/AbstractBasicDistributedZk2TestBase.java
@@ -0,0 +1,457 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.cloud;
+
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.lucene.mockfile.FilterPath;
+import org.apache.solr.client.solrj.SolrClient;
+import org.apache.solr.client.solrj.SolrQuery;
+import org.apache.solr.client.solrj.SolrServerException;
+import org.apache.solr.client.solrj.impl.HttpSolrClient;
+import org.apache.solr.client.solrj.request.CollectionAdminRequest;
+import org.apache.solr.client.solrj.request.QueryRequest;
+import org.apache.solr.client.solrj.request.UpdateRequest;
+import org.apache.solr.client.solrj.response.QueryResponse;
+import org.apache.solr.common.SolrException;
+import org.apache.solr.common.SolrInputDocument;
+import org.apache.solr.common.cloud.ZkNodeProps;
+import org.apache.solr.common.cloud.ZkStateReader;
+import org.apache.solr.common.params.CommonParams;
+import org.apache.solr.common.params.ModifiableSolrParams;
+import org.apache.solr.handler.BackupStatusChecker;
+import org.apache.solr.handler.ReplicationHandler;
+import org.junit.Test;
+
+/**
+ * This test simply does a bunch of basic things in solrcloud mode and asserts things
+ * work as expected.
+ */
+public abstract class AbstractBasicDistributedZk2TestBase extends AbstractFullDistribZkTestBase {
+ private static final String SHARD2 = "shard2";
+ private static final String SHARD1 = "shard1";
+ private static final String ONE_NODE_COLLECTION = "onenodecollection";
+ private final boolean onlyLeaderIndexes = random().nextBoolean();
+
+
+ public AbstractBasicDistributedZk2TestBase() {
+ super();
+ // we need DVs on point fields to compute stats & facets
+ if (Boolean.getBoolean(NUMERIC_POINTS_SYSPROP)) System.setProperty(NUMERIC_DOCVALUES_SYSPROP,"true");
+
+ sliceCount = 2;
+ }
+
+ @Override
+ protected boolean useTlogReplicas() {
+ return false; // TODO: tlog replicas make commits take way too long due to what is likely a bug and its TestInjection use
+ }
+
+ @Test
+ @ShardsFixed(num = 4)
+ public void test() throws Exception {
+ boolean testFinished = false;
+ try {
+ handle.clear();
+ handle.put("timestamp", SKIPVAL);
+
+ testNodeWithoutCollectionForwarding();
+
+ indexr(id, 1, i1, 100, tlong, 100, t1,
+ "now is the time for all good men", "foo_f", 1.414f, "foo_b", "true",
+ "foo_d", 1.414d);
+
+ commit();
+
+ // make sure we are in a steady state...
+ waitForRecoveriesToFinish(false);
+
+ assertDocCounts(false);
+
+ indexAbunchOfDocs();
+
+ // check again
+ waitForRecoveriesToFinish(false);
+
+ commit();
+
+ assertDocCounts(VERBOSE);
+ checkQueries();
+
+ assertDocCounts(VERBOSE);
+
+ query("q", "*:*", "sort", "n_tl1 desc");
+
+ bringDownShardIndexSomeDocsAndRecover();
+
+ query("q", "*:*", "sort", "n_tl1 desc");
+
+ // test adding another replica to a shard - it should do a
+ // recovery/replication to pick up the index from the leader
+ addNewReplica();
+
+ long docId = testUpdateAndDelete();
+
+ // index a bad doc...
+ expectThrows(SolrException.class, () -> indexr(t1, "a doc with no id"));
+
+ // TODO: bring this to its own method?
+ // try indexing to a leader that has no replicas up
+ ZkStateReader zkStateReader = cloudClient.getZkStateReader();
+ ZkNodeProps leaderProps = zkStateReader.getLeaderRetry(
+ DEFAULT_COLLECTION, SHARD2);
+
+ String nodeName = leaderProps.getStr(ZkStateReader.NODE_NAME_PROP);
+ chaosMonkey.stopShardExcept(SHARD2, nodeName);
+
+ SolrClient client = getClient(nodeName);
+
+ index_specific(client, "id", docId + 1, t1, "what happens here?");
+
+ // expire a session...
+ CloudJettyRunner cloudJetty = shardToJetty.get(SHARD1).get(0);
+ chaosMonkey.expireSession(cloudJetty.jetty);
+
+ indexr("id", docId + 1, t1, "slip this doc in");
+
+ waitForRecoveriesToFinish(false);
+
+ checkShardConsistency(SHARD1);
+ checkShardConsistency(SHARD2);
+
+ testFinished = true;
+ } finally {
+ if (!testFinished) {
+ printLayoutOnTearDown = true;
+ }
+ }
+
+ }
+
+ private void testNodeWithoutCollectionForwarding() throws Exception {
+ assertEquals(0, CollectionAdminRequest
+ .createCollection(ONE_NODE_COLLECTION, "conf1", 1, 1)
+ .setCreateNodeSet("")
+ .process(cloudClient).getStatus());
+ assertTrue(CollectionAdminRequest
+ .addReplicaToShard(ONE_NODE_COLLECTION, "shard1")
+ .setCoreName(ONE_NODE_COLLECTION + "core")
+ .process(cloudClient).isSuccess());
+
+ waitForCollection(cloudClient.getZkStateReader(), ONE_NODE_COLLECTION, 1);
+ waitForRecoveriesToFinish(ONE_NODE_COLLECTION, cloudClient.getZkStateReader(), false);
+
+ cloudClient.getZkStateReader().getLeaderRetry(ONE_NODE_COLLECTION, SHARD1, 30000);
+
+ int docs = 2;
+ for (SolrClient client : clients) {
+ final String clientUrl = getBaseUrl((HttpSolrClient) client);
+ addAndQueryDocs(clientUrl, docs);
+ docs += 2;
+ }
+ }
+
+ // 2 docs added every call
+ private void addAndQueryDocs(final String baseUrl, int docs)
+ throws Exception {
+
+ SolrQuery query = new SolrQuery("*:*");
+
+ try (HttpSolrClient qclient = getHttpSolrClient(baseUrl + "/onenodecollection" + "core")) {
+
+ // it might take a moment for the proxy node to see us in their cloud state
+ waitForNon403or404or503(qclient);
+
+ // add a doc
+ SolrInputDocument doc = new SolrInputDocument();
+ doc.addField("id", docs);
+ qclient.add(doc);
+ qclient.commit();
+
+
+ QueryResponse results = qclient.query(query);
+ assertEquals(docs - 1, results.getResults().getNumFound());
+ }
+
+ try (HttpSolrClient qclient = getHttpSolrClient(baseUrl + "/onenodecollection")) {
+ QueryResponse results = qclient.query(query);
+ assertEquals(docs - 1, results.getResults().getNumFound());
+
+ SolrInputDocument doc = new SolrInputDocument();
+ doc.addField("id", docs + 1);
+ qclient.add(doc);
+ qclient.commit();
+
+ query = new SolrQuery("*:*");
+ query.set("rows", 0);
+ results = qclient.query(query);
+ assertEquals(docs, results.getResults().getNumFound());
+ }
+ }
+
+ private long testUpdateAndDelete() throws Exception {
+ long docId = 99999999L;
+ indexr("id", docId, t1, "originalcontent");
+
+ commit();
+
+ ModifiableSolrParams params = new ModifiableSolrParams();
+ params.add("q", t1 + ":originalcontent");
+ QueryResponse results = clients.get(0).query(params);
+ assertEquals(1, results.getResults().getNumFound());
+
+ // update doc
+ indexr("id", docId, t1, "updatedcontent");
+
+ commit();
+
+ results = clients.get(0).query(params);
+ assertEquals(0, results.getResults().getNumFound());
+
+ params.set("q", t1 + ":updatedcontent");
+
+ results = clients.get(0).query(params);
+ assertEquals(1, results.getResults().getNumFound());
+
+ UpdateRequest uReq = new UpdateRequest();
+ // uReq.setParam(UpdateParams.UPDATE_CHAIN, DISTRIB_UPDATE_CHAIN);
+ uReq.deleteById(Long.toString(docId)).process(clients.get(0));
+
+ commit();
+
+ results = clients.get(0).query(params);
+ assertEquals(0, results.getResults().getNumFound());
+ return docId;
+ }
+
+ private void bringDownShardIndexSomeDocsAndRecover() throws Exception {
+ SolrQuery query = new SolrQuery("*:*");
+ query.set("distrib", false);
+
+ commit();
+
+ long deadShardCount = shardToJetty.get(SHARD2).get(0).client.solrClient
+ .query(query).getResults().getNumFound();
+
+ query("q", "*:*", "sort", "n_tl1 desc");
+
+ int oldLiveNodes = cloudClient.getZkStateReader().getZkClient().getChildren(ZkStateReader.LIVE_NODES_ZKNODE, null, true).size();
+
+ assertEquals(5, oldLiveNodes);
+
+ // kill a shard
+ CloudJettyRunner deadShard = chaosMonkey.stopShard(SHARD1, 0);
+
+ // ensure shard is dead
+ expectThrows(SolrServerException.class,
+ "This server should be down and this update should have failed",
+ () -> index_specific(deadShard.client.solrClient, id, 999, i1, 107, t1, "specific doc!")
+ );
+
+ commit();
+
+ query("q", "*:*", "sort", "n_tl1 desc");
+
+ // long cloudClientDocs = cloudClient.query(new
+ // SolrQuery("*:*")).getResults().getNumFound();
+ // System.out.println("clouddocs:" + cloudClientDocs);
+
+ // try to index to a living shard at shard2
+
+
+ long numFound1 = cloudClient.query(new SolrQuery("*:*")).getResults().getNumFound();
+
+ cloudClient.getZkStateReader().getLeaderRetry(DEFAULT_COLLECTION, SHARD1, 60000);
+
+ try {
+ index_specific(shardToJetty.get(SHARD1).get(1).client.solrClient, id, 1000, i1, 108, t1,
+ "specific doc!");
+ } catch (Exception e) {
+ // wait and try again
+ Thread.sleep(4000);
+ index_specific(shardToJetty.get(SHARD1).get(1).client.solrClient, id, 1000, i1, 108, t1,
+ "specific doc!");
+ }
+
+ commit();
+
+ checkShardConsistency(true, false);
+
+ query("q", "*:*", "sort", "n_tl1 desc");
+
+
+ cloudClient.setDefaultCollection(DEFAULT_COLLECTION);
+
+ long numFound2 = cloudClient.query(new SolrQuery("*:*")).getResults().getNumFound();
+
+ assertEquals(numFound1 + 1, numFound2);
+
+ SolrInputDocument doc = new SolrInputDocument();
+ doc.addField("id", 1001);
+
+ controlClient.add(doc);
+
+ // try adding a doc with CloudSolrServer
+ UpdateRequest ureq = new UpdateRequest();
+ ureq.add(doc);
+ // ureq.setParam("update.chain", DISTRIB_UPDATE_CHAIN);
+
+ try {
+ ureq.process(cloudClient);
+ } catch(SolrServerException e){
+ // try again
+ Thread.sleep(3500);
+ ureq.process(cloudClient);
+ }
+
+ commit();
+
+ query("q", "*:*", "sort", "n_tl1 desc");
+
+ long numFound3 = cloudClient.query(new SolrQuery("*:*")).getResults().getNumFound();
+
+ // lets just check that the one doc since last commit made it in...
+ assertEquals(numFound2 + 1, numFound3);
+
+ // test debugging
+ testDebugQueries();
+
+ if (VERBOSE) {
+ System.err.println(controlClient.query(new SolrQuery("*:*")).getResults()
+ .getNumFound());
+
+ for (SolrClient client : clients) {
+ try {
+ SolrQuery q = new SolrQuery("*:*");
+ q.set("distrib", false);
+ System.err.println(client.query(q).getResults()
+ .getNumFound());
+ } catch (Exception e) {
+
+ }
+ }
+ }
+ // TODO: This test currently fails because debug info is obtained only
+ // on shards with matches.
+ // query("q","matchesnothing","fl","*,score", "debugQuery", "true");
+
+ // this should trigger a recovery phase on deadShard
+ deadShard.jetty.start();
+
+ // make sure we have published we are recovering
+ Thread.sleep(1500);
+
+ waitForRecoveriesToFinish(false);
+
+ deadShardCount = shardToJetty.get(SHARD1).get(0).client.solrClient
+ .query(query).getResults().getNumFound();
+ // if we properly recovered, we should now have the couple missing docs that
+ // came in while shard was down
+ checkShardConsistency(true, false);
+
+
+ // recover over 100 docs so we do more than just peer sync (replicate recovery)
+ chaosMonkey.stopJetty(deadShard);
+
+ for (int i = 0; i < 226; i++) {
+ doc = new SolrInputDocument();
+ doc.addField("id", 2000 + i);
+ controlClient.add(doc);
+ ureq = new UpdateRequest();
+ ureq.add(doc);
+ // ureq.setParam("update.chain", DISTRIB_UPDATE_CHAIN);
+ ureq.process(cloudClient);
+ }
+ commit();
+
+ Thread.sleep(1500);
+
+ deadShard.jetty.start();
+
+ // make sure we have published we are recovering
+ Thread.sleep(1500);
+
+ waitForThingsToLevelOut(1, TimeUnit.MINUTES);
+
+ Thread.sleep(500);
+
+ waitForRecoveriesToFinish(false);
+
+ checkShardConsistency(true, false);
+
+ // try a backup command
+ try(final HttpSolrClient client = getHttpSolrClient((String) shardToJetty.get(SHARD2).get(0).info.get("base_url"))) {
+ final String backupName = "the_backup";
+ ModifiableSolrParams params = new ModifiableSolrParams();
+ params.set("qt", ReplicationHandler.PATH);
+ params.set("command", "backup");
+ params.set("name", backupName);
+ final Path location = FilterPath.unwrap(createTempDir()).toRealPath();
+ // Allow non-standard location outside SOLR_HOME
+ jettys.forEach(j -> j.getCoreContainer().getAllowPaths().add(location));
+ params.set("location", location.toString());
+
+ QueryRequest request = new QueryRequest(params);
+ client.request(request, DEFAULT_TEST_COLLECTION_NAME);
+
+
+ final BackupStatusChecker backupStatus
+ = new BackupStatusChecker(client, "/" + DEFAULT_TEST_COLLECTION_NAME + "/replication");
+ final String backupDirName = backupStatus.waitForBackupSuccess(backupName, 30);
+ assertTrue("Backup dir does not exist: " + backupDirName,
+ Files.exists(location.resolve(backupDirName)));
+ }
+
+ }
+
+ private void addNewReplica() throws Exception {
+
+ waitForRecoveriesToFinish(false);
+
+ // new server should be part of first shard
+ // how many docs are on the new shard?
+ for (CloudJettyRunner cjetty : shardToJetty.get(SHARD1)) {
+ if (VERBOSE) System.err.println("shard1 total:"
+ + cjetty.client.solrClient.query(new SolrQuery("*:*")).getResults().getNumFound());
+ }
+ for (CloudJettyRunner cjetty : shardToJetty.get(SHARD2)) {
+ if (VERBOSE) System.err.println("shard2 total:"
+ + cjetty.client.solrClient.query(new SolrQuery("*:*")).getResults().getNumFound());
+ }
+
+ checkShardConsistency(SHARD1);
+ checkShardConsistency(SHARD2);
+
+ assertDocCounts(VERBOSE);
+ }
+
+ private void testDebugQueries() throws Exception {
+ handle.put("explain", SKIPVAL);
+ handle.put("debug", UNORDERED);
+ handle.put("time", SKIPVAL);
+ handle.put("track", SKIP);
+ query("q", "now their fox sat had put", "fl", "*,score",
+ CommonParams.DEBUG_QUERY, "true");
+ query("q", "id_i1:[1 TO 5]", CommonParams.DEBUG_QUERY, "true");
+ query("q", "id_i1:[1 TO 5]", CommonParams.DEBUG, CommonParams.TIMING);
+ query("q", "id_i1:[1 TO 5]", CommonParams.DEBUG, CommonParams.RESULTS);
+ query("q", "id_i1:[1 TO 5]", CommonParams.DEBUG, CommonParams.QUERY);
+ }
+
+}
diff --git a/solr/test-framework/src/java/org/apache/solr/cloud/AbstractBasicDistributedZkTestBase.java b/solr/test-framework/src/java/org/apache/solr/cloud/AbstractBasicDistributedZkTestBase.java
new file mode 100644
index 00000000000..4b71235197f
--- /dev/null
+++ b/solr/test-framework/src/java/org/apache/solr/cloud/AbstractBasicDistributedZkTestBase.java
@@ -0,0 +1,1350 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.cloud;
+
+import java.io.IOException;
+import java.lang.invoke.MethodHandles;
+import java.net.URL;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.Callable;
+import java.util.concurrent.CompletionService;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.ExecutorCompletionService;
+import java.util.concurrent.Future;
+import java.util.concurrent.SynchronousQueue;
+import java.util.concurrent.ThreadPoolExecutor;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.concurrent.atomic.AtomicReference;
+
+import org.apache.lucene.util.IOUtils;
+import org.apache.solr.JSONTestUtil;
+import org.apache.solr.client.solrj.SolrClient;
+import org.apache.solr.client.solrj.SolrQuery;
+import org.apache.solr.client.solrj.SolrServerException;
+import org.apache.solr.client.solrj.embedded.JettySolrRunner;
+import org.apache.solr.client.solrj.impl.HttpSolrClient;
+import org.apache.solr.client.solrj.request.AbstractUpdateRequest;
+import org.apache.solr.client.solrj.request.CollectionAdminRequest;
+import org.apache.solr.client.solrj.request.CoreAdminRequest.Create;
+import org.apache.solr.client.solrj.request.CoreAdminRequest.Unload;
+import org.apache.solr.client.solrj.request.QueryRequest;
+import org.apache.solr.client.solrj.request.StreamingUpdateRequest;
+import org.apache.solr.client.solrj.request.UpdateRequest;
+import org.apache.solr.client.solrj.response.CollectionAdminResponse;
+import org.apache.solr.client.solrj.response.FacetField;
+import org.apache.solr.client.solrj.response.Group;
+import org.apache.solr.client.solrj.response.GroupCommand;
+import org.apache.solr.client.solrj.response.GroupResponse;
+import org.apache.solr.client.solrj.response.QueryResponse;
+import org.apache.solr.client.solrj.response.UpdateResponse;
+import org.apache.solr.cloud.api.collections.CollectionHandlingUtils;
+import org.apache.solr.common.SolrDocument;
+import org.apache.solr.common.SolrDocumentList;
+import org.apache.solr.common.SolrException;
+import org.apache.solr.common.SolrInputDocument;
+import org.apache.solr.common.cloud.ClusterState;
+import org.apache.solr.common.cloud.DocCollection;
+import org.apache.solr.common.cloud.Replica;
+import org.apache.solr.common.cloud.Slice;
+import org.apache.solr.common.cloud.ZkCoreNodeProps;
+import org.apache.solr.common.cloud.ZkNodeProps;
+import org.apache.solr.common.cloud.ZkStateReader;
+import org.apache.solr.common.params.CollectionParams.CollectionAction;
+import org.apache.solr.common.params.CommonParams;
+import org.apache.solr.common.params.ModifiableSolrParams;
+import org.apache.solr.common.params.UpdateParams;
+import org.apache.solr.common.util.ExecutorUtil;
+import org.apache.solr.common.util.NamedList;
+import org.apache.solr.common.util.SolrNamedThreadFactory;
+import org.apache.solr.util.TestInjection;
+import org.apache.solr.util.TestInjection.Hook;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+
+/**
+ * This test simply does a bunch of basic things in solrcloud mode and asserts things
+ * work as expected.
+ */
+public abstract class AbstractBasicDistributedZkTestBase extends AbstractFullDistribZkTestBase {
+
+ private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+
+ private static final String DEFAULT_COLLECTION = "collection1";
+
+ private final boolean onlyLeaderIndexes = random().nextBoolean();
+
+ String t1="a_t";
+ String i1="a_i1";
+ String tlong = "other_tl1";
+ String tsort="t_sortable";
+
+ String oddField="oddField_s";
+ String missingField="ignore_exception__missing_but_valid_field_t";
+
+ private Map<String,List<SolrClient>> otherCollectionClients = new HashMap<>();
+
+ private String oneInstanceCollection = "oneInstanceCollection";
+ private String oneInstanceCollection2 = "oneInstanceCollection2";
+
+ private AtomicInteger nodeCounter = new AtomicInteger();
+
+ CompletionService<Object> completionService;
+ Set<Future<Object>> pending;
+
+ private static Hook newSearcherHook = new Hook() {
+ volatile CountDownLatch latch;
+ AtomicReference<String> collection = new AtomicReference<>();
+
+ @Override
+ public void newSearcher(String collectionName) {
+ String c = collection.get();
+ if (c != null && c.equals(collectionName)) {
+ log.info("Hook detected newSearcher");
+ try {
+ latch.countDown();
+ } catch (NullPointerException e) {
+
+ }
+ }
+ }
+
+ public void waitForSearcher(String collection, int cnt, int timeoutms, boolean failOnTimeout) throws InterruptedException {
+ latch = new CountDownLatch(cnt);
+ this.collection.set(collection);
+ boolean timeout = !latch.await(timeoutms, TimeUnit.MILLISECONDS);
+ if (timeout && failOnTimeout) {
+ fail("timed out waiting for new searcher event " + latch.getCount());
+ }
+ }
+
+ };
+
+ public AbstractBasicDistributedZkTestBase() {
+ // we need DVs on point fields to compute stats & facets
+ if (Boolean.getBoolean(NUMERIC_POINTS_SYSPROP)) System.setProperty(NUMERIC_DOCVALUES_SYSPROP,"true");
+
+ sliceCount = 2;
+ completionService = new ExecutorCompletionService<>(executor);
+ pending = new HashSet<>();
+
+ }
+
+ @BeforeClass
+ public static void beforeBDZKTClass() {
+ TestInjection.newSearcherHook(newSearcherHook);
+ }
+
+ @Override
+ protected boolean useTlogReplicas() {
+ return false; // TODO: tlog replicas make commits take way too long due to what is likely a bug and its TestInjection use
+ }
+
+ @Override
+ protected void setDistributedParams(ModifiableSolrParams params) {
+
+ if (r.nextBoolean()) {
+ // don't set shards, let that be figured out from the cloud state
+ } else {
+ // use shard ids rather than physical locations
+ StringBuilder sb = new StringBuilder();
+ for (int i = 0; i < getShardCount(); i++) {
+ if (i > 0)
+ sb.append(',');
+ sb.append("shard" + (i + 3));
+ }
+ params.set("shards", sb.toString());
+ }
+ }
+
+ @Test
+ @ShardsFixed(num = 4)
+ // commented out on: 17-Feb-2019 @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // annotated on: 24-Dec-2018
+ protected void test() throws Exception {
+ // setLoggingLevel(null);
+
+ ZkStateReader zkStateReader = cloudClient.getZkStateReader();
+ // make sure we have leaders for each shard
+ for (int j = 1; j < sliceCount; j++) {
+ zkStateReader.getLeaderRetry(DEFAULT_COLLECTION, "shard" + j, 10000);
+ } // make sure we again have leaders for each shard
+
+ waitForRecoveriesToFinish(false);
+
+ handle.clear();
+ handle.put("timestamp", SKIPVAL);
+
+ del("*:*");
+ queryAndCompareShards(params("q", "*:*", "distrib", "false", "sanity_check", "is_empty"));
+
+ // ask every individual replica of every shard to update+commit the same doc id
+ // with an incrementing counter on each update+commit
+ int foo_i_counter = 0;
+ for (SolrClient client : clients) {
+ foo_i_counter++;
+ indexDoc(client, params("commit", "true"), // SOLR-4923
+ sdoc(id,1, i1,100, tlong,100, "foo_i", foo_i_counter));
+ // after every update+commit, check all the shards consistency
+ queryAndCompareShards(params("q", "id:1", "distrib", "false",
+ "sanity_check", "non_distrib_id_1_lookup"));
+ queryAndCompareShards(params("q", "id:1",
+ "sanity_check", "distrib_id_1_lookup"));
+ }
+
+ indexr(id,1, i1, 100, tlong, 100,t1,"now is the time for all good men"
+ ,"foo_f", 1.414f, "foo_b", "true", "foo_d", 1.414d, tsort, "now is the time for all good men");
+ indexr(id, 2, i1, 50, tlong, 50, t1, "to come to the aid of their country."
+ , tsort, "to come to the aid of their country.");
+ indexr(id, 3, i1, 2, tlong, 2, t1, "how now brown cow", tsort, "how now brown cow");
+ indexr(id, 4, i1, -100, tlong, 101, t1, "the quick fox jumped over the lazy dog"
+ , tsort, "the quick fox jumped over the lazy dog");
+ indexr(id, 5, i1, 500, tlong, 500, t1, "the quick fox jumped way over the lazy dog"
+ , tsort, "the quick fox jumped over the lazy dog");
+ indexr(id, 6, i1, -600, tlong, 600, t1, "humpty dumpy sat on a wall", tsort, "the quick fox jumped over the lazy dog");
+ indexr(id, 7, i1, 123, tlong, 123, t1, "humpty dumpy had a great fall", tsort, "the quick fox jumped over the lazy dog");
+ indexr(id,8, i1, 876, tlong, 876,t1,"all the kings horses and all the kings men",tsort,"all the kings horses and all the kings men");
+ indexr(id, 9, i1, 7, tlong, 7, t1, "couldn't put humpty together again", tsort, "the quick fox jumped over the lazy dog");
+ indexr(id,10, i1, 4321, tlong, 4321,t1,"this too shall pass",tsort,"this too shall pass");
+ indexr(id,11, i1, -987, tlong, 987,t1,"An eye for eye only ends up making the whole world blind."
+ ,tsort,"An eye for eye only ends up making the whole world blind.");
+ indexr(id,12, i1, 379, tlong, 379,t1,"Great works are performed, not by strength, but by perseverance.",
+ tsort,"Great works are performed, not by strength, but by perseverance.");
+ indexr(id,13, i1, 232, tlong, 232,t1,"no eggs on wall, lesson learned", oddField, "odd man out",
+ tsort,"no eggs on wall, lesson learned");
+
+ indexr(id, 14, "SubjectTerms_mfacet", new String[] {"mathematical models", "mathematical analysis"});
+ indexr(id, 15, "SubjectTerms_mfacet", new String[] {"test 1", "test 2", "test3"});
+ indexr(id, 16, "SubjectTerms_mfacet", new String[] {"test 1", "test 2", "test3"});
+ String[] vals = new String[100];
+ for (int i=0; i<100; i++) {
+ vals[i] = "test " + i;
+ }
+ indexr(id, 17, "SubjectTerms_mfacet", vals);
+
+ for (int i=100; i<150; i++) {
+ indexr(id, i);
+ }
+
+ commit();
+
+ testTokenizedGrouping();
+ testSortableTextFaceting();
+ testSortableTextSorting();
+ testSortableTextGrouping();
+
+ queryAndCompareShards(params("q", "*:*",
+ "sort", "id desc",
+ "distrib", "false",
+ "sanity_check", "is_empty"));
+
+ // random value sort
+ for (String f : fieldNames) {
+ query(false, new String[] {"q","*:*", "sort",f+" desc"});
+ query(false, new String[] {"q","*:*", "sort",f+" asc"});
+ }
+
+ // these queries should be exactly ordered and scores should exactly match
+ query(false, new String[] {"q","*:*", "sort",i1+" desc"});
+ query(false, new String[] {"q","*:*", "sort",i1+" asc"});
+ query(false, new String[] {"q","*:*", "sort",i1+" desc", "fl","*,score"});
+ query(false, new String[] {"q","*:*", "sort","n_tl1 asc", "fl","*,score"});
+ query(false, new String[] {"q","*:*", "sort","n_tl1 desc"});
+ handle.put("maxScore", SKIPVAL);
+ query(false, new String[] {"q","{!func}"+i1});// does not expect maxScore. So if it comes ,ignore it. JavaBinCodec.writeSolrDocumentList()
+ //is agnostic of request params.
+ handle.remove("maxScore");
+ query(false, new String[] {"q","{!func}"+i1, "fl","*,score"}); // even scores should match exactly here
+
+ handle.put("highlighting", UNORDERED);
+ handle.put("response", UNORDERED);
+
+ handle.put("maxScore", SKIPVAL);
+ query(false, new String[] {"q","quick"});
+ query(false, new String[] {"q","all","fl","id","start","0"});
+ query(false, new String[] {"q","all","fl","foofoofoo","start","0"}); // no fields in returned docs
+ query(false, new String[] {"q","all","fl","id","start","100"});
+
+ handle.put("score", SKIPVAL);
+ query(false, new String[] {"q","quick","fl","*,score"});
+ query(false, new String[] {"q","all","fl","*,score","start","1"});
+ query(false, new String[] {"q","all","fl","*,score","start","100"});
+
+ query(false, new String[] {"q","now their fox sat had put","fl","*,score",
+ "hl","true","hl.fl",t1});
+
+ query(false, new String[] {"q","now their fox sat had put","fl","foofoofoo",
+ "hl","true","hl.fl",t1});
+
+ query(false, new String[] {"q","matchesnothing","fl","*,score"});
+
+ query(false, new Object[] {"q","*:*", "rows",100, "facet","true", "facet.field",t1});
+ query(false, new Object[] {"q","*:*", "rows",100, "facet","true", "facet.field",t1, "facet.limit",-1, "facet.sort","count"});
+ query(false, new Object[] {"q","*:*", "rows",100, "facet","true", "facet.field",t1, "facet.limit",-1, "facet.sort","count", "facet.mincount",2});
+ query(false, new Object[] {"q","*:*", "rows",100, "facet","true", "facet.field",t1, "facet.limit",-1, "facet.sort","index"});
+ query(false, new Object[] {"q","*:*", "rows",100, "facet","true", "facet.field",t1, "facet.limit",-1, "facet.sort","index", "facet.mincount",2});
+ query(false, new Object[] {"q","*:*", "rows",100, "facet","true", "facet.field",t1,"facet.limit",1});
+ query(false, new Object[] {"q","*:*", "rows",100, "facet","true", "facet.query","quick", "facet.query","all", "facet.query","*:*"});
+ query(false, new Object[] {"q","*:*", "rows",100, "facet","true", "facet.field",t1, "facet.offset",1});
+ query(false, new Object[] {"q","*:*", "rows",100, "facet","true", "facet.field",t1, "facet.mincount",2});
+
+ // test faceting multiple things at once
+ query(false, new Object[] {"q","*:*", "rows",100, "facet","true", "facet.query","quick", "facet.query","all", "facet.query","*:*"
+ ,"facet.field",t1});
+
+ // test filter tagging, facet exclusion, and naming (multi-select facet support)
+ query(false, new Object[] {"q","*:*", "rows",100, "facet","true", "facet.query","{!key=myquick}quick", "facet.query","{!key=myall ex=a}all", "facet.query","*:*"
+ ,"facet.field","{!key=mykey ex=a}"+t1
+ ,"facet.field","{!key=other ex=b}"+t1
+ ,"facet.field","{!key=again ex=a,b}"+t1
+ ,"facet.field",t1
+ ,"fq","{!tag=a}id_i1:[1 TO 7]", "fq","{!tag=b}id_i1:[3 TO 9]"}
+ );
+ query(false, new Object[] {"q", "*:*", "facet", "true", "facet.field", "{!ex=t1}SubjectTerms_mfacet", "fq", "{!tag=t1}SubjectTerms_mfacet:(test 1)", "facet.limit", "10", "facet.mincount", "1"});
+
+ // test field that is valid in schema but missing in all shards
+ query(false, new Object[] {"q","*:*", "rows",100, "facet","true", "facet.field",missingField, "facet.mincount",2});
+ // test field that is valid in schema and missing in some shards
+ query(false, new Object[] {"q","*:*", "rows",100, "facet","true", "facet.field",oddField, "facet.mincount",2});
+
+ query(false, new Object[] {"q","*:*", "sort",i1+" desc", "stats", "true", "stats.field", i1});
+
+ /*** TODO: the failure may come back in "exception"
+ try {
+ // test error produced for field that is invalid for schema
+ query("q","*:*", "rows",100, "facet","true", "facet.field",invalidField, "facet.mincount",2);
+ TestCase.fail("SolrServerException expected for invalid field that is not in schema");
+ } catch (SolrServerException ex) {
+ // expected
+ }
+ ***/
+
+ // Try to get better coverage for refinement queries by turning off over requesting.
+ // This makes it much more likely that we may not get the top facet values and hence
+ // we turn of that checking.
+ handle.put("facet_fields", SKIPVAL);
+ query(false, new Object[] {"q","*:*", "rows",0, "facet","true", "facet.field",t1,"facet.limit",5, "facet.shard.limit",5});
+ // check a complex key name
+ query(false, new Object[] {"q","*:*", "rows",0, "facet","true", "facet.field","{!key='a b/c \\' \\} foo'}"+t1,"facet.limit",5, "facet.shard.limit",5});
+ handle.remove("facet_fields");
+
+
+ // index the same document to two servers and make sure things
+ // don't blow up.
+ if (clients.size()>=2) {
+ index(id,100, i1, 107 ,t1,"oh no, a duplicate!");
+ for (int i=0; i ffs = resp.getFacetFields();
+ for (FacetField ff : ffs) {
+ if (ff.getName().equals(tsort) == false) continue;
+ for (FacetField.Count count : ff.getValues()) {
+ long num = count.getCount();
+ switch (count.getName()) {
+ case "all the kings horses and all the kings men":
+ case "An eye for eye only ends up making the whole world blind.":
+ case "Great works are performed, not by strength, but by perseverance.":
+ case "how now brown cow":
+ case "no eggs on wall, lesson learned":
+ case "now is the time for all good men":
+ case "this too shall pass":
+ case "to come to the aid of their country.":
+ assertEquals("Should have exactly one facet count for field " + ff.getName(), 1, num);
+ break;
+ case "the quick fox jumped over the lazy dog":
+ assertEquals("Should have 5 docs for the lazy dog", 5, num);
+ break;
+ default:
+ fail("No case for facet '" + ff.getName() + "'");
+
+ }
+ }
+ }
+ }
+
+ private void testSortableTextSorting() throws Exception {
+ SolrQuery query = new SolrQuery("*:*");
+ query.addSort(tsort, SolrQuery.ORDER.desc);
+ query.addField("*");
+ query.addField("eoe_sortable");
+ query.addField(tsort);
+ QueryResponse resp = queryServer(query);
+
+ SolrDocumentList docs = resp.getResults();
+
+ String title = docs.get(0).getFieldValue(tsort).toString();
+ for (SolrDocument doc : docs) {
+ assertTrue("Docs should be back in sorted order, descending", title.compareTo(doc.getFieldValue(tsort).toString()) >= 0);
+ title = doc.getFieldValue(tsort).toString();
+ }
+ }
+
+ private void testSortableTextGrouping() throws Exception {
+ SolrQuery query = new SolrQuery("*:*");
+ query.add("group", "true");
+ query.add("group.field", tsort);
+ QueryResponse resp = queryServer(query);
+ GroupResponse groupResp = resp.getGroupResponse();
+ List<GroupCommand> grpCmds = groupResp.getValues();
+ for (GroupCommand grpCmd : grpCmds) {
+ if (grpCmd.getName().equals(tsort) == false) continue;
+ for (Group grp : grpCmd.getValues()) {
+ long count = grp.getResult().getNumFound();
+ if (grp.getGroupValue() == null) continue; // Don't count the groups without an entry as the number is variable
+ switch (grp.getGroupValue()) {
+ case "all the kings horses and all the kings men":
+ case "An eye for eye only ends up making the whole world blind.":
+ case "Great works are performed, not by strength, but by perseverance.":
+ case "how now brown cow":
+ case "no eggs on wall, lesson learned":
+ case "now is the time for all good men":
+ case "this too shall pass":
+ case "to come to the aid of their country.":
+ assertEquals("Should have exactly one facet count for field " + grpCmd.getName(), 1, count);
+ break;
+ case "the quick fox jumped over the lazy dog":
+ assertEquals("Should have 5 docs for the lazy dog", 5, count);
+ break;
+ default:
+ fail("No case for facet '" + grpCmd.getName() + "'");
+
+ }
+ }
+ }
+ }
+
+ private void testTokenizedGrouping() throws Exception {
+ SolrException ex = expectThrows(SolrException.class, () -> {
+ query(false, new String[]{"q", "*:*", "group", "true", "group.field", t1});
+ });
+ assertTrue("Expected error from server that SortableTextFields are required", ex.getMessage().contains("Sorting on a tokenized field that is not a SortableTextField is not supported in cloud mode"));
+ }
+
+ private void assertSliceCounts(String msg, long expected, DocCollection dColl) throws Exception {
+ long found = checkSlicesSameCounts(dColl);
+
+ if (found != expected) {
+ // we get one do over in a bad race
+ Thread.sleep(1000);
+ found = checkSlicesSameCounts(dColl);
+ }
+
+ assertEquals(msg, expected, checkSlicesSameCounts(dColl));
+ }
+
+ // Ensure that total docs found is the expected number.
+ private void waitForDocCount(long expectedNumFound, long waitMillis, String failureMessage)
+ throws Exception {
+ AtomicLong total = new AtomicLong(-1);
+ try {
+ getCommonCloudSolrClient().getZkStateReader().waitForState(DEFAULT_COLLECTION, waitMillis, TimeUnit.MILLISECONDS, (n, c) -> {
+ long docTotal;
+ try {
+ docTotal = checkSlicesSameCounts(c);
+ } catch (SolrServerException | IOException e) {
+ throw new RuntimeException(e);
+ }
+ total.set(docTotal);
+ if (docTotal == expectedNumFound) {
+ return true;
+ }
+ return false;
+ });
+ } catch (TimeoutException | InterruptedException e) {
+
+ }
+ // We could fail here if we broke out of the above because we exceeded the time allowed.
+ assertEquals(failureMessage, expectedNumFound, total.get());
+
+ // This should be redundant, but it caught a test error after all.
+ for (SolrClient client : clients) {
+ assertEquals(failureMessage, expectedNumFound, client.query(new SolrQuery("*:*")).getResults().getNumFound());
+ }
+ }
+
+ // Ensure that counts are the same for all replicas in each shard
+ // Return the total doc count for the query.
+ private long checkSlicesSameCounts(DocCollection dColl) throws SolrServerException, IOException {
+ long docTotal = 0; // total number of documents found counting only one replica per slice.
+ for (Slice slice : dColl.getActiveSlices()) {
+ long sliceDocCount = -1;
+ for (Replica rep : slice.getReplicas()) {
+ try (HttpSolrClient one = getHttpSolrClient(rep.getCoreUrl())) {
+ SolrQuery query = new SolrQuery("*:*");
+ query.setDistrib(false);
+ QueryResponse resp = one.query(query);
+ long hits = resp.getResults().getNumFound();
+ if (sliceDocCount == -1) {
+ sliceDocCount = hits;
+ docTotal += hits;
+ } else {
+ if (hits != sliceDocCount) {
+ return -1;
+ }
+ }
+ }
+ }
+ }
+ return docTotal;
+ }
+
+ private void testShardParamVariations() throws Exception {
+ SolrQuery query = new SolrQuery("*:*");
+ Map<String,Long> shardCounts = new HashMap<>();
+
+ for (String shard : shardToJetty.keySet()) {
+ // every client should give the same numDocs for this shard
+ // shuffle the clients in a different order for each shard
+ List<SolrClient> solrclients = new ArrayList<>(this.clients);
+ Collections.shuffle(solrclients, random());
+ for (SolrClient client : solrclients) {
+ query.set("shards", shard);
+ long numDocs = client.query(query).getResults().getNumFound();
+ assertTrue("numDocs < 0 for shard "+shard+" via "+client,
+ 0 <= numDocs);
+ if (!shardCounts.containsKey(shard)) {
+ shardCounts.put(shard, numDocs);
+ }
+ assertEquals("inconsitent numDocs for shard "+shard+" via "+client,
+ shardCounts.get(shard).longValue(), numDocs);
+
+ List<CloudJettyRunner> replicaJetties
+ = new ArrayList<>(shardToJetty.get(shard));
+ Collections.shuffle(replicaJetties, random());
+
+ // each replica should also give the same numDocs
+ ArrayList<String> replicaAlts = new ArrayList<>(replicaJetties.size() * 2);
+ for (CloudJettyRunner replicaJetty : shardToJetty.get(shard)) {
+ String replica = replicaJetty.url;
+ query.set("shards", replica);
+
+ // replicas already shuffled, use this in the alternative check below
+ if (0 == random().nextInt(3) || replicaAlts.size() < 2) {
+ replicaAlts.add(replica);
+ }
+
+ numDocs = client.query(query).getResults().getNumFound();
+ assertTrue("numDocs < 0 for replica "+replica+" via "+client,
+ 0 <= numDocs);
+ assertEquals("inconsitent numDocs for shard "+shard+
+ " in replica "+replica+" via "+client,
+ shardCounts.get(shard).longValue(), numDocs);
+ }
+
+ // any combination of replica alternatives should give same numDocs
+ String replicas = String.join("|", replicaAlts);
+ query.set("shards", replicas);
+ numDocs = client.query(query).getResults().getNumFound();
+ assertTrue("numDocs < 0 for replicas "+replicas+" via "+client,
+ 0 <= numDocs);
+ assertEquals("inconsitent numDocs for replicas "+replicas+
+ " via "+client,
+ shardCounts.get(shard).longValue(), numDocs);
+ }
+ }
+
+ // sums of multiple shards should add up regardless of how we
+ // query those shards or which client we use
+ long randomShardCountsExpected = 0;
+ ArrayList<String> randomShards = new ArrayList<>(shardCounts.size());
+ for (Map.Entry<String,Long> shardData : shardCounts.entrySet()) {
+ if (random().nextBoolean() || randomShards.size() < 2) {
+ String shard = shardData.getKey();
+ randomShardCountsExpected += shardData.getValue();
+ if (random().nextBoolean()) {
+ // use shard id
+ randomShards.add(shard);
+ } else {
+ // use some set explicit replicas
+ ArrayList<String> replicas = new ArrayList<>(7);
+ for (CloudJettyRunner replicaJetty : shardToJetty.get(shard)) {
+ if (0 == random().nextInt(3) || 0 == replicas.size()) {
+ replicas.add(replicaJetty.url);
+ }
+ }
+ Collections.shuffle(replicas, random());
+ randomShards.add(String.join("|", replicas));
+ }
+ }
+ }
+ String randShards = String.join(",", randomShards);
+ query.set("shards", randShards);
+ for (SolrClient client : this.clients) {
+ assertEquals("numDocs for "+randShards+" via "+client,
+ randomShardCountsExpected,
+ client.query(query).getResults().getNumFound());
+ }
+
+ // total num docs must match sum of every shard's numDocs
+ query = new SolrQuery("*:*");
+ long totalShardNumDocs = 0;
+ for (Long c : shardCounts.values()) {
+ totalShardNumDocs += c;
+ }
+ for (SolrClient client : clients) {
+ assertEquals("sum of shard numDocs on client: " + client,
+ totalShardNumDocs,
+ client.query(query).getResults().getNumFound());
+ }
+ assertTrue("total numDocs <= 0, WTF? Test is useless",
+ 0 < totalShardNumDocs);
+
+ }
+
+ private void testStopAndStartCoresInOneInstance() throws Exception {
+ JettySolrRunner jetty = jettys.get(0);
+ try (final HttpSolrClient httpSolrClient = (HttpSolrClient) jetty.newClient(15000, 60000)) {
+ ThreadPoolExecutor executor = null;
+ try {
+ executor = new ExecutorUtil.MDCAwareThreadPoolExecutor(0, Integer.MAX_VALUE,
+ 5, TimeUnit.SECONDS, new SynchronousQueue<Runnable>(),
+ new SolrNamedThreadFactory("testExecutor"));
+ int cnt = 3;
+
+ // create the cores
+ createCollectionInOneInstance(httpSolrClient, jetty.getNodeName(), executor, "multiunload2", 1, cnt);
+ } finally {
+ if (executor != null) {
+ ExecutorUtil.shutdownAndAwaitTermination(executor);
+ }
+ }
+ }
+
+ cloudJettys.get(0).jetty.stop();
+ printLayout();
+
+ cloudJettys.get(0).jetty.start();
+ cloudClient.getZkStateReader().forceUpdateCollection("multiunload2");
+ try {
+ cloudClient.getZkStateReader().getLeaderRetry("multiunload2", "shard1", 30000);
+ } catch (SolrException e) {
+ printLayout();
+ throw e;
+ }
+
+ printLayout();
+
+ }
+
+ /**
+ * Create a collection in single node
+ */
+ protected void createCollectionInOneInstance(final SolrClient client, String nodeName,
+ ThreadPoolExecutor executor, final String collection,
+ final int numShards, int numReplicas) {
+ assertNotNull(nodeName);
+ try {
+ assertEquals(0, CollectionAdminRequest.createCollection(collection, "conf1", numShards, 1)
+ .setCreateNodeSet("")
+ .process(client).getStatus());
+ } catch (SolrServerException | IOException e) {
+ throw new RuntimeException(e);
+ }
+ for (int i = 0; i < numReplicas; i++) {
+ final int freezeI = i;
+ executor.execute(() -> {
+ try {
+ assertTrue(CollectionAdminRequest.addReplicaToShard(collection, "shard"+((freezeI%numShards)+1))
+ .setCoreName(collection + freezeI)
+ .setNode(nodeName).process(client).isSuccess());
+ } catch (SolrServerException | IOException e) {
+ throw new RuntimeException(e);
+ }
+ });
+ }
+ }
+
+ protected String getBaseUrl(SolrClient client) {
+ String url2 = ((HttpSolrClient) client).getBaseURL()
+ .substring(
+ 0,
+ ((HttpSolrClient) client).getBaseURL().length()
+ - DEFAULT_COLLECTION.length() -1);
+ return url2;
+ }
+
+ @Override
+ protected CollectionAdminResponse createCollection(Map<String,List<Integer>> collectionInfos,
+ String collectionName, String configSetName, int numShards, int numReplicas, SolrClient client, String createNodeSetStr) throws SolrServerException, IOException {
+ // TODO: Use CollectionAdminRequest for this test
+ ModifiableSolrParams params = new ModifiableSolrParams();
+ params.set("action", CollectionAction.CREATE.toString());
+
+ params.set(CollectionHandlingUtils.NUM_SLICES, numShards);
+ params.set(ZkStateReader.REPLICATION_FACTOR, numReplicas);
+ if (createNodeSetStr != null) params.set(CollectionHandlingUtils.CREATE_NODE_SET, createNodeSetStr);
+
+ int clientIndex = clients.size() > 1 ? random().nextInt(2) : 0;
+ List<Integer> list = new ArrayList<>();
+ list.add(numShards);
+ list.add(numReplicas);
+ if (collectionInfos != null) {
+ collectionInfos.put(collectionName, list);
+ }
+ params.set("name", collectionName);
+ params.set("collection.configName", configSetName);
+ QueryRequest request = new QueryRequest(params);
+ request.setPath("/admin/collections");
+
+ CollectionAdminResponse res = new CollectionAdminResponse();
+ if (client == null) {
+ final String baseUrl = ((HttpSolrClient) clients.get(clientIndex)).getBaseURL().substring(
+ 0,
+ ((HttpSolrClient) clients.get(clientIndex)).getBaseURL().length()
+ - DEFAULT_COLLECTION.length() - 1);
+
+ try (SolrClient aClient = createNewSolrClient("", baseUrl)) {
+ res.setResponse(aClient.request(request));
+ }
+ } else {
+ res.setResponse(client.request(request));
+ }
+ return res;
+ }
+
+ protected ZkCoreNodeProps getLeaderUrlFromZk(String collection, String slice) {
+ ClusterState clusterState = getCommonCloudSolrClient().getZkStateReader().getClusterState();
+ ZkNodeProps leader = clusterState.getCollection(collection).getLeader(slice);
+ if (leader == null) {
+ throw new RuntimeException("Could not find leader:" + collection + " " + slice);
+ }
+ return new ZkCoreNodeProps(leader);
+ }
+
+ /**
+ * Expects a RegexReplaceProcessorFactories in the chain which will
+ * "double up" the values in two (stored) string fields.
+ *
+ * If the values are "double-doubled" or "not-doubled" then we know
+ * the processor was not run the appropriate number of times
+ *
+ */
+ private void testUpdateProcessorsRunOnlyOnce(final String chain) throws Exception {
+
+ final String fieldA = "regex_dup_A_s";
+ final String fieldB = "regex_dup_B_s";
+ final String val = "x";
+ final String expected = "x_x";
+ final ModifiableSolrParams updateParams = new ModifiableSolrParams();
+ updateParams.add(UpdateParams.UPDATE_CHAIN, chain);
+
+ final int numLoops = atLeast(50);
+
+ for (int i = 1; i < numLoops; i++) {
+ // add doc to random client
+ SolrClient updateClient = clients.get(random().nextInt(clients.size()));
+ SolrInputDocument doc = new SolrInputDocument();
+ addFields(doc, id, i, fieldA, val, fieldB, val);
+ UpdateResponse ures = add(updateClient, updateParams, doc);
+ assertEquals(chain + ": update failed", 0, ures.getStatus());
+ ures = updateClient.commit();
+ assertEquals(chain + ": commit failed", 0, ures.getStatus());
+ }
+
+ // query for each doc, and check both fields to ensure the value is correct
+ for (int i = 1; i < numLoops; i++) {
+ final String query = id + ":" + i;
+ QueryResponse qres = queryServer(new SolrQuery(query));
+ assertEquals(chain + ": query failed: " + query,
+ 0, qres.getStatus());
+ assertEquals(chain + ": didn't find correct # docs with query: " + query,
+ 1, qres.getResults().getNumFound());
+ SolrDocument doc = qres.getResults().get(0);
+
+ for (String field : new String[] {fieldA, fieldB}) {
+ assertEquals(chain + ": doc#" + i+ " has wrong value for " + field,
+ expected, doc.getFirstValue(field));
+ }
+ }
+
+ }
+
+ // cloud level test mainly needed just to make sure that versions and errors are propagated correctly
+ private void doOptimisticLockingAndUpdating() throws Exception {
+ log.info("### STARTING doOptimisticLockingAndUpdating");
+ printLayout();
+
+ final SolrInputDocument sd = sdoc("id", 1000, "_version_", -1);
+ indexDoc(sd);
+
+ ignoreException("version conflict");
+ for (SolrClient client : clients) {
+ SolrException e = expectThrows(SolrException.class, () -> client.add(sd));
+ assertEquals(409, e.code());
+ }
+ unIgnoreException("version conflict");
+
+ // TODO: test deletes. SolrJ needs a good way to pass version for delete...
+
+ final SolrInputDocument sd2 = sdoc("id", 1000, "foo_i",5);
+ clients.get(0).add(sd2);
+
+ List<Integer> expected = new ArrayList<>();
+ int val = 0;
+ for (SolrClient client : clients) {
+ val += 10;
+ client.add(sdoc("id", 1000, "val_i", map("add",val), "foo_i",val));
+ expected.add(val);
+ }
+
+ QueryRequest qr = new QueryRequest(params("qt", "/get", "id","1000"));
+ for (SolrClient client : clients) {
+ val += 10;
+ NamedList<Object> rsp = client.request(qr);
+ String match = JSONTestUtil.matchObj("/val_i", rsp.get("doc"), expected);
+ if (match != null) throw new RuntimeException(match);
+ }
+ }
+
+ private void testNumberOfCommitsWithCommitAfterAdd()
+ throws SolrServerException, IOException {
+ log.info("### STARTING testNumberOfCommitsWithCommitAfterAdd");
+ long startCommits = getNumCommits((HttpSolrClient) clients.get(0));
+
+
+ NamedList<Object> result = clients.get(0).request(
+ new StreamingUpdateRequest("/update",
+ getFile("books_numeric_ids.csv").toPath(), "application/csv")
+ .setCommitWithin(900000)
+ .setAction(AbstractUpdateRequest.ACTION.COMMIT, true, true));
+
+ long endCommits = getNumCommits((HttpSolrClient) clients.get(0));
+
+ assertEquals(startCommits + 1L, endCommits);
+ }
+
+ private Long getNumCommits(HttpSolrClient sourceClient) throws
+ SolrServerException, IOException {
+ // construct the /admin/metrics URL
+ URL url = new URL(sourceClient.getBaseURL());
+ String path = url.getPath().substring(1);
+ String[] elements = path.split("/");
+ String collection = elements[elements.length - 1];
+ String urlString = url.toString();
+ urlString = urlString.substring(0, urlString.length() - collection.length() - 1);
+ try (HttpSolrClient client = getHttpSolrClient(urlString, 15000, 60000)) {
+ ModifiableSolrParams params = new ModifiableSolrParams();
+ //params.set("qt", "/admin/metrics?prefix=UPDATE.updateHandler®istry=solr.core." + collection);
+ params.set("qt", "/admin/metrics");
+ params.set("prefix", "UPDATE.updateHandler");
+ params.set("registry", "solr.core." + collection);
+ // use generic request to avoid extra processing of queries
+ QueryRequest req = new QueryRequest(params);
+ NamedList<Object> resp = client.request(req);
+ NamedList<Object> metrics = (NamedList<Object>) resp.get("metrics");
+ NamedList<Object> uhandlerCat = (NamedList<Object>) metrics.getVal(0);
+ @SuppressWarnings({"unchecked"})
+ Map<String,Object> commits = (Map<String,Object>) uhandlerCat.get("UPDATE.updateHandler.commits");
+ return (Long) commits.get("count");
+ }
+ }
+
+ private void testANewCollectionInOneInstanceWithManualShardAssignement() throws Exception {
+ log.info("### STARTING testANewCollectionInOneInstanceWithManualShardAssignement");
+ assertEquals(0, CollectionAdminRequest.createCollection(oneInstanceCollection2, "conf1", 2, 2)
+ .setCreateNodeSet("")
+ .process(cloudClient).getStatus());
+
+ List<SolrClient> collectionClients = new ArrayList<>();
+ for (int i = 0; i < 4; i++) {
+ CollectionAdminResponse resp = CollectionAdminRequest
+ .addReplicaToShard(oneInstanceCollection2, "shard" + ((i%2)+1))
+ .setNode(jettys.get(0).getNodeName())
+ .process(cloudClient);
+ for (String coreName : resp.getCollectionCoresStatus().keySet()) {
+ collectionClients.add(createNewSolrClient(coreName, jettys.get(0).getBaseUrl().toString()));
+ }
+
+
+ }
+
+ SolrClient client1 = collectionClients.get(0);
+ SolrClient client2 = collectionClients.get(1);
+ SolrClient client3 = collectionClients.get(2);
+ SolrClient client4 = collectionClients.get(3);
+
+
+ // no one should be recovering
+ waitForRecoveriesToFinish(oneInstanceCollection2, getCommonCloudSolrClient().getZkStateReader(), false, true);
+
+ assertAllActive(oneInstanceCollection2, getCommonCloudSolrClient().getZkStateReader());
+
+ //printLayout();
+
+ // TODO: enable when we don't falsely get slice1...
+ // solrj.getZkStateReader().getLeaderUrl(oneInstanceCollection2, "slice1", 30000);
+ // solrj.getZkStateReader().getLeaderUrl(oneInstanceCollection2, "slice2", 30000);
+ client2.add(getDoc(id, "1"));
+ client3.add(getDoc(id, "2"));
+ client4.add(getDoc(id, "3"));
+
+ client1.commit();
+ SolrQuery query = new SolrQuery("*:*");
+ query.set("distrib", false);
+ long oneDocs = client1.query(query).getResults().getNumFound();
+ long twoDocs = client2.query(query).getResults().getNumFound();
+ long threeDocs = client3.query(query).getResults().getNumFound();
+ long fourDocs = client4.query(query).getResults().getNumFound();
+
+ query.set("collection", oneInstanceCollection2);
+ query.set("distrib", true);
+ long allDocs = getCommonCloudSolrClient().query(query).getResults().getNumFound();
+
+// System.out.println("1:" + oneDocs);
+// System.out.println("2:" + twoDocs);
+// System.out.println("3:" + threeDocs);
+// System.out.println("4:" + fourDocs);
+// System.out.println("All Docs:" + allDocs);
+
+// assertEquals(oneDocs, threeDocs);
+// assertEquals(twoDocs, fourDocs);
+// assertNotSame(oneDocs, twoDocs);
+ assertEquals(3, allDocs);
+
+ // we added a role of none on these creates - check for it
+ ZkStateReader zkStateReader = getCommonCloudSolrClient().getZkStateReader();
+ zkStateReader.forceUpdateCollection(oneInstanceCollection2);
+ Map<String,Slice> slices = zkStateReader.getClusterState().getCollection(oneInstanceCollection2).getSlicesMap();
+ assertNotNull(slices);
+
+ ZkCoreNodeProps props = new ZkCoreNodeProps(getCommonCloudSolrClient().getZkStateReader().getClusterState()
+ .getCollection(oneInstanceCollection2).getLeader("shard1"));
+
+ // now test that unloading a core gets us a new leader
+ try (HttpSolrClient unloadClient = getHttpSolrClient(jettys.get(0).getBaseUrl().toString(), 15000, 60000)) {
+ Unload unloadCmd = new Unload(true);
+ unloadCmd.setCoreName(props.getCoreName());
+
+ String leader = props.getCoreUrl();
+
+ testExecutor.execute(new Runnable() {
+
+ @Override
+ public void run() {
+ try {
+ unloadClient.request(unloadCmd);
+ } catch (SolrServerException e) {
+ throw new RuntimeException(e);
+ } catch (IOException e) {
+ throw new RuntimeException(e);
+ }
+ }
+ });
+
+ try {
+ getCommonCloudSolrClient().getZkStateReader().waitForState(oneInstanceCollection2, 20000, TimeUnit.MILLISECONDS, (n, c) -> {
+
+
+ try {
+ if (leader.equals(zkStateReader.getLeaderUrl(oneInstanceCollection2, "shard1", 10000))) {
+ return false;
+ }
+ } catch (InterruptedException e) {
+ throw new RuntimeException(e);
+ }
+ return true;
+ });
+ } catch (TimeoutException | InterruptedException e) {
+ fail("Leader never changed");
+ }
+ }
+
+ IOUtils.close(collectionClients);
+
+ }
+
+ private void testSearchByCollectionName() throws SolrServerException, IOException {
+ log.info("### STARTING testSearchByCollectionName");
+ SolrClient client = clients.get(0);
+ final String baseUrl = ((HttpSolrClient) client).getBaseURL().substring(
+ 0,
+ ((HttpSolrClient) client).getBaseURL().length()
+ - DEFAULT_COLLECTION.length() - 1);
+
+ // the cores each have different names, but if we add the collection name to the url
+ // we should get mapped to the right core
+ try (SolrClient client1 = createNewSolrClient(oneInstanceCollection, baseUrl)) {
+ SolrQuery query = new SolrQuery("*:*");
+ long oneDocs = client1.query(query).getResults().getNumFound();
+ assertEquals(3, oneDocs);
+ }
+ }
+
+ private void testUpdateByCollectionName() throws SolrServerException, IOException {
+ log.info("### STARTING testUpdateByCollectionName");
+ SolrClient client = clients.get(0);
+ final String baseUrl = ((HttpSolrClient) client).getBaseURL().substring(
+ 0,
+ ((HttpSolrClient) client).getBaseURL().length()
+ - DEFAULT_COLLECTION.length() - 1);
+
+ // the cores each have different names, but if we add the collection name to the url
+ // we should get mapped to the right core
+ // test hitting an update url
+ try (SolrClient client1 = createNewSolrClient(oneInstanceCollection, baseUrl)) {
+ client1.commit();
+ }
+ }
+
+ private void testANewCollectionInOneInstance() throws Exception {
+ log.info("### STARTING testANewCollectionInOneInstance");
+ CollectionAdminResponse response = CollectionAdminRequest.createCollection(oneInstanceCollection, "conf1", 2, 2)
+ .setCreateNodeSet(jettys.get(0).getNodeName())
+ .process(cloudClient);
+ assertEquals(0, response.getStatus());
+ List<SolrClient> collectionClients = new ArrayList<>();
+ for (String coreName : response.getCollectionCoresStatus().keySet()) {
+ collectionClients.add(createNewSolrClient(coreName, jettys.get(0).getBaseUrl().toString()));
+ }
+
+ SolrClient client1 = collectionClients.get(0);
+ SolrClient client2 = collectionClients.get(1);
+ SolrClient client3 = collectionClients.get(2);
+ SolrClient client4 = collectionClients.get(3);
+
+ waitForRecoveriesToFinish(oneInstanceCollection, getCommonCloudSolrClient().getZkStateReader(), false);
+ assertAllActive(oneInstanceCollection, getCommonCloudSolrClient().getZkStateReader());
+
+ client2.add(getDoc(id, "1"));
+ client3.add(getDoc(id, "2"));
+ client4.add(getDoc(id, "3"));
+
+ client1.commit();
+ SolrQuery query = new SolrQuery("*:*");
+ query.set("distrib", false);
+ long oneDocs = client1.query(query).getResults().getNumFound();
+ long twoDocs = client2.query(query).getResults().getNumFound();
+ long threeDocs = client3.query(query).getResults().getNumFound();
+ long fourDocs = client4.query(query).getResults().getNumFound();
+
+ query.set("collection", oneInstanceCollection);
+ query.set("distrib", true);
+ long allDocs = getCommonCloudSolrClient().query(query).getResults().getNumFound();
+
+// System.out.println("1:" + oneDocs);
+// System.out.println("2:" + twoDocs);
+// System.out.println("3:" + threeDocs);
+// System.out.println("4:" + fourDocs);
+// System.out.println("All Docs:" + allDocs);
+
+ assertEquals(3, allDocs);
+ IOUtils.close(collectionClients);
+
+ }
+
+ private void createCollection(String collection,
+ List<SolrClient> collectionClients, String baseUrl, int num) {
+ createSolrCore(collection, collectionClients, baseUrl, num, null);
+ }
+
+ private void createSolrCore(final String collection,
+ List<SolrClient> collectionClients, final String baseUrl, final int num,
+ final String shardId) {
+ Callable<Object> call = () -> {
+ try (HttpSolrClient client = getHttpSolrClient(baseUrl)) {
+ // client.setConnectionTimeout(15000);
+ Create createCmd = new Create();
+ createCmd.setRoles("none");
+ createCmd.setCoreName(collection + num);
+ createCmd.setCollection(collection);
+
+ if (random().nextBoolean()) {
+ // sometimes we use an explicit core node name
+ createCmd.setCoreNodeName("anode" + nodeCounter.incrementAndGet());
+ }
+
+ if (shardId == null) {
+ createCmd.setNumShards(2);
+ }
+ createCmd.setDataDir(getDataDir(createTempDir(collection).toFile().getAbsolutePath()));
+ if (shardId != null) {
+ createCmd.setShardId(shardId);
+ }
+ client.request(createCmd);
+ } catch (Exception e) {
+ e.printStackTrace();
+ //fail
+ }
+ return null;
+ };
+
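+ // submit the core creation asynchronously; the future is tracked in 'pending' and drained via the completion service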
+ pending.add(completionService.submit(call));
+
+
+ collectionClients.add(createNewSolrClient(collection + num, baseUrl));
+ }
+
+ private void testMultipleCollections() throws Exception {
+ log.info("### STARTING testMultipleCollections");
+ // create another 2 collections and search across them
+ createNewCollection("collection2");
+ createNewCollection("collection3");
+
+ while (pending != null && pending.size() > 0) {
+
+ Future<Object> future = completionService.take();
+ if (future == null) return;
+ pending.remove(future);
+ }
+
+ indexDoc("collection2", getDoc(id, "10000000"));
+ indexDoc("collection2", getDoc(id, "10000001"));
+ indexDoc("collection2", getDoc(id, "10000003"));
+ getCommonCloudSolrClient().setDefaultCollection("collection2");
+ getCommonCloudSolrClient().add(getDoc(id, "10000004"));
+ getCommonCloudSolrClient().setDefaultCollection(null);
+
+ indexDoc("collection3", getDoc(id, "20000000"));
+ indexDoc("collection3", getDoc(id, "20000001"));
+ getCommonCloudSolrClient().setDefaultCollection("collection3");
+ getCommonCloudSolrClient().add(getDoc(id, "10000005"));
+ getCommonCloudSolrClient().setDefaultCollection(null);
+
+ otherCollectionClients.get("collection2").get(0).commit();
+ otherCollectionClients.get("collection3").get(0).commit();
+
+ getCommonCloudSolrClient().setDefaultCollection("collection1");
+ long collection1Docs = getCommonCloudSolrClient().query(new SolrQuery("*:*")).getResults()
+ .getNumFound();
+
+ long collection2Docs = otherCollectionClients.get("collection2").get(0)
+ .query(new SolrQuery("*:*")).getResults().getNumFound();
+
+ long collection3Docs = otherCollectionClients.get("collection3").get(0)
+ .query(new SolrQuery("*:*")).getResults().getNumFound();
+
+
+ SolrQuery query = new SolrQuery("*:*");
+ query.set("collection", "collection2,collection3");
+ long found = clients.get(0).query(query).getResults().getNumFound();
+ assertEquals(collection2Docs + collection3Docs, found);
+
+ query = new SolrQuery("*:*");
+ query.set("collection", "collection1,collection2,collection3");
+ found = clients.get(0).query(query).getResults().getNumFound();
+ assertEquals(collection1Docs + collection2Docs + collection3Docs, found);
+
+ // try to search multiple with cloud client
+ found = getCommonCloudSolrClient().query(query).getResults().getNumFound();
+ assertEquals(collection1Docs + collection2Docs + collection3Docs, found);
+
+ query.set("collection", "collection2,collection3");
+ found = getCommonCloudSolrClient().query(query).getResults().getNumFound();
+ assertEquals(collection2Docs + collection3Docs, found);
+
+ query.set("collection", "collection3");
+ found = getCommonCloudSolrClient().query(query).getResults().getNumFound();
+ assertEquals(collection3Docs, found);
+
+ query.remove("collection");
+ found = getCommonCloudSolrClient().query(query).getResults().getNumFound();
+ assertEquals(collection1Docs, found);
+
+ assertEquals(collection3Docs, collection2Docs - 1);
+ }
+
+ protected void indexDoc(String collection, SolrInputDocument doc) throws IOException, SolrServerException {
+ List<SolrClient> clients = otherCollectionClients.get(collection);
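+ // pick a client deterministically by hashing the doc id (the mask keeps the hash non-negative)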
+ int which = (doc.getField(id).toString().hashCode() & 0x7fffffff) % clients.size();
+ SolrClient client = clients.get(which);
+ client.add(doc);
+ }
+
+ @SuppressWarnings({"unchecked"})
+ private void createNewCollection(final String collection) throws InterruptedException {
+ try {
+ assertEquals(0, CollectionAdminRequest
+ .createCollection(collection, "conf1", 2, 1)
+ .setCreateNodeSet("")
+ .process(cloudClient).getStatus());
+ } catch (Exception e) {
+ e.printStackTrace();
+ //fails
+ }
+ final List<SolrClient> collectionClients = new ArrayList<>();
+ otherCollectionClients.put(collection, collectionClients);
+ int unique = 0 ;
+ for (final JettySolrRunner runner : jettys) {
+ unique++;
+ final int frozeUnique = unique;
+ Callable<Object> call = () -> {
+
+ try {
+ assertTrue(CollectionAdminRequest
+ .addReplicaToShard(collection, "shard"+ ((frozeUnique%2)+1))
+ .setNode(runner.getNodeName())
+ .process(cloudClient).isSuccess());
+ } catch (Exception e) {
+ e.printStackTrace();
+ //fails
+ }
+ return null;
+ };
+
+ collectionClients.add(createNewSolrClient(collection, runner.getBaseUrl().toString()));
+ pending.add(completionService.submit(call));
+ while (pending != null && pending.size() > 0) {
+
+ Future<Object> future = completionService.take();
+ if (future == null) return;
+ pending.remove(future);
+ }
+ }
+ }
+
+ protected SolrClient createNewSolrClient(String collection, String baseUrl) {
+ try {
+ // setup the server...
+ HttpSolrClient client = getHttpSolrClient(baseUrl + "/" + collection);
+
+ return client;
+ }
+ catch (Exception ex) {
+ throw new RuntimeException(ex);
+ }
+ }
+
+ protected SolrClient createNewSolrClient(String collection, String baseUrl, int connectionTimeoutMillis, int socketTimeoutMillis) {
+ try {
+ // setup the server...
+ HttpSolrClient client = getHttpSolrClient(baseUrl + "/" + collection, connectionTimeoutMillis, socketTimeoutMillis);
+
+ return client;
+ }
+ catch (Exception ex) {
+ throw new RuntimeException(ex);
+ }
+ }
+
+ @Override
+ protected QueryResponse queryServer(ModifiableSolrParams params) throws SolrServerException, IOException {
+
+ if (r.nextBoolean())
+ return super.queryServer(params);
+
+ if (r.nextBoolean())
+ params.set("collection",DEFAULT_COLLECTION);
+
+ QueryResponse rsp = getCommonCloudSolrClient().query(params);
+ return rsp;
+ }
+
+ @Override
+ public void distribTearDown() throws Exception {
+ super.distribTearDown();
+ if (otherCollectionClients != null) {
+ for (List<SolrClient> clientList : otherCollectionClients.values()) {
+ IOUtils.close(clientList);
+ }
+ }
+ otherCollectionClients = null;
+ List<Runnable> tasks = executor.shutdownNow();
+ assertTrue(tasks.isEmpty());
+ }
+}
diff --git a/solr/test-framework/src/java/org/apache/solr/cloud/AbstractChaosMonkeyNothingIsSafeTestBase.java b/solr/test-framework/src/java/org/apache/solr/cloud/AbstractChaosMonkeyNothingIsSafeTestBase.java
new file mode 100644
index 00000000000..48facc9aa91
--- /dev/null
+++ b/solr/test-framework/src/java/org/apache/solr/cloud/AbstractChaosMonkeyNothingIsSafeTestBase.java
@@ -0,0 +1,325 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.cloud;
+
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.solr.client.solrj.SolrQuery;
+import org.apache.solr.client.solrj.impl.CloudSolrClient;
+import org.apache.solr.common.SolrInputDocument;
+import org.apache.solr.common.cloud.ZkStateReader;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+public abstract class AbstractChaosMonkeyNothingIsSafeTestBase extends AbstractFullDistribZkTestBase {
+ private static final int FAIL_TOLERANCE = 100;
+
+ private static final Integer RUN_LENGTH = Integer.parseInt(System.getProperty("solr.tests.cloud.cm.runlength", "-1"));
+
+ private final boolean onlyLeaderIndexes = random().nextBoolean();
+
+ @BeforeClass
+ public static void beforeSuperClass() {
+ schemaString = "schema15.xml"; // we need a string id
+ System.setProperty("solr.autoCommit.maxTime", "15000");
+ System.clearProperty("solr.httpclient.retries");
+ System.clearProperty("solr.retries.on.forward");
+ System.clearProperty("solr.retries.to.followers");
+ setErrorHook();
+ }
+
+ @AfterClass
+ public static void afterSuperClass() {
+ System.clearProperty("solr.autoCommit.maxTime");
+ clearErrorHook();
+ }
+
+
+
+ @Override
+ protected void destroyServers() throws Exception {
+
+ super.destroyServers();
+ }
+
+ protected static final String[] fieldNames = new String[]{"f_i", "f_f", "f_d", "f_l", "f_dt"};
+ protected static final RandVal[] randVals = new RandVal[]{rint, rfloat, rdouble, rlong, rdate};
+
+ private int clientSoTimeout = 60000;
+
+ private volatile FullThrottleStoppableIndexingThread ftIndexThread;
+
+ private final boolean runFullThrottle;
+
+ public String[] getFieldNames() {
+ return fieldNames;
+ }
+
+ public RandVal[] getRandValues() {
+ return randVals;
+ }
+
+ @Override
+ public void distribSetUp() throws Exception {
+ super.distribSetUp();
+ // can help to hide this when testing and looking at logs
+ //ignoreException("shard update error");
+ useFactory("solr.StandardDirectoryFactory");
+ }
+
+ @Override
+ public void distribTearDown() throws Exception {
+ try {
+ ftIndexThread.safeStop();
+ } catch (NullPointerException e) {
+ // okay
+ }
+ super.distribTearDown();
+ }
+
+ public AbstractChaosMonkeyNothingIsSafeTestBase() {
+ super();
+ sliceCount = Integer.parseInt(System.getProperty("solr.tests.cloud.cm.slicecount", "-1"));
+ if (sliceCount == -1) {
+ sliceCount = random().nextInt(TEST_NIGHTLY ? 5 : 3) + 1;
+ }
+
+ int numShards = Integer.parseInt(System.getProperty("solr.tests.cloud.cm.shardcount", "-1"));
+ if (numShards == -1) {
+ // we make sure that there's at least one shard with more than one replica
+ // so that the ChaosMonkey has something to kill
+ numShards = sliceCount + random().nextInt(TEST_NIGHTLY ? 12 : 2) + 1;
+ }
+ fixShardCount(numShards);
+
+
+ // TODO: we only do this sometimes so that we can sometimes compare against control,
+ // it's currently hard to know what requests failed when using ConcurrentSolrUpdateServer
+ runFullThrottle = random().nextBoolean();
+
+ }
+
+ @Override
+ protected boolean useTlogReplicas() {
+ return false; // TODO: tlog replicas make commits take way too long due to what is likely a bug and its TestInjection use
+ }
+
+ @Override
+ protected CloudSolrClient createCloudClient(String defaultCollection) {
+ return this.createCloudClient(defaultCollection, this.clientSoTimeout);
+ }
+
+ protected CloudSolrClient createCloudClient(String defaultCollection, int socketTimeout) {
+ CloudSolrClient client = getCloudSolrClient(zkServer.getZkAddress(), random().nextBoolean(), 30000, socketTimeout);
+ if (defaultCollection != null) client.setDefaultCollection(defaultCollection);
+ return client;
+ }
+
+ @Test
+ @SuppressWarnings({"try"})
+ public void test() throws Exception {
+ // None of the operations used here are particularly costly, so this should work.
+ // Using this low timeout will also help us catch index stalling.
+ clientSoTimeout = 5000;
+
+ boolean testSuccessful = false;
+ try (CloudSolrClient ourCloudClient = createCloudClient(DEFAULT_COLLECTION)) {
+ handle.clear();
+ handle.put("timestamp", SKIPVAL);
+ ZkStateReader zkStateReader = cloudClient.getZkStateReader();
+ // make sure we have leaders for each shard
+ for (int j = 1; j < sliceCount; j++) {
+ zkStateReader.getLeaderRetry(DEFAULT_COLLECTION, "shard" + j, 10000);
+ } // make sure we again have leaders for each shard
+
+ waitForRecoveriesToFinish(false);
+
+ // we cannot do delete by query
+ // as it's not supported for recovery
+ del("*:*");
+
+ List<StoppableThread> threads = new ArrayList<>();
+ List<StoppableIndexingThread> indexTreads = new ArrayList<>();
+ int threadCount = TEST_NIGHTLY ? 3 : 1;
+ int i = 0;
+ for (i = 0; i < threadCount; i++) {
+ StoppableIndexingThread indexThread = new StoppableIndexingThread(controlClient, cloudClient, Integer.toString(i), true);
+ threads.add(indexThread);
+ indexTreads.add(indexThread);
+ indexThread.start();
+ }
+
+ threadCount = 1;
+ i = 0;
+ for (i = 0; i < threadCount; i++) {
+ StoppableSearchThread searchThread = new StoppableSearchThread(cloudClient);
+ threads.add(searchThread);
+ searchThread.start();
+ }
+
+ if (runFullThrottle) {
+ ftIndexThread =
+ new FullThrottleStoppableIndexingThread(cloudClient.getHttpClient(),controlClient, cloudClient, clients, "ft1", true, this.clientSoTimeout);
+ ftIndexThread.start();
+ }
+
+ chaosMonkey.startTheMonkey(true, 10000);
+ try {
+ long runLength;
+ if (RUN_LENGTH != -1) {
+ runLength = RUN_LENGTH;
+ } else {
+ int[] runTimes;
+ if (TEST_NIGHTLY) {
+ runTimes = new int[] {5000, 6000, 10000, 15000, 25000, 30000,
+ 30000, 45000, 90000, 120000};
+ } else {
+ runTimes = new int[] {5000, 7000, 15000};
+ }
+ runLength = runTimes[random().nextInt(runTimes.length - 1)];
+ }
+
+ Thread.sleep(runLength);
+ } finally {
+ chaosMonkey.stopTheMonkey();
+ }
+
+ // ideally this should go into chaosMonkey
+ restartZk(1000 * (5 + random().nextInt(4)));
+
+
+ if (runFullThrottle) {
+ ftIndexThread.safeStop();
+ }
+
+ for (StoppableThread indexThread : threads) {
+ indexThread.safeStop();
+ }
+
+ // start any downed jetties to be sure we still will end up with a leader per shard...
+
+ // wait for stop...
+ for (StoppableThread indexThread : threads) {
+ indexThread.join();
+ }
+
+ // try and wait for any replications and what not to finish...
+
+ Thread.sleep(2000);
+
+ // wait until there are no recoveries...
+ waitForThingsToLevelOut();
+
+ // make sure we again have leaders for each shard
+ for (int j = 1; j < sliceCount; j++) {
+ zkStateReader.getLeaderRetry(DEFAULT_COLLECTION, "shard" + j, 30000);
+ }
+
+ commit();
+
+ // TODO: assert we didn't kill everyone
+
+ zkStateReader.updateLiveNodes();
+ assertTrue(zkStateReader.getClusterState().getLiveNodes().size() > 0);
+
+ // we expect full throttle fails, but cloud client should not easily fail
+ for (StoppableThread indexThread : threads) {
+ if (indexThread instanceof StoppableIndexingThread && !(indexThread instanceof FullThrottleStoppableIndexingThread)) {
+ int failCount = ((StoppableIndexingThread) indexThread).getFailCount();
+ assertFalse("There were too many update fails (" + failCount + " > " + FAIL_TOLERANCE
+ + ") - we expect it can happen, but shouldn't easily", failCount > FAIL_TOLERANCE);
+ }
+ }
+
+
+ waitForThingsToLevelOut(20, TimeUnit.SECONDS);
+
+ commit();
+
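+ // collect the ids that failed to add or delete so the consistency check can account for them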
+ Set<String> addFails = getAddFails(indexTreads);
+ Set<String> deleteFails = getDeleteFails(indexTreads);
+ // full throttle thread can
+ // have request fails
+ checkShardConsistency(!runFullThrottle, true, addFails, deleteFails);
+
+ long ctrlDocs = controlClient.query(new SolrQuery("*:*")).getResults()
+ .getNumFound();
+
+ // ensure we have added more than 0 docs
+ long cloudClientDocs = cloudClient.query(new SolrQuery("*:*"))
+ .getResults().getNumFound();
+
+ assertTrue("Found " + ctrlDocs + " control docs", cloudClientDocs > 0);
+
+ if (VERBOSE) System.out.println("control docs:"
+ + controlClient.query(new SolrQuery("*:*")).getResults()
+ .getNumFound() + "\n\n");
+
+ // try and make a collection to make sure the overseer has survived the expiration and session loss
+
+ // sometimes we restart zookeeper as well
+ if (random().nextBoolean()) {
+ // restartZk(1000 * (5 + random().nextInt(4)));
+ }
+
+ try (CloudSolrClient client = createCloudClient("collection1", 30000)) {
+ createCollection(null, "testcollection",
+ 1, 1, client, null, "conf1");
+
+ }
+ List<Integer> numShardsNumReplicas = new ArrayList<>(2);
+ numShardsNumReplicas.add(1);
+ numShardsNumReplicas.add(1);
+ checkForCollection("testcollection", numShardsNumReplicas, null);
+
+ testSuccessful = true;
+ } finally {
+ if (!testSuccessful) {
+ printLayout();
+ }
+ }
+ }
+
+ private Set<String> getAddFails(List<StoppableIndexingThread> threads) {
+ Set<String> addFails = new HashSet<>();
+ for (StoppableIndexingThread thread : threads) {
+ addFails.addAll(thread.getAddFails());
+ }
+ return addFails;
+ }
+
+ private Set<String> getDeleteFails(List<StoppableIndexingThread> threads) {
+ Set<String> deleteFails = new HashSet<>();
+ for (StoppableIndexingThread thread : threads) {
+ deleteFails.addAll(thread.getDeleteFails());
+ }
+ return deleteFails;
+ }
+
+ // skip the randoms - they can deadlock...
+ @Override
+ protected void indexr(Object... fields) throws Exception {
+ SolrInputDocument doc = getDoc(fields);
+ indexDoc(doc);
+ }
+
+}
diff --git a/solr/test-framework/src/java/org/apache/solr/cloud/AbstractChaosMonkeySafeLeaderTestBase.java b/solr/test-framework/src/java/org/apache/solr/cloud/AbstractChaosMonkeySafeLeaderTestBase.java
new file mode 100644
index 00000000000..0078b98783b
--- /dev/null
+++ b/solr/test-framework/src/java/org/apache/solr/cloud/AbstractChaosMonkeySafeLeaderTestBase.java
@@ -0,0 +1,211 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.solr.cloud;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.solr.client.solrj.SolrQuery;
+import org.apache.solr.client.solrj.SolrServerException;
+import org.apache.solr.client.solrj.impl.CloudSolrClient;
+import org.apache.solr.common.SolrInputDocument;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+public abstract class AbstractChaosMonkeySafeLeaderTestBase extends AbstractFullDistribZkTestBase {
+ private static final Integer RUN_LENGTH = Integer.parseInt(System.getProperty("solr.tests.cloud.cm.runlength", "-1"));
+
+ @BeforeClass
+ public static void beforeSuperClass() {
+ schemaString = "schema15.xml"; // we need a string id
+ System.setProperty("solr.autoCommit.maxTime", "15000");
+ System.clearProperty("solr.httpclient.retries");
+ System.clearProperty("solr.retries.on.forward");
+ System.clearProperty("solr.retries.to.followers");
+ setErrorHook();
+ }
+
+ @AfterClass
+ public static void afterSuperClass() {
+ System.clearProperty("solr.autoCommit.maxTime");
+ clearErrorHook();
+ }
+
+ protected static final String[] fieldNames = new String[]{"f_i", "f_f", "f_d", "f_l", "f_dt"};
+ protected static final RandVal[] randVals = new RandVal[]{rint, rfloat, rdouble, rlong, rdate};
+
+ public String[] getFieldNames() {
+ return fieldNames;
+ }
+
+ public RandVal[] getRandValues() {
+ return randVals;
+ }
+
+ protected abstract String getDirectoryFactory();
+ @Override
+ public void distribSetUp() throws Exception {
+ useFactory(getDirectoryFactory());
+ super.distribSetUp();
+ }
+
+ public AbstractChaosMonkeySafeLeaderTestBase() {
+ super();
+ sliceCount = Integer.parseInt(System.getProperty("solr.tests.cloud.cm.slicecount", "-1"));
+ if (sliceCount == -1) {
+ sliceCount = random().nextInt(TEST_NIGHTLY ? 5 : 3) + 1;
+ }
+
+ int numShards = Integer.parseInt(System.getProperty("solr.tests.cloud.cm.shardcount", "-1"));
+ if (numShards == -1) {
+ // we make sure that there's at least one shard with more than one replica
+ // so that the ChaosMonkey has something to kill
+ numShards = sliceCount + random().nextInt(TEST_NIGHTLY ? 12 : 2) + 1;
+ }
+ fixShardCount(numShards);
+ }
+
+ @Test
+ public void test() throws Exception {
+
+ handle.clear();
+ handle.put("timestamp", SKIPVAL);
+
+ // randomly turn on 1 seconds 'soft' commit
+ randomlyEnableAutoSoftCommit();
+
+ tryDelete();
+
+ List<StoppableIndexingThread> threads = new ArrayList<>();
+ int threadCount = 2;
+ int batchSize = 1;
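+ // half of the time, batch updates with a random batch size between 2 and 99 instead of single-doc updates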
+ if (random().nextBoolean()) {
+ batchSize = random().nextInt(98) + 2;
+ }
+
+ boolean pauseBetweenUpdates = TEST_NIGHTLY ? random().nextBoolean() : true;
+ int maxUpdates = -1;
+ if (!pauseBetweenUpdates) {
+ maxUpdates = 1000 + random().nextInt(1000);
+ } else {
+ maxUpdates = 15000;
+ }
+
+ for (int i = 0; i < threadCount; i++) {
+ StoppableIndexingThread indexThread = new StoppableIndexingThread(controlClient, cloudClient, Integer.toString(i), true, maxUpdates, batchSize, pauseBetweenUpdates); // random().nextInt(999) + 1
+ threads.add(indexThread);
+ indexThread.start();
+ }
+
+ chaosMonkey.startTheMonkey(false, 500);
+ try {
+ long runLength;
+ if (RUN_LENGTH != -1) {
+ runLength = RUN_LENGTH;
+ } else {
+ int[] runTimes;
+ if (TEST_NIGHTLY) {
+ runTimes = new int[] {5000, 6000, 10000, 15000, 25000, 30000,
+ 30000, 45000, 90000, 120000};
+ } else {
+ runTimes = new int[] {5000, 7000, 15000};
+ }
+ runLength = runTimes[random().nextInt(runTimes.length - 1)];
+ }
+
+ Thread.sleep(runLength);
+ } finally {
+ chaosMonkey.stopTheMonkey();
+ }
+
+ for (StoppableIndexingThread indexThread : threads) {
+ indexThread.safeStop();
+ }
+
+ // wait for stop...
+ for (StoppableIndexingThread indexThread : threads) {
+ indexThread.join();
+ }
+
+ for (StoppableIndexingThread indexThread : threads) {
+ assertEquals(0, indexThread.getFailCount());
+ }
+
+ // try and wait for any replications and what not to finish...
+
+ Thread.sleep(2000);
+
+ waitForThingsToLevelOut(3, TimeUnit.MINUTES);
+
+ // even if things were leveled out, a jetty may have just been stopped or something
+ // we wait again and wait to level out again to make sure the system is not still in flux
+
+ Thread.sleep(3000);
+
+ waitForThingsToLevelOut(3, TimeUnit.MINUTES);
+
+ checkShardConsistency(batchSize == 1, true);
+
+ if (VERBOSE) System.out.println("control docs:" + controlClient.query(new SolrQuery("*:*")).getResults().getNumFound() + "\n\n");
+
+ // try and make a collection to make sure the overseer has survived the expiration and session loss
+
+ // sometimes we restart zookeeper as well
+ if (random().nextBoolean()) {
+ zkServer.shutdown();
+ zkServer = new ZkTestServer(zkServer.getZkDir(), zkServer.getPort());
+ zkServer.run(false);
+ }
+
+ try (CloudSolrClient client = createCloudClient("collection1")) {
+ createCollection(null, "testcollection", 1, 1, client, null, "conf1");
+
+ }
+ List<Integer> numShardsNumReplicas = new ArrayList<>(2);
+ numShardsNumReplicas.add(1);
+ numShardsNumReplicas.add(1);
+ checkForCollection("testcollection",numShardsNumReplicas, null);
+ }
+
+ private void tryDelete() throws Exception {
+ long start = System.nanoTime();
+ long timeout = start + TimeUnit.NANOSECONDS.convert(10, TimeUnit.SECONDS);
+ while (System.nanoTime() < timeout) {
+ try {
+ del("*:*");
+ break;
+ } catch (SolrServerException e) {
+ // cluster may not be up yet
+ e.printStackTrace();
+ }
+ Thread.sleep(100);
+ }
+ }
+
+ // skip the randoms - they can deadlock...
+ @Override
+ protected void indexr(Object... fields) throws Exception {
+ SolrInputDocument doc = new SolrInputDocument();
+ addFields(doc, fields);
+ addFields(doc, "rnd_b", true);
+ indexDoc(doc);
+ }
+
+}
\ No newline at end of file
diff --git a/solr/test-framework/src/java/org/apache/solr/cloud/AbstractMoveReplicaTestBase.java b/solr/test-framework/src/java/org/apache/solr/cloud/AbstractMoveReplicaTestBase.java
new file mode 100644
index 00000000000..ba893ff236a
--- /dev/null
+++ b/solr/test-framework/src/java/org/apache/solr/cloud/AbstractMoveReplicaTestBase.java
@@ -0,0 +1,377 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.solr.cloud;
+
+import java.io.IOException;
+import java.lang.invoke.MethodHandles;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import org.apache.solr.client.solrj.SolrClient;
+import org.apache.solr.client.solrj.SolrQuery;
+import org.apache.solr.client.solrj.SolrServerException;
+import org.apache.solr.client.solrj.embedded.JettySolrRunner;
+import org.apache.solr.client.solrj.impl.CloudSolrClient;
+import org.apache.solr.client.solrj.impl.HttpSolrClient;
+import org.apache.solr.client.solrj.request.CollectionAdminRequest;
+import org.apache.solr.client.solrj.request.CoreAdminRequest;
+import org.apache.solr.client.solrj.response.CoreAdminResponse;
+import org.apache.solr.client.solrj.response.RequestStatusState;
+import org.apache.solr.common.SolrInputDocument;
+import org.apache.solr.common.cloud.DocCollection;
+import org.apache.solr.common.cloud.Replica;
+import org.apache.solr.common.cloud.Slice;
+import org.apache.solr.common.util.NamedList;
+import org.apache.solr.util.IdUtils;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public abstract class AbstractMoveReplicaTestBase extends SolrCloudTestCase {
+ private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+
+ // used by MoveReplicaHDFSTest
+ protected boolean inPlaceMove = true;
+ protected boolean isCollectionApiDistributed = false;
+
+ protected String getConfigSet() {
+ return "cloud-dynamic";
+ }
+
+ @Before
+ public void beforeTest() throws Exception {
+ inPlaceMove = true;
+
+ configureCluster(4)
+ .addConfig("conf1", configset(getConfigSet()))
+ .addConfig("conf2", configset(getConfigSet()))
+ .withSolrXml(TEST_PATH().resolve("solr.xml"))
+ .configure();
+
+ // If Collection API is distributed let's not wait for Overseer.
+ if (isCollectionApiDistributed = new CollectionAdminRequest.RequestApiDistributedProcessing().process(cluster.getSolrClient()).getIsCollectionApiDistributed()) {
+ return;
+ }
+
+ NamedList<Object> overSeerStatus = cluster.getSolrClient().request(CollectionAdminRequest.getOverseerStatus());
+ JettySolrRunner overseerJetty = null;
+ String overseerLeader = (String) overSeerStatus.get("leader");
+ for (JettySolrRunner jetty : cluster.getJettySolrRunners()) {
+ if (jetty.getNodeName().equals(overseerLeader)) {
+ overseerJetty = jetty;
+ break;
+ }
+ }
+ if (overseerJetty == null) {
+ fail("no overseer leader!");
+ }
+ }
+
+ @After
+ public void afterTest() throws Exception {
+ try {
+ shutdownCluster();
+ } finally {
+ super.tearDown();
+ }
+ }
+
+ @Test
+ // commented out on: 17-Feb-2019 @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // annotated on: 24-Dec-2018
+ public void test() throws Exception {
+ String coll = getTestClass().getSimpleName() + "_coll_" + inPlaceMove;
+ if (log.isInfoEnabled()) {
+ log.info("total_jettys: {}", cluster.getJettySolrRunners().size());
+ }
+ int REPLICATION = 2;
+
+ CloudSolrClient cloudClient = cluster.getSolrClient();
+
+ // random create tlog or pull type replicas with nrt
+ boolean isTlog = random().nextBoolean();
+ CollectionAdminRequest.Create create = CollectionAdminRequest.createCollection(coll, "conf1", 2, 1, isTlog ? 1 : 0, !isTlog ? 1 : 0);
+ cloudClient.request(create);
+
+ addDocs(coll, 100);
+
+ Replica replica = getRandomReplica(coll, cloudClient);
+ Set<String> liveNodes = cloudClient.getZkStateReader().getClusterState().getLiveNodes();
+ ArrayList<String> l = new ArrayList<>(liveNodes);
+ Collections.shuffle(l, random());
+ String targetNode = null;
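+ // pick any live node other than the one currently hosting the replica as the move target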
+ for (String node : liveNodes) {
+ if (!replica.getNodeName().equals(node)) {
+ targetNode = node;
+ break;
+ }
+ }
+ assertNotNull(targetNode);
+ String shardId = null;
+ for (Slice slice : cloudClient.getZkStateReader().getClusterState().getCollection(coll).getSlices()) {
+ if (slice.getReplicas().contains(replica)) {
+ shardId = slice.getName();
+ }
+ }
+
+ int sourceNumCores = getNumOfCores(cloudClient, replica.getNodeName(), coll, replica.getType().name());
+ int targetNumCores = getNumOfCores(cloudClient, targetNode, coll, replica.getType().name());
+
+ CollectionAdminRequest.MoveReplica moveReplica = createMoveReplicaRequest(coll, replica, targetNode);
+ moveReplica.setInPlaceMove(inPlaceMove);
+ String asyncId = IdUtils.randomId();
+ moveReplica.processAsync(asyncId, cloudClient);
+ CollectionAdminRequest.RequestStatus requestStatus = CollectionAdminRequest.requestStatus(asyncId);
+ // wait for async request success
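+ // poll every 500ms, up to 200 attempts; the test fails immediately if the request reports FAILED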
+ boolean success = false;
+ for (int i = 0; i < 200; i++) {
+ CollectionAdminRequest.RequestStatusResponse rsp = requestStatus.process(cloudClient);
+ if (rsp.getRequestStatus() == RequestStatusState.COMPLETED) {
+ success = true;
+ break;
+ }
+ assertNotSame(rsp.getRequestStatus(), RequestStatusState.FAILED);
+ Thread.sleep(500);
+ }
+ assertTrue(success);
+ assertEquals("should be one less core on the source node!", sourceNumCores - 1, getNumOfCores(cloudClient, replica.getNodeName(), coll, replica.getType().name()));
+ assertEquals("should be one more core on target node!", targetNumCores + 1, getNumOfCores(cloudClient, targetNode, coll, replica.getType().name()));
+ // wait for recovery
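+ // poll the collection state until the moved replica is active on the target node and every slice has a leader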
+ boolean recovered = false;
+ for (int i = 0; i < 300; i++) {
+ DocCollection collState = getCollectionState(coll);
+ log.debug("###### {}", collState);
+ Collection<Replica> replicas = collState.getSlice(shardId).getReplicas();
+ boolean allActive = true;
+ boolean hasLeaders = true;
+ if (replicas != null && !replicas.isEmpty()) {
+ for (Replica r : replicas) {
+ if (!r.getNodeName().equals(targetNode)) {
+ continue;
+ }
+ if (!r.isActive(Collections.singleton(targetNode))) {
+ log.info("Not active: {}", r);
+ allActive = false;
+ }
+ }
+ } else {
+ allActive = false;
+ }
+ for (Slice slice : collState.getSlices()) {
+ if (slice.getLeader() == null) {
+ hasLeaders = false;
+ }
+ }
+ if (allActive && hasLeaders) {
+ // check the number of active replicas
+ assertEquals("total number of replicas", REPLICATION, replicas.size());
+ recovered = true;
+ break;
+ } else {
+ log.info("--- waiting, allActive={}, hasLeaders={}", allActive, hasLeaders);
+ Thread.sleep(1000);
+ }
+ }
+ assertTrue("replica never fully recovered", recovered);
+
+ assertEquals(100, cluster.getSolrClient().query(coll, new SolrQuery("*:*")).getResults().getNumFound());
+
+ moveReplica = createMoveReplicaRequest(coll, replica, targetNode, shardId);
+ moveReplica.setInPlaceMove(inPlaceMove);
+ moveReplica.process(cloudClient);
+ checkNumOfCores(cloudClient, replica.getNodeName(), coll, sourceNumCores);
+ // wait for recovery
+ recovered = false;
+ for (int i = 0; i < 300; i++) {
+ DocCollection collState = getCollectionState(coll);
+ log.debug("###### {}", collState);
+ Collection<Replica> replicas = collState.getSlice(shardId).getReplicas();
+ boolean allActive = true;
+ boolean hasLeaders = true;
+ if (replicas != null && !replicas.isEmpty()) {
+ for (Replica r : replicas) {
+ if (!r.getNodeName().equals(replica.getNodeName())) {
+ continue;
+ }
+ if (!r.isActive(Collections.singleton(replica.getNodeName()))) {
+ log.info("Not active yet: {}", r);
+ allActive = false;
+ }
+ }
+ } else {
+ allActive = false;
+ }
+ for (Slice slice : collState.getSlices()) {
+ if (slice.getLeader() == null) {
+ hasLeaders = false;
+ }
+ }
+ if (allActive && hasLeaders) {
+ assertEquals("total number of replicas", REPLICATION, replicas.size());
+ recovered = true;
+ break;
+ } else {
+ Thread.sleep(1000);
+ }
+ }
+ assertTrue("replica never fully recovered", recovered);
+
+ assertEquals(100, cluster.getSolrClient().query(coll, new SolrQuery("*:*")).getResults().getNumFound());
+ }
+
+ //Commented out 5-Dec-2017
+ // @AwaitsFix(bugUrl = "https://issues.apache.org/jira/browse/SOLR-11458")
+ @Test
+ // 12-Jun-2018 @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // 17-Mar-2018 This JIRA is fixed, but this test still fails
+ //17-Aug-2018 commented @LuceneTestCase.BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // 2-Aug-2018
+ // commented out on: 17-Feb-2019 @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // annotated on: 24-Dec-2018
+ public void testFailedMove() throws Exception {
+ String coll = getTestClass().getSimpleName() + "_failed_coll_" + inPlaceMove;
+ int REPLICATION = 2;
+
+ CloudSolrClient cloudClient = cluster.getSolrClient();
+
+ // random create tlog or pull type replicas with nrt
+ boolean isTlog = random().nextBoolean();
+ CollectionAdminRequest.Create create = CollectionAdminRequest.createCollection(coll, "conf1", 2, 1, isTlog ? 1 : 0, !isTlog ? 1 : 0);
+ cloudClient.request(create);
+
+ addDocs(coll, 100);
+
+ NamedList<Object> overSeerStatus = cluster.getSolrClient().request(CollectionAdminRequest.getOverseerStatus());
+ String overseerLeader = (String) overSeerStatus.get("leader");
+
+ // don't kill overseer in this test
+ Replica replica;
+ int count = 10;
+ do {
+ replica = getRandomReplica(coll, cloudClient);
+ } while (!replica.getNodeName().equals(overseerLeader) && count-- > 0);
+ assertNotNull("could not find non-overseer replica???", replica);
+ Set<String> liveNodes = cloudClient.getZkStateReader().getClusterState().getLiveNodes();
+ ArrayList<String> l = new ArrayList<>(liveNodes);
+ Collections.shuffle(l, random());
+ String targetNode = null;
+ for (String node : liveNodes) {
+ if (!replica.getNodeName().equals(node) && (isCollectionApiDistributed || !overseerLeader.equals(node))) {
+ targetNode = node;
+ break;
+ }
+ }
+ assertNotNull(targetNode);
+ CollectionAdminRequest.MoveReplica moveReplica = createMoveReplicaRequest(coll, replica, targetNode);
+ moveReplica.setInPlaceMove(inPlaceMove);
+ // start moving
+ String asyncId = IdUtils.randomId();
+ moveReplica.processAsync(asyncId, cloudClient);
+ // shut down target node
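+ // stopping the target node while the move is in flight should make the async request fail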
+ for (int i = 0; i < cluster.getJettySolrRunners().size(); i++) {
+ if (cluster.getJettySolrRunner(i).getNodeName().equals(targetNode)) {
+ JettySolrRunner j = cluster.stopJettySolrRunner(i);
+ cluster.waitForJettyToStop(j);
+ break;
+ }
+ }
+ CollectionAdminRequest.RequestStatus requestStatus = CollectionAdminRequest.requestStatus(asyncId);
+ // wait for async request success
+ boolean success = true;
+ for (int i = 0; i < 200; i++) {
+ CollectionAdminRequest.RequestStatusResponse rsp = requestStatus.process(cloudClient);
+ assertNotSame(rsp.getRequestStatus().toString(), rsp.getRequestStatus(), RequestStatusState.COMPLETED);
+ if (rsp.getRequestStatus() == RequestStatusState.FAILED) {
+ success = false;
+ break;
+ }
+ Thread.sleep(500);
+ }
+ assertFalse(success);
+
+ if (log.isInfoEnabled()) {
+ log.info("--- current collection state: {}", cloudClient.getZkStateReader().getClusterState().getCollection(coll));
+ }
+ assertEquals(100, cluster.getSolrClient().query(coll, new SolrQuery("*:*")).getResults().getNumFound());
+ }
+
+ private CollectionAdminRequest.MoveReplica createMoveReplicaRequest(String coll, Replica replica, String targetNode, String shardId) {
+ return new CollectionAdminRequest.MoveReplica(coll, shardId, targetNode, replica.getNodeName());
+ }
+
+ private CollectionAdminRequest.MoveReplica createMoveReplicaRequest(String coll, Replica replica, String targetNode) {
+ return new CollectionAdminRequest.MoveReplica(coll, replica.getName(), targetNode);
+ }
+
+ private Replica getRandomReplica(String coll, CloudSolrClient cloudClient) {
+ List<Replica> replicas = cloudClient.getZkStateReader().getClusterState().getCollection(coll).getReplicas();
+ Collections.shuffle(replicas, random());
+ return replicas.get(0);
+ }
+
+ private void checkNumOfCores(CloudSolrClient cloudClient, String nodeName, String collectionName, int expectedCores) throws IOException, SolrServerException {
+ assertEquals(nodeName + " does not have expected number of cores", expectedCores, getNumOfCores(cloudClient, nodeName, collectionName));
+ }
+
+ private int getNumOfCores(CloudSolrClient cloudClient, String nodeName, String collectionName) throws IOException, SolrServerException {
+ return getNumOfCores(cloudClient, nodeName, collectionName, null);
+ }
+
+ private int getNumOfCores(CloudSolrClient cloudClient, String nodeName, String collectionName, String replicaType) throws IOException, SolrServerException {
+ try (HttpSolrClient coreclient = getHttpSolrClient(cloudClient.getZkStateReader().getBaseUrlForNodeName(nodeName))) {
+ CoreAdminResponse status = CoreAdminRequest.getStatus(null, coreclient);
+ if (status.getCoreStatus().size() == 0) {
+ return 0;
+ }
+ if (collectionName == null && replicaType == null) {
+ return status.getCoreStatus().size();
+ }
+ // filter size by collection name
+ int size = 0;
+ for (Map.Entry<String, NamedList<Object>> stringNamedListEntry : status.getCoreStatus()) {
+ if (collectionName != null) {
+ String coll = (String) stringNamedListEntry.getValue().findRecursive("cloud", "collection");
+ if (!collectionName.equals(coll)) {
+ continue;
+ }
+ }
+ if (replicaType != null) {
+ String type = (String) stringNamedListEntry.getValue().findRecursive("cloud", "replicaType");
+ if (!replicaType.equals(type)) {
+ continue;
+ }
+ }
+ size++;
+ }
+ return size;
+ }
+ }
+
+ protected void addDocs(String collection, int numDocs) throws Exception {
+ SolrClient solrClient = cluster.getSolrClient();
+ for (int docId = 1; docId <= numDocs; docId++) {
+ SolrInputDocument doc = new SolrInputDocument();
+ doc.addField("id", docId);
+ solrClient.add(collection, doc);
+ }
+ solrClient.commit(collection);
+ Thread.sleep(5000);
+ }
+}
diff --git a/solr/test-framework/src/java/org/apache/solr/cloud/AbstractRecoveryZkTestBase.java b/solr/test-framework/src/java/org/apache/solr/cloud/AbstractRecoveryZkTestBase.java
new file mode 100644
index 00000000000..388980ec2bd
--- /dev/null
+++ b/solr/test-framework/src/java/org/apache/solr/cloud/AbstractRecoveryZkTestBase.java
@@ -0,0 +1,156 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.cloud;
+
+import java.lang.invoke.MethodHandles;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.lucene.util.LuceneTestCase.Slow;
+import org.apache.solr.client.solrj.SolrQuery;
+import org.apache.solr.client.solrj.embedded.JettySolrRunner;
+import org.apache.solr.client.solrj.impl.HttpSolrClient;
+import org.apache.solr.client.solrj.request.CollectionAdminRequest;
+import org.apache.solr.client.solrj.request.UpdateRequest;
+import org.apache.solr.common.cloud.DocCollection;
+import org.apache.solr.common.cloud.Replica;
+import org.apache.solr.common.cloud.Slice;
+import org.junit.After;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@Slow
+public abstract class AbstractRecoveryZkTestBase extends SolrCloudTestCase {
+
+ private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+
+ @BeforeClass
+ public static void setupCluster() throws Exception {
+ configureCluster(2)
+ .addConfig("conf", configset("cloud-minimal"))
+ .configure();
+ }
+
+ private final List<StoppableIndexingThread> threads = new ArrayList<>();
+
+ @After
+ public void stopThreads() throws InterruptedException {
+ for (StoppableIndexingThread t : threads) {
+ t.safeStop();
+ }
+ for (StoppableIndexingThread t : threads) {
+ t.join();
+ }
+ threads.clear();
+ }
+
+ @Test
+ //commented 2-Aug-2018 @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // 28-June-2018
+ public void test() throws Exception {
+
+ final String collection = "recoverytest";
+
+ CollectionAdminRequest.createCollection(collection, "conf", 1, 2)
+ .process(cluster.getSolrClient());
+ waitForState("Expected a collection with one shard and two replicas", collection, clusterShape(1, 2));
+ cluster.getSolrClient().setDefaultCollection(collection);
+
+ // start a couple indexing threads
+
+ int[] maxDocList = new int[] {300, 700, 1200, 1350, 3000};
+ int[] maxDocNightlyList = new int[] {3000, 7000, 12000, 30000, 45000, 60000};
+
+ int maxDoc;
+ if (!TEST_NIGHTLY) {
+ maxDoc = maxDocList[random().nextInt(maxDocList.length - 1)];
+ } else {
+ maxDoc = maxDocNightlyList[random().nextInt(maxDocNightlyList.length - 1)];
+ }
+ log.info("Indexing {} documents", maxDoc);
+
+ final StoppableIndexingThread indexThread
+ = new StoppableIndexingThread(null, cluster.getSolrClient(), "1", true, maxDoc, 1, true);
+ threads.add(indexThread);
+ indexThread.start();
+
+ final StoppableIndexingThread indexThread2
+ = new StoppableIndexingThread(null, cluster.getSolrClient(), "2", true, maxDoc, 1, true);
+ threads.add(indexThread2);
+ indexThread2.start();
+
+ // give some time to index...
+ int[] waitTimes = new int[] {200, 2000, 3000};
+ Thread.sleep(waitTimes[random().nextInt(waitTimes.length - 1)]);
+
+ // bring shard replica down
+ DocCollection state = getCollectionState(collection);
+ Replica leader = state.getLeader("shard1");
+ Replica replica = getRandomReplica(state.getSlice("shard1"), (r) -> leader != r);
+
+ JettySolrRunner jetty = cluster.getReplicaJetty(replica);
+ jetty.stop();
+
+ // wait a moment - let's allow some docs to be indexed so replication time is non-zero
+ Thread.sleep(waitTimes[random().nextInt(waitTimes.length - 1)]);
+
+ // bring shard replica up
+ jetty.start();
+
+ // make sure replication can start
+ Thread.sleep(3000);
+
+ // stop indexing threads
+ indexThread.safeStop();
+ indexThread2.safeStop();
+
+ indexThread.join();
+ indexThread2.join();
+
+ new UpdateRequest()
+ .commit(cluster.getSolrClient(), collection);
+
+ cluster.getSolrClient().waitForState(collection, 120, TimeUnit.SECONDS, clusterShape(1, 2));
+
+ // test that leader and replica have same doc count
+ state = getCollectionState(collection);
+ assertShardConsistency(state.getSlice("shard1"), true);
+
+ }
+
+ private void assertShardConsistency(Slice shard, boolean expectDocs) throws Exception {
+ List<Replica> replicas = shard.getReplicas(r -> r.getState() == Replica.State.ACTIVE);
+ long[] numCounts = new long[replicas.size()];
+ int i = 0;
+ for (Replica replica : replicas) {
+ try (HttpSolrClient client = new HttpSolrClient.Builder(replica.getCoreUrl())
+ .withHttpClient(cluster.getSolrClient().getHttpClient()).build()) {
+ numCounts[i] = client.query(new SolrQuery("*:*").add("distrib", "false")).getResults().getNumFound();
+ i++;
+ }
+ }
+ for (int j = 1; j < replicas.size(); j++) {
+ if (numCounts[j] != numCounts[j - 1])
+ fail("Mismatch in counts between replicas"); // TODO improve this!
+ if (numCounts[j] == 0 && expectDocs)
+ fail("Expected docs on shard " + shard.getName() + " but found none");
+ }
+ }
+
+}
diff --git a/solr/test-framework/src/java/org/apache/solr/cloud/AbstractRestartWhileUpdatingTestBase.java b/solr/test-framework/src/java/org/apache/solr/cloud/AbstractRestartWhileUpdatingTestBase.java
new file mode 100644
index 00000000000..a1e508cf209
--- /dev/null
+++ b/solr/test-framework/src/java/org/apache/solr/cloud/AbstractRestartWhileUpdatingTestBase.java
@@ -0,0 +1,197 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.cloud;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.solr.client.solrj.SolrServerException;
+import org.apache.solr.common.SolrInputDocument;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+public abstract class AbstractRestartWhileUpdatingTestBase extends AbstractFullDistribZkTestBase {
+
+ //private static final String DISTRIB_UPDATE_CHAIN = "distrib-update-chain";
+ private List<StoppableIndexingThread> threads;
+
+ private volatile boolean stopExpire = false;
+
+ public AbstractRestartWhileUpdatingTestBase() throws Exception {
+ super();
+ sliceCount = 1;
+ fixShardCount(3);
+ schemaString = "schema15.xml"; // we need a string id
+ useFactory("solr.StandardDirectoryFactory");
+ }
+
+ public static String[] fieldNames = new String[]{"f_i", "f_f", "f_d", "f_l", "f_dt"};
+ public static RandVal[] randVals = new RandVal[]{rint, rfloat, rdouble, rlong, rdate};
+
+ protected String[] getFieldNames() {
+ return fieldNames;
+ }
+
+ protected RandVal[] getRandValues() {
+ return randVals;
+ }
+
+ @BeforeClass
+ public static void beforeRestartWhileUpdatingTest() {
+ System.setProperty("leaderVoteWait", "300000");
+ System.setProperty("solr.autoCommit.maxTime", "30000");
+ System.setProperty("solr.autoSoftCommit.maxTime", "3000");
+ // SOLR-13212 // TestInjection.nonGracefullClose = "true:60";
+ // SOLR-13189 // TestInjection.failReplicaRequests = "true:03";
+ }
+
+ @AfterClass
+ public static void afterRestartWhileUpdatingTest() {
+ System.clearProperty("leaderVoteWait");
+ System.clearProperty("solr.autoCommit.maxTime");
+ System.clearProperty("solr.autoSoftCommit.maxTime");
+ }
+
+
+ @Test
+ public void test() throws Exception {
+ handle.clear();
+ handle.put("timestamp", SKIPVAL);
+
+ // start a couple indexing threads
+
+ int[] maxDocList = new int[] {5000, 10000};
+
+
+ int maxDoc = maxDocList[random().nextInt(maxDocList.length - 1)];
+
+ int numThreads = random().nextInt(4) + 1;
+
+ threads = new ArrayList<>(numThreads);
+
+ Thread expireThread = new Thread("expireThread") {
+ public void run() {
+ while (!stopExpire) {
+ try {
+ Thread.sleep(random().nextInt(15000));
+ } catch (InterruptedException e) {
+ throw new RuntimeException(e);
+ }
+
+// try {
+// chaosMonkey.expireRandomSession();
+// } catch (KeeperException e) {
+// throw new RuntimeException(e);
+// } catch (InterruptedException e) {
+// throw new RuntimeException(e);
+// }
+ }
+ }
+ };
+
+// Currently unused
+// expireThread.start();
+
+ StoppableIndexingThread indexThread;
+ for (int i = 0; i < numThreads; i++) {
+ indexThread = new StoppableIndexingThread(controlClient, cloudClient, Integer.toString(i), true, maxDoc, 1, true);
+ threads.add(indexThread);
+ indexThread.start();
+ }
+
+ Thread.sleep(2000);
+
+ int restartTimes = 1; // random().nextInt(4) + 1;
+ for (int i = 0; i < restartTimes; i++) {
+ Thread.sleep(random().nextInt(30000));
+ stopAndStartAllReplicas();
+ Thread.sleep(random().nextInt(30000));
+ }
+
+ Thread.sleep(2000);
+
+ // stop indexing threads
+ for (StoppableIndexingThread thread : threads) {
+ thread.safeStop();
+ }
+ stopExpire = true;
+ expireThread.join();
+
+ Thread.sleep(1000);
+
+ waitForThingsToLevelOut(320, TimeUnit.SECONDS);
+
+ Thread.sleep(2000);
+
+ waitForThingsToLevelOut(30, TimeUnit.SECONDS);
+
+ Thread.sleep(5000);
+
+ waitForRecoveriesToFinish(DEFAULT_COLLECTION, cloudClient.getZkStateReader(), false, true);
+
+ for (StoppableIndexingThread thread : threads) {
+ thread.join();
+ }
+
+ checkShardConsistency(false, false);
+ }
+
+ public void stopAndStartAllReplicas() throws Exception, InterruptedException {
+ chaosMonkey.stopAll(random().nextInt(1));
+
+ if (random().nextBoolean()) {
+ for (StoppableIndexingThread thread : threads) {
+ thread.safeStop();
+ }
+ }
+ Thread.sleep(1000);
+
+ chaosMonkey.startAll();
+ }
+
+ @Override
+ protected void indexDoc(SolrInputDocument doc) throws IOException,
+ SolrServerException {
+ cloudClient.add(doc);
+ }
+
+
+ @Override
+ public void distribTearDown() throws Exception {
+ // make sure threads have been stopped...
+ if (threads != null) {
+ for (StoppableIndexingThread thread : threads) {
+ thread.safeStop();
+ thread.safeStop();
+ }
+ }
+
+ super.distribTearDown();
+ }
+
+ // skip the randoms - they can deadlock...
+ @Override
+ protected void indexr(Object... fields) throws Exception {
+ SolrInputDocument doc = new SolrInputDocument();
+ addFields(doc, fields);
+ addFields(doc, "rnd_b", true);
+ indexDoc(doc);
+ }
+}
diff --git a/solr/test-framework/src/java/org/apache/solr/cloud/AbstractSyncSliceTestBase.java b/solr/test-framework/src/java/org/apache/solr/cloud/AbstractSyncSliceTestBase.java
new file mode 100644
index 00000000000..4cfa67ed7f0
--- /dev/null
+++ b/solr/test-framework/src/java/org/apache/solr/cloud/AbstractSyncSliceTestBase.java
@@ -0,0 +1,308 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.cloud;
+
+import org.apache.solr.client.solrj.SolrQuery;
+import org.apache.solr.client.solrj.SolrServerException;
+import org.apache.solr.client.solrj.impl.HttpSolrClient;
+import org.apache.solr.client.solrj.request.QueryRequest;
+import org.apache.solr.client.solrj.request.UpdateRequest;
+import org.apache.solr.common.SolrInputDocument;
+import org.apache.solr.common.cloud.ClusterState;
+import org.apache.solr.common.cloud.DocCollection;
+import org.apache.solr.common.cloud.Replica;
+import org.apache.solr.common.cloud.Slice;
+import org.apache.solr.common.cloud.ZkStateReader;
+import org.apache.solr.common.params.CollectionParams.CollectionAction;
+import org.apache.solr.common.params.ModifiableSolrParams;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+import java.util.concurrent.TimeUnit;
+
+/**
+ * Test sync phase that occurs when Leader goes down and a new Leader is
+ * elected.
+ */
+public abstract class AbstractSyncSliceTestBase extends AbstractFullDistribZkTestBase {
+ private boolean success = false;
+
+ @Override
+ public void distribTearDown() throws Exception {
+ if (!success) {
+ printLayoutOnTearDown = true;
+ }
+ super.distribTearDown();
+ }
+
+ public AbstractSyncSliceTestBase() {
+ super();
+ sliceCount = 1;
+ fixShardCount(TEST_NIGHTLY ? 7 : 4);
+ }
+
+ @Test
+ public void test() throws Exception {
+
+ handle.clear();
+ handle.put("timestamp", SKIPVAL);
+
+ waitForThingsToLevelOut(30, TimeUnit.SECONDS);
+
+ del("*:*");
+ List<CloudJettyRunner> skipServers = new ArrayList<>();
+ int docId = 0;
+ indexDoc(skipServers, id, docId++, i1, 50, tlong, 50, t1,
+ "to come to the aid of their country.");
+
+ indexDoc(skipServers, id, docId++, i1, 50, tlong, 50, t1,
+ "old haven was blue.");
+
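+ // docs indexed with a non-empty skip list are not sent to those jetties, deliberately making shard1 inconsistent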
+ skipServers.add(shardToJetty.get("shard1").get(1));
+
+ indexDoc(skipServers, id, docId++, i1, 50, tlong, 50, t1,
+ "but the song was fancy.");
+
+ skipServers.add(shardToJetty.get("shard1").get(2));
+
+ indexDoc(skipServers, id,docId++, i1, 50, tlong, 50, t1,
+ "under the moon and over the lake");
+
+ commit();
+
+ waitForRecoveriesToFinish(false);
+
+ // shard should be inconsistent
+ String shardFailMessage = checkShardConsistency("shard1", true, false);
+ assertNotNull(shardFailMessage);
+
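+ // ask the collections API to SYNCSHARD so the replicas that were skipped during indexing sync from the leader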
+ ModifiableSolrParams params = new ModifiableSolrParams();
+ params.set("action", CollectionAction.SYNCSHARD.toString());
+ params.set("collection", "collection1");
+ params.set("shard", "shard1");
+ QueryRequest request = new QueryRequest(params);
+ request.setPath("/admin/collections");
+
+ String baseUrl = ((HttpSolrClient) shardToJetty.get("shard1").get(2).client.solrClient)
+ .getBaseURL();
+ baseUrl = baseUrl.substring(0, baseUrl.length() - "collection1".length());
+
+ // we only set the connect timeout, not the socket timeout
+ try (HttpSolrClient baseClient = getHttpSolrClient(baseUrl, 30000)) {
+ baseClient.request(request);
+ }
+
+ waitForThingsToLevelOut(15, TimeUnit.SECONDS);
+
+ checkShardConsistency(false, true);
+
+ long cloudClientDocs = cloudClient.query(new SolrQuery("*:*")).getResults().getNumFound();
+ assertEquals(4, cloudClientDocs);
+
+
+ // kill the leader - new leader could have all the docs or be missing one
+ CloudJettyRunner leaderJetty = shardToLeaderJetty.get("shard1");
+
+ skipServers = getRandomOtherJetty(leaderJetty, null); // but not the leader
+
+ // this doc won't be on one node
+ indexDoc(skipServers, id, docId++, i1, 50, tlong, 50, t1,
+ "to come to the aid of their country.");
+ commit();
+
+
+ Set<CloudJettyRunner> jetties = new HashSet<>();
+ jetties.addAll(shardToJetty.get("shard1"));
+ jetties.remove(leaderJetty);
+ assertEquals(getShardCount() - 1, jetties.size());
+
+ leaderJetty.jetty.stop();
+
+ Thread.sleep(3000);
+
+ waitForNoShardInconsistency();
+
+ Thread.sleep(1000);
+
+ checkShardConsistency(false, true);
+
+ cloudClientDocs = cloudClient.query(new SolrQuery("*:*")).getResults().getNumFound();
+ assertEquals(5, cloudClientDocs);
+
+ CloudJettyRunner deadJetty = leaderJetty;
+
+ // let's get the latest leader
+ while (deadJetty == leaderJetty) {
+ updateMappingsFromZk(this.jettys, this.clients);
+ leaderJetty = shardToLeaderJetty.get("shard1");
+ }
+
+ // bring back dead node
+ deadJetty.jetty.start(); // he is not the leader anymore
+
+ waitTillAllNodesActive();
+
+ skipServers = getRandomOtherJetty(leaderJetty, deadJetty);
+ skipServers.addAll( getRandomOtherJetty(leaderJetty, deadJetty));
+ // skip list should be two nodes, neither the leader nor the restarted node
+
+// System.out.println("leader:" + leaderJetty.url);
+// System.out.println("dead:" + deadJetty.url);
+// System.out.println("skip list:" + skipServers);
+
+ // we are skipping 2 nodes
+ assertEquals(2, skipServers.size());
+
+ // more docs than can peer sync
+ for (int i = 0; i < 300; i++) {
+ indexDoc(skipServers, id, docId++, i1, 50, tlong, 50, t1,
+ "to come to the aid of their country.");
+ }
+
+ commit();
+
+ Thread.sleep(1000);
+
+ waitForRecoveriesToFinish(false);
+
+ // shard should be inconsistent
+ shardFailMessage = waitTillInconsistent();
+ assertNotNull(
+ "Test Setup Failure: shard1 should have just been set up to be inconsistent - but it's still consistent. Leader:"
+ + leaderJetty.url + " Dead Guy:" + deadJetty.url + " skip list:" + skipServers, shardFailMessage);
+
+ // good place to test compareResults
+ boolean shouldFail = CloudInspectUtil.compareResults(controlClient, cloudClient);
+ assertTrue("A test that compareResults is working correctly failed", shouldFail);
+
+ jetties = new HashSet<>();
+ jetties.addAll(shardToJetty.get("shard1"));
+ jetties.remove(leaderJetty);
+ assertEquals(getShardCount() - 1, jetties.size());
+
+
+ // kill the current leader
+ leaderJetty.jetty.stop();
+
+ waitForNoShardInconsistency();
+
+ checkShardConsistency(true, true);
+
+ success = true;
+ }
+
+ private void waitTillAllNodesActive() throws Exception {
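+ // poll cluster state (up to ~3 minutes) until every replica of shard1 is ACTIVE on a live node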
+ for (int i = 0; i < 60; i++) {
+ Thread.sleep(3000);
+ ZkStateReader zkStateReader = cloudClient.getZkStateReader();
+ ClusterState clusterState = zkStateReader.getClusterState();
+ DocCollection collection1 = clusterState.getCollection("collection1");
+ Slice slice = collection1.getSlice("shard1");
+ Collection<Replica> replicas = slice.getReplicas();
+ boolean allActive = true;
+ for (Replica replica : replicas) {
+ if (!clusterState.liveNodesContain(replica.getNodeName()) || replica.getState() != Replica.State.ACTIVE) {
+ allActive = false;
+ break;
+ }
+ }
+ if (allActive) {
+ return;
+ }
+ }
+ printLayout();
+ fail("timeout waiting to see all nodes active");
+ }
+
+ private String waitTillInconsistent() throws Exception, InterruptedException {
+ String shardFailMessage = null;
+
+ shardFailMessage = pollConsistency(shardFailMessage, 0);
+ shardFailMessage = pollConsistency(shardFailMessage, 3000);
+ shardFailMessage = pollConsistency(shardFailMessage, 5000);
+ shardFailMessage = pollConsistency(shardFailMessage, 15000);
+
+ return shardFailMessage;
+ }
+
+ private String pollConsistency(String shardFailMessage, int sleep)
+ throws InterruptedException, Exception {
+ try {
+ commit();
+ } catch (Throwable t) {
+ t.printStackTrace();
+ }
+ if (shardFailMessage == null) {
+ // try again
+ Thread.sleep(sleep);
+ shardFailMessage = checkShardConsistency("shard1", true, false);
+ }
+ return shardFailMessage;
+ }
+
+ private List<CloudJettyRunner> getRandomOtherJetty(CloudJettyRunner leader, CloudJettyRunner down) {
+ List<CloudJettyRunner> skipServers = new ArrayList<>();
+ List<CloudJettyRunner> candidates = new ArrayList<>();
+ candidates.addAll(shardToJetty.get("shard1"));
+
+ if (leader != null) {
+ candidates.remove(leader);
+ }
+
+ if (down != null) {
+ candidates.remove(down);
+ }
+
+ CloudJettyRunner cjetty = candidates.get(random().nextInt(candidates.size()));
+ skipServers.add(cjetty);
+ return skipServers;
+ }
+
+ protected void indexDoc(List<CloudJettyRunner> skipServers, Object... fields) throws IOException,
+ SolrServerException {
+ SolrInputDocument doc = new SolrInputDocument();
+
+ addFields(doc, fields);
+ addFields(doc, "rnd_b", true);
+
+ controlClient.add(doc);
+
+ UpdateRequest ureq = new UpdateRequest();
+ ureq.add(doc);
+ ModifiableSolrParams params = new ModifiableSolrParams();
+ for (CloudJettyRunner skip : skipServers) {
+ params.add("test.distrib.skip.servers", skip.url + "/");
+ }
+ ureq.setParams(params);
+ ureq.process(cloudClient);
+ }
+
+ // skip the randoms - they can deadlock...
+ @Override
+ protected void indexr(Object... fields) throws Exception {
+ SolrInputDocument doc = new SolrInputDocument();
+ addFields(doc, fields);
+ addFields(doc, "rnd_b", true);
+ indexDoc(doc);
+ }
+
+}
diff --git a/solr/test-framework/src/java/org/apache/solr/cloud/AbstractTlogReplayBufferedWhileIndexingTestBase.java b/solr/test-framework/src/java/org/apache/solr/cloud/AbstractTlogReplayBufferedWhileIndexingTestBase.java
new file mode 100644
index 00000000000..16aaf838047
--- /dev/null
+++ b/solr/test-framework/src/java/org/apache/solr/cloud/AbstractTlogReplayBufferedWhileIndexingTestBase.java
@@ -0,0 +1,139 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.cloud;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.solr.client.solrj.SolrServerException;
+import org.apache.solr.client.solrj.embedded.JettySolrRunner;
+import org.apache.solr.common.SolrInputDocument;
+import org.apache.solr.util.TestInjection;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+public abstract class AbstractTlogReplayBufferedWhileIndexingTestBase extends AbstractFullDistribZkTestBase {
+
+ private List<StoppableIndexingThread> threads;
+
+ public AbstractTlogReplayBufferedWhileIndexingTestBase() throws Exception {
+ super();
+ sliceCount = 1;
+ fixShardCount(2);
+ schemaString = "schema15.xml"; // we need a string id
+ }
+
+ @BeforeClass
+ public static void beforeRestartWhileUpdatingTest() throws Exception {
+ System.setProperty("leaderVoteWait", "300000");
+ System.setProperty("solr.autoCommit.maxTime", "10000");
+ System.setProperty("solr.autoSoftCommit.maxTime", "3000");
+ TestInjection.updateLogReplayRandomPause = "true:10";
+ TestInjection.updateRandomPause = "true:10";
+ useFactory("solr.StandardDirectoryFactory");
+ }
+
+ @AfterClass
+ public static void afterRestartWhileUpdatingTest() {
+ System.clearProperty("leaderVoteWait");
+ System.clearProperty("solr.autoCommit.maxTime");
+ System.clearProperty("solr.autoSoftCommit.maxTime");
+ }
+
+ @Test
+ public void test() throws Exception {
+ handle.clear();
+ handle.put("timestamp", SKIPVAL);
+
+ waitForRecoveriesToFinish(false);
+
+ int numThreads = 3;
+
+ threads = new ArrayList<>(numThreads);
+
+ ArrayList<JettySolrRunner> allJetty = new ArrayList<>();
+ allJetty.addAll(jettys);
+ allJetty.remove(shardToLeaderJetty.get("shard1").jetty);
+ assert allJetty.size() == 1 : allJetty.size();
+ allJetty.get(0).stop();
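+ // the lone non-leader replica is now down; once restarted it will buffer incoming updates in its tlog and replay them while the indexing threads keep running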
+
+ StoppableIndexingThread indexThread;
+ for (int i = 0; i < numThreads; i++) {
+ boolean pauseBetweenUpdates = random().nextBoolean();
+ int batchSize = random().nextInt(4) + 1;
+ indexThread = new StoppableIndexingThread(controlClient, cloudClient, Integer.toString(i), true, 900, batchSize, pauseBetweenUpdates);
+ threads.add(indexThread);
+ indexThread.start();
+ }
+
+ Thread.sleep(2000);
+
+ allJetty.get(0).start();
+
+ Thread.sleep(45000);
+
+ waitForThingsToLevelOut(); // we can insert random update delays, so this can take a while, especially when beasting this test
+
+ Thread.sleep(2000);
+
+ waitForRecoveriesToFinish(DEFAULT_COLLECTION, cloudClient.getZkStateReader(), false, true);
+
+ for (StoppableIndexingThread thread : threads) {
+ thread.safeStop();
+ }
+
+ waitForThingsToLevelOut(30, TimeUnit.SECONDS);
+
+ checkShardConsistency(false, false);
+
+ }
+
+ @Override
+ protected void indexDoc(SolrInputDocument doc) throws IOException,
+ SolrServerException {
+ cloudClient.add(doc);
+ }
+
+
+ @Override
+ public void distribTearDown() throws Exception {
+ // make sure threads have been stopped...
+ if (threads != null) {
+ for (StoppableIndexingThread thread : threads) {
+ thread.safeStop();
+ }
+
+ for (StoppableIndexingThread thread : threads) {
+ thread.join();
+ }
+ }
+
+ super.distribTearDown();
+ }
+
+ // skip the randoms - they can deadlock...
+ @Override
+ protected void indexr(Object... fields) throws Exception {
+ SolrInputDocument doc = new SolrInputDocument();
+ addFields(doc, fields);
+ addFields(doc, "rnd_b", true);
+ indexDoc(doc);
+ }
+}
diff --git a/solr/test-framework/src/java/org/apache/solr/cloud/AbstractUnloadDistributedZkTestBase.java b/solr/test-framework/src/java/org/apache/solr/cloud/AbstractUnloadDistributedZkTestBase.java
new file mode 100644
index 00000000000..de068bbf640
--- /dev/null
+++ b/solr/test-framework/src/java/org/apache/solr/cloud/AbstractUnloadDistributedZkTestBase.java
@@ -0,0 +1,372 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.cloud;
+
+import org.apache.solr.client.solrj.SolrQuery;
+import org.apache.solr.client.solrj.SolrServerException;
+import org.apache.solr.client.solrj.embedded.JettySolrRunner;
+import org.apache.solr.client.solrj.impl.HttpSolrClient;
+import org.apache.solr.client.solrj.request.CollectionAdminRequest;
+import org.apache.solr.client.solrj.request.CoreAdminRequest.Unload;
+import org.apache.solr.common.SolrInputDocument;
+import org.apache.solr.common.cloud.DocCollection;
+import org.apache.solr.common.cloud.Replica;
+import org.apache.solr.common.cloud.Slice;
+import org.apache.solr.common.cloud.ZkCoreNodeProps;
+import org.apache.solr.common.cloud.ZkStateReader;
+import org.apache.solr.common.params.ModifiableSolrParams;
+import org.apache.solr.common.util.ExecutorUtil;
+import org.apache.solr.common.util.TimeSource;
+import org.apache.solr.core.SolrCore;
+import org.apache.solr.common.util.SolrNamedThreadFactory;
+import org.apache.solr.core.SolrPaths;
+import org.apache.solr.util.TestInjection;
+import org.apache.solr.util.TimeOut;
+import org.junit.Test;
+
+
+import java.io.IOException;
+import java.nio.file.Path;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Random;
+import java.util.Set;
+import java.util.concurrent.SynchronousQueue;
+import java.util.concurrent.ThreadPoolExecutor;
+import java.util.concurrent.TimeUnit;
+
+/**
+ * This test simply does a bunch of basic things in solrcloud mode and asserts things
+ * work as expected.
+ */
+public abstract class AbstractUnloadDistributedZkTestBase extends AbstractBasicDistributedZkTestBase {
+ public AbstractUnloadDistributedZkTestBase() {
+ super();
+ }
+
+ protected String getSolrXml() {
+ return "solr.xml";
+ }
+
+ @Test
+ public void test() throws Exception {
+ jettys.forEach(j -> {
+ Set<Path> allowPath = j.getCoreContainer().getAllowPaths();
+ allowPath.clear();
+ allowPath.add(SolrPaths.ALL_PATH); // Allow non-standard core instance path
+ });
+ testCoreUnloadAndLeaders(); // long
+ testUnloadLotsOfCores(); // long
+
+ testUnloadShardAndCollection();
+ }
+
+ private void checkCoreNamePresenceAndSliceCount(String collectionName, String coreName,
+ boolean shouldBePresent, int expectedSliceCount) throws Exception {
+ final TimeOut timeout = new TimeOut(45, TimeUnit.SECONDS, TimeSource.NANO_TIME);
+ Boolean isPresent = null; // null meaning "don't know"
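+ // poll cluster state until the slice count matches and the core's presence matches the expectation, or the timeout expires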
+ while (null == isPresent || shouldBePresent != isPresent) {
+ final DocCollection docCollection = getCommonCloudSolrClient().getZkStateReader().getClusterState().getCollectionOrNull(collectionName);
+ final Collection<Slice> slices = (docCollection != null) ? docCollection.getSlices() : Collections.emptyList();
+ if (timeout.hasTimedOut()) {
+ printLayout();
+ fail("checkCoreNamePresenceAndSliceCount failed:"
+ +" collection="+collectionName+" CoreName="+coreName
+ +" shouldBePresent="+shouldBePresent+" isPresent="+isPresent
+ +" expectedSliceCount="+expectedSliceCount+" actualSliceCount="+slices.size());
+ }
+ if (expectedSliceCount == slices.size()) {
+ isPresent = false;
+ for (Slice slice : slices) {
+ for (Replica replica : slice.getReplicas()) {
+ if (coreName.equals(replica.get("core"))) {
+ isPresent = true;
+ }
+ }
+ }
+ }
+ Thread.sleep(1000);
+ }
+ }
+
+ private void testUnloadShardAndCollection() throws Exception{
+ final int numShards = 2;
+
+ final String collection = "test_unload_shard_and_collection";
+
+ final String coreName1 = collection+"_1";
+ final String coreName2 = collection+"_2";
+
+ assertEquals(0, CollectionAdminRequest.createCollection(collection, "conf1", numShards, 1)
+ .setCreateNodeSet("")
+ .process(cloudClient).getStatus());
+ assertTrue(CollectionAdminRequest.addReplicaToShard(collection, "shard1")
+ .setCoreName(coreName1)
+ .setNode(jettys.get(0).getNodeName())
+ .process(cloudClient).isSuccess());
+
+ assertTrue(CollectionAdminRequest.addReplicaToShard(collection, "shard2")
+ .setCoreName(coreName2)
+ .setNode(jettys.get(0).getNodeName())
+ .process(cloudClient).isSuccess());
+
+
+ // does not mean they are active and up yet :*
+ waitForRecoveriesToFinish(collection, false);
+
+ final boolean unloadInOrder = random().nextBoolean();
+ final String unloadCmdCoreName1 = (unloadInOrder ? coreName1 : coreName2);
+ final String unloadCmdCoreName2 = (unloadInOrder ? coreName2 : coreName1);
+
+ try (HttpSolrClient adminClient = getHttpSolrClient(buildUrl(jettys.get(0).getLocalPort()))) {
+ // now unload one of the two
+ Unload unloadCmd = new Unload(false);
+ unloadCmd.setCoreName(unloadCmdCoreName1);
+ adminClient.request(unloadCmd);
+
+ // there should still be two shards (as of SOLR-5209)
+ checkCoreNamePresenceAndSliceCount(collection, unloadCmdCoreName1, false /* shouldBePresent */, numShards /* expectedSliceCount */);
+
+ // now unload the other one
+ unloadCmd = new Unload(false);
+ unloadCmd.setCoreName(unloadCmdCoreName2);
+ adminClient.request(unloadCmd);
+ checkCoreNamePresenceAndSliceCount(collection, unloadCmdCoreName2, false /* shouldBePresent */, numShards /* expectedSliceCount */);
+ }
+
+ //printLayout();
+ // the collection should still be present (as of SOLR-5209 replica removal does not cascade to remove the slice and collection)
+ assertTrue("No longer found collection "+collection, getCommonCloudSolrClient().getZkStateReader().getClusterState().hasCollection(collection));
+ }
+
+ protected SolrCore getFirstCore(String collection, JettySolrRunner jetty) {
+ SolrCore solrCore = null;
+ for (SolrCore core : jetty.getCoreContainer().getCores()) {
+ if (core.getName().startsWith(collection)) {
+ solrCore = core;
+ }
+ }
+ return solrCore;
+ }
+
+ /**
+ * @throws Exception on any problem
+ */
+ private void testCoreUnloadAndLeaders() throws Exception {
+ JettySolrRunner jetty1 = jettys.get(0);
+
+ assertEquals(0, CollectionAdminRequest
+ .createCollection("unloadcollection", "conf1", 1,1)
+ .setCreateNodeSet(jetty1.getNodeName())
+ .process(cloudClient).getStatus());
+ ZkStateReader zkStateReader = getCommonCloudSolrClient().getZkStateReader();
+
+ zkStateReader.forceUpdateCollection("unloadcollection");
+
+ int slices = zkStateReader.getClusterState().getCollection("unloadcollection").getSlices().size();
+ assertEquals(1, slices);
+ SolrCore solrCore = getFirstCore("unloadcollection", jetty1);
+ String core1DataDir = solrCore.getDataDir();
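+ // remember the original data dir so the core can be re-added on this node later in the test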
+
+ assertTrue(CollectionAdminRequest
+ .addReplicaToShard("unloadcollection", "shard1")
+ .setCoreName("unloadcollection_shard1_replica2")
+ .setNode(jettys.get(1).getNodeName())
+ .process(cloudClient).isSuccess());
+ zkStateReader.forceUpdateCollection("unloadcollection");
+ slices = zkStateReader.getClusterState().getCollection("unloadcollection").getSlices().size();
+ assertEquals(1, slices);
+
+ waitForRecoveriesToFinish("unloadcollection", zkStateReader, false);
+
+ ZkCoreNodeProps leaderProps = getLeaderUrlFromZk("unloadcollection", "shard1");
+
+ Random random = random();
+ if (random.nextBoolean()) {
+ try (HttpSolrClient collectionClient = getHttpSolrClient(leaderProps.getCoreUrl())) {
+ // let's try to use the solrj client to index and retrieve a couple
+ // documents
+ SolrInputDocument doc1 = getDoc(id, 6, i1, -600, tlong, 600, t1,
+ "humpty dumpy sat on a wall");
+ SolrInputDocument doc2 = getDoc(id, 7, i1, -600, tlong, 600, t1,
+ "humpty dumpy3 sat on a walls");
+ SolrInputDocument doc3 = getDoc(id, 8, i1, -600, tlong, 600, t1,
+ "humpty dumpy2 sat on a walled");
+ collectionClient.add(doc1);
+ collectionClient.add(doc2);
+ collectionClient.add(doc3);
+ collectionClient.commit();
+ }
+ }
+
+ assertTrue(CollectionAdminRequest
+ .addReplicaToShard("unloadcollection", "shard1")
+ .setCoreName("unloadcollection_shard1_replica3")
+ .setNode(jettys.get(2).getNodeName())
+ .process(cloudClient).isSuccess());
+
+ waitForRecoveriesToFinish("unloadcollection", zkStateReader, false);
+
+ // so that we start with some versions when we reload...
+ TestInjection.skipIndexWriterCommitOnClose = true;
+
+ try (HttpSolrClient addClient = getHttpSolrClient(jettys.get(2).getBaseUrl() + "/unloadcollection_shard1_replica3", 30000)) {
+
+ // add a few docs
+ for (int x = 20; x < 100; x++) {
+ SolrInputDocument doc1 = getDoc(id, x, i1, -600, tlong, 600, t1,
+ "humpty dumpy sat on a wall");
+ addClient.add(doc1);
+ }
+ }
+ // don't commit so they remain in the transaction log
+ //collectionClient.commit();
+
+ // unload the leader
+ try (HttpSolrClient collectionClient = getHttpSolrClient(leaderProps.getBaseUrl(), 15000, 30000)) {
+
+ Unload unloadCmd = new Unload(false);
+ unloadCmd.setCoreName(leaderProps.getCoreName());
+ ModifiableSolrParams p = (ModifiableSolrParams) unloadCmd.getParams();
+
+ collectionClient.request(unloadCmd);
+ }
+// Thread.currentThread().sleep(500);
+// printLayout();
+
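+ // wait (up to ~5 seconds) for leadership to move off the unloaded core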
+ int tries = 50;
+ while (leaderProps.getCoreUrl().equals(zkStateReader.getLeaderUrl("unloadcollection", "shard1", 15000))) {
+ Thread.sleep(100);
+ if (tries-- == 0) {
+ fail("Leader never changed");
+ }
+ }
+
+ // ensure there is a leader
+ zkStateReader.getLeaderRetry("unloadcollection", "shard1", 15000);
+
+ try (HttpSolrClient addClient = getHttpSolrClient(jettys.get(1).getBaseUrl() + "/unloadcollection_shard1_replica2", 30000, 90000)) {
+
+ // add a few docs while the leader is down
+ for (int x = 101; x < 200; x++) {
+ SolrInputDocument doc1 = getDoc(id, x, i1, -600, tlong, 600, t1,
+ "humpty dumpy sat on a wall");
+ addClient.add(doc1);
+ }
+ }
+
+ assertTrue(CollectionAdminRequest
+ .addReplicaToShard("unloadcollection", "shard1")
+ .setCoreName("unloadcollection_shard1_replica4")
+ .setNode(jettys.get(3).getNodeName())
+ .process(cloudClient).isSuccess());
+
+ waitForRecoveriesToFinish("unloadcollection", zkStateReader, false);
+
+ // unload the leader again
+ leaderProps = getLeaderUrlFromZk("unloadcollection", "shard1");
+ try (HttpSolrClient collectionClient = getHttpSolrClient(leaderProps.getBaseUrl(), 15000, 30000)) {
+
+ Unload unloadCmd = new Unload(false);
+ unloadCmd.setCoreName(leaderProps.getCoreName());
+ collectionClient.request(unloadCmd);
+ }
+ tries = 50;
+ while (leaderProps.getCoreUrl().equals(zkStateReader.getLeaderUrl("unloadcollection", "shard1", 15000))) {
+ Thread.sleep(100);
+ if (tries-- == 0) {
+ fail("Leader never changed");
+ }
+ }
+
+ zkStateReader.getLeaderRetry("unloadcollection", "shard1", 15000);
+
+ TestInjection.skipIndexWriterCommitOnClose = false; // set this back
+ assertTrue(CollectionAdminRequest
+ .addReplicaToShard("unloadcollection", "shard1")
+ .setCoreName(leaderProps.getCoreName())
+ .setDataDir(core1DataDir)
+ .setNode(leaderProps.getNodeName())
+ .process(cloudClient).isSuccess());
+
+ waitForRecoveriesToFinish("unloadcollection", zkStateReader, false);
+
+ long found1, found3;
+
+ try (HttpSolrClient adminClient = getHttpSolrClient(jettys.get(1).getBaseUrl() + "/unloadcollection_shard1_replica2", 15000, 30000)) {
+ adminClient.commit();
+ SolrQuery q = new SolrQuery("*:*");
+ q.set("distrib", false);
+ found1 = adminClient.query(q).getResults().getNumFound();
+ }
+
+ try (HttpSolrClient adminClient = getHttpSolrClient(jettys.get(2).getBaseUrl() + "/unloadcollection_shard1_replica3", 15000, 30000)) {
+ adminClient.commit();
+ SolrQuery q = new SolrQuery("*:*");
+ q.set("distrib", false);
+ found3 = adminClient.query(q).getResults().getNumFound();
+ }
+
+ try (HttpSolrClient adminClient = getHttpSolrClient(jettys.get(3).getBaseUrl() + "/unloadcollection_shard1_replica4", 15000, 30000)) {
+ adminClient.commit();
+ SolrQuery q = new SolrQuery("*:*");
+ q.set("distrib", false);
+ long found4 = adminClient.query(q).getResults().getNumFound();
+
+ // all 3 shards should now have the same number of docs
+ assertEquals(found1, found3);
+ assertEquals(found3, found4);
+ }
+ }
+
+ private void testUnloadLotsOfCores() throws Exception {
+ JettySolrRunner jetty = jettys.get(0);
+ try (final HttpSolrClient adminClient = (HttpSolrClient) jetty.newClient(15000, 60000)) {
+ int numReplicas = atLeast(3);
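+ // create the cores concurrently, then fire off concurrent unload requests for each of them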
+ ThreadPoolExecutor executor = new ExecutorUtil.MDCAwareThreadPoolExecutor(0, Integer.MAX_VALUE,
+ 5, TimeUnit.SECONDS, new SynchronousQueue<>(),
+ new SolrNamedThreadFactory("testExecutor"));
+ try {
+ // create the cores
+ createCollectionInOneInstance(adminClient, jetty.getNodeName(), executor, "multiunload", 2, numReplicas);
+ } finally {
+ ExecutorUtil.shutdownAndAwaitTermination(executor);
+ }
+
+ executor = new ExecutorUtil.MDCAwareThreadPoolExecutor(0, Integer.MAX_VALUE, 5,
+ TimeUnit.SECONDS, new SynchronousQueue<>(),
+ new SolrNamedThreadFactory("testExecutor"));
+ try {
+ for (int j = 0; j < numReplicas; j++) {
+ final int freezeJ = j;
+ executor.execute(() -> {
+ Unload unloadCmd = new Unload(true);
+ unloadCmd.setCoreName("multiunload" + freezeJ);
+ try {
+ adminClient.request(unloadCmd);
+ } catch (SolrServerException | IOException e) {
+ throw new RuntimeException(e);
+ }
+ });
+ Thread.sleep(random().nextInt(50));
+ }
+ } finally {
+ ExecutorUtil.shutdownAndAwaitTermination(executor);
+ }
+ }
+ }
+}
diff --git a/solr/core/src/test/org/apache/solr/cloud/FullThrottleStoppableIndexingThread.java b/solr/test-framework/src/java/org/apache/solr/cloud/FullThrottleStoppableIndexingThread.java
similarity index 100%
rename from solr/core/src/test/org/apache/solr/cloud/FullThrottleStoppableIndexingThread.java
rename to solr/test-framework/src/java/org/apache/solr/cloud/FullThrottleStoppableIndexingThread.java
diff --git a/solr/test-framework/src/java/org/apache/solr/cloud/api/collections/AbstractCollectionsAPIDistributedZkTestBase.java b/solr/test-framework/src/java/org/apache/solr/cloud/api/collections/AbstractCollectionsAPIDistributedZkTestBase.java
new file mode 100644
index 00000000000..07a803c0633
--- /dev/null
+++ b/solr/test-framework/src/java/org/apache/solr/cloud/api/collections/AbstractCollectionsAPIDistributedZkTestBase.java
@@ -0,0 +1,645 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.cloud.api.collections;
+
+import javax.management.MBeanServer;
+import javax.management.MBeanServerFactory;
+import javax.management.ObjectName;
+import java.io.IOException;
+import java.lang.invoke.MethodHandles;
+import java.lang.management.ManagementFactory;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Optional;
+import java.util.Set;
+import java.util.concurrent.TimeUnit;
+
+import com.google.common.collect.ImmutableList;
+import org.apache.lucene.util.LuceneTestCase.Slow;
+import org.apache.lucene.util.TestUtil;
+import org.apache.solr.client.solrj.SolrQuery;
+import org.apache.solr.client.solrj.SolrServerException;
+import org.apache.solr.client.solrj.embedded.JettySolrRunner;
+import org.apache.solr.client.solrj.impl.BaseHttpSolrClient;
+import org.apache.solr.client.solrj.impl.HttpSolrClient;
+import org.apache.solr.client.solrj.request.CollectionAdminRequest;
+import org.apache.solr.client.solrj.request.CoreAdminRequest;
+import org.apache.solr.client.solrj.request.CoreStatus;
+import org.apache.solr.client.solrj.request.QueryRequest;
+import org.apache.solr.client.solrj.request.UpdateRequest;
+import org.apache.solr.client.solrj.response.CollectionAdminResponse;
+import org.apache.solr.client.solrj.response.CoreAdminResponse;
+import org.apache.solr.cloud.SolrCloudTestCase;
+import org.apache.solr.common.SolrException;
+import org.apache.solr.common.cloud.DocCollection;
+import org.apache.solr.common.cloud.Replica;
+import org.apache.solr.common.cloud.Slice;
+import org.apache.solr.common.cloud.ZkCoreNodeProps;
+import org.apache.solr.common.cloud.ZkStateReader;
+import org.apache.solr.common.params.CollectionParams.CollectionAction;
+import org.apache.solr.common.params.CoreAdminParams;
+import org.apache.solr.common.params.ModifiableSolrParams;
+import org.apache.solr.common.util.NamedList;
+import org.apache.solr.common.util.TimeSource;
+import org.apache.solr.core.CoreContainer;
+import org.apache.solr.core.SolrCore;
+import org.apache.solr.core.SolrInfoBean.Category;
+import org.apache.solr.util.TestInjection;
+import org.apache.solr.util.TimeOut;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import static org.apache.solr.common.cloud.ZkStateReader.CORE_NAME_PROP;
+import static org.apache.solr.common.cloud.ZkStateReader.REPLICATION_FACTOR;
+
+/**
+ * Tests the Cloud Collections API.
+ */
+@Slow
+public abstract class AbstractCollectionsAPIDistributedZkTestBase extends SolrCloudTestCase {
+ private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+
+ abstract String getConfigSet();
+
+ @Before
+ public void setupCluster() throws Exception {
+ // we don't want this test to have zk timeouts
+ System.setProperty("zkClientTimeout", "60000");
+ System.setProperty("createCollectionWaitTimeTillActive", "5");
+ TestInjection.randomDelayInCoreCreation = "true:5";
+ System.setProperty("validateAfterInactivity", "200");
+ System.setProperty("solr.allowPaths", "*");
+
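+ // register the same configset under two names so tests can exercise collection.configName explicitly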
+ configureCluster(4)
+ .addConfig("conf", configset(getConfigSet()))
+ .addConfig("conf2", configset(getConfigSet()))
+ .withSolrXml(TEST_PATH().resolve("solr.xml"))
+ .configure();
+ }
+
+ @After
+ public void tearDownCluster() throws Exception {
+ try {
+ shutdownCluster();
+ } finally {
+ System.clearProperty("createCollectionWaitTimeTillActive");
+ System.clearProperty("solr.allowPaths");
+ super.tearDown();
+ }
+ }
+
+ @Test
+ public void testCreationAndDeletion() throws Exception {
+ String collectionName = "created_and_deleted";
+
+ CollectionAdminRequest.createCollection(collectionName, "conf", 1, 1).process(cluster.getSolrClient());
+ assertTrue(CollectionAdminRequest.listCollections(cluster.getSolrClient())
+ .contains(collectionName));
+
+ CollectionAdminRequest.deleteCollection(collectionName).process(cluster.getSolrClient());
+ assertFalse(CollectionAdminRequest.listCollections(cluster.getSolrClient())
+ .contains(collectionName));
+
+ assertFalse(cluster.getZkClient().exists(ZkStateReader.COLLECTIONS_ZKNODE + "/" + collectionName, true));
+ }
+
+ @Test
+ public void deleteCollectionRemovesStaleZkCollectionsNode() throws Exception {
+ String collectionName = "out_of_sync_collection";
+
+ // manually create a collections zknode
+ cluster.getZkClient().makePath(ZkStateReader.COLLECTIONS_ZKNODE + "/" + collectionName, true);
+
+ CollectionAdminRequest.deleteCollection(collectionName)
+ .process(cluster.getSolrClient());
+
+ assertFalse(CollectionAdminRequest.listCollections(cluster.getSolrClient())
+ .contains(collectionName));
+
+ assertFalse(cluster.getZkClient().exists(ZkStateReader.COLLECTIONS_ZKNODE + "/" + collectionName, true));
+ }
+
+ @Test
+ public void deletePartiallyCreatedCollection() throws Exception {
+ final String collectionName = "halfdeletedcollection";
+
+ assertEquals(0, CollectionAdminRequest.createCollection(collectionName, "conf", 2, 1)
+ .setCreateNodeSet("")
+ .process(cluster.getSolrClient()).getStatus());
+ String dataDir = createTempDir().toFile().getAbsolutePath();
+ // create a core that simulates something left over from a partially-deleted collection
+ assertTrue(CollectionAdminRequest
+ .addReplicaToShard(collectionName, "shard1")
+ .setDataDir(dataDir)
+ .process(cluster.getSolrClient()).isSuccess());
+
+ CollectionAdminRequest.deleteCollection(collectionName)
+ .process(cluster.getSolrClient());
+
+ assertFalse(CollectionAdminRequest.listCollections(cluster.getSolrClient()).contains(collectionName));
+
+ CollectionAdminRequest.createCollection(collectionName, "conf", 2, 1)
+ .process(cluster.getSolrClient());
+
+ assertTrue(CollectionAdminRequest.listCollections(cluster.getSolrClient()).contains(collectionName));
+ }
+
+ @Test
+ public void deleteCollectionOnlyInZk() throws Exception {
+ final String collectionName = "onlyinzk";
+
+ // create the collections node, but nothing else
+ cluster.getZkClient().makePath(ZkStateReader.COLLECTIONS_ZKNODE + "/" + collectionName, true);
+
+ // delete via API - should remove collections node
+ CollectionAdminRequest.deleteCollection(collectionName).process(cluster.getSolrClient());
+ assertFalse(CollectionAdminRequest.listCollections(cluster.getSolrClient()).contains(collectionName));
+
+ // now creating that collection should work
+ CollectionAdminRequest.createCollection(collectionName, "conf", 2, 1)
+ .process(cluster.getSolrClient());
+ assertTrue(CollectionAdminRequest.listCollections(cluster.getSolrClient()).contains(collectionName));
+ }
+
+ @Test
+ public void testBadActionNames() {
+ // try a bad action
+ ModifiableSolrParams params = new ModifiableSolrParams();
+ params.set("action", "BADACTION");
+ String collectionName = "badactioncollection";
+ params.set("name", collectionName);
+ params.set("numShards", 2);
+ final QueryRequest request = new QueryRequest(params);
+ request.setPath("/admin/collections");
+
+ expectThrows(Exception.class, () -> {
+ cluster.getSolrClient().request(request);
+ });
+ }
+
+ @Test
+ public void testMissingRequiredParameters() {
+ ModifiableSolrParams params = new ModifiableSolrParams();
+ params.set("action", CollectionAction.CREATE.toString());
+ params.set("numShards", 2);
+ // missing required collection parameter
+ final QueryRequest request = new QueryRequest(params);
+ request.setPath("/admin/collections");
+
+ expectThrows(Exception.class, () -> {
+ cluster.getSolrClient().request(request);
+ });
+ }
+
+ @Test
+ public void testMissingNumShards() {
+ // No numShards should fail
+ ModifiableSolrParams params = new ModifiableSolrParams();
+ params.set("action", CollectionAction.CREATE.toString());
+ params.set("name", "acollection");
+ params.set(REPLICATION_FACTOR, 10);
+ params.set("collection.configName", "conf");
+
+ final QueryRequest request = new QueryRequest(params);
+ request.setPath("/admin/collections");
+
+ expectThrows(Exception.class, () -> {
+ cluster.getSolrClient().request(request);
+ });
+ }
+
+ @Test
+ public void testZeroNumShards() {
+ ModifiableSolrParams params = new ModifiableSolrParams();
+ params.set("action", CollectionAction.CREATE.toString());
+ params.set("name", "acollection");
+ params.set(REPLICATION_FACTOR, 10);
+ params.set("numShards", 0);
+ params.set("collection.configName", "conf");
+
+ final QueryRequest request = new QueryRequest(params);
+ request.setPath("/admin/collections");
+ expectThrows(Exception.class, () -> {
+ cluster.getSolrClient().request(request);
+ });
+ }
+
+ @Test
+ public void testCreateShouldFailOnExistingCore() throws Exception {
+ String nn1 = cluster.getJettySolrRunner(0).getNodeName();
+ String nn2 = cluster.getJettySolrRunner(1).getNodeName();
+
+ assertEquals(0, CollectionAdminRequest.createCollection("halfcollectionblocker", "conf", 1, 1)
+ .setCreateNodeSet("")
+ .process(cluster.getSolrClient()).getStatus());
+ assertTrue(CollectionAdminRequest.addReplicaToShard("halfcollectionblocker", "shard1")
+ .setNode(cluster.getJettySolrRunner(0).getNodeName())
+ .setCoreName("halfcollection_shard1_replica_n1")
+ .process(cluster.getSolrClient()).isSuccess());
+
+ assertEquals(0, CollectionAdminRequest.createCollection("halfcollectionblocker2", "conf",1, 1)
+ .setCreateNodeSet("")
+ .process(cluster.getSolrClient()).getStatus());
+ assertTrue(CollectionAdminRequest.addReplicaToShard("halfcollectionblocker2", "shard1")
+ .setNode(cluster.getJettySolrRunner(1).getNodeName())
+ .setCoreName("halfcollection_shard1_replica_n1")
+ .process(cluster.getSolrClient()).isSuccess());
+
+ expectThrows(BaseHttpSolrClient.RemoteSolrException.class, () -> {
+ CollectionAdminRequest.createCollection("halfcollection", "conf", 1, 1)
+ .setCreateNodeSet(nn1 + "," + nn2)
+ .process(cluster.getSolrClient());
+ });
+ }
+
+ @Test
+ public void testNoConfigSetExist() throws Exception {
+ expectThrows(Exception.class, () -> {
+ CollectionAdminRequest.createCollection("noconfig", "conf123", 1, 1)
+ .process(cluster.getSolrClient());
+ });
+
+ TimeUnit.MILLISECONDS.sleep(1000);
+ // in both cases, the collection should have defaulted to the core name
+ cluster.getSolrClient().getZkStateReader().forceUpdateCollection("noconfig");
+ assertFalse(CollectionAdminRequest.listCollections(cluster.getSolrClient()).contains("noconfig"));
+ }
+
+ @Test
+ public void testCoresAreDistributedAcrossNodes() throws Exception {
+ CollectionAdminRequest.createCollection("nodes_used_collection", "conf", 2, 2)
+ .process(cluster.getSolrClient());
+
+ Set<String> liveNodes = cluster.getSolrClient().getZkStateReader().getClusterState().getLiveNodes();
+
+ List<String> createNodeList = new ArrayList<>(liveNodes);
+
+ DocCollection collection = getCollectionState("nodes_used_collection");
+ for (Slice slice : collection.getSlices()) {
+ for (Replica replica : slice.getReplicas()) {
+ createNodeList.remove(replica.getNodeName());
+ }
+ }
+
+ assertEquals(createNodeList.toString(), 0, createNodeList.size());
+ }
+
+ @Test
+ public void testDeleteNonExistentCollection() throws Exception {
+
+ expectThrows(SolrException.class, () -> {
+ CollectionAdminRequest.deleteCollection("unknown_collection").process(cluster.getSolrClient());
+ });
+
+ // create another collection should still work
+ CollectionAdminRequest.createCollection("acollectionafterbaddelete", "conf", 1, 2)
+ .process(cluster.getSolrClient());
+ waitForState("Collection creation after a bad delete failed", "acollectionafterbaddelete",
+ (n, c) -> DocCollection.isFullyActive(n, c, 1, 2));
+ }
+
+ @Test
+ public void testSpecificConfigsets() throws Exception {
+ CollectionAdminRequest.createCollection("withconfigset2", "conf2", 1, 1).process(cluster.getSolrClient());
+ String configName = cluster.getSolrClient().getClusterStateProvider().getCollection("withconfigset2").getConfigName();
+ assertEquals("conf2", configName);
+ }
+
+ @Test
+ public void testCreateNodeSet() throws Exception {
+ JettySolrRunner jetty1 = cluster.getRandomJetty(random());
+ JettySolrRunner jetty2 = cluster.getRandomJetty(random());
+
+ List<String> baseUrls = ImmutableList.of(jetty1.getBaseUrl().toString(), jetty2.getBaseUrl().toString());
+
+ CollectionAdminRequest.createCollection("nodeset_collection", "conf", 2, 1)
+ .setCreateNodeSet(baseUrls.get(0) + "," + baseUrls.get(1))
+ .process(cluster.getSolrClient());
+
+ DocCollection collectionState = getCollectionState("nodeset_collection");
+ for (Replica replica : collectionState.getReplicas()) {
+ String replicaUrl = replica.getCoreUrl();
+ boolean matchingJetty = false;
+ for (String jettyUrl : baseUrls) {
+ if (replicaUrl.startsWith(jettyUrl)) {
+ matchingJetty = true;
+ }
+ }
+ if (matchingJetty == false) {
+ fail("Expected replica to be on " + baseUrls + " but was on " + replicaUrl);
+ }
+ }
+ }
+
+ @Test
+ //28-June-2018 @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028")
+ // See: https://issues.apache.org/jira/browse/SOLR-12028 Tests cannot remove files on Windows machines occasionally
+ // commented out on: 24-Dec-2018 @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // added 09-Aug-2018 SOLR-12028
+ public void testCollectionsAPI() throws Exception {
+
+ // create new collections rapid fire
+ int cnt = random().nextInt(TEST_NIGHTLY ? 3 : 1) + 1;
+ CollectionAdminRequest.Create[] createRequests = new CollectionAdminRequest.Create[cnt];
+
+ class Coll {
+ String name;
+ int numShards;
+ int replicationFactor;
+ }
+
+ List<Coll> colls = new ArrayList<>();
+
+ for (int i = 0; i < cnt; i++) {
+
+ int numShards = TestUtil.nextInt(random(), 0, cluster.getJettySolrRunners().size()) + 1;
+ int replicationFactor = TestUtil.nextInt(random(), 0, 3) + 1;
+
+ createRequests[i]
+ = CollectionAdminRequest.createCollection("awhollynewcollection_" + i, "conf2", numShards, replicationFactor);
+ createRequests[i].processAsync(cluster.getSolrClient());
+
+ Coll coll = new Coll();
+ coll.name = "awhollynewcollection_" + i;
+ coll.numShards = numShards;
+ coll.replicationFactor = replicationFactor;
+ colls.add(coll);
+ }
+
+ for (Coll coll : colls) {
+ cluster.waitForActiveCollection(coll.name, coll.numShards, coll.numShards * coll.replicationFactor);
+ }
+
+ waitForStable(cnt, createRequests);
+
+ for (int i = 0; i < cluster.getJettySolrRunners().size(); i++) {
+ checkInstanceDirs(cluster.getJettySolrRunner(i));
+ }
+
+ String collectionName = createRequests[random().nextInt(createRequests.length)].getCollectionName();
+
+ // TODO: we should not need this...beast test well when trying to fix
+ Thread.sleep(1000);
+
+ cluster.getSolrClient().getZkStateReader().forciblyRefreshAllClusterStateSlow();
+
+ new UpdateRequest()
+ .add("id", "6")
+ .add("id", "7")
+ .add("id", "8")
+ .commit(cluster.getSolrClient(), collectionName);
+ long numFound = 0;
+ TimeOut timeOut = new TimeOut(10, TimeUnit.SECONDS, TimeSource.NANO_TIME);
+ while (!timeOut.hasTimedOut()) {
+
+ numFound = cluster.getSolrClient().query(collectionName, new SolrQuery("*:*")).getResults().getNumFound();
+ if (numFound == 3) {
+ break;
+ }
+
+ Thread.sleep(500);
+ }
+
+ if (timeOut.hasTimedOut()) {
+ fail("Timeout waiting to see 3 found, instead saw " + numFound + " for collection " + collectionName);
+ }
+
+ checkNoTwoShardsUseTheSameIndexDir();
+ }
+
+ private void waitForStable(int cnt, CollectionAdminRequest.Create[] createRequests) throws InterruptedException {
+ for (int i = 0; i < cnt; i++) {
+ String collectionName = "awhollynewcollection_" + i;
+ final int j = i;
+ waitForState("Expected to see collection " + collectionName, collectionName,
+ (n, c) -> {
+ CollectionAdminRequest.Create req = createRequests[j];
+ return DocCollection.isFullyActive(n, c, req.getNumShards(), req.getReplicationFactor());
+ });
+
+ ZkStateReader zkStateReader = cluster.getSolrClient().getZkStateReader();
+ // make sure we have leaders for each shard
+ for (int z = 1; z < createRequests[j].getNumShards(); z++) {
+ zkStateReader.getLeaderRetry(collectionName, "shard" + z, 10000);
+ } // make sure we again have leaders for each shard
+ }
+ }
+
+ @Test
+ public void testCollectionReload() throws Exception {
+ final String collectionName = "reloaded_collection";
+ CollectionAdminRequest.createCollection(collectionName, "conf", 2, 2).process(cluster.getSolrClient());
+
+ // get core open times
+ Map<String, Long> urlToTimeBefore = new HashMap<>();
+ collectStartTimes(collectionName, urlToTimeBefore);
+ assertTrue(urlToTimeBefore.size() > 0);
+
+ CollectionAdminRequest.reloadCollection(collectionName).processAsync(cluster.getSolrClient());
+
+ // reloads may take a short while
+ boolean allTimesAreCorrect = waitForReloads(collectionName, urlToTimeBefore);
+ assertTrue("some core start times did not change on reload", allTimesAreCorrect);
+ }
+
+ private void checkInstanceDirs(JettySolrRunner jetty) throws IOException {
+ CoreContainer cores = jetty.getCoreContainer();
+ Collection<SolrCore> theCores = cores.getCores();
+ for (SolrCore core : theCores) {
+ // look for core props file
+ Path instancedir = core.getInstancePath();
+ assertTrue("Could not find expected core.properties file", Files.exists(instancedir.resolve("core.properties")));
+
+ Path expected = Paths.get(jetty.getSolrHome()).toAbsolutePath().resolve(core.getName());
+
+ assertTrue("Expected: " + expected + "\nFrom core stats: " + instancedir, Files.isSameFile(expected, instancedir));
+ }
+ }
+
+ private boolean waitForReloads(String collectionName, Map<String, Long> urlToTimeBefore) throws SolrServerException, IOException {
+ TimeOut timeout = new TimeOut(45, TimeUnit.SECONDS, TimeSource.NANO_TIME);
+
+ boolean allTimesAreCorrect = false;
+ while (! timeout.hasTimedOut()) {
+ Map<String, Long> urlToTimeAfter = new HashMap<>();
+ collectStartTimes(collectionName, urlToTimeAfter);
+
+ boolean retry = false;
+ Set<Entry<String, Long>> entries = urlToTimeBefore.entrySet();
+ for (Entry<String, Long> entry : entries) {
+ Long beforeTime = entry.getValue();
+ Long afterTime = urlToTimeAfter.get(entry.getKey());
+ assertNotNull(afterTime);
+ if (afterTime <= beforeTime) {
+ retry = true;
+ break;
+ }
+
+ }
+ if (!retry) {
+ allTimesAreCorrect = true;
+ break;
+ }
+ }
+ return allTimesAreCorrect;
+ }
+
+ private void collectStartTimes(String collectionName, Map<String, Long> urlToTime)
+ throws SolrServerException, IOException {
+
+ DocCollection collectionState = getCollectionState(collectionName);
+ if (collectionState != null) {
+ for (Slice shard : collectionState) {
+ for (Replica replica : shard) {
+ ZkCoreNodeProps coreProps = new ZkCoreNodeProps(replica);
+ CoreStatus coreStatus;
+ try (HttpSolrClient server = getHttpSolrClient(coreProps.getBaseUrl())) {
+ coreStatus = CoreAdminRequest.getCoreStatus(coreProps.getCoreName(), false, server);
+ }
+ long before = coreStatus.getCoreStartTime().getTime();
+ urlToTime.put(coreProps.getCoreUrl(), before);
+ }
+ }
+ } else {
+ throw new IllegalArgumentException("Could not find collection " + collectionName);
+ }
+ }
+
+ private void checkNoTwoShardsUseTheSameIndexDir() {
+ Map<String, Set<String>> indexDirToShardNamesMap = new HashMap<>();
+
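+ // collect every core's published indexDir attribute from the JMX MBeans and group the reporting cores by directory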
+ List<MBeanServer> servers = new LinkedList<>();
+ servers.add(ManagementFactory.getPlatformMBeanServer());
+ servers.addAll(MBeanServerFactory.findMBeanServer(null));
+ for (final MBeanServer server : servers) {
+ Set<ObjectName> mbeans = new HashSet<>(server.queryNames(null, null));
+ for (final ObjectName mbean : mbeans) {
+ try {
+ Map<String, String> props = mbean.getKeyPropertyList();
+ String category = props.get("category");
+ String name = props.get("name");
+ if ((category != null && category.equals(Category.CORE.toString())) &&
+ (name != null && name.equals("indexDir"))) {
+ String indexDir = server.getAttribute(mbean, "Value").toString();
+ String key = props.get("dom2") + "." + props.get("dom3") + "." + props.get("dom4");
+ if (!indexDirToShardNamesMap.containsKey(indexDir)) {
+ indexDirToShardNamesMap.put(indexDir, new HashSet<>());
+ }
+ indexDirToShardNamesMap.get(indexDir).add(key);
+ }
+ } catch (Exception e) {
+ // ignore, just continue - probably a "Value" attribute
+ // not found
+ }
+ }
+ }
+
+ assertTrue(
+ "Something is broken in the assert for no shards using the same indexDir - probably something was changed in the attributes published in the MBean of "
+ + SolrCore.class.getSimpleName() + " : " + indexDirToShardNamesMap,
+ indexDirToShardNamesMap.size() > 0);
+ for (Entry<String, Set<String>> entry : indexDirToShardNamesMap.entrySet()) {
+ if (entry.getValue().size() > 1) {
+ fail("We have shards using the same indexDir. E.g. shards "
+ + entry.getValue().toString() + " all use indexDir "
+ + entry.getKey());
+ }
+ }
+ }
+
+ @Test
+ public void addReplicaTest() throws Exception {
+ String collectionName = "addReplicaColl";
+
+ CollectionAdminRequest.createCollection(collectionName, "conf", 2, 2)
+ .process(cluster.getSolrClient());
+ cluster.waitForActiveCollection(collectionName, 2, 4);
+
+ ArrayList<String> nodeList
+ = new ArrayList<>(cluster.getSolrClient().getZkStateReader().getClusterState().getLiveNodes());
+ Collections.shuffle(nodeList, random());
+
+ CollectionAdminResponse response = CollectionAdminRequest.addReplicaToShard(collectionName, "shard1")
+ .setNode(nodeList.get(0))
+ .process(cluster.getSolrClient());
+ Replica newReplica = grabNewReplica(response, getCollectionState(collectionName));
+
+ assertEquals("Replica should be created on the right node",
+ cluster.getSolrClient().getZkStateReader().getBaseUrlForNodeName(nodeList.get(0)), newReplica.getBaseUrl());
+
+ Path instancePath = createTempDir();
+ response = CollectionAdminRequest.addReplicaToShard(collectionName, "shard1")
+ .withProperty(CoreAdminParams.INSTANCE_DIR, instancePath.toString())
+ .process(cluster.getSolrClient());
+ newReplica = grabNewReplica(response, getCollectionState(collectionName));
+ assertNotNull(newReplica);
+
+ try (HttpSolrClient coreclient = getHttpSolrClient(newReplica.getBaseUrl())) {
+ CoreAdminResponse status = CoreAdminRequest.getStatus(newReplica.getStr("core"), coreclient);
+ NamedList<Object> coreStatus = status.getCoreStatus(newReplica.getStr("core"));
+ String instanceDirStr = (String) coreStatus.get("instanceDir");
+ assertEquals(instanceDirStr, instancePath.toString());
+ }
+
+ //Test to make sure we can't create another replica with an existing core_name of that collection
+ String coreName = newReplica.getStr(CORE_NAME_PROP);
+ SolrException e = expectThrows(SolrException.class, () -> {
+ ModifiableSolrParams params = new ModifiableSolrParams();
+ params.set("action", "addreplica");
+ params.set("collection", collectionName);
+ params.set("shard", "shard1");
+ params.set("name", coreName);
+ QueryRequest request = new QueryRequest(params);
+ request.setPath("/admin/collections");
+ cluster.getSolrClient().request(request);
+ });
+
+ assertTrue(e.getMessage().contains("Another replica with the same core name already exists for this collection"));
+
+ // Check that specifying property.name works. DO NOT remove this when the "name" property is deprecated
+ // for ADDREPLICA, this is "property.name". See SOLR-7132
+ response = CollectionAdminRequest.addReplicaToShard(collectionName, "shard1")
+ .withProperty(CoreAdminParams.NAME, "propertyDotName")
+ .process(cluster.getSolrClient());
+
+ newReplica = grabNewReplica(response, getCollectionState(collectionName));
+ assertEquals("'core' should be 'propertyDotName' ", "propertyDotName", newReplica.getStr("core"));
+ }
+
+ private Replica grabNewReplica(CollectionAdminResponse response, DocCollection docCollection) {
+ String replicaName = response.getCollectionCoresStatus().keySet().iterator().next();
+ Optional<Replica> optional = docCollection.getReplicas().stream()
+ .filter(replica -> replicaName.equals(replica.getCoreName()))
+ .findAny();
+ if (optional.isPresent()) {
+ return optional.get();
+ }
+ throw new AssertionError("Can not find " + replicaName + " from " + docCollection);
+ }
+}
diff --git a/solr/core/src/test/org/apache/solr/handler/BackupRestoreUtils.java b/solr/test-framework/src/java/org/apache/solr/handler/BackupRestoreUtils.java
similarity index 100%
rename from solr/core/src/test/org/apache/solr/handler/BackupRestoreUtils.java
rename to solr/test-framework/src/java/org/apache/solr/handler/BackupRestoreUtils.java
diff --git a/solr/core/src/test/org/apache/solr/handler/BackupStatusChecker.java b/solr/test-framework/src/java/org/apache/solr/handler/BackupStatusChecker.java
similarity index 97%
rename from solr/core/src/test/org/apache/solr/handler/BackupStatusChecker.java
rename to solr/test-framework/src/java/org/apache/solr/handler/BackupStatusChecker.java
index 89d99db648c..1db896811a2 100644
--- a/solr/core/src/test/org/apache/solr/handler/BackupStatusChecker.java
+++ b/solr/test-framework/src/java/org/apache/solr/handler/BackupStatusChecker.java
@@ -166,7 +166,7 @@ public String waitForDifferentBackupDir(final String directoryName, final TimeOu
* if the the most recently reported status to the a particular backup request.
*
*
- * @returns the "directoryName" of the backup if the response indicates that a is completed successfully, otherwise null
+ * @return the "directoryName" of the backup if the response indicates that a is completed successfully, otherwise null
*/
public String checkBackupSuccess() throws Exception {
return _checkBackupSuccess(null);
@@ -179,7 +179,7 @@ public String checkBackupSuccess() throws Exception {
* (The Replication Handler API does not make it possible to know which backup
* this exception was related to)
*
- * @returns the "directoryName" of the backup if the response indicates that the specified backupName is completed successfully, otherwise null
+ * @return the "directoryName" of the backup if the response indicates that the specified backupName is completed successfully, otherwise null
* @see #waitForBackupSuccess(String,TimeOut)
*/
public String checkBackupSuccess(final String backupName) throws Exception {
@@ -260,7 +260,7 @@ public void waitForBackupDeletionSuccess(final String backupName, final TimeOut
* Throws a test assertion failure if the status is about this backupName but the starts message
* with "Unable to delete"
*
- * @returns true if the replication status info indicates the backup was deleted, false otherwise
+ * @return true if the replication status info indicates the backup was deleted, false otherwise
* @see #waitForBackupDeletionSuccess(String,TimeOut)
*/
public boolean checkBackupDeletionSuccess(final String backupName) throws Exception {
diff --git a/solr/test-framework/src/java/org/apache/solr/handler/TestRestoreCoreUtil.java b/solr/test-framework/src/java/org/apache/solr/handler/TestRestoreCoreUtil.java
new file mode 100644
index 00000000000..210b2649026
--- /dev/null
+++ b/solr/test-framework/src/java/org/apache/solr/handler/TestRestoreCoreUtil.java
@@ -0,0 +1,54 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.handler;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.net.URL;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import org.apache.commons.io.IOUtils;
+import org.junit.Assert;
+
+public class TestRestoreCoreUtil {
+ public static boolean fetchRestoreStatus (String baseUrl, String coreName) throws IOException {
+ String leaderUrl = baseUrl + "/" + coreName +
+ ReplicationHandler.PATH + "?wt=xml&command=" + ReplicationHandler.CMD_RESTORE_STATUS;
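+ // fetch the replication handler's XML restore status once; fail the test if it reports an exception or a "failed" status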
+ final Pattern pException = Pattern.compile("<str name=\"exception\">(.*?)</str>");
+
+ InputStream stream = null;
+ try {
+ URL url = new URL(leaderUrl);
+ stream = url.openStream();
+ String response = IOUtils.toString(stream, "UTF-8");
+ Matcher matcher = pException.matcher(response);
+ if(matcher.find()) {
+ Assert.fail("Failed to complete restore action with exception " + matcher.group(1));
+ }
+ if(response.contains("success")) {
+ return true;
+ } else if (response.contains("failed")){
+ Assert.fail("Restore Failed");
+ }
+ stream.close();
+ } finally {
+ IOUtils.closeQuietly(stream);
+ }
+ return false;
+ }
+}
diff --git a/solr/test-framework/src/java/org/apache/solr/handler/package.html b/solr/test-framework/src/java/org/apache/solr/handler/package.html
new file mode 100644
index 00000000000..553e5c1484c
--- /dev/null
+++ b/solr/test-framework/src/java/org/apache/solr/handler/package.html
@@ -0,0 +1,24 @@
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<html>
+<body>
+
+Concrete implementations of org.apache.solr.request.SolrRequestHandler
+
+</body>
+</html>
diff --git a/solr/core/src/test/org/apache/solr/schema/MockExchangeRateProvider.java b/solr/test-framework/src/java/org/apache/solr/schema/MockExchangeRateProvider.java
similarity index 100%
rename from solr/core/src/test/org/apache/solr/schema/MockExchangeRateProvider.java
rename to solr/test-framework/src/java/org/apache/solr/schema/MockExchangeRateProvider.java
diff --git a/solr/core/src/test/org/apache/solr/search/facet/DebugAgg.java b/solr/test-framework/src/java/org/apache/solr/search/facet/DebugAgg.java
similarity index 100%
rename from solr/core/src/test/org/apache/solr/search/facet/DebugAgg.java
rename to solr/test-framework/src/java/org/apache/solr/search/facet/DebugAgg.java
diff --git a/solr/core/src/test/org/apache/solr/search/function/NvlValueSourceParser.java b/solr/test-framework/src/java/org/apache/solr/search/function/NvlValueSourceParser.java
similarity index 100%
rename from solr/core/src/test/org/apache/solr/search/function/NvlValueSourceParser.java
rename to solr/test-framework/src/java/org/apache/solr/search/function/NvlValueSourceParser.java
diff --git a/solr/core/src/test/org/apache/solr/search/similarities/CustomSimilarityFactory.java b/solr/test-framework/src/java/org/apache/solr/search/similarities/CustomSimilarityFactory.java
similarity index 100%
rename from solr/core/src/test/org/apache/solr/search/similarities/CustomSimilarityFactory.java
rename to solr/test-framework/src/java/org/apache/solr/search/similarities/CustomSimilarityFactory.java
diff --git a/solr/core/src/test/org/apache/solr/search/similarities/MockConfigurableSimilarity.java b/solr/test-framework/src/java/org/apache/solr/search/similarities/MockConfigurableSimilarity.java
similarity index 100%
rename from solr/core/src/test/org/apache/solr/search/similarities/MockConfigurableSimilarity.java
rename to solr/test-framework/src/java/org/apache/solr/search/similarities/MockConfigurableSimilarity.java
diff --git a/solr/core/src/test/org/apache/solr/util/MockCoreContainer.java b/solr/test-framework/src/java/org/apache/solr/util/MockCoreContainer.java
similarity index 100%
rename from solr/core/src/test/org/apache/solr/util/MockCoreContainer.java
rename to solr/test-framework/src/java/org/apache/solr/util/MockCoreContainer.java
diff --git a/versions.lock b/versions.lock
index 66028799107..064bff22d96 100644
--- a/versions.lock
+++ b/versions.lock
@@ -31,7 +31,7 @@ com.google.cloud:google-cloud-storage:1.113.14 (2 constraints: 10143aa3)
com.google.code.findbugs:annotations:3.0.1 (1 constraints: 0605fb35)
com.google.code.findbugs:jsr305:3.0.2 (9 constraints: 057f5545)
com.google.code.gson:gson:2.8.6 (4 constraints: 94389e56)
-com.google.errorprone:error_prone_annotations:2.10.0 (5 constraints: 243d8c14)
+com.google.errorprone:error_prone_annotations:2.10.0 (6 constraints: 4c4d2718)
com.google.guava:failureaccess:1.0.1 (1 constraints: e60fd595)
com.google.guava:guava:25.1-jre (2 constraints: 0515c2b9)
com.google.guava:listenablefuture:9999.0-empty-to-avoid-conflict-with-guava (1 constraints: 8f1d45cb)
@@ -186,7 +186,7 @@ org.carrot2:morfologik-polish:2.1.5 (1 constraints: cf12501e)
org.carrot2:morfologik-stemming:2.1.5 (2 constraints: d01f1300)
org.ccil.cowan.tagsoup:tagsoup:1.2.1 (1 constraints: 0605f735)
org.checkerframework:checker-compat-qual:2.5.5 (1 constraints: f00fed95)
-org.checkerframework:checker-qual:3.10.0 (4 constraints: 082ba36f)
+org.checkerframework:checker-qual:3.19.0 (5 constraints: 3a3b3860)
org.codehaus.janino:commons-compiler:3.0.11 (2 constraints: 81192719)
org.codehaus.janino:janino:3.0.11 (1 constraints: 8d0d3f3a)
org.codehaus.mojo:animal-sniffer-annotations:1.14 (1 constraints: ea09d5aa)