HDFSCollectionsAPITest.java
@@ -19,6 +19,7 @@

 import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util.QuickPatchThreadsFilter;
 import org.apache.solr.SolrIgnoredThreadsFilter;
 import org.apache.solr.client.solrj.SolrQuery;
@@ -37,6 +38,7 @@
     QuickPatchThreadsFilter.class,
     BadHdfsThreadsFilter.class // hdfs currently leaks thread(s)
 })
+@LuceneTestCase.AwaitsFix(bugUrl = "SOLR-15405")
 public class HDFSCollectionsAPITest extends SolrCloudTestCase {

   private static MiniDFSCluster dfsCluster;
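Across all four test classes in this PR the change is the same: add an import for LuceneTestCase and annotate the class with @LuceneTestCase.AwaitsFix, which makes the test framework skip the whole suite until the referenced issue (SOLR-15405) is fixed, reporting it as ignored rather than failed. A minimal sketch of the mechanism, with a hypothetical test class that is not part of this PR:

 // Hypothetical suite disabled via AwaitsFix; the runner skips it
 // instead of letting its known-bad tests fail the build.
 import org.apache.lucene.util.LuceneTestCase;
 import org.junit.Test;

 @LuceneTestCase.AwaitsFix(bugUrl = "https://issues.apache.org/jira/browse/SOLR-15405")
 public class SomeFlakyHdfsTest extends LuceneTestCase {
   @Test
   public void testAlwaysSkipped() {
     // not executed while the annotation is present
   }
 }

When verifying a fix, such suites can typically be re-enabled for a single run via the build's tests.awaitsfix property (e.g. -Dtests.awaitsfix=true) rather than by editing the annotation.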
CheckHdfsIndexTest.java
@@ -24,6 +24,7 @@
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.lucene.index.BaseTestCheckIndex;
 import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util.QuickPatchThreadsFilter;
 import org.apache.solr.SolrIgnoredThreadsFilter;
 import org.apache.solr.SolrTestCaseJ4;
@@ -49,6 +50,7 @@
     BadHdfsThreadsFilter.class // hdfs currently leaks thread(s)
 })
 @SolrTestCaseJ4.SuppressSSL
+@LuceneTestCase.AwaitsFix(bugUrl = "SOLR-15405")
 // commented out on: 24-Dec-2018 @LuceneTestCase.BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // 12-Jun-2018
 public class CheckHdfsIndexTest extends AbstractFullDistribZkTestBase {
   private static MiniDFSCluster dfsCluster;
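These HDFS suites also rely on the @ThreadLeakFilters annotation visible in the hunks above: the embedded HDFS cluster is known to leak threads, so BadHdfsThreadsFilter whitelists them and the leak detector does not fail the run. A filter is just an implementation of randomizedtesting's ThreadFilter interface; here is a sketch under the assumption that matching on thread names is sufficient (the class name and name prefix below are illustrative, not Solr's actual filter):

 // Illustrative thread-leak filter: returning true tells the leak checker
 // to ignore a thread. The name prefix is only an example; Solr's real
 // BadHdfsThreadsFilter matches the known HDFS daemon threads.
 import com.carrotsearch.randomizedtesting.ThreadFilter;

 public class ExampleHdfsThreadsFilter implements ThreadFilter {
   @Override
   public boolean reject(Thread t) {
     return t.getName().startsWith("IPC Parameter Sending Thread");
   }
 }

Filters are then listed in @ThreadLeakFilters(defaultFilters = true, filters = {...}) exactly as in the annotated classes in this diff.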
solr/core/src/test/org/apache/solr/search/TestRecoveryHdfs.java (38 changes: 20 additions & 18 deletions)
@@ -39,6 +39,7 @@
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util.QuickPatchThreadsFilter;
 import org.apache.solr.SolrIgnoredThreadsFilter;
 import org.apache.solr.SolrTestCaseJ4;
@@ -68,6 +69,7 @@
     BadHdfsThreadsFilter.class // hdfs currently leaks thread(s)
 })
 // TODO: longer term this should be combined with TestRecovery somehow ??
+@LuceneTestCase.AwaitsFix(bugUrl = "SOLR-15405")
 public class TestRecoveryHdfs extends SolrTestCaseJ4 {
   // means that we've seen the leader and have version info (i.e. we are a non-leader replica)
   private static final String FROM_LEADER = DistribPhase.FROMLEADER.toString();
@@ -78,17 +80,17 @@ public class TestRecoveryHdfs extends SolrTestCaseJ4 {
   private static MiniDFSCluster dfsCluster;
   private static String hdfsUri;
   private static FileSystem fs;

   @After
   public void afterTest() {
     TestInjection.reset(); // do after every test, don't wait for AfterClass
   }

   @BeforeClass
   public static void beforeClass() throws Exception {
     dfsCluster = HdfsTestUtil.setupClass(createTempDir().toFile().getAbsolutePath());
     hdfsUri = HdfsTestUtil.getURI(dfsCluster);

     try {
       URI uri = new URI(hdfsUri);
       Configuration conf = HdfsTestUtil.getClientConfiguration(dfsCluster);
@@ -98,10 +100,10 @@ public static void beforeClass() throws Exception {
     }

     System.setProperty("solr.ulog.dir", hdfsUri + "/solr/shard1");

     initCore("solrconfig-tlog.xml","schema15.xml");
   }

   @AfterClass
   public static void afterClass() throws Exception {
     IOUtils.closeQuietly(fs);
@@ -123,10 +125,10 @@ public static void afterClass() throws Exception {

   @Test
   public void testReplicationFactor() throws Exception {
-    clearIndex();
+    clearIndex();

     HdfsUpdateLog ulog = (HdfsUpdateLog) h.getCore().getUpdateHandler().getUpdateLog();

     assertU(commit());
     addAndGetVersion(sdoc("id", "REP1"), null);
     assertU(commit());
@@ -140,10 +142,10 @@ public void testReplicationFactor() throws Exception {
         break;
       }
     }

     assertTrue("Expected to find tlogs with a replication factor of 2", foundRep2);
   }

   @Test
   public void testLogReplay() throws Exception {
     try {
@@ -650,7 +652,7 @@ public void testCleanShutdown() throws Exception {
       req().close();
     }
   }

   private void addDocs(int nDocs, int start, LinkedList<Long> versions) throws Exception {
     for (int i=0; i<nDocs; i++) {
       versions.addFirst( addAndGetVersion( sdoc("id",Integer.toString(start + nDocs)) , null) );
@@ -679,7 +681,7 @@ public void testRemoveOldLogs() throws Exception {
     assertU(commit());

     String logDir = h.getCore().getUpdateHandler().getUpdateLog().getLogDir();

     h.close();

     String[] files = HdfsUpdateLog.getLogList(fs, new Path(logDir));
@@ -788,16 +790,16 @@ public void testTruncatedLog() throws Exception {
     assertU(adoc("id","F1"));
     assertU(adoc("id","F2"));
     assertU(adoc("id","F3"));

     h.close();

     String[] files = HdfsUpdateLog.getLogList(fs, new Path(logDir));
     Arrays.sort(files);

     FSDataOutputStream dos = fs.append(new Path(logDir, files[files.length-1]));

     dos.writeLong(0xffffffffffffffffL);
     dos.writeChars("This should be appended to a good log file, representing a bad partially written record.");
     dos.close();
@@ -837,7 +839,7 @@ public void testCorruptLog() throws Exception {
     TestInjection.skipIndexWriterCommitOnClose = true;

     String logDir = h.getCore().getUpdateHandler().getUpdateLog().getLogDir();

     clearIndex();
     assertU(commit());

@@ -970,7 +972,7 @@ private static void findReplace(byte[] from, byte[] to, byte[] data) {
       System.arraycopy(to, 0, data, idx, to.length);
     }
   }

   private static int indexOf(byte[] target, byte[] data, int start) {
     outer: for (int i=start; i<data.length - target.length; i++) {
       for (int j=0; j<target.length; j++) {
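The testTruncatedLog hunk above also shows the corruption technique this suite relies on: reopen the newest transaction log with FileSystem.append, then write a bogus length and some garbage characters, simulating a record that was only partially flushed before a crash. A standalone sketch of the same trick; the URI, path, and the append-support flag are placeholders and assumptions, not taken from the PR:

 // Sketch: simulate a torn write by appending garbage to an existing HDFS file.
 import java.net.URI;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;

 public class TornWriteDemo {
   public static void main(String[] args) throws Exception {
     Configuration conf = new Configuration();
     conf.setBoolean("dfs.support.append", true); // older clusters need appends enabled
     FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:9000"), conf);

     // Placeholder path standing in for the newest tlog file.
     FSDataOutputStream dos = fs.append(new Path("/solr/shard1/tlog.0000000000000000001"));
     dos.writeLong(0xffffffffffffffffL); // bogus marker where a record header belongs
     dos.writeChars("partially written record"); // garbage payload, no terminator
     dos.close();
     fs.close();
   }
 }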
solr/core/src/test/org/apache/solr/update/TestHdfsUpdateLog.java (18 changes: 10 additions & 8 deletions)
@@ -23,6 +23,7 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util.QuickPatchThreadsFilter;
 import org.apache.solr.SolrIgnoredThreadsFilter;
 import org.apache.solr.SolrTestCaseJ4;
@@ -41,16 +42,17 @@
     QuickPatchThreadsFilter.class,
     BadHdfsThreadsFilter.class // hdfs currently leaks thread(s)
 })
+@LuceneTestCase.AwaitsFix(bugUrl = "SOLR-15405")
 public class TestHdfsUpdateLog extends SolrTestCaseJ4 {
   private static MiniDFSCluster dfsCluster;
   private static String hdfsUri;
   private static FileSystem fs;

   @BeforeClass
   public static void beforeClass() throws Exception {
     dfsCluster = HdfsTestUtil.setupClass(createTempDir().toFile().getAbsolutePath());
     hdfsUri = HdfsTestUtil.getURI(dfsCluster);

     try {
       URI uri = new URI(hdfsUri);
       Configuration conf = HdfsTestUtil.getClientConfiguration(dfsCluster);
@@ -60,10 +62,10 @@ public static void beforeClass() throws Exception {
     }

     System.setProperty("solr.ulog.dir", hdfsUri + "/solr/shard1");

     initCore("solrconfig-tlog.xml","schema15.xml");
   }

   @AfterClass
   public static void afterClass() throws Exception {
     IOUtils.closeQuietly(fs);
@@ -90,14 +92,14 @@ public void testFSThreadSafety() throws Exception {
     ((DirectUpdateHandler2) uhandler).getCommitTracker().setTimeUpperBound(100);
     ((DirectUpdateHandler2) uhandler).getCommitTracker().setOpenSearcher(false);
     final UpdateLog ulog = uhandler.getUpdateLog();

     clearIndex();
     assertU(commit());

     // we hammer on init in a background thread to make
     // sure we don't run into any filesystem already closed
     // problems (SOLR-7113)

     Thread thread = new Thread() {
       public void run() {
         int cnt = 0;
@@ -114,7 +116,7 @@ public void run() {
         }
       }
     };

     Thread thread2 = new Thread() {
       public void run() {
         int cnt = 0;