Skip to content
Permalink
Browse files
Fix and remove brittle, broken tests
  • Loading branch information
milleruntime committed Jan 29, 2019
1 parent 26efc49 commit 0b3174da0ebcd1deac7753deaf0da0ce7b8f4be2
Showing 2 changed files with 5 additions and 101 deletions.
@@ -33,10 +33,7 @@
import java.util.List;
import java.util.Map.Entry;
import java.util.concurrent.TimeUnit;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import org.apache.accumulo.cluster.standalone.StandaloneAccumuloCluster;
import org.apache.accumulo.core.cli.BatchWriterOpts;
import org.apache.accumulo.core.client.AccumuloClient;
import org.apache.accumulo.core.client.BatchScanner;
@@ -56,12 +53,8 @@
import org.apache.accumulo.examples.client.ReadWriteExample;
import org.apache.accumulo.examples.client.RowOperations;
import org.apache.accumulo.examples.client.SequentialBatchWriter;
import org.apache.accumulo.examples.client.TraceDumpExample;
import org.apache.accumulo.examples.client.TracingExample;
import org.apache.accumulo.examples.combiner.StatsCombiner;
import org.apache.accumulo.examples.constraints.MaxMutationSize;
import org.apache.accumulo.examples.dirlist.Ingest;
import org.apache.accumulo.examples.dirlist.QueryUtil;
import org.apache.accumulo.examples.helloworld.Insert;
import org.apache.accumulo.examples.helloworld.Read;
import org.apache.accumulo.examples.isolation.InterferenceTest;
@@ -76,11 +69,8 @@
import org.apache.accumulo.examples.shard.Reverse;
import org.apache.accumulo.harness.AccumuloClusterHarness;
import org.apache.accumulo.minicluster.MemoryUnit;
import org.apache.accumulo.miniclusterImpl.MiniAccumuloClusterImpl;
import org.apache.accumulo.miniclusterImpl.MiniAccumuloClusterImpl.LogWriter;
import org.apache.accumulo.miniclusterImpl.MiniAccumuloConfigImpl;
import org.apache.accumulo.test.TestIngest;
import org.apache.accumulo.tracer.TraceServer;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
@@ -162,94 +152,6 @@ public int defaultTimeoutSeconds() {
return 6 * 60;
}

@Test
public void testTrace() throws Exception {
  // Exercises TracingExample end-to-end: run it against the cluster, scrape the
  // trace IDs it prints, then verify each ID can be dumped via TraceDumpExample.
  Process trace = null;
  if (ClusterType.MINI == getClusterType()) {
    MiniAccumuloClusterImpl impl = (MiniAccumuloClusterImpl) cluster;
    trace = impl.exec(TraceServer.class);
    // Bound the wait so a trace server that fails to start cannot hang the
    // test forever (the original loop had no timeout).
    long deadline = System.currentTimeMillis() + TimeUnit.MINUTES.toMillis(2);
    while (!c.tableOperations().exists("trace")) {
      if (System.currentTimeMillis() > deadline) {
        Assert.fail("Timed out waiting for trace table to be created");
      }
      sleepUninterruptibly(500, TimeUnit.MILLISECONDS);
    }
  }
  try {
    String[] args = new String[] {"-c", getClientPropsFile(), "--createtable", "--deletetable",
        "--create"};
    Entry<Integer,String> pair = cluster.getClusterControl().execWithStdout(TracingExample.class,
        args);
    assertEquals("Expected return code of zero. STDOUT=" + pair.getValue(), 0,
        pair.getKey().intValue());
    String result = pair.getValue();
    // NOTE(review): compiled per call; fine for a test, but could be a static final constant.
    Pattern pattern = Pattern.compile("TraceID: ([0-9a-f]+)");
    Matcher matcher = pattern.matcher(result);
    int count = 0;
    while (matcher.find()) {
      args = new String[] {"-c", getClientPropsFile(), "--traceid", matcher.group(1)};
      pair = cluster.getClusterControl().execWithStdout(TraceDumpExample.class, args);
      assertEquals("Dump failed for trace " + matcher.group(1), 0, pair.getKey().intValue());
      count++;
    }
    // TracingExample must have emitted at least one trace ID.
    assertTrue(count > 0);
  } finally {
    // Always tear down the trace server process, even when an assertion above
    // fails — the original only destroyed it on the success path, leaking the
    // process on failure. trace is only non-null for MINI clusters.
    if (trace != null) {
      trace.destroy();
    }
  }
}

@Test
public void testDirList() throws Exception {
  // Ingest a directory listing into three tables (dir/index/data), then verify
  // a known file can be found back through the index via QueryUtil.
  String[] tableNames = getUniqueNames(3);
  String dirTable = tableNames[0];
  String indexTable = tableNames[1];
  String dataTable = tableNames[2];

  // Pick a directory that is guaranteed to exist for the cluster type under test.
  String ingestDir;
  switch (getClusterType()) {
    case MINI:
      ingestDir =
          ((MiniAccumuloClusterImpl) getCluster()).getConfig().getDir().getAbsolutePath();
      break;
    case STANDALONE:
      ingestDir = ((StandaloneAccumuloCluster) getCluster()).getAccumuloHome();
      break;
    default:
      throw new RuntimeException("Unknown cluster type");
  }
  // If this is running against a standalone cluster, we can't guarantee
  // Accumulo source will be there — skip rather than fail.
  assumeTrue(new File(ingestDir).exists());

  String[] ingestArgs = {"-c", getClientPropsFile(), "--dirTable", dirTable, "--indexTable",
      indexTable, "--dataTable", dataTable, "--vis", visibility, "--chunkSize",
      Integer.toString(10000), ingestDir};
  Entry<Integer,String> result = getClusterControl().execWithStdout(Ingest.class, ingestArgs);
  assertEquals("Got non-zero return code. Stdout=" + result.getValue(), 0,
      result.getKey().intValue());

  // Choose a file we know exists under the ingested directory for each cluster type.
  String expectedFile;
  switch (getClusterType()) {
    case MINI:
      // Should be present in a minicluster dir
      expectedFile = "accumulo-site.xml";
      break;
    case STANDALONE:
      // Should be in place on standalone installs (not having to follow symlinks)
      expectedFile = "LICENSE";
      break;
    default:
      throw new RuntimeException("Unknown cluster type");
  }

  String[] queryArgs = {"-c", getClientPropsFile(), "-t", indexTable, "--auths", auths,
      "--search", "--path", expectedFile};
  result = getClusterControl().execWithStdout(QueryUtil.class, queryArgs);

  // Flush minicluster log writers so any diagnostic output is on disk before asserting.
  if (ClusterType.MINI == getClusterType()) {
    for (LogWriter writer : ((MiniAccumuloClusterImpl) cluster).getLogWriters()) {
      writer.flush();
    }
  }

  log.info("result " + result.getValue());
  assertEquals(0, result.getKey().intValue());
  assertTrue(result.getValue().contains(expectedFile));
}

@Test
public void testAgeoffFilter() throws Exception {
String tableName = getUniqueNames(1)[0];
@@ -33,6 +33,7 @@
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.security.Authorizations;
import org.apache.accumulo.examples.ExamplesIT;
import org.apache.accumulo.miniclusterImpl.MiniAccumuloClusterImpl;
import org.apache.accumulo.miniclusterImpl.MiniAccumuloConfigImpl;
import org.apache.accumulo.test.functional.ConfigurableMacBase;
import org.apache.hadoop.conf.Configuration;
@@ -76,9 +77,10 @@ public void test() throws Exception {
bw.addMutation(m);
}
bw.close();
Process hash = getCluster().exec(RowHash.class, Collections.singletonList(hadoopTmpDirArg),
"-c", confFile, "-t", tablename, "--column", input_cfcq);
assertEquals(0, hash.waitFor());
MiniAccumuloClusterImpl.ProcessInfo hash = getCluster().exec(RowHash.class,
Collections.singletonList(hadoopTmpDirArg), "-c", confFile, "-t", tablename, "--column",
input_cfcq);
assertEquals(0, hash.getProcess().waitFor());

Scanner s = client.createScanner(tablename, Authorizations.EMPTY);
s.fetchColumn(new Text(input_cf), new Text(output_cq));

0 comments on commit 0b3174d

Please sign in to comment.