Fix integration tests (#37)
* Fixed tests that broke due to changes in Accumulo
* Commented out use of DistributedTrace
* Updated ITs due to changes in TestIngest and ConfigurableMacBase
mikewalch committed Mar 12, 2019
1 parent f89f33d commit 3f3586b
Showing 5 changed files with 23 additions and 28 deletions.
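For orientation before the diff: the common thread in these test updates is the move to the Accumulo 2.x client builder. The sketch below is not part of this commit; it is a minimal, self-contained illustration of the construction style the updated ITs adopt (Accumulo.newClient().from(props).build() in place of the older createClient() harness helper). The properties file path and table name are illustrative only.

// Minimal sketch of the client style the updated ITs switch to; not from this commit.
import java.io.FileInputStream;
import java.util.Properties;

import org.apache.accumulo.core.client.Accumulo;
import org.apache.accumulo.core.client.AccumuloClient;
import org.apache.accumulo.core.client.BatchWriter;
import org.apache.accumulo.core.data.Mutation;

public class ClientBuilderSketch {
  public static void main(String[] args) throws Exception {
    // Load connection settings; the file path is illustrative.
    Properties props = new Properties();
    try (FileInputStream in = new FileInputStream("accumulo-client.properties")) {
      props.load(in);
    }
    // Builder-style construction replaces the harness createClient() helper.
    try (AccumuloClient client = Accumulo.newClient().from(props).build()) {
      client.tableOperations().create("example_table");
      // createBatchWriter(table) without an explicit BatchWriterConfig,
      // matching the change in the MapReduce IT below.
      try (BatchWriter bw = client.createBatchWriter("example_table")) {
        Mutation m = new Mutation("row1");
        m.put("cf", "cq", "value1");
        bw.addMutation(m);
      }
    }
  }
}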
@@ -32,7 +32,7 @@
import org.apache.accumulo.core.data.Mutation;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.security.Authorizations;
import org.apache.accumulo.core.trace.DistributedTrace;
// import org.apache.accumulo.core.trace.DistributedTrace;
import org.apache.accumulo.examples.cli.ClientOnDefaultTable;
import org.apache.accumulo.examples.cli.ScannerOpts;
import org.apache.htrace.Sampler;
@@ -74,7 +74,7 @@ private TracingExample(AccumuloClient client) {
}

private void enableTracing() {
DistributedTrace.enable("myHost", "myApp");
// DistributedTrace.enable("myHost", "myApp");
}

private void execute(Opts opts) throws TableNotFoundException, AccumuloException,
28 changes: 10 additions & 18 deletions src/test/java/org/apache/accumulo/examples/ExamplesIT.java
@@ -34,7 +34,6 @@
import java.util.Map.Entry;
import java.util.concurrent.TimeUnit;

import org.apache.accumulo.core.cli.BatchWriterOpts;
import org.apache.accumulo.core.client.AccumuloClient;
import org.apache.accumulo.core.client.BatchScanner;
import org.apache.accumulo.core.client.BatchWriter;
@@ -71,31 +70,27 @@
import org.apache.accumulo.minicluster.MemoryUnit;
import org.apache.accumulo.miniclusterImpl.MiniAccumuloConfigImpl;
import org.apache.accumulo.test.TestIngest;
import org.apache.accumulo.test.TestIngest.IngestParams;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.google.common.collect.Iterators;

public class ExamplesIT extends AccumuloClusterHarness {
private static final Logger log = LoggerFactory.getLogger(ExamplesIT.class);
private static final BatchWriterOpts bwOpts = new BatchWriterOpts();
private static final BatchWriterConfig bwc = new BatchWriterConfig();
private static final String visibility = "A|B";
private static final String auths = "A,B";

AccumuloClient c;
BatchWriter bw;
IteratorSetting is;
String dir;
FileSystem fs;
Authorizations origAuths;
private AccumuloClient c;
private BatchWriter bw;
private IteratorSetting is;
private String dir;
private FileSystem fs;
private Authorizations origAuths;

@Override
public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration hadoopConf) {
@@ -245,13 +240,10 @@ public void testMaxMutationConstraint() throws Exception {
String tableName = getUniqueNames(1)[0];
c.tableOperations().create(tableName);
c.tableOperations().addConstraint(tableName, MaxMutationSize.class.getName());
TestIngest.Opts opts = new TestIngest.Opts();
opts.rows = 1;
opts.cols = 1000;
opts.setTableName(tableName);
opts.setPrincipal(getAdminPrincipal());
IngestParams params = new IngestParams(c.properties(), tableName, 1);
params.cols = 1000;
try {
TestIngest.ingest(c, opts, bwOpts);
TestIngest.ingest(c, params);
} catch (MutationsRejectedException ex) {
assertEquals(1, ex.getConstraintViolationSummaries().size());
}
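For context on the hunk above: the old TestIngest.Opts/BatchWriterOpts pair is replaced by a single TestIngest.IngestParams, and the test still expects the 1000-column row to trip the MaxMutationSize constraint and produce exactly one constraint violation summary. A small before/after sketch, with the constructor argument order (client properties, table name, row count) inferred from the call above rather than from the TestIngest javadoc:

// Before: separate opts objects, with the principal set explicitly.
// TestIngest.Opts opts = new TestIngest.Opts();
// opts.rows = 1;
// opts.cols = 1000;
// opts.setTableName(tableName);
// opts.setPrincipal(getAdminPrincipal());
// TestIngest.ingest(client, opts, new BatchWriterOpts());

// After: one IngestParams built from the client's connection properties,
// so the principal comes from those properties instead of a setter.
IngestParams params = new IngestParams(client.properties(), tableName, 1);
params.cols = 1000;
TestIngest.ingest(client, params);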
@@ -22,6 +22,7 @@
import java.util.ArrayList;
import java.util.Map.Entry;

import org.apache.accumulo.core.client.Accumulo;
import org.apache.accumulo.core.client.AccumuloClient;
import org.apache.accumulo.core.client.BatchWriter;
import org.apache.accumulo.core.client.BatchWriterConfig;
@@ -55,7 +56,7 @@ protected void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSit
@Before
public void setupInstance() throws Exception {
tableName = getUniqueNames(1)[0];
client = createClient();
client = Accumulo.newClient().from(getClientProperties()).build();
client.tableOperations().create(tableName);
BatchWriter bw = client.createBatchWriter(tableName, new BatchWriterConfig());
ColumnVisibility cv = new ColumnVisibility();
@@ -119,7 +119,7 @@ public static class TestMapper

@Override
protected void map(List<Entry<Key,Value>> key, InputStream value, Context context)
throws IOException, InterruptedException {
throws IOException {
String table = context.getConfiguration().get("MRTester_tableName");
assertNotNull(table);

@@ -22,11 +22,13 @@
import java.util.Base64;
import java.util.Collections;
import java.util.Map.Entry;
import java.util.Properties;

import org.apache.accumulo.core.client.Accumulo;
import org.apache.accumulo.core.client.AccumuloClient;
import org.apache.accumulo.core.client.BatchWriter;
import org.apache.accumulo.core.client.BatchWriterConfig;
import org.apache.accumulo.core.client.Scanner;
import org.apache.accumulo.core.conf.ClientProperty;
import org.apache.accumulo.core.conf.Property;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Mutation;
@@ -65,12 +67,13 @@ protected void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSit
@Test
public void test() throws Exception {
String confFile = System.getProperty("user.dir") + "/target/accumulo-client.properties";
String instance = getClientInfo().getInstanceName();
String keepers = getClientInfo().getZooKeepers();
Properties props = getClientProperties();
String instance = ClientProperty.INSTANCE_NAME.getValue(props);
String keepers = ClientProperty.INSTANCE_ZOOKEEPERS.getValue(props);
ExamplesIT.writeClientPropsFile(confFile, instance, keepers, "root", ROOT_PASSWORD);
try (AccumuloClient client = createClient()) {
try (AccumuloClient client = Accumulo.newClient().from(props).build()) {
client.tableOperations().create(tablename);
BatchWriter bw = client.createBatchWriter(tablename, new BatchWriterConfig());
BatchWriter bw = client.createBatchWriter(tablename);
for (int i = 0; i < 10; i++) {
Mutation m = new Mutation("" + i);
m.put(input_cf, input_cq, "row" + i);
@@ -93,5 +96,4 @@ public void test() throws Exception {
}
}
}

}
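As background for the last hunk: connection details now come from the harness's client Properties and are read through the ClientProperty enum, then written to a properties file for the MapReduce job via the ExamplesIT.writeClientPropsFile helper. The sketch below is a hypothetical stand-alone equivalent of that write step, not the actual helper; it assumes ClientProperty exposes getKey() and omits the credential entries the real helper also writes.

// Hypothetical equivalent of writing a client properties file for a
// downstream job; the real tests call ExamplesIT.writeClientPropsFile instead.
import java.io.FileWriter;
import java.util.Properties;

import org.apache.accumulo.core.conf.ClientProperty;

public class WritePropsSketch {
  public static void main(String[] args) throws Exception {
    Properties props = new Properties();
    // Keys taken from the ClientProperty enum so they match what
    // Accumulo.newClient().from(...) expects (getKey() is an assumption).
    props.setProperty(ClientProperty.INSTANCE_NAME.getKey(), "test");
    props.setProperty(ClientProperty.INSTANCE_ZOOKEEPERS.getKey(), "localhost:2181");
    // The real helper also writes the principal and password here.
    try (FileWriter out = new FileWriter("target/accumulo-client.properties")) {
      props.store(out, "client connection properties for the MapReduce job");
    }
  }
}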
