ACCUMULO-4804 Fixes to work with 2.0
milleruntime committed Feb 14, 2018
2 parents e3616c4 + 752c623 commit 599536eb6f2e3e4f0280b166d60a60c6914f6c19
Showing 7 changed files with 41 additions and 13 deletions.
@@ -24,10 +24,10 @@ test data are created in HDFS. After that the 1000 rows are ingested into
 accumulo. Then we verify the 1000 rows are in accumulo.
 
     $ PKG=org.apache.accumulo.examples.mapreduce.bulk
-    $ ARGS="-i instance -z zookeepers -u username -p password"
+    $ ARGS="-c examples.conf"
     $ accumulo $PKG.SetupTable $ARGS -t test_bulk row_00000333 row_00000666
     $ accumulo $PKG.GenerateTestData --start-row 0 --count 1000 --output bulk/test_1.txt
-    $ accumulo-util hadoop-jar target/accumulo-examples.jar $PKG.BulkIngestExample $ARGS -t test_bulk --inputDir bulk --workDir tmp/bulkWork
+    $ accumulo-util hadoop-jar target/accumulo-examples-X.Y.Z.jar $PKG.BulkIngestExample $ARGS -t test_bulk --inputDir bulk --workDir tmp/bulkWork
     $ accumulo $PKG.VerifyIngest $ARGS -t test_bulk --start-row 0 --count 1000
 
 For a high level discussion of bulk ingest, see the docs dir.
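
Note: the README now points every example at a single properties file passed via -c instead of repeating the -i/-z/-u/-p flags. The contents of examples.conf are not part of this diff; a plausible sketch, assuming the stock ClientConfiguration keys for instance name and ZooKeeper hosts plus the two custom keys this commit reads in ClientOpts:

    # examples.conf (hypothetical contents)
    instance.name=instance
    instance.zookeeper.host=zookeeper1:2181
    accumulo.examples.principal=username
    accumulo.examples.password=password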
@@ -33,7 +33,7 @@
   <description>Example code and corresponding documentation for using Apache Accumulo</description>
 
   <properties>
-    <accumulo.version>1.8.1</accumulo.version>
+    <accumulo.version>2.0.0-SNAPSHOT</accumulo.version>
     <hadoop.version>2.6.4</hadoop.version>
     <slf4j.version>1.7.21</slf4j.version>
     <maven.compiler.source>1.8</maven.compiler.source>
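
Note: bumping accumulo.version to 2.0.0-SNAPSHOT is what drives the rest of this diff. The property is presumably interpolated into the Accumulo dependencies elsewhere in the pom, along these lines (a sketch, not part of this hunk):

    <dependency>
      <groupId>org.apache.accumulo</groupId>
      <artifactId>accumulo-core</artifactId>
      <version>${accumulo.version}</version>
    </dependency>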
@@ -16,6 +16,7 @@
  */
 package org.apache.accumulo.examples.cli;
 
+import java.io.File;
 import java.time.Duration;
 
 import org.apache.accumulo.core.client.AccumuloException;
@@ -85,42 +86,52 @@ public Long convert(String str) {
     }
   }
 
-  public static class PropertiesConverter implements IStringConverter<Configuration> {
+  public static class PropertiesConverter implements IStringConverter<File> {
     @Override
-    public Configuration convert(String filename) {
+    public File convert(String filename) {
       try {
-        return new PropertiesConfiguration(filename);
-      } catch (ConfigurationException e) {
+        return new File(filename);
+      } catch (Exception e) {
         throw new RuntimeException(e);
       }
     }
   }
 
   @Parameter(names = {"-c", "--conf"}, required = true, converter = PropertiesConverter.class,
       description = "Config file for connecting to Accumulo. See README.md for details.")
-  private Configuration config = null;
+  private File config = null;
 
   @Parameter(names = {"-auths", "--auths"}, converter = AuthConverter.class, description = "the authorizations to use when reading or writing")
   public Authorizations auths = Authorizations.EMPTY;
 
   public Connector getConnector() {
     try {
-      ZooKeeperInstance zki = new ZooKeeperInstance(config);
+      ZooKeeperInstance zki = new ZooKeeperInstance(getClientConfiguration());
       return zki.getConnector(getPrincipal(), getToken());
     } catch (AccumuloException | AccumuloSecurityException e) {
       throw new RuntimeException(e);
     }
   }
 
   public ClientConfiguration getClientConfiguration() {
-    return new ClientConfiguration(config);
+    return ClientConfiguration.fromFile(config);
   }
 
   public String getPrincipal() {
-    return config.getString("accumulo.examples.principal", "root");
+    String user = getClientConfiguration().getString("accumulo.examples.principal");
+    if (user != null)
+      return user;
+
+    return "root";
   }
 
   public AuthenticationToken getToken() {
-    return new PasswordToken(config.getString("accumulo.examples.password", "secret"));
+    AuthenticationToken token = new PasswordToken("secret");
+    String password = getClientConfiguration().getString("accumulo.examples.password");
+    if (password != null) {
+      token = new PasswordToken(password);
+    }
+
+    return token;
   }
 }
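
Note: PropertiesConverter now hands JCommander a plain java.io.File, and the commons-configuration object is built lazily in getClientConfiguration() via ClientConfiguration.fromFile(). A minimal sketch of how an example's main() would drive the reworked ClientOpts (parseArgs is assumed to come from the examples' Help base class; the program name is a placeholder):

    public static void main(String[] args) throws Exception {
      ClientOpts opts = new ClientOpts();
      opts.parseArgs("MyExample", args);    // e.g. -c examples.conf
      Connector conn = opts.getConnector(); // ZooKeeperInstance built from the file
      System.out.println(conn.whoami());    // principal read from the file, default "root"
    }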
@@ -281,7 +281,7 @@ public void run() {
       } else if (tokens[0].equals("quit") && tokens.length == 1) {
         break;
       } else if (tokens[0].equals("connect") && tokens.length == 6 && ars == null) {
-        ZooKeeperInstance zki = new ZooKeeperInstance(new ClientConfiguration().withInstance(tokens[1]).withZkHosts(tokens[2]));
+        ZooKeeperInstance zki = new ZooKeeperInstance(ClientConfiguration.create().withInstance(tokens[1]).withZkHosts(tokens[2]));
         Connector conn = zki.getConnector(tokens[3], new PasswordToken(tokens[4]));
         if (conn.tableOperations().exists(tokens[5])) {
           ars = new ARS(conn, tokens[5]);
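
Note: same migration here; 2.0 steers callers to the static factory rather than the public ClientConfiguration constructor, while the fluent chain is unchanged. For example (instance and host names are placeholders):

    ClientConfiguration cc = ClientConfiguration.create()
        .withInstance("myInstance")
        .withZkHosts("zk1:2181,zk2:2181");
    ZooKeeperInstance zki = new ZooKeeperInstance(cc);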
@@ -124,6 +124,7 @@ public class ExamplesIT extends AccumuloClusterHarness {
   public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration hadoopConf) {
     // 128MB * 3
     cfg.setDefaultMemory(cfg.getDefaultMemory() * 3, MemoryUnit.BYTE);
+    cfg.setProperty(Property.TSERV_NATIVEMAP_ENABLED, "false");
   }
 
   @Before
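
Note: this and the two ITs below disable the tablet server's native in-memory map so the mini cluster can run where the native library has not been built. Outside the test harness the same thing can be done through the public mini cluster API; a sketch under that assumption (directory and password are placeholders; requires java.io.File, java.util.Map/HashMap, and the org.apache.accumulo.minicluster classes):

    MiniAccumuloConfig cfg = new MiniAccumuloConfig(new File("/tmp/mac"), "rootPassword");
    Map<String, String> site = new HashMap<>();
    // the string key behind Property.TSERV_NATIVEMAP_ENABLED
    site.put("tserver.memory.maps.native.enabled", "false");
    cfg.setSiteConfig(site);
    MiniAccumuloCluster mac = new MiniAccumuloCluster(cfg);
    mac.start();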
@@ -26,14 +26,17 @@
 import org.apache.accumulo.core.client.BatchWriterConfig;
 import org.apache.accumulo.core.client.Connector;
 import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.conf.Property;
 import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.core.security.ColumnVisibility;
 import org.apache.accumulo.core.util.Pair;
 import org.apache.accumulo.examples.cli.BatchWriterOpts;
 import org.apache.accumulo.examples.cli.ScannerOpts;
+import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
 import org.apache.accumulo.test.functional.ConfigurableMacBase;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.io.Text;
 import org.junit.Before;
 import org.junit.Test;
@@ -43,6 +46,11 @@ public class CountIT extends ConfigurableMacBase {
   private Connector conn;
   private String tableName;
 
+  @Override
+  protected void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
+    cfg.setProperty(Property.TSERV_NATIVEMAP_ENABLED, "false");
+  }
+
   @Before
   public void setupInstance() throws Exception {
     tableName = getUniqueNames(1)[0];
@@ -34,13 +34,16 @@
 import org.apache.accumulo.core.client.Scanner;
 import org.apache.accumulo.core.client.TableExistsException;
 import org.apache.accumulo.core.client.TableNotFoundException;
+import org.apache.accumulo.core.conf.Property;
 import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.examples.ExamplesIT;
 import org.apache.accumulo.minicluster.impl.MiniAccumuloClusterImpl;
+import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
 import org.apache.accumulo.test.functional.ConfigurableMacBase;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.io.Text;
 import org.junit.Test;
@@ -51,6 +54,11 @@ protected int defaultTimeoutSeconds() {
     return 60;
   }
 
+  @Override
+  protected void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
+    cfg.setProperty(Property.TSERV_NATIVEMAP_ENABLED, "false");
+  }
+
   public static final String hadoopTmpDirArg = "-Dhadoop.tmp.dir=" + System.getProperty("user.dir") + "/target/hadoop-tmp";
 
   static final String tablename = "mapredf";
