Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -142,14 +142,19 @@ public void setupCluster() throws Exception {
standaloneCluster.setAccumuloHome(conf.getAccumuloHome());
standaloneCluster.setClientAccumuloConfDir(conf.getClientAccumuloConfDir());
standaloneCluster.setHadoopConfDir(conf.getHadoopConfDir());
standaloneCluster.setServerCmdPrefix(conf.getServerCmdPrefix());
standaloneCluster.setClientCmdPrefix(conf.getClientCmdPrefix());
// If these were not provided then ensure they are not null
standaloneCluster
.setServerCmdPrefix(conf.getServerCmdPrefix() == null ? "" : conf.getServerCmdPrefix());
standaloneCluster
.setClientCmdPrefix(conf.getClientCmdPrefix() == null ? "" : conf.getClientCmdPrefix());
cluster = standaloneCluster;

// For SASL, we need to get the Hadoop configuration files as well otherwise UGI will log in
// as SIMPLE instead of KERBEROS
Configuration hadoopConfiguration = standaloneCluster.getHadoopConfiguration();
if (saslEnabled()) {
// Note that getting the Hadoop config creates a ServerContext, which breaks the
// AccumuloClientIT test, so if SASL is enabled then the testclose() will fail
Configuration hadoopConfiguration = standaloneCluster.getHadoopConfiguration();
UserGroupInformation.setConfiguration(hadoopConfiguration);
// Login as the admin user to start the tests
UserGroupInformation.loginUserFromKeytab(conf.getAdminPrincipal(),
Expand Down
52 changes: 32 additions & 20 deletions test/src/main/java/org/apache/accumulo/test/IteratorEnvIT.java
Original file line number Diff line number Diff line change
Expand Up @@ -17,9 +17,6 @@
package org.apache.accumulo.test;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;

import java.io.IOException;
import java.util.Collections;
Expand Down Expand Up @@ -80,12 +77,14 @@ public void init(SortedKeyValueIterator<Key,Value> source, Map<String,String> op

// Checking for compaction on a scan should throw an error.
try {
assertFalse(env.isUserCompaction());
fail("Expected to throw IllegalStateException when checking compaction on a scan.");
env.isUserCompaction();
throw new RuntimeException(
"Test failed - Expected to throw IllegalStateException when checking compaction on a scan.");
} catch (IllegalStateException e) {}
try {
assertFalse(env.isFullMajorCompaction());
fail("Expected to throw IllegalStateException when checking compaction on a scan.");
env.isFullMajorCompaction();
throw new RuntimeException(
"Test failed - Expected to throw IllegalStateException when checking compaction on a scan.");
} catch (IllegalStateException e) {}
}
}
Expand All @@ -101,8 +100,16 @@ public void init(SortedKeyValueIterator<Key,Value> source, Map<String,String> op
IteratorEnvironment env) throws IOException {
super.init(source, options, env);
testEnv(scope, options, env);
assertTrue(env.isUserCompaction());
assertTrue(env.isFullMajorCompaction());
try {
env.isUserCompaction();
} catch (IllegalStateException e) {
throw new RuntimeException("Test failed");
}
try {
env.isFullMajorCompaction();
} catch (IllegalStateException e) {
throw new RuntimeException("Test failed");
}
}
}

Expand All @@ -118,12 +125,14 @@ public void init(SortedKeyValueIterator<Key,Value> source, Map<String,String> op
super.init(source, options, env);
testEnv(scope, options, env);
try {
assertTrue(env.isUserCompaction());
fail("Expected to throw IllegalStateException when checking compaction on a scan.");
env.isUserCompaction();
throw new RuntimeException(
"Test failed - Expected to throw IllegalStateException when checking compaction on a scan.");
} catch (IllegalStateException e) {}
try {
assertFalse(env.isFullMajorCompaction());
fail("Expected to throw IllegalStateException when checking compaction on a scan.");
env.isFullMajorCompaction();
throw new RuntimeException(
"Test failed - Expected to throw IllegalStateException when checking compaction on a scan.");
} catch (IllegalStateException e) {}
}
}
Expand All @@ -135,13 +144,16 @@ public void init(SortedKeyValueIterator<Key,Value> source, Map<String,String> op
// Verifies that the IteratorEnvironment handed to the test iterators exposes the
// expected table configuration, iterator scope, sampling state, and table id.
// NOTE(review): this span is diff residue from the scraped PR page — it contains
// BOTH the old JUnit-assert checks and their new RuntimeException-based
// replacements; only one set exists in the real file.
private static void testEnv(IteratorScope scope, Map<String,String> opts,
IteratorEnvironment env) {
// The expected table id is passed in through the iterator options.
TableId expectedTableId = TableId.of(opts.get("expected.table.id"));
// Old (removed in the PR) JUnit-assert form of the checks:
assertEquals("Expected table property not found", "value1",
env.getConfig().get("table.custom.iterator.env.test"));
assertEquals("Expected table property not found", "value1",
env.getServiceEnv().getConfiguration(env.getTableId()).getTableCustom("iterator.env.test"));
assertEquals("Error getting iterator scope", scope, env.getIteratorScope());
assertFalse("isSamplingEnabled returned true, expected false", env.isSamplingEnabled());
assertEquals("Error getting Table ID", expectedTableId, env.getTableId());
// New (added in the PR) form: throw RuntimeException so the failure surfaces
// from inside a server-side iterator, where JUnit asserts would not propagate.
if (!"value1".equals(env.getConfig().get("table.custom.iterator.env.test")) && !"value1".equals(
env.getServiceEnv().getConfiguration(env.getTableId()).getTableCustom("iterator.env.test")))
throw new RuntimeException("Test failed - Expected table property not found.");
if (!scope.equals(env.getIteratorScope()))
throw new RuntimeException("Test failed - Error getting iterator scope");
if (env.isSamplingEnabled())
throw new RuntimeException("Test failed - isSamplingEnabled returned true, expected false");
if (!expectedTableId.equals(env.getTableId()))
throw new RuntimeException("Test failed - Error getting Table ID");

}

@Before
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,9 @@

import java.util.Map.Entry;
import java.util.Properties;
import java.util.Set;

import org.apache.accumulo.cluster.ClusterUser;
import org.apache.accumulo.core.client.Accumulo;
import org.apache.accumulo.core.client.AccumuloClient;
import org.apache.accumulo.core.client.BatchWriter;
Expand All @@ -40,12 +42,28 @@
import org.apache.accumulo.core.singletons.SingletonManager;
import org.apache.accumulo.core.singletons.SingletonManager.Mode;
import org.apache.accumulo.harness.AccumuloClusterHarness;
import org.junit.After;
import org.junit.Test;

import com.google.common.collect.Iterables;

public class AccumuloClientIT extends AccumuloClusterHarness {

@After
public void deleteUsers() throws Exception {
  // Clean up the local users created during the test so repeated runs (e.g. against
  // a standalone cluster) start from a known state.
  try (AccumuloClient client = Accumulo.newClient().from(getClientProps()).build()) {
    Set<String> existingUsers = client.securityOperations().listLocalUsers();
    // The test uses exactly two cluster users: getUser(0) and getUser(1).
    for (int i = 0; i < 2; i++) {
      String principal = getUser(i).getPrincipal();
      // Only drop a user that was actually created; dropping a missing user would fail.
      if (existingUsers.contains(principal)) {
        client.securityOperations().dropLocalUser(principal);
      }
    }
  }
}

private interface CloseCheck {
void check() throws Exception;
}
Expand Down Expand Up @@ -78,49 +96,53 @@ public void testAccumuloClientBuilder() throws Exception {
AccumuloClient c = Accumulo.newClient().from(getClientProps()).build();
String instanceName = getClientInfo().getInstanceName();
String zookeepers = getClientInfo().getZooKeepers();
final String user = "testuser";
final String password = "testpassword";
c.securityOperations().createLocalUser(user, new PasswordToken(password));

AccumuloClient client = Accumulo.newClient().to(instanceName, zookeepers).as(user, password)
ClusterUser testuser1 = getUser(0);
final String user1 = testuser1.getPrincipal();
final String password1 = testuser1.getPassword();
c.securityOperations().createLocalUser(user1, new PasswordToken(password1));
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The purpose of the ClusterUser abstraction is to allow tests to work with passwords or Kerberos. For this test to work with Kerberos it should not use PasswordToken(), but instead use testuser1.getToken()

Copy link
Copy Markdown
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Thanks. I'll assume a Kerberos check is needed.

Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Hopefully testuser1.getToken() is an easy change and it works with password or Kerberos. If not, this could be a follow-on issue.

Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Never mind, this is not as simple as I thought. I was thinking c.securityOperations().createLocalUser(user1, testuser1.getToken()). However, I looked at the method createLocalUser and it explicitly takes a PasswordToken. I saw some other test code passes null for the token when SASL is enabled, but I am not sure what the correct thing to do is.


AccumuloClient client = Accumulo.newClient().to(instanceName, zookeepers).as(user1, password1)
.zkTimeout(1234).build();

Properties props = client.properties();
assertFalse(props.containsKey(ClientProperty.AUTH_TOKEN.getKey()));
ClientInfo info = ClientInfo.from(client.properties());
assertEquals(instanceName, info.getInstanceName());
assertEquals(zookeepers, info.getZooKeepers());
assertEquals(user, client.whoami());
assertEquals(user1, client.whoami());
assertEquals(1234, info.getZooKeepersSessionTimeOut());

props = Accumulo.newClientProperties().to(instanceName, zookeepers).as(user, password).build();
props =
Accumulo.newClientProperties().to(instanceName, zookeepers).as(user1, password1).build();
assertTrue(props.containsKey(ClientProperty.AUTH_TOKEN.getKey()));
assertEquals(password, props.get(ClientProperty.AUTH_TOKEN.getKey()));
assertEquals(password1, props.get(ClientProperty.AUTH_TOKEN.getKey()));
assertEquals("password", props.get(ClientProperty.AUTH_TYPE.getKey()));
assertEquals(instanceName, props.getProperty(ClientProperty.INSTANCE_NAME.getKey()));
info = ClientInfo.from(props);
assertEquals(instanceName, info.getInstanceName());
assertEquals(zookeepers, info.getZooKeepers());
assertEquals(user, info.getPrincipal());
assertEquals(user1, info.getPrincipal());
assertTrue(info.getAuthenticationToken() instanceof PasswordToken);

props = new Properties();
props.put(ClientProperty.INSTANCE_NAME.getKey(), instanceName);
props.put(ClientProperty.INSTANCE_ZOOKEEPERS.getKey(), zookeepers);
props.put(ClientProperty.AUTH_PRINCIPAL.getKey(), user);
props.put(ClientProperty.AUTH_PRINCIPAL.getKey(), user1);
props.put(ClientProperty.INSTANCE_ZOOKEEPERS_TIMEOUT.getKey(), "22s");
ClientProperty.setPassword(props, password);
ClientProperty.setPassword(props, password1);
client.close();
client = Accumulo.newClient().from(props).build();

info = ClientInfo.from(client.properties());
assertEquals(instanceName, info.getInstanceName());
assertEquals(zookeepers, info.getZooKeepers());
assertEquals(user, client.whoami());
assertEquals(user1, client.whoami());
assertEquals(22000, info.getZooKeepersSessionTimeOut());

final String user2 = "testuser2";
final String password2 = "testpassword2";
ClusterUser testuser2 = getUser(1);
final String user2 = testuser2.getPrincipal();
final String password2 = testuser2.getPassword();
c.securityOperations().createLocalUser(user2, new PasswordToken(password2));

AccumuloClient client2 = Accumulo.newClient().from(client.properties())
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -32,15 +32,19 @@
import org.apache.accumulo.core.client.Accumulo;
import org.apache.accumulo.core.client.AccumuloClient;
import org.apache.accumulo.core.client.BatchWriter;
import org.apache.accumulo.core.client.admin.InstanceOperations;
import org.apache.accumulo.core.conf.Property;
import org.apache.accumulo.core.data.Mutation;
import org.apache.accumulo.harness.AccumuloClusterHarness;
import org.apache.accumulo.minicluster.ServerType;
import org.apache.accumulo.miniclusterImpl.MiniAccumuloConfigImpl;
import org.apache.accumulo.server.ServerContext;
import org.apache.accumulo.server.log.WalStateManager.WalState;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.RawLocalFileSystem;
import org.apache.hadoop.io.Text;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
Expand All @@ -49,6 +53,8 @@ public class ManyWriteAheadLogsIT extends AccumuloClusterHarness {

private static final Logger log = LoggerFactory.getLogger(ManyWriteAheadLogsIT.class);

private String majcDelay, walogSize;

@Override
public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
// configure a smaller walog size so the walogs will roll frequently in the test
Expand All @@ -71,6 +77,38 @@ protected int defaultTimeoutSeconds() {
return 10 * 60;
}

@Before
public void alterConfig() throws Exception {
  // MiniAccumuloCluster receives these settings through configureMiniCluster();
  // only a standalone cluster needs its live configuration adjusted here.
  if (getClusterType() == ClusterType.MINI) {
    return;
  }
  try (AccumuloClient accumuloClient = Accumulo.newClient().from(getClientProps()).build()) {
    InstanceOperations instanceOps = accumuloClient.instanceOperations();
    Map<String,String> systemConfig = instanceOps.getSystemConfiguration();

    // Remember the current values so resetConfig() can restore them after the test.
    majcDelay = systemConfig.get(Property.TSERV_MAJC_DELAY.getKey());
    walogSize = systemConfig.get(Property.TSERV_WALOG_MAX_SIZE.getKey());

    // Use a small walog size and short compaction delay so walogs roll frequently.
    instanceOps.setProperty(Property.TSERV_MAJC_DELAY.getKey(), "1");
    instanceOps.setProperty(Property.TSERV_WALOG_MAX_SIZE.getKey(), "1M");

    // Restart the tablet servers so the new settings take effect.
    getClusterControl().stopAllServers(ServerType.TABLET_SERVER);
    getClusterControl().startAllServers(ServerType.TABLET_SERVER);
  }
}
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Could you just put this in configureMiniCluster()? Or does it have to be before configureMiniCluster is called?

Copy link
Copy Markdown
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This is an interesting solution for the standalone case. However, if it is necessary to alter the configuration of the system for the test, then it is probably not suitable for a standalone test, and should be recategorized so as to be skipped when running against a standalone instance.

Copy link
Copy Markdown
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

@milleruntime configureMiniCluster() is called when a MAC is used. In this case we are running standalone so it would not be called.


@After
public void resetConfig() throws Exception {
  // majcDelay is only captured by alterConfig() when running against a standalone
  // cluster, so a null value means there is nothing to restore.
  if (majcDelay == null) {
    return;
  }
  try (AccumuloClient accumuloClient = Accumulo.newClient().from(getClientProps()).build()) {
    InstanceOperations instanceOps = accumuloClient.instanceOperations();
    // Put the original values back so later tests see an unmodified cluster.
    instanceOps.setProperty(Property.TSERV_MAJC_DELAY.getKey(), majcDelay);
    instanceOps.setProperty(Property.TSERV_WALOG_MAX_SIZE.getKey(), walogSize);
  }
  // Restart the tablet servers so the restored settings take effect.
  getClusterControl().stopAllServers(ServerType.TABLET_SERVER);
  getClusterControl().startAllServers(ServerType.TABLET_SERVER);
}

/**
* This creates a situation where many tablets reference many different write ahead logs. However,
no single tablet references a lot of write ahead logs. Want to ensure the tablet server forces
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,6 @@
import static org.junit.Assert.assertTrue;

import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.IOException;
import java.io.PrintStream;
import java.net.URL;
Expand Down Expand Up @@ -147,27 +146,18 @@ public void sunnyDay() throws Exception {
}
String scheme = "http://";
if (getCluster() instanceof StandaloneAccumuloCluster) {
StandaloneAccumuloCluster standaloneCluster = (StandaloneAccumuloCluster) getCluster();
File accumuloProps =
new File(standaloneCluster.getServerAccumuloConfDir(), "accumulo.properties");
if (accumuloProps.isFile()) {
Configuration conf = new Configuration(false);
conf.addResource(new Path(accumuloProps.toURI()));
String monitorSslKeystore = conf.get(Property.MONITOR_SSL_KEYSTORE.getKey());
if (monitorSslKeystore != null) {
log.info("Using HTTPS since monitor ssl keystore configuration was observed in {}",
accumuloProps);
scheme = "https://";
SSLContext ctx = SSLContext.getInstance("TLSv1.2");
TrustManager[] tm = {new TestTrustManager()};
ctx.init(new KeyManager[0], tm, new SecureRandom());
SSLContext.setDefault(ctx);
HttpsURLConnection.setDefaultSSLSocketFactory(ctx.getSocketFactory());
HttpsURLConnection.setDefaultHostnameVerifier(new TestHostnameVerifier());
}
} else {
log.info("{} is not a normal file, not checking for monitor running with SSL",
accumuloProps);
String monitorSslKeystore =
getCluster().getSiteConfiguration().get(Property.MONITOR_SSL_KEYSTORE.getKey());
if (monitorSslKeystore != null && !monitorSslKeystore.isEmpty()) {
log.info(
"Using HTTPS since monitor ssl keystore configuration was observed in accumulo configuration");
scheme = "https://";
SSLContext ctx = SSLContext.getInstance("TLSv1.2");
TrustManager[] tm = {new TestTrustManager()};
ctx.init(new KeyManager[0], tm, new SecureRandom());
SSLContext.setDefault(ctx);
HttpsURLConnection.setDefaultSSLSocketFactory(ctx.getSocketFactory());
HttpsURLConnection.setDefaultHostnameVerifier(new TestHostnameVerifier());
}
}
URL url = new URL(scheme + monitorLocation);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -118,6 +118,10 @@ public void seek(Range range, Collection<ByteSequence> columnFamilies, boolean i
.yield(range.getStartKey().followingKey(PartialKey.ROW_COLFAM_COLQUAL_COLVIS_TIME));
log.info("end YieldingIterator.next: yielded at " + range.getStartKey());
}
} else {
// must be a new scan so re-initialize the counters
log.info("reseting counters");
resetCounters();
}

// if not yielding, then simply pass on the call to the source
Expand All @@ -132,4 +136,12 @@ public void seek(Range range, Collection<ByteSequence> columnFamilies, boolean i
public void enableYielding(YieldCallback<Key> yield) {
this.yield = Optional.of(yield);
}

/**
 * Clears all yield bookkeeping so a fresh scan starts counting from zero.
 * The fields are declared elsewhere in this class; presumably atomics, given
 * the {@code set(...)} accessors — confirm against the field declarations.
 */
protected void resetCounters() {
  // Clear the yield flags first, then the numeric counters; the statements are
  // independent, so the order is not significant.
  yieldNextKey.set(false);
  yieldSeekKey.set(false);
  yieldNexts.set(0);
  yieldSeeks.set(0);
  rebuilds.set(0);
}
}