From d77962c1b6034f02d27bab8e9ea8874855c8943b Mon Sep 17 00:00:00 2001 From: Holly Keebler Date: Thu, 16 May 2019 10:55:39 -0400 Subject: [PATCH] Fix #1084 partial - IT tests failing on Standalone AccumuloClientIT, ManyWriteAheadLogsIT, ReadWriteIT, YieldScannersIT, BadDeleteMarkers...IT and others that failed due to null in the command prefix Also includes IteratorEnvIT fix --- .../harness/AccumuloClusterHarness.java | 11 ++-- .../apache/accumulo/test/IteratorEnvIT.java | 52 ++++++++++++------- .../test/functional/AccumuloClientIT.java | 48 ++++++++++++----- .../test/functional/ManyWriteAheadLogsIT.java | 38 ++++++++++++++ .../accumulo/test/functional/ReadWriteIT.java | 34 +++++------- .../test/functional/YieldingIterator.java | 12 +++++ 6 files changed, 137 insertions(+), 58 deletions(-) diff --git a/test/src/main/java/org/apache/accumulo/harness/AccumuloClusterHarness.java b/test/src/main/java/org/apache/accumulo/harness/AccumuloClusterHarness.java index 72a5eda7c94..95984a6a59f 100644 --- a/test/src/main/java/org/apache/accumulo/harness/AccumuloClusterHarness.java +++ b/test/src/main/java/org/apache/accumulo/harness/AccumuloClusterHarness.java @@ -142,14 +142,19 @@ public void setupCluster() throws Exception { standaloneCluster.setAccumuloHome(conf.getAccumuloHome()); standaloneCluster.setClientAccumuloConfDir(conf.getClientAccumuloConfDir()); standaloneCluster.setHadoopConfDir(conf.getHadoopConfDir()); - standaloneCluster.setServerCmdPrefix(conf.getServerCmdPrefix()); - standaloneCluster.setClientCmdPrefix(conf.getClientCmdPrefix()); + // If these were not provided then ensure they are not null + standaloneCluster + .setServerCmdPrefix(conf.getServerCmdPrefix() == null ? "" : conf.getServerCmdPrefix()); + standaloneCluster + .setClientCmdPrefix(conf.getClientCmdPrefix() == null ? 
"" : conf.getClientCmdPrefix()); cluster = standaloneCluster; // For SASL, we need to get the Hadoop configuration files as well otherwise UGI will log in // as SIMPLE instead of KERBEROS - Configuration hadoopConfiguration = standaloneCluster.getHadoopConfiguration(); if (saslEnabled()) { + // Note that getting the Hadoop config creates a servercontext which wacks up the + // AccumuloClientIT test so if SASL is enabled then the testclose() will fail + Configuration hadoopConfiguration = standaloneCluster.getHadoopConfiguration(); UserGroupInformation.setConfiguration(hadoopConfiguration); // Login as the admin user to start the tests UserGroupInformation.loginUserFromKeytab(conf.getAdminPrincipal(), diff --git a/test/src/main/java/org/apache/accumulo/test/IteratorEnvIT.java b/test/src/main/java/org/apache/accumulo/test/IteratorEnvIT.java index 935b32bff14..9399c3a52c9 100644 --- a/test/src/main/java/org/apache/accumulo/test/IteratorEnvIT.java +++ b/test/src/main/java/org/apache/accumulo/test/IteratorEnvIT.java @@ -17,9 +17,6 @@ package org.apache.accumulo.test; import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; import java.io.IOException; import java.util.Collections; @@ -80,12 +77,14 @@ public void init(SortedKeyValueIterator source, Map op // Checking for compaction on a scan should throw an error. 
try { - assertFalse(env.isUserCompaction()); - fail("Expected to throw IllegalStateException when checking compaction on a scan."); + env.isUserCompaction(); + throw new RuntimeException( + "Test failed - Expected to throw IllegalStateException when checking compaction on a scan."); } catch (IllegalStateException e) {} try { - assertFalse(env.isFullMajorCompaction()); - fail("Expected to throw IllegalStateException when checking compaction on a scan."); + env.isFullMajorCompaction(); + throw new RuntimeException( + "Test failed - Expected to throw IllegalStateException when checking compaction on a scan."); } catch (IllegalStateException e) {} } } @@ -101,8 +100,16 @@ public void init(SortedKeyValueIterator source, Map op IteratorEnvironment env) throws IOException { super.init(source, options, env); testEnv(scope, options, env); - assertTrue(env.isUserCompaction()); - assertTrue(env.isFullMajorCompaction()); + try { + env.isUserCompaction(); + } catch (IllegalStateException e) { + throw new RuntimeException("Test failed"); + } + try { + env.isFullMajorCompaction(); + } catch (IllegalStateException e) { + throw new RuntimeException("Test failed"); + } } } @@ -118,12 +125,14 @@ public void init(SortedKeyValueIterator source, Map op super.init(source, options, env); testEnv(scope, options, env); try { - assertTrue(env.isUserCompaction()); - fail("Expected to throw IllegalStateException when checking compaction on a scan."); + env.isUserCompaction(); + throw new RuntimeException( + "Test failed - Expected to throw IllegalStateException when checking compaction on a scan."); } catch (IllegalStateException e) {} try { - assertFalse(env.isFullMajorCompaction()); - fail("Expected to throw IllegalStateException when checking compaction on a scan."); + env.isFullMajorCompaction(); + throw new RuntimeException( + "Test failed - Expected to throw IllegalStateException when checking compaction on a scan."); } catch (IllegalStateException e) {} } } @@ -135,13 +144,16 @@ public 
void init(SortedKeyValueIterator source, Map op private static void testEnv(IteratorScope scope, Map opts, IteratorEnvironment env) { TableId expectedTableId = TableId.of(opts.get("expected.table.id")); - assertEquals("Expected table property not found", "value1", - env.getConfig().get("table.custom.iterator.env.test")); - assertEquals("Expected table property not found", "value1", - env.getServiceEnv().getConfiguration(env.getTableId()).getTableCustom("iterator.env.test")); - assertEquals("Error getting iterator scope", scope, env.getIteratorScope()); - assertFalse("isSamplingEnabled returned true, expected false", env.isSamplingEnabled()); - assertEquals("Error getting Table ID", expectedTableId, env.getTableId()); + if (!"value1".equals(env.getConfig().get("table.custom.iterator.env.test")) && !"value1".equals( + env.getServiceEnv().getConfiguration(env.getTableId()).getTableCustom("iterator.env.test"))) + throw new RuntimeException("Test failed - Expected table property not found."); + if (!scope.equals(env.getIteratorScope())) + throw new RuntimeException("Test failed - Error getting iterator scope"); + if (env.isSamplingEnabled()) + throw new RuntimeException("Test failed - isSamplingEnabled returned true, expected false"); + if (!expectedTableId.equals(env.getTableId())) + throw new RuntimeException("Test failed - Error getting Table ID"); + } @Before diff --git a/test/src/main/java/org/apache/accumulo/test/functional/AccumuloClientIT.java b/test/src/main/java/org/apache/accumulo/test/functional/AccumuloClientIT.java index c6840ffcfef..af93b37fa89 100644 --- a/test/src/main/java/org/apache/accumulo/test/functional/AccumuloClientIT.java +++ b/test/src/main/java/org/apache/accumulo/test/functional/AccumuloClientIT.java @@ -23,7 +23,9 @@ import java.util.Map.Entry; import java.util.Properties; +import java.util.Set; +import org.apache.accumulo.cluster.ClusterUser; import org.apache.accumulo.core.client.Accumulo; import 
org.apache.accumulo.core.client.AccumuloClient; import org.apache.accumulo.core.client.BatchWriter; @@ -40,12 +42,28 @@ import org.apache.accumulo.core.singletons.SingletonManager; import org.apache.accumulo.core.singletons.SingletonManager.Mode; import org.apache.accumulo.harness.AccumuloClusterHarness; +import org.junit.After; import org.junit.Test; import com.google.common.collect.Iterables; public class AccumuloClientIT extends AccumuloClusterHarness { + @After + public void deleteUsers() throws Exception { + try (AccumuloClient client = Accumulo.newClient().from(getClientProps()).build()) { + Set users = client.securityOperations().listLocalUsers(); + ClusterUser user1 = getUser(0); + ClusterUser user2 = getUser(1); + if (users.contains(user1.getPrincipal())) { + client.securityOperations().dropLocalUser(user1.getPrincipal()); + } + if (users.contains(user2.getPrincipal())) { + client.securityOperations().dropLocalUser(user2.getPrincipal()); + } + } + } + private interface CloseCheck { void check() throws Exception; } @@ -78,11 +96,13 @@ public void testAccumuloClientBuilder() throws Exception { AccumuloClient c = Accumulo.newClient().from(getClientProps()).build(); String instanceName = getClientInfo().getInstanceName(); String zookeepers = getClientInfo().getZooKeepers(); - final String user = "testuser"; - final String password = "testpassword"; - c.securityOperations().createLocalUser(user, new PasswordToken(password)); - AccumuloClient client = Accumulo.newClient().to(instanceName, zookeepers).as(user, password) + ClusterUser testuser1 = getUser(0); + final String user1 = testuser1.getPrincipal(); + final String password1 = testuser1.getPassword(); + c.securityOperations().createLocalUser(user1, new PasswordToken(password1)); + + AccumuloClient client = Accumulo.newClient().to(instanceName, zookeepers).as(user1, password1) .zkTimeout(1234).build(); Properties props = client.properties(); @@ -90,37 +110,39 @@ public void testAccumuloClientBuilder() throws 
Exception { ClientInfo info = ClientInfo.from(client.properties()); assertEquals(instanceName, info.getInstanceName()); assertEquals(zookeepers, info.getZooKeepers()); - assertEquals(user, client.whoami()); + assertEquals(user1, client.whoami()); assertEquals(1234, info.getZooKeepersSessionTimeOut()); - props = Accumulo.newClientProperties().to(instanceName, zookeepers).as(user, password).build(); + props = + Accumulo.newClientProperties().to(instanceName, zookeepers).as(user1, password1).build(); assertTrue(props.containsKey(ClientProperty.AUTH_TOKEN.getKey())); - assertEquals(password, props.get(ClientProperty.AUTH_TOKEN.getKey())); + assertEquals(password1, props.get(ClientProperty.AUTH_TOKEN.getKey())); assertEquals("password", props.get(ClientProperty.AUTH_TYPE.getKey())); assertEquals(instanceName, props.getProperty(ClientProperty.INSTANCE_NAME.getKey())); info = ClientInfo.from(props); assertEquals(instanceName, info.getInstanceName()); assertEquals(zookeepers, info.getZooKeepers()); - assertEquals(user, info.getPrincipal()); + assertEquals(user1, info.getPrincipal()); assertTrue(info.getAuthenticationToken() instanceof PasswordToken); props = new Properties(); props.put(ClientProperty.INSTANCE_NAME.getKey(), instanceName); props.put(ClientProperty.INSTANCE_ZOOKEEPERS.getKey(), zookeepers); - props.put(ClientProperty.AUTH_PRINCIPAL.getKey(), user); + props.put(ClientProperty.AUTH_PRINCIPAL.getKey(), user1); props.put(ClientProperty.INSTANCE_ZOOKEEPERS_TIMEOUT.getKey(), "22s"); - ClientProperty.setPassword(props, password); + ClientProperty.setPassword(props, password1); client.close(); client = Accumulo.newClient().from(props).build(); info = ClientInfo.from(client.properties()); assertEquals(instanceName, info.getInstanceName()); assertEquals(zookeepers, info.getZooKeepers()); - assertEquals(user, client.whoami()); + assertEquals(user1, client.whoami()); assertEquals(22000, info.getZooKeepersSessionTimeOut()); - final String user2 = "testuser2"; - final 
String password2 = "testpassword2"; + ClusterUser testuser2 = getUser(1); + final String user2 = testuser2.getPrincipal(); + final String password2 = testuser2.getPassword(); c.securityOperations().createLocalUser(user2, new PasswordToken(password2)); AccumuloClient client2 = Accumulo.newClient().from(client.properties()) diff --git a/test/src/main/java/org/apache/accumulo/test/functional/ManyWriteAheadLogsIT.java b/test/src/main/java/org/apache/accumulo/test/functional/ManyWriteAheadLogsIT.java index b64199cbf02..4387de90fb1 100644 --- a/test/src/main/java/org/apache/accumulo/test/functional/ManyWriteAheadLogsIT.java +++ b/test/src/main/java/org/apache/accumulo/test/functional/ManyWriteAheadLogsIT.java @@ -32,15 +32,19 @@ import org.apache.accumulo.core.client.Accumulo; import org.apache.accumulo.core.client.AccumuloClient; import org.apache.accumulo.core.client.BatchWriter; +import org.apache.accumulo.core.client.admin.InstanceOperations; import org.apache.accumulo.core.conf.Property; import org.apache.accumulo.core.data.Mutation; import org.apache.accumulo.harness.AccumuloClusterHarness; +import org.apache.accumulo.minicluster.ServerType; import org.apache.accumulo.miniclusterImpl.MiniAccumuloConfigImpl; import org.apache.accumulo.server.ServerContext; import org.apache.accumulo.server.log.WalStateManager.WalState; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.RawLocalFileSystem; import org.apache.hadoop.io.Text; +import org.junit.After; +import org.junit.Before; import org.junit.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -49,6 +53,8 @@ public class ManyWriteAheadLogsIT extends AccumuloClusterHarness { private static final Logger log = LoggerFactory.getLogger(ManyWriteAheadLogsIT.class); + private String majcDelay, walogSize; + @Override public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) { // configure a smaller walog size so the walogs will roll frequently in the test @@ 
-71,6 +77,38 @@ protected int defaultTimeoutSeconds() { return 10 * 60; } + @Before + public void alterConfig() throws Exception { + if (getClusterType() == ClusterType.MINI) { + return; + } + try (AccumuloClient client = Accumulo.newClient().from(getClientProps()).build()) { + InstanceOperations iops = client.instanceOperations(); + Map conf = iops.getSystemConfiguration(); + majcDelay = conf.get(Property.TSERV_MAJC_DELAY.getKey()); + walogSize = conf.get(Property.TSERV_WALOG_MAX_SIZE.getKey()); + + iops.setProperty(Property.TSERV_MAJC_DELAY.getKey(), "1"); + iops.setProperty(Property.TSERV_WALOG_MAX_SIZE.getKey(), "1M"); + + getClusterControl().stopAllServers(ServerType.TABLET_SERVER); + getClusterControl().startAllServers(ServerType.TABLET_SERVER); + } + } + + @After + public void resetConfig() throws Exception { + if (majcDelay != null) { + try (AccumuloClient client = Accumulo.newClient().from(getClientProps()).build()) { + InstanceOperations iops = client.instanceOperations(); + iops.setProperty(Property.TSERV_MAJC_DELAY.getKey(), majcDelay); + iops.setProperty(Property.TSERV_WALOG_MAX_SIZE.getKey(), walogSize); + } + getClusterControl().stopAllServers(ServerType.TABLET_SERVER); + getClusterControl().startAllServers(ServerType.TABLET_SERVER); + } + } + /** * This creates a situation where many tablets reference many different write ahead logs. However * not single tablet references a lot of write ahead logs. 
Want to ensure the tablet server forces diff --git a/test/src/main/java/org/apache/accumulo/test/functional/ReadWriteIT.java b/test/src/main/java/org/apache/accumulo/test/functional/ReadWriteIT.java index 15904244cb0..3b9a2e1e3e8 100644 --- a/test/src/main/java/org/apache/accumulo/test/functional/ReadWriteIT.java +++ b/test/src/main/java/org/apache/accumulo/test/functional/ReadWriteIT.java @@ -21,7 +21,6 @@ import static org.junit.Assert.assertTrue; import java.io.ByteArrayOutputStream; -import java.io.File; import java.io.IOException; import java.io.PrintStream; import java.net.URL; @@ -147,27 +146,18 @@ public void sunnyDay() throws Exception { } String scheme = "http://"; if (getCluster() instanceof StandaloneAccumuloCluster) { - StandaloneAccumuloCluster standaloneCluster = (StandaloneAccumuloCluster) getCluster(); - File accumuloProps = - new File(standaloneCluster.getServerAccumuloConfDir(), "accumulo.properties"); - if (accumuloProps.isFile()) { - Configuration conf = new Configuration(false); - conf.addResource(new Path(accumuloProps.toURI())); - String monitorSslKeystore = conf.get(Property.MONITOR_SSL_KEYSTORE.getKey()); - if (monitorSslKeystore != null) { - log.info("Using HTTPS since monitor ssl keystore configuration was observed in {}", - accumuloProps); - scheme = "https://"; - SSLContext ctx = SSLContext.getInstance("TLSv1.2"); - TrustManager[] tm = {new TestTrustManager()}; - ctx.init(new KeyManager[0], tm, new SecureRandom()); - SSLContext.setDefault(ctx); - HttpsURLConnection.setDefaultSSLSocketFactory(ctx.getSocketFactory()); - HttpsURLConnection.setDefaultHostnameVerifier(new TestHostnameVerifier()); - } - } else { - log.info("{} is not a normal file, not checking for monitor running with SSL", - accumuloProps); + String monitorSslKeystore = + getCluster().getSiteConfiguration().get(Property.MONITOR_SSL_KEYSTORE.getKey()); + if (monitorSslKeystore != null && !monitorSslKeystore.isEmpty()) { + log.info( + "Using HTTPS since monitor ssl keystore 
configuration was observed in accumulo configuration"); + scheme = "https://"; + SSLContext ctx = SSLContext.getInstance("TLSv1.2"); + TrustManager[] tm = {new TestTrustManager()}; + ctx.init(new KeyManager[0], tm, new SecureRandom()); + SSLContext.setDefault(ctx); + HttpsURLConnection.setDefaultSSLSocketFactory(ctx.getSocketFactory()); + HttpsURLConnection.setDefaultHostnameVerifier(new TestHostnameVerifier()); } } URL url = new URL(scheme + monitorLocation); diff --git a/test/src/main/java/org/apache/accumulo/test/functional/YieldingIterator.java b/test/src/main/java/org/apache/accumulo/test/functional/YieldingIterator.java index e94558a682a..fb593eb17db 100644 --- a/test/src/main/java/org/apache/accumulo/test/functional/YieldingIterator.java +++ b/test/src/main/java/org/apache/accumulo/test/functional/YieldingIterator.java @@ -118,6 +118,10 @@ public void seek(Range range, Collection columnFamilies, boolean i .yield(range.getStartKey().followingKey(PartialKey.ROW_COLFAM_COLQUAL_COLVIS_TIME)); log.info("end YieldingIterator.next: yielded at " + range.getStartKey()); } + } else { + // must be a new scan so re-initialize the counters + log.info("reseting counters"); + resetCounters(); } // if not yielding, then simply pass on the call to the source @@ -132,4 +136,12 @@ public void seek(Range range, Collection columnFamilies, boolean i public void enableYielding(YieldCallback yield) { this.yield = Optional.of(yield); } + + protected void resetCounters() { + yieldNexts.set(0); + yieldSeeks.set(0); + rebuilds.set(0); + yieldNextKey.set(false); + yieldSeekKey.set(false); + } }