From 397406716f52b75a5c2cb4a254fc0411dc098e4b Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Tue, 5 May 2026 12:35:05 +0800 Subject: [PATCH 1/2] HBASE-30082 Upgrade hbase-server to use junit5 Part12 --- ...tractTestAsyncTableRegionReplicasRead.java | 46 +- .../AbstractTestCIOperationTimeout.java | 10 +- .../client/AbstractTestCIRpcTimeout.java | 10 +- .../hbase/client/AbstractTestCITimeout.java | 16 +- .../client/AbstractTestRegionLocator.java | 10 +- .../AbstractTestResultScannerCursor.java | 8 +- .../hbase/client/AbstractTestScanCursor.java | 8 +- ...DifferentConnectionRegistriesTestBase.java | 4 +- .../hbase/client/ClientPushbackTestBase.java | 40 +- .../client/MetaWithReplicasTestBase.java | 22 +- .../hbase/client/RegionReplicaTestHelper.java | 6 +- .../SnapshotCloneIndependenceTestBase.java | 365 ++++++++++++++ .../client/SnapshotFromClientTestBase.java | 459 ++++++++++++++++++ .../SnapshotTemporaryDirectoryTestBase.java | 452 +++++++++++++++++ .../hbase/client/SnapshotWithAclTestBase.java | 50 +- .../apache/hadoop/hbase/client/TestAdmin.java | 99 ++-- .../hadoop/hbase/client/TestAdmin1.java | 67 ++- .../hadoop/hbase/client/TestAdmin2.java | 134 +++-- .../hadoop/hbase/client/TestAdmin3.java | 75 ++- .../hadoop/hbase/client/TestAdmin4.java | 26 +- .../hadoop/hbase/client/TestAdminBase.java | 26 +- .../TestAllowPartialScanResultCache.java | 25 +- .../hbase/client/TestAlwaysSetScannerId.java | 25 +- .../client/TestAppendFromClientSide.java | 37 +- .../hbase/client/TestAsyncAdminBuilder.java | 58 +-- .../hbase/client/TestAsyncBufferMutator.java | 33 +- .../TestAsyncClientPauseForRpcThrottling.java | 32 +- ...stAsyncClientPauseForServerOverloaded.java | 35 +- .../hbase/client/TestAsyncClientPushback.java | 19 +- .../client/TestAsyncNonMetaRegionLocator.java | 90 ++-- ...ncNonMetaRegionLocatorConcurrenyLimit.java | 29 +- .../hbase/client/TestAsyncRegionLocator.java | 31 +- .../client/TestAsyncResultScannerCursor.java | 21 +- 
...stAsyncSingleRequestRpcRetryingCaller.java | 27 +- .../hadoop/hbase/client/TestAsyncTable.java | 146 +++--- .../hbase/client/TestAsyncTableBatch.java | 84 ++-- .../TestAsyncTableBatchRetryImmediately.java | 25 +- .../TestAsyncTableGetMultiThreaded.java | 26 +- ...leGetMultiThreadedWithBasicCompaction.java | 19 +- ...leGetMultiThreadedWithEagerCompaction.java | 19 +- .../client/TestAsyncTableLocatePrefetch.java | 27 +- ...AsyncTableLocateRegionForDeletedTable.java | 25 +- .../client/TestAsyncTableNoncedRetry.java | 41 +- .../client/TestAsyncTableQueryMetrics.java | 58 ++- .../client/TestAsyncTableRSCrashPublish.java | 23 +- .../client/TestAsyncTableRegionLocator.java | 19 +- .../TestAsyncTableRegionReplicasGet.java | 25 +- .../TestAsyncTableRegionReplicasScan.java | 29 +- .../client/TestAsyncTableScanException.java | 49 +- ...TableScanMetricsWithScannerSuspending.java | 31 +- .../client/TestAsyncTableScanRenewLease.java | 23 +- ...AsyncTableScannerCloseWhileSuspending.java | 23 +- .../client/TestAsyncTableUseMetaReplicas.java | 34 +- ...tAvoidCellReferencesIntoShippedBlocks.java | 55 +-- .../client/TestBatchScanResultCache.java | 25 +- .../client/TestBlockEvictionFromClient.java | 96 ++-- .../hbase/client/TestBootstrapNodeUpdate.java | 25 +- .../hbase/client/TestBufferedMutator.java | 35 +- .../client/TestCIDeleteOperationTimeout.java | 11 +- .../hbase/client/TestCIDeleteRpcTimeout.java | 11 +- .../client/TestCIGetOperationTimeout.java | 11 +- .../hbase/client/TestCIGetRpcTimeout.java | 11 +- .../client/TestCIIncrementRpcTimeout.java | 11 +- .../client/TestCIPutOperationTimeout.java | 11 +- .../hbase/client/TestCIPutRpcTimeout.java | 11 +- .../hadoop/hbase/client/TestCISleep.java | 21 +- ...talogReplicaLoadBalanceSimpleSelector.java | 25 +- .../hbase/client/TestCheckAndMutate.java | 92 ++-- .../TestCheckAndMutateWithByteBuff.java | 44 +- .../hbase/client/TestCleanupMetaReplica.java | 19 +- .../TestCleanupMetaReplicaThroughConfig.java | 19 +- 
.../client/TestClientOperationInterrupt.java | 33 +- .../client/TestClientScannerTimeouts.java | 104 ++-- .../client/TestClientSideRegionScanner.java | 164 +++---- .../hbase/client/TestClientTableMetrics.java | 32 +- .../hbase/client/TestClientTimeouts.java | 25 +- .../TestCompleteResultScanResultCache.java | 25 +- .../hadoop/hbase/client/TestConnection.java | 88 ++-- .../client/TestConnectionAttributes.java | 23 +- .../hbase/client/TestConnectionUtils.java | 15 +- .../hbase/client/TestDropTimeoutRequest.java | 37 +- .../hadoop/hbase/client/TestEnableTable.java | 35 +- .../TestFailedMetaReplicaAssigment.java | 27 +- .../hbase/client/TestFallbackToUseReplay.java | 23 +- .../hbase/client/TestFlushFromClient.java | 48 +- ...hFromClientWithDisabledFlushProcedure.java | 43 +- .../hbase/client/TestGetProcedureResult.java | 23 +- ...tGetScanColumnsWithNewVersionBehavior.java | 23 +- .../client/TestGetScanPartialResult.java | 25 +- .../apache/hadoop/hbase/client/TestHbck.java | 86 ++-- .../client/TestIllegalTableDescriptor.java | 47 +- .../TestIncreaseMetaReplicaThroughConfig.java | 19 +- ...ncrementFromClientSideWithCoprocessor.java | 16 +- .../client/TestIncrementsFromClientSide.java | 105 ++-- .../hbase/client/TestIntraRowPagination.java | 13 +- ...estInvalidMutationDurabilityException.java | 50 +- .../client/TestLimitedScanWithFilter.java | 29 +- .../client/TestMalformedCellFromClient.java | 42 +- .../hbase/client/TestMasterRegistry.java | 26 +- .../hadoop/hbase/client/TestMetaCache.java | 41 +- .../client/TestMetaRegionLocationCache.java | 27 +- .../client/TestMetaReplicasAddressChange.java | 21 +- .../TestMetaTableAccessorNoCluster.java | 27 +- .../client/TestMetaWithReplicasBasic.java | 23 +- .../TestMetaWithReplicasShutdownHandling.java | 21 +- .../TestMobSnapshotCloneIndependence.java | 24 +- .../client/TestMobSnapshotFromClient.java | 24 +- .../TestMultiActionMetricsFromClient.java | 23 +- .../hbase/client/TestMultiParallel.java | 61 ++- 
.../hbase/client/TestMultiRespectsLimits.java | 39 +- .../hbase/client/TestMultipleTimestamps.java | 87 ++-- .../client/TestMutationGetCellBuilder.java | 43 +- .../client/TestMvccConsistentScanner.java | 42 +- .../client/TestPreadReversedScanner.java | 43 +- .../client/TestPutDeleteEtcCellIteration.java | 38 +- .../hbase/client/TestPutWithDelete.java | 76 ++- .../hbase/client/TestRawAsyncScanCursor.java | 31 +- ...estRawAsyncTableLimitedScanWithFilter.java | 27 +- .../client/TestRawAsyncTablePartialScan.java | 23 +- .../client/TestRegionLocationCaching.java | 33 +- .../hbase/client/TestRegionLocator.java | 19 +- .../hbase/client/TestReplicaWithCluster.java | 58 ++- .../hbase/client/TestReplicasClient.java | 121 +++-- .../hbase/client/TestRequestAttributes.java | 21 +- .../client/TestRequestTooBigException.java | 23 +- .../hadoop/hbase/client/TestResult.java | 27 +- .../client/TestResultFromCoprocessor.java | 23 +- .../hbase/client/TestResultScannerCursor.java | 21 +- .../client/TestResultSizeEstimation.java | 41 +- .../client/TestRpcConnectionRegistry.java | 33 +- .../hbase/client/TestScanAttributes.java | 28 +- .../client/TestScanWithoutFetchingData.java | 25 +- .../hbase/client/TestScannerTimeout.java | 27 +- .../client/TestScannersFromClientSide2.java | 78 ++- .../client/TestSeparateClientZKCluster.java | 37 +- .../hbase/client/TestServerBusyException.java | 35 +- .../client/TestServerLoadDurability.java | 59 +-- .../TestShutdownOfMetaReplicaHolder.java | 17 +- .../hadoop/hbase/client/TestSizeFailures.java | 22 +- .../client/TestSnapshotCloneIndependence.java | 366 +------------- .../TestSnapshotDFSTemporaryDirectory.java | 22 +- .../hbase/client/TestSnapshotFromClient.java | 455 +---------------- ...tSnapshotFromClientWithRegionReplicas.java | 29 +- .../hbase/client/TestSnapshotMetadata.java | 37 +- .../TestSnapshotTemporaryDirectory.java | 447 +---------------- ...tTemporaryDirectoryWithRegionReplicas.java | 28 +- .../hbase/client/TestSnapshotWithAcl.java | 11 
+- .../client/TestSnapshotWithAclAsyncAdmin.java | 11 +- .../client/TestSnapshotWithTTLFromClient.java | 37 +- .../client/TestSplitOrMergeAtTableLevel.java | 53 +- .../hbase/client/TestSplitOrMergeStatus.java | 56 +-- .../hbase/client/TestTableFavoredNodes.java | 120 +++-- .../client/TestTableOperationException.java | 74 +-- .../client/TestTableSnapshotScanner.java | 90 ++-- .../TestTableSnapshotScannerWithSFT.java | 11 +- .../hbase/client/TestTimestampsFilter.java | 79 ++- .../hbase/client/TestUpdateConfiguration.java | 38 +- .../client/TestZKConnectionRegistry.java | 47 +- .../hbase/client/locking/TestEntityLocks.java | 23 +- .../replication/TestBadReplicationPeer.java | 25 +- ...estReplicationAdminForSyncReplication.java | 25 +- 161 files changed, 3909 insertions(+), 4544 deletions(-) create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/client/SnapshotCloneIndependenceTestBase.java create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/client/SnapshotFromClientTestBase.java create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/client/SnapshotTemporaryDirectoryTestBase.java diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/AbstractTestAsyncTableRegionReplicasRead.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/AbstractTestAsyncTableRegionReplicasRead.java index 229045af93cf..0333a8359ea2 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/AbstractTestAsyncTableRegionReplicasRead.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/AbstractTestAsyncTableRegionReplicasRead.java @@ -17,10 +17,9 @@ */ package org.apache.hadoop.hbase.client; -import static org.junit.Assert.assertEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; import java.io.IOException; -import java.util.Arrays; import java.util.List; import java.util.Optional; import java.util.concurrent.ConcurrentHashMap; @@ -28,6 +27,7 @@ import 
java.util.concurrent.ForkJoinPool; import java.util.concurrent.atomic.AtomicInteger; import java.util.function.Supplier; +import java.util.stream.Stream; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.TableName; @@ -38,12 +38,9 @@ import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread; -import org.junit.AfterClass; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.TestName; -import org.junit.runners.Parameterized.Parameter; -import org.junit.runners.Parameterized.Parameters; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.TestTemplate; +import org.junit.jupiter.params.provider.Arguments; import org.apache.hbase.thirdparty.com.google.common.io.Closeables; @@ -65,25 +62,24 @@ public abstract class AbstractTestAsyncTableRegionReplicasRead { protected static AsyncConnection ASYNC_CONN; - @Rule - public TestName testName = new TestName(); + protected Supplier> getTable; - @Parameter - public Supplier> getTable; + public static Stream parameters() { + return Stream.of( + Arguments.of((Supplier>) AbstractTestAsyncTableRegionReplicasRead::getRawTable), + Arguments.of((Supplier>) AbstractTestAsyncTableRegionReplicasRead::getTable)); + } - private static AsyncTable getRawTable() { - return ASYNC_CONN.getTable(TABLE_NAME); + protected AbstractTestAsyncTableRegionReplicasRead(Supplier> getTable) { + this.getTable = getTable; } - private static AsyncTable getTable() { - return ASYNC_CONN.getTable(TABLE_NAME, ForkJoinPool.commonPool()); + protected static AsyncTable getRawTable() { + return ASYNC_CONN.getTable(TABLE_NAME); } - @Parameters - public static List params() { - return Arrays.asList( - new Supplier[] { AbstractTestAsyncTableRegionReplicasRead::getRawTable }, - new Supplier[] { AbstractTestAsyncTableRegionReplicasRead::getTable }); + protected 
static AsyncTable getTable() { + return ASYNC_CONN.getTable(TABLE_NAME, ForkJoinPool.commonPool()); } protected static volatile boolean FAIL_PRIMARY_GET = false; @@ -151,7 +147,7 @@ protected static void waitUntilAllReplicasHaveRow(byte[] row) throws IOException TEST_UTIL.waitFor(30000, () -> allReplicasHaveRow(row)); } - @AfterClass + @AfterAll public static void tearDownAfterClass() throws Exception { Closeables.close(ASYNC_CONN, true); TEST_UTIL.shutdownMiniCluster(); @@ -171,7 +167,7 @@ protected static int getPrimaryGetCount() { // replicaId = -1 means do not set replica protected abstract void readAndCheck(AsyncTable table, int replicaId) throws Exception; - @Test + @TestTemplate public void testNoReplicaRead() throws Exception { FAIL_PRIMARY_GET = false; REPLICA_ID_TO_COUNT.clear(); @@ -183,7 +179,7 @@ public void testNoReplicaRead() throws Exception { assertEquals(0, getSecondaryGetCount()); } - @Test + @TestTemplate public void testReplicaRead() throws Exception { // fail the primary get request FAIL_PRIMARY_GET = true; @@ -198,7 +194,7 @@ public void testReplicaRead() throws Exception { assertEquals(count, getPrimaryGetCount()); } - @Test + @TestTemplate public void testReadSpecificReplica() throws Exception { FAIL_PRIMARY_GET = false; REPLICA_ID_TO_COUNT.clear(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/AbstractTestCIOperationTimeout.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/AbstractTestCIOperationTimeout.java index 4f449e2e0cf3..8facdedf2124 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/AbstractTestCIOperationTimeout.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/AbstractTestCIOperationTimeout.java @@ -17,13 +17,13 @@ */ package org.apache.hadoop.hbase.client; -import static org.junit.Assert.fail; +import static org.junit.jupiter.api.Assertions.fail; import java.io.IOException; import java.net.SocketTimeoutException; import 
org.apache.hadoop.hbase.TableName; -import org.junit.Before; -import org.junit.Test; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -36,9 +36,9 @@ public abstract class AbstractTestCIOperationTimeout extends AbstractTestCITimeo private TableName tableName; - @Before + @BeforeEach public void setUp() throws IOException { - tableName = TableName.valueOf(name.getMethodName()); + tableName = name.getTableName(); TableDescriptor htd = TableDescriptorBuilder.newBuilder(tableName) .setCoprocessor(SleepAndFailFirstTime.class.getName()) .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAM_NAM)).build(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/AbstractTestCIRpcTimeout.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/AbstractTestCIRpcTimeout.java index 0fef02aca884..21674199b385 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/AbstractTestCIRpcTimeout.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/AbstractTestCIRpcTimeout.java @@ -17,14 +17,14 @@ */ package org.apache.hadoop.hbase.client; -import static org.junit.Assert.fail; +import static org.junit.jupiter.api.Assertions.fail; import java.io.IOException; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableName; -import org.junit.Before; -import org.junit.Test; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -37,9 +37,9 @@ public abstract class AbstractTestCIRpcTimeout extends AbstractTestCITimeout { private TableName tableName; - @Before + @BeforeEach public void setUp() throws IOException { - tableName = TableName.valueOf(name.getMethodName()); + tableName = name.getTableName(); TableDescriptor htd = 
TableDescriptorBuilder.newBuilder(tableName).setCoprocessor(SleepCoprocessor.class.getName()) .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAM_NAM)).build(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/AbstractTestCITimeout.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/AbstractTestCITimeout.java index b9cbd36f5848..d28992fe8dff 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/AbstractTestCITimeout.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/AbstractTestCITimeout.java @@ -25,6 +25,7 @@ import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.TableNameTestExtension; import org.apache.hadoop.hbase.coprocessor.ObserverContext; import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor; import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; @@ -32,10 +33,9 @@ import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Threads; import org.apache.hadoop.hbase.wal.WALEdit; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.Rule; -import org.junit.rules.TestName; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.extension.RegisterExtension; /** * Based class for testing timeout logic. @@ -46,8 +46,8 @@ public abstract class AbstractTestCITimeout { protected static final byte[] FAM_NAM = Bytes.toBytes("f"); - @Rule - public final TestName name = new TestName(); + @RegisterExtension + protected final TableNameTestExtension name = new TableNameTestExtension(); /** * This copro sleeps 20 second. The first call it fails. The second time, it works. 
@@ -146,7 +146,7 @@ public void preDelete(final ObserverContext createPuts(byte[][] rows, byte[][] families, byte[][] q return puts; } - @AfterClass - public static void tearDownAfterClass() throws Exception { + protected static void stopCluster() throws Exception { TEST_UTIL.shutdownMiniCluster(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/BasicReadWriteWithDifferentConnectionRegistriesTestBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/BasicReadWriteWithDifferentConnectionRegistriesTestBase.java index 86cc1a0cb969..123490816de8 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/BasicReadWriteWithDifferentConnectionRegistriesTestBase.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/BasicReadWriteWithDifferentConnectionRegistriesTestBase.java @@ -17,8 +17,8 @@ */ package org.apache.hadoop.hbase.client; -import static org.junit.Assert.assertArrayEquals; -import static org.junit.Assert.assertFalse; +import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; import java.io.IOException; import java.net.URI; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/ClientPushbackTestBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/ClientPushbackTestBase.java index 8ea36bed0360..1c300e687d2c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/ClientPushbackTestBase.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/ClientPushbackTestBase.java @@ -18,10 +18,10 @@ package org.apache.hadoop.hbase.client; import static org.apache.hadoop.hbase.client.MetricsConnection.CLIENT_SIDE_METRICS_ENABLED_KEY; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotEquals; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertEquals; 
+import static org.junit.jupiter.api.Assertions.assertNotEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertTrue; import java.io.IOException; import java.util.concurrent.CountDownLatch; @@ -39,9 +39,9 @@ import org.apache.hadoop.hbase.regionserver.Region; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.Test; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -58,7 +58,7 @@ public abstract class ClientPushbackTestBase { private static final byte[] qualifier = Bytes.toBytes("q"); private static final long flushSizeBytes = 512; - @BeforeClass + @BeforeAll public static void setupCluster() throws Exception { Configuration conf = UTIL.getConfiguration(); // enable backpressure @@ -77,7 +77,7 @@ public static void setupCluster() throws Exception { UTIL.createTable(tableName, family); } - @AfterClass + @AfterAll public static void cleanupCluster() throws Exception { UTIL.shutdownMiniCluster(); } @@ -112,11 +112,11 @@ public void testClientTracksServerPushback() throws Exception { // get the stats for the region hosting our table ClientBackoffPolicy backoffPolicy = getBackoffPolicy(); - assertTrue("Backoff policy is not correctly configured", - backoffPolicy instanceof ExponentialClientBackoffPolicy); + assertTrue(backoffPolicy instanceof ExponentialClientBackoffPolicy, + "Backoff policy is not correctly configured"); ServerStatisticTracker stats = getStatisticsTracker(); - assertNotNull("No stats configured for the client!", stats); + assertNotNull(stats, "No stats configured for the client!"); // get the names so we can query the stats ServerName server = rs.getServerName(); byte[] regionName = region.getRegionInfo().getRegionName(); @@ 
-124,11 +124,11 @@ public void testClientTracksServerPushback() throws Exception { // check to see we found some load on the memstore ServerStatistics serverStats = stats.getStats(server); ServerStatistics.RegionStatistics regionStats = serverStats.getStatsForRegion(regionName); - assertEquals("We did not find some load on the memstore", load, - regionStats.getMemStoreLoadPercent()); + assertEquals(load, regionStats.getMemStoreLoadPercent(), + "We did not find some load on the memstore"); // check that the load reported produces a nonzero delay long backoffTime = backoffPolicy.getBackoffTime(server, regionName, serverStats); - assertNotEquals("Reported load does not produce a backoff", 0, backoffTime); + assertNotEquals(0, backoffTime, "Reported load does not produce a backoff"); LOG.debug("Backoff calculated for " + region.getRegionInfo().getRegionNameAsString() + " @ " + server + " is " + backoffTime); @@ -153,12 +153,12 @@ public void testClientTracksServerPushback() throws Exception { assertEquals(1, runnerStats.delayRunners.getCount()); assertEquals(1, runnerStats.normalRunners.getCount()); - assertEquals("", runnerStats.delayIntevalHist.getSnapshot().getMean(), (double) backoffTime, - 0.1); + assertEquals(runnerStats.delayIntevalHist.getSnapshot().getMean(), (double) backoffTime, 0.1, + ""); latch.await(backoffTime * 2, TimeUnit.MILLISECONDS); - assertNotEquals("AsyncProcess did not submit the work time", 0, endTime.get()); - assertTrue("AsyncProcess did not delay long enough", endTime.get() - startTime >= backoffTime); + assertNotEquals(0, endTime.get(), "AsyncProcess did not submit the work time"); + assertTrue(endTime.get() - startTime >= backoffTime, "AsyncProcess did not delay long enough"); } @Test @@ -173,7 +173,7 @@ public void testMutateRowStats() throws IOException { mutateRow(mutations); ServerStatisticTracker stats = getStatisticsTracker(); - assertNotNull("No stats configured for the client!", stats); + assertNotNull(stats, "No stats configured 
for the client!"); // get the names so we can query the stats ServerName server = rs.getServerName(); byte[] regionName = region.getRegionInfo().getRegionName(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/MetaWithReplicasTestBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/MetaWithReplicasTestBase.java index 2b6b3d017fcb..779404dcb207 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/MetaWithReplicasTestBase.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/MetaWithReplicasTestBase.java @@ -17,9 +17,9 @@ */ package org.apache.hadoop.hbase.client; -import static org.junit.Assert.assertNotEquals; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertNotEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertTrue; import java.util.HashSet; import java.util.Set; @@ -27,19 +27,19 @@ import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.StartTestingClusterOption; import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.TableNameTestRule; +import org.apache.hadoop.hbase.TableNameTestExtension; import org.apache.hadoop.hbase.master.assignment.AssignmentManager; import org.apache.hadoop.hbase.master.assignment.AssignmentTestingUtil; import org.apache.hadoop.hbase.regionserver.StorefileRefresherChore; -import org.junit.AfterClass; -import org.junit.Rule; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.extension.RegisterExtension; import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** * Base class for testing the scenarios where replicas are enabled for the meta table. 
*/ -public class MetaWithReplicasTestBase { +public abstract class MetaWithReplicasTestBase { private static final Logger LOG = LoggerFactory.getLogger(MetaWithReplicasTestBase.class); @@ -47,8 +47,8 @@ public class MetaWithReplicasTestBase { protected static final int REGIONSERVERS_COUNT = 3; - @Rule - public TableNameTestRule name = new TableNameTestRule(); + @RegisterExtension + protected TableNameTestExtension name = new TableNameTestExtension(); protected static void startCluster() throws Exception { TEST_UTIL.getConfiguration().setInt("zookeeper.session.timeout", 30000); @@ -78,7 +78,7 @@ protected static void startCluster() throws Exception { // to fail ... sometimes. if (sns.size() == 1) { int count = TEST_UTIL.getMiniHBaseCluster().getLiveRegionServerThreads().size(); - assertTrue("count=" + count, count == REGIONSERVERS_COUNT); + assertTrue(count == REGIONSERVERS_COUNT, "count=" + count); LOG.warn("All hbase:meta replicas are on the one server; moving hbase:meta: " + sns); int metaServerIndex = TEST_UTIL.getHBaseCluster().getServerWithMeta(); int newServerIndex = metaServerIndex; @@ -99,7 +99,7 @@ protected static void startCluster() throws Exception { LOG.debug("All meta replicas assigned"); } - @AfterClass + @AfterAll public static void tearDown() throws Exception { TEST_UTIL.shutdownMiniCluster(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/RegionReplicaTestHelper.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/RegionReplicaTestHelper.java index a98ae217e3c2..ae8df5c0f1ef 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/RegionReplicaTestHelper.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/RegionReplicaTestHelper.java @@ -17,9 +17,9 @@ */ package org.apache.hadoop.hbase.client; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.fail; +import static 
org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.fail; import java.io.IOException; import java.util.ArrayList; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/SnapshotCloneIndependenceTestBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/SnapshotCloneIndependenceTestBase.java new file mode 100644 index 000000000000..dbde68a99966 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/SnapshotCloneIndependenceTestBase.java @@ -0,0 +1,365 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.client; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; + +import java.util.List; +import java.util.regex.Pattern; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.HBaseTestingUtil; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.master.snapshot.SnapshotManager; +import org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy; +import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; +import org.apache.hadoop.hbase.util.Threads; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestInfo; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Test to verify that the cloned table is independent of the table from which it was cloned + */ +public abstract class SnapshotCloneIndependenceTestBase { + + private static final Logger LOG = + LoggerFactory.getLogger(SnapshotCloneIndependenceTestBase.class); + + protected static final HBaseTestingUtil UTIL = new HBaseTestingUtil(); + + protected static final int NUM_RS = 2; + private static final String TEST_FAM_STR = "fam"; + protected static final byte[] TEST_FAM = Bytes.toBytes(TEST_FAM_STR); + private static final int CLEANER_INTERVAL = 100; + + private FileSystem fs; + private Path rootDir; + private Admin admin; + private TableName originalTableName; + private Table originalTable; + private TableName cloneTableName; + private int countOriginalTable; + String snapshotNameAsString; + String snapshotName; + + protected static void setupConf(Configuration conf) { + // Up 
the handlers; this test needs more than usual. + conf.setInt(HConstants.REGION_SERVER_HIGH_PRIORITY_HANDLER_COUNT, 15); + // enable snapshot support + conf.setBoolean(SnapshotManager.HBASE_SNAPSHOT_ENABLED, true); + // change the flush size to a small amount, regulating number of store files + conf.setInt("hbase.hregion.memstore.flush.size", 25000); + // so make sure we get a compaction when doing a load, but keep around + // some files in the store + conf.setInt("hbase.hstore.compaction.min", 10); + conf.setInt("hbase.hstore.compactionThreshold", 10); + // block writes if we get to 12 store files + conf.setInt("hbase.hstore.blockingStoreFiles", 12); + conf.setInt("hbase.regionserver.msginterval", 100); + conf.setBoolean("hbase.master.enabletable.roundrobin", true); + // Avoid potentially aggressive splitting which would cause snapshot to fail + conf.set(HConstants.HBASE_REGION_SPLIT_POLICY_KEY, + ConstantSizeRegionSplitPolicy.class.getName()); + // Execute cleaner frequently to induce failures + conf.setInt("hbase.master.cleaner.interval", CLEANER_INTERVAL); + conf.setInt("hbase.master.hfilecleaner.plugins.snapshot.period", CLEANER_INTERVAL); + // Effectively disable TimeToLiveHFileCleaner. Don't want to fully disable it because that + // will even trigger races between creating the directory containing back references and + // the back reference itself. 
+ conf.setInt("hbase.master.hfilecleaner.ttl", CLEANER_INTERVAL); + } + + @BeforeEach + public void setup(TestInfo testInfo) throws Exception { + fs = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getFileSystem(); + rootDir = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getRootDir(); + + admin = UTIL.getAdmin(); + originalTableName = TableName.valueOf("test" + testInfo.getTestMethod().get().getName()); + cloneTableName = TableName.valueOf("test-clone-" + originalTableName); + snapshotNameAsString = "snapshot_" + originalTableName; + snapshotName = snapshotNameAsString; + + originalTable = createTable(originalTableName, TEST_FAM); + loadData(originalTable, TEST_FAM); + countOriginalTable = countRows(originalTable); + System.out.println("Original table has: " + countOriginalTable + " rows"); + } + + @AfterEach + public void tearDown() throws Exception { + UTIL.deleteTable(originalTableName); + UTIL.deleteTable(cloneTableName); + SnapshotTestingUtils.deleteAllSnapshots(UTIL.getAdmin()); + SnapshotTestingUtils.deleteArchiveDirectory(UTIL); + } + + @AfterAll + public static void cleanupTest() throws Exception { + try { + UTIL.shutdownMiniCluster(); + } catch (Exception e) { + LOG.warn("failure shutting down cluster", e); + } + } + + /** + * Verify that adding data to the cloned table will not affect the original, and vice-versa when + * it is taken as an online snapshot. + */ + @Test + public void testOnlineSnapshotAppendIndependent() throws Exception { + createAndCloneSnapshot(true); + runTestSnapshotAppendIndependent(); + } + + /** + * Verify that adding data to the cloned table will not affect the original, and vice-versa when + * it is taken as an offline snapshot. 
+ */
+  @Test
+  public void testOfflineSnapshotAppendIndependent() throws Exception {
+    createAndCloneSnapshot(false);
+    runTestSnapshotAppendIndependent();
+  }
+
+  /**
+   * Verify that adding metadata to the cloned table will not affect the original, and vice-versa
+   * when it is taken as an online snapshot.
+   */
+  @Test
+  public void testOnlineSnapshotMetadataChangesIndependent() throws Exception {
+    createAndCloneSnapshot(true);
+    runTestSnapshotMetadataChangesIndependent();
+  }
+
+  /**
+   * Verify that adding metadata to the cloned table will not affect the original, and vice-versa
+   * when it is taken as an offline snapshot.
+   */
+  @Test
+  public void testOfflineSnapshotMetadataChangesIndependent() throws Exception {
+    createAndCloneSnapshot(false);
+    runTestSnapshotMetadataChangesIndependent();
+  }
+
+  /**
+   * Verify that region operations, in this case splitting a region, are independent between the
+   * cloned table and the original.
+   */
+  @Test
+  public void testOfflineSnapshotRegionOperationsIndependent() throws Exception {
+    createAndCloneSnapshot(false);
+    runTestRegionOperationsIndependent();
+  }
+
+  /**
+   * Verify that region operations, in this case splitting a region, are independent between the
+   * cloned table and the original.
+ */
+  @Test
+  public void testOnlineSnapshotRegionOperationsIndependent() throws Exception {
+    createAndCloneSnapshot(true);
+    runTestRegionOperationsIndependent();
+  }
+
+  @Test
+  public void testOfflineSnapshotDeleteIndependent() throws Exception {
+    createAndCloneSnapshot(false);
+    runTestSnapshotDeleteIndependent();
+  }
+
+  @Test
+  public void testOnlineSnapshotDeleteIndependent() throws Exception {
+    createAndCloneSnapshot(true);
+    runTestSnapshotDeleteIndependent();
+  }
+
+  private static void waitOnSplit(Connection c, final Table t, int originalCount) throws Exception {
+    for (int i = 0; i < 200; i++) {
+      Threads.sleepWithoutInterrupt(500);
+      try (RegionLocator locator = c.getRegionLocator(t.getName())) {
+        if (locator.getAllRegionLocations().size() > originalCount) {
+          return;
+        }
+      }
+    }
+    throw new Exception("Split did not increase the number of regions");
+  }
+
+  /**
+   * Takes the snapshot of originalTable and clones the snapshot to another table. If
+   * {@code online} is false, the original table is disabled during taking snapshot, so also enables
+   * it again.
+   * @param online - Whether the table is online or not during the snapshot
+   */
+  private void createAndCloneSnapshot(boolean online) throws Exception {
+    SnapshotTestingUtils.createSnapshotAndValidate(admin, originalTableName, TEST_FAM_STR,
+      snapshotNameAsString, rootDir, fs, online);
+
+    // If offline, enable the table disabled by snapshot testing util.
+    if (!online) {
+      admin.enableTable(originalTableName);
+      UTIL.waitTableAvailable(originalTableName);
+    }
+
+    admin.cloneSnapshot(snapshotName, cloneTableName);
+    UTIL.waitUntilAllRegionsAssigned(cloneTableName);
+  }
+
+  /**
+   * Verify that adding data to original table or clone table doesn't affect other table.
+ */
+  private void runTestSnapshotAppendIndependent() throws Exception {
+    try (Table clonedTable = UTIL.getConnection().getTable(cloneTableName)) {
+      final int clonedTableRowCount = countRows(clonedTable);
+
+      assertEquals(countOriginalTable, clonedTableRowCount,
+        "The line counts of original and cloned tables do not match after clone. ");
+
+      // Attempt to add data to the original table
+      Put p = new Put(Bytes.toBytes("new-row-" + EnvironmentEdgeManager.currentTime()));
+      p.addColumn(TEST_FAM, Bytes.toBytes("someQualifier"), Bytes.toBytes("someString"));
+      originalTable.put(p);
+
+      // Verify that the new row is in the original table but not in the cloned table
+      assertEquals(countOriginalTable + 1, countRows(originalTable),
+        "The row count of the original table was not modified by the put");
+      assertEquals(clonedTableRowCount, countRows(clonedTable),
+        "The row count of the cloned table changed as a result of addition to the original");
+
+      Put p2 = new Put(Bytes.toBytes("new-row-" + EnvironmentEdgeManager.currentTime()));
+      p2.addColumn(TEST_FAM, Bytes.toBytes("someQualifier"), Bytes.toBytes("someString"));
+      clonedTable.put(p2);
+
+      // Verify that the row is not added to the original table.
+ assertEquals(countOriginalTable + 1, countRows(originalTable), + "The row count of the original table was modified by the put to the clone"); + assertEquals(clonedTableRowCount + 1, countRows(clonedTable), + "The row count of the cloned table was not modified by the put"); + } + } + + /** + * Do a split, and verify that this only affects one table + */ + private void runTestRegionOperationsIndependent() throws Exception { + // Verify that region information is the same pre-split + UTIL.getConnection().clearRegionLocationCache(); + List originalTableHRegions = admin.getRegions(originalTableName); + + final int originalRegionCount = originalTableHRegions.size(); + final int cloneTableRegionCount = admin.getRegions(cloneTableName).size(); + assertEquals(originalRegionCount, cloneTableRegionCount, + "The number of regions in the cloned table is different than in the original table."); + + // Split a region on the parent table + admin.splitRegionAsync(originalTableHRegions.get(0).getRegionName()).get(); + waitOnSplit(UTIL.getConnection(), originalTable, originalRegionCount); + + // Verify that the cloned table region is not split + final int cloneTableRegionCount2 = admin.getRegions(cloneTableName).size(); + assertEquals(cloneTableRegionCount, cloneTableRegionCount2, + "The number of regions in the cloned table changed though none of its regions were split."); + } + + /** + * Add metadata, and verify that this only affects one table + */ + private void runTestSnapshotMetadataChangesIndependent() throws Exception { + // Add a new column family to the original table + byte[] TEST_FAM_2 = Bytes.toBytes("fam2"); + ColumnFamilyDescriptor familyDescriptor = ColumnFamilyDescriptorBuilder.of(TEST_FAM_2); + + admin.disableTable(originalTableName); + admin.addColumnFamily(originalTableName, familyDescriptor); + + // Verify that it is not in the snapshot + admin.enableTable(originalTableName); + UTIL.waitTableAvailable(originalTableName); + + // get a description of the cloned 
table + // get a list of its families + // assert that the family is there + TableDescriptor originalTableDescriptor = originalTable.getDescriptor(); + TableDescriptor clonedTableDescriptor = admin.getDescriptor(cloneTableName); + + assertTrue(originalTableDescriptor.hasColumnFamily(TEST_FAM), + "The original family was not found. There is something wrong. "); + assertTrue(clonedTableDescriptor.hasColumnFamily(TEST_FAM), + "The original family was not found in the clone. There is something wrong. "); + + assertTrue(originalTableDescriptor.hasColumnFamily(TEST_FAM_2), + "The new family was not found. "); + assertTrue(!clonedTableDescriptor.hasColumnFamily(TEST_FAM_2), + "The new family was not found. "); + } + + /** + * Verify that deleting the snapshot does not affect either table. + */ + private void runTestSnapshotDeleteIndependent() throws Exception { + // Ensure the original table does not reference the HFiles anymore + admin.majorCompact(originalTableName); + + // Deleting the snapshot used to break the cloned table by deleting in-use HFiles + admin.deleteSnapshot(snapshotName); + + // Wait for cleaner run and DFS heartbeats so that anything that is deletable is fully deleted + Pattern pattern = Pattern.compile(snapshotNameAsString); + do { + Thread.sleep(5000); + } while (!admin.listSnapshots(pattern).isEmpty()); + + try (Table original = UTIL.getConnection().getTable(originalTableName)) { + try (Table clonedTable = UTIL.getConnection().getTable(cloneTableName)) { + // Verify that all regions of both tables are readable + final int origTableRowCount = countRows(original); + final int clonedTableRowCount = countRows(clonedTable); + assertEquals(origTableRowCount, clonedTableRowCount); + } + } + } + + protected Table createTable(final TableName table, byte[] family) throws Exception { + Table t = UTIL.createTable(table, family); + // Wait for everything to be ready with the table + UTIL.waitUntilAllRegionsAssigned(table); + + // At this point the table should 
be good to go. + return t; + } + + public void loadData(final Table table, byte[]... families) throws Exception { + UTIL.loadTable(originalTable, TEST_FAM); + } + + protected int countRows(final Table table, final byte[]... families) throws Exception { + return UTIL.countRows(table, families); + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/SnapshotFromClientTestBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/SnapshotFromClientTestBase.java new file mode 100644 index 000000000000..3c1ed7590ad5 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/SnapshotFromClientTestBase.java @@ -0,0 +1,459 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.client; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; + +import java.util.ArrayList; +import java.util.List; +import java.util.regex.Pattern; +import java.util.stream.Stream; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.HBaseTestingUtil; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.TableNameTestExtension; +import org.apache.hadoop.hbase.TableNotFoundException; +import org.apache.hadoop.hbase.master.snapshot.SnapshotManager; +import org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy; +import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerFactory; +import org.apache.hadoop.hbase.snapshot.SnapshotCreationException; +import org.apache.hadoop.hbase.snapshot.SnapshotDoesNotExistException; +import org.apache.hadoop.hbase.snapshot.SnapshotManifestV1; +import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.CommonFSUtils; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.TestTemplate; +import org.junit.jupiter.api.extension.RegisterExtension; +import org.junit.jupiter.params.provider.Arguments; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.hbase.thirdparty.com.google.common.collect.Lists; + +import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; + +/** + * Test create/using/deleting snapshots from the client + *

+ * This is an end-to-end test for the snapshot utility + */ +public abstract class SnapshotFromClientTestBase { + + private static final Logger LOG = LoggerFactory.getLogger(SnapshotFromClientTestBase.class); + + protected static final HBaseTestingUtil UTIL = new HBaseTestingUtil(); + protected static final int NUM_RS = 2; + protected static final String STRING_TABLE_NAME = "test"; + protected static final byte[] TEST_FAM = Bytes.toBytes("fam"); + protected static final TableName TABLE_NAME = TableName.valueOf(STRING_TABLE_NAME); + private static final Pattern MATCH_ALL = Pattern.compile(".*"); + + @RegisterExtension + protected TableNameTestExtension name = new TableNameTestExtension(); + + protected final StoreFileTrackerFactory.Trackers trackerImpl; + + public SnapshotFromClientTestBase(StoreFileTrackerFactory.Trackers trackerImpl) { + this.trackerImpl = trackerImpl; + } + + public static Stream parameters() { + return Stream.of(Arguments.of(StoreFileTrackerFactory.Trackers.DEFAULT), + Arguments.of(StoreFileTrackerFactory.Trackers.FILE)); + } + + protected static void setupConf(Configuration conf) { + // disable the ui + conf.setInt("hbase.regionsever.info.port", -1); + // change the flush size to a small amount, regulating number of store files + conf.setInt("hbase.hregion.memstore.flush.size", 25000); + // so make sure we get a compaction when doing a load, but keep around some + // files in the store + conf.setInt("hbase.hstore.compaction.min", 10); + conf.setInt("hbase.hstore.compactionThreshold", 10); + // block writes if we get to 12 store files + conf.setInt("hbase.hstore.blockingStoreFiles", 12); + // Enable snapshot + conf.setBoolean(SnapshotManager.HBASE_SNAPSHOT_ENABLED, true); + conf.set(HConstants.HBASE_REGION_SPLIT_POLICY_KEY, + ConstantSizeRegionSplitPolicy.class.getName()); + } + + @BeforeEach + public void setup() throws Exception { + createTable(); + } + + protected void createTable() throws Exception { + TableDescriptor htd = + 
TableDescriptorBuilder.newBuilder(TABLE_NAME).setRegionReplication(getNumReplicas()) + .setValue(StoreFileTrackerFactory.TRACKER_IMPL, trackerImpl.name()).build(); + UTIL.createTable(htd, new byte[][] { TEST_FAM }, null); + } + + protected int getNumReplicas() { + return 1; + } + + @AfterEach + public void tearDown() throws Exception { + UTIL.deleteTable(TABLE_NAME); + SnapshotTestingUtils.deleteAllSnapshots(UTIL.getAdmin()); + SnapshotTestingUtils.deleteArchiveDirectory(UTIL); + } + + @AfterAll + public static void cleanupTest() throws Exception { + try { + UTIL.shutdownMiniCluster(); + } catch (Exception e) { + LOG.warn("failure shutting down cluster", e); + } + } + + /** + * Test snapshotting not allowed hbase:meta and -ROOT- + */ + @TestTemplate + public void testMetaTablesSnapshot() throws Exception { + assertThrows(IllegalArgumentException.class, + () -> UTIL.getAdmin().snapshot("metaSnapshot", TableName.META_TABLE_NAME)); + } + + /** + * Test HBaseAdmin#deleteSnapshots(String) which deletes snapshots whose names match the parameter + */ + @TestTemplate + public void testSnapshotDeletionWithRegex() throws Exception { + Admin admin = UTIL.getAdmin(); + // make sure we don't fail on listing snapshots + SnapshotTestingUtils.assertNoSnapshots(admin); + + // put some stuff in the table + Table table = UTIL.getConnection().getTable(TABLE_NAME); + UTIL.loadTable(table, TEST_FAM); + table.close(); + + String snapshot1 = "TableSnapshot1"; + admin.snapshot(snapshot1, TABLE_NAME); + LOG.debug("Snapshot1 completed."); + + String snapshot2 = "TableSnapshot2"; + admin.snapshot(snapshot2, TABLE_NAME); + LOG.debug("Snapshot2 completed."); + + String snapshot3 = "3rdTableSnapshot"; + admin.snapshot(snapshot3, TABLE_NAME); + LOG.debug(snapshot3 + " completed."); + + // delete the first two snapshots + admin.deleteSnapshots(Pattern.compile("TableSnapshot.*")); + List snapshots = admin.listSnapshots(); + assertEquals(1, snapshots.size()); + assertEquals(snapshot3, 
snapshots.get(0).getName()); + + admin.deleteSnapshot(snapshot3); + admin.close(); + } + + /** + * Test snapshotting a table that is offline + */ + @TestTemplate + public void testOfflineTableSnapshot() throws Exception { + Admin admin = UTIL.getAdmin(); + // make sure we don't fail on listing snapshots + SnapshotTestingUtils.assertNoSnapshots(admin); + + // put some stuff in the table + Table table = UTIL.getConnection().getTable(TABLE_NAME); + UTIL.loadTable(table, TEST_FAM, false); + + LOG.debug("FS state before disable:"); + CommonFSUtils.logFileSystemState(UTIL.getTestFileSystem(), + CommonFSUtils.getRootDir(UTIL.getConfiguration()), LOG); + // XXX if this is flakey, might want to consider using the async version and looping as + // disableTable can succeed and still timeout. + admin.disableTable(TABLE_NAME); + + LOG.debug("FS state before snapshot:"); + CommonFSUtils.logFileSystemState(UTIL.getTestFileSystem(), + CommonFSUtils.getRootDir(UTIL.getConfiguration()), LOG); + + // take a snapshot of the disabled table + final String SNAPSHOT_NAME = "offlineTableSnapshot"; + String snapshot = SNAPSHOT_NAME; + + admin.snapshot(new SnapshotDescription(SNAPSHOT_NAME, TABLE_NAME, SnapshotType.DISABLED, null, + -1, SnapshotManifestV1.DESCRIPTOR_VERSION, null)); + LOG.debug("Snapshot completed."); + + // make sure we have the snapshot + List snapshots = + SnapshotTestingUtils.assertOneSnapshotThatMatches(admin, snapshot, TABLE_NAME); + + // make sure its a valid snapshot + FileSystem fs = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getFileSystem(); + Path rootDir = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getRootDir(); + LOG.debug("FS state after snapshot:"); + CommonFSUtils.logFileSystemState(UTIL.getTestFileSystem(), + CommonFSUtils.getRootDir(UTIL.getConfiguration()), LOG); + SnapshotTestingUtils.confirmSnapshotValid( + ProtobufUtil.createHBaseProtosSnapshotDesc(snapshots.get(0)), TABLE_NAME, TEST_FAM, rootDir, + admin, fs); + + 
admin.deleteSnapshot(snapshot); + snapshots = admin.listSnapshots(); + SnapshotTestingUtils.assertNoSnapshots(admin); + } + + @TestTemplate + public void testSnapshotFailsOnNonExistantTable() throws Exception { + Admin admin = UTIL.getAdmin(); + // make sure we don't fail on listing snapshots + SnapshotTestingUtils.assertNoSnapshots(admin); + String tableName = "_not_a_table"; + + // make sure the table doesn't exist + boolean fail = false; + do { + try { + admin.getDescriptor(TableName.valueOf(tableName)); + fail = true; + LOG.error("Table:" + tableName + " already exists, checking a new name"); + tableName = tableName + "!"; + } catch (TableNotFoundException e) { + fail = false; + } + } while (fail); + + // snapshot the non-existant table + try { + admin.snapshot("fail", TableName.valueOf(tableName)); + fail("Snapshot succeeded even though there is not table."); + } catch (SnapshotCreationException e) { + LOG.info("Correctly failed to snapshot a non-existant table:" + e.getMessage()); + } + } + + @TestTemplate + public void testOfflineTableSnapshotWithEmptyRegions() throws Exception { + // test with an empty table with one region + + Admin admin = UTIL.getAdmin(); + // make sure we don't fail on listing snapshots + SnapshotTestingUtils.assertNoSnapshots(admin); + + LOG.debug("FS state before disable:"); + CommonFSUtils.logFileSystemState(UTIL.getTestFileSystem(), + CommonFSUtils.getRootDir(UTIL.getConfiguration()), LOG); + admin.disableTable(TABLE_NAME); + + LOG.debug("FS state before snapshot:"); + CommonFSUtils.logFileSystemState(UTIL.getTestFileSystem(), + CommonFSUtils.getRootDir(UTIL.getConfiguration()), LOG); + + // take a snapshot of the disabled table + String snapshot = "testOfflineTableSnapshotWithEmptyRegions"; + admin.snapshot(snapshot, TABLE_NAME); + LOG.debug("Snapshot completed."); + + // make sure we have the snapshot + List snapshots = + SnapshotTestingUtils.assertOneSnapshotThatMatches(admin, snapshot, TABLE_NAME); + + // make sure its a valid 
snapshot + FileSystem fs = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getFileSystem(); + Path rootDir = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getRootDir(); + LOG.debug("FS state after snapshot:"); + CommonFSUtils.logFileSystemState(UTIL.getTestFileSystem(), + CommonFSUtils.getRootDir(UTIL.getConfiguration()), LOG); + + List emptyCfs = Lists.newArrayList(TEST_FAM); // no file in the region + List nonEmptyCfs = Lists.newArrayList(); + SnapshotTestingUtils.confirmSnapshotValid( + ProtobufUtil.createHBaseProtosSnapshotDesc(snapshots.get(0)), TABLE_NAME, nonEmptyCfs, + emptyCfs, rootDir, admin, fs); + + admin.deleteSnapshot(snapshot); + snapshots = admin.listSnapshots(); + SnapshotTestingUtils.assertNoSnapshots(admin); + } + + @TestTemplate + public void testListTableSnapshots() throws Exception { + Admin admin = null; + final TableName tableName = name.getTableName(); + try { + admin = UTIL.getAdmin(); + + TableDescriptor htd = TableDescriptorBuilder.newBuilder(tableName).build(); + UTIL.createTable(htd, new byte[][] { TEST_FAM }, UTIL.getConfiguration()); + + String table1Snapshot1 = "Table1Snapshot1"; + admin.snapshot(table1Snapshot1, TABLE_NAME); + LOG.debug("Snapshot1 completed."); + + String table1Snapshot2 = "Table1Snapshot2"; + admin.snapshot(table1Snapshot2, TABLE_NAME); + LOG.debug("Snapshot2 completed."); + + String table2Snapshot1 = "Table2Snapshot1"; + admin.snapshot(table2Snapshot1, tableName); + LOG.debug(table2Snapshot1 + " completed."); + + List listTableSnapshots = + admin.listTableSnapshots(Pattern.compile("test.*"), MATCH_ALL); + List listTableSnapshotNames = new ArrayList<>(); + assertEquals(3, listTableSnapshots.size()); + for (SnapshotDescription s : listTableSnapshots) { + listTableSnapshotNames.add(s.getName()); + } + assertTrue(listTableSnapshotNames.contains(table1Snapshot1)); + assertTrue(listTableSnapshotNames.contains(table1Snapshot2)); + assertTrue(listTableSnapshotNames.contains(table2Snapshot1)); + } 
finally { + if (admin != null) { + try { + admin.deleteSnapshots(Pattern.compile("Table.*")); + } catch (SnapshotDoesNotExistException ignore) { + } + if (admin.tableExists(tableName)) { + UTIL.deleteTable(tableName); + } + admin.close(); + } + } + } + + @TestTemplate + public void testListTableSnapshotsWithRegex() throws Exception { + Admin admin = null; + try { + admin = UTIL.getAdmin(); + + String table1Snapshot1 = "Table1Snapshot1"; + admin.snapshot(table1Snapshot1, TABLE_NAME); + LOG.debug("Snapshot1 completed."); + + String table1Snapshot2 = "Table1Snapshot2"; + admin.snapshot(table1Snapshot2, TABLE_NAME); + LOG.debug("Snapshot2 completed."); + + String table2Snapshot1 = "Table2Snapshot1"; + admin.snapshot(table2Snapshot1, TABLE_NAME); + LOG.debug(table2Snapshot1 + " completed."); + + List listTableSnapshots = + admin.listTableSnapshots(Pattern.compile("test.*"), Pattern.compile("Table1.*")); + List listTableSnapshotNames = new ArrayList<>(); + assertEquals(2, listTableSnapshots.size()); + for (SnapshotDescription s : listTableSnapshots) { + listTableSnapshotNames.add(s.getName()); + } + assertTrue(listTableSnapshotNames.contains(table1Snapshot1)); + assertTrue(listTableSnapshotNames.contains(table1Snapshot2)); + assertFalse(listTableSnapshotNames.contains(table2Snapshot1)); + } finally { + if (admin != null) { + try { + admin.deleteSnapshots(Pattern.compile("Table.*")); + } catch (SnapshotDoesNotExistException ignore) { + } + admin.close(); + } + } + } + + @TestTemplate + public void testDeleteTableSnapshots() throws Exception { + Admin admin = null; + final TableName tableName = name.getTableName(); + try { + admin = UTIL.getAdmin(); + + TableDescriptor htd = TableDescriptorBuilder.newBuilder(tableName).build(); + UTIL.createTable(htd, new byte[][] { TEST_FAM }, UTIL.getConfiguration()); + + String table1Snapshot1 = "Table1Snapshot1"; + admin.snapshot(table1Snapshot1, TABLE_NAME); + LOG.debug("Snapshot1 completed."); + + String table1Snapshot2 = 
"Table1Snapshot2"; + admin.snapshot(table1Snapshot2, TABLE_NAME); + LOG.debug("Snapshot2 completed."); + + String table2Snapshot1 = "Table2Snapshot1"; + admin.snapshot(table2Snapshot1, tableName); + LOG.debug(table2Snapshot1 + " completed."); + + Pattern tableNamePattern = Pattern.compile("test.*"); + admin.deleteTableSnapshots(tableNamePattern, MATCH_ALL); + assertEquals(0, admin.listTableSnapshots(tableNamePattern, MATCH_ALL).size()); + } finally { + if (admin != null) { + if (admin.tableExists(tableName)) { + UTIL.deleteTable(tableName); + } + admin.close(); + } + } + } + + @TestTemplate + public void testDeleteTableSnapshotsWithRegex() throws Exception { + Admin admin = null; + Pattern tableNamePattern = Pattern.compile("test.*"); + try { + admin = UTIL.getAdmin(); + + String table1Snapshot1 = "Table1Snapshot1"; + admin.snapshot(table1Snapshot1, TABLE_NAME); + LOG.debug("Snapshot1 completed."); + + String table1Snapshot2 = "Table1Snapshot2"; + admin.snapshot(table1Snapshot2, TABLE_NAME); + LOG.debug("Snapshot2 completed."); + + String table2Snapshot1 = "Table2Snapshot1"; + admin.snapshot(table2Snapshot1, TABLE_NAME); + LOG.debug(table2Snapshot1 + " completed."); + + admin.deleteTableSnapshots(tableNamePattern, Pattern.compile("Table1.*")); + assertEquals(1, admin.listTableSnapshots(tableNamePattern, MATCH_ALL).size()); + } finally { + if (admin != null) { + try { + admin.deleteTableSnapshots(tableNamePattern, MATCH_ALL); + } catch (SnapshotDoesNotExistException ignore) { + } + admin.close(); + } + } + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/SnapshotTemporaryDirectoryTestBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/SnapshotTemporaryDirectoryTestBase.java new file mode 100644 index 000000000000..9dad3e4b0a79 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/SnapshotTemporaryDirectoryTestBase.java @@ -0,0 +1,452 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under 
one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.client; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; + +import java.io.File; +import java.io.IOException; +import java.nio.file.Paths; +import java.util.ArrayList; +import java.util.Iterator; +import java.util.List; +import java.util.UUID; +import java.util.regex.Pattern; +import java.util.stream.Stream; +import org.apache.commons.io.FileUtils; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.HBaseTestingUtil; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.master.snapshot.SnapshotManager; +import org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy; +import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils; +import org.apache.hadoop.hbase.snapshot.SnapshotDoesNotExistException; +import org.apache.hadoop.hbase.snapshot.SnapshotManifestV1; +import org.apache.hadoop.hbase.snapshot.SnapshotManifestV2; +import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils; 
+import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.CommonFSUtils; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.TestTemplate; +import org.junit.jupiter.params.provider.Arguments; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.hbase.thirdparty.com.google.common.collect.Lists; + +import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; + +/** + * This class tests that the use of a temporary snapshot directory supports snapshot functionality + * while the temporary directory is on a different file system than the root directory + *

+ * This is an end-to-end test for the snapshot utility + */ +public abstract class SnapshotTemporaryDirectoryTestBase { + + public static Stream parameters() { + return Stream.of(Arguments.of(SnapshotManifestV1.DESCRIPTOR_VERSION), + Arguments.of(SnapshotManifestV2.DESCRIPTOR_VERSION)); + } + + private final int manifestVersion; + + public SnapshotTemporaryDirectoryTestBase(int manifestVersion) { + this.manifestVersion = manifestVersion; + } + + private static final Logger LOG = + LoggerFactory.getLogger(SnapshotTemporaryDirectoryTestBase.class); + protected static final int NUM_RS = 2; + protected static String TEMP_DIR = + Paths.get("").toAbsolutePath().toString() + Path.SEPARATOR + UUID.randomUUID().toString(); + + protected static Admin admin; + protected static final HBaseTestingUtil UTIL = new HBaseTestingUtil(); + protected static final String STRING_TABLE_NAME = "test"; + protected static final byte[] TEST_FAM = Bytes.toBytes("fam"); + protected static final TableName TABLE_NAME = TableName.valueOf(STRING_TABLE_NAME); + + protected static void setupConf(Configuration conf) throws IOException { + // disable the ui + conf.setInt("hbase.regionsever.info.port", -1); + // change the flush size to a small amount, regulating number of store files + conf.setInt("hbase.hregion.memstore.flush.size", 25000); + // so make sure we get a compaction when doing a load, but keep around some + // files in the store + conf.setInt("hbase.hstore.compaction.min", 10); + conf.setInt("hbase.hstore.compactionThreshold", 10); + // block writes if we get to 12 store files + conf.setInt("hbase.hstore.blockingStoreFiles", 12); + // Enable snapshot + conf.setBoolean(SnapshotManager.HBASE_SNAPSHOT_ENABLED, true); + conf.set(HConstants.HBASE_REGION_SPLIT_POLICY_KEY, + ConstantSizeRegionSplitPolicy.class.getName()); + conf.set(SnapshotDescriptionUtils.SNAPSHOT_WORKING_DIR, + "file://" + new Path(TEMP_DIR, ".tmpDir").toUri()); + } + + @BeforeEach + public void setup() throws Exception { + 
TableDescriptor htd = + TableDescriptorBuilder.newBuilder(TABLE_NAME).setRegionReplication(getNumReplicas()).build(); + UTIL.createTable(htd, new byte[][] { TEST_FAM }, UTIL.getConfiguration()); + } + + protected int getNumReplicas() { + return 1; + } + + @AfterEach + public void tearDown() throws Exception { + UTIL.deleteTable(TABLE_NAME); + SnapshotTestingUtils.deleteAllSnapshots(UTIL.getAdmin()); + SnapshotTestingUtils.deleteArchiveDirectory(UTIL); + } + + @AfterAll + public static void cleanupTest() { + try { + UTIL.shutdownMiniCluster(); + FileUtils.deleteDirectory(new File(TEMP_DIR)); + } catch (Exception e) { + LOG.warn("failure shutting down cluster", e); + } + } + + @TestTemplate + public void testRestoreDisabledSnapshot() throws IOException, InterruptedException { + long tid = EnvironmentEdgeManager.currentTime(); + TableName tableName = TableName.valueOf("testtb-" + tid); + String emptySnapshot = "emptySnaptb-" + tid; + String snapshotName0 = "snaptb0-" + tid; + String snapshotName1 = "snaptb1-" + tid; + int snapshot0Rows; + int snapshot1Rows; + + // create Table and disable it + SnapshotTestingUtils.createTable(UTIL, tableName, getNumReplicas(), TEST_FAM); + admin.disableTable(tableName); + + // take an empty snapshot + takeSnapshot(tableName, emptySnapshot, true); + + // enable table and insert data + admin.enableTable(tableName); + SnapshotTestingUtils.loadData(UTIL, tableName, 500, TEST_FAM); + try (Table table = UTIL.getConnection().getTable(tableName)) { + snapshot0Rows = UTIL.countRows(table); + } + admin.disableTable(tableName); + + // take a snapshot + takeSnapshot(tableName, snapshotName0, true); + + // enable table and insert more data + admin.enableTable(tableName); + SnapshotTestingUtils.loadData(UTIL, tableName, 500, TEST_FAM); + try (Table table = UTIL.getConnection().getTable(tableName)) { + snapshot1Rows = UTIL.countRows(table); + } + + SnapshotTestingUtils.verifyRowCount(UTIL, tableName, snapshot1Rows); + admin.disableTable(tableName); 
+ takeSnapshot(tableName, snapshotName1, true); + + // Restore from snapshot-0 + admin.restoreSnapshot(snapshotName0); + admin.enableTable(tableName); + SnapshotTestingUtils.verifyRowCount(UTIL, tableName, snapshot0Rows); + SnapshotTestingUtils.verifyReplicasCameOnline(tableName, admin, getNumReplicas()); + + // Restore from emptySnapshot + admin.disableTable(tableName); + admin.restoreSnapshot(emptySnapshot); + admin.enableTable(tableName); + SnapshotTestingUtils.verifyRowCount(UTIL, tableName, 0); + SnapshotTestingUtils.verifyReplicasCameOnline(tableName, admin, getNumReplicas()); + + // Restore from snapshot-1 + admin.disableTable(tableName); + admin.restoreSnapshot(snapshotName1); + admin.enableTable(tableName); + SnapshotTestingUtils.verifyRowCount(UTIL, tableName, snapshot1Rows); + SnapshotTestingUtils.verifyReplicasCameOnline(tableName, admin, getNumReplicas()); + + // Restore from snapshot-1 + UTIL.deleteTable(tableName); + admin.restoreSnapshot(snapshotName1); + SnapshotTestingUtils.verifyRowCount(UTIL, tableName, snapshot1Rows); + SnapshotTestingUtils.verifyReplicasCameOnline(tableName, admin, getNumReplicas()); + } + + @TestTemplate + public void testRestoreEnabledSnapshot() throws IOException, InterruptedException { + long tid = EnvironmentEdgeManager.currentTime(); + TableName tableName = TableName.valueOf("testtb-" + tid); + String emptySnapshot = "emptySnaptb-" + tid; + String snapshotName0 = "snaptb0-" + tid; + String snapshotName1 = "snaptb1-" + tid; + int snapshot0Rows; + int snapshot1Rows; + + // create Table + SnapshotTestingUtils.createTable(UTIL, tableName, getNumReplicas(), TEST_FAM); + + // take an empty snapshot + takeSnapshot(tableName, emptySnapshot, false); + + // Insert data + SnapshotTestingUtils.loadData(UTIL, tableName, 500, TEST_FAM); + try (Table table = UTIL.getConnection().getTable(tableName)) { + snapshot0Rows = UTIL.countRows(table); + } + + // take a snapshot + takeSnapshot(tableName, snapshotName0, false); + + // Insert more 
data + SnapshotTestingUtils.loadData(UTIL, tableName, 500, TEST_FAM); + try (Table table = UTIL.getConnection().getTable(tableName)) { + snapshot1Rows = UTIL.countRows(table); + } + + SnapshotTestingUtils.verifyRowCount(UTIL, tableName, snapshot1Rows); + takeSnapshot(tableName, snapshotName1, false); + + // Restore from snapshot-0 + admin.disableTable(tableName); + admin.restoreSnapshot(snapshotName0); + admin.enableTable(tableName); + SnapshotTestingUtils.verifyRowCount(UTIL, tableName, snapshot0Rows); + SnapshotTestingUtils.verifyReplicasCameOnline(tableName, admin, getNumReplicas()); + + // Restore from emptySnapshot + admin.disableTable(tableName); + admin.restoreSnapshot(emptySnapshot); + admin.enableTable(tableName); + SnapshotTestingUtils.verifyRowCount(UTIL, tableName, 0); + SnapshotTestingUtils.verifyReplicasCameOnline(tableName, admin, getNumReplicas()); + + // Restore from snapshot-1 + admin.disableTable(tableName); + admin.restoreSnapshot(snapshotName1); + admin.enableTable(tableName); + SnapshotTestingUtils.verifyRowCount(UTIL, tableName, snapshot1Rows); + SnapshotTestingUtils.verifyReplicasCameOnline(tableName, admin, getNumReplicas()); + + // Restore from snapshot-1 + UTIL.deleteTable(tableName); + admin.restoreSnapshot(snapshotName1); + SnapshotTestingUtils.verifyRowCount(UTIL, tableName, snapshot1Rows); + SnapshotTestingUtils.verifyReplicasCameOnline(tableName, admin, getNumReplicas()); + } + + /** + * Test snapshotting a table that is offline + * @throws Exception if snapshot does not complete successfully + */ + @TestTemplate + public void testOfflineTableSnapshot() throws Exception { + Admin admin = UTIL.getAdmin(); + // make sure we don't fail on listing snapshots + SnapshotTestingUtils.assertNoSnapshots(admin); + + // put some stuff in the table + Table table = UTIL.getConnection().getTable(TABLE_NAME); + UTIL.loadTable(table, TEST_FAM, false); + + LOG.debug("FS state before disable:"); + 
CommonFSUtils.logFileSystemState(UTIL.getTestFileSystem(), + CommonFSUtils.getRootDir(UTIL.getConfiguration()), LOG); + // XXX if this is flakey, might want to consider using the async version and looping as + // disableTable can succeed and still timeout. + admin.disableTable(TABLE_NAME); + + LOG.debug("FS state before snapshot:"); + CommonFSUtils.logFileSystemState(UTIL.getTestFileSystem(), + CommonFSUtils.getRootDir(UTIL.getConfiguration()), LOG); + + // take a snapshot of the disabled table + final String SNAPSHOT_NAME = "offlineTableSnapshot"; + String snapshot = SNAPSHOT_NAME; + takeSnapshot(TABLE_NAME, SNAPSHOT_NAME, true); + LOG.debug("Snapshot completed."); + + // make sure we have the snapshot + List snapshots = + SnapshotTestingUtils.assertOneSnapshotThatMatches(admin, snapshot, TABLE_NAME); + + // make sure its a valid snapshot + FileSystem fs = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getFileSystem(); + Path rootDir = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getRootDir(); + LOG.debug("FS state after snapshot:"); + CommonFSUtils.logFileSystemState(UTIL.getTestFileSystem(), + CommonFSUtils.getRootDir(UTIL.getConfiguration()), LOG); + + SnapshotTestingUtils.confirmSnapshotValid( + ProtobufUtil.createHBaseProtosSnapshotDesc(snapshots.get(0)), TABLE_NAME, TEST_FAM, rootDir, + admin, fs); + + admin.deleteSnapshot(snapshot); + SnapshotTestingUtils.assertNoSnapshots(admin); + } + + /** + * Tests that snapshot has correct contents by taking snapshot, cloning it, then affirming the + * contents of the original and cloned table match + * @throws Exception if snapshot does not complete successfully + */ + @TestTemplate + public void testSnapshotCloneContents() throws Exception { + // make sure we don't fail on listing snapshots + SnapshotTestingUtils.assertNoSnapshots(admin); + + // put some stuff in the table + Table table = UTIL.getConnection().getTable(TABLE_NAME); + UTIL.loadTable(table, TEST_FAM); + table.close(); + + String 
snapshot1 = "TableSnapshot1"; + takeSnapshot(TABLE_NAME, snapshot1, false); + LOG.debug("Snapshot1 completed."); + + TableName clone = TableName.valueOf("Table1Clone"); + admin.cloneSnapshot(snapshot1, clone, false); + + Scan original = new Scan(); + Scan cloned = new Scan(); + ResultScanner originalScan = admin.getConnection().getTable(TABLE_NAME).getScanner(original); + ResultScanner clonedScan = + admin.getConnection().getTable(TableName.valueOf("Table1Clone")).getScanner(cloned); + + Iterator i = originalScan.iterator(); + Iterator i2 = clonedScan.iterator(); + assertTrue(i.hasNext()); + while (i.hasNext()) { + assertTrue(i2.hasNext()); + assertEquals(Bytes.toString(i.next().getValue(TEST_FAM, new byte[] {})), + Bytes.toString(i2.next().getValue(TEST_FAM, new byte[] {}))); + } + assertFalse(i2.hasNext()); + admin.deleteSnapshot(snapshot1); + UTIL.deleteTable(clone); + admin.close(); + } + + @TestTemplate + public void testOfflineTableSnapshotWithEmptyRegion() throws Exception { + // test with an empty table with one region + + // make sure we don't fail on listing snapshots + SnapshotTestingUtils.assertNoSnapshots(admin); + + LOG.debug("FS state before disable:"); + CommonFSUtils.logFileSystemState(UTIL.getTestFileSystem(), + CommonFSUtils.getRootDir(UTIL.getConfiguration()), LOG); + admin.disableTable(TABLE_NAME); + + LOG.debug("FS state before snapshot:"); + CommonFSUtils.logFileSystemState(UTIL.getTestFileSystem(), + CommonFSUtils.getRootDir(UTIL.getConfiguration()), LOG); + + // take a snapshot of the disabled table + String snapshot = "testOfflineTableSnapshotWithEmptyRegion"; + takeSnapshot(TABLE_NAME, snapshot, true); + LOG.debug("Snapshot completed."); + + // make sure we have the snapshot + List snapshots = + SnapshotTestingUtils.assertOneSnapshotThatMatches(admin, snapshot, TABLE_NAME); + + // make sure its a valid snapshot + FileSystem fs = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getFileSystem(); + Path rootDir = 
UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getRootDir(); + LOG.debug("FS state after snapshot:"); + CommonFSUtils.logFileSystemState(UTIL.getTestFileSystem(), + CommonFSUtils.getRootDir(UTIL.getConfiguration()), LOG); + + List emptyCfs = Lists.newArrayList(TEST_FAM); // no file in the region + List nonEmptyCfs = Lists.newArrayList(); + SnapshotTestingUtils.confirmSnapshotValid( + ProtobufUtil.createHBaseProtosSnapshotDesc(snapshots.get(0)), TABLE_NAME, nonEmptyCfs, + emptyCfs, rootDir, admin, fs); + + admin.deleteSnapshot(snapshot); + SnapshotTestingUtils.assertNoSnapshots(admin); + } + + // Ensures that the snapshot is transferred to the proper completed snapshot directory + @TestTemplate + public void testEnsureTemporaryDirectoryTransfer() throws Exception { + Admin admin = UTIL.getAdmin(); + TableName tableName2 = TableName.valueOf("testListTableSnapshots"); + try { + TableDescriptor htd = TableDescriptorBuilder.newBuilder(tableName2).build(); + UTIL.createTable(htd, new byte[][] { TEST_FAM }, UTIL.getConfiguration()); + + String table1Snapshot1 = "Table1Snapshot1"; + takeSnapshot(TABLE_NAME, table1Snapshot1, false); + LOG.debug("Snapshot1 completed."); + + String table1Snapshot2 = "Table1Snapshot2"; + takeSnapshot(TABLE_NAME, table1Snapshot2, false); + LOG.debug("Snapshot2 completed."); + + String table2Snapshot1 = "Table2Snapshot1"; + takeSnapshot(TABLE_NAME, table2Snapshot1, false); + LOG.debug("Table2Snapshot1 completed."); + + List listTableSnapshots = + admin.listTableSnapshots(Pattern.compile("test.*"), Pattern.compile(".*")); + List listTableSnapshotNames = new ArrayList(); + assertEquals(3, listTableSnapshots.size()); + for (SnapshotDescription s : listTableSnapshots) { + listTableSnapshotNames.add(s.getName()); + } + assertTrue(listTableSnapshotNames.contains(table1Snapshot1)); + assertTrue(listTableSnapshotNames.contains(table1Snapshot2)); + assertTrue(listTableSnapshotNames.contains(table2Snapshot1)); + } finally { + try { + 
admin.deleteSnapshots(Pattern.compile("Table.*")); + } catch (SnapshotDoesNotExistException ignore) { + } + if (admin.tableExists(tableName2)) { + UTIL.deleteTable(tableName2); + } + admin.close(); + } + } + + private void takeSnapshot(TableName tableName, String snapshotName, boolean disabled) + throws IOException { + SnapshotType type = disabled ? SnapshotType.DISABLED : SnapshotType.FLUSH; + SnapshotDescription desc = + new SnapshotDescription(snapshotName, tableName, type, null, -1, manifestVersion, null); + admin.snapshot(desc); + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/SnapshotWithAclTestBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/SnapshotWithAclTestBase.java index 72e0a1a265d9..f6acefe2b76a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/SnapshotWithAclTestBase.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/SnapshotWithAclTestBase.java @@ -17,11 +17,12 @@ */ package org.apache.hadoop.hbase.client; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertThrows; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; +import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; import java.io.IOException; import java.util.List; @@ -34,7 +35,7 @@ import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.TableNameTestRule; +import org.apache.hadoop.hbase.TableNameTestExtension; import org.apache.hadoop.hbase.coprocessor.CoprocessorHost; import 
org.apache.hadoop.hbase.master.MasterCoprocessorHost; import org.apache.hadoop.hbase.security.User; @@ -48,19 +49,18 @@ import org.apache.hadoop.hbase.snapshot.SnapshotDoesNotExistException; import org.apache.hadoop.hbase.snapshot.SnapshotManifest; import org.apache.hadoop.hbase.util.Bytes; -import org.junit.AfterClass; -import org.junit.Assert; -import org.junit.Before; -import org.junit.BeforeClass; -import org.junit.Rule; -import org.junit.Test; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.RegisterExtension; public abstract class SnapshotWithAclTestBase extends SecureTestUtil { - @Rule - public TableNameTestRule name = new TableNameTestRule(); + @RegisterExtension + protected TableNameTestExtension name = new TableNameTestExtension(); - private TableName TEST_TABLE = TableName.valueOf(TEST_UTIL.getRandomUUID().toString()); + private TableName TEST_TABLE = TableName.valueOf(HBaseTestingUtil.getRandomUUID().toString()); private static final int ROW_COUNT = 30000; @@ -118,7 +118,7 @@ public Object run() throws Exception { } } - @BeforeClass + @BeforeAll public static void setupBeforeClass() throws Exception { Configuration conf = TEST_UTIL.getConfiguration(); // Enable security @@ -145,7 +145,7 @@ public static void setupBeforeClass() throws Exception { grantGlobal(TEST_UTIL, USER_OWNER.getShortName(), Permission.Action.CREATE); } - @Before + @BeforeEach public void setUp() throws Exception { TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(TEST_TABLE) @@ -174,7 +174,7 @@ private void loadData() throws IOException { } } - @AfterClass + @AfterAll public static void tearDownAfterClass() throws Exception { TEST_UTIL.shutdownMiniCluster(); } @@ -186,7 +186,7 @@ private void verifyRows(TableName tableName) throws IOException { int rowCount = 0; while ((result = scanner.next()) != null) { byte[] value 
= result.getValue(TEST_FAMILY, TEST_QUALIFIER); - Assert.assertArrayEquals(value, Bytes.toBytes(rowCount++)); + assertArrayEquals(value, Bytes.toBytes(rowCount++)); } assertEquals(ROW_COUNT, rowCount); } @@ -210,11 +210,11 @@ public void testRestoreSnapshot() throws Exception { loadData(); verifyRows(TEST_TABLE); - String snapshotName1 = TEST_UTIL.getRandomUUID().toString(); + String snapshotName1 = HBaseTestingUtil.getRandomUUID().toString(); snapshot(snapshotName1, TEST_TABLE); // clone snapshot with restoreAcl true. - TableName tableName1 = TableName.valueOf(TEST_UTIL.getRandomUUID().toString()); + TableName tableName1 = TableName.valueOf(HBaseTestingUtil.getRandomUUID().toString()); cloneSnapshot(snapshotName1, tableName1, true); verifyRows(tableName1); verifyAllowed(new AccessReadAction(tableName1), USER_OWNER, USER_RO, USER_RW); @@ -223,7 +223,7 @@ public void testRestoreSnapshot() throws Exception { verifyDenied(new AccessWriteAction(tableName1), USER_RO, USER_NONE); // clone snapshot with restoreAcl false. 
- TableName tableName2 = TableName.valueOf(TEST_UTIL.getRandomUUID().toString()); + TableName tableName2 = TableName.valueOf(HBaseTestingUtil.getRandomUUID().toString()); cloneSnapshot(snapshotName1, tableName2, false); verifyRows(tableName2); verifyDenied(new AccessReadAction(tableName2), USER_OWNER); @@ -331,9 +331,9 @@ public void testCreateSnapshotWithNonExistingTable() throws Exception { try { // Create snapshot without creating table - assertThrows("Snapshot operation should fail, table doesn't exist", - SnapshotCreationException.class, - () -> TEST_UTIL.getAdmin().snapshot(snapshotName, tableName)); + assertThrows(SnapshotCreationException.class, + () -> TEST_UTIL.getAdmin().snapshot(snapshotName, tableName), + "Snapshot operation should fail, table doesn't exist"); // Create the table TableDescriptor htd = TableDescriptorBuilder.newBuilder(tableName).build(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin.java index 68a841b7d671..f840a755cfc0 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin.java @@ -19,13 +19,13 @@ import static org.apache.hadoop.hbase.HBaseTestingUtil.countRows; import static org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerFactory.TRACKER_IMPL; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNotEquals; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertNull; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; 
+import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; import java.io.IOException; import java.util.ArrayList; @@ -33,7 +33,6 @@ import java.util.Iterator; import java.util.List; import java.util.Map; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.ServerName; @@ -47,18 +46,15 @@ import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.util.Bytes; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({ LargeTests.class, ClientTests.class }) +@Tag(LargeTests.TAG) +@Tag(ClientTests.TAG) public class TestAdmin extends TestAdminBase { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestAdmin.class); - private static final Logger LOG = LoggerFactory.getLogger(TestAdmin.class); @Test @@ -75,23 +71,23 @@ public void testListTableDescriptors() throws IOException { public void testCreateTable() throws IOException { List tables = ADMIN.listTableDescriptors(); int numTables = tables.size(); - final TableName tableName = TableName.valueOf(name.getMethodName()); + final TableName tableName = TableName.valueOf(methodName); TEST_UTIL.createTable(tableName, HConstants.CATALOG_FAMILY).close(); tables = ADMIN.listTableDescriptors(); assertEquals(numTables + 1, tables.size()); - assertTrue("Table must be enabled.", TEST_UTIL.getHBaseCluster().getMaster() - .getTableStateManager().isTableState(tableName, TableState.State.ENABLED)); + assertTrue(TEST_UTIL.getHBaseCluster().getMaster().getTableStateManager() + 
.isTableState(tableName, TableState.State.ENABLED), "Table must be enabled."); assertEquals(TableState.State.ENABLED, getStateFromMeta(tableName)); } @Test public void testTruncateTable() throws IOException { - testTruncateTable(TableName.valueOf(name.getMethodName()), false); + testTruncateTable(TableName.valueOf(methodName), false); } @Test public void testTruncateTablePreservingSplits() throws IOException { - testTruncateTable(TableName.valueOf(name.getMethodName()), true); + testTruncateTable(TableName.valueOf(methodName), true); } private void testTruncateTable(final TableName tableName, boolean preserveSplits) @@ -128,13 +124,13 @@ private void testTruncateTable(final TableName tableName, boolean preserveSplits @Test public void testCreateTableNumberOfRegions() throws IOException, InterruptedException { - TableName table = TableName.valueOf(name.getMethodName()); + TableName table = TableName.valueOf(methodName); ColumnFamilyDescriptor cfd = ColumnFamilyDescriptorBuilder.of(HConstants.CATALOG_FAMILY); ADMIN.createTable(TableDescriptorBuilder.newBuilder(table).setColumnFamily(cfd).build()); List regions; try (RegionLocator l = TEST_UTIL.getConnection().getRegionLocator(table)) { regions = l.getAllRegionLocations(); - assertEquals("Table should have only 1 region", 1, regions.size()); + assertEquals(1, regions.size(), "Table should have only 1 region"); } TableName table2 = TableName.valueOf(table.getNameAsString() + "_2"); @@ -142,7 +138,7 @@ public void testCreateTableNumberOfRegions() throws IOException, InterruptedExce new byte[][] { new byte[] { 42 } }); try (RegionLocator l = TEST_UTIL.getConnection().getRegionLocator(table2)) { regions = l.getAllRegionLocations(); - assertEquals("Table should have only 2 region", 2, regions.size()); + assertEquals(2, regions.size(), "Table should have only 2 region"); } TableName table3 = TableName.valueOf(table.getNameAsString() + "_3"); @@ -150,7 +146,7 @@ public void testCreateTableNumberOfRegions() throws 
IOException, InterruptedExce Bytes.toBytes("a"), Bytes.toBytes("z"), 3); try (RegionLocator l = TEST_UTIL.getConnection().getRegionLocator(table3)) { regions = l.getAllRegionLocations(); - assertEquals("Table should have only 3 region", 3, regions.size()); + assertEquals(3, regions.size(), "Table should have only 3 region"); } TableName table4 = TableName.valueOf(table.getNameAsString() + "_4"); @@ -167,13 +163,13 @@ public void testCreateTableNumberOfRegions() throws IOException, InterruptedExce new byte[] { 1 }, new byte[] { 127 }, 16); try (RegionLocator l = TEST_UTIL.getConnection().getRegionLocator(table5)) { regions = l.getAllRegionLocations(); - assertEquals("Table should have 16 region", 16, regions.size()); + assertEquals(16, regions.size(), "Table should have 16 region"); } } @Test public void testCreateTableWithRegions() throws IOException, InterruptedException { - TableName table = TableName.valueOf(name.getMethodName()); + TableName table = TableName.valueOf(methodName); ColumnFamilyDescriptor cfd = ColumnFamilyDescriptorBuilder.of(HConstants.CATALOG_FAMILY); byte[][] splitKeys = { new byte[] { 1, 1, 1 }, new byte[] { 2, 2, 2 }, new byte[] { 3, 3, 3 }, new byte[] { 4, 4, 4 }, new byte[] { 5, 5, 5 }, new byte[] { 6, 6, 6 }, @@ -184,7 +180,7 @@ public void testCreateTableWithRegions() throws IOException, InterruptedExceptio splitKeys); boolean tableAvailable = ADMIN.isTableAvailable(table); - assertTrue("Table should be created with splitKyes + 1 rows in META", tableAvailable); + assertTrue(tableAvailable, "Table should be created with splitKyes + 1 rows in META"); List regions; Iterator hris; @@ -192,9 +188,8 @@ public void testCreateTableWithRegions() throws IOException, InterruptedExceptio try (RegionLocator l = TEST_UTIL.getConnection().getRegionLocator(table)) { regions = l.getAllRegionLocations(); - assertEquals( - "Tried to create " + expectedRegions + " regions " + "but only found " + regions.size(), - expectedRegions, regions.size()); + 
assertEquals(expectedRegions, regions.size(), + "Tried to create " + expectedRegions + " regions " + "but only found " + regions.size()); System.err.println("Found " + regions.size() + " regions"); hris = regions.iterator(); @@ -249,9 +244,8 @@ public void testCreateTableWithRegions() throws IOException, InterruptedExceptio try (RegionLocator l = TEST_UTIL.getConnection().getRegionLocator(table2)) { regions = l.getAllRegionLocations(); - assertEquals( - "Tried to create " + expectedRegions + " regions " + "but only found " + regions.size(), - expectedRegions, regions.size()); + assertEquals(expectedRegions, regions.size(), + "Tried to create " + expectedRegions + " regions " + "but only found " + regions.size()); System.err.println("Found " + regions.size() + " regions"); hris = regions.iterator(); @@ -302,9 +296,8 @@ public void testCreateTableWithRegions() throws IOException, InterruptedExceptio try (RegionLocator l = TEST_UTIL.getConnection().getRegionLocator(table3)) { regions = l.getAllRegionLocations(); - assertEquals( - "Tried to create " + expectedRegions + " regions " + "but only found " + regions.size(), - expectedRegions, regions.size()); + assertEquals(expectedRegions, regions.size(), + "Tried to create " + expectedRegions + " regions " + "but only found " + regions.size()); System.err.println("Found " + regions.size() + " regions"); verifyRoundRobinDistribution(l, expectedRegions); @@ -318,8 +311,8 @@ public void testCreateTableWithRegions() throws IOException, InterruptedExceptio try { ADMIN.createTable(TableDescriptorBuilder.newBuilder(table4).setColumnFamily(cfd).build(), splitKeys); - assertTrue("Should not be able to create this table because of " + "duplicate split keys", - false); + assertTrue(false, + "Should not be able to create this table because of " + "duplicate split keys"); } catch (IllegalArgumentException iae) { // Expected } @@ -327,7 +320,7 @@ public void testCreateTableWithRegions() throws IOException, InterruptedExceptio @Test 
public void testCreateTableWithOnlyEmptyStartRow() throws IOException { - final byte[] tableName = Bytes.toBytes(name.getMethodName()); + final byte[] tableName = Bytes.toBytes(methodName); byte[][] splitKeys = new byte[1][]; splitKeys[0] = HConstants.EMPTY_BYTE_ARRAY; TableDescriptor desc = TableDescriptorBuilder.newBuilder(TableName.valueOf(tableName)) @@ -341,7 +334,7 @@ public void testCreateTableWithOnlyEmptyStartRow() throws IOException { @Test public void testCreateTableWithEmptyRowInTheSplitKeys() throws IOException { - final byte[] tableName = Bytes.toBytes(name.getMethodName()); + final byte[] tableName = Bytes.toBytes(methodName); byte[][] splitKeys = new byte[3][]; splitKeys[0] = Bytes.toBytes("region1"); splitKeys[1] = HConstants.EMPTY_BYTE_ARRAY; @@ -374,21 +367,21 @@ private void verifyRoundRobinDistribution(RegionLocator regionLocator, int expec int min = (int) Math.floor(average); int max = (int) Math.ceil(average); for (List regionList : server2Regions.values()) { - assertTrue("numRS=" + numRS + ", min=" + min + ", max=" + max + ", size=" + regionList.size(), - regionList.size() == min || regionList.size() == max); + assertTrue(regionList.size() == min || regionList.size() == max, + "numRS=" + numRS + ", min=" + min + ", max=" + max + ", size=" + regionList.size()); } } @Test public void testCloneTableSchema() throws Exception { - final TableName tableName = TableName.valueOf(name.getMethodName()); + final TableName tableName = TableName.valueOf(methodName); final TableName newTableName = TableName.valueOf(tableName.getNameAsString() + "_new"); testCloneTableSchema(tableName, newTableName, false); } @Test public void testCloneTableSchemaPreservingSplits() throws Exception { - final TableName tableName = TableName.valueOf(name.getMethodName()); + final TableName tableName = TableName.valueOf(methodName); final TableName newTableName = TableName.valueOf(tableName.getNameAsString() + "_new"); testCloneTableSchema(tableName, newTableName, true); } @@ 
-415,8 +408,8 @@ private void testCloneTableSchema(final TableName tableName, final TableName new ADMIN.createTable(tableDesc, splitKeys); assertEquals(NUM_REGIONS, TEST_UTIL.getHBaseCluster().getRegions(tableName).size()); - assertTrue("Table should be created with splitKyes + 1 rows in META", - ADMIN.isTableAvailable(tableName)); + assertTrue(ADMIN.isTableAvailable(tableName), + "Table should be created with splitKyes + 1 rows in META"); // clone & Verify ADMIN.cloneTableSchema(tableName, newTableName, preserveSplits); @@ -433,8 +426,8 @@ private void testCloneTableSchema(final TableName tableName, final TableName new if (preserveSplits) { assertEquals(NUM_REGIONS, TEST_UTIL.getHBaseCluster().getRegions(newTableName).size()); - assertTrue("New table should be created with splitKyes + 1 rows in META", - ADMIN.isTableAvailable(newTableName)); + assertTrue(ADMIN.isTableAvailable(newTableName), + "New table should be created with splitKyes + 1 rows in META"); } else { assertEquals(1, TEST_UTIL.getHBaseCluster().getRegions(newTableName).size()); } @@ -442,7 +435,7 @@ private void testCloneTableSchema(final TableName tableName, final TableName new @Test public void testCloneTableSchemaWithNonExistentSourceTable() throws Exception { - final TableName tableName = TableName.valueOf(name.getMethodName()); + final TableName tableName = TableName.valueOf(methodName); final TableName newTableName = TableName.valueOf(tableName.getNameAsString() + "_new"); // test for non-existent source table try { @@ -455,7 +448,7 @@ public void testCloneTableSchemaWithNonExistentSourceTable() throws Exception { @Test public void testCloneTableSchemaWithExistentDestinationTable() throws Exception { - final TableName tableName = TableName.valueOf(name.getMethodName()); + final TableName tableName = TableName.valueOf(methodName); final TableName newTableName = TableName.valueOf(tableName.getNameAsString() + "_new"); byte[] FAMILY_0 = Bytes.toBytes("cf0"); TEST_UTIL.createTable(tableName, 
FAMILY_0); @@ -471,7 +464,7 @@ public void testCloneTableSchemaWithExistentDestinationTable() throws Exception @Test public void testModifyTableOnTableWithRegionReplicas() throws Exception { - TableName tableName = TableName.valueOf(name.getMethodName()); + TableName tableName = TableName.valueOf(methodName); TableDescriptor desc = TableDescriptorBuilder.newBuilder(tableName) .setColumnFamily(ColumnFamilyDescriptorBuilder.of(Bytes.toBytes("cf"))) .setRegionReplication(5).build(); @@ -492,7 +485,7 @@ public void testModifyTableOnTableWithRegionReplicas() throws Exception { */ @Test public void testOnlineChangeTableSchema() throws IOException, InterruptedException { - final TableName tableName = TableName.valueOf(name.getMethodName()); + final TableName tableName = TableName.valueOf(methodName); List tables = ADMIN.listTableDescriptors(); int numTables = tables.size(); TEST_UTIL.createTable(tableName, HConstants.CATALOG_FAMILY).close(); @@ -558,7 +551,7 @@ public void testOnlineChangeTableSchema() throws IOException, InterruptedExcepti @Test public void testUnknownServers() throws Exception { - TableName table = TableName.valueOf(name.getMethodName()); + TableName table = TableName.valueOf(methodName); ColumnFamilyDescriptor cfd = ColumnFamilyDescriptorBuilder.of(HConstants.CATALOG_FAMILY); ADMIN.createTable(TableDescriptorBuilder.newBuilder(table).setColumnFamily(cfd).build()); final List regions = ADMIN.getRegions(table); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java index 2de0c7d03c59..a37abae10d0d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java @@ -17,12 +17,12 @@ */ package org.apache.hadoop.hbase.client; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static 
org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertThrows; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; import java.io.IOException; import java.util.ArrayList; @@ -30,7 +30,6 @@ import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.MetaTableAccessor; @@ -51,9 +50,8 @@ import org.apache.hadoop.hbase.util.FutureUtils; import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.hbase.util.Threads; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -64,17 +62,15 @@ * Class to test HBaseAdmin. Spins up the minicluster once at test start and then takes it down * afterward. Add any testing of HBaseAdmin functionality here. 
*/ -@Category({ LargeTests.class, ClientTests.class }) +@Tag(LargeTests.TAG) +@Tag(ClientTests.TAG) public class TestAdmin1 extends TestAdminBase { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestAdmin1.class); - private static final Logger LOG = LoggerFactory.getLogger(TestAdmin1.class); @Test public void testSplitFlushCompactUnknownTable() throws InterruptedException { - final TableName unknowntable = TableName.valueOf(name.getMethodName()); + final TableName unknowntable = TableName.valueOf(methodName); Exception exception = null; try { ADMIN.compact(unknowntable); @@ -102,7 +98,7 @@ public void testSplitFlushCompactUnknownTable() throws InterruptedException { @Test public void testCompactATableWithSuperLongTableName() throws Exception { - TableName tableName = TableName.valueOf(name.getMethodName()); + TableName tableName = TableName.valueOf(methodName); TableDescriptor htd = TableDescriptorBuilder.newBuilder(tableName) .setColumnFamily(ColumnFamilyDescriptorBuilder.of("fam1")).build(); try { @@ -120,7 +116,7 @@ public void testCompactATableWithSuperLongTableName() throws Exception { @Test public void testCompactionTimestamps() throws Exception { - TableName tableName = TableName.valueOf(name.getMethodName()); + TableName tableName = TableName.valueOf(methodName); TableDescriptor htd = TableDescriptorBuilder.newBuilder(tableName) .setColumnFamily(ColumnFamilyDescriptorBuilder.of("fam1")).build(); ADMIN.createTable(htd); @@ -171,14 +167,15 @@ public void testCompactionTimestamps() throws Exception { table.close(); } - @Test(expected = IllegalArgumentException.class) + @Test public void testColumnValidName() { - ColumnFamilyDescriptorBuilder.of("\\test\\abc"); + assertThrows(IllegalArgumentException.class, + () -> ColumnFamilyDescriptorBuilder.of("\\test\\abc")); } @Test public void testTableExist() throws IOException { - final TableName table = TableName.valueOf(name.getMethodName()); + final TableName table = 
TableName.valueOf(methodName); boolean exist; exist = ADMIN.tableExists(table); assertEquals(false, exist); @@ -367,8 +364,8 @@ public void run() { for (int index = 0; index < familyNames.length; index++) { int delta = Math.abs(rowCounts[index] / 2 - splitKey); if (delta < deltaForLargestFamily) { - assertTrue("Delta " + delta + " for family " + index + " should be at least " - + "deltaForLargestFamily " + deltaForLargestFamily, false); + assertTrue(false, "Delta " + delta + " for family " + index + " should be at least " + + "deltaForLargestFamily " + deltaForLargestFamily); } } } @@ -383,7 +380,7 @@ public void testSplitAndMergeWithReplicaTable() throws Exception { // are not allowed. The test validates that. Then the test does a valid split/merge of allowed // regions. // Set up a table with 3 regions and replication set to 3 - TableName tableName = TableName.valueOf(name.getMethodName()); + TableName tableName = TableName.valueOf(methodName); byte[] cf = Bytes.toBytes("f"); TableDescriptor desc = TableDescriptorBuilder.newBuilder(tableName).setRegionReplication(3) .setColumnFamily(ColumnFamilyDescriptorBuilder.of(cf)).build(); @@ -477,9 +474,10 @@ public void testSplitAndMergeWithReplicaTable() throws Exception { assertTrue(gotException); } - @Test(expected = IllegalArgumentException.class) - public void testInvalidColumnDescriptor() throws IOException { - ColumnFamilyDescriptorBuilder.of("/cfamily/name"); + @Test + public void testInvalidColumnDescriptor() { + assertThrows(IllegalArgumentException.class, + () -> ColumnFamilyDescriptorBuilder.of("/cfamily/name")); } /** @@ -488,7 +486,7 @@ public void testInvalidColumnDescriptor() throws IOException { */ @Test public void testHFileReplication() throws Exception { - final TableName tableName = TableName.valueOf(this.name.getMethodName()); + final TableName tableName = TableName.valueOf(methodName); String fn1 = "rep1"; String fn = "defaultRep"; TableDescriptor htd = TableDescriptorBuilder.newBuilder(tableName) 
@@ -516,16 +514,17 @@ public void testHFileReplication() throws Exception { HStore store = r.getStore(Bytes.toBytes(fn)); for (HStoreFile sf : store.getStorefiles()) { assertTrue(sf.toString().contains(fn)); - assertTrue("Column family " + fn + " should have 3 copies", + assertTrue( CommonFSUtils.getDefaultReplication(TEST_UTIL.getTestFileSystem(), sf.getPath()) - == (sf.getFileInfo().getFileStatus().getReplication())); + == (sf.getFileInfo().getFileStatus().getReplication()), + "Column family " + fn + " should have 3 copies"); } store = r.getStore(Bytes.toBytes(fn1)); for (HStoreFile sf : store.getStorefiles()) { assertTrue(sf.toString().contains(fn1)); - assertTrue("Column family " + fn1 + " should have only 1 copy", - 1 == sf.getFileInfo().getFileStatus().getReplication()); + assertTrue(1 == sf.getFileInfo().getFileStatus().getReplication(), + "Column family " + fn1 + " should have only 1 copy"); } } } finally { @@ -538,7 +537,7 @@ public void testHFileReplication() throws Exception { @Test public void testMergeRegions() throws Exception { - final TableName tableName = TableName.valueOf(name.getMethodName()); + final TableName tableName = TableName.valueOf(methodName); TableDescriptor td = TableDescriptorBuilder.newBuilder(tableName) .setColumnFamily(ColumnFamilyDescriptorBuilder.of("d")).build(); byte[][] splitRows = new byte[2][]; @@ -598,7 +597,7 @@ public void testMergeRegions() throws Exception { @Test public void testMergeRegionsInvalidRegionCount() throws IOException, InterruptedException, ExecutionException { - TableName tableName = TableName.valueOf(name.getMethodName()); + TableName tableName = TableName.valueOf(methodName); TableDescriptor td = TableDescriptorBuilder.newBuilder(tableName) .setColumnFamily(ColumnFamilyDescriptorBuilder.of("d")).build(); byte[][] splitRows = new byte[2][]; @@ -632,7 +631,7 @@ public void testMergeRegionsInvalidRegionCount() @Test public void testSplitShouldNotHappenIfSplitIsDisabledForTable() throws Exception { - final 
TableName tableName = TableName.valueOf(name.getMethodName()); + final TableName tableName = TableName.valueOf(methodName); TableDescriptor htd = TableDescriptorBuilder.newBuilder(tableName) .setRegionSplitPolicyClassName(DisabledRegionSplitPolicy.class.getName()) .setColumnFamily(ColumnFamilyDescriptorBuilder.of("f")).build(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java index 2cf088fa6a82..a8f6583492d3 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java @@ -17,11 +17,12 @@ */ package org.apache.hadoop.hbase.client; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertThrows; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; import java.io.IOException; import java.util.ArrayList; @@ -35,7 +36,6 @@ import java.util.concurrent.atomic.AtomicInteger; import java.util.stream.Collectors; import org.apache.hadoop.hbase.ClusterMetrics.Option; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.SingleProcessHBaseCluster; @@ -58,10 +58,8 @@ import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.FutureUtils; import org.apache.hadoop.hbase.wal.AbstractFSWALProvider; -import org.junit.Assert; -import org.junit.ClassRule; -import 
org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -72,12 +70,10 @@ * Class to test HBaseAdmin. Spins up the minicluster once at test start and then takes it down * afterward. Add any testing of HBaseAdmin functionality here. */ -@Category({ LargeTests.class, ClientTests.class }) +@Tag(LargeTests.TAG) +@Tag(ClientTests.TAG) public class TestAdmin2 extends TestAdminBase { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestAdmin2.class); - private static final Logger LOG = LoggerFactory.getLogger(TestAdmin2.class); @Test @@ -88,13 +84,14 @@ public void testCreateBadTables() throws IOException { } catch (TableExistsException e) { msg = e.toString(); } - assertTrue("Unexcepted exception message " + msg, + assertTrue( msg != null && msg.startsWith(TableExistsException.class.getName()) - && msg.contains(TableName.META_TABLE_NAME.getNameAsString())); + && msg.contains(TableName.META_TABLE_NAME.getNameAsString()), + "Unexcepted exception message " + msg); // Now try and do concurrent creation with a bunch of threads. 
TableDescriptor tableDescriptor = - TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())) + TableDescriptorBuilder.newBuilder(TableName.valueOf(methodName)) .setColumnFamily(ColumnFamilyDescriptorBuilder.of(HConstants.CATALOG_FAMILY)).build(); int count = 10; Thread[] threads = new Thread[count]; @@ -139,7 +136,7 @@ public void run() { */ @Test public void testTableNameClash() throws Exception { - final String name = this.name.getMethodName(); + final String name = methodName; TableDescriptor tableDescriptor1 = TableDescriptorBuilder.newBuilder(TableName.valueOf(name + "SOMEUPPERCASE")) .setColumnFamily(ColumnFamilyDescriptorBuilder.of(HConstants.CATALOG_FAMILY)).build(); @@ -158,7 +155,7 @@ public void testTableNameClash() throws Exception { */ @Test public void testCreateTableRPCTimeOut() throws Exception { - final String name = this.name.getMethodName(); + final String name = methodName; int oldTimeout = TEST_UTIL.getConfiguration().getInt(HConstants.HBASE_RPC_TIMEOUT_KEY, HConstants.DEFAULT_HBASE_RPC_TIMEOUT); TEST_UTIL.getConfiguration().setInt(HConstants.HBASE_RPC_TIMEOUT_KEY, 1500); @@ -181,7 +178,7 @@ public void testCreateTableRPCTimeOut() throws Exception { */ @Test public void testReadOnlyTable() throws Exception { - final TableName name = TableName.valueOf(this.name.getMethodName()); + final TableName name = TableName.valueOf(methodName); Table table = TEST_UTIL.createTable(name, HConstants.CATALOG_FAMILY); byte[] value = Bytes.toBytes("somedata"); // This used to use an empty row... 
That must have been a bug @@ -199,9 +196,8 @@ public void testReadOnlyTable() throws Exception { public void testTableNames() throws IOException { byte[][] illegalNames = new byte[][] { Bytes.toBytes("-bad"), Bytes.toBytes(".bad") }; for (byte[] illegalName : illegalNames) { - assertThrows( - "Did not detect '" + Bytes.toString(illegalName) + "' as an illegal user table name", - IllegalArgumentException.class, () -> TableName.valueOf(illegalName)); + assertThrows(IllegalArgumentException.class, () -> TableName.valueOf(illegalName), + "Did not detect '" + Bytes.toString(illegalName) + "' as an illegal user table name"); } byte[] legalName = Bytes.toBytes("g-oo.d"); try { @@ -215,49 +211,50 @@ public void testTableNames() throws IOException { /** * For HADOOP-2579 */ - @Test(expected = TableExistsException.class) + @Test public void testTableExistsExceptionWithATable() throws IOException { - final TableName name = TableName.valueOf(this.name.getMethodName()); + final TableName name = TableName.valueOf(methodName); TEST_UTIL.createTable(name, HConstants.CATALOG_FAMILY).close(); - TEST_UTIL.createTable(name, HConstants.CATALOG_FAMILY); + assertThrows(TableExistsException.class, + () -> TEST_UTIL.createTable(name, HConstants.CATALOG_FAMILY)); } /** * Can't disable a table if the table isn't in enabled state */ - @Test(expected = TableNotEnabledException.class) + @Test public void testTableNotEnabledExceptionWithATable() throws IOException { - final TableName name = TableName.valueOf(this.name.getMethodName()); + final TableName name = TableName.valueOf(methodName); TEST_UTIL.createTable(name, HConstants.CATALOG_FAMILY).close(); ADMIN.disableTable(name); - ADMIN.disableTable(name); + assertThrows(TableNotEnabledException.class, () -> ADMIN.disableTable(name)); } /** * Can't enable a table if the table isn't in disabled state */ - @Test(expected = TableNotDisabledException.class) + @Test public void testTableNotDisabledExceptionWithATable() throws IOException { - final 
TableName name = TableName.valueOf(this.name.getMethodName()); + final TableName name = TableName.valueOf(methodName); try (Table t = TEST_UTIL.createTable(name, HConstants.CATALOG_FAMILY)) { - ADMIN.enableTable(name); + assertThrows(TableNotDisabledException.class, () -> ADMIN.enableTable(name)); } } /** * For HADOOP-2579 */ - @Test(expected = TableNotFoundException.class) + @Test public void testTableNotFoundExceptionWithoutAnyTables() throws IOException { TableName tableName = TableName.valueOf("testTableNotFoundExceptionWithoutAnyTables"); try (Table ht = TEST_UTIL.getConnection().getTable(tableName)) { - ht.get(new Get(Bytes.toBytes("e"))); + assertThrows(TableNotFoundException.class, () -> ht.get(new Get(Bytes.toBytes("e")))); } } @Test public void testShouldUnassignTheRegion() throws Exception { - final TableName tableName = TableName.valueOf(name.getMethodName()); + final TableName tableName = TableName.valueOf(methodName); createTableWithDefaultConf(tableName); RegionInfo info = null; @@ -276,12 +273,12 @@ public void testShouldUnassignTheRegion() throws Exception { isInList = ProtobufUtil.getOnlineRegions(rs.getRSRpcServices()).contains(info); } - assertFalse("The region should not be present in online regions list.", isInList); + assertFalse(isInList, "The region should not be present in online regions list."); } @Test public void testCloseRegionIfInvalidRegionNameIsPassed() throws Exception { - final String name = this.name.getMethodName(); + final String name = methodName; byte[] tableName = Bytes.toBytes(name); createTableWithDefaultConf(tableName); @@ -298,13 +295,13 @@ public void testCloseRegionIfInvalidRegionNameIsPassed() throws Exception { } } onlineRegions = ProtobufUtil.getOnlineRegions(rs.getRSRpcServices()); - assertTrue("The region should be present in online regions list.", - onlineRegions.contains(info)); + assertTrue(onlineRegions.contains(info), + "The region should be present in online regions list."); } @Test public void 
testCloseRegionThatFetchesTheHRIFromMeta() throws Exception { - final TableName tableName = TableName.valueOf(name.getMethodName()); + final TableName tableName = TableName.valueOf(methodName); createTableWithDefaultConf(tableName); RegionInfo info = null; @@ -326,7 +323,7 @@ public void testCloseRegionThatFetchesTheHRIFromMeta() throws Exception { isInList = ProtobufUtil.getOnlineRegions(rs.getRSRpcServices()).contains(info); } - assertFalse("The region should not be present in online regions list.", isInList); + assertFalse(isInList, "The region should not be present in online regions list."); } private Admin createTable(TableName tableName) throws IOException { @@ -359,7 +356,7 @@ private void createTableWithDefaultConf(TableName TABLENAME) throws IOException */ @Test public void testGetTableRegions() throws IOException { - final TableName tableName = TableName.valueOf(name.getMethodName()); + final TableName tableName = TableName.valueOf(methodName); int expectedRegions = 10; @@ -373,24 +370,23 @@ public void testGetTableRegions() throws IOException { List RegionInfos = ADMIN.getRegions(tableName); - assertEquals( - "Tried to create " + expectedRegions + " regions " + "but only found " + RegionInfos.size(), - expectedRegions, RegionInfos.size()); + assertEquals(expectedRegions, RegionInfos.size(), + "Tried to create " + expectedRegions + " regions " + "but only found " + RegionInfos.size()); } @Test public void testMoveToPreviouslyAssignedRS() throws IOException, InterruptedException { SingleProcessHBaseCluster cluster = TEST_UTIL.getHBaseCluster(); HMaster master = cluster.getMaster(); - final TableName tableName = TableName.valueOf(name.getMethodName()); + final TableName tableName = TableName.valueOf(methodName); Admin localAdmin = createTable(tableName); List tableRegions = localAdmin.getRegions(tableName); RegionInfo hri = tableRegions.get(0); AssignmentManager am = master.getAssignmentManager(); ServerName server = 
am.getRegionStates().getRegionServerOfRegion(hri); localAdmin.move(hri.getEncodedNameAsBytes(), server); - assertEquals("Current region server and region server before move should be same.", server, - am.getRegionStates().getRegionServerOfRegion(hri)); + assertEquals(server, am.getRegionStates().getRegionServerOfRegion(hri), + "Current region server and region server before move should be same."); } @Test @@ -402,7 +398,7 @@ public void testWALRollWriting() throws Exception { v.append(className); } byte[] value = Bytes.toBytes(v.toString()); - HRegionServer regionServer = startAndWriteData(TableName.valueOf(name.getMethodName()), value); + HRegionServer regionServer = startAndWriteData(TableName.valueOf(methodName), value); LOG.info("after writing there are " + AbstractFSWALProvider.getNumRolledLogFiles(regionServer.getWAL(null)) + " log files"); @@ -493,7 +489,7 @@ public void testDisableCatalogTable() throws Exception { // Before the fix for HBASE-6146, the below table creation was failing as the hbase:meta table // actually getting disabled by the disableTable() call. 
TableDescriptor tableDescriptor = - TableDescriptorBuilder.newBuilder(TableName.valueOf(Bytes.toBytes(name.getMethodName()))) + TableDescriptorBuilder.newBuilder(TableName.valueOf(Bytes.toBytes(methodName))) .setColumnFamily(ColumnFamilyDescriptorBuilder.of(Bytes.toBytes("cf1"))).build(); TEST_UTIL.getAdmin().createTable(tableDescriptor); } @@ -501,13 +497,13 @@ public void testDisableCatalogTable() throws Exception { @Test public void testIsEnabledOrDisabledOnUnknownTable() throws Exception { try { - ADMIN.isTableEnabled(TableName.valueOf(name.getMethodName())); + ADMIN.isTableEnabled(TableName.valueOf(methodName)); fail("Test should fail if isTableEnabled called on unknown table."); } catch (IOException e) { } try { - ADMIN.isTableDisabled(TableName.valueOf(name.getMethodName())); + ADMIN.isTableDisabled(TableName.valueOf(methodName)); fail("Test should fail if isTableDisabled called on unknown table."); } catch (IOException e) { } @@ -581,7 +577,7 @@ public void testDecommissionRegionServers() throws Exception { List decommissionedRegionServers = ADMIN.listDecommissionedRegionServers(); assertTrue(decommissionedRegionServers.isEmpty()); - final TableName tableName = TableName.valueOf(name.getMethodName()); + final TableName tableName = TableName.valueOf(methodName); TEST_UTIL.createMultiRegionTable(tableName, Bytes.toBytes("f"), 6); ArrayList clusterRegionServers = new ArrayList<>( @@ -640,7 +636,7 @@ public void testDecommissionRegionServers() throws Exception { */ @Test public void testGetRegionInfo() throws Exception { - final TableName tableName = TableName.valueOf(name.getMethodName()); + final TableName tableName = TableName.valueOf(methodName); Table table = TEST_UTIL.createTable(tableName, Bytes.toBytes("f")); for (int i = 0; i < 100; i++) { table.put(new Put(Bytes.toBytes(i)).addColumn(Bytes.toBytes("f"), Bytes.toBytes("q"), @@ -650,24 +646,24 @@ public void testGetRegionInfo() throws Exception { HRegionServer rs = 
TEST_UTIL.getRSForFirstRegionInTable(table.getName()); List regions = rs.getRegions(tableName); - Assert.assertEquals(1, regions.size()); + assertEquals(1, regions.size()); HRegion region = regions.get(0); byte[] regionName = region.getRegionInfo().getRegionName(); HStore store = region.getStore(Bytes.toBytes("f")); long expectedStoreFilesSize = store.getStorefilesSize(); - Assert.assertNotNull(store); - Assert.assertEquals(expectedStoreFilesSize, store.getSize()); + assertNotNull(store); + assertEquals(expectedStoreFilesSize, store.getSize()); for (int i = 0; i < 10; i++) { RegionInfo ri = ProtobufUtil .toRegionInfo(TEST_UTIL.getAsyncConnection().getRegionServerAdmin(rs.getServerName()) .getRegionInfo(RequestConverter.buildGetRegionInfoRequest(regionName)).get() .getRegionInfo()); - Assert.assertEquals(region.getRegionInfo(), ri); + assertEquals(region.getRegionInfo(), ri); // Make sure that the store size is still the actual file system's store size. - Assert.assertEquals(expectedStoreFilesSize, store.getSize()); + assertEquals(expectedStoreFilesSize, store.getSize()); } // Test querying using the encoded name only. 
When encoded name passed, @@ -699,7 +695,7 @@ private void testGetWithRegionName(ServerName sn, RegionInfo inputRI, byte[] reg @Test public void testTableSplitFollowedByModify() throws Exception { - final TableName tableName = TableName.valueOf(name.getMethodName()); + final TableName tableName = TableName.valueOf(methodName); TEST_UTIL.createTable(tableName, Bytes.toBytes("f")); // get the original table region count @@ -727,7 +723,7 @@ public boolean evaluate() throws Exception { @SuppressWarnings("FutureReturnValueIgnored") @Test public void testTableMergeFollowedByModify() throws Exception { - final TableName tableName = TableName.valueOf(name.getMethodName()); + final TableName tableName = TableName.valueOf(methodName); TEST_UTIL.createTable(tableName, new byte[][] { Bytes.toBytes("f") }, new byte[][] { Bytes.toBytes(3) }); @@ -797,22 +793,22 @@ public void testSlowLogResponses() throws Exception { ++countFailedClearSlowResponse; } } - Assert.assertEquals(countFailedClearSlowResponse, 0); + assertEquals(countFailedClearSlowResponse, 0); List onlineLogRecords = ADMIN.getLogEntries(new HashSet<>(serverNames), "SLOW_LOG", ServerType.REGION_SERVER, 100, null); // after cleanup of slowlog responses, total count of slowlog payloads should be 0 - Assert.assertEquals(onlineLogRecords.size(), 0); + assertEquals(onlineLogRecords.size(), 0); List balancerDecisionRecords = ADMIN.getLogEntries(null, "BALANCER_DECISION", ServerType.MASTER, 100, null); - Assert.assertEquals(balancerDecisionRecords.size(), 0); + assertEquals(balancerDecisionRecords.size(), 0); } @Test public void testGetRegionServers() throws Exception { // get all live server names List serverNames = new ArrayList<>(ADMIN.getRegionServers(true)); - Assert.assertEquals(3, serverNames.size()); + assertEquals(3, serverNames.size()); List serversToDecom = new ArrayList<>(); ServerName serverToDecommission = serverNames.get(0); @@ -821,14 +817,14 @@ public void testGetRegionServers() throws Exception { 
ADMIN.decommissionRegionServers(serversToDecom, false); waitForServerCommissioned(serverToDecommission, true); - Assert.assertEquals(2, ADMIN.getRegionServers(true).size()); - Assert.assertEquals(3, ADMIN.getRegionServers(false).size()); + assertEquals(2, ADMIN.getRegionServers(true).size()); + assertEquals(3, ADMIN.getRegionServers(false).size()); ADMIN.recommissionRegionServer(serverToDecommission, Collections.emptyList()); waitForServerCommissioned(null, false); - Assert.assertEquals(3, ADMIN.getRegionServers(true).size()); - Assert.assertEquals(3, ADMIN.getRegionServers(false).size()); + assertEquals(3, ADMIN.getRegionServers(true).size()); + assertEquals(3, ADMIN.getRegionServers(false).size()); } private static void waitForServerCommissioned(ServerName excludeServer, diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin3.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin3.java index cc653e0b84c5..8ed755293102 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin3.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin3.java @@ -18,19 +18,18 @@ package org.apache.hadoop.hbase.client; import static org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerFactory.TRACKER_IMPL; -import static org.junit.Assert.assertArrayEquals; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNull; -import static org.junit.Assert.assertThrows; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; +import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; 
+import static org.junit.jupiter.api.Assertions.fail; import java.io.IOException; import java.util.List; import java.util.regex.Pattern; import org.apache.hadoop.hbase.DoNotRetryIOException; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.InvalidFamilyOperationException; @@ -45,18 +44,15 @@ import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({ LargeTests.class, ClientTests.class }) +@Tag(LargeTests.TAG) +@Tag(ClientTests.TAG) public class TestAdmin3 extends TestAdminBase { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestAdmin3.class); - private static final Logger LOG = LoggerFactory.getLogger(TestAdmin3.class); @Test @@ -64,7 +60,7 @@ public void testDisableAndEnableTable() throws IOException { final byte[] row = Bytes.toBytes("row"); final byte[] qualifier = Bytes.toBytes("qualifier"); final byte[] value = Bytes.toBytes("value"); - final TableName table = TableName.valueOf(name.getMethodName()); + final TableName table = TableName.valueOf(methodName); Table ht = TEST_UTIL.createTable(table, HConstants.CATALOG_FAMILY); Put put = new Put(row); put.addColumn(HConstants.CATALOG_FAMILY, qualifier, value); @@ -74,8 +70,8 @@ public void testDisableAndEnableTable() throws IOException { ht.get(get); ADMIN.disableTable(ht.getName()); - assertTrue("Table must be disabled.", TEST_UTIL.getHBaseCluster().getMaster() - .getTableStateManager().isTableState(ht.getName(), TableState.State.DISABLED)); + 
assertTrue(TEST_UTIL.getHBaseCluster().getMaster().getTableStateManager() + .isTableState(ht.getName(), TableState.State.DISABLED), "Table must be disabled."); assertEquals(TableState.State.DISABLED, getStateFromMeta(table)); // Test that table is disabled @@ -101,8 +97,8 @@ public void testDisableAndEnableTable() throws IOException { } assertTrue(ok); ADMIN.enableTable(table); - assertTrue("Table must be enabled.", TEST_UTIL.getHBaseCluster().getMaster() - .getTableStateManager().isTableState(ht.getName(), TableState.State.ENABLED)); + assertTrue(TEST_UTIL.getHBaseCluster().getMaster().getTableStateManager() + .isTableState(ht.getName(), TableState.State.ENABLED), "Table must be enabled."); assertEquals(TableState.State.ENABLED, getStateFromMeta(table)); // Test that table is enabled @@ -120,8 +116,8 @@ public void testDisableAndEnableTables() throws IOException { final byte[] row = Bytes.toBytes("row"); final byte[] qualifier = Bytes.toBytes("qualifier"); final byte[] value = Bytes.toBytes("value"); - final TableName table1 = TableName.valueOf(name.getMethodName() + "1"); - final TableName table2 = TableName.valueOf(name.getMethodName() + "2"); + final TableName table1 = TableName.valueOf(methodName + "1"); + final TableName table2 = TableName.valueOf(methodName + "2"); Table ht1 = TEST_UTIL.createTable(table1, HConstants.CATALOG_FAMILY); Table ht2 = TEST_UTIL.createTable(table2, HConstants.CATALOG_FAMILY); Put put = new Put(row); @@ -182,7 +178,7 @@ public void testDisableAndEnableTables() throws IOException { */ @Test public void testEnableTableRetainAssignment() throws IOException { - final TableName tableName = TableName.valueOf(name.getMethodName()); + final TableName tableName = TableName.valueOf(methodName); byte[][] splitKeys = { new byte[] { 1, 1, 1 }, new byte[] { 2, 2, 2 }, new byte[] { 3, 3, 3 }, new byte[] { 4, 4, 4 }, new byte[] { 5, 5, 5 }, new byte[] { 6, 6, 6 }, new byte[] { 7, 7, 7 }, new byte[] { 8, 8, 8 }, new byte[] { 9, 9, 9 } }; @@ -194,9 
+190,8 @@ public void testEnableTableRetainAssignment() throws IOException { try (RegionLocator l = TEST_UTIL.getConnection().getRegionLocator(tableName)) { List regions = l.getAllRegionLocations(); - assertEquals( - "Tried to create " + expectedRegions + " regions " + "but only found " + regions.size(), - expectedRegions, regions.size()); + assertEquals(expectedRegions, regions.size(), + "Tried to create " + expectedRegions + " regions " + "but only found " + regions.size()); // Disable table. ADMIN.disableTable(tableName); // Enable table, use retain assignment to assign regions. @@ -211,9 +206,9 @@ public void testEnableTableRetainAssignment() throws IOException { @Test public void testEnableDisableAddColumnDeleteColumn() throws Exception { - final TableName tableName = TableName.valueOf(name.getMethodName()); + final TableName tableName = TableName.valueOf(methodName); TEST_UTIL.createTable(tableName, HConstants.CATALOG_FAMILY).close(); - while (!ADMIN.isTableEnabled(TableName.valueOf(name.getMethodName()))) { + while (!ADMIN.isTableEnabled(TableName.valueOf(methodName))) { Thread.sleep(10); } ADMIN.disableTable(tableName); @@ -236,7 +231,7 @@ public void testEnableDisableAddColumnDeleteColumn() throws Exception { @Test public void testGetTableDescriptor() throws IOException { - TableDescriptor htd = TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())) + TableDescriptor htd = TableDescriptorBuilder.newBuilder(TableName.valueOf(methodName)) .setColumnFamily(ColumnFamilyDescriptorBuilder.of("fam1")) .setColumnFamily(ColumnFamilyDescriptorBuilder.of("fam2")) .setColumnFamily(ColumnFamilyDescriptorBuilder.of("fam3")).build(); @@ -256,7 +251,7 @@ public void testGetTableDescriptor() throws IOException { */ @Test public void testReadOnlyTableModify() throws IOException, InterruptedException { - final TableName tableName = TableName.valueOf(name.getMethodName()); + final TableName tableName = TableName.valueOf(methodName); 
TEST_UTIL.createTable(tableName, HConstants.CATALOG_FAMILY).close(); // Make table read only @@ -276,9 +271,9 @@ public void testReadOnlyTableModify() throws IOException, InterruptedException { @Test public void testDeleteLastColumnFamily() throws Exception { - final TableName tableName = TableName.valueOf(name.getMethodName()); + final TableName tableName = TableName.valueOf(methodName); TEST_UTIL.createTable(tableName, HConstants.CATALOG_FAMILY).close(); - while (!ADMIN.isTableEnabled(TableName.valueOf(name.getMethodName()))) { + while (!ADMIN.isTableEnabled(TableName.valueOf(methodName))) { Thread.sleep(10); } @@ -370,7 +365,7 @@ public void testDeleteEditUnknownColumnFamilyAndOrTable() throws IOException { // Now make it so at least the table exists and then do tests against a // nonexistent column family -- see if we get right exceptions. final TableName tableName = - TableName.valueOf(name.getMethodName() + EnvironmentEdgeManager.currentTime()); + TableName.valueOf(methodName + EnvironmentEdgeManager.currentTime()); TableDescriptor htd = TableDescriptorBuilder.newBuilder(tableName) .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf")).build(); ADMIN.createTable(htd); @@ -381,8 +376,8 @@ public void testDeleteEditUnknownColumnFamilyAndOrTable() throws IOException { } catch (IOException e) { exception = e; } - assertTrue("found=" + exception.getClass().getName(), - exception instanceof InvalidFamilyOperationException); + assertTrue(exception instanceof InvalidFamilyOperationException, + "found=" + exception.getClass().getName()); exception = null; try { @@ -390,8 +385,8 @@ public void testDeleteEditUnknownColumnFamilyAndOrTable() throws IOException { } catch (IOException e) { exception = e; } - assertTrue("found=" + exception.getClass().getName(), - exception instanceof InvalidFamilyOperationException); + assertTrue(exception instanceof InvalidFamilyOperationException, + "found=" + exception.getClass().getName()); } finally { ADMIN.disableTable(tableName); 
ADMIN.deleteTable(tableName); @@ -416,7 +411,7 @@ private void verifyModifyTableResult(TableName tableName, byte[] family, byte[] @Test public void testModifyTableStoreFileTracker() throws IOException { - TableName tableName = TableName.valueOf(name.getMethodName()); + TableName tableName = TableName.valueOf(methodName); byte[] family = Bytes.toBytes("info"); byte[] qual = Bytes.toBytes("q"); byte[] row = Bytes.toBytes(0); @@ -472,7 +467,7 @@ private void verifyModifyColumnFamilyResult(TableName tableName, byte[] family, @Test public void testModifyColumnFamilyStoreFileTracker() throws IOException { - TableName tableName = TableName.valueOf(name.getMethodName()); + TableName tableName = TableName.valueOf(methodName); byte[] family = Bytes.toBytes("info"); byte[] qual = Bytes.toBytes("q"); byte[] row = Bytes.toBytes(0); @@ -523,7 +518,7 @@ public void testModifyColumnFamilyStoreFileTracker() throws IOException { @Test public void testModifyStoreFileTrackerError() throws IOException { - TableName tableName = TableName.valueOf(name.getMethodName()); + TableName tableName = TableName.valueOf(methodName); byte[] family = Bytes.toBytes("info"); TEST_UTIL.createTable(tableName, family).close(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin4.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin4.java index 61e028705f88..a30d4ce3b879 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin4.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin4.java @@ -19,16 +19,15 @@ import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.Matchers.containsString; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNotEquals; -import static org.junit.Assert.assertThrows; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertEquals; +import 
static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; import java.io.IOException; import java.util.ArrayList; import java.util.List; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.replication.ReplicationPeerConfig; import org.apache.hadoop.hbase.testclassification.ClientTests; @@ -36,14 +35,12 @@ import org.apache.hadoop.hbase.zookeeper.ZKUtil; import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.apache.hadoop.hbase.zookeeper.ZNodePaths; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; -@Category({ MediumTests.class, ClientTests.class }) +@Tag(MediumTests.TAG) +@Tag(ClientTests.TAG) public class TestAdmin4 extends TestAdminBase { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestAdmin4.class); // For HBASE-24208 @Test @@ -63,8 +60,9 @@ public void testDecommissionAndStopRegionServers() throws Exception { // Stop decommissioned region server and verify it is removed from draining znode ServerName serverName = serversToDecommission.get(0); ADMIN.stopRegionServer(serverName.getHostname() + ":" + serverName.getPort()); - assertNotEquals("RS not removed from decommissioned list", -1, - TEST_UTIL.waitFor(10000, () -> ADMIN.listDecommissionedRegionServers().isEmpty())); + assertNotEquals(-1, + TEST_UTIL.waitFor(10000, () -> ADMIN.listDecommissionedRegionServers().isEmpty()), + "RS not removed from decommissioned list"); ZKWatcher zkw = TEST_UTIL.getZooKeeperWatcher(); assertEquals(-1, ZKUtil.checkExists(zkw, ZNodePaths.joinZNode(zkw.getZNodePaths().drainingZNode, serverName.getServerName()))); diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdminBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdminBase.java index 7b61d1ebfc32..87f1e3ae47a1 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdminBase.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdminBase.java @@ -17,28 +17,32 @@ */ package org.apache.hadoop.hbase.client; -import static org.junit.Assert.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNotNull; import java.io.IOException; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.MetaTableAccessor; import org.apache.hadoop.hbase.TableName; -import org.junit.After; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.Rule; -import org.junit.rules.TestName; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.TestInfo; public class TestAdminBase { protected final static HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); protected static Admin ADMIN; - @Rule - public TestName name = new TestName(); + protected String methodName; - @BeforeClass + @BeforeEach + public void setUpMethodName(TestInfo info) { + this.methodName = info.getTestMethod().get().getName(); + } + + @BeforeAll public static void setUpBeforeClass() throws Exception { TEST_UTIL.getConfiguration().setInt("hbase.regionserver.msginterval", 100); TEST_UTIL.getConfiguration().setInt("hbase.client.pause", 250); @@ -51,12 +55,12 @@ public static void setUpBeforeClass() throws Exception { ADMIN = TEST_UTIL.getAdmin(); } - @AfterClass + @AfterAll public static void tearDownAfterClass() throws Exception { TEST_UTIL.shutdownMiniCluster(); } - @After + @AfterEach public void tearDown() throws Exception { for (TableDescriptor htd : 
ADMIN.listTableDescriptors()) { TEST_UTIL.deleteTable(htd.getTableName()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAllowPartialScanResultCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAllowPartialScanResultCache.java index 21831a67b0ff..731d49aaf61d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAllowPartialScanResultCache.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAllowPartialScanResultCache.java @@ -18,39 +18,34 @@ package org.apache.hadoop.hbase.client; import static org.apache.hadoop.hbase.client.TestBatchScanResultCache.createCells; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertSame; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertSame; import java.io.IOException; import java.util.Arrays; import org.apache.hadoop.hbase.ExtendedCell; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.util.Bytes; -import org.junit.After; -import org.junit.Before; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; -@Category({ SmallTests.class, ClientTests.class }) +@Tag(SmallTests.TAG) +@Tag(ClientTests.TAG) public class TestAllowPartialScanResultCache { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestAllowPartialScanResultCache.class); - private static byte[] CF = Bytes.toBytes("cf"); private AllowPartialScanResultCache resultCache; - @Before + @BeforeEach public void setUp() { resultCache = new AllowPartialScanResultCache(); } - @After + 
@AfterEach public void tearDown() { resultCache.clear(); resultCache = null; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAlwaysSetScannerId.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAlwaysSetScannerId.java index ac1bdb603892..66b1e2f03511 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAlwaysSetScannerId.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAlwaysSetScannerId.java @@ -17,11 +17,10 @@ */ package org.apache.hadoop.hbase.client; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; import java.io.IOException; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.ipc.BlockingRpcCallback; @@ -29,11 +28,10 @@ import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.RegionServerTests; import org.apache.hadoop.hbase.util.Bytes; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; import org.apache.hbase.thirdparty.com.google.common.io.Closeables; import org.apache.hbase.thirdparty.com.google.protobuf.ServiceException; @@ -46,13 +44,10 @@ /** * Testcase to make sure that we always set scanner id in ScanResponse. See HBASE-18000. 
*/ -@Category({ RegionServerTests.class, MediumTests.class }) +@Tag(RegionServerTests.TAG) +@Tag(MediumTests.TAG) public class TestAlwaysSetScannerId { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestAlwaysSetScannerId.class); - private static final HBaseTestingUtil UTIL = new HBaseTestingUtil(); private static final TableName TABLE_NAME = TableName.valueOf("test"); @@ -69,7 +64,7 @@ public class TestAlwaysSetScannerId { private static ClientProtos.ClientService.Interface STUB; - @BeforeClass + @BeforeAll public static void setUp() throws Exception { UTIL.startMiniCluster(1); try (Table table = UTIL.createTable(TABLE_NAME, CF)) { @@ -83,7 +78,7 @@ public static void setUp() throws Exception { STUB = CONN.getRegionServerStub(UTIL.getHBaseCluster().getRegionServer(0).getServerName()); } - @AfterClass + @AfterAll public static void tearDown() throws Exception { Closeables.close(CONN, true); UTIL.shutdownMiniCluster(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAppendFromClientSide.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAppendFromClientSide.java index 6d8164dbde82..25e05ebb7956 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAppendFromClientSide.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAppendFromClientSide.java @@ -17,58 +17,49 @@ */ package org.apache.hadoop.hbase.client; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotEquals; import java.io.IOException; -import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.CellBuilderType; import org.apache.hadoop.hbase.ExtendedCellBuilderFactory; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import 
org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.Bytes; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Rule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.rules.TestName; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestInfo; /** * Run Append tests that use the HBase clients; */ -@Category(MediumTests.class) +@Tag(MediumTests.TAG) +@Tag(ClientTests.TAG) public class TestAppendFromClientSide { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestAppendFromClientSide.class); - protected final static HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); private static byte[] ROW = Bytes.toBytes("testRow"); private static byte[] FAMILY = Bytes.toBytes("testFamily"); private static byte[] QUALIFIER = Bytes.toBytes("testQualifier"); - @Rule - public TestName name = new TestName(); - @BeforeClass + @BeforeAll public static void beforeClass() throws Exception { - Configuration conf = TEST_UTIL.getConfiguration(); TEST_UTIL.startMiniCluster(3); } - @AfterClass + @AfterAll public static void afterClass() throws Exception { TEST_UTIL.shutdownMiniCluster(); } @Test - public void testAppendWithCustomTimestamp() throws IOException { - TableName TABLENAME = TableName.valueOf(name.getMethodName()); + public void testAppendWithCustomTimestamp(TestInfo testInfo) throws IOException { + TableName TABLENAME = TableName.valueOf(testInfo.getTestMethod().get().getName()); Table table = TEST_UTIL.createTable(TABLENAME, FAMILY); long timestamp = 999; Append append = new Append(ROW); diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAdminBuilder.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAdminBuilder.java index 06d9c0314da9..fae2a5997999 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAdminBuilder.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAdminBuilder.java @@ -19,17 +19,16 @@ import static org.apache.hadoop.hbase.NamespaceDescriptor.DEFAULT_NAMESPACE_NAME_STR; import static org.apache.hadoop.hbase.client.AsyncConnectionConfiguration.START_LOG_ERRORS_AFTER_COUNT_KEY; -import static org.junit.Assert.fail; +import static org.junit.jupiter.api.Assertions.fail; import java.io.IOException; -import java.util.Arrays; -import java.util.List; import java.util.Optional; import java.util.concurrent.ForkJoinPool; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicLong; import java.util.function.Supplier; -import org.apache.hadoop.hbase.HBaseClassTestRule; +import java.util.stream.Stream; +import org.apache.hadoop.hbase.HBaseParameterizedTestTemplate; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.coprocessor.CoprocessorHost; @@ -40,34 +39,27 @@ import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.util.Threads; -import org.junit.After; -import org.junit.Before; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -import org.junit.runners.Parameterized.Parameter; -import org.junit.runners.Parameterized.Parameters; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Tag; +import 
org.junit.jupiter.api.TestTemplate; +import org.junit.jupiter.params.provider.Arguments; import org.apache.hbase.thirdparty.com.google.common.io.Closeables; -@RunWith(Parameterized.class) -@Category({ LargeTests.class, ClientTests.class }) +@Tag(LargeTests.TAG) +@Tag(ClientTests.TAG) +@HBaseParameterizedTestTemplate public class TestAsyncAdminBuilder { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestAsyncAdminBuilder.class); - - private static final Logger LOG = LoggerFactory.getLogger(TestAsyncAdminBuilder.class); private final static HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); private static AsyncConnection ASYNC_CONN; - @Parameter - public Supplier getAdminBuilder; + private final Supplier getAdminBuilder; + + public TestAsyncAdminBuilder(Supplier getAdminBuilder) { + this.getAdminBuilder = getAdminBuilder; + } private static AsyncAdminBuilder getRawAsyncAdminBuilder() { return ASYNC_CONN.getAdminBuilder(); @@ -77,17 +69,17 @@ private static AsyncAdminBuilder getAsyncAdminBuilder() { return ASYNC_CONN.getAdminBuilder(ForkJoinPool.commonPool()); } - @Parameters - public static List params() { - return Arrays.asList(new Supplier[] { TestAsyncAdminBuilder::getRawAsyncAdminBuilder }, - new Supplier[] { TestAsyncAdminBuilder::getAsyncAdminBuilder }); + public static Stream parameters() { + return Stream.of( + Arguments.of((Supplier) TestAsyncAdminBuilder::getRawAsyncAdminBuilder), + Arguments.of((Supplier) TestAsyncAdminBuilder::getAsyncAdminBuilder)); } private static final int DEFAULT_RPC_TIMEOUT = 10000; private static final int DEFAULT_OPERATION_TIMEOUT = 30000; private static final int DEFAULT_RETRIES_NUMBER = 2; - @Before + @BeforeEach public void setUp() throws Exception { TEST_UTIL.getConfiguration().setInt(HConstants.HBASE_RPC_TIMEOUT_KEY, DEFAULT_RPC_TIMEOUT); TEST_UTIL.getConfiguration().setInt(HConstants.HBASE_CLIENT_OPERATION_TIMEOUT, @@ -97,13 +89,13 @@ public void setUp() throws Exception { 
TEST_UTIL.getConfiguration().setInt(START_LOG_ERRORS_AFTER_COUNT_KEY, 0); } - @After + @AfterEach public void tearDown() throws Exception { Closeables.close(ASYNC_CONN, true); TEST_UTIL.shutdownMiniCluster(); } - @Test + @TestTemplate public void testRpcTimeout() throws Exception { TEST_UTIL.getConfiguration().set(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, TestRpcTimeoutCoprocessor.class.getName()); @@ -126,7 +118,7 @@ public void testRpcTimeout() throws Exception { } } - @Test + @TestTemplate public void testOperationTimeout() throws Exception { // set retry number to 100 to make sure that this test only be affected by operation timeout TEST_UTIL.getConfiguration().setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 100); @@ -153,7 +145,7 @@ public void testOperationTimeout() throws Exception { } } - @Test + @TestTemplate public void testMaxRetries() throws Exception { // set operation timeout to 300s to make sure that this test only be affected by retry number TEST_UTIL.getConfiguration().setInt(HConstants.HBASE_CLIENT_OPERATION_TIMEOUT, 300000); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncBufferMutator.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncBufferMutator.java index a6a6333cd08c..6030a4e31805 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncBufferMutator.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncBufferMutator.java @@ -19,12 +19,12 @@ import static org.hamcrest.CoreMatchers.instanceOf; import static org.hamcrest.MatcherAssert.assertThat; -import static org.junit.Assert.assertArrayEquals; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; +import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; 
+import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; import java.io.IOException; import java.util.ArrayList; @@ -35,28 +35,23 @@ import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; import java.util.stream.IntStream; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.Bytes; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; import org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer; import org.apache.hbase.thirdparty.io.netty.util.Timeout; -@Category({ MediumTests.class, ClientTests.class }) +@Tag(MediumTests.TAG) +@Tag(ClientTests.TAG) public class TestAsyncBufferMutator { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestAsyncBufferMutator.class); - private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); private static TableName TABLE_NAME = TableName.valueOf("async"); @@ -73,7 +68,7 @@ public class TestAsyncBufferMutator { private static AsyncConnection CONN; - @BeforeClass + @BeforeAll public static void setUp() throws Exception { TEST_UTIL.startMiniCluster(1); TEST_UTIL.createTable(TABLE_NAME, CF); @@ -82,7 +77,7 @@ public static void setUp() throws Exception { Bytes.random(VALUE); } - @AfterClass + @AfterAll public static void tearDown() throws Exception { CONN.close(); 
TEST_UTIL.shutdownMiniCluster(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncClientPauseForRpcThrottling.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncClientPauseForRpcThrottling.java index 3455b3975664..3fdf3353bce3 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncClientPauseForRpcThrottling.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncClientPauseForRpcThrottling.java @@ -17,9 +17,9 @@ */ package org.apache.hadoop.hbase.client; -import static org.junit.Assert.assertArrayEquals; -import static org.junit.Assert.assertThrows; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; import java.io.IOException; import java.util.ArrayList; @@ -31,7 +31,6 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.RegionTooBusyException; @@ -42,11 +41,10 @@ import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.Bytes; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; import org.apache.hbase.thirdparty.com.google.common.io.Closeables; import org.apache.hbase.thirdparty.com.google.protobuf.RpcController; @@ -54,13 +52,10 @@ import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos; -@Category({ MediumTests.class, ClientTests.class }) +@Tag(MediumTests.TAG) +@Tag(ClientTests.TAG) public class TestAsyncClientPauseForRpcThrottling { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestAsyncClientPauseForRpcThrottling.class); - private static final HBaseTestingUtil UTIL = new HBaseTestingUtil(); private static TableName TABLE_NAME = TableName.valueOf("RpcThrottling"); @@ -134,13 +129,12 @@ protected RSRpcServices createRpcServices() throws IOException { } } - @BeforeClass + @BeforeAll public static void setUp() throws Exception { - assertTrue( + assertTrue(MAX_MULTIPLIER_EXPECTATION < HConstants.RETRY_BACKOFF[RETRY_COUNT], "The MAX_MULTIPLIER_EXPECTATION must be less than HConstants.RETRY_BACKOFF[RETRY_COUNT] " + "in order for our tests to adequately verify that we aren't " - + "multiplying throttled pauses based on the retry count.", - MAX_MULTIPLIER_EXPECTATION < HConstants.RETRY_BACKOFF[RETRY_COUNT]); + + "multiplying throttled pauses based on the retry count."); UTIL.getConfiguration().setLong(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 1); UTIL.startMiniCluster(1); @@ -161,7 +155,7 @@ public static void setUp() throws Exception { CONN = ConnectionFactory.createAsyncConnection(conf).get(); } - @AfterClass + @AfterAll public static void tearDown() throws Exception { UTIL.getAdmin().disableTable(TABLE_NAME); UTIL.getAdmin().deleteTable(TABLE_NAME); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncClientPauseForServerOverloaded.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncClientPauseForServerOverloaded.java index 799395c6733f..caba3627a785 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncClientPauseForServerOverloaded.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncClientPauseForServerOverloaded.java @@ -18,9 +18,9 @@ 
package org.apache.hadoop.hbase.client; import static org.apache.hadoop.hbase.client.AsyncConnectionConfiguration.HBASE_CLIENT_PAUSE_FOR_SERVER_OVERLOADED; -import static org.junit.Assert.assertArrayEquals; -import static org.junit.Assert.assertNull; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertTrue; import java.io.IOException; import java.util.ArrayList; @@ -33,7 +33,6 @@ import java.util.concurrent.atomic.AtomicInteger; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Abortable; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableName; @@ -49,24 +48,20 @@ import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.Bytes; -import org.junit.After; -import org.junit.AfterClass; -import org.junit.Before; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; import org.apache.hbase.thirdparty.com.google.common.io.Closeables; import org.apache.hbase.thirdparty.com.google.protobuf.Descriptors.MethodDescriptor; -@Category({ MediumTests.class, ClientTests.class }) +@Tag(MediumTests.TAG) +@Tag(ClientTests.TAG) public class TestAsyncClientPauseForServerOverloaded { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestAsyncClientPauseForServerOverloaded.class); - private static final 
HBaseTestingUtil UTIL = new HBaseTestingUtil(); private static TableName TABLE_NAME = TableName.valueOf("ServerOverloaded"); @@ -149,7 +144,7 @@ public RpcScheduler create(Configuration conf, PriorityFunction priority, Aborta } - @BeforeClass + @BeforeAll public static void setUp() throws Exception { UTIL.getConfiguration().setLong(HConstants.HBASE_CLIENT_PAUSE, 10); UTIL.getConfiguration().set("hbase.ipc.server.callqueue.type", "pluggable"); @@ -164,13 +159,13 @@ public static void setUp() throws Exception { CONN = ConnectionFactory.createAsyncConnection(conf).get(); } - @AfterClass + @AfterAll public static void tearDown() throws Exception { Closeables.close(CONN, true); UTIL.shutdownMiniCluster(); } - @Before + @BeforeEach public void setUpBeforeTest() throws IOException { try (Table table = UTIL.createTable(TABLE_NAME, FAMILY)) { for (int i = 0; i < 100; i++) { @@ -180,7 +175,7 @@ public void setUpBeforeTest() throws IOException { MODE = FailMode.CALL_QUEUE_TOO_BIG; } - @After + @AfterEach public void tearDownAfterTest() throws IOException { for (FailMode mode : FailMode.values()) { mode.invoked.clear(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncClientPushback.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncClientPushback.java index 88b9431338bf..72fd53da85b2 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncClientPushback.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncClientPushback.java @@ -21,38 +21,33 @@ import java.util.concurrent.CompletableFuture; import java.util.concurrent.CountDownLatch; import java.util.concurrent.atomic.AtomicLong; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.client.backoff.ClientBackoffPolicy; import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.LargeTests; import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.FutureUtils; -import org.junit.After; -import org.junit.Before; -import org.junit.ClassRule; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Tag; import org.apache.hbase.thirdparty.com.google.common.io.Closeables; -@Category({ LargeTests.class, ClientTests.class }) +@Tag(LargeTests.TAG) +@Tag(ClientTests.TAG) public class TestAsyncClientPushback extends ClientPushbackTestBase { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestAsyncClientPushback.class); - private AsyncConnectionImpl conn; private AsyncBufferedMutator mutator; - @Before + @BeforeEach public void setUp() throws Exception { conn = (AsyncConnectionImpl) ConnectionFactory.createAsyncConnection(UTIL.getConfiguration()).get(); mutator = conn.getBufferedMutator(tableName); } - @After + @AfterEach public void tearDown() throws IOException { Closeables.close(mutator, true); Closeables.close(conn, true); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncNonMetaRegionLocator.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncNonMetaRegionLocator.java index 0bfbd18eb32f..a4ece476963f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncNonMetaRegionLocator.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncNonMetaRegionLocator.java @@ -23,23 +23,23 @@ import static org.apache.hadoop.hbase.client.RegionReplicaTestHelper.testLocator; import static org.hamcrest.CoreMatchers.instanceOf; import static org.hamcrest.MatcherAssert.assertThat; -import static org.junit.Assert.assertArrayEquals; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertNull; -import static 
org.junit.Assert.assertSame; +import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertSame; import java.io.IOException; import java.util.Arrays; -import java.util.Collection; import java.util.List; import java.util.concurrent.CompletableFuture; import java.util.concurrent.ExecutionException; import java.util.concurrent.ThreadLocalRandom; import java.util.stream.IntStream; +import java.util.stream.Stream; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.CatalogReplicaMode; -import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HBaseParameterizedTestTemplate; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.MetaTableAccessor; @@ -56,28 +56,22 @@ import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.ServerRegionReplicaUtil; -import org.junit.After; -import org.junit.AfterClass; -import org.junit.Before; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -import org.junit.runners.Parameterized.Parameter; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.TestTemplate; +import org.junit.jupiter.params.provider.Arguments; import org.apache.hbase.thirdparty.com.google.common.collect.Lists; import org.apache.hbase.thirdparty.com.google.common.io.Closeables; -@Category({ 
MediumTests.class, ClientTests.class }) -@RunWith(Parameterized.class) +@Tag(MediumTests.TAG) +@Tag(ClientTests.TAG) +@HBaseParameterizedTestTemplate public class TestAsyncNonMetaRegionLocator { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestAsyncNonMetaRegionLocator.class); - private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); private static final TableName TABLE_NAME = TableName.valueOf("async"); @@ -91,10 +85,13 @@ public class TestAsyncNonMetaRegionLocator { private AsyncConnectionImpl conn; private AsyncNonMetaRegionLocator locator; - @Parameter - public CatalogReplicaMode metaReplicaMode; + private final CatalogReplicaMode metaReplicaMode; + + public TestAsyncNonMetaRegionLocator(CatalogReplicaMode metaReplicaMode) { + this.metaReplicaMode = metaReplicaMode; + } - @BeforeClass + @BeforeAll public static void setUp() throws Exception { Configuration conf = TEST_UTIL.getConfiguration(); // Enable hbase:meta replication. @@ -117,12 +114,12 @@ public static void setUp() throws Exception { } } - @AfterClass + @AfterAll public static void tearDown() throws Exception { TEST_UTIL.shutdownMiniCluster(); } - @Before + @BeforeEach public void setUpBeforeTest() throws InterruptedException, ExecutionException, IOException { Configuration c = new Configuration(TEST_UTIL.getConfiguration()); // Enable meta replica LoadBalance mode for this connection. 
@@ -134,7 +131,7 @@ public void setUpBeforeTest() throws InterruptedException, ExecutionException, I locator = new AsyncNonMetaRegionLocator(conn, AsyncConnectionImpl.RETRY_TIMER); } - @After + @AfterEach public void tearDownAfterTest() throws IOException { Admin admin = TEST_UTIL.getAdmin(); if (admin.tableExists(TABLE_NAME)) { @@ -146,10 +143,9 @@ public void tearDownAfterTest() throws IOException { Closeables.close(conn, true); } - @Parameterized.Parameters - public static Collection paramAbstractTestRegionLocatoreters() { - return Arrays - .asList(new Object[][] { { CatalogReplicaMode.NONE }, { CatalogReplicaMode.LOAD_BALANCE } }); + public static Stream parameters() { + return Stream.of(Arguments.of(CatalogReplicaMode.NONE), + Arguments.of(CatalogReplicaMode.LOAD_BALANCE)); } private void createSingleRegionTable() throws IOException, InterruptedException { @@ -164,7 +160,7 @@ private CompletableFuture getDefaultRegionLocation(TableName ta .thenApply(RegionLocations::getDefaultRegionLocation); } - @Test + @TestTemplate public void testNoTable() throws InterruptedException { for (RegionLocateType locateType : RegionLocateType.values()) { try { @@ -175,7 +171,7 @@ public void testNoTable() throws InterruptedException { } } - @Test + @TestTemplate public void testDisableTable() throws IOException, InterruptedException { createSingleRegionTable(); TEST_UTIL.getAdmin().disableTable(TABLE_NAME); @@ -197,7 +193,7 @@ private void assertLocEquals(byte[] startKey, byte[] endKey, ServerName serverNa assertEquals(serverName, loc.getServerName()); } - @Test + @TestTemplate public void testSingleRegionTable() throws IOException, InterruptedException, ExecutionException { createSingleRegionTable(); ServerName serverName = TEST_UTIL.getRSForFirstRegionInTable(TABLE_NAME).getServerName(); @@ -243,7 +239,7 @@ private ServerName[] getLocations(byte[][] startKeys) { return serverNames; } - @Test + @TestTemplate public void testMultiRegionTable() throws IOException, 
InterruptedException { createMultiRegionTable(); byte[][] startKeys = getStartKeys(); @@ -283,7 +279,7 @@ public void testMultiRegionTable() throws IOException, InterruptedException { })); } - @Test + @TestTemplate public void testRegionMove() throws IOException, InterruptedException, ExecutionException { createSingleRegionTable(); ServerName serverName = TEST_UTIL.getRSForFirstRegionInTable(TABLE_NAME).getServerName(); @@ -314,7 +310,7 @@ public void testRegionMove() throws IOException, InterruptedException, Execution // usually locate after will return the same result, so we add a test to make it return different // result. - @Test + @TestTemplate public void testLocateAfter() throws IOException, InterruptedException, ExecutionException { byte[] row = Bytes.toBytes("1"); byte[] splitKey = Arrays.copyOf(row, 2); @@ -339,7 +335,7 @@ public void testLocateAfter() throws IOException, InterruptedException, Executio } // For HBASE-17402 - @Test + @TestTemplate public void testConcurrentLocate() throws IOException, InterruptedException, ExecutionException { createMultiRegionTable(); byte[][] startKeys = getStartKeys(); @@ -358,7 +354,7 @@ public void testConcurrentLocate() throws IOException, InterruptedException, Exe } } - @Test + @TestTemplate public void testReload() throws Exception { createSingleRegionTable(); ServerName serverName = TEST_UTIL.getRSForFirstRegionInTable(TABLE_NAME).getServerName(); @@ -416,7 +412,7 @@ public String explainFailure() throws Exception { } // Testcase for HBASE-20822 - @Test + @TestTemplate public void testLocateBeforeLastRegion() throws IOException, InterruptedException, ExecutionException { createMultiRegionTable(); @@ -427,7 +423,7 @@ public void testLocateBeforeLastRegion() assertArrayEquals(loc.getRegion().getEndKey(), EMPTY_END_ROW); } - @Test + @TestTemplate public void testRegionReplicas() throws Exception { TEST_UTIL.getAdmin().createTable(TableDescriptorBuilder.newBuilder(TABLE_NAME) 
.setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY)).setRegionReplication(3).build()); @@ -450,7 +446,7 @@ public RegionLocations getRegionLocations(TableName tableName, int replicaId, bo } // Testcase for HBASE-21961 - @Test + @TestTemplate public void testLocateBeforeInOnlyRegion() throws IOException, InterruptedException { createSingleRegionTable(); HRegionLocation loc = @@ -460,7 +456,7 @@ public void testLocateBeforeInOnlyRegion() throws IOException, InterruptedExcept assertArrayEquals(loc.getRegion().getEndKey(), EMPTY_END_ROW); } - @Test + @TestTemplate public void testConcurrentUpdateCachedLocationOnError() throws Exception { createSingleRegionTable(); HRegionLocation loc = @@ -469,7 +465,7 @@ public void testConcurrentUpdateCachedLocationOnError() throws Exception { .forEach(i -> locator.updateCachedLocationOnError(loc, new NotServingRegionException())); } - @Test + @TestTemplate public void testCacheLocationWhenGetAllLocations() throws Exception { createMultiRegionTable(); AsyncConnectionImpl conn = (AsyncConnectionImpl) ConnectionFactory @@ -481,7 +477,7 @@ public void testCacheLocationWhenGetAllLocations() throws Exception { } } - @Test + @TestTemplate public void testDoNotCacheLocationWithNullServerNameWhenGetAllLocations() throws Exception { createMultiRegionTable(); AsyncConnectionImpl conn = (AsyncConnectionImpl) ConnectionFactory diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncNonMetaRegionLocatorConcurrenyLimit.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncNonMetaRegionLocatorConcurrenyLimit.java index 4529c07dfd13..a1412e00039a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncNonMetaRegionLocatorConcurrenyLimit.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncNonMetaRegionLocatorConcurrenyLimit.java @@ -22,8 +22,8 @@ import static org.apache.hadoop.hbase.client.ConnectionUtils.isEmptyStartRow; import static 
org.apache.hadoop.hbase.client.ConnectionUtils.isEmptyStopRow; import static org.apache.hadoop.hbase.coprocessor.CoprocessorHost.REGION_COPROCESSOR_CONF_KEY; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; import java.io.IOException; import java.util.List; @@ -33,7 +33,6 @@ import java.util.concurrent.atomic.AtomicInteger; import java.util.stream.IntStream; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.RegionLocations; @@ -48,21 +47,17 @@ import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Threads; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; import org.apache.hbase.thirdparty.com.google.common.io.Closeables; -@Category({ MediumTests.class, ClientTests.class }) +@Tag(MediumTests.TAG) +@Tag(ClientTests.TAG) public class TestAsyncNonMetaRegionLocatorConcurrenyLimit { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestAsyncNonMetaRegionLocatorConcurrenyLimit.class); - private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); private static TableName TABLE_NAME = TableName.valueOf("async"); @@ -117,7 +112,7 @@ public boolean postScannerNext(ObserverContext> getTable; + private final Supplier> getTable; + + public TestAsyncTable(Supplier> getTable) { + this.getTable = getTable; + } private static AsyncTable 
getRawTable() { return ASYNC_CONN.getTable(TABLE_NAME); @@ -112,13 +107,12 @@ private static AsyncTable getTable() { return ASYNC_CONN.getTable(TABLE_NAME, ForkJoinPool.commonPool()); } - @Parameters - public static List params() { - return Arrays.asList(new Supplier[] { TestAsyncTable::getRawTable }, - new Supplier[] { TestAsyncTable::getTable }); + public static Stream parameters() { + return Stream.of(Arguments.of((Supplier>) TestAsyncTable::getRawTable), + Arguments.of((Supplier>) TestAsyncTable::getTable)); } - @BeforeClass + @BeforeAll public static void setUpBeforeClass() throws Exception { TEST_UTIL.getConfiguration().setInt(ConnectionConfiguration.MAX_KEYVALUE_SIZE_KEY, MAX_KEY_VALUE_SIZE); @@ -129,22 +123,22 @@ public static void setUpBeforeClass() throws Exception { assertFalse(ASYNC_CONN.isClosed()); } - @AfterClass + @AfterAll public static void tearDownAfterClass() throws Exception { Closeables.close(ASYNC_CONN, true); assertTrue(ASYNC_CONN.isClosed()); TEST_UTIL.shutdownMiniCluster(); } - @Before + @BeforeEach public void setUp() throws IOException, InterruptedException, ExecutionException { - row = Bytes.toBytes(testName.getMethodName().replaceAll("[^0-9A-Za-z]", "_")); + row = Bytes.toBytes("row" + ROW_COUNTER.getAndIncrement()); if (ASYNC_CONN.getAdmin().isTableDisabled(TABLE_NAME).get()) { ASYNC_CONN.getAdmin().enableTable(TABLE_NAME).get(); } } - @Test + @TestTemplate public void testSimple() throws Exception { AsyncTable table = getTable.get(); table.put(new Put(row).addColumn(FAMILY, QUALIFIER, VALUE)).get(); @@ -162,7 +156,7 @@ private byte[] concat(byte[] base, int index) { } @SuppressWarnings("FutureReturnValueIgnored") - @Test + @TestTemplate public void testSimpleMultiple() throws Exception { AsyncTable table = getTable.get(); int count = 100; @@ -207,7 +201,7 @@ public void testSimpleMultiple() throws Exception { } @SuppressWarnings("FutureReturnValueIgnored") - @Test + @TestTemplate public void testIncrement() throws 
InterruptedException, ExecutionException { AsyncTable table = getTable.get(); int count = 100; @@ -225,7 +219,7 @@ public void testIncrement() throws InterruptedException, ExecutionException { } @SuppressWarnings("FutureReturnValueIgnored") - @Test + @TestTemplate public void testAppend() throws InterruptedException, ExecutionException { AsyncTable table = getTable.get(); int count = 10; @@ -249,7 +243,7 @@ public void testAppend() throws InterruptedException, ExecutionException { assertArrayEquals(IntStream.range(0, count).toArray(), actual); } - @Test + @TestTemplate public void testMutateRow() throws InterruptedException, ExecutionException, IOException { AsyncTable table = getTable.get(); RowMutations mutation = new RowMutations(row); @@ -281,7 +275,7 @@ public void testMutateRow() throws InterruptedException, ExecutionException, IOE // Tests for old checkAndMutate API @SuppressWarnings("FutureReturnValueIgnored") - @Test + @TestTemplate @Deprecated public void testCheckAndPutForOldApi() throws InterruptedException, ExecutionException { AsyncTable table = getTable.get(); @@ -305,7 +299,7 @@ public void testCheckAndPutForOldApi() throws InterruptedException, ExecutionExc } @SuppressWarnings("FutureReturnValueIgnored") - @Test + @TestTemplate @Deprecated public void testCheckAndDeleteForOldApi() throws InterruptedException, ExecutionException { AsyncTable table = getTable.get(); @@ -344,7 +338,7 @@ public void testCheckAndDeleteForOldApi() throws InterruptedException, Execution } @SuppressWarnings("FutureReturnValueIgnored") - @Test + @TestTemplate @Deprecated public void testCheckAndMutateForOldApi() throws InterruptedException, ExecutionException { AsyncTable table = getTable.get(); @@ -389,7 +383,7 @@ public void testCheckAndMutateForOldApi() throws InterruptedException, Execution }); } - @Test + @TestTemplate @Deprecated public void testCheckAndMutateWithTimeRangeForOldApi() throws Exception { AsyncTable table = getTable.get(); @@ -430,7 +424,7 @@ public void 
testCheckAndMutateWithTimeRangeForOldApi() throws Exception { assertTrue(ok); } - @Test + @TestTemplate @Deprecated public void testCheckAndMutateWithSingleFilterForOldApi() throws Throwable { AsyncTable table = getTable.get(); @@ -490,7 +484,7 @@ public void testCheckAndMutateWithSingleFilterForOldApi() throws Throwable { assertFalse(table.exists(new Get(row).addColumn(FAMILY, Bytes.toBytes("A"))).get()); } - @Test + @TestTemplate @Deprecated public void testCheckAndMutateWithMultipleFiltersForOldApi() throws Throwable { AsyncTable table = getTable.get(); @@ -562,7 +556,7 @@ public void testCheckAndMutateWithMultipleFiltersForOldApi() throws Throwable { assertFalse(table.exists(new Get(row).addColumn(FAMILY, Bytes.toBytes("A"))).get()); } - @Test + @TestTemplate @Deprecated public void testCheckAndMutateWithTimestampFilterForOldApi() throws Throwable { AsyncTable table = getTable.get(); @@ -594,7 +588,7 @@ public void testCheckAndMutateWithTimestampFilterForOldApi() throws Throwable { assertFalse(table.exists(new Get(row).addColumn(FAMILY, Bytes.toBytes("C"))).get()); } - @Test + @TestTemplate @Deprecated public void testCheckAndMutateWithFilterAndTimeRangeForOldApi() throws Throwable { AsyncTable table = getTable.get(); @@ -626,17 +620,19 @@ public void testCheckAndMutateWithFilterAndTimeRangeForOldApi() throws Throwable assertFalse(table.exists(new Get(row).addColumn(FAMILY, Bytes.toBytes("C"))).get()); } - @Test(expected = NullPointerException.class) + @TestTemplate @Deprecated public void testCheckAndMutateWithoutConditionForOldApi() { - getTable.get().checkAndMutate(row, FAMILY) - .thenPut(new Put(row).addColumn(FAMILY, Bytes.toBytes("D"), Bytes.toBytes("d"))); + assertThrows(NullPointerException.class, () -> { + getTable.get().checkAndMutate(row, FAMILY) + .thenPut(new Put(row).addColumn(FAMILY, Bytes.toBytes("D"), Bytes.toBytes("d"))); + }); } // Tests for new CheckAndMutate API @SuppressWarnings("FutureReturnValueIgnored") - @Test + @TestTemplate public 
void testCheckAndPut() throws InterruptedException, ExecutionException { AsyncTable table = getTable.get(); AtomicInteger successCount = new AtomicInteger(0); @@ -661,7 +657,7 @@ public void testCheckAndPut() throws InterruptedException, ExecutionException { } @SuppressWarnings("FutureReturnValueIgnored") - @Test + @TestTemplate public void testCheckAndDelete() throws InterruptedException, ExecutionException { AsyncTable table = getTable.get(); int count = 10; @@ -701,7 +697,7 @@ public void testCheckAndDelete() throws InterruptedException, ExecutionException } @SuppressWarnings("FutureReturnValueIgnored") - @Test + @TestTemplate public void testCheckAndMutate() throws InterruptedException, ExecutionException { AsyncTable table = getTable.get(); int count = 10; @@ -749,7 +745,7 @@ public void testCheckAndMutate() throws InterruptedException, ExecutionException }); } - @Test + @TestTemplate public void testCheckAndMutateWithTimeRange() throws Exception { AsyncTable table = getTable.get(); final long ts = EnvironmentEdgeManager.currentTime() / 2; @@ -797,7 +793,7 @@ public void testCheckAndMutateWithTimeRange() throws Exception { assertNull(result.getResult()); } - @Test + @TestTemplate public void testCheckAndMutateWithSingleFilter() throws Throwable { AsyncTable table = getTable.get(); @@ -859,7 +855,7 @@ public void testCheckAndMutateWithSingleFilter() throws Throwable { assertFalse(table.exists(new Get(row).addColumn(FAMILY, Bytes.toBytes("A"))).get()); } - @Test + @TestTemplate public void testCheckAndMutateWithMultipleFilters() throws Throwable { AsyncTable table = getTable.get(); @@ -932,7 +928,7 @@ public void testCheckAndMutateWithMultipleFilters() throws Throwable { assertFalse(table.exists(new Get(row).addColumn(FAMILY, Bytes.toBytes("A"))).get()); } - @Test + @TestTemplate public void testCheckAndMutateWithTimestampFilter() throws Throwable { AsyncTable table = getTable.get(); @@ -965,7 +961,7 @@ public void testCheckAndMutateWithTimestampFilter() throws 
Throwable { assertFalse(table.exists(new Get(row).addColumn(FAMILY, Bytes.toBytes("C"))).get()); } - @Test + @TestTemplate public void testCheckAndMutateWithFilterAndTimeRange() throws Throwable { AsyncTable table = getTable.get(); @@ -996,7 +992,7 @@ public void testCheckAndMutateWithFilterAndTimeRange() throws Throwable { assertFalse(table.exists(new Get(row).addColumn(FAMILY, Bytes.toBytes("C"))).get()); } - @Test + @TestTemplate public void testCheckAndIncrement() throws Throwable { AsyncTable table = getTable.get(); @@ -1055,7 +1051,7 @@ public void testCheckAndIncrement() throws Throwable { assertEquals(3, Bytes.toLong(result.getValue(FAMILY, Bytes.toBytes("B")))); } - @Test + @TestTemplate public void testCheckAndAppend() throws Throwable { AsyncTable table = getTable.get(); @@ -1118,7 +1114,7 @@ public void testCheckAndAppend() throws Throwable { assertEquals("bbb", Bytes.toString(result.getValue(FAMILY, Bytes.toBytes("B")))); } - @Test + @TestTemplate public void testCheckAndRowMutations() throws Throwable { final byte[] q1 = Bytes.toBytes("q1"); final byte[] q2 = Bytes.toBytes("q2"); @@ -1172,7 +1168,7 @@ public void testCheckAndRowMutations() throws Throwable { // Tests for batch version of checkAndMutate - @Test + @TestTemplate public void testCheckAndMutateBatch() throws Throwable { AsyncTable table = getTable.get(); byte[] row2 = Bytes.toBytes(Bytes.toString(row) + "2"); @@ -1257,7 +1253,7 @@ public void testCheckAndMutateBatch() throws Throwable { assertEquals("d", Bytes.toString(result.getValue(FAMILY, Bytes.toBytes("D")))); } - @Test + @TestTemplate public void testCheckAndMutateBatch2() throws Throwable { AsyncTable table = getTable.get(); byte[] row2 = Bytes.toBytes(Bytes.toString(row) + "2"); @@ -1339,7 +1335,7 @@ public void testCheckAndMutateBatch2() throws Throwable { assertEquals("d", Bytes.toString(result.getValue(FAMILY, Bytes.toBytes("D")))); } - @Test + @TestTemplate public void testCheckAndMutateBatchWithFilter() throws Throwable { 
AsyncTable table = getTable.get(); byte[] row2 = Bytes.toBytes(Bytes.toString(row) + "2"); @@ -1450,7 +1446,7 @@ public void testCheckAndMutateBatchWithFilter() throws Throwable { assertEquals("f", Bytes.toString(result.getValue(FAMILY, Bytes.toBytes("F")))); } - @Test + @TestTemplate public void testCheckAndMutateBatchWithFilterAndTimeRange() throws Throwable { AsyncTable table = getTable.get(); byte[] row2 = Bytes.toBytes(Bytes.toString(row) + "2"); @@ -1497,7 +1493,7 @@ public void testCheckAndMutateBatchWithFilterAndTimeRange() throws Throwable { assertEquals("f", Bytes.toString(result.getValue(FAMILY, Bytes.toBytes("F")))); } - @Test + @TestTemplate public void testCheckAndIncrementBatch() throws Throwable { AsyncTable table = getTable.get(); byte[] row2 = Bytes.toBytes(Bytes.toString(row) + "2"); @@ -1534,7 +1530,7 @@ public void testCheckAndIncrementBatch() throws Throwable { assertEquals(0, Bytes.toLong(result.getValue(FAMILY, Bytes.toBytes("D")))); } - @Test + @TestTemplate public void testCheckAndAppendBatch() throws Throwable { AsyncTable table = getTable.get(); byte[] row2 = Bytes.toBytes(Bytes.toString(row) + "2"); @@ -1572,7 +1568,7 @@ public void testCheckAndAppendBatch() throws Throwable { assertEquals("d", Bytes.toString(result.getValue(FAMILY, Bytes.toBytes("D")))); } - @Test + @TestTemplate public void testCheckAndRowMutationsBatch() throws Throwable { AsyncTable table = getTable.get(); byte[] row2 = Bytes.toBytes(Bytes.toString(row) + "2"); @@ -1628,7 +1624,7 @@ public void testCheckAndRowMutationsBatch() throws Throwable { assertEquals("h", Bytes.toString(result.getValue(FAMILY, Bytes.toBytes("H")))); } - @Test + @TestTemplate public void testDisabled() throws InterruptedException, ExecutionException { ASYNC_CONN.getAdmin().disableTable(TABLE_NAME).get(); try { @@ -1641,7 +1637,7 @@ public void testDisabled() throws InterruptedException, ExecutionException { } } - @Test + @TestTemplate public void testInvalidMutation() throws Exception { 
Consumer executeMutation = mutation -> { if (mutation instanceof Put) { @@ -1683,7 +1679,7 @@ public void testInvalidMutation() throws Exception { } } - @Test + @TestTemplate public void testInvalidMutationInRowMutations() throws IOException { final byte[] row = Bytes.toBytes(0); @@ -1716,7 +1712,7 @@ public void testInvalidMutationInRowMutations() throws IOException { } } - @Test + @TestTemplate public void testInvalidMutationInRowMutationsInCheckAndMutate() throws IOException { final byte[] row = Bytes.toBytes(0); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableBatch.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableBatch.java index 86c39153f60b..075d664d251c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableBatch.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableBatch.java @@ -20,12 +20,12 @@ import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.CoreMatchers.instanceOf; import static org.hamcrest.MatcherAssert.assertThat; -import static org.junit.Assert.assertArrayEquals; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNull; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; +import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; import java.io.IOException; import java.io.UncheckedIOException; @@ -42,8 +42,9 @@ import java.util.function.Function; import java.util.stream.Collectors; import java.util.stream.IntStream; +import java.util.stream.Stream; import org.apache.hadoop.hbase.Cell; 
-import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HBaseParameterizedTestTemplate; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.coprocessor.ObserverContext; @@ -54,26 +55,19 @@ import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.util.Bytes; -import org.junit.After; -import org.junit.AfterClass; -import org.junit.Before; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -import org.junit.runners.Parameterized.Parameter; -import org.junit.runners.Parameterized.Parameters; - -@RunWith(Parameterized.class) -@Category({ LargeTests.class, ClientTests.class }) +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.TestTemplate; +import org.junit.jupiter.params.provider.Arguments; + +@HBaseParameterizedTestTemplate(name = "{index}: type={0}") +@Tag(LargeTests.TAG) +@Tag(ClientTests.TAG) public class TestAsyncTableBatch { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestAsyncTableBatch.class); - private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); private static TableName TABLE_NAME = TableName.valueOf("async"); @@ -91,11 +85,11 @@ public class TestAsyncTableBatch { private static int MAX_KEY_VALUE_SIZE = 64 * 1024; - @Parameter(0) - public String tableType; + private final Function> tableGetter; - @Parameter(1) - public Function> tableGetter; + public TestAsyncTableBatch(String tableType, Function> tableGetter) { + this.tableGetter = tableGetter; + } private 
static AsyncTable getRawTable(TableName tableName) { return CONN.getTable(tableName); @@ -105,15 +99,13 @@ private static AsyncTable getTable(TableName tableName) { return CONN.getTable(tableName, ForkJoinPool.commonPool()); } - @Parameters(name = "{index}: type={0}") - public static List params() { + public static Stream parameters() { Function> rawTableGetter = TestAsyncTableBatch::getRawTable; Function> tableGetter = TestAsyncTableBatch::getTable; - return Arrays.asList(new Object[] { "raw", rawTableGetter }, - new Object[] { "normal", tableGetter }); + return Stream.of(Arguments.of("raw", rawTableGetter), Arguments.of("normal", tableGetter)); } - @BeforeClass + @BeforeAll public static void setUp() throws Exception { TEST_UTIL.getConfiguration().setInt(ConnectionConfiguration.MAX_KEYVALUE_SIZE_KEY, MAX_KEY_VALUE_SIZE); @@ -125,19 +117,19 @@ public static void setUp() throws Exception { CONN = ConnectionFactory.createAsyncConnection(TEST_UTIL.getConfiguration()).get(); } - @AfterClass + @AfterAll public static void tearDown() throws Exception { CONN.close(); TEST_UTIL.shutdownMiniCluster(); } - @Before + @BeforeEach public void setUpBeforeTest() throws IOException, InterruptedException { TEST_UTIL.createTable(TABLE_NAME, FAMILY, SPLIT_KEYS); TEST_UTIL.waitTableAvailable(TABLE_NAME); } - @After + @AfterEach public void tearDownAfterTest() throws IOException { Admin admin = TEST_UTIL.getAdmin(); if (admin.isTableEnabled(TABLE_NAME)) { @@ -150,7 +142,7 @@ private byte[] getRow(int i) { return Bytes.toBytes(String.format("%03d", i)); } - @Test + @TestTemplate public void test() throws InterruptedException, ExecutionException, IOException, TimeoutException { AsyncTable table = tableGetter.apply(TABLE_NAME); @@ -193,7 +185,7 @@ public void test() results.forEach(r -> assertTrue(r.isEmpty())); } - @Test + @TestTemplate public void testWithRegionServerFailover() throws Exception { AsyncTable table = tableGetter.apply(TABLE_NAME); table.putAll(IntStream.range(0, COUNT) 
@@ -218,7 +210,7 @@ public void testWithRegionServerFailover() throws Exception { results.forEach(r -> assertTrue(r.isEmpty())); } - @Test + @TestTemplate public void testMixed() throws InterruptedException, ExecutionException, IOException { AsyncTable table = tableGetter.apply(TABLE_NAME); table.putAll(IntStream.range(0, 7) @@ -273,7 +265,7 @@ public void preGetOp(ObserverContext e, } } - @Test + @TestTemplate public void testPartialSuccess() throws IOException, InterruptedException, ExecutionException { Admin admin = TEST_UTIL.getAdmin(); TableDescriptor htd = TableDescriptorBuilder.newBuilder(admin.getDescriptor(TABLE_NAME)) @@ -295,7 +287,7 @@ public void testPartialSuccess() throws IOException, InterruptedException, Execu } } - @Test + @TestTemplate public void testPartialSuccessOnSameRegion() throws InterruptedException, ExecutionException { AsyncTable table = tableGetter.apply(TABLE_NAME); List> futures = table.batch(Arrays.asList( @@ -316,7 +308,7 @@ public void testPartialSuccessOnSameRegion() throws InterruptedException, Execut Bytes.toString(table.get(new Get(Bytes.toBytes("put"))).get().getValue(FAMILY, CQ))); } - @Test + @TestTemplate public void testInvalidMutation() { AsyncTable table = tableGetter.apply(TABLE_NAME); @@ -350,7 +342,7 @@ public void testInvalidMutation() { } } - @Test + @TestTemplate public void testInvalidMutationInRowMutations() throws IOException { final byte[] row = Bytes.toBytes(0); AsyncTable table = tableGetter.apply(TABLE_NAME); @@ -385,7 +377,7 @@ public void testInvalidMutationInRowMutations() throws IOException { } } - @Test + @TestTemplate public void testInvalidMutationInRowMutationsInCheckAndMutate() throws IOException { final byte[] row = Bytes.toBytes(0); AsyncTable table = tableGetter.apply(TABLE_NAME); @@ -421,7 +413,7 @@ public void testInvalidMutationInRowMutationsInCheckAndMutate() throws IOExcepti } } - @Test + @TestTemplate public void testWithCheckAndMutate() throws Exception { AsyncTable table = 
tableGetter.apply(TABLE_NAME); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableBatchRetryImmediately.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableBatchRetryImmediately.java index 05ed7bf1badf..bf1e9d1c4800 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableBatchRetryImmediately.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableBatchRetryImmediately.java @@ -17,14 +17,13 @@ */ package org.apache.hadoop.hbase.client; -import static org.junit.Assert.assertArrayEquals; -import static org.junit.Assert.assertEquals; +import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; import java.util.Arrays; import java.util.List; import java.util.stream.Collectors; import java.util.stream.IntStream; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableName; @@ -32,19 +31,15 @@ import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.Bytes; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; -@Category({ MediumTests.class, ClientTests.class }) +@Tag(MediumTests.TAG) +@Tag(ClientTests.TAG) public class TestAsyncTableBatchRetryImmediately { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestAsyncTableBatchRetryImmediately.class); - private static final HBaseTestingUtil UTIL = new HBaseTestingUtil(); private static TableName TABLE_NAME = 
TableName.valueOf("async"); @@ -61,7 +56,7 @@ public class TestAsyncTableBatchRetryImmediately { private static String LOG_LEVEL; - @BeforeClass + @BeforeAll public static void setUp() throws Exception { // disable the debug log to avoid flooding the output LOG_LEVEL = Log4jUtils.getEffectiveLevel(AsyncRegionLocatorHelper.class.getName()); @@ -78,7 +73,7 @@ public static void setUp() throws Exception { CONN = ConnectionFactory.createAsyncConnection(UTIL.getConfiguration()).get(); } - @AfterClass + @AfterAll public static void tearDown() throws Exception { if (LOG_LEVEL != null) { Log4jUtils.setLogLevel(AsyncRegionLocatorHelper.class.getName(), LOG_LEVEL); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableGetMultiThreaded.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableGetMultiThreaded.java index ff250fd8a352..25802267ddff 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableGetMultiThreaded.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableGetMultiThreaded.java @@ -19,7 +19,7 @@ import static org.apache.hadoop.hbase.HConstants.HBASE_CLIENT_META_OPERATION_TIMEOUT; import static org.apache.hadoop.hbase.io.ByteBuffAllocator.MAX_BUFFER_COUNT_KEY; -import static org.junit.Assert.assertEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; import java.io.IOException; import java.util.ArrayList; @@ -35,7 +35,6 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.stream.Collectors; import java.util.stream.IntStream; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.MemoryCompactionPolicy; import org.apache.hadoop.hbase.ServerName; @@ -48,12 +47,10 @@ import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.RetryCounter; import org.apache.hadoop.hbase.util.Threads; -import org.junit.AfterClass; -import 
org.junit.Assert; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -63,13 +60,10 @@ /** * Will split the table, and move region randomly when testing. */ -@Category({ LargeTests.class, ClientTests.class }) +@Tag(LargeTests.TAG) +@Tag(ClientTests.TAG) public class TestAsyncTableGetMultiThreaded { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestAsyncTableGetMultiThreaded.class); - private static final Logger LOG = LoggerFactory.getLogger(TestAsyncTableGetMultiThreaded.class); private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); @@ -85,7 +79,7 @@ public class TestAsyncTableGetMultiThreaded { private static byte[][] SPLIT_KEYS; - @BeforeClass + @BeforeAll public static void setUp() throws Exception { setUp(MemoryCompactionPolicy.NONE); } @@ -113,7 +107,7 @@ protected static void setUp(MemoryCompactionPolicy memoryCompaction) throws Exce .get(); } - @AfterClass + @AfterAll public static void tearDown() throws Exception { Closeables.close(CONN, true); TEST_UTIL.shutdownMiniCluster(); @@ -212,7 +206,7 @@ public String explainFailure() throws Exception { } List balancerDecisionRecords = admin.getLogEntries(null, "BALANCER_DECISION", ServerType.MASTER, 2, null); - Assert.assertEquals(balancerDecisionRecords.size(), 2); + assertEquals(2, balancerDecisionRecords.size()); LOG.info("====== Read test finished, shutdown thread pool ======"); stop.set(true); executor.shutdown(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableGetMultiThreadedWithBasicCompaction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableGetMultiThreadedWithBasicCompaction.java
index 29bb4c76af73..dfc1bf2c387e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableGetMultiThreadedWithBasicCompaction.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableGetMultiThreadedWithBasicCompaction.java @@ -17,25 +17,20 @@ */ package org.apache.hadoop.hbase.client; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.MemoryCompactionPolicy; import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.LargeTests; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Ignore; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Disabled; +import org.junit.jupiter.api.Tag; -@Ignore // Can't move hbase:meta off master server in AMv2. TODO. -@Category({ LargeTests.class, ClientTests.class }) +@Disabled // Can't move hbase:meta off master server in AMv2. TODO. 
+@Tag(LargeTests.TAG) +@Tag(ClientTests.TAG) public class TestAsyncTableGetMultiThreadedWithBasicCompaction extends TestAsyncTableGetMultiThreaded { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestAsyncTableGetMultiThreadedWithBasicCompaction.class); - - @BeforeClass + @BeforeAll public static void setUp() throws Exception { setUp(MemoryCompactionPolicy.BASIC); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableGetMultiThreadedWithEagerCompaction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableGetMultiThreadedWithEagerCompaction.java index da70b199111d..67b0d6963536 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableGetMultiThreadedWithEagerCompaction.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableGetMultiThreadedWithEagerCompaction.java @@ -17,25 +17,20 @@ */ package org.apache.hadoop.hbase.client; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.MemoryCompactionPolicy; import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.LargeTests; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Ignore; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Disabled; +import org.junit.jupiter.api.Tag; -@Ignore // Can't move hbase:meta off master server in AMv2. TODO. -@Category({ LargeTests.class, ClientTests.class }) +@Disabled // Can't move hbase:meta off master server in AMv2. TODO. 
+@Tag(LargeTests.TAG) +@Tag(ClientTests.TAG) public class TestAsyncTableGetMultiThreadedWithEagerCompaction extends TestAsyncTableGetMultiThreaded { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestAsyncTableGetMultiThreadedWithEagerCompaction.class); - - @BeforeClass + @BeforeAll public static void setUp() throws Exception { setUp(MemoryCompactionPolicy.EAGER); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableLocatePrefetch.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableLocatePrefetch.java index a9f9a2762947..ae1815ba3446 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableLocatePrefetch.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableLocatePrefetch.java @@ -17,30 +17,25 @@ */ package org.apache.hadoop.hbase.client; -import static org.junit.Assert.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNotNull; import java.util.concurrent.ExecutionException; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.Bytes; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; import org.apache.hbase.thirdparty.com.google.common.io.Closeables; -@Category({ MediumTests.class, ClientTests.class }) +@Tag(MediumTests.TAG) +@Tag(ClientTests.TAG) public class TestAsyncTableLocatePrefetch { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - 
HBaseClassTestRule.forClass(TestAsyncTableLocatePrefetch.class); - private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); private static TableName TABLE_NAME = TableName.valueOf("async"); @@ -51,7 +46,7 @@ public class TestAsyncTableLocatePrefetch { private static AsyncNonMetaRegionLocator LOCATOR; - @BeforeClass + @BeforeAll public static void setUp() throws Exception { TEST_UTIL.getConfiguration().setInt(AsyncNonMetaRegionLocator.LOCATE_PREFETCH_LIMIT, 100); TEST_UTIL.startMiniCluster(3); @@ -62,7 +57,7 @@ public static void setUp() throws Exception { new AsyncNonMetaRegionLocator((AsyncConnectionImpl) CONN, AsyncConnectionImpl.RETRY_TIMER); } - @AfterClass + @AfterAll public static void tearDown() throws Exception { Closeables.close(CONN, true); TEST_UTIL.shutdownMiniCluster(); @@ -77,8 +72,8 @@ public void test() throws InterruptedException, ExecutionException { // confirm that the locations of all the regions have been cached. assertNotNull(LOCATOR.getRegionLocationInCache(TABLE_NAME, Bytes.toBytes("aaa"))); for (byte[] row : HBaseTestingUtil.KEYS_FOR_HBA_CREATE_TABLE) { - assertNotNull("Expected location to not be null for " + Bytes.toStringBinary(row), - LOCATOR.getRegionLocationInCache(TABLE_NAME, row)); + assertNotNull(LOCATOR.getRegionLocationInCache(TABLE_NAME, row), + "Expected location to not be null for " + Bytes.toStringBinary(row)); } } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableLocateRegionForDeletedTable.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableLocateRegionForDeletedTable.java index c43498682acf..29e8517357f1 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableLocateRegionForDeletedTable.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableLocateRegionForDeletedTable.java @@ -17,21 +17,19 @@ */ package org.apache.hadoop.hbase.client; -import static org.junit.Assert.assertFalse; -import 
static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; import java.io.IOException; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.Bytes; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; import org.apache.hbase.thirdparty.com.google.common.io.Closeables; @@ -39,13 +37,10 @@ * Fix an infinite loop in {@link AsyncNonMetaRegionLocator}, see the comments on HBASE-21943 for * more details. 
*/ -@Category({ MediumTests.class, ClientTests.class }) +@Tag(MediumTests.TAG) +@Tag(ClientTests.TAG) public class TestAsyncTableLocateRegionForDeletedTable { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestAsyncTableLocateRegionForDeletedTable.class); - private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); private static TableName TABLE_NAME = TableName.valueOf("async"); @@ -58,7 +53,7 @@ public class TestAsyncTableLocateRegionForDeletedTable { private static AsyncConnection ASYNC_CONN; - @BeforeClass + @BeforeAll public static void setUpBeforeClass() throws Exception { TEST_UTIL.startMiniCluster(3); TEST_UTIL.createTable(TABLE_NAME, FAMILY); @@ -68,7 +63,7 @@ public static void setUpBeforeClass() throws Exception { assertFalse(ASYNC_CONN.isClosed()); } - @AfterClass + @AfterAll public static void tearDownAfterClass() throws Exception { Closeables.close(ASYNC_CONN, true); assertTrue(ASYNC_CONN.isClosed()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableNoncedRetry.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableNoncedRetry.java index b3889880c5ed..1e8410f402ec 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableNoncedRetry.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableNoncedRetry.java @@ -17,9 +17,9 @@ */ package org.apache.hadoop.hbase.client; -import static org.junit.Assert.assertArrayEquals; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; import java.io.IOException; import java.util.Arrays; @@ -28,7 +28,6 @@ import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; import 
java.util.concurrent.atomic.AtomicInteger; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.coprocessor.ObserverContext; @@ -40,24 +39,19 @@ import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Threads; -import org.junit.AfterClass; -import org.junit.Before; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Rule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.rules.TestName; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestInfo; import org.apache.hbase.thirdparty.com.google.common.io.Closeables; -@Category({ MediumTests.class, ClientTests.class }) +@Tag(MediumTests.TAG) +@Tag(ClientTests.TAG) public class TestAsyncTableNoncedRetry { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestAsyncTableNoncedRetry.class); - private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); private static final TableName TABLE_NAME = TableName.valueOf("async"); @@ -74,9 +68,6 @@ public class TestAsyncTableNoncedRetry { private static AsyncConnection ASYNC_CONN; - @Rule - public TestName testName = new TestName(); - private byte[] row; private static final AtomicInteger CALLED = new AtomicInteger(); @@ -105,7 +96,7 @@ public void postBatchMutate(ObserverContext future : futures) { result = future.join(); - Assert.assertNotNull(result.getMetrics()); + assertNotNull(result.getMetrics()); bbs += result.getMetrics().getBlockBytesScanned(); } - Assert.assertEquals(getClusterBlockBytesScanned(), bbs); + assertEquals(getClusterBlockBytesScanned(), bbs); } @Test @@ 
-118,13 +116,13 @@ public void itTestsDefaultGetNoMetrics() throws Exception { Get g1 = new Get(ROW_1); Result result = CONN.getTable(TABLE_NAME).get(g1).get(); - Assert.assertNull(result.getMetrics()); + assertNull(result.getMetrics()); // Test multigets Get g2 = new Get(ROW_2); Get g3 = new Get(ROW_3); List> futures = CONN.getTable(TABLE_NAME).get(List.of(g1, g2, g3)); - futures.forEach(f -> Assert.assertNull(f.join().getMetrics())); + futures.forEach(f -> assertNull(f.join().getMetrics())); } @@ -136,9 +134,9 @@ public void itTestsScans() { long bbs = getClusterBlockBytesScanned(); try (ResultScanner scanner = CONN.getTable(TABLE_NAME).getScanner(scan)) { for (Result result : scanner) { - Assert.assertNotNull(result.getMetrics()); + assertNotNull(result.getMetrics()); bbs += result.getMetrics().getBlockBytesScanned(); - Assert.assertEquals(getClusterBlockBytesScanned(), bbs); + assertEquals(getClusterBlockBytesScanned(), bbs); } } } @@ -149,7 +147,7 @@ public void itTestsDefaultScanNoMetrics() { try (ResultScanner scanner = CONN.getTable(TABLE_NAME).getScanner(scan)) { for (Result result : scanner) { - Assert.assertNull(result.getMetrics()); + assertNull(result.getMetrics()); } } } @@ -163,8 +161,8 @@ public void itTestsAtomicOperations() { CheckAndMutateResult result = CONN.getTable(TABLE_NAME).checkAndMutate(cam).join(); QueryMetrics metrics = result.getMetrics(); - Assert.assertNotNull(metrics); - Assert.assertEquals(getClusterBlockBytesScanned(), bbs + metrics.getBlockBytesScanned()); + assertNotNull(metrics); + assertEquals(getClusterBlockBytesScanned(), bbs + metrics.getBlockBytesScanned()); bbs = getClusterBlockBytesScanned(); List batch = new ArrayList<>(); @@ -177,7 +175,7 @@ public void itTestsAtomicOperations() { List res = CONN.getTable(TABLE_NAME).batchAll(batch).join(); long totalBbs = res.stream() .mapToLong(r -> ((CheckAndMutateResult) r).getMetrics().getBlockBytesScanned()).sum(); - Assert.assertEquals(getClusterBlockBytesScanned(), bbs + 
totalBbs); + assertEquals(getClusterBlockBytesScanned(), bbs + totalBbs); bbs = getClusterBlockBytesScanned(); @@ -187,7 +185,7 @@ public void itTestsAtomicOperations() { totalBbs = futures.stream().map(CompletableFuture::join) .mapToLong(r -> ((CheckAndMutateResult) r).getMetrics().getBlockBytesScanned()).sum(); - Assert.assertEquals(getClusterBlockBytesScanned(), bbs + totalBbs); + assertEquals(getClusterBlockBytesScanned(), bbs + totalBbs); } @Test @@ -198,7 +196,7 @@ public void itTestsDefaultAtomicOperations() { CheckAndMutateResult result = CONN.getTable(TABLE_NAME).checkAndMutate(cam).join(); QueryMetrics metrics = result.getMetrics(); - Assert.assertNull(metrics); + assertNull(metrics); List batch = new ArrayList<>(); batch.add(cam); @@ -209,7 +207,7 @@ public void itTestsDefaultAtomicOperations() { List res = CONN.getTable(TABLE_NAME).batchAll(batch).join(); for (Object r : res) { - Assert.assertNull(((CheckAndMutateResult) r).getMetrics()); + assertNull(((CheckAndMutateResult) r).getMetrics()); } // flush to force fetch from disk @@ -218,7 +216,7 @@ public void itTestsDefaultAtomicOperations() { for (CompletableFuture future : futures) { Object r = future.join(); - Assert.assertNull(((CheckAndMutateResult) r).getMetrics()); + assertNull(((CheckAndMutateResult) r).getMetrics()); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableRSCrashPublish.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableRSCrashPublish.java index c98c13deb660..f6be32d8ec55 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableRSCrashPublish.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableRSCrashPublish.java @@ -17,12 +17,11 @@ */ package org.apache.hadoop.hbase.client; -import static org.junit.Assert.assertNotEquals; +import static org.junit.jupiter.api.Assertions.assertNotEquals; import java.io.IOException; import 
java.util.concurrent.ExecutionException; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.ServerName; @@ -30,29 +29,25 @@ import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.util.Bytes; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; // Categorized as a large test so not run as part of general 'test' suite (which is small // and mediums). This test fails if networking is odd -- say if you are connected to a // VPN... See HBASE-23850 -@Category({ LargeTests.class, ClientTests.class }) +@Tag(LargeTests.TAG) +@Tag(ClientTests.TAG) public class TestAsyncTableRSCrashPublish { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestAsyncTableRSCrashPublish.class); - private static final HBaseTestingUtil UTIL = new HBaseTestingUtil(); private static TableName TABLE_NAME = TableName.valueOf("Publish"); private static byte[] FAMILY = Bytes.toBytes("family"); - @BeforeClass + @BeforeAll public static void beforeClass() throws Exception { UTIL.getConfiguration().setBoolean(HConstants.STATUS_PUBLISHED, true); /* @@ -72,7 +67,7 @@ public static void beforeClass() throws Exception { UTIL.waitTableAvailable(TABLE_NAME); } - @AfterClass + @AfterAll public static void afterClass() throws Exception { UTIL.shutdownMiniCluster(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableRegionLocator.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableRegionLocator.java index 2b65473380f2..7f29fa5f6652 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableRegionLocator.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableRegionLocator.java @@ -21,35 +21,30 @@ import java.io.IOException; import java.util.List; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.Pair; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Tag; import org.apache.hbase.thirdparty.com.google.common.io.Closeables; -@Category({ MediumTests.class, ClientTests.class }) +@Tag(MediumTests.TAG) +@Tag(ClientTests.TAG) public class TestAsyncTableRegionLocator extends AbstractTestRegionLocator { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestAsyncTableRegionLocator.class); - private static AsyncConnection CONN; - @BeforeClass + @BeforeAll public static void setUp() throws Exception { startClusterAndCreateTable(); CONN = ConnectionFactory.createAsyncConnection(UTIL.getConfiguration()).get(); } - @AfterClass + @AfterAll public static void tearDown() throws Exception { Closeables.close(CONN, true); UTIL.shutdownMiniCluster(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableRegionReplicasGet.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableRegionReplicasGet.java index 910e6bbcbb13..497594e47fa9 100644 --- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableRegionReplicasGet.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableRegionReplicasGet.java @@ -17,26 +17,25 @@ */ package org.apache.hadoop.hbase.client; -import static org.junit.Assert.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertArrayEquals; -import org.apache.hadoop.hbase.HBaseClassTestRule; +import java.util.function.Supplier; +import org.apache.hadoop.hbase.HBaseParameterizedTestTemplate; import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.MediumTests; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.experimental.categories.Category; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Tag; -@RunWith(Parameterized.class) -@Category({ MediumTests.class, ClientTests.class }) +@Tag(MediumTests.TAG) +@Tag(ClientTests.TAG) +@HBaseParameterizedTestTemplate public class TestAsyncTableRegionReplicasGet extends AbstractTestAsyncTableRegionReplicasRead { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestAsyncTableRegionReplicasGet.class); + public TestAsyncTableRegionReplicasGet(Supplier> getTable) { + super(getTable); + } - @BeforeClass + @BeforeAll public static void setUpBeforeClass() throws Exception { startClusterAndCreateTable(); AsyncTable table = ASYNC_CONN.getTable(TABLE_NAME); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableRegionReplicasScan.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableRegionReplicasScan.java index b38fc4b9b6e7..6e4a7ece9a39 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableRegionReplicasScan.java +++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableRegionReplicasScan.java @@ -17,30 +17,29 @@ */ package org.apache.hadoop.hbase.client; -import static org.junit.Assert.assertArrayEquals; -import static org.junit.Assert.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; import java.io.IOException; -import org.apache.hadoop.hbase.HBaseClassTestRule; +import java.util.function.Supplier; +import org.apache.hadoop.hbase.HBaseParameterizedTestTemplate; import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.Bytes; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.experimental.categories.Category; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Tag; -@RunWith(Parameterized.class) -@Category({ MediumTests.class, ClientTests.class }) +@Tag(MediumTests.TAG) +@Tag(ClientTests.TAG) +@HBaseParameterizedTestTemplate public class TestAsyncTableRegionReplicasScan extends AbstractTestAsyncTableRegionReplicasRead { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestAsyncTableRegionReplicasScan.class); - private static int ROW_COUNT = 1000; + public TestAsyncTableRegionReplicasScan(Supplier> getTable) { + super(getTable); + } + private static byte[] getRow(int i) { return Bytes.toBytes(String.format("%s-%03d", Bytes.toString(ROW), i)); } @@ -49,7 +48,7 @@ private static byte[] getValue(int i) { return Bytes.toBytes(String.format("%s-%03d", Bytes.toString(VALUE), i)); } - @BeforeClass + @BeforeAll public static void setUpBeforeClass() throws Exception { startClusterAndCreateTable(); AsyncTable table = ASYNC_CONN.getTable(TABLE_NAME); diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableScanException.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableScanException.java index 3729296a9220..a5408ab06a13 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableScanException.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableScanException.java @@ -17,18 +17,17 @@ */ package org.apache.hadoop.hbase.client; -import static org.hamcrest.CoreMatchers.instanceOf; import static org.hamcrest.MatcherAssert.assertThat; -import static org.junit.Assert.assertArrayEquals; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.instanceOf; +import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; import java.io.IOException; import java.util.List; import java.util.Optional; import java.util.concurrent.atomic.AtomicInteger; import org.apache.hadoop.hbase.DoNotRetryIOException; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.coprocessor.ObserverContext; @@ -40,22 +39,18 @@ import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.Bytes; -import org.junit.AfterClass; -import org.junit.Before; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; import org.apache.hbase.thirdparty.com.google.common.io.Closeables; -@Category({ 
MediumTests.class, ClientTests.class }) +@Tag(MediumTests.TAG) +@Tag(ClientTests.TAG) public class TestAsyncTableScanException { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestAsyncTableScanException.class); - private static final HBaseTestingUtil UTIL = new HBaseTestingUtil(); private static TableName TABLE_NAME = TableName.valueOf("scan"); @@ -99,7 +94,7 @@ public boolean postScannerNext(ObserverContext scanner.next()); } } @@ -142,13 +137,11 @@ public void testIOException() throws IOException { ERROR = true; try (ResultScanner scanner = CONN.getTableBuilder(TABLE_NAME).setMaxAttempts(3).build().getScanner(FAMILY)) { - scanner.next(); - fail(); - } catch (RetriesExhaustedException e) { - // expected + RetriesExhaustedException e = + assertThrows(RetriesExhaustedException.class, () -> scanner.next()); assertThat(e.getCause(), instanceOf(ScannerResetException.class)); } - assertTrue(REQ_COUNT.get() >= 3); + assertThat(REQ_COUNT.get(), greaterThanOrEqualTo(3)); } private void count() throws IOException { @@ -167,7 +160,7 @@ public void testRecoveryFromScannerResetWhileOpening() throws IOException { count(); // we should at least request 1 time otherwise the error will not be triggered, and then we // need at least one more request to get the remaining results. - assertTrue(REQ_COUNT.get() >= 2); + assertThat(REQ_COUNT.get(), greaterThanOrEqualTo(2)); } @Test @@ -176,6 +169,6 @@ public void testRecoveryFromScannerResetInTheMiddle() throws IOException { count(); // we should at least request 2 times otherwise the error will not be triggered, and then we // need at least one more request to get the remaining results. 
- assertTrue(REQ_COUNT.get() >= 3); + assertThat(REQ_COUNT.get(), greaterThanOrEqualTo(3)); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableScanMetricsWithScannerSuspending.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableScanMetricsWithScannerSuspending.java index 6a355b083653..be15bf1a43d6 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableScanMetricsWithScannerSuspending.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableScanMetricsWithScannerSuspending.java @@ -22,9 +22,10 @@ import static org.apache.hadoop.hbase.client.metrics.ScanMetrics.REGIONS_SCANNED_METRIC_NAME; import static org.apache.hadoop.hbase.client.metrics.ScanMetrics.RPC_CALLS_METRIC_NAME; import static org.apache.hadoop.hbase.client.metrics.ServerSideScanMetrics.COUNT_OF_ROWS_SCANNED_KEY_METRIC_NAME; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertTrue; import java.util.ArrayList; import java.util.Arrays; @@ -32,7 +33,6 @@ import java.util.List; import java.util.Map; import java.util.concurrent.atomic.AtomicInteger; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.Waiter; @@ -41,22 +41,17 @@ import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.Bytes; -import org.junit.AfterClass; -import org.junit.Assert; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import 
org.junit.experimental.categories.Category; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; import org.apache.hbase.thirdparty.com.google.common.io.Closeables; -@Category({ MediumTests.class, ClientTests.class }) +@Tag(MediumTests.TAG) +@Tag(ClientTests.TAG) public class TestAsyncTableScanMetricsWithScannerSuspending { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestAsyncTableScanMetricsWithScannerSuspending.class); - private static final HBaseTestingUtil UTIL = new HBaseTestingUtil(); private static final TableName TABLE_NAME = @@ -70,7 +65,7 @@ public class TestAsyncTableScanMetricsWithScannerSuspending { private static AsyncConnection CONN; - @BeforeClass + @BeforeAll public static void setUp() throws Exception { UTIL.startMiniCluster(1); // Create 3 rows in the table, with rowkeys starting with "xxx*", "yyy*" and "zzz*" so that @@ -83,7 +78,7 @@ public static void setUp() throws Exception { CONN = ConnectionFactory.createAsyncConnection(UTIL.getConfiguration()).get(); } - @AfterClass + @AfterAll public static void tearDown() throws Exception { Closeables.close(CONN, true); UTIL.shutdownMiniCluster(); @@ -131,7 +126,7 @@ public String explainFailure() throws Exception { assertEquals(i, rowsReadCounter.get()); results.add(scanner.next()); } - Assert.assertNull(scanner.next()); + assertNull(scanner.next()); // Assert on overall scan metrics and scan metrics by region ScanMetrics scanMetrics = scanner.getScanMetrics(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableScanRenewLease.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableScanRenewLease.java index 04e500fc8e34..ceeb0f90ad96 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableScanRenewLease.java +++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableScanRenewLease.java @@ -17,13 +17,12 @@ */ package org.apache.hadoop.hbase.client; -import static org.junit.Assert.assertEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; import java.util.ArrayList; import java.util.List; import java.util.stream.Collectors; import java.util.stream.IntStream; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableName; @@ -31,19 +30,15 @@ import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Threads; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; -@Category({ LargeTests.class, ClientTests.class }) +@Tag(LargeTests.TAG) +@Tag(ClientTests.TAG) public class TestAsyncTableScanRenewLease { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestAsyncTableScanRenewLease.class); - private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); private static TableName TABLE_NAME = TableName.valueOf("async"); @@ -58,7 +53,7 @@ public class TestAsyncTableScanRenewLease { private static int SCANNER_LEASE_TIMEOUT_PERIOD_MS = 5000; - @BeforeClass + @BeforeAll public static void setUp() throws Exception { TEST_UTIL.getConfiguration().setInt(HConstants.HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD, SCANNER_LEASE_TIMEOUT_PERIOD_MS); @@ -71,7 +66,7 @@ public static void setUp() throws Exception { .collect(Collectors.toList())).get(); } - @AfterClass + @AfterAll public static void tearDown() throws Exception { CONN.close(); 
TEST_UTIL.shutdownMiniCluster(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableScannerCloseWhileSuspending.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableScannerCloseWhileSuspending.java index 4f52e6c8c13f..e02a41c64c98 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableScannerCloseWhileSuspending.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableScannerCloseWhileSuspending.java @@ -17,33 +17,28 @@ */ package org.apache.hadoop.hbase.client; -import static org.junit.Assert.assertEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; import java.util.concurrent.CountDownLatch; import java.util.concurrent.ForkJoinPool; import java.util.concurrent.atomic.AtomicInteger; import java.util.stream.Collectors; import java.util.stream.IntStream; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.Waiter.ExplainingPredicate; import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.Bytes; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; -@Category({ MediumTests.class, ClientTests.class }) +@Tag(MediumTests.TAG) +@Tag(ClientTests.TAG) public class TestAsyncTableScannerCloseWhileSuspending { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestAsyncTableScannerCloseWhileSuspending.class); - private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); private static TableName 
TABLE_NAME = TableName.valueOf("async"); @@ -56,7 +51,7 @@ public class TestAsyncTableScannerCloseWhileSuspending { private static AsyncTable TABLE; - @BeforeClass + @BeforeAll public static void setUp() throws Exception { TEST_UTIL.startMiniCluster(1); TEST_UTIL.createTable(TABLE_NAME, FAMILY); @@ -67,7 +62,7 @@ public static void setUp() throws Exception { .collect(Collectors.toList())).get(); } - @AfterClass + @AfterAll public static void tearDown() throws Exception { CONN.close(); TEST_UTIL.shutdownMiniCluster(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableUseMetaReplicas.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableUseMetaReplicas.java index 18c53a49de7b..8cd96291991f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableUseMetaReplicas.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableUseMetaReplicas.java @@ -17,14 +17,14 @@ */ package org.apache.hadoop.hbase.client; -import static org.junit.Assert.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; import java.io.IOException; import java.util.Optional; import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableName; @@ -39,20 +39,16 @@ import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.FutureUtils; -import org.junit.After; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; - -@Category({ ClientTests.class, MediumTests.class }) 
+import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; + +@Tag(ClientTests.TAG) +@Tag(MediumTests.TAG) public class TestAsyncTableUseMetaReplicas { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestAsyncTableUseMetaReplicas.class); - private static final HBaseTestingUtil UTIL = new HBaseTestingUtil(); private static TableName TABLE_NAME = TableName.valueOf("Replica"); @@ -87,7 +83,7 @@ public void preScannerOpen(ObserverContext testRead(false)); } @Test diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAvoidCellReferencesIntoShippedBlocks.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAvoidCellReferencesIntoShippedBlocks.java index e1f21502d906..31ce704bf1bc 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAvoidCellReferencesIntoShippedBlocks.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAvoidCellReferencesIntoShippedBlocks.java @@ -17,8 +17,8 @@ */ package org.apache.hadoop.hbase.client; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; import java.io.IOException; import java.util.ArrayList; @@ -31,7 +31,6 @@ import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparatorImpl; import org.apache.hadoop.hbase.ExtendedCell; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableName; @@ -59,23 +58,19 @@ import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.util.Bytes; -import 
org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Rule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.rules.TestName; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestInfo; import org.apache.hbase.thirdparty.com.google.common.collect.Iterables; -@Category({ LargeTests.class, ClientTests.class }) +@Tag(LargeTests.TAG) +@Tag(ClientTests.TAG) public class TestAvoidCellReferencesIntoShippedBlocks { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestAvoidCellReferencesIntoShippedBlocks.class); - protected final static HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); static byte[][] ROWS = new byte[2][]; private static byte[] ROW = Bytes.toBytes("testRow"); @@ -93,14 +88,14 @@ public class TestAvoidCellReferencesIntoShippedBlocks { private CountDownLatch latch = new CountDownLatch(1); private static CountDownLatch compactReadLatch = new CountDownLatch(1); private static AtomicBoolean doScan = new AtomicBoolean(false); + private String methodName; - @Rule - public TestName name = new TestName(); + @BeforeEach + public void setUp(TestInfo testInfo) throws Exception { + this.methodName = testInfo.getTestMethod().get().getName(); + } - /** - * @throws java.lang.Exception - */ - @BeforeClass + @BeforeAll public static void setUpBeforeClass() throws Exception { ROWS[0] = ROW; ROWS[1] = ROW1; @@ -123,14 +118,14 @@ public static void setUpBeforeClass() throws Exception { /** * @throws java.lang.Exception */ - @AfterClass + @AfterAll public static void tearDownAfterClass() throws Exception { TEST_UTIL.shutdownMiniCluster(); } @Test public void testHBase16372InCompactionWritePath() throws Exception { - final TableName tableName = 
TableName.valueOf(name.getMethodName()); + final TableName tableName = TableName.valueOf(methodName); // Create a table with block size as 1024 final Table table = TEST_UTIL.createTable(tableName, FAMILIES_1, 1, 1024, CompactorRegionObserver.class.getName()); @@ -197,7 +192,7 @@ public void testHBase16372InCompactionWritePath() throws Exception { try (ResultScanner scanner = table.getScanner(s)) { count = Iterables.size(scanner); } - assertEquals("Count all the rows ", 6, count); + assertEquals(6, count, "Count all the rows "); // all the cache is loaded // trigger a major compaction ScannerThread scannerThread = new ScannerThread(table, cache); @@ -208,7 +203,7 @@ public void testHBase16372InCompactionWritePath() throws Exception { try (ResultScanner scanner = table.getScanner(s)) { count = Iterables.size(scanner); } - assertEquals("Count all the rows ", 6, count); + assertEquals(6, count, "Count all the rows "); } finally { table.close(); } @@ -297,7 +292,7 @@ public boolean next(List result, ScannerContext scannerCon @Test public void testHBASE16372InReadPath() throws Exception { - final TableName tableName = TableName.valueOf(name.getMethodName()); + final TableName tableName = TableName.valueOf(methodName); // Create a table with block size as 1024 try (Table table = TEST_UTIL.createTable(tableName, FAMILIES_1, 1, 1024, null)) { // get the block cache and region @@ -356,7 +351,7 @@ public void testHBASE16372InReadPath() throws Exception { try (ResultScanner scanner = table.getScanner(s)) { count = Iterables.size(scanner); } - assertEquals("Count all the rows ", 6, count); + assertEquals(6, count, "Count all the rows "); // Scan from cache s = new Scan(); @@ -394,7 +389,7 @@ public void run() { iterator.next(); refBlockCount++; } - assertEquals("One block should be there ", 1, refBlockCount); + assertEquals(1, refBlockCount, "One block should be there "); // Rescan to prepopulate the data // cache this row. 
Scan s1 = new Scan(); @@ -406,7 +401,7 @@ public void run() { try (ResultScanner scanner = table.getScanner(s1)) { int count = Iterables.size(scanner); - assertEquals("Count the rows", 2, count); + assertEquals(2, count, "Count the rows"); int newBlockRefCount = 0; List newCacheList = new ArrayList<>(); while (true) { @@ -441,7 +436,7 @@ public void run() { } } } - assertEquals("Count should give all rows ", 10, count); + assertEquals(10, count, "Count should give all rows "); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestBatchScanResultCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestBatchScanResultCache.java index dd0074ae6913..e555fbdb6ee8 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestBatchScanResultCache.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestBatchScanResultCache.java @@ -17,40 +17,35 @@ */ package org.apache.hadoop.hbase.client; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertSame; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertSame; import java.io.IOException; import java.util.Arrays; import org.apache.hadoop.hbase.ExtendedCell; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.util.Bytes; -import org.junit.After; -import org.junit.Before; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; -@Category({ SmallTests.class, ClientTests.class }) +@Tag(SmallTests.TAG) +@Tag(ClientTests.TAG) public class TestBatchScanResultCache { - @ClassRule 
- public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestBatchScanResultCache.class); - private static byte[] CF = Bytes.toBytes("cf"); private BatchScanResultCache resultCache; - @Before + @BeforeEach public void setUp() { resultCache = new BatchScanResultCache(4); } - @After + @AfterEach public void tearDown() { resultCache.clear(); resultCache = null; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestBlockEvictionFromClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestBlockEvictionFromClient.java index 1f9bbd3aa7af..bee614e9d19c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestBlockEvictionFromClient.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestBlockEvictionFromClient.java @@ -17,9 +17,9 @@ */ package org.apache.hadoop.hbase.client; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; import java.io.IOException; import java.util.ArrayList; @@ -36,7 +36,6 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.ExtendedCell; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.ServerName; @@ -63,28 +62,22 @@ import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; -import org.junit.After; -import org.junit.AfterClass; -import org.junit.Before; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Rule; -import org.junit.Test; -import 
org.junit.experimental.categories.Category; -import org.junit.rules.TestName; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestInfo; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hbase.thirdparty.com.google.common.collect.Iterables; -@Category({ LargeTests.class, ClientTests.class }) -@SuppressWarnings("deprecation") +@Tag(LargeTests.TAG) +@Tag(ClientTests.TAG) public class TestBlockEvictionFromClient { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestBlockEvictionFromClient.class); - private static final Logger LOG = LoggerFactory.getLogger(TestBlockEvictionFromClient.class); protected final static HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); static byte[][] ROWS = new byte[2][]; @@ -104,14 +97,17 @@ public class TestBlockEvictionFromClient { private static CountDownLatch getLatch; private static CountDownLatch compactionLatch; private static CountDownLatch exceptionLatch; + private String methodName; - @Rule - public TestName name = new TestName(); + @BeforeEach + public void setUp(TestInfo testInfo) throws Exception { + this.methodName = testInfo.getTestMethod().get().getName(); + CustomInnerRegionObserver.waitForGets.set(false); + CustomInnerRegionObserver.countOfNext.set(0); + CustomInnerRegionObserver.countOfGets.set(0); + } - /** - * @throws java.lang.Exception - */ - @BeforeClass + @BeforeAll public static void setUpBeforeClass() throws Exception { ROWS[0] = ROW; ROWS[1] = ROW1; @@ -132,7 +128,7 @@ public static void setUpBeforeClass() throws Exception { /** * @throws java.lang.Exception */ - @AfterClass + @AfterAll public static void tearDownAfterClass() throws Exception { TEST_UTIL.shutdownMiniCluster(); } @@ -140,17 +136,7 @@ public static void 
tearDownAfterClass() throws Exception { /** * @throws java.lang.Exception */ - @Before - public void setUp() throws Exception { - CustomInnerRegionObserver.waitForGets.set(false); - CustomInnerRegionObserver.countOfNext.set(0); - CustomInnerRegionObserver.countOfGets.set(0); - } - - /** - * @throws java.lang.Exception - */ - @After + @AfterEach public void tearDown() throws Exception { if (latch != null) { while (latch.getCount() > 0) { @@ -186,7 +172,7 @@ public void testBlockEvictionWithParallelScans() throws Exception { Table table = null; try { latch = new CountDownLatch(1); - final TableName tableName = TableName.valueOf(name.getMethodName()); + final TableName tableName = TableName.valueOf(methodName); // Create a table with block size as 1024 table = TEST_UTIL.createTable(tableName, FAMILIES_1, 1, 1024, CustomInnerRegionObserver.class.getName()); @@ -274,7 +260,7 @@ public void testParallelGetsAndScans() throws IOException, InterruptedException latch = new CountDownLatch(2); // Check if get() returns blocks on its close() itself getLatch = new CountDownLatch(1); - final TableName tableName = TableName.valueOf(name.getMethodName()); + final TableName tableName = TableName.valueOf(methodName); // Create KV that will give you two blocks // Create a table with block size as 1024 table = TEST_UTIL.createTable(tableName, FAMILIES_1, 1, 1024, @@ -332,7 +318,7 @@ public void testGetWithCellsInDifferentFiles() throws IOException, InterruptedEx latch = new CountDownLatch(1); // Check if get() returns blocks on its close() itself getLatch = new CountDownLatch(1); - final TableName tableName = TableName.valueOf(name.getMethodName()); + final TableName tableName = TableName.valueOf(methodName); // Create KV that will give you two blocks // Create a table with block size as 1024 table = TEST_UTIL.createTable(tableName, FAMILIES_1, 1, 1024, @@ -393,7 +379,7 @@ public void testGetsWithMultiColumnsAndExplicitTracker() latch = new CountDownLatch(1); // Check if get() returns 
blocks on its close() itself getLatch = new CountDownLatch(1); - final TableName tableName = TableName.valueOf(name.getMethodName()); + final TableName tableName = TableName.valueOf(methodName); // Create KV that will give you two blocks // Create a table with block size as 1024 table = TEST_UTIL.createTable(tableName, FAMILIES_1, 1, 1024, @@ -480,7 +466,7 @@ public void testGetWithMultipleColumnFamilies() throws IOException, InterruptedE latch = new CountDownLatch(1); // Check if get() returns blocks on its close() itself getLatch = new CountDownLatch(1); - final TableName tableName = TableName.valueOf(name.getMethodName()); + final TableName tableName = TableName.valueOf(methodName); // Create KV that will give you two blocks // Create a table with block size as 1024 byte[][] fams = new byte[10][]; @@ -571,7 +557,7 @@ public void testGetWithMultipleColumnFamilies() throws IOException, InterruptedE public void testBlockRefCountAfterSplits() throws IOException, InterruptedException { Table table = null; try { - final TableName tableName = TableName.valueOf(name.getMethodName()); + final TableName tableName = TableName.valueOf(methodName); TableDescriptor desc = TEST_UTIL.createTableDescriptor(tableName); // This test expects rpc refcount of cached data blocks to be 0 after split. 
After split, // two daughter regions are opened and a compaction is scheduled to get rid of reference @@ -638,7 +624,7 @@ public void testMultiGets() throws IOException, InterruptedException { latch = new CountDownLatch(2); // Check if get() returns blocks on its close() itself getLatch = new CountDownLatch(1); - final TableName tableName = TableName.valueOf(name.getMethodName()); + final TableName tableName = TableName.valueOf(methodName); // Create KV that will give you two blocks // Create a table with block size as 1024 table = TEST_UTIL.createTable(tableName, FAMILIES_1, 1, 1024, @@ -691,7 +677,7 @@ public void testMultiGets() throws IOException, InterruptedException { foundNonZeroBlock = true; } } - assertTrue("Should have found nonzero ref count block", foundNonZeroBlock); + assertTrue(foundNonZeroBlock, "Should have found nonzero ref count block"); CustomInnerRegionObserver.getCdl().get().countDown(); CustomInnerRegionObserver.getCdl().get().countDown(); for (MultiGetThread thread : getThreads) { @@ -716,7 +702,7 @@ public void testScanWithMultipleColumnFamilies() throws IOException, Interrupted try { latch = new CountDownLatch(1); // Check if get() returns blocks on its close() itself - final TableName tableName = TableName.valueOf(name.getMethodName()); + final TableName tableName = TableName.valueOf(methodName); // Create KV that will give you two blocks // Create a table with block size as 1024 byte[][] fams = new byte[10][]; @@ -820,7 +806,7 @@ public void testParallelGetsAndScanWithWrappedRegionScanner() latch = new CountDownLatch(2); // Check if get() returns blocks on its close() itself getLatch = new CountDownLatch(1); - final TableName tableName = TableName.valueOf(name.getMethodName()); + final TableName tableName = TableName.valueOf(methodName); // Create KV that will give you two blocks // Create a table with block size as 1024 table = TEST_UTIL.createTable(tableName, FAMILIES_1, 1, 1024, @@ -872,12 +858,12 @@ public void 
testParallelGetsAndScanWithWrappedRegionScanner() @Test public void testScanWithCompaction() throws IOException, InterruptedException { - testScanWithCompactionInternals(name.getMethodName(), false); + testScanWithCompactionInternals(methodName, false); } @Test public void testReverseScanWithCompaction() throws IOException, InterruptedException { - testScanWithCompactionInternals(name.getMethodName(), true); + testScanWithCompactionInternals(methodName, true); } private void testScanWithCompactionInternals(String tableNameStr, boolean reversed) @@ -945,7 +931,7 @@ private void testScanWithCompactionInternals(String tableNameStr, boolean revers usedBlocksFound = true; } } - assertTrue("Blocks with non zero ref count should be found ", usedBlocksFound); + assertTrue(usedBlocksFound, "Blocks with non zero ref count should be found "); usedBlocksFound = false; System.out.println("Compacting"); assertEquals(2, store.getStorefilesCount()); @@ -972,7 +958,7 @@ private void testScanWithCompactionInternals(String tableNameStr, boolean revers usedBlocksFound = true; } } - assertTrue("Blocks with non zero ref count should be found ", usedBlocksFound); + assertTrue(usedBlocksFound, "Blocks with non zero ref count should be found "); // Should not throw exception compactionLatch.countDown(); latch.countDown(); @@ -1003,7 +989,7 @@ public void testBlockEvictionAfterHBASE13082WithCompactionAndFlush() try { latch = new CountDownLatch(1); compactionLatch = new CountDownLatch(1); - final TableName tableName = TableName.valueOf(name.getMethodName()); + final TableName tableName = TableName.valueOf(methodName); // Create a table with block size as 1024 table = TEST_UTIL.createTable(tableName, FAMILIES_1, 1, 1024, CustomInnerRegionObserverWrapper.class.getName()); @@ -1071,7 +1057,7 @@ public void testBlockEvictionAfterHBASE13082WithCompactionAndFlush() // flush, one new block System.out.println("Flushing cache"); region.flush(true); - assertTrue("Blocks with non zero ref count should 
be found ", usedBlocksFound); + assertTrue(usedBlocksFound, "Blocks with non zero ref count should be found "); usedBlocksFound = false; System.out.println("Compacting"); assertEquals(3, store.getStorefilesCount()); @@ -1098,7 +1084,7 @@ public void testBlockEvictionAfterHBASE13082WithCompactionAndFlush() usedBlocksFound = true; } } - assertTrue("Blocks with non zero ref count should be found ", usedBlocksFound); + assertTrue(usedBlocksFound, "Blocks with non zero ref count should be found "); // Should not throw exception compactionLatch.countDown(); latch.countDown(); @@ -1130,7 +1116,7 @@ public void testScanWithException() throws IOException, InterruptedException { try { latch = new CountDownLatch(1); exceptionLatch = new CountDownLatch(1); - final TableName tableName = TableName.valueOf(name.getMethodName()); + final TableName tableName = TableName.valueOf(methodName); // Create KV that will give you two blocks // Create a table with block size as 1024 table = TEST_UTIL.createTable(tableName, FAMILIES_1, 1, 1024, diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestBootstrapNodeUpdate.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestBootstrapNodeUpdate.java index 549575f4f404..97d87e76901a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestBootstrapNodeUpdate.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestBootstrapNodeUpdate.java @@ -19,12 +19,11 @@ import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.Matchers.hasItem; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; import java.util.Set; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseRpcServicesBase; import 
org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.ServerName; @@ -32,11 +31,10 @@ import org.apache.hadoop.hbase.security.UserProvider; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.RegionServerTests; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; import org.apache.hbase.thirdparty.com.google.common.io.Closeables; @@ -44,18 +42,15 @@ * Make sure that we can update the bootstrap server from master to region server, and region server * could also contact each other to update the bootstrap nodes. */ -@Category({ RegionServerTests.class, MediumTests.class }) +@Tag(RegionServerTests.TAG) +@Tag(MediumTests.TAG) public class TestBootstrapNodeUpdate { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestBootstrapNodeUpdate.class); - private static final HBaseTestingUtil UTIL = new HBaseTestingUtil(); private static RpcConnectionRegistry REGISTRY; - @BeforeClass + @BeforeAll public static void setUpBeforeClass() throws Exception { Configuration conf = UTIL.getConfiguration(); conf.setLong(BootstrapNodeManager.REQUEST_MASTER_INTERVAL_SECS, 5); @@ -69,7 +64,7 @@ public static void setUpBeforeClass() throws Exception { REGISTRY = new RpcConnectionRegistry(conf, UserProvider.instantiate(conf).getCurrent()); } - @AfterClass + @AfterAll public static void tearDownAfterClass() throws Exception { Closeables.close(REGISTRY, true); UTIL.shutdownMiniCluster(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestBufferedMutator.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestBufferedMutator.java index 8ec1403a4830..dd8a35a2515b 100644 --- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestBufferedMutator.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestBufferedMutator.java @@ -17,9 +17,9 @@ */ package org.apache.hadoop.hbase.client; -import static org.junit.Assert.assertArrayEquals; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; +import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; import java.io.IOException; import java.util.concurrent.ExecutorService; @@ -28,29 +28,24 @@ import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; import java.util.stream.IntStream; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.Bytes; -import org.junit.After; -import org.junit.AfterClass; -import org.junit.Before; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; -@Category({ MediumTests.class, ClientTests.class }) +@Tag(MediumTests.TAG) +@Tag(ClientTests.TAG) public class TestBufferedMutator { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestBufferedMutator.class); - private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); private static TableName TABLE_NAME = 
TableName.valueOf("test"); @@ -61,23 +56,23 @@ public class TestBufferedMutator { private static byte[] VALUE = new byte[1024]; - @BeforeClass + @BeforeAll public static void setUp() throws Exception { TEST_UTIL.startMiniCluster(1); ThreadLocalRandom.current().nextBytes(VALUE); } - @AfterClass + @AfterAll public static void tearDown() throws Exception { TEST_UTIL.shutdownMiniCluster(); } - @Before + @BeforeEach public void setUpBeforeTest() throws IOException { TEST_UTIL.createTable(TABLE_NAME, CF); } - @After + @AfterEach public void tearDownAfterTest() throws IOException { TEST_UTIL.deleteTable(TABLE_NAME); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCIDeleteOperationTimeout.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCIDeleteOperationTimeout.java index e550c65daaa4..8286db242484 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCIDeleteOperationTimeout.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCIDeleteOperationTimeout.java @@ -18,19 +18,14 @@ package org.apache.hadoop.hbase.client; import java.io.IOException; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.LargeTests; -import org.junit.ClassRule; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.Tag; -@Category({ ClientTests.class, LargeTests.class }) +@Tag(ClientTests.TAG) +@Tag(LargeTests.TAG) public class TestCIDeleteOperationTimeout extends AbstractTestCIOperationTimeout { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestCIDeleteOperationTimeout.class); - @Override protected void execute(Table table) throws IOException { table.delete(new Delete(FAM_NAM)); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCIDeleteRpcTimeout.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCIDeleteRpcTimeout.java index 5495db7db56c..02ddb8a2d7aa 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCIDeleteRpcTimeout.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCIDeleteRpcTimeout.java @@ -18,19 +18,14 @@ package org.apache.hadoop.hbase.client; import java.io.IOException; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.MediumTests; -import org.junit.ClassRule; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.Tag; -@Category({ ClientTests.class, MediumTests.class }) +@Tag(ClientTests.TAG) +@Tag(MediumTests.TAG) public class TestCIDeleteRpcTimeout extends AbstractTestCIRpcTimeout { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestCIDeleteRpcTimeout.class); - @Override protected void execute(Table table) throws IOException { table.delete(new Delete(FAM_NAM)); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCIGetOperationTimeout.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCIGetOperationTimeout.java index 84dd1b86028b..f8c962934596 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCIGetOperationTimeout.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCIGetOperationTimeout.java @@ -18,19 +18,14 @@ package org.apache.hadoop.hbase.client; import java.io.IOException; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.LargeTests; -import org.junit.ClassRule; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.Tag; -@Category({ ClientTests.class, LargeTests.class }) +@Tag(ClientTests.TAG) +@Tag(LargeTests.TAG) 
public class TestCIGetOperationTimeout extends AbstractTestCIOperationTimeout { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestCIGetOperationTimeout.class); - @Override protected void execute(Table table) throws IOException { table.get(new Get(FAM_NAM)); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCIGetRpcTimeout.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCIGetRpcTimeout.java index 3ce7d4622377..801f4ce312d2 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCIGetRpcTimeout.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCIGetRpcTimeout.java @@ -18,19 +18,14 @@ package org.apache.hadoop.hbase.client; import java.io.IOException; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.MediumTests; -import org.junit.ClassRule; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.Tag; -@Category({ ClientTests.class, MediumTests.class }) +@Tag(ClientTests.TAG) +@Tag(MediumTests.TAG) public class TestCIGetRpcTimeout extends AbstractTestCIRpcTimeout { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestCIGetRpcTimeout.class); - @Override protected void execute(Table table) throws IOException { table.get(new Get(FAM_NAM)); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCIIncrementRpcTimeout.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCIIncrementRpcTimeout.java index efc202cc1f99..e6d52b7cc735 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCIIncrementRpcTimeout.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCIIncrementRpcTimeout.java @@ -18,19 +18,14 @@ package org.apache.hadoop.hbase.client; import java.io.IOException; -import 
org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.MediumTests; -import org.junit.ClassRule; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.Tag; -@Category({ ClientTests.class, MediumTests.class }) +@Tag(ClientTests.TAG) +@Tag(MediumTests.TAG) public class TestCIIncrementRpcTimeout extends AbstractTestCIRpcTimeout { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestCIIncrementRpcTimeout.class); - @Override protected void execute(Table table) throws IOException { table.increment(new Increment(FAM_NAM).addColumn(FAM_NAM, FAM_NAM, 1)); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCIPutOperationTimeout.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCIPutOperationTimeout.java index 08c44e42f2a6..855125e99835 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCIPutOperationTimeout.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCIPutOperationTimeout.java @@ -18,19 +18,14 @@ package org.apache.hadoop.hbase.client; import java.io.IOException; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.LargeTests; -import org.junit.ClassRule; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.Tag; -@Category({ ClientTests.class, LargeTests.class }) +@Tag(ClientTests.TAG) +@Tag(LargeTests.TAG) public class TestCIPutOperationTimeout extends AbstractTestCIOperationTimeout { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestCIPutOperationTimeout.class); - @Override protected void execute(Table table) throws IOException { table.put(new Put(FAM_NAM).addColumn(FAM_NAM, FAM_NAM, FAM_NAM)); diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCIPutRpcTimeout.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCIPutRpcTimeout.java index 8d9f9c134f95..fbe235d79442 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCIPutRpcTimeout.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCIPutRpcTimeout.java @@ -18,19 +18,14 @@ package org.apache.hadoop.hbase.client; import java.io.IOException; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.MediumTests; -import org.junit.ClassRule; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.Tag; -@Category({ ClientTests.class, MediumTests.class }) +@Tag(ClientTests.TAG) +@Tag(MediumTests.TAG) public class TestCIPutRpcTimeout extends AbstractTestCIRpcTimeout { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestCIPutRpcTimeout.class); - @Override protected void execute(Table table) throws IOException { table.put(new Put(FAM_NAM).addColumn(FAM_NAM, FAM_NAM, FAM_NAM)); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCISleep.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCISleep.java index 32c81de648db..badca41fb5b9 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCISleep.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCISleep.java @@ -17,35 +17,30 @@ */ package org.apache.hadoop.hbase.client; -import static org.junit.Assert.fail; +import static org.junit.jupiter.api.Assertions.fail; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.testclassification.ClientTests; import 
org.apache.hadoop.hbase.testclassification.MediumTests; -import org.junit.Before; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({ ClientTests.class, MediumTests.class }) +@Tag(ClientTests.TAG) +@Tag(MediumTests.TAG) public class TestCISleep extends AbstractTestCITimeout { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestCISleep.class); - private static Logger LOG = LoggerFactory.getLogger(TestCISleep.class); private TableName tableName; - @Before + @BeforeEach public void setUp() { - tableName = TableName.valueOf(name.getMethodName()); + tableName = name.getTableName(); } /** diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCatalogReplicaLoadBalanceSimpleSelector.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCatalogReplicaLoadBalanceSimpleSelector.java index 184b4ba0d3cc..26bec655ee8b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCatalogReplicaLoadBalanceSimpleSelector.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCatalogReplicaLoadBalanceSimpleSelector.java @@ -19,37 +19,32 @@ import static org.apache.hadoop.hbase.HConstants.EMPTY_START_ROW; import static org.apache.hadoop.hbase.TableName.META_TABLE_NAME; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotEquals; import java.io.IOException; import java.util.concurrent.TimeUnit; import java.util.stream.IntStream; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import 
org.apache.hadoop.hbase.RegionLocations; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.MediumTests; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hbase.thirdparty.com.google.common.io.Closeables; -@Category({ MediumTests.class, ClientTests.class }) +@Tag(MediumTests.TAG) +@Tag(ClientTests.TAG) public class TestCatalogReplicaLoadBalanceSimpleSelector { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestCatalogReplicaLoadBalanceSimpleSelector.class); - private static final Logger LOG = LoggerFactory.getLogger(TestCatalogReplicaLoadBalanceSimpleSelector.class); @@ -63,7 +58,7 @@ public class TestCatalogReplicaLoadBalanceSimpleSelector { private static ConnectionRegistry registry; private static Admin admin; - @BeforeClass + @BeforeAll public static void setUp() throws Exception { Configuration conf = TEST_UTIL.getConfiguration(); @@ -82,7 +77,7 @@ public static void setUp() throws Exception { User.getCurrent()); } - @AfterClass + @AfterAll public static void tearDown() throws Exception { Closeables.close(CONN, true); TEST_UTIL.shutdownMiniCluster(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCheckAndMutate.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCheckAndMutate.java index 84e21dcf9108..09195e8b51f4 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCheckAndMutate.java +++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCheckAndMutate.java @@ -19,18 +19,18 @@ import static org.hamcrest.CoreMatchers.instanceOf; import static org.hamcrest.MatcherAssert.assertThat; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNull; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; import java.io.IOException; import java.util.Arrays; import java.util.Collections; import java.util.List; import org.apache.hadoop.hbase.CompareOperator; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.filter.BinaryComparator; @@ -41,45 +41,45 @@ import org.apache.hadoop.hbase.filter.TimestampsFilter; import org.apache.hadoop.hbase.io.TimeRange; import org.apache.hadoop.hbase.regionserver.NoSuchColumnFamilyException; +import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.Bytes; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Rule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.rules.TestName; - -@Category(MediumTests.class) +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestInfo; + 
+@Tag(MediumTests.TAG) +@Tag(ClientTests.TAG) public class TestCheckAndMutate { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestCheckAndMutate.class); - private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); private static final byte[] ROWKEY = Bytes.toBytes("12345"); private static final byte[] ROWKEY2 = Bytes.toBytes("67890"); private static final byte[] ROWKEY3 = Bytes.toBytes("abcde"); private static final byte[] ROWKEY4 = Bytes.toBytes("fghij"); private static final byte[] FAMILY = Bytes.toBytes("cf"); + private String methodName; - @Rule - public TestName name = new TestName(); - - @BeforeClass + @BeforeAll public static void setUpBeforeClass() throws Exception { TEST_UTIL.startMiniCluster(); } - @AfterClass + @AfterAll public static void tearDownAfterClass() throws Exception { TEST_UTIL.shutdownMiniCluster(); } + @BeforeEach + public void setUp(TestInfo testInfo) { + this.methodName = testInfo.getTestMethod().get().getName(); + } + private Table createTable() throws IOException, InterruptedException { - final TableName tableName = TableName.valueOf(name.getMethodName()); + final TableName tableName = TableName.valueOf(methodName); Table table = TEST_UTIL.createTable(tableName, FAMILY); TEST_UTIL.waitTableAvailable(tableName.getName(), 5000); return table; @@ -96,22 +96,22 @@ private void putOneRow(Table table) throws IOException { private void getOneRowAndAssertAllExist(final Table table) throws IOException { Get get = new Get(ROWKEY); Result result = table.get(get); - assertTrue("Column A value should be a", - Bytes.toString(result.getValue(FAMILY, Bytes.toBytes("A"))).equals("a")); - assertTrue("Column B value should be b", - Bytes.toString(result.getValue(FAMILY, Bytes.toBytes("B"))).equals("b")); - assertTrue("Column C value should be c", - Bytes.toString(result.getValue(FAMILY, Bytes.toBytes("C"))).equals("c")); + assertTrue(Bytes.toString(result.getValue(FAMILY, 
Bytes.toBytes("A"))).equals("a"), + "Column A value should be a"); + assertTrue(Bytes.toString(result.getValue(FAMILY, Bytes.toBytes("B"))).equals("b"), + "Column B value should be b"); + assertTrue(Bytes.toString(result.getValue(FAMILY, Bytes.toBytes("C"))).equals("c"), + "Column C value should be c"); } private void getOneRowAndAssertAllButCExist(final Table table) throws IOException { Get get = new Get(ROWKEY); Result result = table.get(get); - assertTrue("Column A value should be a", - Bytes.toString(result.getValue(FAMILY, Bytes.toBytes("A"))).equals("a")); - assertTrue("Column B value should be b", - Bytes.toString(result.getValue(FAMILY, Bytes.toBytes("B"))).equals("b")); - assertTrue("Column C should not exist", result.getValue(FAMILY, Bytes.toBytes("C")) == null); + assertTrue(Bytes.toString(result.getValue(FAMILY, Bytes.toBytes("A"))).equals("a"), + "Column A value should be a"); + assertTrue(Bytes.toString(result.getValue(FAMILY, Bytes.toBytes("B"))).equals("b"), + "Column B value should be b"); + assertTrue(result.getValue(FAMILY, Bytes.toBytes("C")) == null, "Column C should not exist"); } private RowMutations makeRowMutationsWithColumnCDeleted() throws IOException { @@ -359,13 +359,15 @@ public void testCheckAndMutateWithFilterAndTimeRangeForOldApi() throws Throwable } } - @Test(expected = NullPointerException.class) + @Test @Deprecated public void testCheckAndMutateWithoutConditionForOldApi() throws Throwable { - try (Table table = createTable()) { - table.checkAndMutate(ROWKEY, FAMILY) - .thenPut(new Put(ROWKEY).addColumn(FAMILY, Bytes.toBytes("D"), Bytes.toBytes("d"))); - } + assertThrows(NullPointerException.class, () -> { + try (Table table = createTable()) { + table.checkAndMutate(ROWKEY, FAMILY) + .thenPut(new Put(ROWKEY).addColumn(FAMILY, Bytes.toBytes("D"), Bytes.toBytes("d"))); + } + }); } // Tests for new CheckAndMutate API @@ -590,10 +592,12 @@ public void testCheckAndMutateWithFilterAndTimeRange() throws Throwable { } } - @Test(expected = 
IllegalStateException.class) + @Test public void testCheckAndMutateBuilderWithoutCondition() { - CheckAndMutate.newBuilder(ROWKEY) - .build(new Put(ROWKEY).addColumn(FAMILY, Bytes.toBytes("D"), Bytes.toBytes("d"))); + assertThrows(IllegalStateException.class, () -> { + CheckAndMutate.newBuilder(ROWKEY) + .build(new Put(ROWKEY).addColumn(FAMILY, Bytes.toBytes("D"), Bytes.toBytes("d"))); + }); } @Test diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCheckAndMutateWithByteBuff.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCheckAndMutateWithByteBuff.java index 8c7452cec0b4..8a50c68b7701 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCheckAndMutateWithByteBuff.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCheckAndMutateWithByteBuff.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hbase.client; import static org.apache.hadoop.hbase.util.Threads.sleep; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertTrue; import java.io.IOException; import java.util.List; @@ -26,7 +26,6 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableName; @@ -37,36 +36,26 @@ import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.HRegionFileSystem; import org.apache.hadoop.hbase.regionserver.RegionServerServices; +import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.wal.WAL; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Rule; -import org.junit.Test; -import 
org.junit.experimental.categories.Category; -import org.junit.rules.TestName; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@Category(LargeTests.class) +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestInfo; + +@Tag(LargeTests.TAG) +@Tag(ClientTests.TAG) public class TestCheckAndMutateWithByteBuff { - private static final Logger LOG = LoggerFactory.getLogger(TestCheckAndMutateWithByteBuff.class); - - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestCheckAndMutateWithByteBuff.class); - - @Rule - public TestName name = new TestName(); private static final byte[] CF = Bytes.toBytes("CF"); private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); private static final Configuration conf = TEST_UTIL.getConfiguration(); private static Admin admin = null; - @BeforeClass + @BeforeAll public static void setupBeforeClass() throws Exception { conf.set(HConstants.REGION_IMPL, TestCheckAndMutateRegion.class.getName()); conf.set(ByteBuffAllocator.BYTEBUFF_ALLOCATOR_CLASS, @@ -82,22 +71,23 @@ public static void setupBeforeClass() throws Exception { admin = TEST_UTIL.getAdmin(); } - @AfterClass + @AfterAll public static void tearDownAfterClass() throws Exception { TEST_UTIL.shutdownMiniCluster(); } @Test - public void testCheckAndMutateWithByteBuffNoEncode() throws Exception { - testCheckAndMutateWithByteBuff(TableName.valueOf(name.getMethodName()), DataBlockEncoding.NONE); + public void testCheckAndMutateWithByteBuffNoEncode(TestInfo testInfo) throws Exception { + testCheckAndMutateWithByteBuff(TableName.valueOf(testInfo.getTestMethod().get().getName()), + DataBlockEncoding.NONE); } @Test - public void testCheckAndMutateWithByteBuffEncode() throws Exception { + public void testCheckAndMutateWithByteBuffEncode(TestInfo testInfo) throws Exception { // Tests for 
HBASE-26777. // As most HBase.getRegion() calls have been factored out from HBase, you'd need to revert // both HBASE-26777, and the HBase.get() replacements from HBASE-26036 for this test to fail - testCheckAndMutateWithByteBuff(TableName.valueOf(name.getMethodName()), + testCheckAndMutateWithByteBuff(TableName.valueOf(testInfo.getTestMethod().get().getName()), DataBlockEncoding.FAST_DIFF); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCleanupMetaReplica.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCleanupMetaReplica.java index 912ded0a27bb..506e71c4c5b5 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCleanupMetaReplica.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCleanupMetaReplica.java @@ -17,28 +17,23 @@ */ package org.apache.hadoop.hbase.client; -import static org.junit.Assert.assertEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; import java.util.List; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.MiscTests; import org.apache.hadoop.hbase.zookeeper.ZKWatcher; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; -@Category({ MiscTests.class, MediumTests.class }) +@Tag(MiscTests.TAG) +@Tag(MediumTests.TAG) public class TestCleanupMetaReplica extends MetaWithReplicasTestBase { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestCleanupMetaReplica.class); - - @BeforeClass + @BeforeAll public static void setUp() throws Exception { startCluster(); } diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCleanupMetaReplicaThroughConfig.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCleanupMetaReplicaThroughConfig.java index 681d6da7c2fb..b9ef44727504 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCleanupMetaReplicaThroughConfig.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCleanupMetaReplicaThroughConfig.java @@ -17,32 +17,27 @@ */ package org.apache.hadoop.hbase.client; -import static org.junit.Assert.assertEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; import java.util.List; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.MiscTests; import org.apache.hadoop.hbase.util.JVMClusterUtil; import org.apache.hadoop.hbase.zookeeper.ZKWatcher; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; /** * Make sure we will honor the {@link HConstants#META_REPLICAS_NUM}. 
*/ -@Category({ MiscTests.class, MediumTests.class }) +@Tag(MiscTests.TAG) +@Tag(MediumTests.TAG) public class TestCleanupMetaReplicaThroughConfig extends MetaWithReplicasTestBase { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestCleanupMetaReplicaThroughConfig.class); - - @BeforeClass + @BeforeAll public static void setUp() throws Exception { startCluster(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientOperationInterrupt.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientOperationInterrupt.java index dcb96b49871f..ccb183d8d9dc 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientOperationInterrupt.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientOperationInterrupt.java @@ -17,6 +17,9 @@ */ package org.apache.hadoop.hbase.client; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; + import java.io.IOException; import java.io.InterruptedIOException; import java.net.SocketTimeoutException; @@ -26,7 +29,6 @@ import java.util.concurrent.atomic.AtomicInteger; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.TableName; @@ -39,22 +41,17 @@ import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Threads; -import org.junit.AfterClass; -import org.junit.Assert; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Tag; +import 
org.junit.jupiter.api.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({ LargeTests.class, ClientTests.class }) +@Tag(LargeTests.TAG) +@Tag(ClientTests.TAG) public class TestClientOperationInterrupt { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestClientOperationInterrupt.class); - private static final Logger LOG = LoggerFactory.getLogger(TestClientOperationInterrupt.class); private static HBaseTestingUtil util; @@ -77,7 +74,7 @@ public void preGetOp(final ObserverContext asyncTable; - @Rule - public TestName name = new TestName(); - - @BeforeClass + @BeforeAll public static void setUpBeforeClass() throws Exception { Configuration conf = TEST_UTIL.getConfiguration(); // Don't report so often so easier to see other rpcs @@ -104,17 +95,17 @@ public static void setUpBeforeClass() throws Exception { CONN = ASYNC_CONN.toConnection(); } - @AfterClass + @AfterAll public static void tearDownAfterClass() throws Exception { CONN.close(); ASYNC_CONN.close(); TEST_UTIL.shutdownMiniCluster(); } - public void setup(boolean isSystemTable) throws IOException { + public void setup(boolean isSystemTable, TestInfo testInfo) throws IOException { RSRpcServicesWithScanTimeout.reset(); - String nameAsString = name.getMethodName(); + String nameAsString = testInfo.getTestMethod().get().getName(); if (isSystemTable) { nameAsString = NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR + ":" + nameAsString; } @@ -136,14 +127,13 @@ public void setup(boolean isSystemTable) throws IOException { } private void expectRow(byte[] expected, Result result) { - assertTrue("Expected row: " + Bytes.toString(expected), - Bytes.equals(expected, result.getRow())); + assertTrue(Bytes.equals(expected, result.getRow()), + "Expected row: " + Bytes.toString(expected)); } private void expectNumTries(int expected) { - assertEquals( - "Expected tryNumber=" + expected + ", actual=" + RSRpcServicesWithScanTimeout.tryNumber, - expected, 
RSRpcServicesWithScanTimeout.tryNumber); + assertEquals(expected, RSRpcServicesWithScanTimeout.tryNumber, + "Expected tryNumber=" + expected + ", actual=" + RSRpcServicesWithScanTimeout.tryNumber); // reset for next RSRpcServicesWithScanTimeout.tryNumber = 0; } @@ -156,16 +146,16 @@ private void expectNumTries(int expected) { * that we can test the retry logic appropriately. */ @Test - public void testRetryOutOfOrderScannerNextException() throws IOException { - expectRetryOutOfOrderScannerNext(this::getScanner); + public void testRetryOutOfOrderScannerNextException(TestInfo testInfo) throws IOException { + expectRetryOutOfOrderScannerNext(this::getScanner, testInfo); } /** * AsyncTable version of above */ @Test - public void testRetryOutOfOrderScannerNextExceptionAsync() throws IOException { - expectRetryOutOfOrderScannerNext(this::getAsyncScanner); + public void testRetryOutOfOrderScannerNextExceptionAsync(TestInfo testInfo) throws IOException { + expectRetryOutOfOrderScannerNext(this::getAsyncScanner, testInfo); } /** @@ -173,8 +163,8 @@ public void testRetryOutOfOrderScannerNextExceptionAsync() throws IOException { * scans. 
*/ @Test - public void testNormalScanTimeoutOnNext() throws IOException { - setup(false); + public void testNormalScanTimeoutOnNext(TestInfo testInfo) throws IOException { + setup(false, testInfo); expectTimeoutOnNext(scanTimeout, this::getScanner); } @@ -182,8 +172,8 @@ public void testNormalScanTimeoutOnNext() throws IOException { * AsyncTable version of above */ @Test - public void testNormalScanTimeoutOnNextAsync() throws IOException { - setup(false); + public void testNormalScanTimeoutOnNextAsync(TestInfo testInfo) throws IOException { + setup(false, testInfo); expectTimeoutOnNext(scanTimeout, this::getAsyncScanner); } @@ -192,8 +182,8 @@ public void testNormalScanTimeoutOnNextAsync() throws IOException { * meta scans */ @Test - public void testNormalScanTimeoutOnOpenScanner() throws IOException { - setup(false); + public void testNormalScanTimeoutOnOpenScanner(TestInfo testInfo) throws IOException { + setup(false, testInfo); expectTimeoutOnOpenScanner(rpcTimeout, this::getScanner); } @@ -201,8 +191,8 @@ public void testNormalScanTimeoutOnOpenScanner() throws IOException { * AsyncTable version of above */ @Test - public void testNormalScanTimeoutOnOpenScannerAsync() throws IOException { - setup(false); + public void testNormalScanTimeoutOnOpenScannerAsync(TestInfo testInfo) throws IOException { + setup(false, testInfo); expectTimeoutOnOpenScanner(rpcTimeout, this::getAsyncScanner); } @@ -211,8 +201,8 @@ public void testNormalScanTimeoutOnOpenScannerAsync() throws IOException { * next() calls in meta scans */ @Test - public void testMetaScanTimeoutOnNext() throws IOException { - setup(true); + public void testMetaScanTimeoutOnNext(TestInfo testInfo) throws IOException { + setup(true, testInfo); expectTimeoutOnNext(metaScanTimeout, this::getScanner); } @@ -220,8 +210,8 @@ public void testMetaScanTimeoutOnNext() throws IOException { * AsyncTable version of above */ @Test - public void testMetaScanTimeoutOnNextAsync() throws IOException { - setup(true); + public 
void testMetaScanTimeoutOnNextAsync(TestInfo testInfo) throws IOException { + setup(true, testInfo); expectTimeoutOnNext(metaScanTimeout, this::getAsyncScanner); } @@ -230,8 +220,8 @@ public void testMetaScanTimeoutOnNextAsync() throws IOException { * openScanner() calls for meta scans */ @Test - public void testMetaScanTimeoutOnOpenScanner() throws IOException { - setup(true); + public void testMetaScanTimeoutOnOpenScanner(TestInfo testInfo) throws IOException { + setup(true, testInfo); expectTimeoutOnOpenScanner(metaScanTimeout, this::getScanner); } @@ -239,14 +229,14 @@ public void testMetaScanTimeoutOnOpenScanner() throws IOException { * AsyncTable version of above */ @Test - public void testMetaScanTimeoutOnOpenScannerAsync() throws IOException { - setup(true); + public void testMetaScanTimeoutOnOpenScannerAsync(TestInfo testInfo) throws IOException { + setup(true, testInfo); expectTimeoutOnOpenScanner(metaScanTimeout, this::getAsyncScanner); } - private void expectRetryOutOfOrderScannerNext(Supplier scannerSupplier) - throws IOException { - setup(false); + private void expectRetryOutOfOrderScannerNext(Supplier scannerSupplier, + TestInfo testInfo) throws IOException { + setup(false, testInfo); RSRpcServicesWithScanTimeout.seqNoToThrowOn = 1; LOG.info( @@ -297,8 +287,8 @@ private void expectRetryOutOfOrderScannerNext(Supplier scannerSup // ensure we verified all rows. this along with the expectRow check above // proves that we didn't miss any rows. 
- assertEquals("Expected to exhaust expectedResults array length=" + expectedResults.length - + ", actual index=" + i, expectedResults.length, i); + assertEquals(expectedResults.length, i, "Expected to exhaust expectedResults array length=" + + expectedResults.length + ", actual index=" + i); // expect all but the first row (which came from initial openScanner) to have thrown an error expectNumTries(expectedResults.length - 1); @@ -326,7 +316,7 @@ private void expectTimeoutOnNext(int timeout, Supplier scannerSup scanner.next(); fail("Expected CallTimeoutException"); } catch (RetriesExhaustedException e) { - assertTrue("Expected CallTimeoutException", e.getCause() instanceof CallTimeoutException); + assertTrue(e.getCause() instanceof CallTimeoutException, "Expected CallTimeoutException"); } expectTimeout(start, timeout); } @@ -341,15 +331,15 @@ private void expectTimeoutOnOpenScanner(int timeout, Supplier sca scannerSupplier.get().next(); fail("Expected CallTimeoutException"); } catch (RetriesExhaustedException e) { - assertTrue("Expected CallTimeoutException, but was " + e.getCause(), - e.getCause() instanceof CallTimeoutException); + assertTrue(e.getCause() instanceof CallTimeoutException, + "Expected CallTimeoutException, but was " + e.getCause()); } expectTimeout(start, timeout); } private void expectTimeout(long start, int timeout) { long duration = System.nanoTime() - start; - assertTrue("Expected duration >= " + timeout + ", but was " + duration, duration >= timeout); + assertTrue(duration >= timeout, "Expected duration >= " + timeout + ", but was " + duration); } private ResultScanner getScanner() { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientSideRegionScanner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientSideRegionScanner.java index 375406e2d05a..8a69508934bc 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientSideRegionScanner.java +++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientSideRegionScanner.java @@ -18,11 +18,14 @@ package org.apache.hadoop.hbase.client; import static org.apache.hadoop.hbase.client.metrics.ServerSideScanMetrics.COUNT_OF_ROWS_SCANNED_KEY_METRIC_NAME; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertNull; -import static org.junit.Assert.assertTrue; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.instanceOf; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertSame; +import static org.junit.jupiter.api.Assertions.assertTrue; import static org.mockito.ArgumentMatchers.anyList; import static org.mockito.Mockito.spy; import static org.mockito.Mockito.times; @@ -37,7 +40,6 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableName; @@ -53,22 +55,16 @@ import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.util.Bytes; -import org.junit.AfterClass; -import org.junit.Assert; -import org.junit.Before; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Rule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.rules.TestName; - -@Category({ SmallTests.class, ClientTests.class }) +import org.junit.jupiter.api.AfterAll; +import 
org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestInfo; + +@Tag(SmallTests.TAG) +@Tag(ClientTests.TAG) public class TestClientSideRegionScanner { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestClientSideRegionScanner.class); - private final static HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); private static final TableName TABLE_NAME = TableName.valueOf("test"); private static final byte[] FAM_NAME = Bytes.toBytes("f"); @@ -79,22 +75,21 @@ public class TestClientSideRegionScanner { private TableDescriptor htd; private RegionInfo hri; private Scan scan; + private String methodName; - @Rule - public TestName name = new TestName(); - - @BeforeClass + @BeforeAll public static void setUpBeforeClass() throws Exception { TEST_UTIL.startMiniCluster(1); } - @AfterClass + @AfterAll public static void tearDownAfterClass() throws Exception { TEST_UTIL.shutdownMiniCluster(); } - @Before - public void setup() throws IOException { + @BeforeEach + public void setup(TestInfo testInfo) throws IOException { + this.methodName = testInfo.getTestMethod().get().getName(); conf = TEST_UTIL.getConfiguration(); rootDir = TEST_UTIL.getDefaultRootDirPath(); fs = TEST_UTIL.getTestFileSystem(); @@ -106,14 +101,14 @@ public void setup() throws IOException { @Test public void testDefaultBlockCache() throws IOException { Configuration copyConf = new Configuration(conf); - ClientSideRegionScanner clientSideRegionScanner = - new ClientSideRegionScanner(copyConf, fs, rootDir, htd, hri, scan, null); - - BlockCache blockCache = clientSideRegionScanner.getRegion().getBlockCache(); - assertNotNull(blockCache); - assertTrue(blockCache instanceof IndexOnlyLruBlockCache); - assertTrue(HConstants.HBASE_CLIENT_SCANNER_ONHEAP_BLOCK_CACHE_FIXED_SIZE_DEFAULT - == blockCache.getMaxSize()); + try (ClientSideRegionScanner 
clientSideRegionScanner = + new ClientSideRegionScanner(copyConf, fs, rootDir, htd, hri, scan, null)) { + BlockCache blockCache = clientSideRegionScanner.getRegion().getBlockCache(); + assertNotNull(blockCache); + assertThat(blockCache, instanceOf(IndexOnlyLruBlockCache.class)); + assertEquals(HConstants.HBASE_CLIENT_SCANNER_ONHEAP_BLOCK_CACHE_FIXED_SIZE_DEFAULT, + blockCache.getMaxSize()); + } } @Test @@ -122,24 +117,24 @@ public void testConfiguredBlockCache() throws IOException { // tiny 1MB fixed cache size long blockCacheFixedSize = 1024 * 1024L; copyConf.setLong(HConstants.HFILE_ONHEAP_BLOCK_CACHE_FIXED_SIZE_KEY, blockCacheFixedSize); - ClientSideRegionScanner clientSideRegionScanner = - new ClientSideRegionScanner(copyConf, fs, rootDir, htd, hri, scan, null); - - BlockCache blockCache = clientSideRegionScanner.getRegion().getBlockCache(); - assertNotNull(blockCache); - assertTrue(blockCache instanceof IndexOnlyLruBlockCache); - assertTrue(blockCacheFixedSize == blockCache.getMaxSize()); + try (ClientSideRegionScanner clientSideRegionScanner = + new ClientSideRegionScanner(copyConf, fs, rootDir, htd, hri, scan, null)) { + BlockCache blockCache = clientSideRegionScanner.getRegion().getBlockCache(); + assertNotNull(blockCache); + assertThat(blockCache, instanceOf(IndexOnlyLruBlockCache.class)); + assertEquals(blockCacheFixedSize, blockCache.getMaxSize()); + } } @Test public void testNoBlockCache() throws IOException { Configuration copyConf = new Configuration(conf); copyConf.setFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, 0.0f); - ClientSideRegionScanner clientSideRegionScanner = - new ClientSideRegionScanner(copyConf, fs, rootDir, htd, hri, scan, null); - - BlockCache blockCache = clientSideRegionScanner.getRegion().getBlockCache(); - assertNull(blockCache); + try (ClientSideRegionScanner clientSideRegionScanner = + new ClientSideRegionScanner(copyConf, fs, rootDir, htd, hri, scan, null)) { + BlockCache blockCache = 
clientSideRegionScanner.getRegion().getBlockCache(); + assertNull(blockCache); + } } @Test @@ -169,27 +164,28 @@ public void testContinuesToScanIfHasMore() throws IOException { // Flush contents to disk so we can scan the fs TEST_UTIL.getAdmin().flush(TABLE_NAME); - ClientSideRegionScanner clientSideRegionScanner = - new ClientSideRegionScanner(copyConf, fs, rootDir, htd, hri, scan, null); - RegionScanner scannerSpy = spy(clientSideRegionScanner.scanner); - clientSideRegionScanner.scanner = scannerSpy; - Result result = clientSideRegionScanner.next(); + try (ClientSideRegionScanner clientSideRegionScanner = + new ClientSideRegionScanner(copyConf, fs, rootDir, htd, hri, scan, null)) { + RegionScanner scannerSpy = spy(clientSideRegionScanner.scanner); + clientSideRegionScanner.scanner = scannerSpy; + Result result = clientSideRegionScanner.next(); - verify(scannerSpy, times(6)).nextRaw(anyList()); - assertNotNull(result); - assertEquals(Bytes.toInt(result.getRow()), 5); - assertTrue(clientSideRegionScanner.hasMore); + verify(scannerSpy, times(6)).nextRaw(anyList()); + assertNotNull(result); + assertEquals(Bytes.toInt(result.getRow()), 5); + assertTrue(clientSideRegionScanner.hasMore); + + for (int i = 6; i < 10; ++i) { + result = clientSideRegionScanner.next(); + verify(scannerSpy, times(i + 1)).nextRaw(anyList()); + assertNotNull(result); + assertEquals(Bytes.toInt(result.getRow()), i); + } - for (int i = 6; i < 10; ++i) { result = clientSideRegionScanner.next(); - verify(scannerSpy, times(i + 1)).nextRaw(anyList()); - assertNotNull(result); - assertEquals(Bytes.toInt(result.getRow()), i); + assertNull(result); + assertFalse(clientSideRegionScanner.hasMore); } - - result = clientSideRegionScanner.next(); - assertNull(result); - assertFalse(clientSideRegionScanner.hasMore); } } @@ -216,11 +212,11 @@ private void testScanMetricsWithScanMetricsByRegionDisabled(ScanMetrics scanMetr ScanMetrics scanMetricsFromScanner = clientSideRegionScanner.getScanMetrics(); 
assertNotNull(scanMetricsFromScanner); if (scanMetrics != null) { - Assert.assertSame(scanMetrics, scanMetricsFromScanner); + assertSame(scanMetrics, scanMetricsFromScanner); } Map metricsMap = scanMetricsFromScanner.getMetricsMap(false); - Assert.assertTrue(metricsMap.get(COUNT_OF_ROWS_SCANNED_KEY_METRIC_NAME) > 0); - Assert.assertTrue(scanMetricsFromScanner.collectMetricsByRegion(false).isEmpty()); + assertTrue(metricsMap.get(COUNT_OF_ROWS_SCANNED_KEY_METRIC_NAME) > 0); + assertTrue(scanMetricsFromScanner.collectMetricsByRegion(false).isEmpty()); } } @@ -245,19 +241,19 @@ private void testScanMetricByRegion(ScanMetrics scanMetrics) throws IOException ScanMetrics scanMetricsFromScanner = clientSideRegionScanner.getScanMetrics(); assertNotNull(scanMetricsFromScanner); if (scanMetrics != null) { - Assert.assertSame(scanMetrics, scanMetricsFromScanner); + assertSame(scanMetrics, scanMetricsFromScanner); } Map> scanMetricsByRegion = scanMetricsFromScanner.collectMetricsByRegion(); - Assert.assertEquals(1, scanMetricsByRegion.size()); + assertEquals(1, scanMetricsByRegion.size()); for (Map.Entry> entry : scanMetricsByRegion .entrySet()) { ScanMetricsRegionInfo scanMetricsRegionInfo = entry.getKey(); Map metricsMap = entry.getValue(); - Assert.assertEquals(hri.getEncodedName(), scanMetricsRegionInfo.getEncodedRegionName()); - Assert.assertNull(scanMetricsRegionInfo.getServerName()); - Assert.assertTrue(metricsMap.get(COUNT_OF_ROWS_SCANNED_KEY_METRIC_NAME) > 0); - Assert.assertEquals((long) metricsMap.get(COUNT_OF_ROWS_SCANNED_KEY_METRIC_NAME), + assertEquals(hri.getEncodedName(), scanMetricsRegionInfo.getEncodedRegionName()); + assertNull(scanMetricsRegionInfo.getServerName()); + assertTrue(metricsMap.get(COUNT_OF_ROWS_SCANNED_KEY_METRIC_NAME) > 0); + assertEquals((long) metricsMap.get(COUNT_OF_ROWS_SCANNED_KEY_METRIC_NAME), scanMetricsFromScanner.countOfRowsScanned.get()); } } @@ -276,7 +272,7 @@ public void testScanMetricsByRegionWithScanMetricsAsInput() throws 
IOException { @Test public void testGetFilesRead() throws Exception { // Create a table and add some data - TableName tableName = TableName.valueOf(name.getMethodName()); + TableName tableName = TableName.valueOf(methodName); try (Table table = TEST_UTIL.createTable(tableName, new byte[][] { FAM_NAME })) { TableDescriptor tableHtd = TEST_UTIL.getAdmin().getDescriptor(tableName); RegionInfo tableHri = TEST_UTIL.getAdmin().getRegions(tableName).get(0); @@ -307,24 +303,24 @@ public void testGetFilesRead() throws Exception { expectedFilePaths.add(qualifiedPath); } int expectedFileCount = expectedFilePaths.size(); - assertTrue("Should have at least one store file after flush", expectedFileCount >= 1); + assertTrue(expectedFileCount >= 1, "Should have at least one store file after flush"); // Before closing, should return empty set Set filesReadBeforeClose = clientSideRegionScanner.getFilesRead(); - assertTrue("Should return empty set before closing", filesReadBeforeClose.isEmpty()); + assertTrue(filesReadBeforeClose.isEmpty(), "Should return empty set before closing"); // Scan through some results Result result; int count = 0; while ((result = clientSideRegionScanner.next()) != null && count < 3) { - assertNotNull("Result should not be null", result); + assertNotNull(result, "Result should not be null"); count++; } // Still should return empty set before closing filesReadBeforeClose = clientSideRegionScanner.getFilesRead(); - assertTrue("Should return empty set before closing even after scanning", - filesReadBeforeClose.isEmpty()); + assertTrue(filesReadBeforeClose.isEmpty(), + "Should return empty set before closing even after scanning"); // Close the scanner - this should collect files from the underlying scanner clientSideRegionScanner.close(); @@ -332,11 +328,11 @@ public void testGetFilesRead() throws Exception { // After closing, should return files from the underlying scanner Set filesReadAfterClose = clientSideRegionScanner.getFilesRead(); // Verify exact file 
count - assertEquals("Should have exact file count after closing", expectedFileCount, - filesReadAfterClose.size()); + assertEquals(expectedFileCount, filesReadAfterClose.size(), + "Should have exact file count after closing"); // Verify exact file names match - assertEquals("Should contain all expected file paths", expectedFilePaths, - filesReadAfterClose); + assertEquals(expectedFilePaths, filesReadAfterClose, + "Should contain all expected file paths"); } finally { TEST_UTIL.deleteTable(tableName); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientTableMetrics.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientTableMetrics.java index c0980c51256a..81db169bfa83 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientTableMetrics.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientTableMetrics.java @@ -17,34 +17,30 @@ */ package org.apache.hadoop.hbase.client; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; import com.codahale.metrics.Timer; import java.io.IOException; import java.util.Arrays; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.Bytes; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Tag; +import 
org.junit.jupiter.api.Test; import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ClientService; -@Category(MediumTests.class) +@Tag(MediumTests.TAG) +@Tag(ClientTests.TAG) public class TestClientTableMetrics { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestClientTableMetrics.class); - private static HBaseTestingUtil UTIL; private static Connection CONN; private static MetricsConnection METRICS; @@ -52,7 +48,7 @@ public class TestClientTableMetrics { private static final TableName TABLE_1 = TableName.valueOf(tableName); private static final byte[] FAMILY = Bytes.toBytes("f"); - @BeforeClass + @BeforeAll public static void beforeClass() throws Exception { Configuration conf = HBaseConfiguration.create(); conf.setBoolean(MetricsConnection.CLIENT_SIDE_METRICS_ENABLED_KEY, true); @@ -65,7 +61,7 @@ public static void beforeClass() throws Exception { METRICS = ((AsyncConnectionImpl) CONN.toAsyncConnection()).getConnectionMetrics().get(); } - @AfterClass + @AfterAll public static void afterClass() throws Exception { UTIL.deleteTableIfAny(TABLE_1); UTIL.shutdownMiniCluster(); @@ -141,8 +137,8 @@ private static void verifyTableMetrics(String metricKey, int expectedVal) { long numOps = timer.getCount(); double p95 = timer.getSnapshot().get95thPercentile(); double p99 = timer.getSnapshot().get99thPercentile(); - assertEquals("metric: " + metricKey + numOpsSuffix + " val: " + numOps, expectedVal, numOps); - assertTrue("metric: " + metricKey + p95Suffix + " val: " + p95, p95 >= 0); - assertTrue("metric: " + metricKey + p99Suffix + " val: " + p99, p99 >= 0); + assertEquals(expectedVal, numOps, "metric: " + metricKey + numOpsSuffix + " val: " + numOps); + assertTrue(p95 >= 0, "metric: " + metricKey + p95Suffix + " val: " + p95); + assertTrue(p99 >= 0, "metric: " + metricKey + p99Suffix + " val: " + p99); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientTimeouts.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientTimeouts.java index 9a92f4b1aa55..1d64a9caa55d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientTimeouts.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientTimeouts.java @@ -17,8 +17,8 @@ */ package org.apache.hadoop.hbase.client; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; import java.io.IOException; import java.net.SocketAddress; @@ -27,7 +27,6 @@ import java.util.concurrent.ThreadLocalRandom; import java.util.concurrent.atomic.AtomicInteger; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; @@ -40,11 +39,10 @@ import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.MediumTests; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; import org.apache.hbase.thirdparty.com.google.protobuf.BlockingRpcChannel; import org.apache.hbase.thirdparty.com.google.protobuf.Descriptors.MethodDescriptor; @@ -54,22 +52,19 @@ import org.apache.hbase.thirdparty.com.google.protobuf.RpcController; import org.apache.hbase.thirdparty.com.google.protobuf.ServiceException; -@Category({ MediumTests.class, ClientTests.class }) +@Tag(MediumTests.TAG) +@Tag(ClientTests.TAG) public class TestClientTimeouts { - @ClassRule - public static final 
HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestClientTimeouts.class); - private final static HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); protected static int SLAVES = 1; - @BeforeClass + @BeforeAll public static void setUpBeforeClass() throws Exception { TEST_UTIL.startMiniCluster(SLAVES); } - @AfterClass + @AfterAll public static void tearDownAfterClass() throws Exception { TEST_UTIL.shutdownMiniCluster(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCompleteResultScanResultCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCompleteResultScanResultCache.java index 4973c2c98a40..754ec3cd7608 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCompleteResultScanResultCache.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCompleteResultScanResultCache.java @@ -17,30 +17,25 @@ */ package org.apache.hadoop.hbase.client; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertSame; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertSame; import java.io.IOException; import java.util.Arrays; import org.apache.hadoop.hbase.Cell; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.util.Bytes; -import org.junit.After; -import org.junit.Before; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; -@Category({ SmallTests.class, ClientTests.class }) +@Tag(SmallTests.TAG) +@Tag(ClientTests.TAG) public class TestCompleteResultScanResultCache { - @ClassRule - public 
static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestCompleteResultScanResultCache.class); - private static byte[] CF = Bytes.toBytes("cf"); private static byte[] CQ1 = Bytes.toBytes("cq1"); @@ -51,12 +46,12 @@ public class TestCompleteResultScanResultCache { private CompleteScanResultCache resultCache; - @Before + @BeforeEach public void setUp() { resultCache = new CompleteScanResultCache(); } - @After + @AfterEach public void tearDown() { resultCache.clear(); resultCache = null; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestConnection.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestConnection.java index 42d5a87a9cec..91eef7354814 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestConnection.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestConnection.java @@ -17,9 +17,12 @@ */ package org.apache.hadoop.hbase.client; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertSame; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; import java.io.IOException; import java.util.List; @@ -31,7 +34,6 @@ import java.util.stream.IntStream; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.DoNotRetryIOException; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionLocation; @@ -41,23 +43,21 @@ import org.apache.hadoop.hbase.client.coprocessor.Batch; import org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint; import 
org.apache.hadoop.hbase.ipc.RpcClient; +import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.ManualEnvironmentEdge; -import org.junit.After; -import org.junit.AfterClass; -import org.junit.Assert; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Rule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.rules.TestName; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestInfo; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.protobuf.ServiceException; import org.apache.hbase.thirdparty.io.netty.util.ResourceLeakDetector; import org.apache.hbase.thirdparty.io.netty.util.ResourceLeakDetector.Level; @@ -67,24 +67,19 @@ /** * This class is for testing {@link Connection}. 
*/ -@Category({ LargeTests.class }) +@Tag(LargeTests.TAG) +@Tag(ClientTests.TAG) public class TestConnection { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestConnection.class); - private static final Logger LOG = LoggerFactory.getLogger(TestConnection.class); private final static HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); private static final byte[] FAM_NAM = Bytes.toBytes("f"); private static final byte[] ROW = Bytes.toBytes("bbb"); private static final int RPC_RETRY = 5; + private String methodName; - @Rule - public TestName name = new TestName(); - - @BeforeClass + @BeforeAll public static void setUpBeforeClass() throws Exception { ResourceLeakDetector.setLevel(Level.PARANOID); TEST_UTIL.getConfiguration().setBoolean(HConstants.STATUS_PUBLISHED, true); @@ -96,13 +91,18 @@ public static void setUpBeforeClass() throws Exception { } - @AfterClass + @AfterAll public static void tearDownAfterClass() throws Exception { TEST_UTIL.shutdownMiniCluster(); } - @After - public void tearDown() throws IOException { + @BeforeEach + public void setUp(TestInfo testInfo) { + this.methodName = testInfo.getTestMethod().get().getName(); + } + + @AfterEach + public void tearDown() throws IOException { TEST_UTIL.getAdmin().balancerSwitch(true, true); } @@ -112,11 +112,11 @@ public void tearDown() throws IOException { */ @Test public void testAdminFactory() throws IOException { - Connection con1 = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration()); - Admin admin = con1.getAdmin(); - assertTrue(admin.getConnection() == con1); - assertTrue(admin.getConfiguration() == TEST_UTIL.getConfiguration()); - con1.close(); + try (Connection con1 = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration()); + Admin admin = con1.getAdmin()) { + assertSame(con1, admin.getConnection()); + assertSame(TEST_UTIL.getConfiguration(), admin.getConfiguration()); + } } /** @@ -217,7 +217,7 @@ public boolean 
evaluate() throws Exception { }); table.close(); connection.close(); - Assert.assertTrue("Unexpected exception is " + failed.get(), failed.get() == null); + assertNull(failed.get(), "Unexpected exception is " + failed.get()); } /** @@ -225,7 +225,7 @@ public boolean evaluate() throws Exception { */ @Test public void testConnectionIdle() throws Exception { - final TableName tableName = TableName.valueOf(name.getMethodName()); + final TableName tableName = TableName.valueOf(methodName); TEST_UTIL.createTable(tableName, FAM_NAM).close(); int idleTime = 20000; boolean previousBalance = TEST_UTIL.getAdmin().balancerSwitch(false, true); @@ -334,7 +334,7 @@ public void testCreateConnection() throws Exception { public void testLocateRegionsWithRegionReplicas() throws IOException { int regionReplication = 3; byte[] family = Bytes.toBytes("cf"); - TableName tableName = TableName.valueOf(name.getMethodName()); + TableName tableName = TableName.valueOf(methodName); // Create a table with region replicas TableDescriptorBuilder builder = @@ -361,10 +361,10 @@ public void testLocateRegionsWithRegionReplicas() throws IOException { } } - @Test(expected = DoNotRetryIOException.class) - public void testClosedConnection() throws ServiceException, Throwable { + @Test + public void testClosedConnection() throws Throwable { byte[] family = Bytes.toBytes("cf"); - TableName tableName = TableName.valueOf(name.getMethodName()); + TableName tableName = TableName.valueOf(methodName); TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(tableName) .setCoprocessor(MultiRowMutationEndpoint.class.getName()) .setColumnFamily(ColumnFamilyDescriptorBuilder.of(family)); @@ -377,17 +377,19 @@ public void testClosedConnection() throws ServiceException, Throwable { } finally { conn.close(); } - Batch.Call callable = service -> { - throw new RuntimeException("Should not arrive here"); - }; - conn.getTable(tableName).coprocessorService(MultiRowMutationService.class, - 
HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW, callable); + assertThrows(DoNotRetryIOException.class, () -> { + Batch.Call callable = service -> { + throw new RuntimeException("Should not arrive here"); + }; + conn.getTable(tableName).coprocessorService(MultiRowMutationService.class, + HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW, callable); + }); } // There is no assertion, but you need to confirm that there is no resource leak output from netty @Test public void testCancelConnectionMemoryLeak() throws IOException, InterruptedException { - TableName tableName = TableName.valueOf(name.getMethodName()); + TableName tableName = TableName.valueOf(methodName); TEST_UTIL.createTable(tableName, FAM_NAM).close(); TEST_UTIL.getAdmin().balancerSwitch(false, true); try (Connection connection = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestConnectionAttributes.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestConnectionAttributes.java index 5d4378813740..7463f37e83da 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestConnectionAttributes.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestConnectionAttributes.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hbase.client; -import static org.junit.Assert.assertEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; import java.io.IOException; import java.util.HashMap; @@ -29,7 +29,6 @@ import org.apache.hadoop.hbase.AuthUtil; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.SingleProcessHBaseCluster; @@ -43,19 +42,15 @@ import org.apache.hadoop.hbase.testclassification.ClientTests; import 
org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.Bytes; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; -@Category({ ClientTests.class, MediumTests.class }) +@Tag(ClientTests.TAG) +@Tag(MediumTests.TAG) public class TestConnectionAttributes { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestConnectionAttributes.class); - private static final Map CONNECTION_ATTRIBUTES = new HashMap<>(); static { CONNECTION_ATTRIBUTES.put("clientId", Bytes.toBytes("foo")); @@ -66,7 +61,7 @@ public class TestConnectionAttributes { private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); private static SingleProcessHBaseCluster cluster; - @BeforeClass + @BeforeAll public static void setUp() throws Exception { cluster = TEST_UTIL.startMiniCluster(1); Table table = TEST_UTIL.createTable(TABLE_NAME, new byte[][] { FAMILY }, 1, @@ -74,7 +69,7 @@ public static void setUp() throws Exception { table.close(); } - @AfterClass + @AfterAll public static void afterClass() throws Exception { cluster.close(); TEST_UTIL.shutdownMiniCluster(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestConnectionUtils.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestConnectionUtils.java index 6ddded6168b6..32ad6c3d6ea1 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestConnectionUtils.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestConnectionUtils.java @@ -17,25 +17,20 @@ */ package org.apache.hadoop.hbase.client; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertTrue; import java.util.Set; import 
java.util.TreeSet; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.SmallTests; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; -@Category({ SmallTests.class, ClientTests.class }) +@Tag(SmallTests.TAG) +@Tag(ClientTests.TAG) public class TestConnectionUtils { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestConnectionUtils.class); - @Test public void testRetryTimeJitter() { long[] retries = new long[200]; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestDropTimeoutRequest.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestDropTimeoutRequest.java index 09689309d704..44c7aa62fec0 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestDropTimeoutRequest.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestDropTimeoutRequest.java @@ -22,7 +22,6 @@ import java.util.Optional; import java.util.concurrent.atomic.AtomicLong; import org.apache.hadoop.hbase.Cell; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableName; @@ -30,34 +29,24 @@ import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor; import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; import org.apache.hadoop.hbase.coprocessor.RegionObserver; +import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Threads; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.ClassRule; 
-import org.junit.Rule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.rules.TestName; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestInfo; /** * Test a drop timeout request. This test used to be in TestHCM but it has particulare requirements * -- i.e. one handler only -- so run it apart from the rest of TestHCM. */ -@Category({ MediumTests.class }) +@Tag(MediumTests.TAG) +@Tag(ClientTests.TAG) public class TestDropTimeoutRequest { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestDropTimeoutRequest.class); - - @Rule - public TestName name = new TestName(); - - private static final Logger LOG = LoggerFactory.getLogger(TestDropTimeoutRequest.class); private final static HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); private static final byte[] FAM_NAM = Bytes.toBytes("f"); private static final int RPC_RETRY = 5; @@ -88,7 +77,7 @@ public void preGetOp(final ObserverContext puts = ROWS.stream().map(r -> new Put(r)).collect(Collectors.toList()); for (int i = 0; i != 20; ++i) { @@ -106,7 +98,7 @@ public void setUp() throws Exception { assertTrue(getRegionInfo().stream().allMatch(r -> r.getMemStoreDataSize() != 0)); } - @After + @AfterEach public void tearDown() throws Exception { for (TableDescriptor htd : TEST_UTIL.getAdmin().listTableDescriptors()) { LOG.info("Tear down, remove table=" + htd.getTableName()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFlushFromClientWithDisabledFlushProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFlushFromClientWithDisabledFlushProcedure.java index 732e1c53c54b..fb71a1eccfa9 100644 --- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFlushFromClientWithDisabledFlushProcedure.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFlushFromClientWithDisabledFlushProcedure.java @@ -17,14 +17,13 @@ */ package org.apache.hadoop.hbase.client; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertThrows; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertThrows; import java.util.ArrayList; import java.util.List; import java.util.concurrent.CompletableFuture; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.procedure.flush.MasterFlushTableProcedureManager; @@ -34,27 +33,22 @@ import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.FutureUtils; -import org.junit.After; -import org.junit.AfterClass; -import org.junit.Before; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Rule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.rules.TestName; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestInfo; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hbase.thirdparty.com.google.common.io.Closeables; -@Category({ MediumTests.class, ClientTests.class }) +@Tag(MediumTests.TAG) +@Tag(ClientTests.TAG) public class TestFlushFromClientWithDisabledFlushProcedure { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - 
HBaseClassTestRule.forClass(TestFlushFromClientWithDisabledFlushProcedure.class); - private static final Logger LOG = LoggerFactory.getLogger(TestFlushFromClientWithDisabledFlushProcedure.class); private final static HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); @@ -62,12 +56,9 @@ public class TestFlushFromClientWithDisabledFlushProcedure { private static final byte[] FAMILY = Bytes.toBytes("info"); private static final byte[] QUALIFIER = Bytes.toBytes("name"); - @Rule - public TestName name = new TestName(); - private TableName tableName; - @BeforeClass + @BeforeAll public static void setUpBeforeClass() throws Exception { Configuration configuration = TEST_UTIL.getConfiguration(); configuration.setBoolean(MasterFlushTableProcedureManager.FLUSH_PROCEDURE_ENABLED, false); @@ -75,15 +66,15 @@ public static void setUpBeforeClass() throws Exception { asyncConn = ConnectionFactory.createAsyncConnection(TEST_UTIL.getConfiguration()).get(); } - @AfterClass + @AfterAll public static void tearDownAfterClass() throws Exception { Closeables.close(asyncConn, true); TEST_UTIL.shutdownMiniCluster(); } - @Before - public void setUp() throws Exception { - tableName = TableName.valueOf(name.getMethodName()); + @BeforeEach + public void setUp(TestInfo testInfo) throws Exception { + tableName = TableName.valueOf(testInfo.getTestMethod().get().getName()); try (Table t = TEST_UTIL.createTable(tableName, FAMILY)) { List puts = new ArrayList<>(); for (int i = 0; i <= 10; ++i) { @@ -97,7 +88,7 @@ public void setUp() throws Exception { assertFalse(regions.isEmpty()); } - @After + @AfterEach public void tearDown() throws Exception { for (TableDescriptor htd : TEST_UTIL.getAdmin().listTableDescriptors()) { LOG.info("Tear down, remove table=" + htd.getTableName()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestGetProcedureResult.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestGetProcedureResult.java index baf2f1bf0a01..1820df79d67d 
100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestGetProcedureResult.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestGetProcedureResult.java @@ -17,11 +17,10 @@ */ package org.apache.hadoop.hbase.client; -import static org.junit.Assert.assertEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; import java.io.IOException; import java.util.concurrent.CountDownLatch; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.MasterNotRunningException; import org.apache.hadoop.hbase.TableName; @@ -35,11 +34,10 @@ import org.apache.hadoop.hbase.procedure2.ProcedureYieldException; import org.apache.hadoop.hbase.testclassification.MasterTests; import org.apache.hadoop.hbase.testclassification.MediumTests; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; import org.apache.hbase.thirdparty.com.google.protobuf.ServiceException; @@ -49,13 +47,10 @@ /** * Testcase for HBASE-19608. 
*/ -@Category({ MasterTests.class, MediumTests.class }) +@Tag(MasterTests.TAG) +@Tag(MediumTests.TAG) public class TestGetProcedureResult { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestGetProcedureResult.class); - private static final HBaseTestingUtil UTIL = new HBaseTestingUtil(); public static final class DummyProcedure extends Procedure @@ -102,12 +97,12 @@ protected void deserializeStateData(ProcedureStateSerializer serializer) throws } } - @BeforeClass + @BeforeAll public static void setUp() throws Exception { UTIL.startMiniCluster(1); } - @AfterClass + @AfterAll public static void tearDown() throws Exception { UTIL.shutdownMiniCluster(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestGetScanColumnsWithNewVersionBehavior.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestGetScanColumnsWithNewVersionBehavior.java index 51c47b8549cd..92e9e4106488 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestGetScanColumnsWithNewVersionBehavior.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestGetScanColumnsWithNewVersionBehavior.java @@ -17,35 +17,30 @@ */ package org.apache.hadoop.hbase.client; -import static org.junit.Assert.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertArrayEquals; import java.io.IOException; import java.util.ArrayList; import java.util.List; import org.apache.hadoop.hbase.Cell; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.MediumTests; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import 
org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; /** * Testcase for HBASE-21032, where use the wrong readType from a Scan instance which is actually a * get scan and cause returning only 1 cell per rpc call. */ -@Category({ ClientTests.class, MediumTests.class }) +@Tag(ClientTests.TAG) +@Tag(MediumTests.TAG) public class TestGetScanColumnsWithNewVersionBehavior { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestGetScanColumnsWithNewVersionBehavior.class); - private final static HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); private static final TableName TABLE = TableName.valueOf("table"); private static final byte[] CF = { 'c', 'f' }; @@ -55,7 +50,7 @@ public class TestGetScanColumnsWithNewVersionBehavior { private static final byte[] COLC = { 'c' }; private static final long TS = 42; - @BeforeClass + @BeforeAll public static void setUp() throws Exception { TEST_UTIL.startMiniCluster(1); ColumnFamilyDescriptor cd = @@ -64,7 +59,7 @@ public static void setUp() throws Exception { null); } - @AfterClass + @AfterAll public static void tearDown() throws Exception { TEST_UTIL.shutdownMiniCluster(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestGetScanPartialResult.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestGetScanPartialResult.java index b390ebfc509f..bc245fb12bb1 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestGetScanPartialResult.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestGetScanPartialResult.java @@ -17,33 +17,28 @@ */ package org.apache.hadoop.hbase.client; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; import java.io.IOException; -import 
org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.Bytes; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; /** * Testcase for HBASE-21032, where use the wrong readType from a Scan instance which is actually a * get scan and cause returning only 1 cell per rpc call. */ -@Category({ ClientTests.class, MediumTests.class }) +@Tag(ClientTests.TAG) +@Tag(MediumTests.TAG) public class TestGetScanPartialResult { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestGetScanPartialResult.class); - private final static HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); private static final TableName TABLE = TableName.valueOf("table"); private static final byte[] CF = { 'c', 'f' }; @@ -51,13 +46,13 @@ public class TestGetScanPartialResult { private static final int VALUE_SIZE = 10000; private static final int NUM_COLUMNS = 300; - @BeforeClass + @BeforeAll public static void setUp() throws Exception { TEST_UTIL.startMiniCluster(); TEST_UTIL.createTable(TABLE, CF); } - @AfterClass + @AfterAll public static void tearDown() throws Exception { TEST_UTIL.shutdownMiniCluster(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHbck.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHbck.java index 360641e64b7d..1b4235496fc0 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHbck.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHbck.java @@ 
-17,10 +17,10 @@ */ package org.apache.hadoop.hbase.client; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotEquals; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertTrue; import java.io.IOException; import java.time.Instant; @@ -33,9 +33,10 @@ import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; +import java.util.stream.Stream; import org.apache.hadoop.hbase.Coprocessor; import org.apache.hadoop.hbase.CoprocessorEnvironment; -import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HBaseParameterizedTestTemplate; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; @@ -58,18 +59,12 @@ import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.util.Bytes; -import org.junit.AfterClass; -import org.junit.Before; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Rule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.rules.TestName; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -import org.junit.runners.Parameterized.Parameter; -import org.junit.runners.Parameterized.Parameters; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.TestTemplate; +import org.junit.jupiter.params.provider.Arguments; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ 
-79,21 +74,15 @@ * Class to test HBaseHbck. Spins up the minicluster once at test start and then takes it down * afterward. Add any testing of HBaseHbck functionality here. */ -@RunWith(Parameterized.class) -@Category({ LargeTests.class, ClientTests.class }) +@Tag(LargeTests.TAG) +@Tag(ClientTests.TAG) +@HBaseParameterizedTestTemplate(name = "{index}: async={0}") public class TestHbck { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestHbck.class); private static final Logger LOG = LoggerFactory.getLogger(TestHbck.class); private final static HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); - @Rule - public TestName name = new TestName(); - - @SuppressWarnings("checkstyle:VisibilityModifier") - @Parameter - public boolean async; + private final boolean async; private static final TableName TABLE_NAME = TableName.valueOf(TestHbck.class.getSimpleName()); @@ -101,9 +90,12 @@ public class TestHbck { private static AsyncConnection ASYNC_CONN; - @Parameters(name = "{index}: async={0}") - public static List params() { - return Arrays.asList(new Object[] { false }, new Object[] { true }); + public static Stream parameters() { + return Stream.of(Arguments.of(false), Arguments.of(true)); + } + + public TestHbck(boolean async) { + this.async = async; } private Hbck getHbck() throws Exception { @@ -114,7 +106,7 @@ private Hbck getHbck() throws Exception { } } - @BeforeClass + @BeforeAll public static void setUpBeforeClass() throws Exception { TEST_UTIL.startMiniCluster(3); TEST_UTIL.createMultiRegionTable(TABLE_NAME, 3, new byte[][] { Bytes.toBytes("family1") }); @@ -128,13 +120,13 @@ public static void setUpBeforeClass() throws Exception { TEST_UTIL.getHBaseCluster().getMaster().getConfiguration()); } - @AfterClass + @AfterAll public static void tearDownAfterClass() throws Exception { Closeables.close(ASYNC_CONN, true); TEST_UTIL.shutdownMiniCluster(); } - @Before + @BeforeEach public void setUp() throws IOException { 
TEST_UTIL.ensureSomeRegionServersAvailable(3); } @@ -163,7 +155,7 @@ public TableOperationType getTableOperationType() { } } - @Test + @TestTemplate public void testBypassProcedure() throws Exception { // SuspendProcedure final SuspendProcedure proc = new SuspendProcedure(); @@ -173,12 +165,12 @@ public void testBypassProcedure() throws Exception { // bypass the procedure List pids = Arrays. asList(procId); List results = getHbck().bypassProcedure(pids, 30000, false, false); - assertTrue("Failed to by pass procedure!", results.get(0)); + assertTrue(results.get(0), "Failed to by pass procedure!"); TEST_UTIL.waitFor(5000, () -> proc.isSuccess() && proc.isBypass()); LOG.info("{} finished", proc); } - @Test + @TestTemplate public void testSetTableStateInMeta() throws Exception { Hbck hbck = getHbck(); // set table state to DISABLED @@ -187,11 +179,11 @@ public void testSetTableStateInMeta() throws Exception { // will be DISABLED TableState prevState = hbck.setTableStateInMeta(new TableState(TABLE_NAME, TableState.State.ENABLED)); - assertTrue("Incorrect previous state! expected=DISABLED, found=" + prevState.getState(), - prevState.isDisabled()); + assertTrue(prevState.isDisabled(), + "Incorrect previous state! 
expected=DISABLED, found=" + prevState.getState()); } - @Test + @TestTemplate public void testSetRegionStateInMeta() throws Exception { Hbck hbck = getHbck(); Admin admin = TEST_UTIL.getAdmin(); @@ -213,14 +205,14 @@ public void testSetRegionStateInMeta() throws Exception { Map result = hbck.setRegionStateInMeta(requestStates); result.forEach((k, v) -> { RegionState.State beforeState = beforeStates.get(k); - assertEquals("response state should match before state; " + k, beforeState, v); + assertEquals(beforeState, v, "response state should match before state; " + k); }); regions.forEach(r -> { RegionState afterState = am.getRegionStates().getRegionState(r.getEncodedName()); RegionState.State expectedState = requestStates.get(r.getEncodedName()); LOG.debug("After test: {}, {}", r, afterState); - assertEquals("state in AM should match requested state ; " + r, expectedState, - afterState.getState()); + assertEquals(expectedState, afterState.getState(), + "state in AM should match requested state ; " + r); }); return null; }; @@ -229,7 +221,7 @@ public void testSetRegionStateInMeta() throws Exception { hbck.setRegionStateInMeta(beforeStates); } - @Test + @TestTemplate public void testAssigns() throws Exception { Hbck hbck = getHbck(); final AssignmentManager am = TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager(); @@ -274,7 +266,7 @@ public void testAssigns() throws Exception { RegionState rs = TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager() .getRegionStates().getRegionState(ri.getEncodedName()); LOG.info("RS: {}", rs.toString()); - assertTrue(rs.toString(), rs.isClosed()); + assertTrue(rs.isClosed(), rs.toString()); } pids = hbck.assigns(regions.stream().map(RegionInfo::getEncodedName).collect(Collectors.toList())); @@ -290,7 +282,7 @@ public void testAssigns() throws Exception { RegionState rs = TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager() .getRegionStates().getRegionState(ri.getEncodedName()); LOG.info("RS: {}", 
rs.toString()); - assertTrue(rs.toString(), rs.isOpened()); + assertTrue(rs.isOpened(), rs.toString()); } // Rerun the assign with override. Should fail for all Regions since they already assigned pids = hbck.assigns( @@ -307,7 +299,7 @@ public void testAssigns() throws Exception { } } - @Test + @TestTemplate public void testScheduleSCP() throws Exception { HRegionServer testRs = TEST_UTIL.getRSForFirstRegionInTable(TABLE_NAME); try (final Table t = TEST_UTIL.getConnection().getTable(TABLE_NAME)) { @@ -326,7 +318,7 @@ public void testScheduleSCP() throws Exception { waitOnPids(pids); } - @Test + @TestTemplate public void testRunHbckChore() throws Exception { HMaster master = TEST_UTIL.getMiniHBaseCluster().getMaster(); HbckChore hbckChore = master.getHbckChore(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestIllegalTableDescriptor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestIllegalTableDescriptor.java index 9b314ba87c57..b025bdc26b65 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestIllegalTableDescriptor.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestIllegalTableDescriptor.java @@ -17,9 +17,9 @@ */ package org.apache.hadoop.hbase.client; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; import static org.mockito.ArgumentMatchers.contains; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.verify; @@ -27,7 +27,6 @@ import java.io.IOException; import java.lang.reflect.Field; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; import 
org.apache.hadoop.hbase.TableName; @@ -38,37 +37,33 @@ import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.TableDescriptorChecker; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Rule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.rules.TestName; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestInfo; import org.slf4j.Logger; -@Category({ LargeTests.class, ClientTests.class }) +@Tag(LargeTests.TAG) +@Tag(ClientTests.TAG) public class TestIllegalTableDescriptor { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestIllegalTableDescriptor.class); - // NOTE: Increment tests were moved to their own class, TestIncrementsFromClientSide. 
- private static final Logger LOGGER; + private static final Logger LOGGER = mock(Logger.class); protected final static HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); private static byte[] FAMILY = Bytes.toBytes("testFamily"); - @Rule - public TestName name = new TestName(); + private String methodName; - static { - LOGGER = mock(Logger.class); + @BeforeEach + public void setUp(TestInfo testInfo) { + methodName = testInfo.getTestMethod().get().getName(); } - @BeforeClass + @BeforeAll public static void setUpBeforeClass() throws Exception { // replacing HMaster.LOG with our mock logger for verifying logging Field field = TableDescriptorChecker.class.getDeclaredField("LOG"); @@ -79,7 +74,7 @@ public static void setUpBeforeClass() throws Exception { TEST_UTIL.startMiniCluster(1); } - @AfterClass + @AfterAll public static void tearDownAfterClass() throws Exception { TEST_UTIL.shutdownMiniCluster(); } @@ -87,7 +82,7 @@ public static void tearDownAfterClass() throws Exception { @Test public void testIllegalTableDescriptor() throws Exception { TableDescriptorBuilder builder = - TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())); + TableDescriptorBuilder.newBuilder(TableName.valueOf(methodName)); ColumnFamilyDescriptorBuilder cfBuilder = ColumnFamilyDescriptorBuilder.newBuilder(FAMILY); // create table with 0 families @@ -194,7 +189,7 @@ public void testIllegalTableDescriptor() throws Exception { public void testIllegalTableDescriptorWithDataTiering() throws IOException { // table level configuration changes TableDescriptorBuilder builder = - TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())); + TableDescriptorBuilder.newBuilder(TableName.valueOf(methodName)); ColumnFamilyDescriptorBuilder cfBuilder = ColumnFamilyDescriptorBuilder.newBuilder(FAMILY); builder.setColumnFamily(cfBuilder.build()); @@ -214,7 +209,7 @@ public void testIllegalTableDescriptorWithDataTiering() throws IOException { // column family level 
configuration changes for (boolean viaSetValue : new boolean[] { false, true }) { - builder = TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())); + builder = TableDescriptorBuilder.newBuilder(TableName.valueOf(methodName)); cfBuilder = ColumnFamilyDescriptorBuilder.newBuilder(FAMILY); // First scenario: DataTieringType set to TIME_RANGE without DateTieredStoreEngine diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestIncreaseMetaReplicaThroughConfig.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestIncreaseMetaReplicaThroughConfig.java index f93fc9d5bf5d..4c5588b75982 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestIncreaseMetaReplicaThroughConfig.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestIncreaseMetaReplicaThroughConfig.java @@ -17,31 +17,26 @@ */ package org.apache.hadoop.hbase.client; -import static org.junit.Assert.assertEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableDescriptors; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.MiscTests; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; /** * Make sure we will honor the {@link HConstants#META_REPLICAS_NUM}.And also test upgrading. 
*/ -@Category({ MiscTests.class, MediumTests.class }) +@Tag(MiscTests.TAG) +@Tag(MediumTests.TAG) public class TestIncreaseMetaReplicaThroughConfig extends MetaWithReplicasTestBase { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestIncreaseMetaReplicaThroughConfig.class); - - @BeforeClass + @BeforeAll public static void setUp() throws Exception { startCluster(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestIncrementFromClientSideWithCoprocessor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestIncrementFromClientSideWithCoprocessor.java index c453e013ebc6..7179b4554e47 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestIncrementFromClientSideWithCoprocessor.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestIncrementFromClientSideWithCoprocessor.java @@ -18,28 +18,24 @@ package org.apache.hadoop.hbase.client; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.coprocessor.CoprocessorHost; import org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint; import org.apache.hadoop.hbase.regionserver.NoOpScanPolicyObserver; +import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.LargeTests; -import org.junit.Before; -import org.junit.ClassRule; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Tag; /** * Test all {@link Increment} client operations with a coprocessor that just implements the default * flush/compact/scan policy. This test takes a long time. The test it derives from is parameterized * so we run through both options of the test. 
*/ -@Category(LargeTests.class) +@Tag(LargeTests.TAG) +@Tag(ClientTests.TAG) public class TestIncrementFromClientSideWithCoprocessor extends TestIncrementsFromClientSide { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestIncrementFromClientSideWithCoprocessor.class); - - @Before + @BeforeEach public void before() throws Exception { Configuration conf = TEST_UTIL.getConfiguration(); conf.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestIncrementsFromClientSide.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestIncrementsFromClientSide.java index 1e8cdb2f3203..9893497ba420 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestIncrementsFromClientSide.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestIncrementsFromClientSide.java @@ -19,11 +19,11 @@ import static org.apache.hadoop.hbase.HConstants.RPC_CODEC_CONF_KEY; import static org.apache.hadoop.hbase.ipc.RpcClient.DEFAULT_CODEC_CLASS; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotEquals; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; import java.io.IOException; import java.util.ArrayList; @@ -37,7 +37,6 @@ import org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.ExtendedCellBuilderFactory; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; 
import org.apache.hadoop.hbase.KeyValue; @@ -48,15 +47,14 @@ import org.apache.hadoop.hbase.codec.KeyValueCodecWithTags; import org.apache.hadoop.hbase.coprocessor.CoprocessorHost; import org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint; +import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.util.Bytes; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Rule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.rules.TestName; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestInfo; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -67,13 +65,10 @@ * should be faster than starting/stopping a cluster per test. Test takes a long time because spin * up a cluster between each run -- ugh. */ -@Category(LargeTests.class) +@org.junit.jupiter.api.Tag(LargeTests.TAG) +@org.junit.jupiter.api.Tag(ClientTests.TAG) public class TestIncrementsFromClientSide { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestIncrementsFromClientSide.class); - final Logger LOG = LoggerFactory.getLogger(getClass()); protected final static HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); private static byte[] ROW = Bytes.toBytes("testRow"); @@ -82,10 +77,14 @@ public class TestIncrementsFromClientSide { // This test depends on there being only one slave running at at a time. See the @Before // method where we do rolling restart. 
protected static int SLAVES = 1; - @Rule - public TestName name = new TestName(); + private String methodName; + + @BeforeEach + public void setUp(TestInfo testInfo) { + methodName = testInfo.getTestMethod().get().getName(); + } - @BeforeClass + @BeforeAll public static void beforeClass() throws Exception { Configuration conf = TEST_UTIL.getConfiguration(); conf.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, @@ -94,10 +93,7 @@ public static void beforeClass() throws Exception { TEST_UTIL.startMiniCluster(SLAVES); } - /** - * @throws java.lang.Exception - */ - @AfterClass + @AfterAll public static void afterClass() throws Exception { TEST_UTIL.shutdownMiniCluster(); } @@ -107,8 +103,7 @@ public static void afterClass() throws Exception { */ @Test public void testDuplicateIncrement() throws Exception { - TableDescriptorBuilder builder = - TEST_UTIL.createModifyableTableDescriptor(name.getMethodName()); + TableDescriptorBuilder builder = TEST_UTIL.createModifyableTableDescriptor(methodName); Map kvs = new HashMap<>(); kvs.put(SleepAtFirstRpcCall.SLEEP_TIME_CONF_KEY, "2000"); builder.setCoprocessor(CoprocessorDescriptorBuilder @@ -120,9 +115,8 @@ public void testDuplicateIncrement() throws Exception { // Client will retry beacuse rpc timeout is small than the sleep time of first rpc call c.setInt(HConstants.HBASE_RPC_TIMEOUT_KEY, 1500); - try (Connection connection = ConnectionFactory.createConnection(c); - Table table = connection.getTableBuilder(TableName.valueOf(name.getMethodName()), null) - .setOperationTimeout(3 * 1000).build()) { + try (Connection connection = ConnectionFactory.createConnection(c); Table table = connection + .getTableBuilder(TableName.valueOf(methodName), null).setOperationTimeout(3 * 1000).build()) { Increment inc = new Increment(ROW); inc.addColumn(HBaseTestingUtil.fam1, QUALIFIER, 1); Result result = table.increment(inc); @@ -144,8 +138,7 @@ public void testDuplicateIncrement() throws Exception { */ @Test public void 
testDuplicateBatchIncrement() throws Exception { - TableDescriptorBuilder builder = - TEST_UTIL.createModifyableTableDescriptor(name.getMethodName()); + TableDescriptorBuilder builder = TEST_UTIL.createModifyableTableDescriptor(methodName); Map kvs = new HashMap<>(); kvs.put(SleepAtFirstRpcCall.SLEEP_TIME_CONF_KEY, "2000"); builder.setCoprocessor(CoprocessorDescriptorBuilder @@ -157,9 +150,8 @@ public void testDuplicateBatchIncrement() throws Exception { // Client will retry beacuse rpc timeout is small than the sleep time of first rpc call c.setInt(HConstants.HBASE_RPC_TIMEOUT_KEY, 1500); - try (Connection connection = ConnectionFactory.createConnection(c); - Table table = connection.getTableBuilder(TableName.valueOf(name.getMethodName()), null) - .setOperationTimeout(3 * 1000).build()) { + try (Connection connection = ConnectionFactory.createConnection(c); Table table = connection + .getTableBuilder(TableName.valueOf(methodName), null).setOperationTimeout(3 * 1000).build()) { Increment inc = new Increment(ROW); inc.addColumn(HBaseTestingUtil.fam1, QUALIFIER, 1); @@ -181,9 +173,8 @@ public void testDuplicateBatchIncrement() throws Exception { @Test public void testIncrementWithDeletes() throws Exception { - LOG.info("Starting " + this.name.getMethodName()); - final TableName TABLENAME = - TableName.valueOf(filterStringSoTableNameSafe(this.name.getMethodName())); + LOG.info("Starting " + methodName); + final TableName TABLENAME = TableName.valueOf(filterStringSoTableNameSafe(methodName)); Table ht = TEST_UTIL.createTable(TABLENAME, FAMILY); final byte[] COLUMN = Bytes.toBytes("column"); @@ -203,9 +194,8 @@ public void testIncrementWithDeletes() throws Exception { @Test public void testIncrementingInvalidValue() throws Exception { - LOG.info("Starting " + this.name.getMethodName()); - final TableName TABLENAME = - TableName.valueOf(filterStringSoTableNameSafe(this.name.getMethodName())); + LOG.info("Starting " + methodName); + final TableName TABLENAME = 
TableName.valueOf(filterStringSoTableNameSafe(methodName)); Table ht = TEST_UTIL.createTable(TABLENAME, FAMILY); final byte[] COLUMN = Bytes.toBytes("column"); Put p = new Put(ROW); @@ -231,7 +221,7 @@ public void testIncrementingInvalidValue() throws Exception { @Test public void testBatchIncrementsWithReturnResultFalse() throws Exception { LOG.info("Starting testBatchIncrementsWithReturnResultFalse"); - final TableName tableName = TableName.valueOf(name.getMethodName()); + final TableName tableName = TableName.valueOf(methodName); Table table = TEST_UTIL.createTable(tableName, FAMILY); Increment inc1 = new Increment(Bytes.toBytes("row2")); inc1.setReturnResults(false); @@ -254,9 +244,8 @@ public void testBatchIncrementsWithReturnResultFalse() throws Exception { @Test public void testIncrementInvalidArguments() throws Exception { - LOG.info("Starting " + this.name.getMethodName()); - final TableName TABLENAME = - TableName.valueOf(filterStringSoTableNameSafe(this.name.getMethodName())); + LOG.info("Starting " + methodName); + final TableName TABLENAME = TableName.valueOf(filterStringSoTableNameSafe(methodName)); Table ht = TEST_UTIL.createTable(TABLENAME, FAMILY); final byte[] COLUMN = Bytes.toBytes("column"); try { @@ -293,9 +282,8 @@ public void testIncrementInvalidArguments() throws Exception { @Test public void testIncrementOutOfOrder() throws Exception { - LOG.info("Starting " + this.name.getMethodName()); - final TableName TABLENAME = - TableName.valueOf(filterStringSoTableNameSafe(this.name.getMethodName())); + LOG.info("Starting " + methodName); + final TableName TABLENAME = TableName.valueOf(filterStringSoTableNameSafe(methodName)); Table ht = TEST_UTIL.createTable(TABLENAME, FAMILY); byte[][] QUALIFIERS = @@ -334,8 +322,8 @@ public void testIncrementOutOfOrder() throws Exception { @Test public void testIncrementOnSameColumn() throws Exception { - LOG.info("Starting " + this.name.getMethodName()); - final byte[] TABLENAME = 
Bytes.toBytes(filterStringSoTableNameSafe(this.name.getMethodName())); + LOG.info("Starting " + methodName); + final byte[] TABLENAME = Bytes.toBytes(filterStringSoTableNameSafe(methodName)); Table ht = TEST_UTIL.createTable(TableName.valueOf(TABLENAME), FAMILY); byte[][] QUALIFIERS = @@ -378,9 +366,8 @@ public void testIncrementOnSameColumn() throws Exception { @Test public void testIncrementIncrZeroAtFirst() throws Exception { - LOG.info("Starting " + this.name.getMethodName()); - final TableName TABLENAME = - TableName.valueOf(filterStringSoTableNameSafe(this.name.getMethodName())); + LOG.info("Starting " + methodName); + final TableName TABLENAME = TableName.valueOf(filterStringSoTableNameSafe(methodName)); Table ht = TEST_UTIL.createTable(TABLENAME, FAMILY); byte[] col1 = Bytes.toBytes("col1"); @@ -421,9 +408,8 @@ public void testIncrementIncrZeroAtFirst() throws Exception { @Test public void testIncrement() throws Exception { - LOG.info("Starting " + this.name.getMethodName()); - final TableName TABLENAME = - TableName.valueOf(filterStringSoTableNameSafe(this.name.getMethodName())); + LOG.info("Starting " + methodName); + final TableName TABLENAME = TableName.valueOf(filterStringSoTableNameSafe(methodName)); Table ht = TEST_UTIL.createTable(TABLENAME, FAMILY); byte[][] ROWS = new byte[][] { Bytes.toBytes("a"), Bytes.toBytes("b"), Bytes.toBytes("c"), @@ -505,7 +491,7 @@ public void testIncrement() throws Exception { @Test public void testIncrementWithCustomTimestamp() throws IOException { - TableName TABLENAME = TableName.valueOf(name.getMethodName()); + TableName TABLENAME = TableName.valueOf(methodName); Table table = TEST_UTIL.createTable(TABLENAME, FAMILY); long timestamp = 999; Increment increment = new Increment(ROW); @@ -543,9 +529,8 @@ public static String filterStringSoTableNameSafe(final String str) { */ @Test public void testIncrementWithTtlTags() throws Exception { - LOG.info("Starting " + this.name.getMethodName()); - final TableName tableName = - 
TableName.valueOf(filterStringSoTableNameSafe(this.name.getMethodName())); + LOG.info("Starting " + methodName); + final TableName tableName = TableName.valueOf(filterStringSoTableNameSafe(methodName)); Table ht = TEST_UTIL.createTable(tableName, FAMILY); final byte[] COLUMN = Bytes.toBytes("column"); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestIntraRowPagination.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestIntraRowPagination.java index a7230436f22d..b0243428add1 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestIntraRowPagination.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestIntraRowPagination.java @@ -20,7 +20,6 @@ import java.util.ArrayList; import java.util.List; import org.apache.hadoop.hbase.Cell; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HTestConst; import org.apache.hadoop.hbase.KeyValue; @@ -29,20 +28,16 @@ import org.apache.hadoop.hbase.regionserver.RegionScanner; import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.SmallTests; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; /** * Test scan/get offset and limit settings within one row through HRegion API. 
*/ -@Category({ SmallTests.class, ClientTests.class }) +@Tag(SmallTests.TAG) +@Tag(ClientTests.TAG) public class TestIntraRowPagination { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestIntraRowPagination.class); - private static HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); /** diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestInvalidMutationDurabilityException.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestInvalidMutationDurabilityException.java index 529f8c19cb27..64a4fe29b5b3 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestInvalidMutationDurabilityException.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestInvalidMutationDurabilityException.java @@ -17,7 +17,8 @@ */ package org.apache.hadoop.hbase.client; -import org.apache.hadoop.hbase.HBaseClassTestRule; +import static org.junit.jupiter.api.Assertions.assertThrows; + import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableName; @@ -25,19 +26,15 @@ import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.Bytes; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; -@Category({ MediumTests.class, ClientTests.class }) +@Tag(MediumTests.TAG) +@Tag(ClientTests.TAG) public class TestInvalidMutationDurabilityException { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestInvalidMutationDurabilityException.class); - private static final HBaseTestingUtil UTIL = new 
HBaseTestingUtil(); private static TableName TABLE_NOT_REPLICATE = TableName.valueOf("TableNotReplicate"); @@ -52,7 +49,7 @@ public class TestInvalidMutationDurabilityException { private static Table tableNeedReplicate; - @BeforeClass + @BeforeAll public static void setUp() throws Exception { UTIL.startMiniCluster(); UTIL.getAdmin().createTable(TableDescriptorBuilder.newBuilder(TABLE_NOT_REPLICATE) @@ -66,7 +63,7 @@ public static void setUp() throws Exception { tableNeedReplicate = UTIL.getConnection().getTable(TABLE_NEED_REPLICATE); } - @AfterClass + @AfterAll public static void tearDown() throws Exception { UTIL.getAdmin().disableTable(TABLE_NOT_REPLICATE); UTIL.getAdmin().disableTable(TABLE_NEED_REPLICATE); @@ -87,9 +84,10 @@ public void testPutToTableNotReplicate() throws Exception { tableNotReplicate.put(newPutWithSkipWAL()); } - @Test(expected = InvalidMutationDurabilityException.class) + @Test public void testPutToTableNeedReplicate() throws Exception { - tableNeedReplicate.put(newPutWithSkipWAL()); + assertThrows(InvalidMutationDurabilityException.class, + () -> tableNeedReplicate.put(newPutWithSkipWAL())); } private Delete newDeleteWithSkipWAL() { @@ -104,9 +102,10 @@ public void testDeleteToTableNotReplicate() throws Exception { tableNotReplicate.delete(newDeleteWithSkipWAL()); } - @Test(expected = InvalidMutationDurabilityException.class) + @Test public void testDeleteToTableNeedReplicate() throws Exception { - tableNeedReplicate.delete(newDeleteWithSkipWAL()); + assertThrows(InvalidMutationDurabilityException.class, + () -> tableNeedReplicate.delete(newDeleteWithSkipWAL())); } private Append newAppendWithSkipWAL() { @@ -121,9 +120,10 @@ public void testAppendToTableNotReplicate() throws Exception { tableNotReplicate.append(newAppendWithSkipWAL()); } - @Test(expected = InvalidMutationDurabilityException.class) + @Test public void testAppendToTableNeedReplicate() throws Exception { - tableNeedReplicate.append(newAppendWithSkipWAL()); + 
assertThrows(InvalidMutationDurabilityException.class, + () -> tableNeedReplicate.append(newAppendWithSkipWAL())); } private Increment newIncrementWithSkipWAL() { @@ -138,9 +138,10 @@ public void testIncrementToTableNotReplicate() throws Exception { tableNotReplicate.increment(newIncrementWithSkipWAL()); } - @Test(expected = InvalidMutationDurabilityException.class) + @Test public void testIncrementToTableNeedReplicate() throws Exception { - tableNeedReplicate.increment(newIncrementWithSkipWAL()); + assertThrows(InvalidMutationDurabilityException.class, + () -> tableNeedReplicate.increment(newIncrementWithSkipWAL())); } @Test @@ -149,9 +150,10 @@ public void testCheckWithMutateToTableNotReplicate() throws Exception { .thenPut(newPutWithSkipWAL()); } - @Test(expected = InvalidMutationDurabilityException.class) + @Test public void testCheckWithMutateToTableNeedReplicate() throws Exception { - tableNeedReplicate.checkAndMutate(Bytes.toBytes("row"), CF).qualifier(CQ).ifNotExists() - .thenPut(newPutWithSkipWAL()); + assertThrows(InvalidMutationDurabilityException.class, + () -> tableNeedReplicate.checkAndMutate(Bytes.toBytes("row"), CF).qualifier(CQ).ifNotExists() + .thenPut(newPutWithSkipWAL())); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestLimitedScanWithFilter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestLimitedScanWithFilter.java index cd626be47b7e..05c032359dcd 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestLimitedScanWithFilter.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestLimitedScanWithFilter.java @@ -17,23 +17,21 @@ */ package org.apache.hadoop.hbase.client; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNull; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static 
org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertTrue; import java.io.IOException; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.Bytes; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; /** * With filter we may stop at a middle of row and think that we still have more cells for the @@ -41,13 +39,10 @@ * lead to a Result that mayHaveMoreCellsInRow is true but actually there are no cells for the same * row. Here we want to test if our limited scan still works. 
*/ -@Category({ MediumTests.class, ClientTests.class }) +@Tag(MediumTests.TAG) +@Tag(ClientTests.TAG) public class TestLimitedScanWithFilter { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestLimitedScanWithFilter.class); - private static final HBaseTestingUtil UTIL = new HBaseTestingUtil(); private static final TableName TABLE_NAME = TableName.valueOf("TestRegionScanner"); @@ -59,7 +54,7 @@ public class TestLimitedScanWithFilter { private static int ROW_COUNT = 10; - @BeforeClass + @BeforeAll public static void setUp() throws Exception { UTIL.startMiniCluster(1); try (Table table = UTIL.createTable(TABLE_NAME, FAMILY)) { @@ -73,7 +68,7 @@ public static void setUp() throws Exception { } } - @AfterClass + @AfterAll public static void tearDown() throws Exception { UTIL.shutdownMiniCluster(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMalformedCellFromClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMalformedCellFromClient.java index 1497f3590804..519d358a3169 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMalformedCellFromClient.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMalformedCellFromClient.java @@ -17,11 +17,11 @@ */ package org.apache.hadoop.hbase.client; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; import java.io.IOException; import java.util.ArrayList; @@ -33,7 +33,6 @@ import org.apache.hadoop.hbase.CompareOperator; import 
org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.hadoop.hbase.ExtendedCell; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.PrivateCellUtil; @@ -44,13 +43,12 @@ import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.Bytes; -import org.junit.After; -import org.junit.AfterClass; -import org.junit.Before; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; import org.mockito.Mockito; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -63,26 +61,24 @@ /** * The purpose of this test is to ensure whether rs deals with the malformed cells correctly. 
*/ -@Category({ MediumTests.class, ClientTests.class }) +@Tag(MediumTests.TAG) +@Tag(ClientTests.TAG) public class TestMalformedCellFromClient { private static final Logger LOG = LoggerFactory.getLogger(TestMalformedCellFromClient.class); - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestMalformedCellFromClient.class); private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); private static final byte[] FAMILY = Bytes.toBytes("testFamily"); private static final int CELL_SIZE = 100; private static final TableName TABLE_NAME = TableName.valueOf("TestMalformedCellFromClient"); - @BeforeClass + @BeforeAll public static void setUpBeforeClass() throws Exception { // disable the retry TEST_UTIL.getConfiguration().setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 0); TEST_UTIL.startMiniCluster(1); } - @Before + @BeforeEach public void before() throws Exception { TableDescriptor desc = TableDescriptorBuilder.newBuilder(TABLE_NAME) .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY)) @@ -90,14 +86,14 @@ public void before() throws Exception { TEST_UTIL.getConnection().getAdmin().createTable(desc); } - @After + @AfterEach public void tearDown() throws Exception { for (TableDescriptor htd : TEST_UTIL.getAdmin().listTableDescriptors()) { TEST_UTIL.deleteTable(htd.getTableName()); } } - @AfterClass + @AfterAll public static void tearDownAfterClass() throws Exception { TEST_UTIL.shutdownMiniCluster(); } @@ -282,7 +278,7 @@ public void testNonAtomicOperations() throws InterruptedException, IOException { } catch (RetriesExhaustedException e) { Throwable error = e.getCause(); for (;;) { - assertNotNull("Can not find a DoNotRetryIOException on stack trace", error); + assertNotNull(error, "Can not find a DoNotRetryIOException on stack trace"); if (error instanceof DoNotRetryIOException) { break; } @@ -318,7 +314,7 @@ public void testRowMutations() throws InterruptedException, IOException { } catch 
(RetriesExhaustedException e) { Throwable error = e.getCause(); for (;;) { - assertNotNull("Can not find a DoNotRetryIOException on stack trace", error); + assertNotNull(error, "Can not find a DoNotRetryIOException on stack trace"); if (error instanceof DoNotRetryIOException) { break; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMasterRegistry.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMasterRegistry.java index d79603cea3cc..afd58f7785fe 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMasterRegistry.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMasterRegistry.java @@ -17,9 +17,9 @@ */ package org.apache.hadoop.hbase.client; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; import java.util.ArrayList; import java.util.Arrays; @@ -28,7 +28,6 @@ import java.util.List; import java.util.Set; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionLocation; @@ -40,23 +39,20 @@ import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.MediumTests; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; 
-@Category({ MediumTests.class, ClientTests.class }) +@Tag(MediumTests.TAG) +@Tag(ClientTests.TAG) public class TestMasterRegistry { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestMasterRegistry.class); private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); - @BeforeClass + @BeforeAll public static void setUp() throws Exception { StartTestingClusterOption.Builder builder = StartTestingClusterOption.builder(); builder.numMasters(3).numRegionServers(3); @@ -64,7 +60,7 @@ public static void setUp() throws Exception { HBaseTestingUtil.setReplicas(TEST_UTIL.getAdmin(), TableName.META_TABLE_NAME, 3); } - @AfterClass + @AfterAll public static void tearDown() throws Exception { TEST_UTIL.shutdownMiniCluster(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaCache.java index ce52918bfe42..90016f27cb8e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaCache.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaCache.java @@ -17,11 +17,11 @@ */ package org.apache.hadoop.hbase.client; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertNull; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; import java.io.IOException; import java.util.Arrays; @@ -31,7 +31,6 @@ import java.util.stream.IntStream; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.CallQueueTooBigException; -import 
org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.MultiActionResultTooLarge; @@ -47,13 +46,12 @@ import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.Bytes; -import org.junit.After; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.function.ThrowingRunnable; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.function.Executable; import org.apache.hbase.thirdparty.com.google.common.io.Closeables; import org.apache.hbase.thirdparty.com.google.protobuf.RpcController; @@ -63,13 +61,10 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.GetResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos; -@Category({ MediumTests.class, ClientTests.class }) +@Tag(MediumTests.TAG) +@Tag(ClientTests.TAG) public class TestMetaCache { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestMetaCache.class); - private final static HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); private static final TableName TABLE_NAME = TableName.valueOf("test_table"); private static final byte[] FAMILY = Bytes.toBytes("fam1"); @@ -81,7 +76,7 @@ public class TestMetaCache { private MetricsConnection metrics; private AsyncRegionLocator locator; - @BeforeClass + @BeforeAll public static void setUpBeforeClass() throws Exception { Configuration conf = TEST_UTIL.getConfiguration(); conf.setStrings(HConstants.REGION_SERVER_IMPL, RegionServerWithFakeRpcServices.class.getName()); @@ -96,12 
+91,12 @@ public static void setUpBeforeClass() throws Exception { TEST_UTIL.createTable(desc, null); } - @AfterClass + @AfterAll public static void tearDownAfterClass() throws Exception { TEST_UTIL.shutdownMiniCluster(); } - @After + @AfterEach public void tearDown() throws IOException { Closeables.close(conn, true); } @@ -210,10 +205,10 @@ public void testMergeEmptyWithMetaCache() throws Throwable { } } - private long executeAndGetNewMisses(ThrowingRunnable runnable, MetricsConnection metrics) + private long executeAndGetNewMisses(Executable runnable, MetricsConnection metrics) throws Throwable { long lastVal = metrics.getMetaCacheMisses(); - runnable.run(); + runnable.execute(); long curVal = metrics.getMetaCacheMisses(); return curVal - lastVal; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaRegionLocationCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaRegionLocationCache.java index 29223dea5dbe..b44c55b1eefd 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaRegionLocationCache.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaRegionLocationCache.java @@ -17,15 +17,14 @@ */ package org.apache.hadoop.hbase.client; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; import java.util.ArrayList; import java.util.Collections; import java.util.List; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionLocation; @@ -43,24 +42,20 @@ import org.apache.hadoop.hbase.zookeeper.ZKUtil; import 
org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.apache.hadoop.hbase.zookeeper.ZNodePaths; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; import org.apache.hbase.thirdparty.com.google.common.io.Closeables; -@Category({ SmallTests.class, MasterTests.class }) +@Tag(SmallTests.TAG) +@Tag(MasterTests.TAG) public class TestMetaRegionLocationCache { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestMetaRegionLocationCache.class); - private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); private static ConnectionRegistry REGISTRY; - @BeforeClass + @BeforeAll public static void setUp() throws Exception { TEST_UTIL.startMiniCluster(3); HBaseTestingUtil.setReplicas(TEST_UTIL.getAdmin(), TableName.META_TABLE_NAME, 3); @@ -69,7 +64,7 @@ public static void setUp() throws Exception { TEST_UTIL.getAdmin().balancerSwitch(false, true); } - @AfterClass + @AfterAll public static void cleanUp() throws Exception { Closeables.close(REGISTRY, true); TEST_UTIL.shutdownMiniCluster(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaReplicasAddressChange.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaReplicasAddressChange.java index 023acc5f8140..ed5419be53fa 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaReplicasAddressChange.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaReplicasAddressChange.java @@ -17,14 +17,13 @@ */ package org.apache.hadoop.hbase.client; -import static org.junit.Assert.assertNotEquals; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertNotEquals; +import static 
org.junit.jupiter.api.Assertions.assertTrue; import java.util.Collection; import java.util.EnumSet; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.ClusterMetrics.Option; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; @@ -33,25 +32,21 @@ import org.apache.hadoop.hbase.zookeeper.ZKUtil; import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.apache.hadoop.hbase.zookeeper.ZNodePaths; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; -@Category({ MiscTests.class, MediumTests.class }) +@Tag(MiscTests.TAG) +@Tag(MediumTests.TAG) public class TestMetaReplicasAddressChange extends MetaWithReplicasTestBase { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestMetaReplicasAddressChange.class); - private static final Logger LOG = LoggerFactory.getLogger(TestMetaReplicasAddressChange.class); - @BeforeClass + @BeforeAll public static void setUp() throws Exception { startCluster(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaTableAccessorNoCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaTableAccessorNoCluster.java index 8236c6a88b71..8006d442c888 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaTableAccessorNoCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaTableAccessorNoCluster.java @@ -17,46 +17,41 @@ */ package org.apache.hadoop.hbase.client; -import static org.junit.Assert.assertNotNull; -import static 
org.junit.Assert.assertNull; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertTrue; import java.io.IOException; import java.util.ArrayList; import java.util.List; import org.apache.hadoop.hbase.CatalogFamilyFormat; import org.apache.hadoop.hbase.Cell; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.MiscTests; -import org.junit.After; -import org.junit.Before; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; /** * Test MetaTableAccessor but without spinning up a cluster. We mock regionserver back and forth (we * do spin up a zk cluster). 
*/ -@Category({ MiscTests.class, MediumTests.class }) +@Tag(MiscTests.TAG) +@Tag(MediumTests.TAG) public class TestMetaTableAccessorNoCluster { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestMetaTableAccessorNoCluster.class); - private static final HBaseTestingUtil UTIL = new HBaseTestingUtil(); - @Before + @BeforeEach public void before() throws Exception { UTIL.startMiniZKCluster(); } - @After + @AfterEach public void after() throws IOException { UTIL.shutdownMiniZKCluster(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaWithReplicasBasic.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaWithReplicasBasic.java index eae7078639d1..b4fc988e34b8 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaWithReplicasBasic.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaWithReplicasBasic.java @@ -17,12 +17,11 @@ */ package org.apache.hadoop.hbase.client; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertThrows; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableNotFoundException; @@ -32,21 +31,17 @@ import org.apache.hadoop.hbase.zookeeper.ZKUtil; import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.apache.hadoop.hbase.zookeeper.ZNodePaths; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Tag; +import 
org.junit.jupiter.api.Test; import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; -@Category({ MiscTests.class, MediumTests.class }) +@Tag(MiscTests.TAG) +@Tag(MediumTests.TAG) public class TestMetaWithReplicasBasic extends MetaWithReplicasTestBase { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestMetaWithReplicasBasic.class); - - @BeforeClass + @BeforeAll public static void setUp() throws Exception { startCluster(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaWithReplicasShutdownHandling.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaWithReplicasShutdownHandling.java index 267d618d03d1..59abca5f5f58 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaWithReplicasShutdownHandling.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaWithReplicasShutdownHandling.java @@ -17,13 +17,12 @@ */ package org.apache.hadoop.hbase.client; -import static org.junit.Assert.assertArrayEquals; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; import java.util.Arrays; import java.util.List; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionLocation; @@ -37,26 +36,22 @@ import org.apache.hadoop.hbase.zookeeper.ZKUtil; import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.apache.hadoop.hbase.zookeeper.ZNodePaths; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; 
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; -@Category({ MiscTests.class, MediumTests.class }) +@Tag(MiscTests.TAG) +@Tag(MediumTests.TAG) public class TestMetaWithReplicasShutdownHandling extends MetaWithReplicasTestBase { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestMetaWithReplicasShutdownHandling.class); - private static final Logger LOG = LoggerFactory.getLogger(TestMetaWithReplicasShutdownHandling.class); - @BeforeClass + @BeforeAll public static void setUp() throws Exception { startCluster(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMobSnapshotCloneIndependence.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMobSnapshotCloneIndependence.java index ca9247938cf4..648a63b7c9e6 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMobSnapshotCloneIndependence.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMobSnapshotCloneIndependence.java @@ -18,38 +18,30 @@ package org.apache.hadoop.hbase.client; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.mob.MobConstants; import org.apache.hadoop.hbase.snapshot.MobSnapshotTestingUtils; import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils; +import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.LargeTests; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Tag; /** * Test to verify that the cloned table is independent of the table from which it was cloned */ -@Category(LargeTests.class) -public class TestMobSnapshotCloneIndependence extends TestSnapshotCloneIndependence { +@Tag(LargeTests.TAG) +@Tag(ClientTests.TAG) +public class 
TestMobSnapshotCloneIndependence extends SnapshotCloneIndependenceTestBase { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestMobSnapshotCloneIndependence.class); - - /** - * Setup the config for the cluster and start it - * @throws Exception on failure - */ - @BeforeClass + @BeforeAll public static void setupCluster() throws Exception { setupConf(UTIL.getConfiguration()); UTIL.startMiniCluster(NUM_RS); } protected static void setupConf(Configuration conf) { - TestSnapshotCloneIndependence.setupConf(conf); + SnapshotCloneIndependenceTestBase.setupConf(conf); conf.setInt(MobConstants.MOB_FILE_CACHE_SIZE_KEY, 0); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMobSnapshotFromClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMobSnapshotFromClient.java index 1077a80d4fa9..72b302c66e30 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMobSnapshotFromClient.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMobSnapshotFromClient.java @@ -18,39 +18,41 @@ package org.apache.hadoop.hbase.client; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HBaseParameterizedTestTemplate; import org.apache.hadoop.hbase.mob.MobConstants; +import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerFactory; import org.apache.hadoop.hbase.snapshot.MobSnapshotTestingUtils; import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.LargeTests; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Tag; /** * Test create/using/deleting snapshots from the client *

* This is an end-to-end test for the snapshot utility */ -@Category({ LargeTests.class, ClientTests.class }) -public class TestMobSnapshotFromClient extends TestSnapshotFromClient { +@Tag(LargeTests.TAG) +@Tag(ClientTests.TAG) +@HBaseParameterizedTestTemplate(name = "{index}: tracker={0}") +public class TestMobSnapshotFromClient extends SnapshotFromClientTestBase { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestMobSnapshotFromClient.class); + public TestMobSnapshotFromClient(StoreFileTrackerFactory.Trackers trackerImpl) { + super(trackerImpl); + } /** * Setup the config for the cluster * @throws Exception on failure */ - @BeforeClass + @BeforeAll public static void setupCluster() throws Exception { setupConf(UTIL.getConfiguration()); UTIL.startMiniCluster(NUM_RS); } protected static void setupConf(Configuration conf) { - TestSnapshotFromClient.setupConf(conf); + SnapshotFromClientTestBase.setupConf(conf); conf.setInt(MobConstants.MOB_FILE_CACHE_SIZE_KEY, 0); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultiActionMetricsFromClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultiActionMetricsFromClient.java index 55646c35e435..4ec644e73682 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultiActionMetricsFromClient.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultiActionMetricsFromClient.java @@ -17,34 +17,29 @@ */ package org.apache.hadoop.hbase.client; -import static org.junit.Assert.assertEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.SmallTests; import 
org.apache.hadoop.hbase.util.Bytes; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; -@Category({ SmallTests.class, ClientTests.class }) +@Tag(SmallTests.TAG) +@Tag(ClientTests.TAG) public class TestMultiActionMetricsFromClient { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestMultiActionMetricsFromClient.class); - private final static HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); private static final TableName TABLE_NAME = TableName.valueOf("test_table"); private static final byte[] FAMILY = Bytes.toBytes("fam1"); private static final byte[] QUALIFIER = Bytes.toBytes("qual"); - @BeforeClass + @BeforeAll public static void setUpBeforeClass() throws Exception { TEST_UTIL.startMiniCluster(1); TEST_UTIL.getHBaseCluster().waitForActiveAndReadyMaster(); @@ -52,7 +47,7 @@ public static void setUpBeforeClass() throws Exception { TEST_UTIL.createTable(TABLE_NAME, FAMILY); } - @AfterClass + @AfterAll public static void tearDownAfterClass() throws Exception { TEST_UTIL.shutdownMiniCluster(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultiParallel.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultiParallel.java index 2b7ba33c7245..74911bd476fc 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultiParallel.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultiParallel.java @@ -17,9 +17,10 @@ */ package org.apache.hadoop.hbase.client; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static 
org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; import java.io.IOException; import java.util.ArrayList; @@ -30,7 +31,6 @@ import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.CoprocessorEnvironment; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableName; @@ -46,23 +46,18 @@ import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.JVMClusterUtil; -import org.junit.AfterClass; -import org.junit.Assert; -import org.junit.Before; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({ MediumTests.class, FlakeyTests.class }) +@Tag(MediumTests.TAG) +@Tag(FlakeyTests.TAG) public class TestMultiParallel { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestMultiParallel.class); - private static final Logger LOG = LoggerFactory.getLogger(TestMultiParallel.class); private static final HBaseTestingUtil UTIL = new HBaseTestingUtil(); @@ -77,7 +72,7 @@ public class TestMultiParallel { private static final int slaves = 5; // also used for testing HTable pool size private static Connection CONNECTION; - @BeforeClass + @BeforeAll public static void beforeClass() throws Exception { // Uncomment the following lines if more verbosity is needed for // debugging (see HBASE-12285 for details). 
@@ -101,13 +96,13 @@ public static void beforeClass() throws Exception { assertTrue(MyMasterObserver.start.get()); } - @AfterClass + @AfterAll public static void afterClass() throws Exception { CONNECTION.close(); UTIL.shutdownMiniCluster(); } - @Before + @BeforeEach public void before() throws Exception { final int balanceCount = MyMasterObserver.postBalanceCount.get(); LOG.info("before"); @@ -187,14 +182,14 @@ public void testBatchWithGet() throws Exception { singleRes.add(table.get((Get) get)); } // Compare results - Assert.assertEquals(singleRes.size(), multiRes.length); + assertEquals(singleRes.size(), multiRes.length); for (int i = 0; i < singleRes.size(); i++) { - Assert.assertTrue(singleRes.get(i).containsColumn(BYTES_FAMILY, QUALIFIER)); + assertTrue(singleRes.get(i).containsColumn(BYTES_FAMILY, QUALIFIER)); Cell[] singleKvs = singleRes.get(i).rawCells(); Cell[] multiKvs = multiRes[i].rawCells(); for (int j = 0; j < singleKvs.length; j++) { - Assert.assertEquals(singleKvs[j], multiKvs[j]); - Assert.assertEquals(0, + assertEquals(singleKvs[j], multiKvs[j]); + assertEquals(0, Bytes.compareTo(CellUtil.cloneValue(singleKvs[j]), CellUtil.cloneValue(multiKvs[j]))); } } @@ -289,8 +284,8 @@ private void doTestFlushCommits(boolean doAbort) throws Exception { LOG.info("Count=" + count + ", Alive=" + t.getRegionServer()); } LOG.info("Count=" + count); - Assert.assertEquals("Server count=" + count + ", abort=" + doAbort, - (doAbort ? (liveRScount - 1) : liveRScount), count); + assertEquals((doAbort ? 
(liveRScount - 1) : liveRScount), count, + "Server count=" + count + ", abort=" + doAbort); if (doAbort) { UTIL.getMiniHBaseCluster().waitOnRegionServer(0); UTIL.waitFor(15 * 1000, new Waiter.Predicate() { @@ -367,7 +362,7 @@ public void testBatchWithDelete() throws Exception { for (byte[] k : KEYS) { Get get = new Get(k); get.addColumn(BYTES_FAMILY, QUALIFIER); - Assert.assertFalse(table.exists(get)); + assertFalse(table.exists(get)); } table.close(); } @@ -396,7 +391,7 @@ public void testHTableDeleteWithList() throws Exception { for (byte[] k : KEYS) { Get get = new Get(k); get.addColumn(BYTES_FAMILY, QUALIFIER); - Assert.assertFalse(table.exists(get)); + assertFalse(table.exists(get)); } table.close(); } @@ -585,7 +580,7 @@ private void validateResult(Object r) { private void validateResult(Object r1, byte[] qual, byte[] val) { Result r = (Result) r1; - Assert.assertTrue(r.containsColumn(BYTES_FAMILY, qual)); + assertTrue(r.containsColumn(BYTES_FAMILY, qual)); byte[] value = r.getValue(BYTES_FAMILY, qual); if (0 != Bytes.compareTo(val, value)) { fail("Expected [" + Bytes.toStringBinary(val) + "] but got [" + Bytes.toStringBinary(value) @@ -638,8 +633,8 @@ private void validateLoadedData(Table table) throws IOException { } else { if (results != null) { for (Result r : results) { - Assert.assertTrue(r.containsColumn(BYTES_FAMILY, QUALIFIER)); - Assert.assertEquals(0, Bytes.compareTo(VALUE, r.getValue(BYTES_FAMILY, QUALIFIER))); + assertTrue(r.containsColumn(BYTES_FAMILY, QUALIFIER)); + assertEquals(0, Bytes.compareTo(VALUE, r.getValue(BYTES_FAMILY, QUALIFIER))); } LOG.info("Validating data on " + table + " successfully!"); } @@ -648,13 +643,13 @@ private void validateLoadedData(Table table) throws IOException { private void validateEmpty(Object r1) { Result result = (Result) r1; - Assert.assertTrue(result != null); - Assert.assertTrue(result.isEmpty()); + assertTrue(result != null); + assertTrue(result.isEmpty()); } private void validateSizeAndEmpty(Object[] 
results, int expectedSize) { // Validate got back the same number of Result objects, all empty - Assert.assertEquals(expectedSize, results.length); + assertEquals(expectedSize, results.length); for (Object result : results) { validateEmpty(result); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultiRespectsLimits.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultiRespectsLimits.java index 0a42878c7cf8..ec8cf2ca14d1 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultiRespectsLimits.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultiRespectsLimits.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hbase.client; -import static org.junit.Assert.assertEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; import java.io.IOException; import java.util.ArrayList; @@ -27,7 +27,6 @@ import org.apache.hadoop.hbase.CellBuilderType; import org.apache.hadoop.hbase.CompareOperator; import org.apache.hadoop.hbase.CompatibilityFactory; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableName; @@ -44,25 +43,21 @@ import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.Bytes; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Rule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.rules.TestName; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestInfo; /** * This test sets the multi size WAAAAAY low and then checks to make sure that gets will still make * 
progress. */ -@Category({ MediumTests.class, ClientTests.class }) +@Tag(MediumTests.TAG) +@Tag(ClientTests.TAG) public class TestMultiRespectsLimits { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestMultiRespectsLimits.class); - private final static HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); private static final MetricsAssertHelper METRICS_ASSERT = CompatibilityFactory.getInstance(MetricsAssertHelper.class); @@ -70,10 +65,14 @@ public class TestMultiRespectsLimits { public static final int MAX_SIZE = 90; private static String LOG_LEVEL; - @Rule - public TestName name = new TestName(); + private String methodName; + + @BeforeEach + public void setUp(TestInfo testInfo) { + methodName = testInfo.getTestMethod().get().getName(); + } - @BeforeClass + @BeforeAll public static void setUpBeforeClass() throws Exception { // disable the debug log to avoid flooding the output LOG_LEVEL = Log4jUtils.getEffectiveLevel(AsyncRegionLocatorHelper.class.getName()); @@ -85,7 +84,7 @@ public static void setUpBeforeClass() throws Exception { TEST_UTIL.startMiniCluster(1); } - @AfterClass + @AfterAll public static void tearDownAfterClass() throws Exception { if (LOG_LEVEL != null) { Log4jUtils.setLogLevel(AsyncRegionLocatorHelper.class.getName(), LOG_LEVEL); @@ -95,7 +94,7 @@ public static void tearDownAfterClass() throws Exception { @Test public void testMultiLimits() throws Exception { - final TableName tableName = TableName.valueOf(name.getMethodName()); + final TableName tableName = TableName.valueOf(methodName); Table t = TEST_UTIL.createTable(tableName, FAMILY); TEST_UTIL.loadTable(t, FAMILY, false); @@ -134,7 +133,7 @@ public boolean evaluate() throws Exception { @Test public void testBlockMultiLimits() throws Exception { - final TableName tableName = TableName.valueOf(name.getMethodName()); + final TableName tableName = TableName.valueOf(methodName); TEST_UTIL.getAdmin().createTable( 
TableDescriptorBuilder.newBuilder(tableName).setColumnFamily(ColumnFamilyDescriptorBuilder .newBuilder(FAMILY).setDataBlockEncoding(DataBlockEncoding.FAST_DIFF).build()).build()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultipleTimestamps.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultipleTimestamps.java index 89babd919dd4..fa35dd3ed3f1 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultipleTimestamps.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultipleTimestamps.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hbase.client; -import static org.junit.Assert.*; +import static org.junit.jupiter.api.Assertions.assertEquals; import java.io.IOException; import java.util.Arrays; @@ -25,21 +25,17 @@ import java.util.List; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.util.Bytes; -import org.junit.After; -import org.junit.AfterClass; -import org.junit.Before; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Rule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.rules.TestName; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestInfo; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -48,54 +44,33 @@ * APIs. Sets up the HBase mini cluster once at start. Each creates a table named for the method and * does its stuff against that. 
*/ -@Category({ LargeTests.class, ClientTests.class }) +@Tag(LargeTests.TAG) +@Tag(ClientTests.TAG) public class TestMultipleTimestamps { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestMultipleTimestamps.class); - private static final Logger LOG = LoggerFactory.getLogger(TestMultipleTimestamps.class); private final static HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); - @Rule - public TestName name = new TestName(); + private String methodName; - /** - * @throws java.lang.Exception - */ - @BeforeClass + @BeforeEach + public void setUp(TestInfo testInfo) { + methodName = testInfo.getTestMethod().get().getName(); + } + + @BeforeAll public static void setUpBeforeClass() throws Exception { TEST_UTIL.startMiniCluster(); } - /** - * @throws java.lang.Exception - */ - @AfterClass + @AfterAll public static void tearDownAfterClass() throws Exception { TEST_UTIL.shutdownMiniCluster(); } - /** - * @throws java.lang.Exception - */ - @Before - public void setUp() throws Exception { - // Nothing to do. - } - - /** - * @throws java.lang.Exception - */ - @After - public void tearDown() throws Exception { - // Nothing to do. 
- } - @Test public void testReseeksWithOneColumnMiltipleTimestamp() throws IOException { - final TableName tableName = TableName.valueOf(name.getMethodName()); + final TableName tableName = TableName.valueOf(methodName); byte[] FAMILY = Bytes.toBytes("event_log"); byte[][] FAMILIES = new byte[][] { FAMILY }; @@ -134,8 +109,8 @@ public void testReseeksWithOneColumnMiltipleTimestamp() throws IOException { @Test public void testReseeksWithMultipleColumnOneTimestamp() throws IOException { - LOG.info(name.getMethodName()); - final TableName tableName = TableName.valueOf(name.getMethodName()); + LOG.info(methodName); + final TableName tableName = TableName.valueOf(methodName); byte[] FAMILY = Bytes.toBytes("event_log"); byte[][] FAMILIES = new byte[][] { FAMILY }; @@ -172,9 +147,9 @@ public void testReseeksWithMultipleColumnOneTimestamp() throws IOException { @Test public void testReseeksWithMultipleColumnMultipleTimestamp() throws IOException { - LOG.info(name.getMethodName()); + LOG.info(methodName); - final TableName tableName = TableName.valueOf(name.getMethodName()); + final TableName tableName = TableName.valueOf(methodName); byte[] FAMILY = Bytes.toBytes("event_log"); byte[][] FAMILIES = new byte[][] { FAMILY }; @@ -225,8 +200,8 @@ public void testReseeksWithMultipleColumnMultipleTimestamp() throws IOException @Test public void testReseeksWithMultipleFiles() throws IOException { - LOG.info(name.getMethodName()); - final TableName tableName = TableName.valueOf(name.getMethodName()); + LOG.info(methodName); + final TableName tableName = TableName.valueOf(methodName); byte[] FAMILY = Bytes.toBytes("event_log"); byte[][] FAMILIES = new byte[][] { FAMILY }; @@ -292,9 +267,9 @@ public void testWithVersionDeletes() throws Exception { } public void testWithVersionDeletes(boolean flushTables) throws IOException { - LOG.info(name.getMethodName() + "_" + (flushTables ? "flush" : "noflush")); + LOG.info(methodName + "_" + (flushTables ? 
"flush" : "noflush")); final TableName tableName = - TableName.valueOf(name.getMethodName() + "_" + (flushTables ? "flush" : "noflush")); + TableName.valueOf(methodName + "_" + (flushTables ? "flush" : "noflush")); byte[] FAMILY = Bytes.toBytes("event_log"); byte[][] FAMILIES = new byte[][] { FAMILY }; @@ -324,9 +299,9 @@ public void testWithVersionDeletes(boolean flushTables) throws IOException { @Test public void testWithMultipleVersionDeletes() throws IOException { - LOG.info(name.getMethodName()); + LOG.info(methodName); - final TableName tableName = TableName.valueOf(name.getMethodName()); + final TableName tableName = TableName.valueOf(methodName); byte[] FAMILY = Bytes.toBytes("event_log"); byte[][] FAMILIES = new byte[][] { FAMILY }; @@ -351,7 +326,7 @@ public void testWithMultipleVersionDeletes() throws IOException { @Test public void testWithColumnDeletes() throws IOException { - final TableName tableName = TableName.valueOf(name.getMethodName()); + final TableName tableName = TableName.valueOf(methodName); byte[] FAMILY = Bytes.toBytes("event_log"); byte[][] FAMILIES = new byte[][] { FAMILY }; @@ -376,7 +351,7 @@ public void testWithColumnDeletes() throws IOException { @Test public void testWithFamilyDeletes() throws IOException { - final TableName tableName = TableName.valueOf(name.getMethodName()); + final TableName tableName = TableName.valueOf(methodName); byte[] FAMILY = Bytes.toBytes("event_log"); byte[][] FAMILIES = new byte[][] { FAMILY }; @@ -416,7 +391,7 @@ private void checkOneCell(Cell kv, byte[] cf, int rowIdx, int colIdx, long ts) { assertEquals("Column qualifier mismatch while checking: " + ctx, "column:" + colIdx, Bytes.toString(CellUtil.cloneQualifier(kv))); - assertEquals("Timestamp mismatch while checking: " + ctx, ts, kv.getTimestamp()); + assertEquals(ts, kv.getTimestamp(), "Timestamp mismatch while checking: " + ctx); assertEquals("Value mismatch while checking: " + ctx, "value-version-" + ts, 
Bytes.toString(CellUtil.cloneValue(kv))); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMutationGetCellBuilder.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMutationGetCellBuilder.java index 19ded2fb60d3..0eb7ecb71502 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMutationGetCellBuilder.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMutationGetCellBuilder.java @@ -17,52 +17,43 @@ */ package org.apache.hadoop.hbase.client; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertTrue; import java.util.Arrays; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellBuilder; import org.apache.hadoop.hbase.CellUtil; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Rule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.rules.TestName; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestInfo; -@Category({ MediumTests.class, ClientTests.class }) +@Tag(MediumTests.TAG) +@Tag(ClientTests.TAG) public class TestMutationGetCellBuilder { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestMutationGetCellBuilder.class); - private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); - @Rule - public TestName name = new TestName(); - - @BeforeClass + @BeforeAll 
public static void setUpBeforeClass() throws Exception { TEST_UTIL.startMiniCluster(); } - @AfterClass + @AfterAll public static void tearDownAfterClass() throws Exception { TEST_UTIL.shutdownMiniCluster(); } @Test - public void testMutationGetCellBuilder() throws Exception { - final TableName tableName = TableName.valueOf(name.getMethodName()); + public void testMutationGetCellBuilder(TestInfo testInfo) throws Exception { + final TableName tableName = TableName.valueOf(testInfo.getTestMethod().get().getName()); final byte[] rowKey = Bytes.toBytes("12345678"); final byte[] uselessRowKey = Bytes.toBytes("123"); final byte[] family = Bytes.toBytes("cf"); @@ -78,16 +69,16 @@ public void testMutationGetCellBuilder() throws Exception { cellBuilder.setRow(uselessRowKey); put.add(cellBuilder.build()); byte[] cloneRow = CellUtil.cloneRow(cellBuilder.build()); - assertTrue("setRow must be useless", !Arrays.equals(cloneRow, uselessRowKey)); + assertTrue(!Arrays.equals(cloneRow, uselessRowKey), "setRow must be useless"); table.put(put); // get the row back and assert the values Get get = new Get(rowKey); get.setTimestamp(now); Result result = table.get(get); - assertTrue("row key must be same", Arrays.equals(result.getRow(), rowKey)); - assertTrue("Column foo value should be bar", - Bytes.toString(result.getValue(family, qualifier)).equals("bar")); + assertTrue(Arrays.equals(result.getRow(), rowKey), "row key must be same"); + assertTrue(Bytes.toString(result.getValue(family, qualifier)).equals("bar"), + "Column foo value should be bar"); // Delete that row Delete delete = new Delete(rowKey); @@ -101,7 +92,7 @@ public void testMutationGetCellBuilder() throws Exception { get = new Get(rowKey); get.setTimestamp(now); result = table.get(get); - assertTrue("Column foo should not exist", result.getValue(family, qualifier) == null); + assertTrue(result.getValue(family, qualifier) == null, "Column foo should not exist"); } } } diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMvccConsistentScanner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMvccConsistentScanner.java index fec5cb1012ef..c147f867fdea 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMvccConsistentScanner.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMvccConsistentScanner.java @@ -17,34 +17,28 @@ */ package org.apache.hadoop.hbase.client; -import static org.junit.Assert.assertArrayEquals; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNull; +import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNull; import java.io.IOException; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.regionserver.HRegionServer; import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.Bytes; -import org.junit.AfterClass; -import org.junit.Before; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Rule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.rules.TestName; - -@Category({ MediumTests.class, ClientTests.class }) +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestInfo; + +@Tag(MediumTests.TAG) +@Tag(ClientTests.TAG) public class TestMvccConsistentScanner { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestMvccConsistentScanner.class); - private static final 
HBaseTestingUtil UTIL = new HBaseTestingUtil(); private static Connection CONN; @@ -56,26 +50,24 @@ public class TestMvccConsistentScanner { private static final byte[] CQ2 = Bytes.toBytes("cq2"); private static final byte[] CQ3 = Bytes.toBytes("cq3"); - @Rule - public TestName testName = new TestName(); private TableName tableName; - @BeforeClass + @BeforeAll public static void setUpBeforeClass() throws Exception { UTIL.startMiniCluster(2); CONN = ConnectionFactory.createConnection(UTIL.getConfiguration()); } - @AfterClass + @AfterAll public static void tearDownAfterClass() throws Exception { CONN.close(); UTIL.shutdownMiniCluster(); } - @Before - public void setUp() throws IOException, InterruptedException { - tableName = TableName.valueOf(testName.getMethodName().replaceAll("[^0-9a-zA-Z]", "_")); + @BeforeEach + public void setUp(TestInfo testInfo) throws IOException, InterruptedException { + tableName = TableName.valueOf(testInfo.getTestMethod().get().getName()); UTIL.createTable(tableName, CF); UTIL.waitTableAvailable(tableName); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestPreadReversedScanner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestPreadReversedScanner.java index b9c5d62d1394..2bce417ff9db 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestPreadReversedScanner.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestPreadReversedScanner.java @@ -17,30 +17,27 @@ */ package org.apache.hadoop.hbase.client; +import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; + import java.io.IOException; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Scan.ReadType; import 
org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.Bytes; -import org.junit.After; -import org.junit.AfterClass; -import org.junit.Assert; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category(MediumTests.class) +@Tag(MediumTests.TAG) public class TestPreadReversedScanner { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestPreadReversedScanner.class); - public static final Logger LOG = LoggerFactory.getLogger(TestPreadReversedScanner.class); private final static HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); @@ -49,7 +46,7 @@ public class TestPreadReversedScanner { private static Table htable = null; - @BeforeClass + @BeforeAll public static void setUpBeforeClass() throws Exception { TEST_UTIL.startMiniCluster(1); @@ -63,12 +60,12 @@ public static void setUpBeforeClass() throws Exception { htable = TEST_UTIL.createTable(TABLE_NAME, COLUMN_FAMILY, splitKeys); } - @AfterClass + @AfterAll public static void tearDownAfterClass() throws Exception { TEST_UTIL.shutdownMiniCluster(); } - @After + @AfterEach public void tearDown() throws IOException { TEST_UTIL.truncateTable(TABLE_NAME); } @@ -107,11 +104,11 @@ private void testPreadReversedScanInternal(String[] inputRowKeys) throws IOExcep Result r; int value = rowCount; while ((r = scanner.next()) != null) { - Assert.assertArrayEquals(r.getValue(COLUMN_FAMILY, null), Bytes.toBytes(--value)); - Assert.assertArrayEquals(r.getRow(), Bytes.toBytes(inputRowKeys[value])); + assertArrayEquals(r.getValue(COLUMN_FAMILY, null), Bytes.toBytes(--value)); + assertArrayEquals(r.getRow(), 
Bytes.toBytes(inputRowKeys[value])); } - Assert.assertEquals(0, value); + assertEquals(0, value); } /** @@ -136,10 +133,10 @@ public void testSmallReversedScan02() throws IOException { Result r; int count = 1; while ((r = scanner.next()) != null) { - Assert.assertArrayEquals(r.getValue(COLUMN_FAMILY, null), Bytes.toBytes(0)); - Assert.assertArrayEquals(r.getRow(), new byte[] { (char) 0x00 }); - Assert.assertTrue(--count >= 0); + assertArrayEquals(r.getValue(COLUMN_FAMILY, null), Bytes.toBytes(0)); + assertArrayEquals(r.getRow(), new byte[] { (char) 0x00 }); + assertTrue(--count >= 0); } - Assert.assertEquals(0, count); + assertEquals(0, count); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestPutDeleteEtcCellIteration.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestPutDeleteEtcCellIteration.java index c9b8ae650bb1..53c5e2e96ea7 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestPutDeleteEtcCellIteration.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestPutDeleteEtcCellIteration.java @@ -17,8 +17,9 @@ */ package org.apache.hadoop.hbase.client; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; import java.io.IOException; import java.util.Arrays; @@ -26,26 +27,21 @@ import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellScanner; import org.apache.hadoop.hbase.CellUtil; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; -import org.junit.ClassRule; -import org.junit.Test; 
-import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; /** * Test that I can Iterate Client Actions that hold Cells (Get does not have Cells). */ -@Category({ SmallTests.class, ClientTests.class }) +@Tag(SmallTests.TAG) +@Tag(ClientTests.TAG) public class TestPutDeleteEtcCellIteration { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestPutDeleteEtcCellIteration.class); - private static final byte[] ROW = new byte[] { 'r' }; private static final long TIMESTAMP = EnvironmentEdgeManager.currentTime(); private static final int COUNT = 10; @@ -66,21 +62,23 @@ public void testPutIteration() throws IOException { assertEquals(COUNT, index); } - @Test(expected = ConcurrentModificationException.class) + @Test public void testPutConcurrentModificationOnIteration() throws IOException { Put p = new Put(ROW); for (int i = 0; i < COUNT; i++) { byte[] bytes = Bytes.toBytes(i); p.addColumn(bytes, bytes, TIMESTAMP, bytes); } - int index = 0; - for (CellScanner cellScanner = p.cellScanner(); cellScanner.advance();) { - Cell cell = cellScanner.current(); - byte[] bytes = Bytes.toBytes(index++); - // When we hit the trigger, try inserting a new KV; should trigger exception - p.addColumn(bytes, bytes, TIMESTAMP, bytes); - assertEquals(new KeyValue(ROW, bytes, bytes, TIMESTAMP, bytes), cell); - } + assertThrows(ConcurrentModificationException.class, () -> { + int index = 0; + for (CellScanner cellScanner = p.cellScanner(); cellScanner.advance();) { + Cell cell = cellScanner.current(); + byte[] bytes = Bytes.toBytes(index++); + // When we hit the trigger, try inserting a new KV; should trigger exception + p.addColumn(bytes, bytes, TIMESTAMP, bytes); + assertEquals(new KeyValue(ROW, bytes, bytes, TIMESTAMP, bytes), cell); + } + }); } @Test diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestPutWithDelete.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestPutWithDelete.java index ea9448adccf4..f475cda138b3 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestPutWithDelete.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestPutWithDelete.java @@ -17,9 +17,8 @@ */ package org.apache.hadoop.hbase.client; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertTrue; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeyValue; @@ -27,50 +26,35 @@ import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.Bytes; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Rule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.rules.TestName; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestInfo; -@Category({ MediumTests.class, ClientTests.class }) +@Tag(MediumTests.TAG) +@Tag(ClientTests.TAG) public class TestPutWithDelete { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestPutWithDelete.class); - private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); - @Rule - public TestName name = new TestName(); - - /** - * @throws java.lang.Exception - */ - @BeforeClass + @BeforeAll public static void setUpBeforeClass() throws Exception { TEST_UTIL.startMiniCluster(); } - /** - * @throws java.lang.Exception - */ - @AfterClass + @AfterAll public static void tearDownAfterClass() throws Exception { TEST_UTIL.shutdownMiniCluster(); } @Test - public void 
testHbasePutDeleteCell() throws Exception { - final TableName tableName = TableName.valueOf(name.getMethodName()); + public void testHbasePutDeleteCell(TestInfo testInfo) throws Exception { + final TableName tableName = TableName.valueOf(testInfo.getTestMethod().get().getName()); final byte[] rowKey = Bytes.toBytes("12345"); final byte[] family = Bytes.toBytes("cf"); - Table table = TEST_UTIL.createTable(tableName, family); - TEST_UTIL.waitTableAvailable(tableName.getName(), 5000); - try { + try (Table table = TEST_UTIL.createTable(tableName, family)) { + TEST_UTIL.waitTableAvailable(tableName.getName(), 5000); // put one row Put put = new Put(rowKey); put.addColumn(family, Bytes.toBytes("A"), Bytes.toBytes("a")); @@ -81,14 +65,14 @@ public void testHbasePutDeleteCell() throws Exception { // get row back and assert the values Get get = new Get(rowKey); Result result = table.get(get); - assertTrue("Column A value should be a", - Bytes.toString(result.getValue(family, Bytes.toBytes("A"))).equals("a")); - assertTrue("Column B value should be b", - Bytes.toString(result.getValue(family, Bytes.toBytes("B"))).equals("b")); - assertTrue("Column C value should be c", - Bytes.toString(result.getValue(family, Bytes.toBytes("C"))).equals("c")); - assertTrue("Column D value should be d", - Bytes.toString(result.getValue(family, Bytes.toBytes("D"))).equals("d")); + assertTrue(Bytes.toString(result.getValue(family, Bytes.toBytes("A"))).equals("a"), + "Column A value should be a"); + assertTrue(Bytes.toString(result.getValue(family, Bytes.toBytes("B"))).equals("b"), + "Column B value should be b"); + assertTrue(Bytes.toString(result.getValue(family, Bytes.toBytes("C"))).equals("c"), + "Column C value should be c"); + assertTrue(Bytes.toString(result.getValue(family, Bytes.toBytes("D"))).equals("d"), + "Column D value should be d"); // put the same row again with C column deleted put = new Put(rowKey); put.addColumn(family, Bytes.toBytes("A"), Bytes.toBytes("a1")); @@ -101,15 
+85,13 @@ public void testHbasePutDeleteCell() throws Exception { // get row back and assert the values get = new Get(rowKey); result = table.get(get); - assertTrue("Column A value should be a1", - Bytes.toString(result.getValue(family, Bytes.toBytes("A"))).equals("a1")); - assertTrue("Column B value should be b1", - Bytes.toString(result.getValue(family, Bytes.toBytes("B"))).equals("b1")); - assertTrue("Column C should not exist", result.getValue(family, Bytes.toBytes("C")) == null); - assertTrue("Column D value should be d1", - Bytes.toString(result.getValue(family, Bytes.toBytes("D"))).equals("d1")); - } finally { - table.close(); + assertTrue(Bytes.toString(result.getValue(family, Bytes.toBytes("A"))).equals("a1"), + "Column A value should be a1"); + assertTrue(Bytes.toString(result.getValue(family, Bytes.toBytes("B"))).equals("b1"), + "Column B value should be b1"); + assertTrue(result.getValue(family, Bytes.toBytes("C")) == null, "Column C should not exist"); + assertTrue(Bytes.toString(result.getValue(family, Bytes.toBytes("D"))).equals("d1"), + "Column D value should be d1"); } } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRawAsyncScanCursor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRawAsyncScanCursor.java index 3954615ab46d..fb9f34678664 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRawAsyncScanCursor.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRawAsyncScanCursor.java @@ -17,42 +17,39 @@ */ package org.apache.hadoop.hbase.client; -import static org.junit.Assert.assertArrayEquals; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static 
org.junit.jupiter.api.Assertions.assertTrue; import java.io.IOException; import java.util.concurrent.CompletableFuture; import java.util.concurrent.ExecutionException; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.LargeTests; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; -@Category({ LargeTests.class, ClientTests.class }) +@Tag(LargeTests.TAG) +@Tag(ClientTests.TAG) public class TestRawAsyncScanCursor extends AbstractTestScanCursor { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestRawAsyncScanCursor.class); - private static AsyncConnection CONN; - @BeforeClass + @BeforeAll public static void setUpBeforeClass() throws Exception { - AbstractTestScanCursor.setUpBeforeClass(); + AbstractTestScanCursor.startCluster(); CONN = ConnectionFactory.createAsyncConnection(TEST_UTIL.getConfiguration()).get(); } + @AfterAll public static void tearDownAfterClass() throws Exception { if (CONN != null) { CONN.close(); } - AbstractTestScanCursor.tearDownAfterClass(); + stopCluster(); } private void doTest(boolean reversed) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRawAsyncTableLimitedScanWithFilter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRawAsyncTableLimitedScanWithFilter.java index bdab871e1104..2d1a206a15a4 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRawAsyncTableLimitedScanWithFilter.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRawAsyncTableLimitedScanWithFilter.java @@ -17,25 +17,23 @@ */ package org.apache.hadoop.hbase.client; -import static 
org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; import java.util.List; import java.util.concurrent.ExecutionException; import java.util.stream.Collectors; import java.util.stream.IntStream; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.Bytes; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; /** * With filter we may stop at a middle of row and think that we still have more cells for the @@ -43,13 +41,10 @@ * lead to a Result that mayHaveMoreCellsInRow is true but actually there are no cells for the same * row. Here we want to test if our limited scan still works. 
*/ -@Category({ MediumTests.class, ClientTests.class }) +@Tag(MediumTests.TAG) +@Tag(ClientTests.TAG) public class TestRawAsyncTableLimitedScanWithFilter { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestRawAsyncTableLimitedScanWithFilter.class); - private static final HBaseTestingUtil UTIL = new HBaseTestingUtil(); private static final TableName TABLE_NAME = TableName.valueOf("TestRegionScanner"); @@ -65,7 +60,7 @@ public class TestRawAsyncTableLimitedScanWithFilter { private static AsyncTable TABLE; - @BeforeClass + @BeforeAll public static void setUp() throws Exception { UTIL.startMiniCluster(1); UTIL.createTable(TABLE_NAME, FAMILY); @@ -79,7 +74,7 @@ public static void setUp() throws Exception { }).collect(Collectors.toList())).get(); } - @AfterClass + @AfterAll public static void tearDown() throws Exception { if (CONN != null) { CONN.close(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRawAsyncTablePartialScan.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRawAsyncTablePartialScan.java index 0b734fa93c9f..ebcdd720557c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRawAsyncTablePartialScan.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRawAsyncTablePartialScan.java @@ -17,31 +17,26 @@ */ package org.apache.hadoop.hbase.client; -import static org.junit.Assert.assertEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; import java.util.List; import java.util.concurrent.ExecutionException; import java.util.stream.Collectors; import java.util.stream.IntStream; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.Bytes; -import 
org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; -@Category({ MediumTests.class, ClientTests.class }) +@Tag(MediumTests.TAG) +@Tag(ClientTests.TAG) public class TestRawAsyncTablePartialScan { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestRawAsyncTablePartialScan.class); - private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); private static TableName TABLE_NAME = TableName.valueOf("async"); @@ -57,7 +52,7 @@ public class TestRawAsyncTablePartialScan { private static AsyncTable TABLE; - @BeforeClass + @BeforeAll public static void setUp() throws Exception { TEST_UTIL.startMiniCluster(1); TEST_UTIL.createTable(TABLE_NAME, FAMILY); @@ -71,7 +66,7 @@ public static void setUp() throws Exception { .collect(Collectors.toList())).get(); } - @AfterClass + @AfterAll public static void tearDown() throws Exception { CONN.close(); TEST_UTIL.shutdownMiniCluster(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRegionLocationCaching.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRegionLocationCaching.java index 8fab17a00f7e..121a198dfc40 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRegionLocationCaching.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRegionLocationCaching.java @@ -17,16 +17,15 @@ */ package org.apache.hadoop.hbase.client; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotEquals; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotEquals; +import static 
org.junit.jupiter.api.Assertions.assertTrue; import java.io.IOException; import java.util.ArrayList; import java.util.List; import org.apache.commons.io.IOUtils; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.TableName; @@ -34,33 +33,29 @@ import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Threads; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; -@Category({ ClientTests.class, LargeTests.class }) +@Tag(ClientTests.TAG) +@Tag(LargeTests.TAG) public class TestRegionLocationCaching { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestRegionLocationCaching.class); - private final static HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); private static int SLAVES = 1; private static TableName TABLE_NAME = TableName.valueOf("TestRegionLocationCaching"); private static byte[] FAMILY = Bytes.toBytes("testFamily"); private static byte[] QUALIFIER = Bytes.toBytes("testQualifier"); - @BeforeClass + @BeforeAll public static void setUpBeforeClass() throws Exception { TEST_UTIL.startMiniCluster(SLAVES); TEST_UTIL.createTable(TABLE_NAME, new byte[][] { FAMILY }); TEST_UTIL.waitUntilAllRegionsAssigned(TABLE_NAME); } - @AfterClass + @AfterAll public static void tearDownAfterClass() throws Exception { TEST_UTIL.shutdownMiniCluster(); } @@ -110,7 +105,7 @@ private void checkRegionLocationIsCached(final TableName tableName, final Connec for (int count = 0; count < 50; count++) { int number = 
((AsyncConnectionImpl) conn.toAsyncConnection()).getLocator() .getNumberOfCachedRegionLocations(tableName); - assertNotEquals("Expected non-zero number of cached region locations", 0, number); + assertNotEquals(0, number, "Expected non-zero number of cached region locations"); Thread.sleep(100); } } @@ -125,7 +120,7 @@ private void checkRegionLocationIsNotCached(final TableName tableName, final Con for (int count = 0; count < 50; count++) { int number = ((AsyncConnectionImpl) conn.toAsyncConnection()).getLocator() .getNumberOfCachedRegionLocations(tableName); - assertEquals("Expected zero number of cached region locations", 0, number); + assertEquals(0, number, "Expected zero number of cached region locations"); Thread.sleep(100); } } @@ -142,7 +137,7 @@ private static void checkExistence(final TableName tableName, final byte[] row, int nbTry = 0; try (Table table = TEST_UTIL.getConnection().getTable(tableName)) { do { - assertTrue("Failed to get row after " + nbTry + " tries", nbTry < 50); + assertTrue(nbTry < 50, "Failed to get row after " + nbTry + " tries"); nbTry++; Thread.sleep(100); r = table.get(get); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRegionLocator.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRegionLocator.java index 531e36175766..e464344c0c6e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRegionLocator.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRegionLocator.java @@ -19,30 +19,25 @@ import java.io.IOException; import java.util.List; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.Pair; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import 
org.junit.experimental.categories.Category; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Tag; -@Category({ MediumTests.class, ClientTests.class }) +@Tag(MediumTests.TAG) +@Tag(ClientTests.TAG) public class TestRegionLocator extends AbstractTestRegionLocator { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestRegionLocator.class); - - @BeforeClass + @BeforeAll public static void setUp() throws Exception { startClusterAndCreateTable(); } - @AfterClass + @AfterAll public static void tearDown() throws Exception { UTIL.shutdownMiniCluster(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicaWithCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicaWithCluster.java index c38be19a238e..2ff2f7f9f543 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicaWithCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicaWithCluster.java @@ -17,6 +17,10 @@ */ package org.apache.hadoop.hbase.client; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; + import java.io.IOException; import java.util.Arrays; import java.util.Collections; @@ -31,7 +35,6 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; @@ -51,22 +54,17 @@ import org.apache.hadoop.hbase.tool.BulkLoadHFiles; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster; -import org.junit.AfterClass; -import org.junit.Assert; -import org.junit.BeforeClass; 
-import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({ LargeTests.class, ClientTests.class }) +@Tag(LargeTests.TAG) +@Tag(ClientTests.TAG) public class TestReplicaWithCluster { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestReplicaWithCluster.class); - private static final Logger LOG = LoggerFactory.getLogger(TestReplicaWithCluster.class); private static final int NB_SERVERS = 3; @@ -231,7 +229,7 @@ public void preScannerOpen(final ObserverContext getSecondaryCdl() { } } - @BeforeClass + @BeforeAll public static void beforeClass() throws Exception { // enable store file refreshing HTU.getConfiguration().setInt(StorefileRefresherChore.REGIONSERVER_STOREFILE_REFRESH_PERIOD, @@ -224,16 +222,16 @@ public static void beforeClass() throws Exception { LOG.info("Master has stopped"); rsServerName = HTU.getHBaseCluster().getRegionServer(0).getServerName(); - Assert.assertNotNull(rsServerName); + assertNotNull(rsServerName); } - @AfterClass + @AfterAll public static void afterClass() throws Exception { HRegionServer.TEST_SKIP_REPORTING_TRANSITION = false; HTU.shutdownMiniCluster(); } - @Before + @BeforeEach public void before() throws IOException { HTU.getConnection().clearRegionLocationCache(); try { @@ -247,7 +245,7 @@ public void before() throws IOException { table = HTU.getConnection().getTable(TABLE_NAME); } - @After + @AfterEach public void after() throws IOException, KeeperException { try { closeRegion(hriSecondary); @@ -273,8 +271,8 @@ private void openRegion(RegionInfo hri) throws Exception { AdminProtos.OpenRegionRequest orr = RequestConverter.buildOpenRegionRequest(getRS().getServerName(), hri, null); AdminProtos.OpenRegionResponse responseOpen 
= getRS().getRSRpcServices().openRegion(null, orr); - Assert.assertEquals(1, responseOpen.getOpeningStateCount()); - Assert.assertEquals(AdminProtos.OpenRegionResponse.RegionOpeningState.OPENED, + assertEquals(1, responseOpen.getOpeningStateCount()); + assertEquals(AdminProtos.OpenRegionResponse.RegionOpeningState.OPENED, responseOpen.getOpeningState(0)); checkRegionIsOpened(hri); } @@ -284,7 +282,7 @@ private void closeRegion(RegionInfo hri) throws Exception { ProtobufUtil.buildCloseRegionRequest(getRS().getServerName(), hri.getRegionName()); AdminProtos.CloseRegionResponse responseClose = getRS().getRSRpcServices().closeRegion(null, crr); - Assert.assertTrue(responseClose.getClosed()); + assertTrue(responseClose.getClosed()); checkRegionIsClosed(hri.getEncodedName()); } @@ -306,7 +304,7 @@ private void checkRegionIsClosed(String encodedRegionName) throws Exception { } try { - Assert.assertFalse(getRS().getRegionByEncodedName(encodedRegionName).isAvailable()); + assertFalse(getRS().getRegionByEncodedName(encodedRegionName).isAvailable()); } catch (NotServingRegionException expected) { // That's how it work: if the region is closed we have an exception. 
} @@ -326,7 +324,7 @@ public void testUseRegionWithoutReplica() throws Exception { try { Get g = new Get(b1); Result r = table.get(g); - Assert.assertFalse(r.isStale()); + assertFalse(r.isStale()); } finally { closeRegion(hriSecondary); } @@ -341,17 +339,17 @@ public void testLocations() throws Exception { RegionLocator locator = conn.getRegionLocator(TABLE_NAME)) { conn.clearRegionLocationCache(); List rl = locator.getRegionLocations(b1, true); - Assert.assertEquals(2, rl.size()); + assertEquals(2, rl.size()); rl = locator.getRegionLocations(b1, false); - Assert.assertEquals(2, rl.size()); + assertEquals(2, rl.size()); conn.clearRegionLocationCache(); rl = locator.getRegionLocations(b1, false); - Assert.assertEquals(2, rl.size()); + assertEquals(2, rl.size()); rl = locator.getRegionLocations(b1, true); - Assert.assertEquals(2, rl.size()); + assertEquals(2, rl.size()); } finally { closeRegion(hriSecondary); } @@ -366,7 +364,7 @@ public void testGetNoResultNoStaleRegionWithReplica() throws Exception { // A get works and is not stale Get g = new Get(b1); Result r = table.get(g); - Assert.assertFalse(r.isStale()); + assertFalse(r.isStale()); } finally { closeRegion(hriSecondary); } @@ -382,7 +380,7 @@ public void testGetNoResultStaleRegionWithReplica() throws Exception { Get g = new Get(b1); g.setConsistency(Consistency.TIMELINE); Result r = table.get(g); - Assert.assertTrue(r.isStale()); + assertTrue(r.isStale()); } finally { SlowMeCopro.getPrimaryCdl().get().countDown(); closeRegion(hriSecondary); @@ -399,7 +397,7 @@ public void testGetNoResultNotStaleSleepRegionWithReplica() throws Exception { SlowMeCopro.sleepTime.set(2000); Get g = new Get(b1); Result r = table.get(g); - Assert.assertFalse(r.isStale()); + assertFalse(r.isStale()); } finally { SlowMeCopro.sleepTime.set(0); @@ -480,16 +478,16 @@ public void testUseRegionWithReplica() throws Exception { // A get works and is not stale Get g = new Get(b1); Result r = table.get(g); - Assert.assertFalse(r.isStale()); - 
Assert.assertFalse(r.getColumnCells(f, b1).isEmpty()); + assertFalse(r.isStale()); + assertFalse(r.getColumnCells(f, b1).isEmpty()); LOG.info("get works and is not stale done"); // Even if it we have to wait a little on the main region SlowMeCopro.sleepTime.set(2000); g = new Get(b1); r = table.get(g); - Assert.assertFalse(r.isStale()); - Assert.assertFalse(r.getColumnCells(f, b1).isEmpty()); + assertFalse(r.isStale()); + assertFalse(r.getColumnCells(f, b1).isEmpty()); SlowMeCopro.sleepTime.set(0); LOG.info("sleep and is not stale done"); @@ -498,8 +496,8 @@ public void testUseRegionWithReplica() throws Exception { g = new Get(b1); g.setConsistency(Consistency.TIMELINE); r = table.get(g); - Assert.assertTrue(r.isStale()); - Assert.assertTrue(r.getColumnCells(f, b1).isEmpty()); + assertTrue(r.isStale()); + assertTrue(r.getColumnCells(f, b1).isEmpty()); SlowMeCopro.getPrimaryCdl().get().countDown(); LOG.info("stale done"); @@ -508,8 +506,8 @@ public void testUseRegionWithReplica() throws Exception { g = new Get(b1); g.setCheckExistenceOnly(true); r = table.get(g); - Assert.assertFalse(r.isStale()); - Assert.assertTrue(r.getExists()); + assertFalse(r.isStale()); + assertTrue(r.getExists()); LOG.info("exists not stale done"); // exists works on stale but don't see the put @@ -518,8 +516,8 @@ public void testUseRegionWithReplica() throws Exception { g.setCheckExistenceOnly(true); g.setConsistency(Consistency.TIMELINE); r = table.get(g); - Assert.assertTrue(r.isStale()); - Assert.assertFalse("The secondary has stale data", r.getExists()); + assertTrue(r.isStale()); + assertFalse(r.getExists(), "The secondary has stale data"); SlowMeCopro.getPrimaryCdl().get().countDown(); LOG.info("exists stale before flush done"); @@ -533,8 +531,8 @@ public void testUseRegionWithReplica() throws Exception { g = new Get(b1); g.setConsistency(Consistency.TIMELINE); r = table.get(g); - Assert.assertTrue(r.isStale()); - Assert.assertFalse(r.isEmpty()); + assertTrue(r.isStale()); + 
assertFalse(r.isEmpty()); SlowMeCopro.getPrimaryCdl().get().countDown(); LOG.info("stale done"); @@ -544,8 +542,8 @@ public void testUseRegionWithReplica() throws Exception { g.setCheckExistenceOnly(true); g.setConsistency(Consistency.TIMELINE); r = table.get(g); - Assert.assertTrue(r.isStale()); - Assert.assertTrue(r.getExists()); + assertTrue(r.isStale()); + assertTrue(r.getExists()); SlowMeCopro.getPrimaryCdl().get().countDown(); LOG.info("exists stale after flush done"); @@ -573,8 +571,8 @@ public void testHedgedRead() throws Exception { // A get works and is not stale Get g = new Get(b1); Result r = table.get(g); - Assert.assertFalse(r.isStale()); - Assert.assertFalse(r.getColumnCells(f, b1).isEmpty()); + assertFalse(r.isStale()); + assertFalse(r.getColumnCells(f, b1).isEmpty()); LOG.info("get works and is not stale done"); // reset @@ -594,10 +592,10 @@ public void testHedgedRead() throws Exception { g = new Get(b1); g.setConsistency(Consistency.TIMELINE); r = table.get(g); - Assert.assertFalse(r.isStale()); - Assert.assertFalse(r.getColumnCells(f, b1).isEmpty()); - Assert.assertEquals(1, hedgedReadOps.getCount()); - Assert.assertEquals(0, hedgedReadWin.getCount()); + assertFalse(r.isStale()); + assertFalse(r.getColumnCells(f, b1).isEmpty()); + assertEquals(1, hedgedReadOps.getCount()); + assertEquals(0, hedgedReadWin.getCount()); SlowMeCopro.sleepTime.set(0); SlowMeCopro.getSecondaryCdl().get().countDown(); LOG.info("hedged read occurred but not faster"); @@ -607,9 +605,9 @@ public void testHedgedRead() throws Exception { g = new Get(b1); g.setConsistency(Consistency.TIMELINE); r = table.get(g); - Assert.assertTrue(r.isStale()); - Assert.assertTrue(r.getColumnCells(f, b1).isEmpty()); - Assert.assertEquals(2, hedgedReadOps.getCount()); + assertTrue(r.isStale()); + assertTrue(r.getColumnCells(f, b1).isEmpty()); + assertEquals(2, hedgedReadOps.getCount()); // we update the metrics after we finish the request so we use a waitFor here, use assert // directly may 
cause failure if we run too fast. HTU.waitFor(10000, () -> hedgedReadWin.getCount() == 1); @@ -672,21 +670,20 @@ private void assertScanMetrics(Scan scan, RegionInfo regionInfo, boolean isStale throws IOException { try (ResultScanner rs = table.getScanner(scan);) { for (Result r : rs) { - Assert.assertEquals(isStale, r.isStale()); - Assert.assertFalse(r.isEmpty()); + assertEquals(isStale, r.isStale()); + assertFalse(r.isEmpty()); } Map> scanMetricsByRegion = rs.getScanMetrics().collectMetricsByRegion(false); - Assert.assertEquals(1, scanMetricsByRegion.size()); + assertEquals(1, scanMetricsByRegion.size()); for (Map.Entry> entry : scanMetricsByRegion .entrySet()) { ScanMetricsRegionInfo scanMetricsRegionInfo = entry.getKey(); Map metrics = entry.getValue(); - Assert.assertEquals(rsServerName, scanMetricsRegionInfo.getServerName()); - Assert.assertEquals(regionInfo.getEncodedName(), - scanMetricsRegionInfo.getEncodedRegionName()); - Assert.assertEquals(1, (long) metrics.get(REGIONS_SCANNED_METRIC_NAME)); - Assert.assertEquals(1, (long) metrics.get(COUNT_OF_ROWS_SCANNED_KEY_METRIC_NAME)); + assertEquals(rsServerName, scanMetricsRegionInfo.getServerName()); + assertEquals(regionInfo.getEncodedName(), scanMetricsRegionInfo.getEncodedRegionName()); + assertEquals(1, (long) metrics.get(REGIONS_SCANNED_METRIC_NAME)); + assertEquals(1, (long) metrics.get(COUNT_OF_ROWS_SCANNED_KEY_METRIC_NAME)); } } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRequestAttributes.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRequestAttributes.java index 9d6dc33a46a3..14e233fa3e9a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRequestAttributes.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRequestAttributes.java @@ -31,7 +31,6 @@ import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.ExtendedCellScannable; import org.apache.hadoop.hbase.ExtendedCellScanner; -import 
org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.SingleProcessHBaseCluster; @@ -50,19 +49,15 @@ import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.wal.WALEdit; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; -@Category({ ClientTests.class, MediumTests.class }) +@Tag(ClientTests.TAG) +@Tag(MediumTests.TAG) public class TestRequestAttributes { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestRequestAttributes.class); - private static final byte[] ROW_KEY1 = Bytes.toBytes("1"); private static final byte[] ROW_KEY2A = Bytes.toBytes("2A"); private static final byte[] ROW_KEY2B = Bytes.toBytes("2B"); @@ -96,7 +91,7 @@ public class TestRequestAttributes { private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); private static SingleProcessHBaseCluster cluster; - @BeforeClass + @BeforeAll public static void setUp() throws Exception { cluster = TEST_UTIL.startMiniCluster(1); Table table = TEST_UTIL.createTable(TABLE_NAME, new byte[][] { FAMILY }, 1, @@ -104,7 +99,7 @@ public static void setUp() throws Exception { table.close(); } - @AfterClass + @AfterAll public static void afterClass() throws Exception { cluster.close(); TEST_UTIL.shutdownMiniCluster(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRequestTooBigException.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRequestTooBigException.java index dfda69d90001..b678999603e3 100644 --- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRequestTooBigException.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRequestTooBigException.java @@ -18,30 +18,25 @@ package org.apache.hadoop.hbase.client; import static org.apache.hadoop.hbase.ipc.RpcServer.MAX_REQUEST_SIZE; -import static org.junit.Assert.assertThrows; +import static org.junit.jupiter.api.Assertions.assertThrows; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.exceptions.RequestTooBigException; import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.Bytes; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; import org.apache.hbase.thirdparty.com.google.common.io.Closeables; -@Category({ MediumTests.class, ClientTests.class }) +@Tag(MediumTests.TAG) +@Tag(ClientTests.TAG) public class TestRequestTooBigException { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestRequestTooBigException.class); - private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); private static final TableName NAME = TableName.valueOf("request_too_big"); @@ -50,7 +45,7 @@ public class TestRequestTooBigException { private static Table TABLE; - @BeforeClass + @BeforeAll public static void setUpBeforeClass() throws Exception { TEST_UTIL.getConfiguration().setInt(MAX_REQUEST_SIZE, 10 * 1024); TEST_UTIL.startMiniCluster(1); @@ -58,7 +53,7 @@ public static void setUpBeforeClass() throws Exception { TEST_UTIL.waitTableAvailable(NAME); } - 
@AfterClass + @AfterAll public static void tearDownAfterClass() throws Exception { Closeables.close(TABLE, true); TEST_UTIL.shutdownMiniCluster(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestResult.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestResult.java index 1de686b253b0..4c142612f519 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestResult.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestResult.java @@ -20,14 +20,14 @@ import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.lessThan; -import static org.junit.Assert.assertArrayEquals; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNotSame; -import static org.junit.Assert.assertNull; -import static org.junit.Assert.assertSame; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; +import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotSame; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertSame; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; import java.io.IOException; import java.nio.ByteBuffer; @@ -40,7 +40,6 @@ import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellScanner; import org.apache.hadoop.hbase.CellUtil; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.Tag; import org.apache.hadoop.hbase.testclassification.ClientTests; @@ -48,18 +47,14 @@ import org.apache.hadoop.hbase.util.ByteBufferUtils; 
import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({ SmallTests.class, ClientTests.class }) +@org.junit.jupiter.api.Tag(SmallTests.TAG) +@org.junit.jupiter.api.Tag(ClientTests.TAG) public class TestResult { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestResult.class); - private static final Logger LOG = LoggerFactory.getLogger(TestResult.class.getName()); static KeyValue[] genKVs(final byte[] row, final byte[] family, final byte[] value, diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestResultFromCoprocessor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestResultFromCoprocessor.java index 2ab9b674dc85..7af305b03e9d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestResultFromCoprocessor.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestResultFromCoprocessor.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hbase.client; -import static junit.framework.TestCase.assertTrue; +import static org.junit.jupiter.api.Assertions.assertTrue; import java.io.IOException; import java.util.Arrays; @@ -27,7 +27,6 @@ import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.CoprocessorEnvironment; import org.apache.hadoop.hbase.ExtendedCellBuilderFactory; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.TableName; @@ -38,19 +37,15 @@ import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.Bytes; -import org.junit.AfterClass; -import 
org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; -@Category({ MediumTests.class, ClientTests.class }) +@Tag(MediumTests.TAG) +@Tag(ClientTests.TAG) public class TestResultFromCoprocessor { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestResultFromCoprocessor.class); - private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); private static final byte[] ROW = Bytes.toBytes("normal_row"); private static final byte[] FAMILY = Bytes.toBytes("fm"); @@ -63,7 +58,7 @@ public class TestResultFromCoprocessor { private static final Result FIXED_RESULT = Result.create(Arrays.asList(FIXED_CELL)); private static final TableName TABLE_NAME = TableName.valueOf("TestResultFromCoprocessor"); - @BeforeClass + @BeforeAll public static void setUpBeforeClass() throws Exception { TEST_UTIL.startMiniCluster(3); TableDescriptor desc = @@ -72,7 +67,7 @@ public static void setUpBeforeClass() throws Exception { TEST_UTIL.getAdmin().createTable(desc); } - @AfterClass + @AfterAll public static void tearDownAfterClass() throws Exception { TEST_UTIL.shutdownMiniCluster(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestResultScannerCursor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestResultScannerCursor.java index 86e3a8bac719..b04e0905c397 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestResultScannerCursor.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestResultScannerCursor.java @@ -18,18 +18,25 @@ package org.apache.hadoop.hbase.client; import java.io.IOException; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.testclassification.ClientTests; import 
org.apache.hadoop.hbase.testclassification.LargeTests; -import org.junit.ClassRule; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Tag; -@Category({ LargeTests.class, ClientTests.class }) +@Tag(LargeTests.TAG) +@Tag(ClientTests.TAG) public class TestResultScannerCursor extends AbstractTestResultScannerCursor { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestResultScannerCursor.class); + @BeforeAll + public static void setUpBeforeClass() throws Exception { + startCluster(); + } + + @AfterAll + public static void tearDownAfterClass() throws Exception { + stopCluster(); + } @Override protected ResultScanner getScanner(Scan scan) throws IOException { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestResultSizeEstimation.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestResultSizeEstimation.java index 82072ecf7c69..01a4a321bda4 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestResultSizeEstimation.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestResultSizeEstimation.java @@ -17,11 +17,10 @@ */ package org.apache.hadoop.hbase.client; -import static org.junit.Assert.assertEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.ArrayBackedTag; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeyValue; @@ -30,30 +29,28 @@ import org.apache.hadoop.hbase.io.hfile.HFile; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.Bytes; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Rule; -import 
org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.rules.TestName; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestInfo; -@Category(MediumTests.class) +@org.junit.jupiter.api.Tag(MediumTests.TAG) public class TestResultSizeEstimation { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestResultSizeEstimation.class); - final static HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); final static int TAG_DATA_SIZE = 2048; final static int SCANNER_DATA_LIMIT = TAG_DATA_SIZE + 256; - @Rule - public TestName name = new TestName(); + private String methodName; + + @BeforeEach + public void setUp(TestInfo testInfo) { + this.methodName = testInfo.getTestMethod().get().getName(); + } - @BeforeClass + @BeforeAll public static void setUpBeforeClass() throws Exception { Configuration conf = TEST_UTIL.getConfiguration(); // Need HFileV3 @@ -64,7 +61,7 @@ public static void setUpBeforeClass() throws Exception { TEST_UTIL.startMiniCluster(1); } - @AfterClass + @AfterAll public static void tearDownAfterClass() throws Exception { TEST_UTIL.shutdownMiniCluster(); } @@ -77,7 +74,7 @@ public void testResultSizeEstimation() throws Exception { byte[] QUALIFIER = Bytes.toBytes("testQualifier"); byte[] VALUE = Bytes.toBytes("testValue"); - final TableName tableName = TableName.valueOf(name.getMethodName()); + final TableName tableName = TableName.valueOf(methodName); byte[][] FAMILIES = new byte[][] { FAMILY }; Table table = TEST_UTIL.createTable(tableName, FAMILIES); Put p = new Put(ROW1); @@ -94,7 +91,7 @@ public void testResultSizeEstimation() throws Exception { while (rs.next() != null) { count++; } - assertEquals("Result size estimation did not work properly", 2, count); + assertEquals(2, count, "Result size estimation did not work properly"); rs.close(); 
table.close(); } @@ -107,7 +104,7 @@ public void testResultSizeEstimationWithTags() throws Exception { byte[] QUALIFIER = Bytes.toBytes("testQualifier"); byte[] VALUE = Bytes.toBytes("testValue"); - final TableName tableName = TableName.valueOf(name.getMethodName()); + final TableName tableName = TableName.valueOf(methodName); byte[][] FAMILIES = new byte[][] { FAMILY }; Table table = TEST_UTIL.createTable(tableName, FAMILIES); Put p = new Put(ROW1); @@ -126,7 +123,7 @@ public void testResultSizeEstimationWithTags() throws Exception { while (rs.next() != null) { count++; } - assertEquals("Result size estimation did not work properly", 2, count); + assertEquals(2, count, "Result size estimation did not work properly"); rs.close(); table.close(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRpcConnectionRegistry.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRpcConnectionRegistry.java index d33cc943355c..161be7e312b0 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRpcConnectionRegistry.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRpcConnectionRegistry.java @@ -21,8 +21,8 @@ import static org.hamcrest.Matchers.hasItem; import static org.hamcrest.Matchers.hasItems; import static org.hamcrest.Matchers.hasSize; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertThrows; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; import java.io.IOException; import java.util.Arrays; @@ -30,7 +30,6 @@ import java.util.List; import java.util.Set; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionLocation; @@ -42,29 +41,25 @@ import org.apache.hadoop.hbase.security.User; import 
org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.MediumTests; -import org.junit.After; -import org.junit.AfterClass; -import org.junit.Before; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; import org.apache.hbase.thirdparty.com.google.common.io.Closeables; -@Category({ MediumTests.class, ClientTests.class }) +@Tag(MediumTests.TAG) +@Tag(ClientTests.TAG) public class TestRpcConnectionRegistry { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestRpcConnectionRegistry.class); - private static final HBaseTestingUtil UTIL = new HBaseTestingUtil(); private RpcConnectionRegistry registry; - @BeforeClass + @BeforeAll public static void setUpBeforeClass() throws Exception { // allow refresh immediately so we will switch to use region servers soon. 
UTIL.getConfiguration().setLong(RpcConnectionRegistry.INITIAL_REFRESH_DELAY_SECS, 1); @@ -75,17 +70,17 @@ public static void setUpBeforeClass() throws Exception { HBaseTestingUtil.setReplicas(UTIL.getAdmin(), TableName.META_TABLE_NAME, 3); } - @AfterClass + @AfterAll public static void tearDownAfterClass() throws Exception { UTIL.shutdownMiniCluster(); } - @Before + @BeforeEach public void setUp() throws IOException { registry = new RpcConnectionRegistry(UTIL.getConfiguration(), User.getCurrent()); } - @After + @AfterEach public void tearDown() throws IOException { Closeables.close(registry, true); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScanAttributes.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScanAttributes.java index 369355117340..7e197e8ee970 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScanAttributes.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScanAttributes.java @@ -17,34 +17,32 @@ */ package org.apache.hadoop.hbase.client; -import org.apache.hadoop.hbase.HBaseClassTestRule; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; + import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.SmallTests; -import org.junit.Assert; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; -@Category({ ClientTests.class, SmallTests.class }) +@Tag(ClientTests.TAG) +@Tag(SmallTests.TAG) public class TestScanAttributes { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestScanAttributes.class); @Test public void testCoEnableAndCoDisableScanMetricsAndScanMetricsByRegion() { Scan scan = new Scan(); - Assert.assertFalse(scan.isScanMetricsEnabled()); - 
Assert.assertFalse(scan.isScanMetricsByRegionEnabled()); + assertFalse(scan.isScanMetricsEnabled()); + assertFalse(scan.isScanMetricsByRegionEnabled()); // Assert enabling scan metrics by region enables scan metrics also scan.setEnableScanMetricsByRegion(true); - Assert.assertTrue(scan.isScanMetricsEnabled()); - Assert.assertTrue(scan.isScanMetricsByRegionEnabled()); + assertTrue(scan.isScanMetricsEnabled()); + assertTrue(scan.isScanMetricsByRegionEnabled()); // Assert disabling scan metrics disables scan metrics by region scan.setScanMetricsEnabled(false); - Assert.assertFalse(scan.isScanMetricsEnabled()); - Assert.assertFalse(scan.isScanMetricsByRegionEnabled()); + assertFalse(scan.isScanMetricsEnabled()); + assertFalse(scan.isScanMetricsByRegionEnabled()); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScanWithoutFetchingData.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScanWithoutFetchingData.java index 22ab75af26dd..aaf1e9a988cd 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScanWithoutFetchingData.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScanWithoutFetchingData.java @@ -17,11 +17,10 @@ */ package org.apache.hadoop.hbase.client; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; import java.io.IOException; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.ipc.BlockingRpcCallback; @@ -30,11 +29,10 @@ import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.RegionServerTests; import org.apache.hadoop.hbase.util.Bytes; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import 
org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; import org.apache.hbase.thirdparty.com.google.protobuf.ServiceException; @@ -48,13 +46,10 @@ * Testcase to make sure that we do not close scanners if ScanRequest.numberOfRows is zero. See * HBASE-18042 for more details. */ -@Category({ RegionServerTests.class, MediumTests.class }) +@Tag(RegionServerTests.TAG) +@Tag(MediumTests.TAG) public class TestScanWithoutFetchingData { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestScanWithoutFetchingData.class); - private static final HBaseTestingUtil UTIL = new HBaseTestingUtil(); private static final TableName TABLE_NAME = TableName.valueOf("test"); @@ -71,7 +66,7 @@ public class TestScanWithoutFetchingData { private static ClientProtos.ClientService.Interface STUB; - @BeforeClass + @BeforeAll public static void setUp() throws Exception { UTIL.startMiniCluster(1); try (Table table = UTIL.createTable(TABLE_NAME, CF)) { @@ -85,7 +80,7 @@ public static void setUp() throws Exception { STUB = CONN.getRegionServerStub(UTIL.getHBaseCluster().getRegionServer(0).getServerName()); } - @AfterClass + @AfterAll public static void tearDown() throws Exception { UTIL.shutdownMiniCluster(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScannerTimeout.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScannerTimeout.java index 96d43e56a590..c4a7d40f4892 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScannerTimeout.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScannerTimeout.java @@ -17,10 +17,9 @@ */ package org.apache.hadoop.hbase.client; -import static org.junit.Assert.assertEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; import 
org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.MetaTableAccessor; @@ -29,25 +28,21 @@ import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.util.Bytes; -import org.junit.AfterClass; -import org.junit.Before; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** * Test various scanner timeout issues. */ -@Category({ LargeTests.class, ClientTests.class }) +@Tag(LargeTests.TAG) +@Tag(ClientTests.TAG) public class TestScannerTimeout { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestScannerTimeout.class); - private final static HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); private static final Logger LOG = LoggerFactory.getLogger(TestScannerTimeout.class); @@ -63,7 +58,7 @@ public class TestScannerTimeout { /** * @throws java.lang.Exception */ - @BeforeClass + @BeforeAll public static void setUpBeforeClass() throws Exception { Configuration c = TEST_UTIL.getConfiguration(); c.setInt(HConstants.HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD, SCANNER_TIMEOUT); @@ -82,7 +77,7 @@ public static void setUpBeforeClass() throws Exception { /** * @throws java.lang.Exception */ - @AfterClass + @AfterAll public static void tearDownAfterClass() throws Exception { TEST_UTIL.shutdownMiniCluster(); } @@ -90,7 +85,7 @@ public static void tearDownAfterClass() throws Exception { /** * @throws java.lang.Exception */ - @Before + @BeforeEach public 
void setUp() throws Exception { TEST_UTIL.ensureSomeNonStoppedRegionServersAvailable(2); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScannersFromClientSide2.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScannersFromClientSide2.java index 36e1f4ad50eb..da1a1fceef22 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScannersFromClientSide2.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScannersFromClientSide2.java @@ -17,41 +17,35 @@ */ package org.apache.hadoop.hbase.client; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; import java.io.IOException; import java.util.ArrayList; import java.util.List; -import org.apache.hadoop.hbase.HBaseClassTestRule; +import java.util.stream.Stream; +import org.apache.hadoop.hbase.HBaseParameterizedTestTemplate; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.util.Bytes; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -import org.junit.runners.Parameterized.Parameter; -import org.junit.runners.Parameterized.Parameters; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.TestTemplate; +import org.junit.jupiter.params.provider.Arguments; /** * Testcase for newly added feature in 
HBASE-17143, such as startRow and stopRow * inclusive/exclusive, limit for rows, etc. */ -@RunWith(Parameterized.class) -@Category({ LargeTests.class, ClientTests.class }) +@Tag(LargeTests.TAG) +@Tag(ClientTests.TAG) +@HBaseParameterizedTestTemplate(name = "{index}: batch={0}, smallResultSize={1}, allowPartial={2}") public class TestScannersFromClientSide2 { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestScannersFromClientSide2.class); - private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); private static TableName TABLE_NAME = TableName.valueOf("scan"); @@ -62,30 +56,32 @@ public class TestScannersFromClientSide2 { private static byte[] CQ2 = Bytes.toBytes("cq2"); - @Parameter(0) - public boolean batch; + private final boolean batch; - @Parameter(1) - public boolean smallResultSize; + private final boolean smallResultSize; - @Parameter(2) - public boolean allowPartial; + private final boolean allowPartial; - @Parameters(name = "{index}: batch={0}, smallResultSize={1}, allowPartial={2}") - public static List params() { - List params = new ArrayList<>(); + public static Stream parameters() { + List params = new ArrayList<>(); boolean[] values = new boolean[] { false, true }; for (int i = 0; i < 2; i++) { for (int j = 0; j < 2; j++) { for (int k = 0; k < 2; k++) { - params.add(new Object[] { values[i], values[j], values[k] }); + params.add(Arguments.of(values[i], values[j], values[k])); } } } - return params; + return params.stream(); + } + + public TestScannersFromClientSide2(boolean batch, boolean smallResultSize, boolean allowPartial) { + this.batch = batch; + this.smallResultSize = smallResultSize; + this.allowPartial = allowPartial; } - @BeforeClass + @BeforeAll public static void setUp() throws Exception { TEST_UTIL.startMiniCluster(3); byte[][] splitKeys = new byte[8][]; @@ -102,7 +98,7 @@ public static void setUp() throws Exception { table.put(puts); } - @AfterClass + @AfterAll public 
static void tearDown() throws Exception { TEST_UTIL.shutdownMiniCluster(); } @@ -141,7 +137,7 @@ private List doScan(Scan scan) throws IOException { private List assertAndCreateCompleteResults(List results) throws IOException { if ((!batch && !allowPartial) || (allowPartial && !batch && !smallResultSize)) { for (Result result : results) { - assertFalse("Should not have partial result", result.mayHaveMoreCellsInRow()); + assertFalse(result.mayHaveMoreCellsInRow(), "Should not have partial result"); } return results; } @@ -149,7 +145,7 @@ private List assertAndCreateCompleteResults(List results) throws List partialResults = new ArrayList<>(); for (Result result : results) { if (!result.mayHaveMoreCellsInRow()) { - assertFalse("Should have partial result", partialResults.isEmpty()); + assertFalse(partialResults.isEmpty(), "Should have partial result"); partialResults.add(result); completeResults.add(Result.createCompleteResult(partialResults)); partialResults.clear(); @@ -157,7 +153,7 @@ private List assertAndCreateCompleteResults(List results) throws partialResults.add(result); } } - assertTrue("Should not have orphan partial result", partialResults.isEmpty()); + assertTrue(partialResults.isEmpty(), "Should not have orphan partial result"); return completeResults; } @@ -203,7 +199,7 @@ private void testReversedScan(int start, boolean startInclusive, int stop, boole } } - @Test + @TestTemplate public void testScanWithLimit() throws Exception { testScan(1, true, 998, false, 900); // from first region to last region testScan(123, true, 345, true, 100); @@ -213,7 +209,7 @@ public void testScanWithLimit() throws Exception { } - @Test + @TestTemplate public void testScanWithLimitGreaterThanActualCount() throws Exception { testScan(1, true, 998, false, 1000); // from first region to last region testScan(123, true, 345, true, 200); @@ -222,7 +218,7 @@ public void testScanWithLimitGreaterThanActualCount() throws Exception { testScan(456, false, 678, false, 200); } - @Test + 
@TestTemplate public void testReversedScanWithLimit() throws Exception { testReversedScan(998, true, 1, false, 900); // from last region to first region testReversedScan(543, true, 321, true, 100); @@ -231,7 +227,7 @@ public void testReversedScanWithLimit() throws Exception { testReversedScan(876, false, 654, false, 100); } - @Test + @TestTemplate public void testReversedScanWithLimitGreaterThanActualCount() throws Exception { testReversedScan(998, true, 1, false, 1000); // from last region to first region testReversedScan(543, true, 321, true, 200); @@ -240,7 +236,7 @@ public void testReversedScanWithLimitGreaterThanActualCount() throws Exception { testReversedScan(876, false, 654, false, 200); } - @Test + @TestTemplate public void testStartRowStopRowInclusive() throws Exception { testScan(1, true, 998, false, -1); // from first region to last region testScan(123, true, 345, true, -1); @@ -249,7 +245,7 @@ public void testStartRowStopRowInclusive() throws Exception { testScan(456, false, 678, false, -1); } - @Test + @TestTemplate public void testReversedStartRowStopRowInclusive() throws Exception { testReversedScan(998, true, 1, false, -1); // from last region to first region testReversedScan(543, true, 321, true, -1); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSeparateClientZKCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSeparateClientZKCluster.java index 52ccd5d8b7da..c90bc910ffde 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSeparateClientZKCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSeparateClientZKCluster.java @@ -17,20 +17,19 @@ */ package org.apache.hadoop.hbase.client; -import static org.junit.Assert.assertArrayEquals; -import static org.junit.Assert.assertEquals; +import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; import java.io.File; import 
java.io.IOException; import org.apache.commons.io.FileUtils; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.SingleProcessHBaseCluster; import org.apache.hadoop.hbase.StartTestingClusterOption; import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.TableNameTestRule; +import org.apache.hadoop.hbase.TableNameTestExtension; import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.master.assignment.AssignmentTestingUtil; import org.apache.hadoop.hbase.regionserver.HRegionServer; @@ -39,17 +38,17 @@ import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread; import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster; -import org.junit.AfterClass; -import org.junit.Before; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Rule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.RegisterExtension; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({ ClientTests.class, MediumTests.class }) +@Tag(ClientTests.TAG) +@Tag(MediumTests.TAG) public class TestSeparateClientZKCluster { private static final Logger LOG = LoggerFactory.getLogger(TestSeparateClientZKCluster.class); private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); @@ -64,14 +63,10 @@ public class TestSeparateClientZKCluster { private final byte[] value = Bytes.toBytes("v1"); private final byte[] newVal = Bytes.toBytes("v2"); - @Rule - public TableNameTestRule name = new TableNameTestRule(); + @RegisterExtension + private 
TableNameTestExtension name = new TableNameTestExtension(); - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestSeparateClientZKCluster.class); - - @BeforeClass + @BeforeAll public static void beforeAllTests() throws Exception { int clientZkPort = 21828; clientZkCluster = new MiniZooKeeperCluster(TEST_UTIL.getConfiguration()); @@ -93,14 +88,14 @@ public static void beforeAllTests() throws Exception { TEST_UTIL.startMiniCluster(option); } - @AfterClass + @AfterAll public static void afterAllTests() throws Exception { TEST_UTIL.shutdownMiniCluster(); clientZkCluster.shutdown(); FileUtils.deleteDirectory(clientZkDir); } - @Before + @BeforeEach public void setUp() throws IOException { try (Admin admin = TEST_UTIL.getConnection().getAdmin()) { waitForNewMasterUpAndAddressSynced(admin); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestServerBusyException.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestServerBusyException.java index 6ed80b84e637..c1f552ab13c7 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestServerBusyException.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestServerBusyException.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hbase.client; -import static org.junit.Assert.assertEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; import java.io.IOException; import java.util.List; @@ -25,7 +25,6 @@ import java.util.concurrent.atomic.AtomicLong; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.coprocessor.ObserverContext; @@ -37,33 +36,24 @@ import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Threads; import org.apache.hadoop.hbase.wal.WALEdit; -import 
org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Rule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.rules.TestName; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestInfo; /** * This class is for testing HBaseConnectionManager ServerBusyException. Be careful adding to this * class. It sets a low HBASE_CLIENT_PERSERVER_REQUESTS_THRESHOLD */ -@Category({ LargeTests.class }) +@Tag(LargeTests.TAG) public class TestServerBusyException { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestServerBusyException.class); - private final static HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); private static final byte[] FAM_NAM = Bytes.toBytes("f"); private static final byte[] ROW = Bytes.toBytes("bbb"); private static final int RPC_RETRY = 5; - @Rule - public TestName name = new TestName(); - public static class SleepCoprocessor implements RegionCoprocessor, RegionObserver { public static final int SLEEP_TIME = 5000; @@ -122,7 +112,7 @@ public void preGetOp(final ObserverContext parameters() { - List configurations = new ArrayList<>(); - configurations.add(new Object[] { createConfigurationForSimpleRpcServer() }); - configurations.add(new Object[] { createConfigurationForNettyRpcServer() }); - return configurations; + public static Stream parameters() { + return Stream.of(Arguments.of(createConfigurationForSimpleRpcServer()), + Arguments.of(createConfigurationForNettyRpcServer())); } private static Configuration createConfigurationForSimpleRpcServer() { @@ -87,30 +78,27 @@ private static Configuration createConfigurationForNettyRpcServer() { protected Connection conn; protected Admin admin; - @Rule - public TestName testName = new TestName(); protected TableName tableName; - @Before + 
@BeforeEach public void setUp() throws Exception { utility = new HBaseTestingUtil(conf); utility.startMiniCluster(2); conn = ConnectionFactory.createConnection(utility.getConfiguration()); admin = conn.getAdmin(); - String methodName = testName.getMethodName(); - tableName = TableName.valueOf(methodName.substring(0, methodName.length() - 3)); + tableName = TableName.valueOf("testTable"); } - @After + @AfterEach public void tearDown() throws Exception { utility.shutdownMiniCluster(); } - @Test + @TestTemplate public void testCompactionTimestamps() throws Exception { createTableWithDefaultConf(tableName); try (Table table = conn.getTable(tableName)) { - long ts = admin.getLastMajorCompactionTimestamp(tableName); + admin.getLastMajorCompactionTimestamp(tableName); } } @@ -119,5 +107,4 @@ private void createTableWithDefaultConf(TableName tableName) throws IOException builder.setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY)); admin.createTable(builder.build()); } - } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestShutdownOfMetaReplicaHolder.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestShutdownOfMetaReplicaHolder.java index c39fc076ef2b..9197f1bea17e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestShutdownOfMetaReplicaHolder.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestShutdownOfMetaReplicaHolder.java @@ -17,30 +17,25 @@ */ package org.apache.hadoop.hbase.client; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.MiscTests; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import 
org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({ MiscTests.class, MediumTests.class }) +@Tag(MiscTests.TAG) +@Tag(MediumTests.TAG) public class TestShutdownOfMetaReplicaHolder extends MetaWithReplicasTestBase { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestShutdownOfMetaReplicaHolder.class); - private static final Logger LOG = LoggerFactory.getLogger(TestShutdownOfMetaReplicaHolder.class); - @BeforeClass + @BeforeAll public static void setUp() throws Exception { startCluster(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSizeFailures.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSizeFailures.java index 846333e53706..26e275839e3e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSizeFailures.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSizeFailures.java @@ -17,35 +17,29 @@ */ package org.apache.hadoop.hbase.client; -import static org.junit.Assert.assertEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; import java.util.ArrayList; import java.util.LinkedList; import java.util.List; import java.util.Map.Entry; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Scan.ReadType; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.util.Bytes; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; import org.slf4j.Logger; import 
org.slf4j.LoggerFactory; import org.apache.hbase.thirdparty.com.google.common.collect.Maps; -@Category(LargeTests.class) +@Tag(LargeTests.TAG) public class TestSizeFailures { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestSizeFailures.class); - private static final Logger LOG = LoggerFactory.getLogger(TestSizeFailures.class); protected final static HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); private static byte[] FAMILY = Bytes.toBytes("testFamily"); @@ -53,7 +47,7 @@ public class TestSizeFailures { private static TableName TABLENAME; private static final int NUM_ROWS = 1000 * 1000, NUM_COLS = 9; - @BeforeClass + @BeforeAll public static void setUpBeforeClass() throws Exception { // Uncomment the following lines if more verbosity is needed for // debugging (see HBASE-12285 for details). @@ -103,7 +97,7 @@ public static void setUpBeforeClass() throws Exception { } } - @AfterClass + @AfterAll public static void tearDownAfterClass() throws Exception { TEST_UTIL.shutdownMiniCluster(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotCloneIndependence.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotCloneIndependence.java index 452fe5d514f9..559e4fc481a9 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotCloneIndependence.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotCloneIndependence.java @@ -17,375 +17,21 @@ */ package org.apache.hadoop.hbase.client; -import java.util.List; -import java.util.regex.Pattern; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.HBaseClassTestRule; -import org.apache.hadoop.hbase.HBaseTestingUtil; -import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.master.snapshot.SnapshotManager; 
-import org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy; -import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils; import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.LargeTests; -import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; -import org.apache.hadoop.hbase.util.Threads; -import org.junit.After; -import org.junit.AfterClass; -import org.junit.Assert; -import org.junit.Before; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Rule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.rules.TestName; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Tag; -/** - * Test to verify that the cloned table is independent of the table from which it was cloned - */ -@Category({ LargeTests.class, ClientTests.class }) -public class TestSnapshotCloneIndependence { - - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestSnapshotCloneIndependence.class); - - private static final Logger LOG = LoggerFactory.getLogger(TestSnapshotCloneIndependence.class); - - @Rule - public TestName testName = new TestName(); - - protected static final HBaseTestingUtil UTIL = new HBaseTestingUtil(); - - protected static final int NUM_RS = 2; - private static final String TEST_FAM_STR = "fam"; - protected static final byte[] TEST_FAM = Bytes.toBytes(TEST_FAM_STR); - private static final int CLEANER_INTERVAL = 100; - - private FileSystem fs; - private Path rootDir; - private Admin admin; - private TableName originalTableName; - private Table originalTable; - private TableName cloneTableName; - private int countOriginalTable; - String snapshotNameAsString; - String snapshotName; +@Tag(LargeTests.TAG) +@Tag(ClientTests.TAG) +public class TestSnapshotCloneIndependence 
extends SnapshotCloneIndependenceTestBase { /** * Setup the config for the cluster and start it */ - @BeforeClass + @BeforeAll public static void setupCluster() throws Exception { setupConf(UTIL.getConfiguration()); UTIL.startMiniCluster(NUM_RS); } - - static void setupConf(Configuration conf) { - // Up the handlers; this test needs more than usual. - conf.setInt(HConstants.REGION_SERVER_HIGH_PRIORITY_HANDLER_COUNT, 15); - // enable snapshot support - conf.setBoolean(SnapshotManager.HBASE_SNAPSHOT_ENABLED, true); - // change the flush size to a small amount, regulating number of store files - conf.setInt("hbase.hregion.memstore.flush.size", 25000); - // so make sure we get a compaction when doing a load, but keep around - // some files in the store - conf.setInt("hbase.hstore.compaction.min", 10); - conf.setInt("hbase.hstore.compactionThreshold", 10); - // block writes if we get to 12 store files - conf.setInt("hbase.hstore.blockingStoreFiles", 12); - conf.setInt("hbase.regionserver.msginterval", 100); - conf.setBoolean("hbase.master.enabletable.roundrobin", true); - // Avoid potentially aggressive splitting which would cause snapshot to fail - conf.set(HConstants.HBASE_REGION_SPLIT_POLICY_KEY, - ConstantSizeRegionSplitPolicy.class.getName()); - // Execute cleaner frequently to induce failures - conf.setInt("hbase.master.cleaner.interval", CLEANER_INTERVAL); - conf.setInt("hbase.master.hfilecleaner.plugins.snapshot.period", CLEANER_INTERVAL); - // Effectively disable TimeToLiveHFileCleaner. Don't want to fully disable it because that - // will even trigger races between creating the directory containing back references and - // the back reference itself. 
- conf.setInt("hbase.master.hfilecleaner.ttl", CLEANER_INTERVAL); - } - - @Before - public void setup() throws Exception { - fs = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getFileSystem(); - rootDir = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getRootDir(); - - admin = UTIL.getAdmin(); - originalTableName = TableName.valueOf("test" + testName.getMethodName()); - cloneTableName = TableName.valueOf("test-clone-" + originalTableName); - snapshotNameAsString = "snapshot_" + originalTableName; - snapshotName = snapshotNameAsString; - - originalTable = createTable(originalTableName, TEST_FAM); - loadData(originalTable, TEST_FAM); - countOriginalTable = countRows(originalTable); - System.out.println("Original table has: " + countOriginalTable + " rows"); - } - - @After - public void tearDown() throws Exception { - UTIL.deleteTable(originalTableName); - UTIL.deleteTable(cloneTableName); - SnapshotTestingUtils.deleteAllSnapshots(UTIL.getAdmin()); - SnapshotTestingUtils.deleteArchiveDirectory(UTIL); - } - - @AfterClass - public static void cleanupTest() throws Exception { - try { - UTIL.shutdownMiniCluster(); - } catch (Exception e) { - LOG.warn("failure shutting down cluster", e); - } - } - - /** - * Verify that adding data to the cloned table will not affect the original, and vice-versa when - * it is taken as an online snapshot. - */ - @Test - public void testOnlineSnapshotAppendIndependent() throws Exception { - createAndCloneSnapshot(true); - runTestSnapshotAppendIndependent(); - } - - /** - * Verify that adding data to the cloned table will not affect the original, and vice-versa when - * it is taken as an offline snapshot. - */ - @Test - public void testOfflineSnapshotAppendIndependent() throws Exception { - createAndCloneSnapshot(false); - runTestSnapshotAppendIndependent(); - } - - /** - * Verify that adding metadata to the cloned table will not affect the original, and vice-versa - * when it is taken as an online snapshot. 
- */ - @Test - public void testOnlineSnapshotMetadataChangesIndependent() throws Exception { - createAndCloneSnapshot(true); - runTestSnapshotMetadataChangesIndependent(); - } - - /** - * Verify that adding netadata to the cloned table will not affect the original, and vice-versa - * when is taken as an online snapshot. - */ - @Test - public void testOfflineSnapshotMetadataChangesIndependent() throws Exception { - createAndCloneSnapshot(false); - runTestSnapshotMetadataChangesIndependent(); - } - - /** - * Verify that region operations, in this case splitting a region, are independent between the - * cloned table and the original. - */ - @Test - public void testOfflineSnapshotRegionOperationsIndependent() throws Exception { - createAndCloneSnapshot(false); - runTestRegionOperationsIndependent(); - } - - /** - * Verify that region operations, in this case splitting a region, are independent between the - * cloned table and the original. - */ - @Test - public void testOnlineSnapshotRegionOperationsIndependent() throws Exception { - createAndCloneSnapshot(true); - runTestRegionOperationsIndependent(); - } - - @Test - public void testOfflineSnapshotDeleteIndependent() throws Exception { - createAndCloneSnapshot(false); - runTestSnapshotDeleteIndependent(); - } - - @Test - public void testOnlineSnapshotDeleteIndependent() throws Exception { - createAndCloneSnapshot(true); - runTestSnapshotDeleteIndependent(); - } - - private static void waitOnSplit(Connection c, final Table t, int originalCount) throws Exception { - for (int i = 0; i < 200; i++) { - Threads.sleepWithoutInterrupt(500); - try (RegionLocator locator = c.getRegionLocator(t.getName())) { - if (locator.getAllRegionLocations().size() > originalCount) { - return; - } - } - } - throw new Exception("Split did not increase the number of regions"); - } - - /** - * Takes the snapshot of originalTable and clones the snapshot to another tables. 
If - * {@code online} is false, the original table is disabled during taking snapshot, so also enables - * it again. - * @param online - Whether the table is online or not during the snapshot - */ - private void createAndCloneSnapshot(boolean online) throws Exception { - SnapshotTestingUtils.createSnapshotAndValidate(admin, originalTableName, TEST_FAM_STR, - snapshotNameAsString, rootDir, fs, online); - - // If offline, enable the table disabled by snapshot testing util. - if (!online) { - admin.enableTable(originalTableName); - UTIL.waitTableAvailable(originalTableName); - } - - admin.cloneSnapshot(snapshotName, cloneTableName); - UTIL.waitUntilAllRegionsAssigned(cloneTableName); - } - - /** - * Verify that adding data to original table or clone table doesn't affect other table. - */ - private void runTestSnapshotAppendIndependent() throws Exception { - try (Table clonedTable = UTIL.getConnection().getTable(cloneTableName)) { - final int clonedTableRowCount = countRows(clonedTable); - - Assert.assertEquals( - "The line counts of original and cloned tables do not match after clone. 
", - countOriginalTable, clonedTableRowCount); - - // Attempt to add data to the test - Put p = new Put(Bytes.toBytes("new-row-" + EnvironmentEdgeManager.currentTime())); - p.addColumn(TEST_FAM, Bytes.toBytes("someQualifier"), Bytes.toBytes("someString")); - originalTable.put(p); - - // Verify that the new row is not in the restored table - Assert.assertEquals("The row count of the original table was not modified by the put", - countOriginalTable + 1, countRows(originalTable)); - Assert.assertEquals( - "The row count of the cloned table changed as a result of addition to the original", - clonedTableRowCount, countRows(clonedTable)); - - Put p2 = new Put(Bytes.toBytes("new-row-" + EnvironmentEdgeManager.currentTime())); - p2.addColumn(TEST_FAM, Bytes.toBytes("someQualifier"), Bytes.toBytes("someString")); - clonedTable.put(p2); - - // Verify that the row is not added to the original table. - Assert.assertEquals( - "The row count of the original table was modified by the put to the clone", - countOriginalTable + 1, countRows(originalTable)); - Assert.assertEquals("The row count of the cloned table was not modified by the put", - clonedTableRowCount + 1, countRows(clonedTable)); - } - } - - /** - * Do a split, and verify that this only affects one table - */ - private void runTestRegionOperationsIndependent() throws Exception { - // Verify that region information is the same pre-split - UTIL.getConnection().clearRegionLocationCache(); - List originalTableHRegions = admin.getRegions(originalTableName); - - final int originalRegionCount = originalTableHRegions.size(); - final int cloneTableRegionCount = admin.getRegions(cloneTableName).size(); - Assert.assertEquals( - "The number of regions in the cloned table is different than in the original table.", - originalRegionCount, cloneTableRegionCount); - - // Split a region on the parent table - admin.splitRegionAsync(originalTableHRegions.get(0).getRegionName()).get(); - waitOnSplit(UTIL.getConnection(), originalTable, 
originalRegionCount); - - // Verify that the cloned table region is not split - final int cloneTableRegionCount2 = admin.getRegions(cloneTableName).size(); - Assert.assertEquals( - "The number of regions in the cloned table changed though none of its regions were split.", - cloneTableRegionCount, cloneTableRegionCount2); - } - - /** - * Add metadata, and verify that this only affects one table - */ - private void runTestSnapshotMetadataChangesIndependent() throws Exception { - // Add a new column family to the original table - byte[] TEST_FAM_2 = Bytes.toBytes("fam2"); - ColumnFamilyDescriptor familyDescriptor = ColumnFamilyDescriptorBuilder.of(TEST_FAM_2); - - admin.disableTable(originalTableName); - admin.addColumnFamily(originalTableName, familyDescriptor); - - // Verify that it is not in the snapshot - admin.enableTable(originalTableName); - UTIL.waitTableAvailable(originalTableName); - - // get a description of the cloned table - // get a list of its families - // assert that the family is there - TableDescriptor originalTableDescriptor = originalTable.getDescriptor(); - TableDescriptor clonedTableDescriptor = admin.getDescriptor(cloneTableName); - - Assert.assertTrue("The original family was not found. There is something wrong. ", - originalTableDescriptor.hasColumnFamily(TEST_FAM)); - Assert.assertTrue("The original family was not found in the clone. There is something wrong. ", - clonedTableDescriptor.hasColumnFamily(TEST_FAM)); - - Assert.assertTrue("The new family was not found. ", - originalTableDescriptor.hasColumnFamily(TEST_FAM_2)); - Assert.assertTrue("The new family was not found. ", - !clonedTableDescriptor.hasColumnFamily(TEST_FAM_2)); - } - - /** - * Verify that deleting the snapshot does not affect either table. 
- */ - private void runTestSnapshotDeleteIndependent() throws Exception { - // Ensure the original table does not reference the HFiles anymore - admin.majorCompact(originalTableName); - - // Deleting the snapshot used to break the cloned table by deleting in-use HFiles - admin.deleteSnapshot(snapshotName); - - // Wait for cleaner run and DFS heartbeats so that anything that is deletable is fully deleted - Pattern pattern = Pattern.compile(snapshotNameAsString); - do { - Thread.sleep(5000); - } while (!admin.listSnapshots(pattern).isEmpty()); - - try (Table original = UTIL.getConnection().getTable(originalTableName)) { - try (Table clonedTable = UTIL.getConnection().getTable(cloneTableName)) { - // Verify that all regions of both tables are readable - final int origTableRowCount = countRows(original); - final int clonedTableRowCount = countRows(clonedTable); - Assert.assertEquals(origTableRowCount, clonedTableRowCount); - } - } - } - - protected Table createTable(final TableName table, byte[] family) throws Exception { - Table t = UTIL.createTable(table, family); - // Wait for everything to be ready with the table - UTIL.waitUntilAllRegionsAssigned(table); - - // At this point the table should be good to go. - return t; - } - - public void loadData(final Table table, byte[]... families) throws Exception { - UTIL.loadTable(originalTable, TEST_FAM); - } - - protected int countRows(final Table table, final byte[]... 
families) throws Exception { - return UTIL.countRows(table, families); - } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotDFSTemporaryDirectory.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotDFSTemporaryDirectory.java index adc2613a4d27..6099106548af 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotDFSTemporaryDirectory.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotDFSTemporaryDirectory.java @@ -21,15 +21,14 @@ import java.util.UUID; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HBaseParameterizedTestTemplate; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.master.snapshot.SnapshotManager; import org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy; import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils; import org.apache.hadoop.hbase.testclassification.LargeTests; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Tag; /** * This class tests that the use of a temporary snapshot directory supports snapshot functionality @@ -37,25 +36,26 @@ *

* This is an end-to-end test for the snapshot utility */ -@Category(LargeTests.class) -public class TestSnapshotDFSTemporaryDirectory extends TestSnapshotTemporaryDirectory { +@Tag(LargeTests.TAG) +@HBaseParameterizedTestTemplate(name = "[{index}]: manifestVersion = {0}") +public class TestSnapshotDFSTemporaryDirectory extends SnapshotTemporaryDirectoryTestBase { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestSnapshotDFSTemporaryDirectory.class); + public TestSnapshotDFSTemporaryDirectory(int manifestVersion) { + super(manifestVersion); + } /** * Setup the config for the cluster * @throws Exception on failure */ - @BeforeClass + @BeforeAll public static void setupCluster() throws Exception { setupConf(UTIL.getConfiguration()); UTIL.startMiniCluster(NUM_RS); admin = UTIL.getAdmin(); } - private static void setupConf(Configuration conf) throws IOException { + protected static void setupConf(Configuration conf) throws IOException { // disable the ui conf.setInt("hbase.regionsever.info.port", -1); // change the flush size to a small amount, regulating number of store files diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotFromClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotFromClient.java index 8abb4d754a7a..cf6c5ea34151 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotFromClient.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotFromClient.java @@ -17,464 +17,29 @@ */ package org.apache.hadoop.hbase.client; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; - -import java.util.ArrayList; -import java.util.Arrays; -import java.util.List; -import java.util.regex.Pattern; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.FileSystem; -import 
org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.HBaseClassTestRule; -import org.apache.hadoop.hbase.HBaseTestingUtil; -import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.TableNameTestRule; -import org.apache.hadoop.hbase.TableNotFoundException; -import org.apache.hadoop.hbase.master.snapshot.SnapshotManager; -import org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy; +import org.apache.hadoop.hbase.HBaseParameterizedTestTemplate; import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerFactory; -import org.apache.hadoop.hbase.snapshot.SnapshotCreationException; -import org.apache.hadoop.hbase.snapshot.SnapshotDoesNotExistException; -import org.apache.hadoop.hbase.snapshot.SnapshotManifestV1; -import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils; import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.LargeTests; -import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.hbase.util.CommonFSUtils; -import org.junit.After; -import org.junit.AfterClass; -import org.junit.Before; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Rule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -import org.junit.runners.Parameterized.Parameter; -import org.junit.runners.Parameterized.Parameters; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import org.apache.hbase.thirdparty.com.google.common.collect.Lists; - -import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; - -/** - * Test create/using/deleting snapshots from the client - *

- * This is an end-to-end test for the snapshot utility - */ -@RunWith(Parameterized.class) -@Category({ LargeTests.class, ClientTests.class }) -public class TestSnapshotFromClient { - - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestSnapshotFromClient.class); +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Tag; - private static final Logger LOG = LoggerFactory.getLogger(TestSnapshotFromClient.class); +@Tag(LargeTests.TAG) +@Tag(ClientTests.TAG) +@HBaseParameterizedTestTemplate(name = "{index}: tracker={0}") +public class TestSnapshotFromClient extends SnapshotFromClientTestBase { - protected static final HBaseTestingUtil UTIL = new HBaseTestingUtil(); - protected static final int NUM_RS = 2; - protected static final String STRING_TABLE_NAME = "test"; - protected static final byte[] TEST_FAM = Bytes.toBytes("fam"); - protected static final TableName TABLE_NAME = TableName.valueOf(STRING_TABLE_NAME); - private static final Pattern MATCH_ALL = Pattern.compile(".*"); - - @Rule - public TableNameTestRule name = new TableNameTestRule(); - - @Parameter - public StoreFileTrackerFactory.Trackers trackerImpl; - - @Parameters(name = "{index}: tracker={0}") - public static List params() { - return Arrays.asList(new Object[] { StoreFileTrackerFactory.Trackers.DEFAULT }, - new Object[] { StoreFileTrackerFactory.Trackers.FILE }); + public TestSnapshotFromClient(StoreFileTrackerFactory.Trackers trackerImpl) { + super(trackerImpl); } /** * Setup the config for the cluster * @throws Exception on failure */ - @BeforeClass + @BeforeAll public static void setupCluster() throws Exception { setupConf(UTIL.getConfiguration()); UTIL.startMiniCluster(NUM_RS); } - - protected static void setupConf(Configuration conf) { - // disable the ui - conf.setInt("hbase.regionsever.info.port", -1); - // change the flush size to a small amount, regulating number of store files - conf.setInt("hbase.hregion.memstore.flush.size", 
25000); - // so make sure we get a compaction when doing a load, but keep around some - // files in the store - conf.setInt("hbase.hstore.compaction.min", 10); - conf.setInt("hbase.hstore.compactionThreshold", 10); - // block writes if we get to 12 store files - conf.setInt("hbase.hstore.blockingStoreFiles", 12); - // Enable snapshot - conf.setBoolean(SnapshotManager.HBASE_SNAPSHOT_ENABLED, true); - conf.set(HConstants.HBASE_REGION_SPLIT_POLICY_KEY, - ConstantSizeRegionSplitPolicy.class.getName()); - } - - @Before - public void setup() throws Exception { - createTable(); - } - - protected void createTable() throws Exception { - TableDescriptor htd = - TableDescriptorBuilder.newBuilder(TABLE_NAME).setRegionReplication(getNumReplicas()) - .setValue(StoreFileTrackerFactory.TRACKER_IMPL, trackerImpl.name()).build(); - UTIL.createTable(htd, new byte[][] { TEST_FAM }, null); - } - - protected int getNumReplicas() { - return 1; - } - - @After - public void tearDown() throws Exception { - UTIL.deleteTable(TABLE_NAME); - SnapshotTestingUtils.deleteAllSnapshots(UTIL.getAdmin()); - SnapshotTestingUtils.deleteArchiveDirectory(UTIL); - } - - @AfterClass - public static void cleanupTest() throws Exception { - try { - UTIL.shutdownMiniCluster(); - } catch (Exception e) { - LOG.warn("failure shutting down cluster", e); - } - } - - /** - * Test snapshotting not allowed hbase:meta and -ROOT- - */ - @Test(expected = IllegalArgumentException.class) - public void testMetaTablesSnapshot() throws Exception { - UTIL.getAdmin().snapshot("metaSnapshot", TableName.META_TABLE_NAME); - } - - /** - * Test HBaseAdmin#deleteSnapshots(String) which deletes snapshots whose names match the parameter - */ - @Test - public void testSnapshotDeletionWithRegex() throws Exception { - Admin admin = UTIL.getAdmin(); - // make sure we don't fail on listing snapshots - SnapshotTestingUtils.assertNoSnapshots(admin); - - // put some stuff in the table - Table table = UTIL.getConnection().getTable(TABLE_NAME); - 
UTIL.loadTable(table, TEST_FAM); - table.close(); - - String snapshot1 = "TableSnapshot1"; - admin.snapshot(snapshot1, TABLE_NAME); - LOG.debug("Snapshot1 completed."); - - String snapshot2 = "TableSnapshot2"; - admin.snapshot(snapshot2, TABLE_NAME); - LOG.debug("Snapshot2 completed."); - - String snapshot3 = "3rdTableSnapshot"; - admin.snapshot(snapshot3, TABLE_NAME); - LOG.debug(snapshot3 + " completed."); - - // delete the first two snapshots - admin.deleteSnapshots(Pattern.compile("TableSnapshot.*")); - List snapshots = admin.listSnapshots(); - assertEquals(1, snapshots.size()); - assertEquals(snapshot3, snapshots.get(0).getName()); - - admin.deleteSnapshot(snapshot3); - admin.close(); - } - - /** - * Test snapshotting a table that is offline - */ - @Test - public void testOfflineTableSnapshot() throws Exception { - Admin admin = UTIL.getAdmin(); - // make sure we don't fail on listing snapshots - SnapshotTestingUtils.assertNoSnapshots(admin); - - // put some stuff in the table - Table table = UTIL.getConnection().getTable(TABLE_NAME); - UTIL.loadTable(table, TEST_FAM, false); - - LOG.debug("FS state before disable:"); - CommonFSUtils.logFileSystemState(UTIL.getTestFileSystem(), - CommonFSUtils.getRootDir(UTIL.getConfiguration()), LOG); - // XXX if this is flakey, might want to consider using the async version and looping as - // disableTable can succeed and still timeout. 
- admin.disableTable(TABLE_NAME); - - LOG.debug("FS state before snapshot:"); - CommonFSUtils.logFileSystemState(UTIL.getTestFileSystem(), - CommonFSUtils.getRootDir(UTIL.getConfiguration()), LOG); - - // take a snapshot of the disabled table - final String SNAPSHOT_NAME = "offlineTableSnapshot"; - String snapshot = SNAPSHOT_NAME; - - admin.snapshot(new SnapshotDescription(SNAPSHOT_NAME, TABLE_NAME, SnapshotType.DISABLED, null, - -1, SnapshotManifestV1.DESCRIPTOR_VERSION, null)); - LOG.debug("Snapshot completed."); - - // make sure we have the snapshot - List snapshots = - SnapshotTestingUtils.assertOneSnapshotThatMatches(admin, snapshot, TABLE_NAME); - - // make sure its a valid snapshot - FileSystem fs = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getFileSystem(); - Path rootDir = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getRootDir(); - LOG.debug("FS state after snapshot:"); - CommonFSUtils.logFileSystemState(UTIL.getTestFileSystem(), - CommonFSUtils.getRootDir(UTIL.getConfiguration()), LOG); - SnapshotTestingUtils.confirmSnapshotValid( - ProtobufUtil.createHBaseProtosSnapshotDesc(snapshots.get(0)), TABLE_NAME, TEST_FAM, rootDir, - admin, fs); - - admin.deleteSnapshot(snapshot); - snapshots = admin.listSnapshots(); - SnapshotTestingUtils.assertNoSnapshots(admin); - } - - @Test - public void testSnapshotFailsOnNonExistantTable() throws Exception { - Admin admin = UTIL.getAdmin(); - // make sure we don't fail on listing snapshots - SnapshotTestingUtils.assertNoSnapshots(admin); - String tableName = "_not_a_table"; - - // make sure the table doesn't exist - boolean fail = false; - do { - try { - admin.getDescriptor(TableName.valueOf(tableName)); - fail = true; - LOG.error("Table:" + tableName + " already exists, checking a new name"); - tableName = tableName + "!"; - } catch (TableNotFoundException e) { - fail = false; - } - } while (fail); - - // snapshot the non-existant table - try { - admin.snapshot("fail", 
TableName.valueOf(tableName)); - fail("Snapshot succeeded even though there is not table."); - } catch (SnapshotCreationException e) { - LOG.info("Correctly failed to snapshot a non-existant table:" + e.getMessage()); - } - } - - @Test - public void testOfflineTableSnapshotWithEmptyRegions() throws Exception { - // test with an empty table with one region - - Admin admin = UTIL.getAdmin(); - // make sure we don't fail on listing snapshots - SnapshotTestingUtils.assertNoSnapshots(admin); - - LOG.debug("FS state before disable:"); - CommonFSUtils.logFileSystemState(UTIL.getTestFileSystem(), - CommonFSUtils.getRootDir(UTIL.getConfiguration()), LOG); - admin.disableTable(TABLE_NAME); - - LOG.debug("FS state before snapshot:"); - CommonFSUtils.logFileSystemState(UTIL.getTestFileSystem(), - CommonFSUtils.getRootDir(UTIL.getConfiguration()), LOG); - - // take a snapshot of the disabled table - String snapshot = "testOfflineTableSnapshotWithEmptyRegions"; - admin.snapshot(snapshot, TABLE_NAME); - LOG.debug("Snapshot completed."); - - // make sure we have the snapshot - List snapshots = - SnapshotTestingUtils.assertOneSnapshotThatMatches(admin, snapshot, TABLE_NAME); - - // make sure its a valid snapshot - FileSystem fs = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getFileSystem(); - Path rootDir = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getRootDir(); - LOG.debug("FS state after snapshot:"); - CommonFSUtils.logFileSystemState(UTIL.getTestFileSystem(), - CommonFSUtils.getRootDir(UTIL.getConfiguration()), LOG); - - List emptyCfs = Lists.newArrayList(TEST_FAM); // no file in the region - List nonEmptyCfs = Lists.newArrayList(); - SnapshotTestingUtils.confirmSnapshotValid( - ProtobufUtil.createHBaseProtosSnapshotDesc(snapshots.get(0)), TABLE_NAME, nonEmptyCfs, - emptyCfs, rootDir, admin, fs); - - admin.deleteSnapshot(snapshot); - snapshots = admin.listSnapshots(); - SnapshotTestingUtils.assertNoSnapshots(admin); - } - - @Test - public void 
testListTableSnapshots() throws Exception { - Admin admin = null; - final TableName tableName = name.getTableName(); - try { - admin = UTIL.getAdmin(); - - TableDescriptor htd = TableDescriptorBuilder.newBuilder(tableName).build(); - UTIL.createTable(htd, new byte[][] { TEST_FAM }, UTIL.getConfiguration()); - - String table1Snapshot1 = "Table1Snapshot1"; - admin.snapshot(table1Snapshot1, TABLE_NAME); - LOG.debug("Snapshot1 completed."); - - String table1Snapshot2 = "Table1Snapshot2"; - admin.snapshot(table1Snapshot2, TABLE_NAME); - LOG.debug("Snapshot2 completed."); - - String table2Snapshot1 = "Table2Snapshot1"; - admin.snapshot(table2Snapshot1, tableName); - LOG.debug(table2Snapshot1 + " completed."); - - List listTableSnapshots = - admin.listTableSnapshots(Pattern.compile("test.*"), MATCH_ALL); - List listTableSnapshotNames = new ArrayList<>(); - assertEquals(3, listTableSnapshots.size()); - for (SnapshotDescription s : listTableSnapshots) { - listTableSnapshotNames.add(s.getName()); - } - assertTrue(listTableSnapshotNames.contains(table1Snapshot1)); - assertTrue(listTableSnapshotNames.contains(table1Snapshot2)); - assertTrue(listTableSnapshotNames.contains(table2Snapshot1)); - } finally { - if (admin != null) { - try { - admin.deleteSnapshots(Pattern.compile("Table.*")); - } catch (SnapshotDoesNotExistException ignore) { - } - if (admin.tableExists(tableName)) { - UTIL.deleteTable(tableName); - } - admin.close(); - } - } - } - - @Test - public void testListTableSnapshotsWithRegex() throws Exception { - Admin admin = null; - try { - admin = UTIL.getAdmin(); - - String table1Snapshot1 = "Table1Snapshot1"; - admin.snapshot(table1Snapshot1, TABLE_NAME); - LOG.debug("Snapshot1 completed."); - - String table1Snapshot2 = "Table1Snapshot2"; - admin.snapshot(table1Snapshot2, TABLE_NAME); - LOG.debug("Snapshot2 completed."); - - String table2Snapshot1 = "Table2Snapshot1"; - admin.snapshot(table2Snapshot1, TABLE_NAME); - LOG.debug(table2Snapshot1 + " completed."); - - 
List listTableSnapshots = - admin.listTableSnapshots(Pattern.compile("test.*"), Pattern.compile("Table1.*")); - List listTableSnapshotNames = new ArrayList<>(); - assertEquals(2, listTableSnapshots.size()); - for (SnapshotDescription s : listTableSnapshots) { - listTableSnapshotNames.add(s.getName()); - } - assertTrue(listTableSnapshotNames.contains(table1Snapshot1)); - assertTrue(listTableSnapshotNames.contains(table1Snapshot2)); - assertFalse(listTableSnapshotNames.contains(table2Snapshot1)); - } finally { - if (admin != null) { - try { - admin.deleteSnapshots(Pattern.compile("Table.*")); - } catch (SnapshotDoesNotExistException ignore) { - } - admin.close(); - } - } - } - - @Test - public void testDeleteTableSnapshots() throws Exception { - Admin admin = null; - final TableName tableName = name.getTableName(); - try { - admin = UTIL.getAdmin(); - - TableDescriptor htd = TableDescriptorBuilder.newBuilder(tableName).build(); - UTIL.createTable(htd, new byte[][] { TEST_FAM }, UTIL.getConfiguration()); - - String table1Snapshot1 = "Table1Snapshot1"; - admin.snapshot(table1Snapshot1, TABLE_NAME); - LOG.debug("Snapshot1 completed."); - - String table1Snapshot2 = "Table1Snapshot2"; - admin.snapshot(table1Snapshot2, TABLE_NAME); - LOG.debug("Snapshot2 completed."); - - String table2Snapshot1 = "Table2Snapshot1"; - admin.snapshot(table2Snapshot1, tableName); - LOG.debug(table2Snapshot1 + " completed."); - - Pattern tableNamePattern = Pattern.compile("test.*"); - admin.deleteTableSnapshots(tableNamePattern, MATCH_ALL); - assertEquals(0, admin.listTableSnapshots(tableNamePattern, MATCH_ALL).size()); - } finally { - if (admin != null) { - if (admin.tableExists(tableName)) { - UTIL.deleteTable(tableName); - } - admin.close(); - } - } - } - - @Test - public void testDeleteTableSnapshotsWithRegex() throws Exception { - Admin admin = null; - Pattern tableNamePattern = Pattern.compile("test.*"); - try { - admin = UTIL.getAdmin(); - - String table1Snapshot1 = "Table1Snapshot1"; - 
admin.snapshot(table1Snapshot1, TABLE_NAME); - LOG.debug("Snapshot1 completed."); - - String table1Snapshot2 = "Table1Snapshot2"; - admin.snapshot(table1Snapshot2, TABLE_NAME); - LOG.debug("Snapshot2 completed."); - - String table2Snapshot1 = "Table2Snapshot1"; - admin.snapshot(table2Snapshot1, TABLE_NAME); - LOG.debug(table2Snapshot1 + " completed."); - - admin.deleteTableSnapshots(tableNamePattern, Pattern.compile("Table1.*")); - assertEquals(1, admin.listTableSnapshots(tableNamePattern, MATCH_ALL).size()); - } finally { - if (admin != null) { - try { - admin.deleteTableSnapshots(tableNamePattern, MATCH_ALL); - } catch (SnapshotDoesNotExistException ignore) { - } - admin.close(); - } - } - } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotFromClientWithRegionReplicas.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotFromClientWithRegionReplicas.java index ed1830759af1..2ce004a94317 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotFromClientWithRegionReplicas.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotFromClientWithRegionReplicas.java @@ -17,18 +17,31 @@ */ package org.apache.hadoop.hbase.client; -import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HBaseParameterizedTestTemplate; +import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerFactory; import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.LargeTests; -import org.junit.ClassRule; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Tag; -@Category({ LargeTests.class, ClientTests.class }) -public class TestSnapshotFromClientWithRegionReplicas extends TestSnapshotFromClient { +@Tag(LargeTests.TAG) +@Tag(ClientTests.TAG) +@HBaseParameterizedTestTemplate(name = "{index}: tracker={0}") +public class 
TestSnapshotFromClientWithRegionReplicas extends SnapshotFromClientTestBase { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestSnapshotFromClientWithRegionReplicas.class); + public TestSnapshotFromClientWithRegionReplicas(StoreFileTrackerFactory.Trackers trackerImpl) { + super(trackerImpl); + } + + /** + * Setup the config for the cluster + * @throws Exception on failure + */ + @BeforeAll + public static void setupCluster() throws Exception { + setupConf(UTIL.getConfiguration()); + UTIL.startMiniCluster(NUM_RS); + } @Override protected int getNumReplicas() { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotMetadata.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotMetadata.java index 0def9018e78d..7e6a80efe8e6 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotMetadata.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotMetadata.java @@ -17,8 +17,8 @@ */ package org.apache.hadoop.hbase.client; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; import java.util.ArrayList; import java.util.Collections; @@ -26,7 +26,6 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableName; @@ -39,26 +38,22 @@ import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; -import org.junit.After; -import org.junit.AfterClass; -import org.junit.Before; -import org.junit.BeforeClass; -import 
org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** * Test class to verify that metadata is consistent before and after a snapshot attempt. */ -@Category({ MediumTests.class, ClientTests.class }) +@Tag(MediumTests.TAG) +@Tag(ClientTests.TAG) public class TestSnapshotMetadata { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestSnapshotMetadata.class); - private static final Logger LOG = LoggerFactory.getLogger(TestSnapshotMetadata.class); private static final HBaseTestingUtil UTIL = new HBaseTestingUtil(); @@ -96,7 +91,7 @@ public class TestSnapshotMetadata { private static FileSystem fs; private static Path rootDir; - @BeforeClass + @BeforeAll public static void setupCluster() throws Exception { setupConf(UTIL.getConfiguration()); UTIL.startMiniCluster(NUM_RS); @@ -105,7 +100,7 @@ public static void setupCluster() throws Exception { rootDir = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getRootDir(); } - @AfterClass + @AfterAll public static void cleanupTest() throws Exception { try { UTIL.shutdownMiniCluster(); @@ -134,13 +129,13 @@ private static void setupConf(Configuration conf) { ConstantSizeRegionSplitPolicy.class.getName()); } - @Before + @BeforeEach public void setup() throws Exception { admin = UTIL.getAdmin(); createTableWithNonDefaultProperties(); } - @After + @AfterEach public void tearDown() throws Exception { SnapshotTestingUtils.deleteAllSnapshots(admin); } @@ -288,8 +283,8 @@ private void runRestoreWithAdditionalMetadata(boolean changeMetadata, boolean ad admin.disableTable(originalTableName); ColumnFamilyDescriptor familyDescriptor = 
ColumnFamilyDescriptorBuilder.of(newFamilyName); admin.addColumnFamily(originalTableName, familyDescriptor); - assertTrue("New column family was not added.", - admin.getDescriptor(originalTableName).toString().contains(newFamilyNameAsString)); + assertTrue(admin.getDescriptor(originalTableName).toString().contains(newFamilyNameAsString), + "New column family was not added."); } // restore it diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotTemporaryDirectory.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotTemporaryDirectory.java index 8f8d0d554431..26d080e8df2f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotTemporaryDirectory.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotTemporaryDirectory.java @@ -17,456 +17,27 @@ */ package org.apache.hadoop.hbase.client; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; - -import java.io.File; -import java.io.IOException; -import java.nio.file.Paths; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Iterator; -import java.util.List; -import java.util.UUID; -import java.util.regex.Pattern; -import org.apache.commons.io.FileUtils; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.HBaseClassTestRule; -import org.apache.hadoop.hbase.HBaseTestingUtil; -import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.master.snapshot.SnapshotManager; -import org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy; -import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils; -import org.apache.hadoop.hbase.snapshot.SnapshotDoesNotExistException; -import org.apache.hadoop.hbase.snapshot.SnapshotManifestV1; -import 
org.apache.hadoop.hbase.snapshot.SnapshotManifestV2; -import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils; +import org.apache.hadoop.hbase.HBaseParameterizedTestTemplate; import org.apache.hadoop.hbase.testclassification.LargeTests; -import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.hbase.util.CommonFSUtils; -import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; -import org.junit.After; -import org.junit.AfterClass; -import org.junit.Before; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import org.apache.hbase.thirdparty.com.google.common.collect.Lists; - -import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Tag; -/** - * This class tests that the use of a temporary snapshot directory supports snapshot functionality - * while the temporary directory is on a different file system than the root directory - *

- * This is an end-to-end test for the snapshot utility - */ -@Category(LargeTests.class) -@RunWith(Parameterized.class) -public class TestSnapshotTemporaryDirectory { - - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestSnapshotTemporaryDirectory.class); +@Tag(LargeTests.TAG) +@HBaseParameterizedTestTemplate(name = "[{index}]: manifestVersion = {0}") +public class TestSnapshotTemporaryDirectory extends SnapshotTemporaryDirectoryTestBase { - @Parameterized.Parameters - public static Iterable data() { - return Arrays.asList(SnapshotManifestV1.DESCRIPTOR_VERSION, - SnapshotManifestV2.DESCRIPTOR_VERSION); + public TestSnapshotTemporaryDirectory(int manifestVersion) { + super(manifestVersion); } - @Parameterized.Parameter - public int manifestVersion; - - private static final Logger LOG = LoggerFactory.getLogger(TestSnapshotTemporaryDirectory.class); - protected static final int NUM_RS = 2; - protected static String TEMP_DIR = - Paths.get("").toAbsolutePath().toString() + Path.SEPARATOR + UUID.randomUUID().toString(); - - protected static Admin admin; - protected static final HBaseTestingUtil UTIL = new HBaseTestingUtil(); - protected static final String STRING_TABLE_NAME = "test"; - protected static final byte[] TEST_FAM = Bytes.toBytes("fam"); - protected static final TableName TABLE_NAME = TableName.valueOf(STRING_TABLE_NAME); - /** * Setup the config for the cluster * @throws Exception on failure */ - @BeforeClass + @BeforeAll public static void setupCluster() throws Exception { setupConf(UTIL.getConfiguration()); UTIL.startMiniCluster(NUM_RS); admin = UTIL.getAdmin(); } - - private static void setupConf(Configuration conf) { - // disable the ui - conf.setInt("hbase.regionsever.info.port", -1); - // change the flush size to a small amount, regulating number of store files - conf.setInt("hbase.hregion.memstore.flush.size", 25000); - // so make sure we get a compaction when doing a load, but keep around some - // 
files in the store - conf.setInt("hbase.hstore.compaction.min", 10); - conf.setInt("hbase.hstore.compactionThreshold", 10); - // block writes if we get to 12 store files - conf.setInt("hbase.hstore.blockingStoreFiles", 12); - // Enable snapshot - conf.setBoolean(SnapshotManager.HBASE_SNAPSHOT_ENABLED, true); - conf.set(HConstants.HBASE_REGION_SPLIT_POLICY_KEY, - ConstantSizeRegionSplitPolicy.class.getName()); - conf.set(SnapshotDescriptionUtils.SNAPSHOT_WORKING_DIR, - "file://" + new Path(TEMP_DIR, ".tmpDir").toUri()); - } - - @Before - public void setup() throws Exception { - TableDescriptor htd = - TableDescriptorBuilder.newBuilder(TABLE_NAME).setRegionReplication(getNumReplicas()).build(); - UTIL.createTable(htd, new byte[][] { TEST_FAM }, UTIL.getConfiguration()); - } - - protected int getNumReplicas() { - return 1; - } - - @After - public void tearDown() throws Exception { - UTIL.deleteTable(TABLE_NAME); - SnapshotTestingUtils.deleteAllSnapshots(UTIL.getAdmin()); - SnapshotTestingUtils.deleteArchiveDirectory(UTIL); - } - - @AfterClass - public static void cleanupTest() { - try { - UTIL.shutdownMiniCluster(); - FileUtils.deleteDirectory(new File(TEMP_DIR)); - } catch (Exception e) { - LOG.warn("failure shutting down cluster", e); - } - } - - @Test - public void testRestoreDisabledSnapshot() throws IOException, InterruptedException { - long tid = EnvironmentEdgeManager.currentTime(); - TableName tableName = TableName.valueOf("testtb-" + tid); - String emptySnapshot = "emptySnaptb-" + tid; - String snapshotName0 = "snaptb0-" + tid; - String snapshotName1 = "snaptb1-" + tid; - int snapshot0Rows; - int snapshot1Rows; - - // create Table and disable it - SnapshotTestingUtils.createTable(UTIL, tableName, getNumReplicas(), TEST_FAM); - admin.disableTable(tableName); - - // take an empty snapshot - takeSnapshot(tableName, emptySnapshot, true); - - // enable table and insert data - admin.enableTable(tableName); - SnapshotTestingUtils.loadData(UTIL, tableName, 500, 
TEST_FAM); - try (Table table = UTIL.getConnection().getTable(tableName)) { - snapshot0Rows = UTIL.countRows(table); - } - admin.disableTable(tableName); - - // take a snapshot - takeSnapshot(tableName, snapshotName0, true); - - // enable table and insert more data - admin.enableTable(tableName); - SnapshotTestingUtils.loadData(UTIL, tableName, 500, TEST_FAM); - try (Table table = UTIL.getConnection().getTable(tableName)) { - snapshot1Rows = UTIL.countRows(table); - } - - SnapshotTestingUtils.verifyRowCount(UTIL, tableName, snapshot1Rows); - admin.disableTable(tableName); - takeSnapshot(tableName, snapshotName1, true); - - // Restore from snapshot-0 - admin.restoreSnapshot(snapshotName0); - admin.enableTable(tableName); - SnapshotTestingUtils.verifyRowCount(UTIL, tableName, snapshot0Rows); - SnapshotTestingUtils.verifyReplicasCameOnline(tableName, admin, getNumReplicas()); - - // Restore from emptySnapshot - admin.disableTable(tableName); - admin.restoreSnapshot(emptySnapshot); - admin.enableTable(tableName); - SnapshotTestingUtils.verifyRowCount(UTIL, tableName, 0); - SnapshotTestingUtils.verifyReplicasCameOnline(tableName, admin, getNumReplicas()); - - // Restore from snapshot-1 - admin.disableTable(tableName); - admin.restoreSnapshot(snapshotName1); - admin.enableTable(tableName); - SnapshotTestingUtils.verifyRowCount(UTIL, tableName, snapshot1Rows); - SnapshotTestingUtils.verifyReplicasCameOnline(tableName, admin, getNumReplicas()); - - // Restore from snapshot-1 - UTIL.deleteTable(tableName); - admin.restoreSnapshot(snapshotName1); - SnapshotTestingUtils.verifyRowCount(UTIL, tableName, snapshot1Rows); - SnapshotTestingUtils.verifyReplicasCameOnline(tableName, admin, getNumReplicas()); - } - - @Test - public void testRestoreEnabledSnapshot() throws IOException, InterruptedException { - long tid = EnvironmentEdgeManager.currentTime(); - TableName tableName = TableName.valueOf("testtb-" + tid); - String emptySnapshot = "emptySnaptb-" + tid; - String snapshotName0 
= "snaptb0-" + tid; - String snapshotName1 = "snaptb1-" + tid; - int snapshot0Rows; - int snapshot1Rows; - - // create Table - SnapshotTestingUtils.createTable(UTIL, tableName, getNumReplicas(), TEST_FAM); - - // take an empty snapshot - takeSnapshot(tableName, emptySnapshot, false); - - // Insert data - SnapshotTestingUtils.loadData(UTIL, tableName, 500, TEST_FAM); - try (Table table = UTIL.getConnection().getTable(tableName)) { - snapshot0Rows = UTIL.countRows(table); - } - - // take a snapshot - takeSnapshot(tableName, snapshotName0, false); - - // Insert more data - SnapshotTestingUtils.loadData(UTIL, tableName, 500, TEST_FAM); - try (Table table = UTIL.getConnection().getTable(tableName)) { - snapshot1Rows = UTIL.countRows(table); - } - - SnapshotTestingUtils.verifyRowCount(UTIL, tableName, snapshot1Rows); - takeSnapshot(tableName, snapshotName1, false); - - // Restore from snapshot-0 - admin.disableTable(tableName); - admin.restoreSnapshot(snapshotName0); - admin.enableTable(tableName); - SnapshotTestingUtils.verifyRowCount(UTIL, tableName, snapshot0Rows); - SnapshotTestingUtils.verifyReplicasCameOnline(tableName, admin, getNumReplicas()); - - // Restore from emptySnapshot - admin.disableTable(tableName); - admin.restoreSnapshot(emptySnapshot); - admin.enableTable(tableName); - SnapshotTestingUtils.verifyRowCount(UTIL, tableName, 0); - SnapshotTestingUtils.verifyReplicasCameOnline(tableName, admin, getNumReplicas()); - - // Restore from snapshot-1 - admin.disableTable(tableName); - admin.restoreSnapshot(snapshotName1); - admin.enableTable(tableName); - SnapshotTestingUtils.verifyRowCount(UTIL, tableName, snapshot1Rows); - SnapshotTestingUtils.verifyReplicasCameOnline(tableName, admin, getNumReplicas()); - - // Restore from snapshot-1 - UTIL.deleteTable(tableName); - admin.restoreSnapshot(snapshotName1); - SnapshotTestingUtils.verifyRowCount(UTIL, tableName, snapshot1Rows); - SnapshotTestingUtils.verifyReplicasCameOnline(tableName, admin, getNumReplicas()); - 
} - - /** - * Test snapshotting a table that is offline - * @throws Exception if snapshot does not complete successfully - */ - @Test - public void testOfflineTableSnapshot() throws Exception { - Admin admin = UTIL.getAdmin(); - // make sure we don't fail on listing snapshots - SnapshotTestingUtils.assertNoSnapshots(admin); - - // put some stuff in the table - Table table = UTIL.getConnection().getTable(TABLE_NAME); - UTIL.loadTable(table, TEST_FAM, false); - - LOG.debug("FS state before disable:"); - CommonFSUtils.logFileSystemState(UTIL.getTestFileSystem(), - CommonFSUtils.getRootDir(UTIL.getConfiguration()), LOG); - // XXX if this is flakey, might want to consider using the async version and looping as - // disableTable can succeed and still timeout. - admin.disableTable(TABLE_NAME); - - LOG.debug("FS state before snapshot:"); - CommonFSUtils.logFileSystemState(UTIL.getTestFileSystem(), - CommonFSUtils.getRootDir(UTIL.getConfiguration()), LOG); - - // take a snapshot of the disabled table - final String SNAPSHOT_NAME = "offlineTableSnapshot"; - String snapshot = SNAPSHOT_NAME; - takeSnapshot(TABLE_NAME, SNAPSHOT_NAME, true); - LOG.debug("Snapshot completed."); - - // make sure we have the snapshot - List snapshots = - SnapshotTestingUtils.assertOneSnapshotThatMatches(admin, snapshot, TABLE_NAME); - - // make sure its a valid snapshot - FileSystem fs = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getFileSystem(); - Path rootDir = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getRootDir(); - LOG.debug("FS state after snapshot:"); - CommonFSUtils.logFileSystemState(UTIL.getTestFileSystem(), - CommonFSUtils.getRootDir(UTIL.getConfiguration()), LOG); - - SnapshotTestingUtils.confirmSnapshotValid( - ProtobufUtil.createHBaseProtosSnapshotDesc(snapshots.get(0)), TABLE_NAME, TEST_FAM, rootDir, - admin, fs); - - admin.deleteSnapshot(snapshot); - SnapshotTestingUtils.assertNoSnapshots(admin); - } - - /** - * Tests that snapshot has correct contents 
by taking snapshot, cloning it, then affirming the - * contents of the original and cloned table match - * @throws Exception if snapshot does not complete successfully - */ - @Test - public void testSnapshotCloneContents() throws Exception { - // make sure we don't fail on listing snapshots - SnapshotTestingUtils.assertNoSnapshots(admin); - - // put some stuff in the table - Table table = UTIL.getConnection().getTable(TABLE_NAME); - UTIL.loadTable(table, TEST_FAM); - table.close(); - - String snapshot1 = "TableSnapshot1"; - takeSnapshot(TABLE_NAME, snapshot1, false); - LOG.debug("Snapshot1 completed."); - - TableName clone = TableName.valueOf("Table1Clone"); - admin.cloneSnapshot(snapshot1, clone, false); - - Scan original = new Scan(); - Scan cloned = new Scan(); - ResultScanner originalScan = admin.getConnection().getTable(TABLE_NAME).getScanner(original); - ResultScanner clonedScan = - admin.getConnection().getTable(TableName.valueOf("Table1Clone")).getScanner(cloned); - - Iterator i = originalScan.iterator(); - Iterator i2 = clonedScan.iterator(); - assertTrue(i.hasNext()); - while (i.hasNext()) { - assertTrue(i2.hasNext()); - assertEquals(Bytes.toString(i.next().getValue(TEST_FAM, new byte[] {})), - Bytes.toString(i2.next().getValue(TEST_FAM, new byte[] {}))); - } - assertFalse(i2.hasNext()); - admin.deleteSnapshot(snapshot1); - UTIL.deleteTable(clone); - admin.close(); - } - - @Test - public void testOfflineTableSnapshotWithEmptyRegion() throws Exception { - // test with an empty table with one region - - // make sure we don't fail on listing snapshots - SnapshotTestingUtils.assertNoSnapshots(admin); - - LOG.debug("FS state before disable:"); - CommonFSUtils.logFileSystemState(UTIL.getTestFileSystem(), - CommonFSUtils.getRootDir(UTIL.getConfiguration()), LOG); - admin.disableTable(TABLE_NAME); - - LOG.debug("FS state before snapshot:"); - CommonFSUtils.logFileSystemState(UTIL.getTestFileSystem(), - CommonFSUtils.getRootDir(UTIL.getConfiguration()), LOG); - - 
// take a snapshot of the disabled table - String snapshot = "testOfflineTableSnapshotWithEmptyRegion"; - takeSnapshot(TABLE_NAME, snapshot, true); - LOG.debug("Snapshot completed."); - - // make sure we have the snapshot - List snapshots = - SnapshotTestingUtils.assertOneSnapshotThatMatches(admin, snapshot, TABLE_NAME); - - // make sure its a valid snapshot - FileSystem fs = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getFileSystem(); - Path rootDir = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getRootDir(); - LOG.debug("FS state after snapshot:"); - CommonFSUtils.logFileSystemState(UTIL.getTestFileSystem(), - CommonFSUtils.getRootDir(UTIL.getConfiguration()), LOG); - - List emptyCfs = Lists.newArrayList(TEST_FAM); // no file in the region - List nonEmptyCfs = Lists.newArrayList(); - SnapshotTestingUtils.confirmSnapshotValid( - ProtobufUtil.createHBaseProtosSnapshotDesc(snapshots.get(0)), TABLE_NAME, nonEmptyCfs, - emptyCfs, rootDir, admin, fs); - - admin.deleteSnapshot(snapshot); - SnapshotTestingUtils.assertNoSnapshots(admin); - } - - // Ensures that the snapshot is transferred to the proper completed snapshot directory - @Test - public void testEnsureTemporaryDirectoryTransfer() throws Exception { - Admin admin = UTIL.getAdmin(); - TableName tableName2 = TableName.valueOf("testListTableSnapshots"); - try { - TableDescriptor htd = TableDescriptorBuilder.newBuilder(tableName2).build(); - UTIL.createTable(htd, new byte[][] { TEST_FAM }, UTIL.getConfiguration()); - - String table1Snapshot1 = "Table1Snapshot1"; - takeSnapshot(TABLE_NAME, table1Snapshot1, false); - LOG.debug("Snapshot1 completed."); - - String table1Snapshot2 = "Table1Snapshot2"; - takeSnapshot(TABLE_NAME, table1Snapshot2, false); - LOG.debug("Snapshot2 completed."); - - String table2Snapshot1 = "Table2Snapshot1"; - takeSnapshot(TABLE_NAME, table2Snapshot1, false); - LOG.debug("Table2Snapshot1 completed."); - - List listTableSnapshots = - 
admin.listTableSnapshots(Pattern.compile("test.*"), Pattern.compile(".*")); - List listTableSnapshotNames = new ArrayList(); - assertEquals(3, listTableSnapshots.size()); - for (SnapshotDescription s : listTableSnapshots) { - listTableSnapshotNames.add(s.getName()); - } - assertTrue(listTableSnapshotNames.contains(table1Snapshot1)); - assertTrue(listTableSnapshotNames.contains(table1Snapshot2)); - assertTrue(listTableSnapshotNames.contains(table2Snapshot1)); - } finally { - try { - admin.deleteSnapshots(Pattern.compile("Table.*")); - } catch (SnapshotDoesNotExistException ignore) { - } - if (admin.tableExists(tableName2)) { - UTIL.deleteTable(tableName2); - } - admin.close(); - } - } - - private void takeSnapshot(TableName tableName, String snapshotName, boolean disabled) - throws IOException { - SnapshotType type = disabled ? SnapshotType.DISABLED : SnapshotType.FLUSH; - SnapshotDescription desc = - new SnapshotDescription(snapshotName, tableName, type, null, -1, manifestVersion, null); - admin.snapshot(desc); - } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotTemporaryDirectoryWithRegionReplicas.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotTemporaryDirectoryWithRegionReplicas.java index da5e3a025a92..9bfa932c3192 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotTemporaryDirectoryWithRegionReplicas.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotTemporaryDirectoryWithRegionReplicas.java @@ -17,18 +17,30 @@ */ package org.apache.hadoop.hbase.client; -import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HBaseParameterizedTestTemplate; import org.apache.hadoop.hbase.testclassification.LargeTests; -import org.junit.ClassRule; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Tag; -@Category(LargeTests.class) +@Tag(LargeTests.TAG) 
+@HBaseParameterizedTestTemplate(name = "[{index}]: manifestVersion = {0}") public class TestSnapshotTemporaryDirectoryWithRegionReplicas - extends TestSnapshotTemporaryDirectory { + extends SnapshotTemporaryDirectoryTestBase { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestSnapshotTemporaryDirectoryWithRegionReplicas.class); + public TestSnapshotTemporaryDirectoryWithRegionReplicas(int manifestVersion) { + super(manifestVersion); + } + + /** + * Setup the config for the cluster + * @throws Exception on failure + */ + @BeforeAll + public static void setupCluster() throws Exception { + setupConf(UTIL.getConfiguration()); + UTIL.startMiniCluster(NUM_RS); + admin = UTIL.getAdmin(); + } @Override protected int getNumReplicas() { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotWithAcl.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotWithAcl.java index 06905ec8dc44..440cc34093fd 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotWithAcl.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotWithAcl.java @@ -17,20 +17,15 @@ */ package org.apache.hadoop.hbase.client; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.LargeTests; -import org.junit.ClassRule; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.Tag; -@Category({ ClientTests.class, LargeTests.class }) +@Tag(ClientTests.TAG) +@Tag(LargeTests.TAG) public class TestSnapshotWithAcl extends SnapshotWithAclTestBase { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestSnapshotWithAcl.class); - @Override protected void snapshot(String snapshotName, TableName tableName) throws Exception { 
TEST_UTIL.getAdmin().snapshot(snapshotName, tableName); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotWithAclAsyncAdmin.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotWithAclAsyncAdmin.java index 169fa68cd0b6..b7779d2427c0 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotWithAclAsyncAdmin.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotWithAclAsyncAdmin.java @@ -17,20 +17,15 @@ */ package org.apache.hadoop.hbase.client; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.LargeTests; -import org.junit.ClassRule; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.Tag; -@Category({ ClientTests.class, LargeTests.class }) +@Tag(ClientTests.TAG) +@Tag(LargeTests.TAG) public class TestSnapshotWithAclAsyncAdmin extends SnapshotWithAclTestBase { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestSnapshotWithAclAsyncAdmin.class); - @Override protected void snapshot(String snapshotName, TableName tableName) throws Exception { try (AsyncConnection conn = diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotWithTTLFromClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotWithTTLFromClient.java index 19861a786b77..beae9be49cfc 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotWithTTLFromClient.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotWithTTLFromClient.java @@ -17,17 +17,16 @@ */ package org.apache.hadoop.hbase.client; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; -import static 
org.junit.Assert.fail; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; import java.io.IOException; import java.util.HashMap; import java.util.List; import java.util.Map; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.master.snapshot.SnapshotManager; @@ -37,26 +36,22 @@ import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Threads; -import org.junit.After; -import org.junit.AfterClass; -import org.junit.Before; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** * Test restore/clone snapshots with TTL from the client */ -@Category({ LargeTests.class, ClientTests.class }) +@Tag(LargeTests.TAG) +@Tag(ClientTests.TAG) public class TestSnapshotWithTTLFromClient { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestSnapshotWithTTLFromClient.class); - private static final Logger LOG = LoggerFactory.getLogger(TestSnapshotWithTTLFromClient.class); private static final HBaseTestingUtil UTIL = new HBaseTestingUtil(); @@ -72,7 +67,7 @@ public class TestSnapshotWithTTLFromClient { * Setup the config for the cluster * @throws Exception on failure */ - @BeforeClass + @BeforeAll public static void setupCluster() throws 
Exception { setupConf(UTIL.getConfiguration()); UTIL.startMiniCluster(NUM_RS); @@ -86,7 +81,7 @@ protected static void setupConf(Configuration conf) { conf.setInt("hbase.master.cleaner.snapshot.interval", CHORE_INTERVAL_SECS * 60 * 1000); } - @Before + @BeforeEach public void setup() throws Exception { createTable(); } @@ -95,7 +90,7 @@ protected void createTable() throws Exception { UTIL.createTable(TABLE_NAME, new byte[][] { TEST_FAM }); } - @After + @AfterEach public void tearDown() throws Exception { UTIL.deleteTableIfAny(TABLE_NAME); UTIL.deleteTableIfAny(CLONED_TABLE_NAME); @@ -103,7 +98,7 @@ public void tearDown() throws Exception { SnapshotTestingUtils.deleteArchiveDirectory(UTIL); } - @AfterClass + @AfterAll public static void cleanupTest() throws Exception { try { UTIL.shutdownMiniCluster(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSplitOrMergeAtTableLevel.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSplitOrMergeAtTableLevel.java index c5c31c1d6fd7..90c33878776c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSplitOrMergeAtTableLevel.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSplitOrMergeAtTableLevel.java @@ -17,58 +17,57 @@ */ package org.apache.hadoop.hbase.client; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; import java.util.List; import java.util.concurrent.ExecutionException; import java.util.concurrent.Future; import java.util.concurrent.TimeUnit; import org.apache.hadoop.hbase.DoNotRetryIOException; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.TableName; import 
org.apache.hadoop.hbase.Waiter.ExplainingPredicate; import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.Bytes; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Rule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.rules.TestName; - -@Category({ MediumTests.class, ClientTests.class }) +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestInfo; + +@Tag(MediumTests.TAG) +@Tag(ClientTests.TAG) public class TestSplitOrMergeAtTableLevel { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestSplitOrMergeAtTableLevel.class); - private final static HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); private static byte[] FAMILY = Bytes.toBytes("testFamily"); - @Rule - public TestName name = new TestName(); + private String methodName; private static Admin admin; - @BeforeClass + @BeforeEach + public void setUp(TestInfo testInfo) { + this.methodName = testInfo.getTestMethod().get().getName(); + } + + @BeforeAll public static void setUpBeforeClass() throws Exception { TEST_UTIL.startMiniCluster(2); admin = TEST_UTIL.getAdmin(); } - @AfterClass + @AfterAll public static void tearDownAfterClass() throws Exception { TEST_UTIL.shutdownMiniCluster(); } @Test public void testTableSplitSwitch() throws Exception { - final TableName tableName = TableName.valueOf(name.getMethodName()); + final TableName tableName = TableName.valueOf(methodName); TableDescriptor tableDesc = TableDescriptorBuilder.newBuilder(tableName) .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY)).setSplitEnabled(false).build(); @@ -90,7 +89,7 @@ public void 
testTableSplitSwitch() throws Exception { @Test public void testTableSplitSwitchForPreSplittedTable() throws Exception { - final TableName tableName = TableName.valueOf(name.getMethodName()); + final TableName tableName = TableName.valueOf(methodName); // create a table with split disabled TableDescriptor tableDesc = TableDescriptorBuilder.newBuilder(tableName) @@ -112,7 +111,7 @@ public void testTableSplitSwitchForPreSplittedTable() throws Exception { @Test public void testTableMergeSwitch() throws Exception { - final TableName tableName = TableName.valueOf(name.getMethodName()); + final TableName tableName = TableName.valueOf(methodName); TableDescriptor tableDesc = TableDescriptorBuilder.newBuilder(tableName) .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY)).setMergeEnabled(false).build(); @@ -135,7 +134,7 @@ public void testTableMergeSwitch() throws Exception { @Test public void testTableMergeSwitchForPreSplittedTable() throws Exception { - final TableName tableName = TableName.valueOf(name.getMethodName()); + final TableName tableName = TableName.valueOf(methodName); TableDescriptor tableDesc = TableDescriptorBuilder.newBuilder(tableName) .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY)).setMergeEnabled(false).build(); @@ -166,7 +165,7 @@ private void trySplitAndEnsureItFails(final TableName tableName) throws Exceptio // expected to reach here // check and ensure that table does not get splitted assertTrue(admin.getRegions(tableName).size() == originalCount); - assertTrue("Expected DoNotRetryIOException!", ee.getCause() instanceof DoNotRetryIOException); + assertTrue(ee.getCause() instanceof DoNotRetryIOException, "Expected DoNotRetryIOException!"); } } @@ -226,7 +225,7 @@ private void tryMergeAndEnsureItFails(final TableName tableName) throws Exceptio // expected to reach here // check and ensure that region do not get merged assertTrue(admin.getRegions(tableName).size() == originalCount); - assertTrue("Expected DoNotRetryIOException!", 
ee.getCause() instanceof DoNotRetryIOException); + assertTrue(ee.getCause() instanceof DoNotRetryIOException, "Expected DoNotRetryIOException!"); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSplitOrMergeStatus.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSplitOrMergeStatus.java index cd9ff64f55d1..247cc0ea79eb 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSplitOrMergeStatus.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSplitOrMergeStatus.java @@ -17,10 +17,10 @@ */ package org.apache.hadoop.hbase.client; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; import java.io.IOException; import java.util.List; @@ -28,7 +28,6 @@ import java.util.concurrent.Future; import java.util.concurrent.TimeUnit; import org.apache.hadoop.hbase.DoNotRetryIOException; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; @@ -43,41 +42,36 @@ import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Threads; -import org.junit.After; -import org.junit.Before; -import org.junit.ClassRule; -import org.junit.Ignore; -import org.junit.Rule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.rules.TestName; - -@Category({ MediumTests.class, ClientTests.class }) +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import 
org.junit.jupiter.api.Disabled; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestInfo; + +@Tag(MediumTests.TAG) +@Tag(ClientTests.TAG) public class TestSplitOrMergeStatus { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestSplitOrMergeStatus.class); - private final static HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); private static byte[] FAMILY = Bytes.toBytes("testFamily"); - @Rule - public TestName name = new TestName(); + private String methodName; - @Before - public void setUp() throws Exception { + @BeforeEach + public void setUp(TestInfo info) throws Exception { + methodName = info.getTestMethod().get().getName(); TEST_UTIL.startMiniCluster(2); } - @After + @AfterEach public void tearDown() throws Exception { TEST_UTIL.shutdownMiniCluster(); } @Test public void testSplitSwitch() throws Exception { - final TableName tableName = TableName.valueOf(name.getMethodName()); + final TableName tableName = TableName.valueOf(methodName); Table t = TEST_UTIL.createTable(tableName, FAMILY); TEST_UTIL.loadTable(t, FAMILY, false); @@ -105,10 +99,10 @@ public void testSplitSwitch() throws Exception { admin.close(); } - @Ignore + @Disabled @Test public void testMergeSwitch() throws Exception { - final TableName tableName = TableName.valueOf(name.getMethodName()); + final TableName tableName = TableName.valueOf(methodName); Table t = TEST_UTIL.createTable(tableName, FAMILY); TEST_UTIL.loadTable(t, FAMILY, false); @@ -120,8 +114,8 @@ public void testMergeSwitch() throws Exception { while ((postSplitCount = admin.getRegions(tableName).size()) == originalCount) { Threads.sleep(1); } - assertTrue("originalCount=" + originalCount + ", newCount=" + postSplitCount, - originalCount != postSplitCount); + assertTrue(originalCount != postSplitCount, + "originalCount=" + originalCount + ", newCount=" + postSplitCount); // Merge switch is off so merge should NOT succeed. 
boolean result = admin.mergeSwitch(false, false); @@ -137,7 +131,7 @@ public void testMergeSwitch() throws Exception { // Expected. } int count = admin.getRegions(tableName).size(); - assertTrue("newCount=" + postSplitCount + ", count=" + count, postSplitCount == count); + assertTrue(postSplitCount == count, "newCount=" + postSplitCount + ", count=" + count); result = admin.mergeSwitch(true, false); regions = admin.getRegions(t.getName()); @@ -165,7 +159,7 @@ public void testMultiSwitches() throws IOException { public void testSplitRegionReplicaRitRecovery() throws Exception { int startRowNum = 11; int rowCount = 60; - final TableName tableName = TableName.valueOf(name.getMethodName()); + final TableName tableName = TableName.valueOf(methodName); final ProcedureExecutor procExec = getMasterProcedureExecutor(); TEST_UTIL.getAdmin().createTable(TableDescriptorBuilder.newBuilder(tableName) .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY)).setRegionReplication(2).build()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestTableFavoredNodes.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestTableFavoredNodes.java index 12fd58643797..5d222a95a1fb 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestTableFavoredNodes.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestTableFavoredNodes.java @@ -20,11 +20,11 @@ import static org.apache.hadoop.hbase.favored.FavoredNodesPlan.Position.PRIMARY; import static org.apache.hadoop.hbase.favored.FavoredNodesPlan.Position.SECONDARY; import static org.apache.hadoop.hbase.favored.FavoredNodesPlan.Position.TERTIARY; -import static org.junit.Assert.assertArrayEquals; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertNull; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static 
org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertTrue; import java.io.IOException; import java.net.InetSocketAddress; @@ -32,7 +32,6 @@ import java.util.Map; import java.util.concurrent.TimeUnit; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionLocation; @@ -52,27 +51,22 @@ import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.JVMClusterUtil; import org.apache.hadoop.hbase.util.Threads; -import org.junit.AfterClass; -import org.junit.Before; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Rule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.rules.TestName; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestInfo; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hbase.thirdparty.com.google.common.collect.Lists; import org.apache.hbase.thirdparty.com.google.common.collect.Maps; -@Category({ ClientTests.class, MediumTests.class }) +@Tag(ClientTests.TAG) +@Tag(MediumTests.TAG) public class TestTableFavoredNodes { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestTableFavoredNodes.class); - private static final Logger LOG = LoggerFactory.getLogger(TestTableFavoredNodes.class); private final static HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); @@ -82,12 +76,15 @@ public class TestTableFavoredNodes { private Admin admin; private final byte[][] splitKeys = new byte[][] { 
Bytes.toBytes(1), Bytes.toBytes(9) }; - private final int NUM_REGIONS = splitKeys.length + 1; - @Rule - public TestName name = new TestName(); + private String methodName; + + @BeforeEach + public void setupMethodName(TestInfo testInfo) { + this.methodName = testInfo.getTestMethod().get().getName(); + } - @BeforeClass + @BeforeAll public static void setupBeforeClass() throws Exception { Configuration conf = TEST_UTIL.getConfiguration(); // Setting FavoredNodeBalancer will enable favored nodes @@ -99,13 +96,13 @@ public static void setupBeforeClass() throws Exception { TEST_UTIL.getMiniHBaseCluster().waitForActiveAndReadyMaster(WAIT_TIMEOUT); } - @AfterClass + @AfterAll public static void tearDownAfterClass() throws Exception { TEST_UTIL.shutdownMiniCluster(); TEST_UTIL.cleanupTestDir(); } - @Before + @BeforeEach public void setup() throws IOException { fnm = TEST_UTIL.getMiniHBaseCluster().getMaster().getFavoredNodesManager(); admin = TEST_UTIL.getAdmin(); @@ -118,7 +115,7 @@ public void setup() throws IOException { */ @Test public void testCreateTable() throws Exception { - final TableName tableName = TableName.valueOf(name.getMethodName()); + final TableName tableName = TableName.valueOf(methodName); TEST_UTIL.createTable(tableName, Bytes.toBytes("f"), splitKeys); TEST_UTIL.waitUntilAllRegionsAssigned(tableName); @@ -137,7 +134,7 @@ public void testCreateTable() throws Exception { */ @Test public void testTruncateTable() throws Exception { - final TableName tableName = TableName.valueOf(name.getMethodName()); + final TableName tableName = TableName.valueOf(methodName); TEST_UTIL.createTable(tableName, Bytes.toBytes("f"), splitKeys); TEST_UTIL.waitUntilAllRegionsAssigned(tableName); @@ -162,7 +159,7 @@ public void testTruncateTable() throws Exception { */ @Test public void testSplitTable() throws Exception { - final TableName tableName = TableName.valueOf(name.getMethodName()); + final TableName tableName = TableName.valueOf(methodName); Table t = 
TEST_UTIL.createTable(tableName, Bytes.toBytes("f"), splitKeys); TEST_UTIL.waitUntilAllRegionsAssigned(tableName); final int numberOfRegions = admin.getRegions(t.getName()).size(); @@ -173,7 +170,7 @@ public void testSplitTable() throws Exception { RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName); RegionInfo parent = locator.getRegionLocation(splitPoint).getRegion(); List parentFN = fnm.getFavoredNodes(parent); - assertNotNull("FN should not be null for region: " + parent, parentFN); + assertNotNull(parentFN, "FN should not be null for region: " + parent); LOG.info("SPLITTING TABLE"); admin.split(tableName, splitPoint); @@ -194,15 +191,15 @@ public void testSplitTable() throws Exception { checkIfDaughterInherits2FN(parentFN, daughter1FN); checkIfDaughterInherits2FN(parentFN, daughter2FN); - assertEquals("Daughter's PRIMARY FN should be PRIMARY of parent", - parentFN.get(PRIMARY.ordinal()), daughter1FN.get(PRIMARY.ordinal())); - assertEquals("Daughter's SECONDARY FN should be SECONDARY of parent", - parentFN.get(SECONDARY.ordinal()), daughter1FN.get(SECONDARY.ordinal())); + assertEquals(parentFN.get(PRIMARY.ordinal()), daughter1FN.get(PRIMARY.ordinal()), + "Daughter's PRIMARY FN should be PRIMARY of parent"); + assertEquals(parentFN.get(SECONDARY.ordinal()), daughter1FN.get(SECONDARY.ordinal()), + "Daughter's SECONDARY FN should be SECONDARY of parent"); - assertEquals("Daughter's PRIMARY FN should be PRIMARY of parent", - parentFN.get(PRIMARY.ordinal()), daughter2FN.get(PRIMARY.ordinal())); - assertEquals("Daughter's SECONDARY FN should be TERTIARY of parent", - parentFN.get(TERTIARY.ordinal()), daughter2FN.get(SECONDARY.ordinal())); + assertEquals(parentFN.get(PRIMARY.ordinal()), daughter2FN.get(PRIMARY.ordinal()), + "Daughter's PRIMARY FN should be PRIMARY of parent"); + assertEquals(parentFN.get(TERTIARY.ordinal()), daughter2FN.get(SECONDARY.ordinal()), + "Daughter's SECONDARY FN should be TERTIARY of parent"); // Major compact table 
and run catalog janitor. Parent's FN should be removed TEST_UTIL.getMiniHBaseCluster().compact(tableName, true); @@ -211,7 +208,7 @@ public void testSplitTable() throws Exception { ProcedureTestingUtility .waitAllProcedures(TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterProcedureExecutor()); // assertEquals("Parent region should have been cleaned", 1, admin.runCatalogScan()); - assertNull("Parent FN should be null", fnm.getFavoredNodes(parent)); + assertNull(fnm.getFavoredNodes(parent), "Parent FN should be null"); List regions = admin.getRegions(tableName); // Split and Table Disable interfere with each other around region replicas @@ -228,7 +225,7 @@ public void testSplitTable() throws Exception { */ @Test public void testMergeTable() throws Exception { - final TableName tableName = TableName.valueOf(name.getMethodName()); + final TableName tableName = TableName.valueOf(methodName); TEST_UTIL.createTable(tableName, Bytes.toBytes("f"), splitKeys); TEST_UTIL.waitUntilAllRegionsAssigned(tableName); @@ -255,17 +252,17 @@ public void testMergeTable() throws Exception { RegionInfo mergedRegion = locator.getRegionLocation(HConstants.EMPTY_START_ROW).getRegion(); List mergedFN = fnm.getFavoredNodes(mergedRegion); - assertArrayEquals("Merged region doesn't match regionA's FN", regionAFN.toArray(), - mergedFN.toArray()); + assertArrayEquals(regionAFN.toArray(), mergedFN.toArray(), + "Merged region doesn't match regionA's FN"); // Major compact table and run catalog janitor. Parent FN should be removed TEST_UTIL.getMiniHBaseCluster().compact(tableName, true); - assertEquals("Merge parents should have been cleaned", 1, admin.runCatalogJanitor()); + assertEquals(1, admin.runCatalogJanitor(), "Merge parents should have been cleaned"); // Catalog cleanup is async. Wait on procedure to finish up. 
ProcedureTestingUtility .waitAllProcedures(TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterProcedureExecutor()); - assertNull("Parent FN should be null", fnm.getFavoredNodes(regionA)); - assertNull("Parent FN should be null", fnm.getFavoredNodes(regionB)); + assertNull(fnm.getFavoredNodes(regionA), "Parent FN should be null"); + assertNull(fnm.getFavoredNodes(regionB), "Parent FN should be null"); List regions = admin.getRegions(tableName); @@ -277,7 +274,7 @@ public void testMergeTable() throws Exception { private void checkNoFNForDeletedTable(List regions) { for (RegionInfo region : regions) { LOG.info("Testing if FN data for " + region); - assertNull("FN not null for deleted table's region: " + region, fnm.getFavoredNodes(region)); + assertNull(fnm.getFavoredNodes(region), "FN not null for deleted table's region: " + region); } } @@ -305,33 +302,31 @@ private void checkIfFavoredNodeInformationIsCorrect(TableName tableName) throws List fnList = fnm.getFavoredNodes(regionInfo); // 1. Does each region have favored node? - assertNotNull("Favored nodes should not be null for region:" + regionInfo, fnList); + assertNotNull(fnList, "Favored nodes should not be null for region:" + regionInfo); // 2. Do we have the right number of favored nodes? Is start code -1? - assertEquals("Incorrect favored nodes for region:" + regionInfo + " fnlist: " + fnList, - FavoredNodeAssignmentHelper.FAVORED_NODES_NUM, fnList.size()); + assertEquals(FavoredNodeAssignmentHelper.FAVORED_NODES_NUM, fnList.size(), + "Incorrect favored nodes for region:" + regionInfo + " fnlist: " + fnList); for (ServerName sn : fnList) { - assertEquals("FN should not have startCode, fnlist:" + fnList, -1, sn.getStartcode()); + assertEquals(-1, sn.getStartCode(), "FN should not have startCode, fnlist:" + fnList); } // 3. 
Check if the regionServers have all the FN updated and in sync with Master HRegionServer regionServer = snRSMap.get(regionLocation.getServerName()); - assertNotNull("RS should not be null for regionLocation: " + regionLocation, regionServer); + assertNotNull(regionServer, "RS should not be null for regionLocation: " + regionLocation); InetSocketAddress[] rsFavNodes = regionServer.getFavoredNodesForRegion(regionInfo.getEncodedName()); - assertNotNull( - "RS " + regionLocation.getServerName() + " does not have FN for region: " + regionInfo, - rsFavNodes); - assertEquals( + assertNotNull(rsFavNodes, + "RS " + regionLocation.getServerName() + " does not have FN for region: " + regionInfo); + assertEquals(FavoredNodeAssignmentHelper.FAVORED_NODES_NUM, rsFavNodes.length, "Incorrect FN for region:" + regionInfo.getEncodedName() + " on server:" - + regionLocation.getServerName(), - FavoredNodeAssignmentHelper.FAVORED_NODES_NUM, rsFavNodes.length); + + regionLocation.getServerName()); // 4. Does DN port match all FN node list? 
for (ServerName sn : fnm.getFavoredNodesWithDNPort(regionInfo)) { - assertEquals("FN should not have startCode, fnlist:" + fnList, -1, sn.getStartcode()); - assertEquals("FN port should belong to DN port, fnlist:" + fnList, dnPort, sn.getPort()); + assertEquals(-1, sn.getStartCode(), "FN should not have startCode, fnlist:" + fnList); + assertEquals(dnPort, sn.getPort(), "FN port should belong to DN port, fnlist:" + fnList); } } } @@ -341,7 +336,7 @@ private void checkIfFavoredNodeInformationIsCorrect(TableName tableName) throws */ @Test public void testSystemTables() throws Exception { - final TableName tableName = TableName.valueOf(name.getMethodName()); + final TableName tableName = TableName.valueOf(methodName); TEST_UTIL.createTable(tableName, Bytes.toBytes("f"), splitKeys); TEST_UTIL.waitUntilAllRegionsAssigned(tableName); @@ -352,7 +347,7 @@ public void testSystemTables() throws Exception { .listTableNamesByNamespace(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR)) { List regions = admin.getRegions(sysTable); for (RegionInfo region : regions) { - assertNull("FN should be null for sys region", fnm.getFavoredNodes(region)); + assertNull(fnm.getFavoredNodes(region), "FN should be null for sys region"); } } @@ -372,9 +367,8 @@ private void checkIfDaughterInherits2FN(List parentFN, List tableDoNotRetry.get(new Get(Bytes.toBytes("row")).addColumn(CF, CQ))); } - @Test(expected = DoNotRetryIOException.class) + @Test public void testPutWithDoNotRetryIOException() throws Exception { - tableDoNotRetry.put(new Put(Bytes.toBytes("row")).addColumn(CF, CQ, Bytes.toBytes("value"))); + assertThrows(DoNotRetryIOException.class, () -> tableDoNotRetry + .put(new Put(Bytes.toBytes("row")).addColumn(CF, CQ, Bytes.toBytes("value")))); } - @Test(expected = DoNotRetryIOException.class) + @Test public void testDeleteWithDoNotRetryIOException() throws Exception { - tableDoNotRetry.delete(new Delete(Bytes.toBytes("row")).addColumn(CF, CQ)); + assertThrows(DoNotRetryIOException.class, + 
() -> tableDoNotRetry.delete(new Delete(Bytes.toBytes("row")).addColumn(CF, CQ))); } - @Test(expected = DoNotRetryIOException.class) + @Test public void testAppendWithDoNotRetryIOException() throws Exception { - tableDoNotRetry - .append(new Append(Bytes.toBytes("row")).addColumn(CF, CQ, Bytes.toBytes("value"))); + assertThrows(DoNotRetryIOException.class, () -> tableDoNotRetry + .append(new Append(Bytes.toBytes("row")).addColumn(CF, CQ, Bytes.toBytes("value")))); } - @Test(expected = DoNotRetryIOException.class) + @Test public void testIncrementWithDoNotRetryIOException() throws Exception { - tableDoNotRetry.increment(new Increment(Bytes.toBytes("row")).addColumn(CF, CQ, 1)); + assertThrows(DoNotRetryIOException.class, + () -> tableDoNotRetry.increment(new Increment(Bytes.toBytes("row")).addColumn(CF, CQ, 1))); } - @Test(expected = RetriesExhaustedException.class) + @Test public void testGetWithIOException() throws Exception { - tableRetry.get(new Get(Bytes.toBytes("row")).addColumn(CF, CQ)); + assertThrows(RetriesExhaustedException.class, + () -> tableRetry.get(new Get(Bytes.toBytes("row")).addColumn(CF, CQ))); } - @Test(expected = RetriesExhaustedException.class) + @Test public void testPutWithIOException() throws Exception { - tableRetry.put(new Put(Bytes.toBytes("row")).addColumn(CF, CQ, Bytes.toBytes("value"))); + assertThrows(RetriesExhaustedException.class, () -> tableRetry + .put(new Put(Bytes.toBytes("row")).addColumn(CF, CQ, Bytes.toBytes("value")))); } - @Test(expected = RetriesExhaustedException.class) + @Test public void testDeleteWithIOException() throws Exception { - tableRetry.delete(new Delete(Bytes.toBytes("row")).addColumn(CF, CQ)); + assertThrows(RetriesExhaustedException.class, + () -> tableRetry.delete(new Delete(Bytes.toBytes("row")).addColumn(CF, CQ))); } - @Test(expected = RetriesExhaustedException.class) + @Test public void testAppendWithIOException() throws Exception { - tableRetry.append(new Append(Bytes.toBytes("row")).addColumn(CF, 
CQ, Bytes.toBytes("value"))); + assertThrows(RetriesExhaustedException.class, () -> tableRetry + .append(new Append(Bytes.toBytes("row")).addColumn(CF, CQ, Bytes.toBytes("value")))); } - @Test(expected = RetriesExhaustedException.class) + @Test public void testIncrementWithIOException() throws Exception { - tableRetry.increment(new Increment(Bytes.toBytes("row")).addColumn(CF, CQ, 1)); + assertThrows(RetriesExhaustedException.class, + () -> tableRetry.increment(new Increment(Bytes.toBytes("row")).addColumn(CF, CQ, 1))); } public static class ThrowDoNotRetryIOExceptionCoprocessor diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestTableSnapshotScanner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestTableSnapshotScanner.java index 621ecc087785..4b835719ecef 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestTableSnapshotScanner.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestTableSnapshotScanner.java @@ -19,6 +19,11 @@ import static org.apache.hadoop.hbase.client.metrics.ScanMetrics.REGIONS_SCANNED_METRIC_NAME; import static org.apache.hadoop.hbase.client.metrics.ServerSideScanMetrics.COUNT_OF_ROWS_SCANNED_KEY_METRIC_NAME; +import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.fail; import java.io.IOException; import java.util.Arrays; @@ -31,7 +36,6 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellScanner; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.StartTestingClusterOption; import org.apache.hadoop.hbase.TableName; @@ -55,24 +59,18 @@ import org.apache.hadoop.hbase.util.FSUtils; import 
org.apache.hadoop.hbase.util.HFileArchiveUtil; import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread; -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; -import org.junit.ClassRule; -import org.junit.Rule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.rules.TestName; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestInfo; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({ LargeTests.class, ClientTests.class }) +@Tag(LargeTests.TAG) +@Tag(ClientTests.TAG) public class TestTableSnapshotScanner { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestTableSnapshotScanner.class); - private static final Logger LOG = LoggerFactory.getLogger(TestTableSnapshotScanner.class); private final HBaseTestingUtil UTIL = new HBaseTestingUtil(); private static final int NUM_REGION_SERVERS = 2; @@ -84,8 +82,7 @@ public class TestTableSnapshotScanner { private Path rootDir; private boolean clusterUp; - @Rule - public TestName name = new TestName(); + private String methodName; public static void blockUntilSplitFinished(HBaseTestingUtil util, TableName tableName, int expectedRegionSize) throws Exception { @@ -98,8 +95,9 @@ public static void blockUntilSplitFinished(HBaseTestingUtil util, TableName tabl } } - @Before - public void setupCluster() throws Exception { + @BeforeEach + public void setupCluster(TestInfo testInfo) throws Exception { + methodName = testInfo.getTestMethod().get().getName(); setupConf(UTIL.getConfiguration()); StartTestingClusterOption option = StartTestingClusterOption.builder().numRegionServers(NUM_REGION_SERVERS) @@ -110,7 +108,7 @@ public void setupCluster() throws Exception { fs = rootDir.getFileSystem(UTIL.getConfiguration()); } - @After + @AfterEach public void 
tearDownCluster() throws Exception { if (clusterUp) { UTIL.shutdownMiniCluster(); @@ -208,7 +206,7 @@ public void testNoDuplicateResultsWhenSplitting() throws Exception { @Test public void testScanLimit() throws Exception { - final TableName tableName = TableName.valueOf(name.getMethodName()); + final TableName tableName = TableName.valueOf(methodName); final String snapshotName = tableName + "Snapshot"; TableSnapshotScanner scanner = null; try { @@ -225,7 +223,7 @@ public void testScanLimit() throws Exception { } count++; } - Assert.assertEquals(100, count); + assertEquals(100, count); } finally { if (scanner != null) { scanner.close(); @@ -252,8 +250,8 @@ public void testWithOfflineHBaseMultiRegion() throws Exception { private ScanMetrics createTableSnapshotScannerAndGetScanMetrics(boolean enableScanMetrics, boolean enableScanMetricsByRegion, byte[] endKey) throws Exception { - TableName tableName = TableName.valueOf(name.getMethodName() + "_TABLE"); - String snapshotName = name.getMethodName() + "_SNAPSHOT"; + TableName tableName = TableName.valueOf(methodName + "_TABLE"); + String snapshotName = methodName + "_SNAPSHOT"; try { createTableAndSnapshot(UTIL, tableName, snapshotName, 50); Path restoreDir = UTIL.getDataTestDirOnTestFS(snapshotName); @@ -275,13 +273,13 @@ private ScanMetrics createTableSnapshotScannerAndGetScanMetrics(boolean enableSc @Test public void testScanMetricsDisabled() throws Exception { ScanMetrics scanMetrics = createTableSnapshotScannerAndGetScanMetrics(false, false, yyy); - Assert.assertNull(scanMetrics); + assertNull(scanMetrics); } @Test public void testScanMetricsWithScanMetricsByRegionDisabled() throws Exception { ScanMetrics scanMetrics = createTableSnapshotScannerAndGetScanMetrics(true, false, yyy); - Assert.assertNotNull(scanMetrics); + assertNotNull(scanMetrics); int rowsScanned = 0; for (byte[] row : HBaseTestingUtil.ROWS) { if (Bytes.compareTo(row, bbb) >= 0 && Bytes.compareTo(row, yyy) < 0) { @@ -289,7 +287,7 @@ public void 
testScanMetricsWithScanMetricsByRegionDisabled() throws Exception { } } Map metricsMap = scanMetrics.getMetricsMap(); - Assert.assertEquals(rowsScanned, (long) metricsMap.get(COUNT_OF_ROWS_SCANNED_KEY_METRIC_NAME)); + assertEquals(rowsScanned, (long) metricsMap.get(COUNT_OF_ROWS_SCANNED_KEY_METRIC_NAME)); } @Test @@ -297,34 +295,34 @@ public void testScanMetricsByRegionForSingleRegion() throws Exception { // Scan single row with row key bbb byte[] bbc = Bytes.toBytes("bbc"); ScanMetrics scanMetrics = createTableSnapshotScannerAndGetScanMetrics(true, true, bbc); - Assert.assertNotNull(scanMetrics); + assertNotNull(scanMetrics); Map> scanMetricsByRegion = scanMetrics.collectMetricsByRegion(); - Assert.assertEquals(1, scanMetricsByRegion.size()); + assertEquals(1, scanMetricsByRegion.size()); for (Map.Entry> entry : scanMetricsByRegion .entrySet()) { ScanMetricsRegionInfo scanMetricsRegionInfo = entry.getKey(); Map metricsMap = entry.getValue(); - Assert.assertNull(scanMetricsRegionInfo.getServerName()); - Assert.assertNotNull(scanMetricsRegionInfo.getEncodedRegionName()); - Assert.assertEquals(1, (long) metricsMap.get(REGIONS_SCANNED_METRIC_NAME)); - Assert.assertEquals(1, (long) metricsMap.get(COUNT_OF_ROWS_SCANNED_KEY_METRIC_NAME)); + assertNull(scanMetricsRegionInfo.getServerName()); + assertNotNull(scanMetricsRegionInfo.getEncodedRegionName()); + assertEquals(1, (long) metricsMap.get(REGIONS_SCANNED_METRIC_NAME)); + assertEquals(1, (long) metricsMap.get(COUNT_OF_ROWS_SCANNED_KEY_METRIC_NAME)); } } @Test public void testScanMetricsByRegionForMultiRegion() throws Exception { ScanMetrics scanMetrics = createTableSnapshotScannerAndGetScanMetrics(true, true, yyy); - Assert.assertNotNull(scanMetrics); + assertNotNull(scanMetrics); Map> scanMetricsByRegion = scanMetrics.collectMetricsByRegion(); for (Map.Entry> entry : scanMetricsByRegion .entrySet()) { ScanMetricsRegionInfo scanMetricsRegionInfo = entry.getKey(); Map metricsMap = entry.getValue(); - 
Assert.assertNull(scanMetricsRegionInfo.getServerName()); - Assert.assertNotNull(scanMetricsRegionInfo.getEncodedRegionName()); - Assert.assertEquals(1, (long) metricsMap.get(REGIONS_SCANNED_METRIC_NAME)); + assertNull(scanMetricsRegionInfo.getServerName()); + assertNotNull(scanMetricsRegionInfo.getEncodedRegionName()); + assertEquals(1, (long) metricsMap.get(REGIONS_SCANNED_METRIC_NAME)); } } @@ -425,14 +423,14 @@ private static void verifyRow(Result result) throws IOException { Cell cell = scanner.current(); // assert that all Cells in the Result have the same key - Assert.assertEquals(0, Bytes.compareTo(row, 0, row.length, cell.getRowArray(), - cell.getRowOffset(), cell.getRowLength())); + assertEquals(0, Bytes.compareTo(row, 0, row.length, cell.getRowArray(), cell.getRowOffset(), + cell.getRowLength())); } for (int j = 0; j < FAMILIES.length; j++) { byte[] actual = result.getValue(FAMILIES[j], FAMILIES[j]); - Assert.assertArrayEquals("Row in snapshot does not match, expected:" + Bytes.toString(row) - + " ,actual:" + Bytes.toString(actual), row, actual); + assertArrayEquals(row, actual, "Row in snapshot does not match, expected:" + + Bytes.toString(row) + " ,actual:" + Bytes.toString(actual)); } } @@ -449,7 +447,7 @@ public void testMergeRegion() throws Exception { // create table with 3 regions Table table = UTIL.createTable(tableName, FAMILIES, 1, bbb, yyy, 3); List regions = admin.getRegions(tableName); - Assert.assertEquals(3, regions.size()); + assertEquals(3, regions.size()); RegionInfo region0 = regions.get(0); RegionInfo region1 = regions.get(1); RegionInfo region2 = regions.get(2); @@ -489,7 +487,7 @@ public void testMergeRegion() throws Exception { : mergedRegions.get(0); // snapshot admin.snapshot(snapshotName, tableName); - Assert.assertEquals(1, admin.listSnapshots().size()); + assertEquals(1, admin.listSnapshots().size()); // major compact admin.compactionSwitch(true, serverList); admin.majorCompactRegion(mergedRegion.getRegionName()); @@ -558,13 
+556,13 @@ public void testMergeRegion() throws Exception { } } catch (Exception e) { LOG.error("scan snapshot error", e); - Assert.fail("Should not throw Exception: " + e.getMessage()); + fail("Should not throw Exception: " + e.getMessage()); } } @Test public void testDeleteTableWithMergedRegions() throws Exception { - final TableName tableName = TableName.valueOf(this.name.getMethodName()); + final TableName tableName = TableName.valueOf(this.methodName); String snapshotName = tableName.getNameAsString() + "_snapshot"; Configuration conf = UTIL.getConfiguration(); try (Admin admin = UTIL.getConnection().getAdmin()) { @@ -574,14 +572,14 @@ public void testDeleteTableWithMergedRegions() throws Exception { // create table Table table = UTIL.createTable(tableName, FAMILIES, 1, bbb, yyy, 3); List regions = admin.getRegions(tableName); - Assert.assertEquals(3, regions.size()); + assertEquals(3, regions.size()); // write some data UTIL.loadTable(table, FAMILIES); // merge region admin.mergeRegionsAsync(new byte[][] { regions.get(0).getEncodedNameAsBytes(), regions.get(1).getEncodedNameAsBytes() }, false).get(); regions = admin.getRegions(tableName); - Assert.assertEquals(2, regions.size()); + assertEquals(2, regions.size()); // snapshot admin.snapshot(snapshotName, tableName); // verify snapshot diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestTableSnapshotScannerWithSFT.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestTableSnapshotScannerWithSFT.java index 5601b0e4e3d2..0d5a9f9c997c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestTableSnapshotScannerWithSFT.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestTableSnapshotScannerWithSFT.java @@ -18,20 +18,15 @@ package org.apache.hadoop.hbase.client; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.HBaseClassTestRule; import 
org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerFactory; import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.LargeTests; -import org.junit.ClassRule; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.Tag; -@Category({ LargeTests.class, ClientTests.class }) +@Tag(LargeTests.TAG) +@Tag(ClientTests.TAG) public class TestTableSnapshotScannerWithSFT extends TestTableSnapshotScanner { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestTableSnapshotScannerWithSFT.class); - @Override protected void setupConf(Configuration conf) { super.setupConf(conf); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestTimestampsFilter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestTimestampsFilter.java index 187bae4ae65f..e63a0a9d683c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestTimestampsFilter.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestTimestampsFilter.java @@ -17,8 +17,8 @@ */ package org.apache.hadoop.hbase.client; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; import java.io.IOException; import java.util.ArrayList; @@ -26,7 +26,6 @@ import java.util.List; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.filter.Filter; @@ -34,44 +33,37 @@ import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.Bytes; -import org.junit.After; -import org.junit.AfterClass; 
-import org.junit.Before; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Rule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.rules.TestName; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestInfo; /** * Run tests related to {@link TimestampsFilter} using HBase client APIs. Sets up the HBase mini * cluster once at start. Each creates a table named for the method and does its stuff against that. */ -@Category({ MediumTests.class, ClientTests.class }) +@Tag(MediumTests.TAG) +@Tag(ClientTests.TAG) public class TestTimestampsFilter { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestTimestampsFilter.class); - private final static HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); - @Rule - public TestName name = new TestName(); + private String methodName; - /** - * @throws java.lang.Exception - */ - @BeforeClass + @BeforeEach + public void setUpMethodName(TestInfo testInfo) { + this.methodName = testInfo.getTestMethod().get().getName(); + } + + @BeforeAll public static void setUpBeforeClass() throws Exception { TEST_UTIL.startMiniCluster(); } - /** - * @throws java.lang.Exception - */ - @AfterClass + @AfterAll public static void tearDownAfterClass() throws Exception { TEST_UTIL.shutdownMiniCluster(); } @@ -79,7 +71,7 @@ public static void tearDownAfterClass() throws Exception { /** * @throws java.lang.Exception */ - @Before + @BeforeEach public void setUp() throws Exception { // Nothing to do. } @@ -87,7 +79,7 @@ public void setUp() throws Exception { /** * @throws java.lang.Exception */ - @After + @AfterEach public void tearDown() throws Exception { // Nothing to do. 
} @@ -99,7 +91,7 @@ public void tearDown() throws Exception { */ @Test public void testTimestampsFilter() throws Exception { - final byte[] TABLE = Bytes.toBytes(name.getMethodName()); + final byte[] TABLE = Bytes.toBytes(methodName); byte[] FAMILY = Bytes.toBytes("event_log"); byte[][] FAMILIES = new byte[][] { FAMILY }; Cell kvs[]; @@ -154,12 +146,12 @@ public void testTimestampsFilter() throws Exception { // Scan rows 0..4. For each row, get all its columns, but only // those versions of the columns with the specified timestamps. Result[] results = scanNVersions(ht, FAMILY, 0, 4, Arrays.asList(6L, 106L, 306L)); - assertEquals("# of rows returned from scan", 5, results.length); + assertEquals(5, results.length, "# of rows returned from scan"); for (int rowIdx = 0; rowIdx < 5; rowIdx++) { kvs = results[rowIdx].rawCells(); // each row should have 5 columns. // And we have requested 3 versions for each. - assertEquals("Number of KeyValues in result for row:" + rowIdx, 3 * 5, kvs.length); + assertEquals(3 * 5, kvs.length, "Number of KeyValues in result for row:" + rowIdx); for (int colIdx = 0; colIdx < 5; colIdx++) { int offset = colIdx * 3; checkOneCell(kvs[offset + 0], FAMILY, rowIdx, colIdx, 306); @@ -172,7 +164,7 @@ public void testTimestampsFilter() throws Exception { @Test public void testMultiColumns() throws Exception { - final byte[] TABLE = Bytes.toBytes(name.getMethodName()); + final byte[] TABLE = Bytes.toBytes(methodName); byte[] FAMILY = Bytes.toBytes("event_log"); byte[][] FAMILIES = new byte[][] { FAMILY }; @@ -229,8 +221,7 @@ public void testWithVersionDeletes() throws Exception { } private void testWithVersionDeletes(boolean flushTables) throws IOException { - final byte[] TABLE = - Bytes.toBytes(name.getMethodName() + "_" + (flushTables ? "flush" : "noflush")); + final byte[] TABLE = Bytes.toBytes(methodName + "_" + (flushTables ? 
"flush" : "noflush")); byte[] FAMILY = Bytes.toBytes("event_log"); byte[][] FAMILIES = new byte[][] { FAMILY }; @@ -291,19 +282,19 @@ private void checkOneCell(Cell kv, byte[] cf, int rowIdx, int colIdx, long ts) { String ctx = "rowIdx=" + rowIdx + "; colIdx=" + colIdx + "; ts=" + ts; - assertEquals("Row mismatch which checking: " + ctx, "row:" + rowIdx, - Bytes.toString(CellUtil.cloneRow(kv))); + assertEquals("row:" + rowIdx, Bytes.toString(CellUtil.cloneRow(kv)), + "Row mismatch while checking: " + ctx); - assertEquals("ColumnFamily mismatch while checking: " + ctx, Bytes.toString(cf), - Bytes.toString(CellUtil.cloneFamily(kv))); + assertEquals(Bytes.toString(cf), Bytes.toString(CellUtil.cloneFamily(kv)), + "ColumnFamily mismatch while checking: " + ctx); - assertEquals("Column qualifier mismatch while checking: " + ctx, "column:" + colIdx, - Bytes.toString(CellUtil.cloneQualifier(kv))); + assertEquals("column:" + colIdx, Bytes.toString(CellUtil.cloneQualifier(kv)), + "Column qualifier mismatch while checking: " + ctx); - assertEquals("Timestamp mismatch while checking: " + ctx, ts, kv.getTimestamp()); + assertEquals(ts, kv.getTimestamp(), "Timestamp mismatch while checking: " + ctx); - assertEquals("Value mismatch while checking: " + ctx, "value-version-" + ts, - Bytes.toString(CellUtil.cloneValue(kv))); + assertEquals("value-version-" + ts, Bytes.toString(CellUtil.cloneValue(kv)), + "Value mismatch while checking: " + ctx); } /** diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestUpdateConfiguration.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestUpdateConfiguration.java index fd708e1bc805..30a4cb393049 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestUpdateConfiguration.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestUpdateConfiguration.java @@ -17,35 +17,29 @@ */ package org.apache.hadoop.hbase.client; -import static org.junit.Assert.assertEquals; +import static
org.junit.jupiter.api.Assertions.assertEquals; import java.io.IOException; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.StartTestingClusterOption; import org.apache.hadoop.hbase.testclassification.MediumTests; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Rule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.rules.TestName; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestInfo; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({ MediumTests.class }) +@Tag(MediumTests.TAG) public class TestUpdateConfiguration extends AbstractTestUpdateConfiguration { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestUpdateConfiguration.class); - private static final Logger LOG = LoggerFactory.getLogger(TestUpdateConfiguration.class); private final static HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); - @BeforeClass + @BeforeAll public static void setup() throws Exception { setUpConfigurationFiles(TEST_UTIL); StartTestingClusterOption option = StartTestingClusterOption.builder().numMasters(2).build(); @@ -53,12 +47,16 @@ public static void setup() throws Exception { addResourceToRegionServerConfiguration(TEST_UTIL); } - @Rule - public TestName name = new TestName(); + private String methodName; + + @BeforeEach + public void setUpMethodName(TestInfo testInfo) { + this.methodName = testInfo.getTestMethod().get().getName(); + } @Test public void testOnlineConfigChange() throws IOException { - LOG.debug("Starting the test {}", name.getMethodName()); + LOG.debug("Starting the test {}", methodName); Admin admin = 
TEST_UTIL.getAdmin(); ServerName server = TEST_UTIL.getHBaseCluster().getRegionServer(0).getServerName(); admin.updateConfiguration(server); @@ -66,7 +64,7 @@ public void testOnlineConfigChange() throws IOException { @Test public void testMasterOnlineConfigChange() throws IOException { - LOG.debug("Starting the test {}", name.getMethodName()); + LOG.debug("Starting the test {}", methodName); replaceHBaseSiteXML(); Admin admin = TEST_UTIL.getAdmin(); ServerName server = TEST_UTIL.getHBaseCluster().getMaster().getServerName(); @@ -79,14 +77,14 @@ public void testMasterOnlineConfigChange() throws IOException { @Test public void testAllOnlineConfigChange() throws IOException { - LOG.debug("Starting the test {} ", name.getMethodName()); + LOG.debug("Starting the test {} ", methodName); Admin admin = TEST_UTIL.getAdmin(); admin.updateConfiguration(); } @Test public void testAllCustomOnlineConfigChange() throws IOException { - LOG.debug("Starting the test {}", name.getMethodName()); + LOG.debug("Starting the test {}", methodName); replaceHBaseSiteXML(); Admin admin = TEST_UTIL.getAdmin(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestZKConnectionRegistry.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestZKConnectionRegistry.java index 6d585245e959..5734726d9dd3 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestZKConnectionRegistry.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestZKConnectionRegistry.java @@ -19,17 +19,16 @@ import static org.hamcrest.CoreMatchers.instanceOf; import static org.hamcrest.MatcherAssert.assertThat; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotEquals; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertNotSame; -import static org.junit.Assert.fail; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotEquals; 
+import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNotSame; +import static org.junit.jupiter.api.Assertions.fail; import java.io.IOException; import java.util.concurrent.ExecutionException; import java.util.stream.IntStream; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionLocation; @@ -39,36 +38,32 @@ import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster; import org.apache.hadoop.hbase.zookeeper.ReadOnlyZKClient; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hbase.thirdparty.com.google.common.io.Closeables; -@Category({ MediumTests.class, ClientTests.class }) +@Tag(MediumTests.TAG) +@Tag(ClientTests.TAG) public class TestZKConnectionRegistry { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestZKConnectionRegistry.class); - static final Logger LOG = LoggerFactory.getLogger(TestZKConnectionRegistry.class); static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); private static ZKConnectionRegistry REGISTRY; - @BeforeClass + @BeforeAll public static void setUp() throws Exception { TEST_UTIL.startMiniCluster(3); HBaseTestingUtil.setReplicas(TEST_UTIL.getAdmin(), TableName.META_TABLE_NAME, 3); REGISTRY = new ZKConnectionRegistry(TEST_UTIL.getConfiguration(), null); } - @AfterClass + @AfterAll public static void tearDown() throws Exception { 
Closeables.close(REGISTRY, true); TEST_UTIL.shutdownMiniCluster(); @@ -79,8 +74,8 @@ public void test() throws InterruptedException, ExecutionException, IOException LOG.info("STARTED TEST"); String clusterId = REGISTRY.getClusterId().get(); String expectedClusterId = TEST_UTIL.getHBaseCluster().getMaster().getClusterId(); - assertEquals("Expected " + expectedClusterId + ", found=" + clusterId, expectedClusterId, - clusterId); + assertEquals(expectedClusterId, clusterId, + "Expected " + expectedClusterId + ", found=" + clusterId); assertEquals(TEST_UTIL.getHBaseCluster().getMaster().getServerName(), REGISTRY.getActiveMaster().get()); RegionReplicaTestHelper.waitUntilAllMetaReplicasAreReady(TEST_UTIL, REGISTRY); @@ -88,7 +83,7 @@ public void test() throws InterruptedException, ExecutionException, IOException assertEquals(3, locs.getRegionLocations().length); IntStream.range(0, 3).forEach(i -> { HRegionLocation loc = locs.getRegionLocation(i); - assertNotNull("Replica " + i + " doesn't have location", loc); + assertNotNull(loc, "Replica " + i + " doesn't have location"); assertEquals(TableName.META_TABLE_NAME, loc.getRegion().getTable()); assertEquals(i, loc.getRegion().getReplicaId()); }); @@ -101,11 +96,11 @@ public void testIndependentZKConnections() throws IOException { otherConf.set(HConstants.ZOOKEEPER_QUORUM, MiniZooKeeperCluster.HOST); try (ZKConnectionRegistry otherRegistry = new ZKConnectionRegistry(otherConf, null)) { ReadOnlyZKClient zk2 = otherRegistry.getZKClient(); - assertNotSame("Using a different configuration / quorum should result in different " - + "backing zk connection.", zk1, zk2); - assertNotEquals( - "Using a different configrution / quorum should be reflected in the zk connection.", - zk1.getConnectString(), zk2.getConnectString()); + assertNotSame(zk1, zk2, + "Using a different configuration / quorum should result in different " + + "backing zk connection."); + assertNotEquals(zk1.getConnectString(), zk2.getConnectString(), + "Using a 
different configuration / quorum should be reflected in the zk connection."); } } finally { LOG.info("DONE!"); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/locking/TestEntityLocks.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/locking/TestEntityLocks.java index d205b44f7491..da5a05e3b57e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/locking/TestEntityLocks.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/locking/TestEntityLocks.java @@ -17,9 +17,9 @@ */ package org.apache.hadoop.hbase.client.locking; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.eq; import static org.mockito.ArgumentMatchers.isA; @@ -32,7 +32,6 @@ import java.util.concurrent.TimeUnit; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Abortable; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HBaseIOException; import org.apache.hadoop.hbase.client.PerClientRandomNonceGenerator; @@ -40,10 +39,9 @@ import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.Threads; -import org.junit.Before; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; import org.mockito.ArgumentCaptor; import org.mockito.Mockito; import org.slf4j.Logger; @@ -58,13 +56,10 @@ import
org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockService; import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockType; -@Category({ ClientTests.class, SmallTests.class }) +@Tag(ClientTests.TAG) +@Tag(SmallTests.TAG) public class TestEntityLocks { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestEntityLocks.class); - private static final Logger LOG = LoggerFactory.getLogger(TestEntityLocks.class); private final Configuration conf = HBaseConfiguration.create(); @@ -90,7 +85,7 @@ LockServiceClient getAdmin() throws Exception { return new LockServiceClient(conf, master, PerClientRandomNonceGenerator.get()); } - @Before + @BeforeEach public void setUp() throws Exception { admin = getAdmin(); lockReqArgCaptor = ArgumentCaptor.forClass(LockRequest.class); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestBadReplicationPeer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestBadReplicationPeer.java index 04093355af35..ea5730167a7e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestBadReplicationPeer.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestBadReplicationPeer.java @@ -19,7 +19,6 @@ import java.io.IOException; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.client.Admin; @@ -29,29 +28,21 @@ import org.apache.hadoop.hbase.replication.ReplicationPeerConfigBuilder; import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.MediumTests; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Rule; -import org.junit.Test; -import 
org.junit.experimental.categories.Category; -import org.junit.rules.TestName; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({ MediumTests.class, ClientTests.class }) +@Tag(MediumTests.TAG) +@Tag(ClientTests.TAG) public class TestBadReplicationPeer { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestBadReplicationPeer.class); private static final Logger LOG = LoggerFactory.getLogger(TestBadReplicationPeer.class); private final static HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); private static Configuration conf; - @Rule - public TestName name = new TestName(); - - @BeforeClass + @BeforeAll public static void setUpBeforeClass() throws Exception { TEST_UTIL.getConfiguration().setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 1); TEST_UTIL.getConfiguration().setBoolean("replication.source.regionserver.abort", false); @@ -59,7 +50,7 @@ public static void setUpBeforeClass() throws Exception { conf = TEST_UTIL.getConfiguration(); } - @AfterClass + @AfterAll public static void tearDownAfterClass() throws Exception { TEST_UTIL.shutdownMiniCluster(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdminForSyncReplication.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdminForSyncReplication.java index daa29908251a..388b1c75d263 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdminForSyncReplication.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdminForSyncReplication.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hbase.client.replication; -import static org.junit.Assert.assertEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; import 
java.io.IOException; import java.util.ArrayList; @@ -25,7 +25,6 @@ import java.util.List; import java.util.Map; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableName; @@ -35,21 +34,17 @@ import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.Bytes; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({ MediumTests.class, ClientTests.class }) +@Tag(MediumTests.TAG) +@Tag(ClientTests.TAG) public class TestReplicationAdminForSyncReplication { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestReplicationAdminForSyncReplication.class); - private static final Logger LOG = LoggerFactory.getLogger(TestReplicationAdminForSyncReplication.class); @@ -57,14 +52,14 @@ public class TestReplicationAdminForSyncReplication { private static Admin hbaseAdmin; - @BeforeClass + @BeforeAll public static void setUpBeforeClass() throws Exception { TEST_UTIL.getConfiguration().setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 1); TEST_UTIL.startMiniCluster(); hbaseAdmin = TEST_UTIL.getAdmin(); } - @AfterClass + @AfterAll public static void tearDownAfterClass() throws Exception { hbaseAdmin.close(); TEST_UTIL.shutdownMiniCluster(); @@ -104,7 +99,7 @@ public void testAddPeerWithSameTable() throws Exception { successCount++; } } - assertEquals("Only one peer can be added successfully", 1, successCount); + assertEquals(1, successCount, "Only one peer 
can be added successfully"); } private ReplicationPeerConfig buildSyncReplicationPeerConfig(String clusterKey, From e0cc08fbab247f3affd7da4462c00285e091b36b Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Fri, 8 May 2026 11:53:23 +0800 Subject: [PATCH 2/2] remove junit4 import --- .../java/org/apache/hadoop/hbase/client/TestConnection.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestConnection.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestConnection.java index 91eef7354814..183cd4892d16 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestConnection.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestConnection.java @@ -17,10 +17,10 @@ */ package org.apache.hadoop.hbase.client; -import static org.junit.Assert.assertSame; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertSame; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue;