diff --git a/hbase-mapreduce/pom.xml b/hbase-mapreduce/pom.xml index 9854ccf98330..8b2063e64b8d 100644 --- a/hbase-mapreduce/pom.xml +++ b/hbase-mapreduce/pom.xml @@ -161,7 +161,7 @@ org.mockito - mockito-core + mockito-junit-jupiter test @@ -191,11 +191,6 @@ junit-jupiter-params test - - org.junit.vintage - junit-vintage-engine - test - org.slf4j jcl-over-slf4j diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestDriver.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestDriver.java index 6c49a43bf463..8b8d240088da 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestDriver.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestDriver.java @@ -20,21 +20,17 @@ import static org.mockito.Mockito.mock; import static org.mockito.Mockito.verify; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.testclassification.MapReduceTests; import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.util.ProgramDriver; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; import org.mockito.Mockito; -@Category({ MapReduceTests.class, SmallTests.class }) +@Tag(MapReduceTests.TAG) +@Tag(SmallTests.TAG) public class TestDriver { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestDriver.class); - @Test public void testDriverMainMethod() throws Throwable { ProgramDriver programDriverMock = mock(ProgramDriver.class); diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestGroupingTableMap.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestGroupingTableMap.java index 2912fd4d025c..5f1a05684456 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestGroupingTableMap.java +++ 
b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestGroupingTableMap.java @@ -17,8 +17,9 @@ */ package org.apache.hadoop.hbase.mapred; -import static org.junit.Assert.assertArrayEquals; -import static org.junit.Assert.assertNull; +import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertTrue; import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.times; @@ -32,7 +33,6 @@ import java.util.concurrent.atomic.AtomicBoolean; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; @@ -42,20 +42,15 @@ import org.apache.hadoop.mapred.JobConf; import org.apache.hadoop.mapred.OutputCollector; import org.apache.hadoop.mapred.Reporter; -import org.junit.Assert; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList; -@Category({ MapReduceTests.class, SmallTests.class }) +@Tag(MapReduceTests.TAG) +@Tag(SmallTests.TAG) public class TestGroupingTableMap { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestGroupingTableMap.class); - @Test @SuppressWarnings("unchecked") public void shouldNotCallCollectonSinceFindUniqueKeyValueMoreThanOnes() throws Exception { @@ -156,7 +151,7 @@ public void collect(ImmutableBytesWritable arg, Result result) throws IOExceptio gTableMap.map(null, result, outputCollector, reporter); verify(result).listCells(); - Assert.assertTrue("Output not received", outputCollected.get()); + 
assertTrue(outputCollected.get(), "Output not received"); final byte[] firstPartValue = Bytes.toBytes("238947928"); final byte[] secondPartValue = Bytes.toBytes("4678456942345"); diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestIdentityTableMap.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestIdentityTableMap.java index 96e25b51f659..14df668d79a6 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestIdentityTableMap.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestIdentityTableMap.java @@ -22,25 +22,20 @@ import static org.mockito.Mockito.verify; import java.io.IOException; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.hbase.testclassification.MapReduceTests; import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.mapred.OutputCollector; import org.apache.hadoop.mapred.Reporter; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; import org.mockito.Mockito; -@Category({ MapReduceTests.class, SmallTests.class }) +@Tag(MapReduceTests.TAG) +@Tag(SmallTests.TAG) public class TestIdentityTableMap { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestIdentityTableMap.class); - @Test @SuppressWarnings({ "deprecation", "unchecked" }) public void shouldCollectPredefinedTimes() throws IOException { diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestMultiTableSnapshotInputFormat.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestMultiTableSnapshotInputFormat.java index c042bd35a56d..065acfafc0dc 100644 --- 
a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestMultiTableSnapshotInputFormat.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestMultiTableSnapshotInputFormat.java @@ -17,14 +17,13 @@ */ package org.apache.hadoop.hbase.mapred; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertTrue; import java.io.IOException; import java.util.Iterator; import java.util.List; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; @@ -37,21 +36,17 @@ import org.apache.hadoop.mapred.OutputCollector; import org.apache.hadoop.mapred.Reporter; import org.apache.hadoop.mapred.RunningJob; -import org.junit.ClassRule; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.Tag; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hbase.thirdparty.com.google.common.collect.Lists; -@Category({ VerySlowMapReduceTests.class, LargeTests.class }) +@Tag(VerySlowMapReduceTests.TAG) +@Tag(LargeTests.TAG) public class TestMultiTableSnapshotInputFormat extends org.apache.hadoop.hbase.mapreduce.TestMultiTableSnapshotInputFormat { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestMultiTableSnapshotInputFormat.class); - private static final Logger LOG = LoggerFactory.getLogger(TestMultiTableSnapshotInputFormat.class); diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestRowCounter.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestRowCounter.java index 0f71055c6a72..51bae4b52139 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestRowCounter.java +++ 
b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestRowCounter.java @@ -17,9 +17,9 @@ */ package org.apache.hadoop.hbase.mapred; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertTrue; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyLong; import static org.mockito.Mockito.mock; @@ -28,7 +28,6 @@ import java.io.ByteArrayOutputStream; import java.io.IOException; import java.io.PrintStream; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; @@ -38,20 +37,16 @@ import org.apache.hadoop.mapred.JobConf; import org.apache.hadoop.mapred.OutputCollector; import org.apache.hadoop.mapred.Reporter; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; import org.mockito.Mockito; import org.apache.hbase.thirdparty.com.google.common.base.Joiner; -@Category({ MapReduceTests.class, MediumTests.class }) +@Tag(MapReduceTests.TAG) +@Tag(MediumTests.TAG) public class TestRowCounter { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestRowCounter.class); - @Test @SuppressWarnings("deprecation") public void shouldPrintUsage() throws Exception { diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestSplitTable.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestSplitTable.java index 3e093430a92e..60ba178eea58 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestSplitTable.java 
+++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestSplitTable.java @@ -17,31 +17,21 @@ */ package org.apache.hadoop.hbase.mapred; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotEquals; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.testclassification.MapReduceTests; import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.util.Bytes; -import org.junit.Assert; -import org.junit.ClassRule; -import org.junit.Rule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.rules.TestName; - -@Category({ MapReduceTests.class, SmallTests.class }) -public class TestSplitTable { - - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestSplitTable.class); +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestInfo; - @Rule - public TestName name = new TestName(); +@Tag(MapReduceTests.TAG) +@Tag(SmallTests.TAG) +public class TestSplitTable { @Test @SuppressWarnings({ "deprecation", "SelfComparison" }) @@ -104,16 +94,16 @@ public void testSplitTableEquals() { @Test @SuppressWarnings("deprecation") - public void testToString() { - TableSplit split = new TableSplit(TableName.valueOf(name.getMethodName()), + public void testToString(TestInfo testInfo) { + TableSplit split = new TableSplit(TableName.valueOf(testInfo.getTestMethod().get().getName()), Bytes.toBytes("row-start"), Bytes.toBytes("row-end"), "location"); - String str = "HBase table split(table name: " + name.getMethodName() + String str = "HBase table split(table name: " + 
testInfo.getTestMethod().get().getName() + ", start row: row-start, " + "end row: row-end, region location: location)"; - Assert.assertEquals(str, split.toString()); + assertEquals(str, split.toString()); split = new TableSplit((TableName) null, null, null, null); str = "HBase table split(table name: null, start row: null, " + "end row: null, region location: null)"; - Assert.assertEquals(str, split.toString()); + assertEquals(str, split.toString()); } } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestTableInputFormat.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestTableInputFormat.java index d15d3a574640..8358b3472a7e 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestTableInputFormat.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestTableInputFormat.java @@ -17,9 +17,10 @@ */ package org.apache.hadoop.hbase.mapred; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.doReturn; @@ -33,7 +34,6 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CompareOperator; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.NotServingRegionException; @@ -61,12 +61,11 @@ import org.apache.hadoop.mapred.Reporter; import org.apache.hadoop.mapred.RunningJob; import org.apache.hadoop.mapred.lib.NullOutputFormat; -import org.junit.AfterClass; 
-import org.junit.Before; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; import org.mockito.invocation.InvocationOnMock; import org.mockito.stubbing.Answer; import org.slf4j.Logger; @@ -75,13 +74,10 @@ /** * This tests the TableInputFormat and its recovery semantics */ -@Category({ MapReduceTests.class, LargeTests.class }) +@Tag(MapReduceTests.TAG) +@Tag(LargeTests.TAG) public class TestTableInputFormat { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestTableInputFormat.class); - private static final Logger LOG = LoggerFactory.getLogger(TestTableInputFormat.class); private final static HBaseTestingUtil UTIL = new HBaseTestingUtil(); @@ -90,17 +86,17 @@ public class TestTableInputFormat { private static final byte[][] columns = new byte[][] { FAMILY }; - @BeforeClass + @BeforeAll public static void beforeClass() throws Exception { UTIL.startMiniCluster(); } - @AfterClass + @AfterAll public static void afterClass() throws Exception { UTIL.shutdownMiniCluster(); } - @Before + @BeforeEach public void before() throws IOException { LOG.info("before"); UTIL.ensureSomeRegionServersAvailable(1); @@ -265,10 +261,10 @@ public void testTableRecordReaderScannerFail() throws IOException { /** * Run test assuming Scanner IOException failure using mapred api, */ - @Test(expected = IOException.class) + @Test public void testTableRecordReaderScannerFailTwice() throws IOException { Table htable = createIOEScannerTable(Bytes.toBytes("table3"), 2); - runTestMapred(htable); + assertThrows(IOException.class, () -> runTestMapred(htable)); } /** @@ -285,10 +281,11 @@ public void testTableRecordReaderScannerTimeout() throws IOException { * Run test assuming 
NotServingRegionException using mapred api. * @throws org.apache.hadoop.hbase.DoNotRetryIOException */ - @Test(expected = org.apache.hadoop.hbase.NotServingRegionException.class) + @Test public void testTableRecordReaderScannerTimeoutTwice() throws IOException { Table htable = createDNRIOEScannerTable(Bytes.toBytes("table5"), 2); - runTestMapred(htable); + assertThrows(org.apache.hadoop.hbase.NotServingRegionException.class, + () -> runTestMapred(htable)); } /** @@ -329,19 +326,25 @@ void testInputFormat(Class clazz) throws IOException { job.setNumReduceTasks(0); LOG.debug("submitting job."); final RunningJob run = JobClient.runJob(job); - assertTrue("job failed!", run.isSuccessful()); - assertEquals("Saw the wrong number of instances of the filtered-for row.", 2, run.getCounters() - .findCounter(TestTableInputFormat.class.getName() + ":row", "aaa").getCounter()); - assertEquals("Saw any instances of the filtered out row.", 0, run.getCounters() - .findCounter(TestTableInputFormat.class.getName() + ":row", "bbb").getCounter()); - assertEquals("Saw the wrong number of instances of columnA.", 1, run.getCounters() - .findCounter(TestTableInputFormat.class.getName() + ":family", "columnA").getCounter()); - assertEquals("Saw the wrong number of instances of columnB.", 1, run.getCounters() - .findCounter(TestTableInputFormat.class.getName() + ":family", "columnB").getCounter()); - assertEquals("Saw the wrong count of values for the filtered-for row.", 2, run.getCounters() - .findCounter(TestTableInputFormat.class.getName() + ":value", "value aaa").getCounter()); - assertEquals("Saw the wrong count of values for the filtered-out row.", 0, run.getCounters() - .findCounter(TestTableInputFormat.class.getName() + ":value", "value bbb").getCounter()); + assertTrue(run.isSuccessful(), "job failed!"); + assertEquals(2, run.getCounters() + .findCounter(TestTableInputFormat.class.getName() + ":row", "aaa").getCounter(), + "Saw the wrong number of instances of the filtered-for 
row."); + assertEquals(0, run.getCounters() + .findCounter(TestTableInputFormat.class.getName() + ":row", "bbb").getCounter(), + "Saw any instances of the filtered out row."); + assertEquals(1, run.getCounters() + .findCounter(TestTableInputFormat.class.getName() + ":family", "columnA").getCounter(), + "Saw the wrong number of instances of columnA."); + assertEquals(1, run.getCounters() + .findCounter(TestTableInputFormat.class.getName() + ":family", "columnB").getCounter(), + "Saw the wrong number of instances of columnB."); + assertEquals(2, run.getCounters() + .findCounter(TestTableInputFormat.class.getName() + ":value", "value aaa").getCounter(), + "Saw the wrong count of values for the filtered-for row."); + assertEquals(0, run.getCounters() + .findCounter(TestTableInputFormat.class.getName() + ":value", "value bbb").getCounter(), + "Saw the wrong count of values for the filtered-out row."); } public static class ExampleVerifier implements TableMap { diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestTableMapReduce.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestTableMapReduce.java index 2820d9111277..92b6301d0b4d 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestTableMapReduce.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestTableMapReduce.java @@ -17,12 +17,11 @@ */ package org.apache.hadoop.hbase.mapred; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertTrue; import java.io.File; import java.io.IOException; import org.apache.hadoop.fs.FileUtil; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.Table; @@ -37,8 +36,7 @@ import org.apache.hadoop.mapred.OutputCollector; import org.apache.hadoop.mapred.Reporter; import org.apache.hadoop.mapred.RunningJob; -import org.junit.ClassRule; 
-import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.Tag; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -47,14 +45,11 @@ * simple - take every row in the table, reverse the value of a particular cell, and write it back * to the table. */ -@Category({ MapReduceTests.class, LargeTests.class }) +@Tag(MapReduceTests.TAG) +@Tag(LargeTests.TAG) @SuppressWarnings("deprecation") public class TestTableMapReduce extends TestTableMapReduceBase { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestTableMapReduce.class); - private static final Logger LOG = LoggerFactory.getLogger(TestTableMapReduce.class.getName()); protected Logger getLog() { diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestTableMapReduceUtil.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestTableMapReduceUtil.java index 77ac55a1b6d0..1ec3df6152cd 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestTableMapReduceUtil.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestTableMapReduceUtil.java @@ -17,8 +17,9 @@ */ package org.apache.hadoop.hbase.mapred; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertTrue; import java.io.File; import java.io.IOException; @@ -28,7 +29,6 @@ import java.util.Set; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileUtil; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Put; @@ -44,26 +44,21 @@ import org.apache.hadoop.mapred.OutputCollector; import org.apache.hadoop.mapred.Reporter; import 
org.apache.hadoop.mapred.RunningJob; -import org.junit.AfterClass; -import org.junit.Assert; -import org.junit.Before; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableMap; import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableSet; -@Category({ MapReduceTests.class, LargeTests.class }) +@Tag(MapReduceTests.TAG) +@Tag(LargeTests.TAG) public class TestTableMapReduceUtil { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestTableMapReduceUtil.class); - private static final Logger LOG = LoggerFactory.getLogger(TestTableMapReduceUtil.class); private static Table presidentsTable; @@ -88,18 +83,18 @@ public class TestTableMapReduceUtil { private static final HBaseTestingUtil UTIL = new HBaseTestingUtil(); - @BeforeClass + @BeforeAll public static void beforeClass() throws Exception { UTIL.startMiniCluster(); presidentsTable = createAndFillTable(TableName.valueOf(TABLE_NAME)); } - @AfterClass + @AfterAll public static void afterClass() throws Exception { UTIL.shutdownMiniCluster(); } - @Before + @BeforeEach public void before() throws IOException { LOG.info("before"); UTIL.ensureSomeRegionServersAvailable(1); @@ -136,7 +131,7 @@ private static void createPutCommand(Table table) throws IOException { */ @Test public void shouldNumberOfReduceTaskNotExceedNumberOfRegionsForGivenTable() throws IOException { - Assert.assertNotNull(presidentsTable); + assertNotNull(presidentsTable); Configuration cfg = UTIL.getConfiguration(); JobConf jobConf = new JobConf(cfg); TableMapReduceUtil.setNumReduceTasks(TABLE_NAME, 
jobConf); diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestTableOutputFormatConnectionExhaust.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestTableOutputFormatConnectionExhaust.java index fec2c8cf0204..056058251499 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestTableOutputFormatConnectionExhaust.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestTableOutputFormatConnectionExhaust.java @@ -17,22 +17,20 @@ */ package org.apache.hadoop.hbase.mapred; -import static org.junit.Assert.fail; +import static org.junit.jupiter.api.Assertions.fail; import java.io.IOException; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.mapred.JobConf; import org.apache.hadoop.mapred.RecordWriter; -import org.junit.AfterClass; -import org.junit.Before; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -41,13 +39,9 @@ * we can have many instances and not leak connections. This test creates a few TableOutputFormats * and shouldn't fail due to ZK connection exhaustion. 
*/ -@Category(MediumTests.class) +@Tag(MediumTests.TAG) public class TestTableOutputFormatConnectionExhaust { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestTableOutputFormatConnectionExhaust.class); - private static final Logger LOG = LoggerFactory.getLogger(TestTableOutputFormatConnectionExhaust.class); @@ -55,7 +49,7 @@ public class TestTableOutputFormatConnectionExhaust { static final String TABLE = "TestTableOutputFormatConnectionExhaust"; static final String FAMILY = "family"; - @BeforeClass + @BeforeAll public static void beforeClass() throws Exception { // Default in ZookeeperMiniCluster is 1000, setting artificially low to trigger exhaustion. // need min of 7 to properly start the default mini HBase cluster @@ -63,12 +57,12 @@ public static void beforeClass() throws Exception { UTIL.startMiniCluster(); } - @AfterClass + @AfterAll public static void afterClass() throws Exception { UTIL.shutdownMiniCluster(); } - @Before + @BeforeEach public void before() throws IOException { LOG.info("before"); UTIL.ensureSomeRegionServersAvailable(1); diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestTableSnapshotInputFormat.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestTableSnapshotInputFormat.java index 3c1b717d5abf..9f008b82857f 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestTableSnapshotInputFormat.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestTableSnapshotInputFormat.java @@ -18,12 +18,13 @@ package org.apache.hadoop.hbase.mapred; import static org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormatImpl.SNAPSHOT_INPUTFORMAT_LOCALITY_ENABLED_DEFAULT; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; import static org.mockito.Mockito.mock; import java.io.IOException; import java.util.Iterator; import org.apache.hadoop.fs.Path; 
-import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableName; @@ -45,28 +46,27 @@ import org.apache.hadoop.mapred.Reporter; import org.apache.hadoop.mapred.RunningJob; import org.apache.hadoop.mapred.lib.NullOutputFormat; -import org.junit.Assert; -import org.junit.ClassRule; -import org.junit.Ignore; -import org.junit.Rule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.rules.TestName; - -@Category({ VerySlowMapReduceTests.class, LargeTests.class }) +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Disabled; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestInfo; + +@Tag(VerySlowMapReduceTests.TAG) +@Tag(LargeTests.TAG) public class TestTableSnapshotInputFormat extends TableSnapshotInputFormatTestBase { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestTableSnapshotInputFormat.class); - private static final byte[] aaa = Bytes.toBytes("aaa"); private static final byte[] after_zzz = Bytes.toBytes("zz{"); // 'z' + 1 => '{' private static final String COLUMNS = Bytes.toString(FAMILIES[0]) + " " + Bytes.toString(FAMILIES[1]); - @Rule - public TestName name = new TestName(); + private String methodName; + + @BeforeEach + public void beforeEach(TestInfo testInfo) { + methodName = testInfo.getTestMethod().get().getName(); + } @Override protected byte[] getStartRow() { @@ -108,7 +108,7 @@ public void close() { @Test public void testInitTableSnapshotMapperJobConfig() throws Exception { - final TableName tableName = TableName.valueOf(name.getMethodName()); + final TableName tableName = TableName.valueOf(methodName); String snapshotName = "foo"; try { @@ -122,11 +122,11 @@ public void testInitTableSnapshotMapperJobConfig() throws Exception { // TODO: would be better to examine 
directly the cache instance that results from this // config. Currently this is not possible because BlockCache initialization is static. - Assert.assertEquals("Snapshot job should be configured for default LruBlockCache.", - HConstants.HFILE_BLOCK_CACHE_SIZE_DEFAULT, - job.getFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, -1), 0.01); - Assert.assertEquals("Snapshot job should not use BucketCache.", 0, - job.getFloat("hbase.bucketcache.size", -1), 0.01); + assertEquals(HConstants.HFILE_BLOCK_CACHE_SIZE_DEFAULT, + job.getFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, -1), 0.01, + "Snapshot job should be configured for default LruBlockCache."); + assertEquals(0, job.getFloat("hbase.bucketcache.size", -1), 0.01, + "Snapshot job should not use BucketCache."); } finally { UTIL.getAdmin().deleteSnapshot(snapshotName); UTIL.deleteTable(tableName); @@ -168,7 +168,7 @@ public void testRestoreSnapshotDoesNotCreateBackRefLinksInit(TableName tableName @Override protected void testWithMockedMapReduce(HBaseTestingUtil util, String snapshotName, int numRegions, int numSplitsPerRegion, int expectedNumSplits, boolean setLocalityEnabledTo) throws Exception { - final TableName tableName = TableName.valueOf(name.getMethodName()); + final TableName tableName = TableName.valueOf(methodName); try { createTableAndSnapshot(util, tableName, snapshotName, getStartRow(), getEndRow(), numRegions); @@ -202,7 +202,7 @@ private void verifyWithMockedMapReduce(JobConf job, int numRegions, int expected TableSnapshotInputFormat tsif = new TableSnapshotInputFormat(); InputSplit[] splits = tsif.getSplits(job, 0); - Assert.assertEquals(expectedNumSplits, splits.length); + assertEquals(expectedNumSplits, splits.length); HBaseTestingUtil.SeenRowTracker rowTracker = new HBaseTestingUtil.SeenRowTracker(startRow, stopRow); @@ -214,7 +214,7 @@ private void verifyWithMockedMapReduce(JobConf job, int numRegions, int expected for (int i = 0; i < splits.length; i++) { // validate input split InputSplit split = 
splits[i]; - Assert.assertTrue(split instanceof TableSnapshotInputFormat.TableSnapshotRegionSplit); + assertTrue(split instanceof TableSnapshotInputFormat.TableSnapshotRegionSplit); if (localityEnabled) { // When localityEnabled is true, meant to verify split.getLocations() // by the following statement: @@ -222,9 +222,9 @@ private void verifyWithMockedMapReduce(JobConf job, int numRegions, int expected // However, getLocations() of some splits could return an empty array (length is 0), // so drop the verification on length. // TODO: investigate how to verify split.getLocations() when localityEnabled is true - Assert.assertTrue(split.getLocations() != null); + assertTrue(split.getLocations() != null); } else { - Assert.assertTrue(split.getLocations() != null && split.getLocations().length == 0); + assertTrue(split.getLocations() != null && split.getLocations().length == 0); } // validate record reader @@ -290,7 +290,7 @@ public static void doTestWithMapReduce(HBaseTestingUtil util, TableName tableNam jobConf.setOutputFormat(NullOutputFormat.class); RunningJob job = JobClient.runJob(jobConf); - Assert.assertTrue(job.isSuccessful()); + assertTrue(job.isSuccessful()); } finally { if (!shutdownCluster) { util.getAdmin().deleteSnapshot(snapshotName); @@ -299,7 +299,7 @@ public static void doTestWithMapReduce(HBaseTestingUtil util, TableName tableNam } } - @Ignore // Ignored in mapred package because it keeps failing but allowed in mapreduce package. + @Disabled // Ignored in mapred package because it keeps failing but allowed in mapreduce package. 
@Test public void testWithMapReduceMultipleMappersPerRegion() throws Exception { testWithMapReduce(UTIL, "testWithMapReduceMultiRegion", 10, 5, 50, false); diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/CopyTableTestBase.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/CopyTableTestBase.java index d7648c26406d..d00f11fd2a39 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/CopyTableTestBase.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/CopyTableTestBase.java @@ -17,10 +17,10 @@ */ package org.apache.hadoop.hbase.mapreduce; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertNull; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertTrue; import java.io.IOException; import org.apache.commons.lang3.ArrayUtils; @@ -39,8 +39,7 @@ import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.util.ToolRunner; -import org.junit.Rule; -import org.junit.rules.TestName; +import org.junit.jupiter.api.TestInfo; /** * Base class for testing CopyTable MR tool. 
@@ -55,9 +54,6 @@ public abstract class CopyTableTestBase { protected static final byte[] FAMILY_B = Bytes.toBytes(FAMILY_B_STRING); protected static final byte[] QUALIFIER = Bytes.toBytes("q"); - @Rule - public TestName name = new TestName(); - protected abstract Table createSourceTable(TableDescriptor desc) throws Exception; protected abstract Table createTargetTable(TableDescriptor desc) throws Exception; @@ -91,9 +87,10 @@ protected final void verifyRows(Table t, byte[] family, byte[] column) throws IO } } - protected final void doCopyTableTest(Configuration conf, boolean bulkload) throws Exception { - TableName tableName1 = TableName.valueOf(name.getMethodName() + "1"); - TableName tableName2 = TableName.valueOf(name.getMethodName() + "2"); + protected final void doCopyTableTest(Configuration conf, boolean bulkload, TestInfo testInfo) + throws Exception { + TableName tableName1 = TableName.valueOf(testInfo.getTestMethod().get().getName() + "1"); + TableName tableName2 = TableName.valueOf(testInfo.getTestMethod().get().getName() + "2"); byte[] family = Bytes.toBytes("family"); byte[] column = Bytes.toBytes("c1"); TableDescriptor desc1 = TableDescriptorBuilder.newBuilder(tableName1) @@ -123,10 +120,10 @@ protected final void doCopyTableTest(Configuration conf, boolean bulkload) throw } } - protected final void doCopyTableTestWithMob(Configuration conf, boolean bulkload) - throws Exception { - TableName tableName1 = TableName.valueOf(name.getMethodName() + "1"); - TableName tableName2 = TableName.valueOf(name.getMethodName() + "2"); + protected final void doCopyTableTestWithMob(Configuration conf, boolean bulkload, + TestInfo testInfo) throws Exception { + TableName tableName1 = TableName.valueOf(testInfo.getTestMethod().get().getName() + "1"); + TableName tableName2 = TableName.valueOf(testInfo.getTestMethod().get().getName() + "2"); byte[] family = Bytes.toBytes("mob"); byte[] column = Bytes.toBytes("c1"); @@ -163,15 +160,15 @@ protected final void 
doCopyTableTestWithMob(Configuration conf, boolean bulkload Result r = t2.get(g); assertEquals(1, r.size()); assertTrue(CellUtil.matchingQualifier(r.rawCells()[0], column)); - assertEquals("compare row values between two tables", - t1.getDescriptor().getValue("row" + i), t2.getDescriptor().getValue("row" + i)); + assertEquals(t1.getDescriptor().getValue("row" + i), t2.getDescriptor().getValue("row" + i), + "compare row values between two tables"); } - assertEquals("compare count of mob rows after table copy", MobTestUtil.countMobRows(t1), - MobTestUtil.countMobRows(t2)); - assertEquals("compare count of mob row values between two tables", - t1.getDescriptor().getValues().size(), t2.getDescriptor().getValues().size()); - assertTrue("The mob row count is 0 but should be > 0", MobTestUtil.countMobRows(t2) > 0); + assertEquals(MobTestUtil.countMobRows(t1), MobTestUtil.countMobRows(t2), + "compare count of mob rows after table copy"); + assertEquals(t1.getDescriptor().getValues().size(), t2.getDescriptor().getValues().size(), + "compare count of mob row values between two tables"); + assertTrue(MobTestUtil.countMobRows(t2) > 0, "The mob row count is 0 but should be > 0"); } finally { dropSourceTable(tableName1); dropTargetTable(tableName2); @@ -183,9 +180,9 @@ protected final boolean runCopy(Configuration conf, String[] args) throws Except return status == 0; } - protected final void testStartStopRow(Configuration conf) throws Exception { - final TableName tableName1 = TableName.valueOf(name.getMethodName() + "1"); - final TableName tableName2 = TableName.valueOf(name.getMethodName() + "2"); + protected final void testStartStopRow(Configuration conf, TestInfo testInfo) throws Exception { + final TableName tableName1 = TableName.valueOf(testInfo.getTestMethod().get().getName() + "1"); + final TableName tableName2 = TableName.valueOf(testInfo.getTestMethod().get().getName() + "2"); final byte[] family = Bytes.toBytes("family"); final byte[] column = Bytes.toBytes("c1"); 
final byte[] row0 = Bytes.toBytesBinary("\\x01row0"); @@ -231,9 +228,9 @@ protected final void testStartStopRow(Configuration conf) throws Exception { } } - protected final void testRenameFamily(Configuration conf) throws Exception { - TableName sourceTable = TableName.valueOf(name.getMethodName() + "-source"); - TableName targetTable = TableName.valueOf(name.getMethodName() + "-target"); + protected final void testRenameFamily(Configuration conf, TestInfo testInfo) throws Exception { + TableName sourceTable = TableName.valueOf(testInfo.getTestMethod().get().getName() + "-source"); + TableName targetTable = TableName.valueOf(testInfo.getTestMethod().get().getName() + "-target"); TableDescriptor desc1 = TableDescriptorBuilder.newBuilder(sourceTable) .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY_A)) diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/CopyTableToPeerClusterTestBase.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/CopyTableToPeerClusterTestBase.java index d9219c9420f4..e089b3037d81 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/CopyTableToPeerClusterTestBase.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/CopyTableToPeerClusterTestBase.java @@ -17,16 +17,17 @@ */ package org.apache.hadoop.hbase.mapreduce; -import static org.junit.Assert.assertFalse; +import static org.junit.jupiter.api.Assertions.assertFalse; import org.apache.commons.lang3.ArrayUtils; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.client.TableDescriptor; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.Test; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestInfo; /** * Test CopyTable between clusters @@ -37,13 +38,13 @@ 
public abstract class CopyTableToPeerClusterTestBase extends CopyTableTestBase { protected static final HBaseTestingUtil UTIL2 = new HBaseTestingUtil(); - @BeforeClass + @BeforeAll public static void beforeClass() throws Exception { UTIL1.startMiniCluster(3); UTIL2.startMiniCluster(3); } - @AfterClass + @AfterAll public static void afterClass() throws Exception { UTIL1.shutdownMiniCluster(); UTIL2.shutdownMiniCluster(); @@ -78,35 +79,35 @@ protected String[] getPeerClusterOptions() throws Exception { * Simple end-to-end test */ @Test - public void testCopyTable() throws Exception { - doCopyTableTest(UTIL1.getConfiguration(), false); + public void testCopyTable(TestInfo testInfo) throws Exception { + doCopyTableTest(UTIL1.getConfiguration(), false, testInfo); } /** * Simple end-to-end test on table with MOB */ @Test - public void testCopyTableWithMob() throws Exception { - doCopyTableTestWithMob(UTIL1.getConfiguration(), false); + public void testCopyTableWithMob(TestInfo testInfo) throws Exception { + doCopyTableTestWithMob(UTIL1.getConfiguration(), false, testInfo); } @Test - public void testStartStopRow() throws Exception { - testStartStopRow(UTIL1.getConfiguration()); + public void testStartStopRow(TestInfo testInfo) throws Exception { + testStartStopRow(UTIL1.getConfiguration(), testInfo); } /** * Test copy of table from sourceTable to targetTable all rows from family a */ @Test - public void testRenameFamily() throws Exception { - testRenameFamily(UTIL1.getConfiguration()); + public void testRenameFamily(TestInfo testInfo) throws Exception { + testRenameFamily(UTIL1.getConfiguration(), testInfo); } @Test - public void testBulkLoadNotSupported() throws Exception { - TableName tableName1 = TableName.valueOf(name.getMethodName() + "1"); - TableName tableName2 = TableName.valueOf(name.getMethodName() + "2"); + public void testBulkLoadNotSupported(TestInfo testInfo) throws Exception { + TableName tableName1 = 
TableName.valueOf(testInfo.getTestMethod().get().getName() + "1"); + TableName tableName2 = TableName.valueOf(testInfo.getTestMethod().get().getName() + "2"); try (Table t1 = UTIL1.createTable(tableName1, FAMILY_A); Table t2 = UTIL2.createTable(tableName2, FAMILY_A)) { String[] args = ArrayUtils.addAll(getPeerClusterOptions(), @@ -119,9 +120,9 @@ public void testBulkLoadNotSupported() throws Exception { } @Test - public void testSnapshotNotSupported() throws Exception { - TableName tableName1 = TableName.valueOf(name.getMethodName() + "1"); - TableName tableName2 = TableName.valueOf(name.getMethodName() + "2"); + public void testSnapshotNotSupported(TestInfo testInfo) throws Exception { + TableName tableName1 = TableName.valueOf(testInfo.getTestMethod().get().getName() + "1"); + TableName tableName2 = TableName.valueOf(testInfo.getTestMethod().get().getName() + "2"); String snapshot = tableName1.getNameAsString() + "_snapshot"; try (Table t1 = UTIL1.createTable(tableName1, FAMILY_A); Table t2 = UTIL2.createTable(tableName2, FAMILY_A)) { diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2TestBase.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2TestBase.java index ac9810a8825a..2e459402e94a 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2TestBase.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2TestBase.java @@ -17,9 +17,9 @@ */ package org.apache.hadoop.hbase.mapreduce; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; import java.io.IOException; import java.util.List; diff --git 
a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/MRIncrementalLoadTestBase.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/MRIncrementalLoadTestBase.java index ad2f841c19df..1024f8723b4e 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/MRIncrementalLoadTestBase.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/MRIncrementalLoadTestBase.java @@ -17,8 +17,8 @@ */ package org.apache.hadoop.hbase.mapreduce; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; import java.io.IOException; import java.util.ArrayList; @@ -44,11 +44,7 @@ import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.tool.BulkLoadHFiles; import org.apache.hadoop.hbase.util.Bytes; -import org.junit.After; -import org.junit.AfterClass; -import org.junit.Before; -import org.junit.Test; -import org.junit.runners.Parameterized.Parameter; +import org.junit.jupiter.api.AfterAll; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -60,13 +56,10 @@ public class MRIncrementalLoadTestBase extends HFileOutputFormat2TestBase { private static String[] HOSTNAMES; - @Parameter(0) public boolean shouldChangeRegions; - @Parameter(1) public boolean putSortReducer; - @Parameter(2) public List tableStr; private Map allTables; @@ -94,12 +87,11 @@ protected static void setupCluster(boolean shouldKeepLocality) throws Exception } - @AfterClass + @AfterAll public static void tearDownAfterClass() throws IOException { UTIL.shutdownMiniCluster(); } - @Before public void setUp() throws IOException { int regionNum = SHOULD_KEEP_LOCALITY ? 
20 : 5; allTables = new HashMap<>(tableStr.size()); @@ -110,9 +102,9 @@ public void setUp() throws IOException { Table table = UTIL.createTable(tableName, FAMILIES, splitKeys); RegionLocator r = UTIL.getConnection().getRegionLocator(tableName); - assertEquals("Should start with empty table", 0, HBaseTestingUtil.countRows(table)); + assertEquals(0, HBaseTestingUtil.countRows(table), "Should start with empty table"); int numRegions = r.getStartKeys().length; - assertEquals("Should make " + regionNum + " regions", numRegions, regionNum); + assertEquals(numRegions, regionNum, "Should make " + regionNum + " regions"); allTables.put(tableStrSingle, table); tableInfo.add(new HFileOutputFormat2.TableInfo(table.getDescriptor(), r)); @@ -120,7 +112,6 @@ public void setUp() throws IOException { testDir = UTIL.getDataTestDirOnTestFS(tableStr.get(0)); } - @After public void tearDown() throws IOException { for (HFileOutputFormat2.TableInfo tableInfoSingle : tableInfo) { tableInfoSingle.getRegionLocator().close(); @@ -132,7 +123,19 @@ public void tearDown() throws IOException { } } - @Test + protected void runTest(boolean shouldChangeRegions, boolean putSortReducer, List tableStr) + throws Exception { + this.shouldChangeRegions = shouldChangeRegions; + this.putSortReducer = putSortReducer; + this.tableStr = tableStr; + setUp(); + try { + doIncrementalLoadTest(); + } finally { + tearDown(); + } + } + public void doIncrementalLoadTest() throws Exception { boolean writeMultipleTables = tableStr.size() > 1; // Generate the bulk load files @@ -143,8 +146,8 @@ public void doIncrementalLoadTest() throws Exception { for (Table tableSingle : allTables.values()) { // This doesn't write into the table, just makes files - assertEquals("HFOF should not touch actual table", 0, - HBaseTestingUtil.countRows(tableSingle)); + assertEquals(0, HBaseTestingUtil.countRows(tableSingle), + "HFOF should not touch actual table"); } int numTableDirs = 0; FileStatus[] fss = 
testDir.getFileSystem(UTIL.getConfiguration()).listStatus(testDir); @@ -169,10 +172,10 @@ public void doIncrementalLoadTest() throws Exception { } } } - assertEquals("Column family not found in FS.", FAMILIES.length, dir); + assertEquals(FAMILIES.length, dir, "Column family not found in FS."); } if (writeMultipleTables) { - assertEquals("Dir for all input tables not created", numTableDirs, allTables.size()); + assertEquals(numTableDirs, allTables.size(), "Dir for all input tables not created"); } Admin admin = UTIL.getAdmin(); @@ -207,12 +210,12 @@ public void doIncrementalLoadTest() throws Exception { int expectedRows = 0; if (putSortReducer) { // no rows should be extracted - assertEquals("BulkLoadHFiles should put expected data in table", expectedRows, - HBaseTestingUtil.countRows(currentTable)); + assertEquals(expectedRows, HBaseTestingUtil.countRows(currentTable), + "BulkLoadHFiles should put expected data in table"); } else { expectedRows = NMapInputFormat.getNumMapTasks(UTIL.getConfiguration()) * ROWSPERSPLIT; - assertEquals("BulkLoadHFiles should put expected data in table", expectedRows, - HBaseTestingUtil.countRows(currentTable)); + assertEquals(expectedRows, HBaseTestingUtil.countRows(currentTable), + "BulkLoadHFiles should put expected data in table"); Scan scan = new Scan(); ResultScanner results = currentTable.getScanner(scan); for (Result res : results) { @@ -245,8 +248,8 @@ public void doIncrementalLoadTest() throws Exception { } admin.enableTable(currentTableName); UTIL.waitTableAvailable(currentTableName); - assertEquals("Data should remain after reopening of regions", tableDigestBefore, - UTIL.checksumRows(currentTable)); + assertEquals(tableDigestBefore, UTIL.checksumRows(currentTable), + "Data should remain after reopening of regions"); } } } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormatTestBase.java 
b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormatTestBase.java index 0e7ff24a1dab..c18a5c307663 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormatTestBase.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormatTestBase.java @@ -17,8 +17,8 @@ */ package org.apache.hadoop.hbase.mapreduce; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; import java.io.File; import java.io.IOException; @@ -42,10 +42,10 @@ import org.apache.hadoop.mapreduce.Job; import org.apache.hadoop.mapreduce.Reducer; import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat; -import org.junit.After; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.Test; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -70,7 +70,7 @@ public abstract class MultiTableInputFormatTestBase { } } - @BeforeClass + @BeforeAll public static void setUpBeforeClass() throws Exception { // switch TIF to log at DEBUG level Log4jUtils.enableDebug(MultiTableInputFormatBase.class); @@ -85,12 +85,12 @@ public static void setUpBeforeClass() throws Exception { } } - @AfterClass + @AfterAll public static void tearDownAfterClass() throws Exception { TEST_UTIL.shutdownMiniCluster(); } - @After + @AfterEach public void tearDown() throws Exception { Configuration c = TEST_UTIL.getConfiguration(); FileUtil.fullyDelete(new File(c.get("hadoop.tmp.dir"))); diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatTestBase.java 
b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatTestBase.java index 7a0615a5ff8e..c2002069d93f 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatTestBase.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatTestBase.java @@ -17,7 +17,9 @@ */ package org.apache.hadoop.hbase.mapreduce; -import static org.junit.Assert.assertFalse; +import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; import java.io.IOException; import java.util.Arrays; @@ -42,10 +44,9 @@ import org.apache.hadoop.hbase.util.CommonFSUtils; import org.apache.hadoop.hbase.util.FSUtils; import org.apache.hadoop.hbase.util.HFileArchiveUtil; -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -58,7 +59,7 @@ public abstract class TableSnapshotInputFormatTestBase { protected FileSystem fs; protected Path rootDir; - @Before + @BeforeEach public void setupCluster() throws Exception { setupConf(UTIL.getConfiguration()); StartTestingClusterOption option = @@ -69,7 +70,7 @@ public void setupCluster() throws Exception { fs = rootDir.getFileSystem(UTIL.getConfiguration()); } - @After + @AfterEach public void tearDownCluster() throws Exception { UTIL.shutdownMiniCluster(); } @@ -142,11 +143,11 @@ public void testRestoreSnapshotDoesNotCreateBackRefLinks() throws Exception { Path path = HFileLink.getBackReferencesDir(storeDir, status.getPath().getName()); // assert back references directory is empty - assertFalse("There is a back reference in " + path, fs.exists(path)); + assertFalse(fs.exists(path), "There is a back reference 
in " + path); path = HFileLink.getBackReferencesDir(archiveStoreDir, status.getPath().getName()); // assert back references directory is empty - assertFalse("There is a back reference in " + path, fs.exists(path)); + assertFalse(fs.exists(path), "There is a back reference in " + path); } } } @@ -176,14 +177,14 @@ protected static void verifyRowFromMap(ImmutableBytesWritable key, Result result Cell cell = scanner.current(); // assert that all Cells in the Result have the same key - Assert.assertEquals(0, Bytes.compareTo(row, 0, row.length, cell.getRowArray(), - cell.getRowOffset(), cell.getRowLength())); + assertEquals(0, Bytes.compareTo(row, 0, row.length, cell.getRowArray(), cell.getRowOffset(), + cell.getRowLength())); } for (byte[] family : FAMILIES) { byte[] actual = result.getValue(family, family); - Assert.assertArrayEquals("Row in snapshot does not match, expected:" + Bytes.toString(row) - + " ,actual:" + Bytes.toString(actual), row, actual); + assertArrayEquals(row, actual, "Row in snapshot does not match, expected:" + + Bytes.toString(row) + " ,actual:" + Bytes.toString(actual)); } } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCellCounter.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCellCounter.java index 7fbb5bc16255..45b855663b24 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCellCounter.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCellCounter.java @@ -17,9 +17,9 @@ */ package org.apache.hadoop.hbase.mapreduce; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; import java.io.ByteArrayOutputStream; import java.io.File; @@ -30,7 +30,6 @@ import org.apache.hadoop.fs.FileUtil; 
import org.apache.hadoop.fs.LocalFileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.TableName; @@ -42,19 +41,15 @@ import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.LauncherSecurityManager; import org.apache.hadoop.util.ToolRunner; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Rule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.rules.TestName; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestInfo; -@Category({ MapReduceTests.class, LargeTests.class }) +@Tag(MapReduceTests.TAG) +@Tag(LargeTests.TAG) public class TestCellCounter { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestCellCounter.class); private static final HBaseTestingUtil UTIL = new HBaseTestingUtil(); private static final byte[] ROW1 = Bytes.toBytesBinary("\\x01row1"); @@ -70,17 +65,14 @@ public class TestCellCounter { "target" + File.separator + "test-data" + File.separator + "output"; private static long now = EnvironmentEdgeManager.currentTime(); - @Rule - public TestName name = new TestName(); - - @BeforeClass + @BeforeAll public static void beforeClass() throws Exception { UTIL.startMiniCluster(); FQ_OUTPUT_DIR = new Path(OUTPUT_DIR).makeQualified(new LocalFileSystem()); FileUtil.fullyDelete(new File(OUTPUT_DIR)); } - @AfterClass + @AfterAll public static void afterClass() throws Exception { UTIL.shutdownMiniCluster(); } @@ -89,8 +81,8 @@ public static void afterClass() throws Exception { * Test CellCounter all data should print to output */ @Test - public void testCellCounter() 
throws Exception { - final TableName sourceTable = TableName.valueOf(name.getMethodName()); + public void testCellCounter(TestInfo testInfo) throws Exception { + final TableName sourceTable = TableName.valueOf(testInfo.getTestMethod().get().getName()); byte[][] families = { FAMILY_A, FAMILY_B }; try (Table t = UTIL.createTable(sourceTable, families)) { Put p = new Put(ROW1); @@ -125,8 +117,8 @@ public void testCellCounter() throws Exception { * Test CellCounter all data should print to output */ @Test - public void testCellCounterPrefix() throws Exception { - final TableName sourceTable = TableName.valueOf(name.getMethodName()); + public void testCellCounterPrefix(TestInfo testInfo) throws Exception { + final TableName sourceTable = TableName.valueOf(testInfo.getTestMethod().get().getName()); byte[][] families = { FAMILY_A, FAMILY_B }; try (Table t = UTIL.createTable(sourceTable, families)) { Put p = new Put(ROW1); @@ -161,8 +153,8 @@ public void testCellCounterPrefix() throws Exception { * Test CellCounter with time range all data should print to output */ @Test - public void testCellCounterStartTimeRange() throws Exception { - final TableName sourceTable = TableName.valueOf(name.getMethodName()); + public void testCellCounterStartTimeRange(TestInfo testInfo) throws Exception { + final TableName sourceTable = TableName.valueOf(testInfo.getTestMethod().get().getName()); byte[][] families = { FAMILY_A, FAMILY_B }; try (Table t = UTIL.createTable(sourceTable, families)) { Put p = new Put(ROW1); @@ -198,8 +190,8 @@ public void testCellCounterStartTimeRange() throws Exception { * Test CellCounter with time range all data should print to output */ @Test - public void testCellCounteEndTimeRange() throws Exception { - final TableName sourceTable = TableName.valueOf(name.getMethodName()); + public void testCellCounteEndTimeRange(TestInfo testInfo) throws Exception { + final TableName sourceTable = TableName.valueOf(testInfo.getTestMethod().get().getName()); byte[][] 
families = { FAMILY_A, FAMILY_B }; try (Table t = UTIL.createTable(sourceTable, families)) { Put p = new Put(ROW1); @@ -235,8 +227,8 @@ public void testCellCounteEndTimeRange() throws Exception { * Test CellCounter with time range all data should print to output */ @Test - public void testCellCounteOutOfTimeRange() throws Exception { - final TableName sourceTable = TableName.valueOf(name.getMethodName()); + public void testCellCounteOutOfTimeRange(TestInfo testInfo) throws Exception { + final TableName sourceTable = TableName.valueOf(testInfo.getTestMethod().get().getName()); byte[][] families = { FAMILY_A, FAMILY_B }; try (Table t = UTIL.createTable(sourceTable, families)) { Put p = new Put(ROW1); @@ -307,8 +299,8 @@ public void testCellCounterMain() throws Exception { * Test CellCounter for complete table all data should print to output */ @Test - public void testCellCounterForCompleteTable() throws Exception { - final TableName sourceTable = TableName.valueOf(name.getMethodName()); + public void testCellCounterForCompleteTable(TestInfo testInfo) throws Exception { + final TableName sourceTable = TableName.valueOf(testInfo.getTestMethod().get().getName()); String outputPath = OUTPUT_DIR + sourceTable; LocalFileSystem localFileSystem = new LocalFileSystem(); Path outputDir = new Path(outputPath).makeQualified(localFileSystem.getUri(), @@ -360,7 +352,7 @@ public void testCellCounterForCompleteTable() throws Exception { @Test public void TestCellCounterWithoutOutputDir() throws Exception { String[] args = new String[] { "tableName" }; - assertEquals("CellCounter should exit with -1 as output directory is not specified.", -1, - ToolRunner.run(HBaseConfiguration.create(), new CellCounter(), args)); + assertEquals(-1, ToolRunner.run(HBaseConfiguration.create(), new CellCounter(), args), + "CellCounter should exit with -1 as output directory is not specified."); } } diff --git 
a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestConfigurePartitioner.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestConfigurePartitioner.java index 49c08a463abe..f2e51a8a73bb 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestConfigurePartitioner.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestConfigurePartitioner.java @@ -17,8 +17,8 @@ */ package org.apache.hadoop.hbase.mapreduce; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertTrue; import static org.mockito.Mockito.verify; import java.io.IOException; @@ -28,7 +28,6 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.hbase.testclassification.MapReduceTests; @@ -36,32 +35,28 @@ import org.apache.hadoop.mapreduce.Job; import org.apache.hadoop.mapreduce.lib.partition.TotalOrderPartitioner; import org.apache.hadoop.security.UserGroupInformation; -import org.junit.After; -import org.junit.Before; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; import org.mockito.Mockito; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({ MapReduceTests.class, MediumTests.class }) +@Tag(MapReduceTests.TAG) +@Tag(MediumTests.TAG) public class TestConfigurePartitioner { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestConfigurePartitioner.class); 
- private static final Logger LOG = LoggerFactory.getLogger(TestConfigurePartitioner.class); private static final HBaseTestingUtil UTIL = new HBaseTestingUtil(); - @Before + @BeforeEach public void setUp() throws Exception { UTIL.startMiniDFSCluster(1); } - @After + @AfterEach public void tearDown() throws IOException { UTIL.shutdownMiniDFSCluster(); } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCopyTable.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCopyTable.java index 5c3e9b65079d..b07fd6c0e95d 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCopyTable.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCopyTable.java @@ -17,18 +17,17 @@ */ package org.apache.hadoop.hbase.mapreduce; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertThrows; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; import java.io.ByteArrayOutputStream; import java.io.IOException; import java.io.PrintStream; import java.util.HashMap; import java.util.Map; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; @@ -45,35 +44,27 @@ import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.LauncherSecurityManager; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Rule; -import 
org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.rules.TestName; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestInfo; /** * Basic test for the CopyTable M/R tool */ -@Category({ MapReduceTests.class, LargeTests.class }) +@Tag(MapReduceTests.TAG) +@Tag(LargeTests.TAG) public class TestCopyTable extends CopyTableTestBase { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestCopyTable.class); - private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); - @Rule - public TestName name = new TestName(); - - @BeforeClass + @BeforeAll public static void beforeClass() throws Exception { TEST_UTIL.startMiniCluster(3); } - @AfterClass + @AfterAll public static void afterClass() throws Exception { TEST_UTIL.shutdownMiniCluster(); } @@ -107,45 +98,45 @@ protected String[] getPeerClusterOptions() throws Exception { * Simple end-to-end test */ @Test - public void testCopyTable() throws Exception { - doCopyTableTest(TEST_UTIL.getConfiguration(), false); + public void testCopyTable(TestInfo testInfo) throws Exception { + doCopyTableTest(TEST_UTIL.getConfiguration(), false, testInfo); } /** * Simple end-to-end test with bulkload. 
*/ @Test - public void testCopyTableWithBulkload() throws Exception { - doCopyTableTest(TEST_UTIL.getConfiguration(), true); + public void testCopyTableWithBulkload(TestInfo testInfo) throws Exception { + doCopyTableTest(TEST_UTIL.getConfiguration(), true, testInfo); } /** * Simple end-to-end test on table with MOB */ @Test - public void testCopyTableWithMob() throws Exception { - doCopyTableTestWithMob(TEST_UTIL.getConfiguration(), false); + public void testCopyTableWithMob(TestInfo testInfo) throws Exception { + doCopyTableTestWithMob(TEST_UTIL.getConfiguration(), false, testInfo); } /** * Simple end-to-end test with bulkload on table with MOB. */ @Test - public void testCopyTableWithBulkloadWithMob() throws Exception { - doCopyTableTestWithMob(TEST_UTIL.getConfiguration(), true); + public void testCopyTableWithBulkloadWithMob(TestInfo testInfo) throws Exception { + doCopyTableTestWithMob(TEST_UTIL.getConfiguration(), true, testInfo); } @Test - public void testStartStopRow() throws Exception { - testStartStopRow(TEST_UTIL.getConfiguration()); + public void testStartStopRow(TestInfo testInfo) throws Exception { + testStartStopRow(TEST_UTIL.getConfiguration(), testInfo); } /** * Test copy of table from sourceTable to targetTable all rows from family a */ @Test - public void testRenameFamily() throws Exception { - testRenameFamily(TEST_UTIL.getConfiguration()); + public void testRenameFamily(TestInfo testInfo) throws Exception { + testRenameFamily(TEST_UTIL.getConfiguration(), testInfo); } /** diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCopyTableToPeerClusterWithClusterKey.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCopyTableToPeerClusterWithClusterKey.java index 6ff9afda5357..1a615b8ca61f 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCopyTableToPeerClusterWithClusterKey.java +++ 
b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCopyTableToPeerClusterWithClusterKey.java @@ -17,19 +17,14 @@ */ package org.apache.hadoop.hbase.mapreduce; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.testclassification.MapReduceTests; -import org.junit.ClassRule; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.Tag; -@Category({ MapReduceTests.class, LargeTests.class }) +@Tag(MapReduceTests.TAG) +@Tag(LargeTests.TAG) public class TestCopyTableToPeerClusterWithClusterKey extends CopyTableToPeerClusterTestBase { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestCopyTableToPeerClusterWithClusterKey.class); - @Override protected String[] getPeerClusterOptions() throws Exception { return new String[] { "--peer.adr=" + UTIL2.getClusterKey() }; diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCopyTableToPeerClusterWithRpcUri.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCopyTableToPeerClusterWithRpcUri.java index 4e6293712ec2..11e8755077a8 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCopyTableToPeerClusterWithRpcUri.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCopyTableToPeerClusterWithRpcUri.java @@ -17,22 +17,16 @@ */ package org.apache.hadoop.hbase.mapreduce; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.testclassification.MapReduceTests; -import org.junit.ClassRule; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.Tag; -@Category({ MapReduceTests.class, LargeTests.class }) +@Tag(MapReduceTests.TAG) +@Tag(LargeTests.TAG) public class TestCopyTableToPeerClusterWithRpcUri extends CopyTableToPeerClusterTestBase 
{ - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestCopyTableToPeerClusterWithRpcUri.class); - @Override protected String[] getPeerClusterOptions() throws Exception { return new String[] { "--peer.uri=" + UTIL2.getZkConnectionURI() }; } - } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCopyTableToPeerClusterWithZkUri.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCopyTableToPeerClusterWithZkUri.java index 720c367eb739..2ffc3c50435a 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCopyTableToPeerClusterWithZkUri.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCopyTableToPeerClusterWithZkUri.java @@ -17,19 +17,14 @@ */ package org.apache.hadoop.hbase.mapreduce; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.testclassification.MapReduceTests; -import org.junit.ClassRule; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.Tag; -@Category({ MapReduceTests.class, LargeTests.class }) +@Tag(MapReduceTests.TAG) +@Tag(LargeTests.TAG) public class TestCopyTableToPeerClusterWithZkUri extends CopyTableToPeerClusterTestBase { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestCopyTableToPeerClusterWithZkUri.class); - @Override protected String[] getPeerClusterOptions() throws Exception { return new String[] { "--peer.uri=" + UTIL2.getRpcConnnectionURI() }; diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestGroupingTableMapper.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestGroupingTableMapper.java index 34d197be02fa..a850681e7997 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestGroupingTableMapper.java +++ 
b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestGroupingTableMapper.java @@ -23,7 +23,6 @@ import java.util.List; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; @@ -31,17 +30,13 @@ import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.mapreduce.Mapper; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; -@Category({ MapReduceTests.class, SmallTests.class }) +@Tag(MapReduceTests.TAG) +@Tag(SmallTests.TAG) public class TestGroupingTableMapper { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestGroupingTableMapper.class); - /** * Test GroupingTableMapper class */ diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHBaseMRTestingUtility.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHBaseMRTestingUtility.java index 37dd817f94a3..5086d2badabc 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHBaseMRTestingUtility.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHBaseMRTestingUtility.java @@ -17,25 +17,21 @@ */ package org.apache.hadoop.hbase.mapreduce; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; import java.io.IOException; import java.util.HashMap; import java.util.Map; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import 
org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.testclassification.MapReduceTests; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; -@Category({ MapReduceTests.class, LargeTests.class }) +@Tag(MapReduceTests.TAG) +@Tag(LargeTests.TAG) public class TestHBaseMRTestingUtility { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestHBaseMRTestingUtility.class); @Test public void testMRYarnConfigsPopulation() throws IOException { @@ -55,20 +51,18 @@ public void testMRYarnConfigsPopulation() throws IOException { } for (Map.Entry entry : dummyProps.entrySet()) { - assertTrue( + assertTrue(hbt.getConfiguration().get(entry.getKey()).equals(entry.getValue()), "The Configuration for key " + entry.getKey() + " and value: " + entry.getValue() - + " is not populated correctly", - hbt.getConfiguration().get(entry.getKey()).equals(entry.getValue())); + + " is not populated correctly"); } hbt.startMiniMapReduceCluster(); // Confirm that MiniMapReduceCluster overwrites the mr properties and updates the Configuration for (Map.Entry entry : dummyProps.entrySet()) { - assertFalse( + assertFalse(hbt.getConfiguration().get(entry.getKey()).equals(entry.getValue()), "The MR prop: " + entry.getValue() + " is not overwritten when map reduce mini" - + "cluster is started", - hbt.getConfiguration().get(entry.getKey()).equals(entry.getValue())); + + "cluster is started"); } hbt.shutdownMiniMapReduceCluster(); diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java index 37096e408a74..72bfb46ef4f2 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java +++ 
b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java @@ -18,12 +18,12 @@ package org.apache.hadoop.hbase.mapreduce; import static org.apache.hadoop.hbase.regionserver.HStoreFile.BLOOM_FILTER_TYPE_KEY; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertNotSame; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNotSame; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; import java.io.IOException; import java.lang.reflect.Field; @@ -50,7 +50,6 @@ import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.CompatibilitySingletonFactory; import org.apache.hadoop.hbase.ExtendedCell; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; @@ -108,10 +107,8 @@ import org.apache.hadoop.mapreduce.RecordWriter; import org.apache.hadoop.mapreduce.TaskAttemptContext; import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat; -import org.junit.ClassRule; -import org.junit.Ignore; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.Disabled; +import org.junit.jupiter.api.Test; import org.mockito.Mockito; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -121,13 +118,10 @@ * output. Creates a few inner classes to implement splits and an inputformat that emits keys and * values. 
*/ -@Category({ VerySlowMapReduceTests.class, LargeTests.class }) +@org.junit.jupiter.api.Tag(VerySlowMapReduceTests.TAG) +@org.junit.jupiter.api.Tag(LargeTests.TAG) public class TestHFileOutputFormat2 extends HFileOutputFormat2TestBase { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestHFileOutputFormat2.class); - private static final Logger LOG = LoggerFactory.getLogger(TestHFileOutputFormat2.class); /** @@ -135,7 +129,7 @@ public class TestHFileOutputFormat2 extends HFileOutputFormat2TestBase { * timestamp is {@link HConstants#LATEST_TIMESTAMP}. * @see HBASE-2615 */ - @Ignore("Goes zombie too frequently; needs work. See HBASE-14563") + @Disabled("Goes zombie too frequently; needs work. See HBASE-14563") @Test public void test_LATEST_TIMESTAMP_isReplaced() throws Exception { Configuration conf = new Configuration(this.UTIL.getConfiguration()); @@ -185,7 +179,7 @@ private TaskAttemptContext createTestTaskAttemptContext(final Job job) throws Ex * Test that {@link HFileOutputFormat2} creates an HFile with TIMERANGE metadata used by * time-restricted scans. */ - @Ignore("Goes zombie too frequently; needs work. See HBASE-14563") + @Disabled("Goes zombie too frequently; needs work. See HBASE-14563") @Test public void test_TIMERANGE() throws Exception { Configuration conf = new Configuration(this.UTIL.getConfiguration()); @@ -249,7 +243,7 @@ public void test_TIMERANGE() throws Exception { /** * Run small MR job. */ - @Ignore("Goes zombie too frequently; needs work. See HBASE-14563") + @Disabled("Goes zombie too frequently; needs work. 
See HBASE-14563") @Test public void testWritingPEData() throws Exception { Configuration conf = UTIL.getConfiguration(); @@ -302,10 +296,10 @@ public void testWritingPEData() throws Exception { kvCount += reader.getEntries(); scanner.seekTo(); long perKVSize = scanner.getCell().getSerializedSize(); - assertTrue("Data size of each file should not be too large.", - perKVSize * reader.getEntries() <= hregionMaxFilesize); + assertTrue(perKVSize * reader.getEntries() <= hregionMaxFilesize, + "Data size of each file should not be too large."); } - assertEquals("Should write expected data in output file.", ROWSPERSPLIT, kvCount); + assertEquals(ROWSPERSPLIT, kvCount, "Should write expected data in output file."); } } @@ -358,7 +352,7 @@ public void test_WritingTagData() throws Exception { } } - @Ignore("Goes zombie too frequently; needs work. See HBASE-14563") + @Disabled("Goes zombie too frequently; needs work. See HBASE-14563") @Test public void testJobConfiguration() throws Exception { Configuration conf = new Configuration(this.UTIL.getConfiguration()); @@ -371,14 +365,14 @@ public void testJobConfiguration() throws Exception { setupMockStartKeys(regionLocator); setupMockTableName(regionLocator); HFileOutputFormat2.configureIncrementalLoad(job, table.getDescriptor(), regionLocator); - assertEquals(job.getNumReduceTasks(), 4); + assertEquals(4, job.getNumReduceTasks()); } /** * Test for {@link HFileOutputFormat2#createFamilyCompressionMap(Configuration)}. Tests that the * family compression map is correctly serialized into and deserialized from configuration */ - @Ignore("Goes zombie too frequently; needs work. See HBASE-14563") + @Disabled("Goes zombie too frequently; needs work. 
See HBASE-14563") @Test public void testSerializeDeserializeFamilyCompressionMap() throws IOException { for (int numCfs = 0; numCfs <= 3; numCfs++) { @@ -398,8 +392,9 @@ public void testSerializeDeserializeFamilyCompressionMap() throws IOException { // test that we have a value for all column families that matches with the // used mock values for (Entry entry : familyToCompression.entrySet()) { - assertEquals("Compression configuration incorrect for column family:" + entry.getKey(), - entry.getValue(), retrievedFamilyToCompressionMap.get(Bytes.toBytes(entry.getKey()))); + assertEquals(entry.getValue(), + retrievedFamilyToCompressionMap.get(Bytes.toBytes(entry.getKey())), + "Compression configuration incorrect for column family:" + entry.getKey()); } } } @@ -444,7 +439,7 @@ private Map getMockColumnFamiliesForCompression(i * Test for {@link HFileOutputFormat2#createFamilyBloomTypeMap(Configuration)}. Tests that the * family bloom type map is correctly serialized into and deserialized from configuration */ - @Ignore("Goes zombie too frequently; needs work. See HBASE-14563") + @Disabled("Goes zombie too frequently; needs work. 
See HBASE-14563") @Test public void testSerializeDeserializeFamilyBloomTypeMap() throws IOException { for (int numCfs = 0; numCfs <= 2; numCfs++) { @@ -464,8 +459,9 @@ public void testSerializeDeserializeFamilyBloomTypeMap() throws IOException { // test that we have a value for all column families that matches with the // used mock values for (Entry entry : familyToBloomType.entrySet()) { - assertEquals("BloomType configuration incorrect for column family:" + entry.getKey(), - entry.getValue(), retrievedFamilyToBloomTypeMap.get(Bytes.toBytes(entry.getKey()))); + assertEquals(entry.getValue(), + retrievedFamilyToBloomTypeMap.get(Bytes.toBytes(entry.getKey())), + "BloomType configuration incorrect for column family:" + entry.getKey()); } } } @@ -505,7 +501,7 @@ private Map getMockColumnFamiliesForBloomType(int numCfs) { * Test for {@link HFileOutputFormat2#createFamilyBlockSizeMap(Configuration)}. Tests that the * family block size map is correctly serialized into and deserialized from configuration */ - @Ignore("Goes zombie too frequently; needs work. See HBASE-14563") + @Disabled("Goes zombie too frequently; needs work. 
See HBASE-14563") @Test public void testSerializeDeserializeFamilyBlockSizeMap() throws IOException { for (int numCfs = 0; numCfs <= 3; numCfs++) { @@ -525,8 +521,9 @@ public void testSerializeDeserializeFamilyBlockSizeMap() throws IOException { // test that we have a value for all column families that matches with the // used mock values for (Entry entry : familyToBlockSize.entrySet()) { - assertEquals("BlockSize configuration incorrect for column family:" + entry.getKey(), - entry.getValue(), retrievedFamilyToBlockSizeMap.get(Bytes.toBytes(entry.getKey()))); + assertEquals(entry.getValue(), + retrievedFamilyToBlockSizeMap.get(Bytes.toBytes(entry.getKey())), + "BlockSize configuration incorrect for column family:" + entry.getKey()); } } } @@ -570,7 +567,7 @@ private Map getMockColumnFamiliesForBlockSize(int numCfs) { * the family data block encoding map is correctly serialized into and deserialized from * configuration */ - @Ignore("Goes zombie too frequently; needs work. See HBASE-14563") + @Disabled("Goes zombie too frequently; needs work. 
See HBASE-14563") @Test public void testSerializeDeserializeFamilyDataBlockEncodingMap() throws IOException { for (int numCfs = 0; numCfs <= 3; numCfs++) { @@ -592,10 +589,9 @@ public void testSerializeDeserializeFamilyDataBlockEncodingMap() throws IOExcept // test that we have a value for all column families that matches with the // used mock values for (Entry entry : familyToDataBlockEncoding.entrySet()) { - assertEquals( - "DataBlockEncoding configuration incorrect for column family:" + entry.getKey(), - entry.getValue(), - retrievedFamilyToDataBlockEncodingMap.get(Bytes.toBytes(entry.getKey()))); + assertEquals(entry.getValue(), + retrievedFamilyToDataBlockEncodingMap.get(Bytes.toBytes(entry.getKey())), + "DataBlockEncoding configuration incorrect for column family:" + entry.getKey()); } } } @@ -650,7 +646,7 @@ private void setupMockTableName(RegionLocator table) throws IOException { * Test that {@link HFileOutputFormat2} RecordWriter uses compression and bloom filter settings * from the column family descriptor */ - @Ignore("Goes zombie too frequently; needs work. See HBASE-14563") + @Disabled("Goes zombie too frequently; needs work. 
See HBASE-14563") @Test public void testColumnFamilySettings() throws Exception { Configuration conf = new Configuration(this.UTIL.getConfiguration()); @@ -715,12 +711,11 @@ public void testColumnFamilySettings() throws Exception { byte[] bloomFilter = fileInfo.get(BLOOM_FILTER_TYPE_KEY); if (bloomFilter == null) bloomFilter = Bytes.toBytes("NONE"); - assertEquals( - "Incorrect bloom filter used for column family " + familyStr + "(reader: " + reader + ")", - hcd.getBloomFilterType(), BloomType.valueOf(Bytes.toString(bloomFilter))); - assertEquals( - "Incorrect compression used for column family " + familyStr + "(reader: " + reader + ")", - hcd.getCompressionType(), reader.getFileContext().getCompression()); + assertEquals(hcd.getBloomFilterType(), BloomType.valueOf(Bytes.toString(bloomFilter)), + "Incorrect bloom filter used for column family " + familyStr + "(reader: " + reader + + ")"); + assertEquals(hcd.getCompressionType(), reader.getFileContext().getCompression(), + "Incorrect compression used for column family " + familyStr + "(reader: " + reader + ")"); } } finally { dir.getFileSystem(conf).delete(dir, true); @@ -757,7 +752,7 @@ private void writeRandomKeyValues(RecordWriter wri * excluded from minor compaction. Without the fix of HBASE-6901, an * ArrayIndexOutOfBoundsException will be thrown. 
*/ - @Ignore("Flakey: See HBASE-9051") + @Disabled("Flakey: See HBASE-9051") @Test public void testExcludeAllFromMinorCompaction() throws Exception { Configuration conf = UTIL.getConfiguration(); @@ -769,7 +764,7 @@ public void testExcludeAllFromMinorCompaction() throws Exception { Table table = UTIL.createTable(TABLE_NAMES[0], FAMILIES); RegionLocator locator = conn.getRegionLocator(TABLE_NAMES[0])) { final FileSystem fs = UTIL.getDFSCluster().getFileSystem(); - assertEquals("Should start with empty table", 0, UTIL.countRows(table)); + assertEquals(0, UTIL.countRows(table), "Should start with empty table"); // deep inspection: get the StoreFile dir final Path storePath = @@ -793,8 +788,8 @@ public void testExcludeAllFromMinorCompaction() throws Exception { // Ensure data shows up int expectedRows = 2 * NMapInputFormat.getNumMapTasks(conf) * ROWSPERSPLIT; - assertEquals("BulkLoadHFiles should put expected data in table", expectedRows, - UTIL.countRows(table)); + assertEquals(expectedRows, UTIL.countRows(table), + "BulkLoadHFiles should put expected data in table"); // should have a second StoreFile now assertEquals(2, fs.listStatus(storePath).length); @@ -839,7 +834,7 @@ public Boolean call() throws Exception { } } - @Ignore("Goes zombie too frequently; needs work. See HBASE-14563") + @Disabled("Goes zombie too frequently; needs work. 
See HBASE-14563") @Test public void testExcludeMinorCompaction() throws Exception { Configuration conf = UTIL.getConfiguration(); @@ -852,7 +847,7 @@ public void testExcludeMinorCompaction() throws Exception { Path testDir = UTIL.getDataTestDirOnTestFS("testExcludeMinorCompaction"); final FileSystem fs = UTIL.getDFSCluster().getFileSystem(); Table table = UTIL.createTable(TABLE_NAMES[0], FAMILIES); - assertEquals("Should start with empty table", 0, UTIL.countRows(table)); + assertEquals(0, UTIL.countRows(table), "Should start with empty table"); // deep inspection: get the StoreFile dir final Path storePath = @@ -887,8 +882,8 @@ public Boolean call() throws Exception { // Ensure data shows up int expectedRows = NMapInputFormat.getNumMapTasks(conf) * ROWSPERSPLIT; - assertEquals("BulkLoadHFiles should put expected data in table", expectedRows + 1, - UTIL.countRows(table)); + assertEquals(expectedRows + 1, UTIL.countRows(table), + "BulkLoadHFiles should put expected data in table"); // should have a second StoreFile now assertEquals(2, fs.listStatus(storePath).length); @@ -1075,8 +1070,8 @@ public void TestConfigureCompression() throws Exception { LocatedFileStatus keyFileStatus = iterator.next(); HFile.Reader reader = HFile.createReader(fs, keyFileStatus.getPath(), new CacheConfig(conf), true, conf); - assertEquals(reader.getTrailer().getCompressionCodec().getName(), - hfileoutputformatCompression); + assertEquals(hfileoutputformatCompression, + reader.getTrailer().getCompressionCodec().getName()); } } finally { if (writer != null && context != null) { diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2WithSecurity.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2WithSecurity.java index ac767f23775c..88840939ee41 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2WithSecurity.java +++ 
b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2WithSecurity.java @@ -18,8 +18,8 @@ package org.apache.hadoop.hbase.mapreduce; import static org.apache.hadoop.security.UserGroupInformation.loginUserFromKeytab; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; import java.io.Closeable; import java.io.File; @@ -28,7 +28,6 @@ import java.util.Map; import org.apache.commons.io.IOUtils; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.RegionLocator; @@ -42,20 +41,17 @@ import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.token.TokenIdentifier; -import org.junit.After; -import org.junit.Before; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; /** * Tests for {@link HFileOutputFormat2} with secure mode. 
*/ -@Category({ VerySlowMapReduceTests.class, LargeTests.class }) +@Tag(VerySlowMapReduceTests.TAG) +@Tag(LargeTests.TAG) public class TestHFileOutputFormat2WithSecurity extends HFileOutputFormat2TestBase { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestHFileOutputFormat2WithSecurity.class); private static final byte[] FAMILIES = Bytes.toBytes("test_cf"); @@ -71,7 +67,7 @@ public class TestHFileOutputFormat2WithSecurity extends HFileOutputFormat2TestBa private List clusters = new ArrayList<>(); - @Before + @BeforeEach public void setupSecurityClusters() throws Exception { utilA = new HBaseTestingUtil(); confA = utilA.getConfiguration(); @@ -93,7 +89,7 @@ public void setupSecurityClusters() throws Exception { clusters.add(utilB.startSecureMiniCluster(kdc, userPrincipal, HTTP_PRINCIPAL)); } - @After + @AfterEach public void teardownSecurityClusters() { IOUtils.closeQuietly(clusters); clusters.clear(); diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHRegionPartitioner.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHRegionPartitioner.java index 9cffb4089bd7..813758bdca92 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHRegionPartitioner.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHRegionPartitioner.java @@ -17,42 +17,33 @@ */ package org.apache.hadoop.hbase.mapreduce; -import static org.junit.Assert.assertEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.hbase.testclassification.MapReduceTests; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.Bytes; 
-import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Rule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.rules.TestName; - -@Category({ MapReduceTests.class, MediumTests.class }) +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestInfo; + +@Tag(MapReduceTests.TAG) +@Tag(MediumTests.TAG) public class TestHRegionPartitioner { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestHRegionPartitioner.class); - private static final HBaseTestingUtil UTIL = new HBaseTestingUtil(); - @Rule - public TestName name = new TestName(); - - @BeforeClass + @BeforeAll public static void beforeClass() throws Exception { UTIL.startMiniCluster(); } - @AfterClass + @AfterAll public static void afterClass() throws Exception { UTIL.shutdownMiniCluster(); } @@ -61,16 +52,17 @@ public static void afterClass() throws Exception { * Test HRegionPartitioner */ @Test - public void testHRegionPartitioner() throws Exception { + public void testHRegionPartitioner(TestInfo testInfo) throws Exception { byte[][] families = { Bytes.toBytes("familyA"), Bytes.toBytes("familyB") }; - UTIL.createTable(TableName.valueOf(name.getMethodName()), families, 1, Bytes.toBytes("aa"), + String tableName = testInfo.getTestMethod().get().getName(); + UTIL.createTable(TableName.valueOf(tableName), families, 1, Bytes.toBytes("aa"), Bytes.toBytes("cc"), 3); HRegionPartitioner partitioner = new HRegionPartitioner<>(); Configuration configuration = UTIL.getConfiguration(); - configuration.set(TableOutputFormat.OUTPUT_TABLE, name.getMethodName()); + configuration.set(TableOutputFormat.OUTPUT_TABLE, tableName); partitioner.setConf(configuration); ImmutableBytesWritable writable = new ImmutableBytesWritable(Bytes.toBytes("bb")); @@ -79,10 +71,11 
@@ public void testHRegionPartitioner() throws Exception { } @Test - public void testHRegionPartitionerMoreRegions() throws Exception { + public void testHRegionPartitionerMoreRegions(TestInfo testInfo) throws Exception { byte[][] families = { Bytes.toBytes("familyA"), Bytes.toBytes("familyB") }; - TableName tableName = TableName.valueOf(name.getMethodName()); + String tableNameStr = testInfo.getTestMethod().get().getName(); + TableName tableName = TableName.valueOf(tableNameStr); UTIL.createTable(tableName, families, 1, Bytes.toBytes("aa"), Bytes.toBytes("cc"), 5); Configuration configuration = UTIL.getConfiguration(); @@ -90,7 +83,7 @@ public void testHRegionPartitionerMoreRegions() throws Exception { assertEquals(5, numberOfRegions); HRegionPartitioner partitioner = new HRegionPartitioner<>(); - configuration.set(TableOutputFormat.OUTPUT_TABLE, name.getMethodName()); + configuration.set(TableOutputFormat.OUTPUT_TABLE, tableNameStr); partitioner.setConf(configuration); // Get some rowKey for the lastRegion diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHashTable.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHashTable.java index 05736f939e13..ec4d3ce3f02a 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHashTable.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHashTable.java @@ -17,14 +17,14 @@ */ package org.apache.hadoop.hbase.mapreduce; -import static org.junit.Assert.assertEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.fail; import java.util.HashMap; import java.util.Map; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Put; 
@@ -33,14 +33,11 @@ import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.io.MapFile; -import org.junit.AfterClass; -import org.junit.Assert; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Rule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.rules.TestName; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestInfo; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -50,32 +47,26 @@ /** * Basic test for the HashTable M/R tool */ -@Category(LargeTests.class) +@Tag(LargeTests.TAG) public class TestHashTable { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestHashTable.class); private static final Logger LOG = LoggerFactory.getLogger(TestHashTable.class); private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); - @Rule - public TestName name = new TestName(); - - @BeforeClass + @BeforeAll public static void beforeClass() throws Exception { TEST_UTIL.startMiniCluster(3); } - @AfterClass + @AfterAll public static void afterClass() throws Exception { TEST_UTIL.shutdownMiniCluster(); } @Test - public void testHashTable() throws Exception { - final TableName tableName = TableName.valueOf(name.getMethodName()); + public void testHashTable(TestInfo testInfo) throws Exception { + final TableName tableName = TableName.valueOf(testInfo.getTestMethod().get().getName()); final byte[] family = Bytes.toBytes("family"); final byte[] column1 = Bytes.toBytes("c1"); final byte[] column2 = Bytes.toBytes("c2"); @@ -110,7 +101,7 @@ public void testHashTable() throws Exception { int code = hashTable.run(new String[] { "--batchsize=" + batchSize, "--numhashfiles=" + numHashFiles, "--scanbatch=2", tableName.getNameAsString(), 
testDir.toString() }); - assertEquals("test job failed", 0, code); + assertEquals(0, code, "test job failed"); FileSystem fs = TEST_UTIL.getTestFileSystem(); @@ -165,7 +156,7 @@ ImmutableMap. builder() intKey = Bytes.toInt(key.get(), key.getOffset(), key.getLength()); } if (actualHashes.containsKey(intKey)) { - Assert.fail("duplicate key in data files: " + intKey); + fail("duplicate key in data files: " + intKey); } actualHashes.put(intKey, new ImmutableBytesWritable(hash.copyBytes())); } @@ -185,7 +176,7 @@ ImmutableMap. builder() if (!expectedHashes.equals(actualHashes)) { LOG.error("Diff: " + Maps.difference(expectedHashes, actualHashes)); } - Assert.assertEquals(expectedHashes, actualHashes); + assertEquals(expectedHashes, actualHashes); TEST_UTIL.deleteTable(tableName); TEST_UTIL.cleanupDataTestDirOnTestFS(); diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java index ab12dad94f55..26741f6f4d5e 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java @@ -21,12 +21,13 @@ import org.apache.hadoop.hbase.testclassification.VerySlowMapReduceTests; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Tag; /** * Tests the table import and table export MR job functionality */ -@org.junit.jupiter.api.Tag(VerySlowMapReduceTests.TAG) -@org.junit.jupiter.api.Tag(LargeTests.TAG) +@Tag(VerySlowMapReduceTests.TAG) +@Tag(LargeTests.TAG) public class TestImportExport extends TestImportExportBase { @BeforeAll diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithOperationAttributes.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithOperationAttributes.java index 
7b005089732c..9b425ff666b4 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithOperationAttributes.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithOperationAttributes.java @@ -17,8 +17,8 @@ */ package org.apache.hadoop.hbase.mapreduce; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; import java.io.IOException; import java.util.ArrayList; @@ -32,7 +32,6 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableName; @@ -55,23 +54,18 @@ import org.apache.hadoop.hbase.wal.WALEdit; import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.ToolRunner; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Rule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.rules.TestName; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestInfo; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({ MapReduceTests.class, LargeTests.class }) +@Tag(MapReduceTests.TAG) +@Tag(LargeTests.TAG) public class TestImportTSVWithOperationAttributes implements Configurable { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestImportTSVWithOperationAttributes.class); - private static final Logger LOG = LoggerFactory.getLogger(TestImportTSVWithOperationAttributes.class); protected static final String NAME = 
TestImportTsv.class.getSimpleName(); @@ -93,9 +87,6 @@ public class TestImportTSVWithOperationAttributes implements Configurable { private final String FAMILY = "FAM"; - @Rule - public TestName name = new TestName(); - @Override public Configuration getConf() { return util.getConfiguration(); @@ -106,7 +97,7 @@ public void setConf(Configuration conf) { throw new IllegalArgumentException("setConf not supported"); } - @BeforeClass + @BeforeAll public static void provisionCluster() throws Exception { conf = util.getConfiguration(); conf.set("hbase.coprocessor.master.classes", OperationAttributesTestController.class.getName()); @@ -114,14 +105,15 @@ public static void provisionCluster() throws Exception { util.startMiniCluster(); } - @AfterClass + @AfterAll public static void releaseCluster() throws Exception { util.shutdownMiniCluster(); } @Test - public void testMROnTable() throws Exception { - final TableName tableName = TableName.valueOf(name.getMethodName() + util.getRandomUUID()); + public void testMROnTable(TestInfo testInfo) throws Exception { + final TableName tableName = + TableName.valueOf(testInfo.getTestMethod().get().getName() + util.getRandomUUID()); // Prepare the arguments required for the test. String[] args = new String[] { @@ -136,8 +128,9 @@ public void testMROnTable() throws Exception { } @Test - public void testMROnTableWithInvalidOperationAttr() throws Exception { - final TableName tableName = TableName.valueOf(name.getMethodName() + util.getRandomUUID()); + public void testMROnTableWithInvalidOperationAttr(TestInfo testInfo) throws Exception { + final TableName tableName = + TableName.valueOf(testInfo.getTestMethod().get().getName() + util.getRandomUUID()); // Prepare the arguments required for the test. 
String[] args = new String[] { diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithTTLs.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithTTLs.java index 9ac8f35a91de..cfc3655f9e92 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithTTLs.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithTTLs.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hbase.mapreduce; -import static org.junit.Assert.assertEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; import java.io.IOException; import java.util.ArrayList; @@ -29,7 +29,6 @@ import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Durability; @@ -45,23 +44,18 @@ import org.apache.hadoop.hbase.wal.WALEdit; import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.ToolRunner; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Rule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.rules.TestName; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestInfo; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({ MapReduceTests.class, LargeTests.class }) +@Tag(MapReduceTests.TAG) +@Tag(LargeTests.TAG) public class TestImportTSVWithTTLs implements Configurable { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestImportTSVWithTTLs.class); - protected static final Logger LOG = 
LoggerFactory.getLogger(TestImportTSVWithTTLs.class); protected static final String NAME = TestImportTsv.class.getSimpleName(); protected static HBaseTestingUtil util = new HBaseTestingUtil(); @@ -79,9 +73,6 @@ public class TestImportTSVWithTTLs implements Configurable { private final String FAMILY = "FAM"; private static Configuration conf; - @Rule - public TestName name = new TestName(); - @Override public Configuration getConf() { return util.getConfiguration(); @@ -92,7 +83,7 @@ public void setConf(Configuration conf) { throw new IllegalArgumentException("setConf not supported"); } - @BeforeClass + @BeforeAll public static void provisionCluster() throws Exception { conf = util.getConfiguration(); // We don't check persistence in HFiles in this test, but if we ever do we will @@ -102,14 +93,15 @@ public static void provisionCluster() throws Exception { util.startMiniCluster(); } - @AfterClass + @AfterAll public static void releaseCluster() throws Exception { util.shutdownMiniCluster(); } @Test - public void testMROnTable() throws Exception { - final TableName tableName = TableName.valueOf(name.getMethodName() + util.getRandomUUID()); + public void testMROnTable(TestInfo testInfo) throws Exception { + final TableName tableName = + TableName.valueOf(testInfo.getTestMethod().get().getName() + util.getRandomUUID()); // Prepare the arguments required for the test. 
String[] args = new String[] { diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithVisibilityLabels.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithVisibilityLabels.java index e15181e9c94d..5743bc9e8fe5 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithVisibilityLabels.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithVisibilityLabels.java @@ -17,8 +17,8 @@ */ package org.apache.hadoop.hbase.mapreduce; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; import java.io.IOException; import java.security.PrivilegedExceptionAction; @@ -35,7 +35,6 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableName; @@ -64,25 +63,20 @@ import org.apache.hadoop.mapred.Utils.OutputFileUtils.OutputFilesFilter; import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.ToolRunner; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Rule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.rules.TestName; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestInfo; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.hbase.shaded.protobuf.generated.VisibilityLabelsProtos.VisibilityLabelsResponse; -@Category({ MapReduceTests.class, LargeTests.class }) 
+@Tag(MapReduceTests.TAG) +@Tag(LargeTests.TAG) public class TestImportTSVWithVisibilityLabels implements Configurable { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestImportTSVWithVisibilityLabels.class); - private static final Logger LOG = LoggerFactory.getLogger(TestImportTSVWithVisibilityLabels.class); protected static final String NAME = TestImportTsv.class.getSimpleName(); @@ -107,9 +101,6 @@ public class TestImportTSVWithVisibilityLabels implements Configurable { private static User SUPERUSER; private static Configuration conf; - @Rule - public TestName name = new TestName(); - @Override public Configuration getConf() { return util.getConfiguration(); @@ -120,7 +111,7 @@ public void setConf(Configuration conf) { throw new IllegalArgumentException("setConf not supported"); } - @BeforeClass + @BeforeAll public static void provisionCluster() throws Exception { conf = util.getConfiguration(); SUPERUSER = User.createUserForTesting(conf, "admin", new String[] { "supergroup" }); @@ -153,14 +144,15 @@ public VisibilityLabelsResponse run() throws Exception { SUPERUSER.runAs(action); } - @AfterClass + @AfterAll public static void releaseCluster() throws Exception { util.shutdownMiniCluster(); } @Test - public void testMROnTable() throws Exception { - final TableName tableName = TableName.valueOf(name.getMethodName() + util.getRandomUUID()); + public void testMROnTable(TestInfo testInfo) throws Exception { + final TableName tableName = + TableName.valueOf(testInfo.getTestMethod().get().getName() + util.getRandomUUID()); // Prepare the arguments required for the test. 
String[] args = new String[] { @@ -174,8 +166,9 @@ public void testMROnTable() throws Exception { } @Test - public void testMROnTableWithDeletes() throws Exception { - final TableName tableName = TableName.valueOf(name.getMethodName() + util.getRandomUUID()); + public void testMROnTableWithDeletes(TestInfo testInfo) throws Exception { + final TableName tableName = + TableName.valueOf(testInfo.getTestMethod().get().getName() + util.getRandomUUID()); // Prepare the arguments required for the test. String[] args = new String[] { @@ -226,8 +219,9 @@ private void issueDeleteAndVerifyData(TableName tableName) throws IOException { } @Test - public void testMROnTableWithBulkload() throws Exception { - final TableName tableName = TableName.valueOf(name.getMethodName() + util.getRandomUUID()); + public void testMROnTableWithBulkload(TestInfo testInfo) throws Exception { + final TableName tableName = + TableName.valueOf(testInfo.getTestMethod().get().getName() + util.getRandomUUID()); Path hfiles = new Path(util.getDataTestDirOnTestFS(tableName.getNameAsString()), "hfiles"); // Prepare the arguments required for the test. String[] args = new String[] { "-D" + ImportTsv.BULK_OUTPUT_CONF_KEY + "=" + hfiles.toString(), @@ -240,8 +234,9 @@ public void testMROnTableWithBulkload() throws Exception { } @Test - public void testBulkOutputWithTsvImporterTextMapper() throws Exception { - final TableName table = TableName.valueOf(name.getMethodName() + util.getRandomUUID()); + public void testBulkOutputWithTsvImporterTextMapper(TestInfo testInfo) throws Exception { + final TableName table = + TableName.valueOf(testInfo.getTestMethod().get().getName() + util.getRandomUUID()); String FAMILY = "FAM"; Path bulkOutputPath = new Path(util.getDataTestDirOnTestFS(table.getNameAsString()), "hfiles"); // Prepare the arguments required for the test. 
@@ -257,8 +252,9 @@ public void testBulkOutputWithTsvImporterTextMapper() throws Exception { } @Test - public void testMRWithOutputFormat() throws Exception { - final TableName tableName = TableName.valueOf(name.getMethodName() + util.getRandomUUID()); + public void testMRWithOutputFormat(TestInfo testInfo) throws Exception { + final TableName tableName = + TableName.valueOf(testInfo.getTestMethod().get().getName() + util.getRandomUUID()); Path hfiles = new Path(util.getDataTestDirOnTestFS(tableName.getNameAsString()), "hfiles"); // Prepare the arguments required for the test. String[] args = new String[] { @@ -273,8 +269,9 @@ public void testMRWithOutputFormat() throws Exception { } @Test - public void testBulkOutputWithInvalidLabels() throws Exception { - final TableName tableName = TableName.valueOf(name.getMethodName() + util.getRandomUUID()); + public void testBulkOutputWithInvalidLabels(TestInfo testInfo) throws Exception { + final TableName tableName = + TableName.valueOf(testInfo.getTestMethod().get().getName() + util.getRandomUUID()); Path hfiles = new Path(util.getDataTestDirOnTestFS(tableName.getNameAsString()), "hfiles"); // Prepare the arguments required for the test. String[] args = new String[] { "-D" + ImportTsv.BULK_OUTPUT_CONF_KEY + "=" + hfiles.toString(), @@ -290,8 +287,10 @@ public void testBulkOutputWithInvalidLabels() throws Exception { } @Test - public void testBulkOutputWithTsvImporterTextMapperWithInvalidLabels() throws Exception { - final TableName tableName = TableName.valueOf(name.getMethodName() + util.getRandomUUID()); + public void testBulkOutputWithTsvImporterTextMapperWithInvalidLabels(TestInfo testInfo) + throws Exception { + final TableName tableName = + TableName.valueOf(testInfo.getTestMethod().get().getName() + util.getRandomUUID()); Path hfiles = new Path(util.getDataTestDirOnTestFS(tableName.getNameAsString()), "hfiles"); // Prepare the arguments required for the test. 
String[] args = new String[] { @@ -391,22 +390,22 @@ private static void validateHFiles(FileSystem fs, String outputPath, String fami String[] elements = cfStatus.getPath().toString().split(Path.SEPARATOR); String cf = elements[elements.length - 1]; foundFamilies.add(cf); - assertTrue(String.format( - "HFile ouput contains a column family (%s) not present in input families (%s)", cf, - configFamilies), configFamilies.contains(cf)); + assertTrue(configFamilies.contains(cf), + String.format( + "HFile ouput contains a column family (%s) not present in input families (%s)", cf, + configFamilies)); for (FileStatus hfile : fs.listStatus(cfStatus.getPath())) { - assertTrue(String.format("HFile %s appears to contain no data.", hfile.getPath()), - hfile.getLen() > 0); + assertTrue(hfile.getLen() > 0, + String.format("HFile %s appears to contain no data.", hfile.getPath())); if (expectedKVCount > -1) { actualKVCount += getKVCountFromHfile(fs, hfile.getPath()); } } } if (expectedKVCount > -1) { - assertTrue( + assertTrue(actualKVCount == expectedKVCount, String.format("KV count in output hfile=<%d> doesn't match with expected KV count=<%d>", - actualKVCount, expectedKVCount), - actualKVCount == expectedKVCount); + actualKVCount, expectedKVCount)); } } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTsv.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTsv.java index 04fc2c8d3b8f..37749bbd74df 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTsv.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTsv.java @@ -17,9 +17,10 @@ */ package org.apache.hadoop.hbase.mapreduce; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static 
org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; import java.io.IOException; import java.util.Arrays; @@ -37,7 +38,6 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableName; @@ -61,24 +61,18 @@ import org.apache.hadoop.mapreduce.lib.output.NullOutputFormat; import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.ToolRunner; -import org.junit.AfterClass; -import org.junit.Before; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Rule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.rules.ExpectedException; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({ VerySlowMapReduceTests.class, LargeTests.class }) +@Tag(VerySlowMapReduceTests.TAG) +@Tag(LargeTests.TAG) public class TestImportTsv implements Configurable { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestImportTsv.class); - private static final Logger LOG = LoggerFactory.getLogger(TestImportTsv.class); protected static final String NAME = TestImportTsv.class.getSimpleName(); protected static HBaseTestingUtil util = new HBaseTestingUtil(); @@ -95,9 +89,6 @@ public class TestImportTsv implements Configurable { private TableName tn; private Map args; - @Rule - public ExpectedException exception = ExpectedException.none(); - public Configuration getConf() { return util.getConfiguration(); } @@ -106,17 +97,17 @@ public void setConf(Configuration conf) { throw new 
IllegalArgumentException("setConf not supported"); } - @BeforeClass + @BeforeAll public static void provisionCluster() throws Exception { util.startMiniCluster(); } - @AfterClass + @AfterAll public static void releaseCluster() throws Exception { util.shutdownMiniCluster(); } - @Before + @BeforeEach public void setup() throws Exception { tn = TableName.valueOf("test-" + util.getRandomUUID()); args = new HashMap<>(); @@ -198,17 +189,16 @@ public void testJobConfigurationsWithTsvImporterTextMapper() throws Exception { "-D" + ImportTsv.SEPARATOR_CONF_KEY + "=,", "-D" + ImportTsv.BULK_OUTPUT_CONF_KEY + "=" + bulkOutputPath.toString(), tn.getNameAsString(), INPUT_FILE }; - assertEquals("running test job configuration failed.", 0, - ToolRunner.run(new Configuration(util.getConfiguration()), new ImportTsv() { - @Override - public int run(String[] args) throws Exception { - Job job = createSubmittableJob(getConf(), args); - assertTrue(job.getMapperClass().equals(TsvImporterTextMapper.class)); - assertTrue(job.getReducerClass().equals(TextSortReducer.class)); - assertTrue(job.getMapOutputValueClass().equals(Text.class)); - return 0; - } - }, args)); + assertEquals(0, ToolRunner.run(new Configuration(util.getConfiguration()), new ImportTsv() { + @Override + public int run(String[] args) throws Exception { + Job job = createSubmittableJob(getConf(), args); + assertTrue(job.getMapperClass().equals(TsvImporterTextMapper.class)); + assertTrue(job.getReducerClass().equals(TextSortReducer.class)); + assertTrue(job.getMapOutputValueClass().equals(Text.class)); + return 0; + } + }, args), "running test job configuration failed."); // Delete table created by createSubmittableJob. 
util.deleteTable(tn); } @@ -231,15 +221,15 @@ public void testWithoutAnExistingTableAndCreateTableSetToNo() throws Exception { conf.set(ImportTsv.COLUMNS_CONF_KEY, "HBASE_ROW_KEY,FAM:A"); conf.set(ImportTsv.BULK_OUTPUT_CONF_KEY, "/output"); conf.set(ImportTsv.CREATE_TABLE_CONF_KEY, "no"); - exception.expect(TableNotFoundException.class); - assertEquals("running test job configuration failed.", 0, + assertThrows(TableNotFoundException.class, () -> { ToolRunner.run(new Configuration(util.getConfiguration()), new ImportTsv() { @Override public int run(String[] args) throws Exception { createSubmittableJob(getConf(), args); return 0; } - }, args)); + }, args); + }); } @Test @@ -250,15 +240,15 @@ public void testMRNoMatchedColumnFamily() throws Exception { "-D" + ImportTsv.COLUMNS_CONF_KEY + "=HBASE_ROW_KEY,FAM:A,FAM01_ERROR:A,FAM01_ERROR:B,FAM02_ERROR:C", tn.getNameAsString(), "/inputFile" }; - exception.expect(NoSuchColumnFamilyException.class); - assertEquals("running test job configuration failed.", 0, + assertThrows(NoSuchColumnFamilyException.class, () -> { ToolRunner.run(new Configuration(util.getConfiguration()), new ImportTsv() { @Override public int run(String[] args) throws Exception { createSubmittableJob(getConf(), args); return 0; } - }, args)); + }, args); + }); util.deleteTable(tn); } @@ -267,15 +257,15 @@ public int run(String[] args) throws Exception { public void testMRWithoutAnExistingTable() throws Exception { String[] args = new String[] { tn.getNameAsString(), "/inputFile" }; - exception.expect(TableNotFoundException.class); - assertEquals("running test job configuration failed.", 0, + assertThrows(TableNotFoundException.class, () -> { ToolRunner.run(new Configuration(util.getConfiguration()), new ImportTsv() { @Override public int run(String[] args) throws Exception { createSubmittableJob(getConf(), args); return 0; } - }, args)); + }, args); + }); } @Test @@ -288,15 +278,14 @@ public void testJobConfigurationsWithDryMode() throws Exception { 
"-D" + ImportTsv.SEPARATOR_CONF_KEY + "=,", "-D" + ImportTsv.BULK_OUTPUT_CONF_KEY + "=" + bulkOutputPath.toString(), "-D" + ImportTsv.DRY_RUN_CONF_KEY + "=true", tn.getNameAsString(), INPUT_FILE }; - assertEquals("running test job configuration failed.", 0, - ToolRunner.run(new Configuration(util.getConfiguration()), new ImportTsv() { - @Override - public int run(String[] args) throws Exception { - Job job = createSubmittableJob(getConf(), args); - assertTrue(job.getOutputFormatClass().equals(NullOutputFormat.class)); - return 0; - } - }, argsArray)); + assertEquals(0, ToolRunner.run(new Configuration(util.getConfiguration()), new ImportTsv() { + @Override + public int run(String[] args) throws Exception { + Job job = createSubmittableJob(getConf(), args); + assertTrue(job.getOutputFormatClass().equals(NullOutputFormat.class)); + return 0; + } + }, argsArray), "running test job configuration failed."); // Delete table created by createSubmittableJob. util.deleteTable(tn); } @@ -317,8 +306,7 @@ public void testDryModeWithoutBulkOutputAndTableExists() throws Exception { @Test public void testDryModeWithoutBulkOutputAndTableDoesNotExists() throws Exception { args.put(ImportTsv.DRY_RUN_CONF_KEY, "true"); - exception.expect(TableNotFoundException.class); - doMROnTableTest(null, 1); + assertThrows(TableNotFoundException.class, () -> doMROnTableTest(null, 1)); } @Test @@ -345,8 +333,7 @@ public void testDryModeWithBulkOutputAndTableDoesNotExistsCreateTableSetToNo() t args.put(ImportTsv.BULK_OUTPUT_CONF_KEY, hfiles.toString()); args.put(ImportTsv.DRY_RUN_CONF_KEY, "true"); args.put(ImportTsv.CREATE_TABLE_CONF_KEY, "no"); - exception.expect(TableNotFoundException.class); - doMROnTableTest(null, 1); + assertThrows(TableNotFoundException.class, () -> doMROnTableTest(null, 1)); } @Test @@ -358,8 +345,7 @@ public void testDryModeWithBulkModeAndTableDoesNotExistsCreateTableSetToYes() th args.put(ImportTsv.CREATE_TABLE_CONF_KEY, "yes"); doMROnTableTest(null, 1); // Verify 
temporary table was deleted. - exception.expect(TableNotFoundException.class); - util.deleteTable(tn); + assertThrows(TableNotFoundException.class, () -> util.deleteTable(tn)); } /** @@ -453,8 +439,8 @@ protected static Tool doMROnTableTest(HBaseTestingUtil util, TableName table, St && "true".equalsIgnoreCase(args.get(ImportTsv.DRY_RUN_CONF_KEY)); if (args.containsKey(ImportTsv.BULK_OUTPUT_CONF_KEY)) { if (isDryRun) { - assertFalse(String.format("Dry run mode, %s should not have been created.", - ImportTsv.BULK_OUTPUT_CONF_KEY), fs.exists(new Path(ImportTsv.BULK_OUTPUT_CONF_KEY))); + assertFalse(fs.exists(new Path(ImportTsv.BULK_OUTPUT_CONF_KEY)), String.format( + "Dry run mode, %s should not have been created.", ImportTsv.BULK_OUTPUT_CONF_KEY)); } else { validateHFiles(fs, args.get(ImportTsv.BULK_OUTPUT_CONF_KEY), family, expectedKVCount); } @@ -536,25 +522,25 @@ private static void validateHFiles(FileSystem fs, String outputPath, String fami String[] elements = cfStatus.getPath().toString().split(Path.SEPARATOR); String cf = elements[elements.length - 1]; foundFamilies.add(cf); - assertTrue(String.format( - "HFile output contains a column family (%s) not present in input families (%s)", cf, - configFamilies), configFamilies.contains(cf)); + assertTrue(configFamilies.contains(cf), + String.format( + "HFile output contains a column family (%s) not present in input families (%s)", cf, + configFamilies)); for (FileStatus hfile : fs.listStatus(cfStatus.getPath())) { - assertTrue(String.format("HFile %s appears to contain no data.", hfile.getPath()), - hfile.getLen() > 0); + assertTrue(hfile.getLen() > 0, + String.format("HFile %s appears to contain no data.", hfile.getPath())); // count the number of KVs from all the hfiles if (expectedKVCount > -1) { actualKVCount += getKVCountFromHfile(fs, hfile.getPath()); } } } - assertTrue(String.format("HFile output does not contain the input family '%s'.", family), - foundFamilies.contains(family)); + 
assertTrue(foundFamilies.contains(family), + String.format("HFile output does not contain the input family '%s'.", family)); if (expectedKVCount > -1) { - assertTrue( + assertTrue(actualKVCount == expectedKVCount, String.format("KV count in ouput hfile=<%d> doesn't match with expected KV count=<%d>", - actualKVCount, expectedKVCount), - actualKVCount == expectedKVCount); + actualKVCount, expectedKVCount)); } } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTsvParser.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTsvParser.java index adb0589c9805..5e608db6aa6a 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTsvParser.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTsvParser.java @@ -17,14 +17,14 @@ */ package org.apache.hadoop.hbase.mapreduce; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNull; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; import java.util.ArrayList; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.mapreduce.ImportTsv.TsvParser; import org.apache.hadoop.hbase.mapreduce.ImportTsv.TsvParser.BadTsvLineException; @@ -33,9 +33,8 @@ import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Pair; -import org.junit.ClassRule; -import org.junit.Test; -import 
org.junit.experimental.categories.Category; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; import org.apache.hbase.thirdparty.com.google.common.base.Joiner; import org.apache.hbase.thirdparty.com.google.common.base.Splitter; @@ -44,11 +43,9 @@ /** * Tests for {@link TsvParser}. */ -@Category({ MapReduceTests.class, SmallTests.class }) +@Tag(MapReduceTests.TAG) +@Tag(SmallTests.TAG) public class TestImportTsvParser { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestImportTsvParser.class); private void assertBytesEquals(byte[] a, byte[] b) { assertEquals(Bytes.toStringBinary(a), Bytes.toStringBinary(b)); @@ -171,50 +168,50 @@ public void testTsvParserWithTimestamp() throws BadTsvLineException { /** * Test cases that throw BadTsvLineException */ - @Test(expected = BadTsvLineException.class) + @Test public void testTsvParserBadTsvLineExcessiveColumns() throws BadTsvLineException { TsvParser parser = new TsvParser("HBASE_ROW_KEY,col_a", "\t"); byte[] line = Bytes.toBytes("val_a\tval_b\tval_c"); - parser.parse(line, line.length); + assertThrows(BadTsvLineException.class, () -> parser.parse(line, line.length)); } - @Test(expected = BadTsvLineException.class) + @Test public void testTsvParserBadTsvLineZeroColumn() throws BadTsvLineException { TsvParser parser = new TsvParser("HBASE_ROW_KEY,col_a", "\t"); byte[] line = Bytes.toBytes(""); - parser.parse(line, line.length); + assertThrows(BadTsvLineException.class, () -> parser.parse(line, line.length)); } - @Test(expected = BadTsvLineException.class) + @Test public void testTsvParserBadTsvLineOnlyKey() throws BadTsvLineException { TsvParser parser = new TsvParser("HBASE_ROW_KEY,col_a", "\t"); byte[] line = Bytes.toBytes("key_only"); - parser.parse(line, line.length); + assertThrows(BadTsvLineException.class, () -> parser.parse(line, line.length)); } - @Test(expected = BadTsvLineException.class) + @Test public void testTsvParserBadTsvLineNoRowKey() 
throws BadTsvLineException { TsvParser parser = new TsvParser("col_a,HBASE_ROW_KEY", "\t"); byte[] line = Bytes.toBytes("only_cola_data_and_no_row_key"); - parser.parse(line, line.length); + assertThrows(BadTsvLineException.class, () -> parser.parse(line, line.length)); } - @Test(expected = BadTsvLineException.class) + @Test public void testTsvParserInvalidTimestamp() throws BadTsvLineException { TsvParser parser = new TsvParser("HBASE_ROW_KEY,HBASE_TS_KEY,col_a,", "\t"); assertEquals(1, parser.getTimestampKeyColumnIndex()); byte[] line = Bytes.toBytes("rowkey\ttimestamp\tval_a"); ParsedLine parsed = parser.parse(line, line.length); - assertEquals(-1, parsed.getTimestamp(-1)); + assertThrows(BadTsvLineException.class, () -> parsed.getTimestamp(-1)); checkParsing(parsed, Splitter.on("\t").split(Bytes.toString(line))); } - @Test(expected = BadTsvLineException.class) + @Test public void testTsvParserNoTimestampValue() throws BadTsvLineException { TsvParser parser = new TsvParser("HBASE_ROW_KEY,col_a,HBASE_TS_KEY", "\t"); assertEquals(2, parser.getTimestampKeyColumnIndex()); byte[] line = Bytes.toBytes("rowkey\tval_a"); - parser.parse(line, line.length); + assertThrows(BadTsvLineException.class, () -> parser.parse(line, line.length)); } @Test @@ -225,30 +222,24 @@ public void testTsvParserParseRowKey() throws BadTsvLineException { Pair rowKeyOffsets = parser.parseRowKey(line, line.length); assertEquals(0, rowKeyOffsets.getFirst().intValue()); assertEquals(6, rowKeyOffsets.getSecond().intValue()); - try { - line = Bytes.toBytes("\t\tval_a\t1234"); - parser.parseRowKey(line, line.length); - fail("Should get BadTsvLineException on empty rowkey."); - } catch (BadTsvLineException ignored) { - } - parser = new TsvParser("col_a,HBASE_ROW_KEY,HBASE_TS_KEY", "\t"); - assertEquals(1, parser.getRowKeyColumnIndex()); + byte[] line2 = Bytes.toBytes("\t\tval_a\t1234"); + assertThrows(BadTsvLineException.class, () -> parser.parseRowKey(line2, line2.length)); + + TsvParser parser2 = 
new TsvParser("col_a,HBASE_ROW_KEY,HBASE_TS_KEY", "\t"); + assertEquals(1, parser2.getRowKeyColumnIndex()); line = Bytes.toBytes("val_a\trowkey\t1234"); - rowKeyOffsets = parser.parseRowKey(line, line.length); + rowKeyOffsets = parser2.parseRowKey(line, line.length); assertEquals(6, rowKeyOffsets.getFirst().intValue()); assertEquals(6, rowKeyOffsets.getSecond().intValue()); - try { - line = Bytes.toBytes("val_a"); - rowKeyOffsets = parser.parseRowKey(line, line.length); - fail("Should get BadTsvLineException when number of columns less than rowkey position."); - } catch (BadTsvLineException ignored) { - } - parser = new TsvParser("col_a,HBASE_TS_KEY,HBASE_ROW_KEY", "\t"); - assertEquals(2, parser.getRowKeyColumnIndex()); + byte[] line3 = Bytes.toBytes("val_a"); + assertThrows(BadTsvLineException.class, () -> parser2.parseRowKey(line3, line3.length)); + + TsvParser parser3 = new TsvParser("col_a,HBASE_TS_KEY,HBASE_ROW_KEY", "\t"); + assertEquals(2, parser3.getRowKeyColumnIndex()); line = Bytes.toBytes("val_a\t1234\trowkey"); - rowKeyOffsets = parser.parseRowKey(line, line.length); + rowKeyOffsets = parser3.parseRowKey(line, line.length); assertEquals(11, rowKeyOffsets.getFirst().intValue()); assertEquals(6, rowKeyOffsets.getSecond().intValue()); } @@ -263,27 +254,21 @@ public void testTsvParseAttributesKey() throws BadTsvLineException { assertEquals(3, parser.getAttributesKeyColumnIndex()); String[] attributes = parse.getIndividualAttributes(); assertEquals("key=>value", attributes[0]); - try { - line = Bytes.toBytes("rowkey\tval_a\t1234"); - parser.parse(line, line.length); - fail("Should get BadTsvLineException on empty rowkey."); - } catch (BadTsvLineException ignored) { - } + byte[] line2 = Bytes.toBytes("rowkey\tval_a\t1234"); + TsvParser finalParser = parser; + assertThrows(BadTsvLineException.class, () -> finalParser.parse(line2, line2.length)); - parser = new TsvParser("HBASE_ATTRIBUTES_KEY,col_a,HBASE_ROW_KEY,HBASE_TS_KEY", "\t"); - assertEquals(2, 
parser.getRowKeyColumnIndex()); + TsvParser parser2 = + new TsvParser("HBASE_ATTRIBUTES_KEY,col_a,HBASE_ROW_KEY,HBASE_TS_KEY", "\t"); + assertEquals(2, parser2.getRowKeyColumnIndex()); line = Bytes.toBytes("key=>value\tval_a\trowkey\t1234"); - parse = parser.parse(line, line.length); + parse = parser2.parse(line, line.length); assertEquals(0, parse.getAttributeKeyOffset()); - assertEquals(0, parser.getAttributesKeyColumnIndex()); + assertEquals(0, parser2.getAttributesKeyColumnIndex()); attributes = parse.getIndividualAttributes(); assertEquals("key=>value", attributes[0]); - try { - line = Bytes.toBytes("val_a"); - ParsedLine parse2 = parser.parse(line, line.length); - fail("Should get BadTsvLineException when number of columns less than rowkey position."); - } catch (BadTsvLineException ignored) { - } + byte[] line3 = Bytes.toBytes("val_a"); + assertThrows(BadTsvLineException.class, () -> parser2.parse(line3, line3.length)); parser = new TsvParser("col_a,HBASE_ATTRIBUTES_KEY,HBASE_TS_KEY,HBASE_ROW_KEY", "\t"); assertEquals(3, parser.getRowKeyColumnIndex()); diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestJarFinder.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestJarFinder.java index 87461c2735f0..94d056e03e65 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestJarFinder.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestJarFinder.java @@ -30,24 +30,18 @@ import java.util.jar.JarInputStream; import java.util.jar.JarOutputStream; import java.util.jar.Manifest; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.testclassification.SmallTests; import org.junit.Assert; -import org.junit.ClassRule; import org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.Tag; import org.slf4j.LoggerFactory; /** * This file was forked from hadoop/common/branches/branch-2@1350012. 
*/ -@Category(SmallTests.class) +@Tag(SmallTests.TAG) public class TestJarFinder { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestJarFinder.class); - @Test public void testJar() throws Exception { diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMRIncrementalLoad.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMRIncrementalLoad.java index 1e7cb0e41037..daf11aea5bb6 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMRIncrementalLoad.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMRIncrementalLoad.java @@ -20,31 +20,23 @@ import java.util.Arrays; import java.util.List; import java.util.stream.Collectors; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.testclassification.MapReduceTests; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.experimental.categories.Category; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -import org.junit.runners.Parameterized.Parameters; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.MethodSource; -@RunWith(Parameterized.class) -@Category({ MapReduceTests.class, LargeTests.class }) +@Tag(MapReduceTests.TAG) +@Tag(LargeTests.TAG) public class TestMRIncrementalLoad extends MRIncrementalLoadTestBase { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestMRIncrementalLoad.class); - - @BeforeClass + @BeforeAll public static void setUpBeforeClass() throws Exception { setupCluster(false); } - @Parameters(name = "{index}: shouldChangeRegions={0}, putSortReducer={1}," + " tableStr={2}") public static List params() { 
return Arrays.asList(new Object[] { false, false, Arrays.asList("testMRIncrementalLoad") }, new Object[] { true, false, Arrays.asList("testMRIncrementalLoadWithSplit") }, @@ -52,4 +44,11 @@ public static List params() { new Object[] { false, true, Arrays.stream(TABLE_NAMES).map(TableName::getNameAsString).collect(Collectors.toList()) }); } + + @ParameterizedTest + @MethodSource("params") + public void testMRIncrementalLoad(boolean shouldChangeRegions, boolean putSortReducer, + List tableStr) throws Exception { + runTest(shouldChangeRegions, putSortReducer, tableStr); + } } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMRIncrementalLoadWithLocality.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMRIncrementalLoadWithLocality.java index e27273b15101..ec7e20019b1d 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMRIncrementalLoadWithLocality.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMRIncrementalLoadWithLocality.java @@ -19,33 +19,32 @@ import java.util.Arrays; import java.util.List; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.testclassification.MapReduceTests; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.experimental.categories.Category; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -import org.junit.runners.Parameterized.Parameters; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.MethodSource; -@RunWith(Parameterized.class) -@Category({ MapReduceTests.class, LargeTests.class }) +@Tag(MapReduceTests.TAG) +@Tag(LargeTests.TAG) public class TestMRIncrementalLoadWithLocality extends MRIncrementalLoadTestBase { - @ClassRule - public static final 
HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestMRIncrementalLoadWithLocality.class); - - @BeforeClass + @BeforeAll public static void setUpBeforeClass() throws Exception { setupCluster(true); } - @Parameters(name = "{index}: shouldChangeRegions={0}, putSortReducer={1}," + " tableStr={2}") public static List params() { return Arrays.asList( new Object[] { false, false, Arrays.asList("testMRIncrementalLoadWithLocality1") }, new Object[] { true, false, Arrays.asList("testMRIncrementalLoadWithLocality2") }); } + + @ParameterizedTest + @MethodSource("params") + public void testMRIncrementalLoadWithLocality(boolean shouldChangeRegions, boolean putSortReducer, + List tableStr) throws Exception { + runTest(shouldChangeRegions, putSortReducer, tableStr); + } } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableInputFormat.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableInputFormat.java index 5aa14c3561af..898e77a02287 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableInputFormat.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableInputFormat.java @@ -19,29 +19,24 @@ import java.io.IOException; import java.util.List; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.hbase.logging.Log4jUtils; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.testclassification.VerySlowMapReduceTests; import org.apache.hadoop.mapreduce.Job; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Tag; /** * Tests various scan start and stop row scenarios. 
This is set in a scan and tested in a MapReduce * job to see if that is handed over and done properly too. */ -@Category({ VerySlowMapReduceTests.class, LargeTests.class }) +@Tag(VerySlowMapReduceTests.TAG) +@Tag(LargeTests.TAG) public class TestMultiTableInputFormat extends MultiTableInputFormatTestBase { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestMultiTableInputFormat.class); - - @BeforeClass + @BeforeAll public static void setupLogging() { Log4jUtils.enableDebug(MultiTableInputFormat.class); } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableInputFormatBase.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableInputFormatBase.java index 7c136fa2a19f..1cb82ce50a58 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableInputFormatBase.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableInputFormatBase.java @@ -18,6 +18,8 @@ package org.apache.hadoop.hbase.mapreduce; import static org.apache.hadoop.hbase.client.Scan.SCAN_ATTRIBUTES_TABLE_NAME; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; import java.io.IOException; import java.util.ArrayList; @@ -27,7 +29,6 @@ import java.util.concurrent.ExecutorService; import java.util.concurrent.atomic.AtomicInteger; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionLocation; @@ -55,12 +56,9 @@ import org.apache.hadoop.mapreduce.JobContext; import org.apache.hadoop.mapreduce.RecordReader; import org.apache.hadoop.mapreduce.TaskAttemptContext; -import org.junit.Assert; -import org.junit.ClassRule; -import org.junit.Rule; -import org.junit.Test; -import 
org.junit.experimental.categories.Category; -import org.junit.rules.TestName; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestInfo; import org.mockito.Mockito; import org.mockito.invocation.InvocationOnMock; import org.mockito.stubbing.Answer; @@ -68,23 +66,16 @@ /** * Tests of MultiTableInputFormatBase. */ -@Category({ SmallTests.class }) +@Tag(SmallTests.TAG) public class TestMultiTableInputFormatBase { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestMultiTableInputFormatBase.class); - - @Rule - public final TestName name = new TestName(); - /** * Test getSplits only puts up one Connection. In past it has put up many Connections. Each * Connection setup comes with a fresh new cache so we have to do fresh hit on hbase:meta. Should * only do one Connection when doing getSplits even if a MultiTableInputFormat. */ @Test - public void testMRSplitsConnectionCount() throws IOException { + public void testMRSplitsConnectionCount(TestInfo testInfo) throws IOException { // Make instance of MTIFB. MultiTableInputFormatBase mtif = new MultiTableInputFormatBase() { @Override @@ -104,17 +95,17 @@ public RecordReader createRecordReader(InputSpli List scans = new ArrayList<>(); for (int i = 0; i < 10; i++) { Scan scan = new Scan(); - String tableName = this.name.getMethodName() + i; + String tableName = testInfo.getTestMethod().get().getName() + i; scan.setAttribute(SCAN_ATTRIBUTES_TABLE_NAME, Bytes.toBytes(tableName)); scans.add(scan); } mtif.setScans(scans); // Get splits. Assert that that more than one. List splits = mtif.getSplits(mockedJobContext); - Assert.assertTrue(splits.size() > 0); + assertTrue(splits.size() > 0); // Assert only one Connection was made (see the static counter we have in the mocked // Connection MRSplitsConnection Constructor. 
- Assert.assertEquals(1, MRSplitsConnection.creations.get()); + assertEquals(1, MRSplitsConnection.creations.get()); } /** diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableSnapshotInputFormat.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableSnapshotInputFormat.java index fbf9e7ef64c8..0aefa5d4832b 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableSnapshotInputFormat.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableSnapshotInputFormat.java @@ -23,7 +23,6 @@ import java.util.List; import java.util.Map; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; @@ -34,25 +33,21 @@ import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.CommonFSUtils; import org.apache.hadoop.mapreduce.Job; -import org.junit.Before; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Tag; import org.apache.hbase.thirdparty.com.google.common.base.Function; import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList; import org.apache.hbase.thirdparty.com.google.common.collect.Multimaps; -@Category({ VerySlowMapReduceTests.class, LargeTests.class }) +@Tag(VerySlowMapReduceTests.TAG) +@Tag(LargeTests.TAG) public class TestMultiTableSnapshotInputFormat extends MultiTableInputFormatTestBase { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestMultiTableSnapshotInputFormat.class); - protected Path restoreDir; - @BeforeClass + @BeforeAll public static void setUpSnapshots() throws Exception { 
Log4jUtils.enableDebug(MultiTableSnapshotInputFormat.class); Log4jUtils.enableDebug(MultiTableSnapshotInputFormatImpl.class); @@ -66,7 +61,7 @@ public static void setUpSnapshots() throws Exception { } } - @Before + @BeforeEach public void setUp() throws Exception { this.restoreDir = TEST_UTIL.getRandomDir(); } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableSnapshotInputFormatImpl.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableSnapshotInputFormatImpl.java index 409c8d7f195d..fdd8923a0497 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableSnapshotInputFormatImpl.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableSnapshotInputFormatImpl.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hbase.mapreduce; -import static org.junit.Assert.assertEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.eq; import static org.mockito.Mockito.doNothing; @@ -30,15 +30,13 @@ import java.util.Objects; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.CommonFSUtils; -import org.junit.Before; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; import org.mockito.Mockito; import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList; @@ -46,20 +44,16 @@ import org.apache.hbase.thirdparty.com.google.common.collect.Lists; import org.apache.hbase.thirdparty.com.google.common.collect.Maps; 
-@Category({ SmallTests.class }) +@Tag(SmallTests.TAG) public class TestMultiTableSnapshotInputFormatImpl { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestMultiTableSnapshotInputFormatImpl.class); - private MultiTableSnapshotInputFormatImpl subject; private Map> snapshotScans; private Path restoreDir; private Configuration conf; private Path rootDir; - @Before + @BeforeEach public void setUp() throws Exception { this.subject = Mockito.spy(new MultiTableSnapshotInputFormatImpl()); @@ -173,8 +167,8 @@ public void testSetInputCreatesRestoreDirectoriesUnderRootRestoreDir() throws Ex Map restoreDirs = subject.getSnapshotDirs(conf); for (Path snapshotDir : restoreDirs.values()) { - assertEquals("Expected " + snapshotDir + " to be a child of " + restoreDir, restoreDir, - snapshotDir.getParent()); + assertEquals(restoreDir, snapshotDir.getParent(), + "Expected " + snapshotDir + " to be a child of " + restoreDir); } } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultithreadedTableMapper.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultithreadedTableMapper.java index f5f0fdf169a9..66d72bf85771 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultithreadedTableMapper.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultithreadedTableMapper.java @@ -17,8 +17,8 @@ */ package org.apache.hadoop.hbase.mapreduce; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; import java.io.File; import java.io.IOException; @@ -29,7 +29,6 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import 
org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableName; @@ -44,11 +43,10 @@ import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.mapreduce.Job; import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -57,13 +55,10 @@ * simple - take every row in the table, reverse the value of a particular cell, and write it back * to the table. */ -@Category({ MapReduceTests.class, LargeTests.class }) +@Tag(MapReduceTests.TAG) +@Tag(LargeTests.TAG) public class TestMultithreadedTableMapper { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestMultithreadedTableMapper.class); - private static final Logger LOG = LoggerFactory.getLogger(TestMultithreadedTableMapper.class); private static final HBaseTestingUtil UTIL = new HBaseTestingUtil(); static final TableName MULTI_REGION_TABLE_NAME = TableName.valueOf("mrtest"); @@ -71,7 +66,7 @@ public class TestMultithreadedTableMapper { static final byte[] OUTPUT_FAMILY = Bytes.toBytes("text"); static final int NUMBER_OF_THREADS = 10; - @BeforeClass + @BeforeAll public static void beforeClass() throws Exception { // Up the handlers; this test needs more than usual. 
UTIL.getConfiguration().setInt(HConstants.REGION_SERVER_HIGH_PRIORITY_HANDLER_COUNT, 10); @@ -82,7 +77,7 @@ public static void beforeClass() throws Exception { UTIL.waitUntilAllRegionsAssigned(MULTI_REGION_TABLE_NAME); } - @AfterClass + @AfterAll public static void afterClass() throws Exception { UTIL.shutdownMiniCluster(); } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestRegionSizeCalculator.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestRegionSizeCalculator.java index 583223691da8..aa48a3e3fb72 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestRegionSizeCalculator.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestRegionSizeCalculator.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hbase.mapreduce; import static org.apache.hadoop.hbase.HConstants.DEFAULT_REGIONSERVER_PORT; -import static org.junit.Assert.assertEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; import static org.mockito.Mockito.when; import java.io.IOException; @@ -27,7 +27,6 @@ import java.util.Collections; import java.util.List; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.RegionMetrics; import org.apache.hadoop.hbase.ServerName; @@ -39,18 +38,14 @@ import org.apache.hadoop.hbase.testclassification.MiscTests; import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.util.Bytes; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; import org.mockito.Mockito; -@Category({ MiscTests.class, SmallTests.class }) +@Tag(MiscTests.TAG) +@Tag(SmallTests.TAG) public class TestRegionSizeCalculator { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - 
HBaseClassTestRule.forClass(TestRegionSizeCalculator.class); - private Configuration configuration = new Configuration(); private final long megabyte = 1024L * 1024L; private final ServerName sn = diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestRoundRobinTableInputFormat.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestRoundRobinTableInputFormat.java index 34bd76937d33..0eef2b15b1fb 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestRoundRobinTableInputFormat.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestRoundRobinTableInputFormat.java @@ -17,9 +17,9 @@ */ package org.apache.hadoop.hbase.mapreduce; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNull; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertTrue; import java.io.IOException; import java.util.ArrayList; @@ -28,7 +28,6 @@ import java.util.Comparator; import java.util.List; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Scan; @@ -36,19 +35,15 @@ import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.mapreduce.InputSplit; import org.apache.hadoop.mapreduce.JobContext; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; import org.mockito.Mockito; /** * Basic test of {@link RoundRobinTableInputFormat}; i.e. RRTIF. 
*/ -@Category({ SmallTests.class }) +@Tag(SmallTests.TAG) public class TestRoundRobinTableInputFormat { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestRoundRobinTableInputFormat.class); private static final int SERVERS_COUNT = 5; private static final String[] KEYS = { "aa", "ab", "ac", "ad", "ae", "ba", "bb", "bc", "bd", "be", @@ -127,7 +122,7 @@ private void assertLengthDescending(List list) long previousLength = Long.MAX_VALUE; for (InputSplit is : list) { long length = is.getLength(); - assertTrue(previousLength + " " + length, previousLength > length); + assertTrue(previousLength > length, previousLength + " " + length); previousLength = length; } } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestRowCounter.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestRowCounter.java index 1922b89bc2c8..f6dcef164fe4 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestRowCounter.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestRowCounter.java @@ -17,9 +17,9 @@ */ package org.apache.hadoop.hbase.mapreduce; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; import java.io.ByteArrayOutputStream; import java.io.IOException; @@ -27,7 +27,6 @@ import java.util.ArrayList; import java.util.Arrays; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Delete; @@ -41,24 +40,20 @@ import org.apache.hadoop.mapreduce.Counter; import org.apache.hadoop.mapreduce.Counters; import 
org.apache.hadoop.mapreduce.Job; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** * Test the rowcounter map reduce job. */ -@Category({ MapReduceTests.class, LargeTests.class }) +@Tag(MapReduceTests.TAG) +@Tag(LargeTests.TAG) public class TestRowCounter { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestRowCounter.class); - private static final Logger LOG = LoggerFactory.getLogger(TestRowCounter.class); private final static HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); private final static String TABLE_NAME = "testRowCounter"; @@ -73,7 +68,7 @@ public class TestRowCounter { /** * @throws java.lang.Exception */ - @BeforeClass + @BeforeAll public static void setUpBeforeClass() throws Exception { TEST_UTIL.startMiniCluster(); Table table = TEST_UTIL.createTable(TableName.valueOf(TABLE_NAME), Bytes.toBytes(COL_FAM)); @@ -84,7 +79,7 @@ public static void setUpBeforeClass() throws Exception { /** * @throws java.lang.Exception */ - @AfterClass + @AfterAll public static void tearDownAfterClass() throws Exception { TEST_UTIL.shutdownMiniCluster(); } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestSimpleTotalOrderPartitioner.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestSimpleTotalOrderPartitioner.java index 9bb10d9dbf46..c146e7b56192 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestSimpleTotalOrderPartitioner.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestSimpleTotalOrderPartitioner.java @@ -17,32 +17,27 @@ */ package org.apache.hadoop.hbase.mapreduce; -import static 
org.junit.Assert.assertEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; import java.nio.charset.StandardCharsets; import java.util.Base64; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.hbase.testclassification.MapReduceTests; import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.util.Bytes; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; /** * Test of simple partitioner. */ -@Category({ MapReduceTests.class, SmallTests.class }) +@Tag(MapReduceTests.TAG) +@Tag(SmallTests.TAG) public class TestSimpleTotalOrderPartitioner { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestSimpleTotalOrderPartitioner.class); - protected final static HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); Configuration conf = TEST_UTIL.getConfiguration(); diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestSyncTable.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestSyncTable.java index 2434df6adf51..7bd65da54c15 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestSyncTable.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestSyncTable.java @@ -17,9 +17,9 @@ */ package org.apache.hadoop.hbase.mapreduce; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; import java.util.Arrays; 
import java.util.function.BooleanSupplier; @@ -29,7 +29,6 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Put; @@ -43,24 +42,20 @@ import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.mapreduce.Counters; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Rule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.rules.TestName; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestInfo; import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** * Basic test for the SyncTable M/R tool */ -@Category({ MapReduceTests.class, LargeTests.class }) +@Tag(MapReduceTests.TAG) +@Tag(LargeTests.TAG) public class TestSyncTable { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestSyncTable.class); private static final Logger LOG = LoggerFactory.getLogger(TestSyncTable.class); @@ -68,16 +63,13 @@ public class TestSyncTable { private static final HBaseTestingUtil UTIL2 = new HBaseTestingUtil(); - @Rule - public TestName name = new TestName(); - - @BeforeClass + @BeforeAll public static void beforeClass() throws Exception { UTIL1.startMiniCluster(3); UTIL2.startMiniCluster(3); } - @AfterClass + @AfterAll public static void afterClass() throws Exception { UTIL2.shutdownMiniCluster(); UTIL1.shutdownMiniCluster(); @@ -91,11 +83,13 @@ private static byte[][] generateSplits(int numRows, int numRegions) { return splitRows; } - private void testSyncTable(HBaseTestingUtil source, HBaseTestingUtil 
target, String... options) - throws Exception { - final TableName sourceTableName = TableName.valueOf(name.getMethodName() + "_source"); - final TableName targetTableName = TableName.valueOf(name.getMethodName() + "_target"); - Path testDir = source.getDataTestDirOnTestFS(name.getMethodName()); + private void testSyncTable(TestInfo testInfo, HBaseTestingUtil source, HBaseTestingUtil target, + String... options) throws Exception { + final TableName sourceTableName = + TableName.valueOf(testInfo.getTestMethod().get().getName() + "_source"); + final TableName targetTableName = + TableName.valueOf(testInfo.getTestMethod().get().getName() + "_target"); + Path testDir = source.getDataTestDirOnTestFS(testInfo.getTestMethod().get().getName()); writeTestData(source, sourceTableName, target, targetTableName); hashSourceTable(source, sourceTableName, testDir); @@ -115,32 +109,35 @@ private void testSyncTable(HBaseTestingUtil source, HBaseTestingUtil target, Str } @Test - public void testSyncTable() throws Exception { - testSyncTable(UTIL1, UTIL1); + public void testSyncTable(TestInfo testInfo) throws Exception { + testSyncTable(testInfo, UTIL1, UTIL1); } @Test - public void testSyncTableToPeerCluster() throws Exception { - testSyncTable(UTIL1, UTIL2, "--sourceuri=" + UTIL1.getRpcConnnectionURI()); + public void testSyncTableToPeerCluster(TestInfo testInfo) throws Exception { + testSyncTable(testInfo, UTIL1, UTIL2, "--sourceuri=" + UTIL1.getRpcConnnectionURI()); } @Test - public void testSyncTableFromSourceToPeerCluster() throws Exception { - testSyncTable(UTIL2, UTIL1, "--sourceuri=" + UTIL2.getRpcConnnectionURI(), + public void testSyncTableFromSourceToPeerCluster(TestInfo testInfo) throws Exception { + testSyncTable(testInfo, UTIL2, UTIL1, "--sourceuri=" + UTIL2.getRpcConnnectionURI(), "--targeturi=" + UTIL1.getZkConnectionURI()); } @Test - public void testSyncTableFromSourceToPeerClusterWithClusterKey() throws Exception { - testSyncTable(UTIL2, UTIL1, "--sourcezkcluster=" 
+ UTIL2.getClusterKey(), + public void testSyncTableFromSourceToPeerClusterWithClusterKey(TestInfo testInfo) + throws Exception { + testSyncTable(testInfo, UTIL2, UTIL1, "--sourcezkcluster=" + UTIL2.getClusterKey(), "--targetzkcluster=" + UTIL1.getClusterKey()); } @Test - public void testSyncTableDoDeletesFalse() throws Exception { - final TableName sourceTableName = TableName.valueOf(name.getMethodName() + "_source"); - final TableName targetTableName = TableName.valueOf(name.getMethodName() + "_target"); - Path testDir = UTIL1.getDataTestDirOnTestFS(name.getMethodName()); + public void testSyncTableDoDeletesFalse(TestInfo testInfo) throws Exception { + final TableName sourceTableName = + TableName.valueOf(testInfo.getTestMethod().get().getName() + "_source"); + final TableName targetTableName = + TableName.valueOf(testInfo.getTestMethod().get().getName() + "_target"); + Path testDir = UTIL1.getDataTestDirOnTestFS(testInfo.getTestMethod().get().getName()); writeTestData(UTIL1, sourceTableName, UTIL1, targetTableName); hashSourceTable(UTIL1, sourceTableName, testDir); @@ -160,10 +157,12 @@ public void testSyncTableDoDeletesFalse() throws Exception { } @Test - public void testSyncTableDoPutsFalse() throws Exception { - final TableName sourceTableName = TableName.valueOf(name.getMethodName() + "_source"); - final TableName targetTableName = TableName.valueOf(name.getMethodName() + "_target"); - Path testDir = UTIL2.getDataTestDirOnTestFS(name.getMethodName()); + public void testSyncTableDoPutsFalse(TestInfo testInfo) throws Exception { + final TableName sourceTableName = + TableName.valueOf(testInfo.getTestMethod().get().getName() + "_source"); + final TableName targetTableName = + TableName.valueOf(testInfo.getTestMethod().get().getName() + "_target"); + Path testDir = UTIL2.getDataTestDirOnTestFS(testInfo.getTestMethod().get().getName()); writeTestData(UTIL2, sourceTableName, UTIL2, targetTableName); hashSourceTable(UTIL2, sourceTableName, testDir); @@ -183,10 
+182,12 @@ public void testSyncTableDoPutsFalse() throws Exception { } @Test - public void testSyncTableIgnoreTimestampsTrue() throws Exception { - final TableName sourceTableName = TableName.valueOf(name.getMethodName() + "_source"); - final TableName targetTableName = TableName.valueOf(name.getMethodName() + "_target"); - Path testDir = UTIL1.getDataTestDirOnTestFS(name.getMethodName()); + public void testSyncTableIgnoreTimestampsTrue(TestInfo testInfo) throws Exception { + final TableName sourceTableName = + TableName.valueOf(testInfo.getTestMethod().get().getName() + "_source"); + final TableName targetTableName = + TableName.valueOf(testInfo.getTestMethod().get().getName() + "_target"); + Path testDir = UTIL1.getDataTestDirOnTestFS(testInfo.getTestMethod().get().getName()); long current = EnvironmentEdgeManager.currentTime(); writeTestData(UTIL1, sourceTableName, UTIL2, targetTableName, current - 1000, current); hashSourceTable(UTIL1, sourceTableName, testDir, "--ignoreTimestamps=true"); @@ -206,18 +207,18 @@ public void testSyncTableIgnoreTimestampsTrue() throws Exception { } private void assertCellEquals(Cell sourceCell, Cell targetCell, BooleanSupplier checkTimestamp) { - assertTrue("Rows don't match, source: " + sourceCell + ", target: " + targetCell, - CellUtil.matchingRows(sourceCell, targetCell)); - assertTrue("Families don't match, source: " + sourceCell + ", target: " + targetCell, - CellUtil.matchingFamily(sourceCell, targetCell)); - assertTrue("Qualifiers don't match, source: " + sourceCell + ", target: " + targetCell, - CellUtil.matchingQualifier(sourceCell, targetCell)); + assertTrue(CellUtil.matchingRows(sourceCell, targetCell), + "Rows don't match, source: " + sourceCell + ", target: " + targetCell); + assertTrue(CellUtil.matchingFamily(sourceCell, targetCell), + "Families don't match, source: " + sourceCell + ", target: " + targetCell); + assertTrue(CellUtil.matchingQualifier(sourceCell, targetCell), + "Qualifiers don't match, source: " + 
sourceCell + ", target: " + targetCell); if (checkTimestamp.getAsBoolean()) { - assertTrue("Timestamps don't match, source: " + sourceCell + ", target: " + targetCell, - CellUtil.matchingTimestamp(sourceCell, targetCell)); + assertTrue(CellUtil.matchingTimestamp(sourceCell, targetCell), + "Timestamps don't match, source: " + sourceCell + ", target: " + targetCell); } - assertTrue("Values don't match, source: " + sourceCell + ", target: " + targetCell, - CellUtil.matchingValue(sourceCell, targetCell)); + assertTrue(CellUtil.matchingValue(sourceCell, targetCell), + "Values don't match, source: " + sourceCell + ", target: " + targetCell); } private void assertEqualTables(int expectedRows, HBaseTestingUtil sourceCluster, @@ -320,7 +321,7 @@ private void assertTargetDoDeletesFalse(int expectedRows, HBaseTestingUtil sourc targetRow = targetScanner.next(); sourceRow = sourceScanner.next(); } - assertEquals("Target expected rows does not match.", expectedRows, rowsCount); + assertEquals(expectedRows, rowsCount, "Target expected rows does not match."); } } @@ -390,7 +391,7 @@ private void assertTargetDoPutsFalse(int expectedRows, HBaseTestingUtil sourceCl targetRow = targetScanner.next(); sourceRow = sourceScanner.next(); } - assertEquals("Target expected rows does not match.", expectedRows, rowsCount); + assertEquals(expectedRows, rowsCount, "Target expected rows does not match."); } } @@ -402,7 +403,7 @@ private Counters syncTables(Configuration conf, TableName sourceTableName, args[options.length + 1] = sourceTableName.getNameAsString(); args[options.length + 2] = targetTableName.getNameAsString(); int code = syncTable.run(args); - assertEquals("sync table job failed", 0, code); + assertEquals(0, code, "sync table job failed"); LOG.info("Sync tables completed"); return syncTable.counters; @@ -421,7 +422,7 @@ private void hashSourceTable(HBaseTestingUtil sourceCluster, TableName sourceTab args[options.length + 3] = sourceTableName.getNameAsString(); args[options.length + 
4] = testDir.toString(); int code = hashTable.run(args); - assertEquals("hash table job failed", 0, code); + assertEquals(0, code, "hash table job failed"); FileSystem fs = sourceCluster.getTestFileSystem(); diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormat.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormat.java index c12a7e817bb7..8d9056e0f90e 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormat.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormat.java @@ -17,9 +17,10 @@ */ package org.apache.hadoop.hbase.mapreduce; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; import static org.mockito.Mockito.*; import static org.mockito.Mockito.doReturn; import static org.mockito.Mockito.doThrow; @@ -31,7 +32,6 @@ import java.util.Map; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CompareOperator; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.NotServingRegionException; @@ -57,12 +57,11 @@ import org.apache.hadoop.mapreduce.Job; import org.apache.hadoop.mapreduce.JobContext; import org.apache.hadoop.mapreduce.lib.output.NullOutputFormat; -import org.junit.AfterClass; -import org.junit.Before; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import 
org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; import org.mockito.invocation.InvocationOnMock; import org.mockito.stubbing.Answer; import org.slf4j.Logger; @@ -71,13 +70,9 @@ /** * This tests the TableInputFormat and its recovery semantics */ -@Category(LargeTests.class) +@Tag(LargeTests.TAG) public class TestTableInputFormat { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestTableInputFormat.class); - private static final Logger LOG = LoggerFactory.getLogger(TestTableInputFormat.class); private final static HBaseTestingUtil UTIL = new HBaseTestingUtil(); @@ -86,17 +81,17 @@ public class TestTableInputFormat { private static final byte[][] columns = new byte[][] { FAMILY }; - @BeforeClass + @BeforeAll public static void beforeClass() throws Exception { UTIL.startMiniCluster(); } - @AfterClass + @AfterAll public static void afterClass() throws Exception { UTIL.shutdownMiniCluster(); } - @Before + @BeforeEach public void before() throws IOException { LOG.info("before"); UTIL.ensureSomeRegionServersAvailable(1); @@ -266,11 +261,11 @@ public void testTableRecordReaderScannerFailMapreduce() throws IOException, Inte /** * Run test assuming Scanner IOException failure using newer mapreduce api */ - @Test(expected = IOException.class) + @Test public void testTableRecordReaderScannerFailMapreduceTwice() throws IOException, InterruptedException { Table htable = createIOEScannerTable(Bytes.toBytes("table3-mr"), 2); - runTestMapreduce(htable); + assertThrows(IOException.class, () -> runTestMapreduce(htable)); } /** @@ -286,11 +281,12 @@ public void testTableRecordReaderScannerTimeoutMapreduce() /** * Run test assuming NotServingRegionException using newer mapreduce api */ - @Test(expected = org.apache.hadoop.hbase.NotServingRegionException.class) + @Test public void testTableRecordReaderScannerTimeoutMapreduceTwice() throws IOException, InterruptedException { Table 
htable = createDNRIOEScannerTable(Bytes.toBytes("table5-mr"), 2); - runTestMapreduce(htable); + assertThrows(org.apache.hadoop.hbase.NotServingRegionException.class, + () -> runTestMapreduce(htable)); } /** @@ -334,19 +330,25 @@ void testInputFormat(Class clazz) job.setNumReduceTasks(0); LOG.debug("submitting job."); - assertTrue("job failed!", job.waitForCompletion(true)); - assertEquals("Saw the wrong number of instances of the filtered-for row.", 2, job.getCounters() - .findCounter(TestTableInputFormat.class.getName() + ":row", "aaa").getValue()); - assertEquals("Saw any instances of the filtered out row.", 0, job.getCounters() - .findCounter(TestTableInputFormat.class.getName() + ":row", "bbb").getValue()); - assertEquals("Saw the wrong number of instances of columnA.", 1, job.getCounters() - .findCounter(TestTableInputFormat.class.getName() + ":family", "columnA").getValue()); - assertEquals("Saw the wrong number of instances of columnB.", 1, job.getCounters() - .findCounter(TestTableInputFormat.class.getName() + ":family", "columnB").getValue()); - assertEquals("Saw the wrong count of values for the filtered-for row.", 2, job.getCounters() - .findCounter(TestTableInputFormat.class.getName() + ":value", "value aaa").getValue()); - assertEquals("Saw the wrong count of values for the filtered-out row.", 0, job.getCounters() - .findCounter(TestTableInputFormat.class.getName() + ":value", "value bbb").getValue()); + assertTrue(job.waitForCompletion(true), "job failed!"); + assertEquals(2, job.getCounters() + .findCounter(TestTableInputFormat.class.getName() + ":row", "aaa").getValue(), + "Saw the wrong number of instances of the filtered-for row."); + assertEquals(0, job.getCounters() + .findCounter(TestTableInputFormat.class.getName() + ":row", "bbb").getValue(), + "Saw any instances of the filtered out row."); + assertEquals(1, job.getCounters() + .findCounter(TestTableInputFormat.class.getName() + ":family", "columnA").getValue(), + "Saw the wrong number of 
instances of columnA."); + assertEquals(1, job.getCounters() + .findCounter(TestTableInputFormat.class.getName() + ":family", "columnB").getValue(), + "Saw the wrong number of instances of columnB."); + assertEquals(2, job.getCounters() + .findCounter(TestTableInputFormat.class.getName() + ":value", "value aaa").getValue(), + "Saw the wrong count of values for the filtered-for row."); + assertEquals(0, job.getCounters() + .findCounter(TestTableInputFormat.class.getName() + ":value", "value bbb").getValue(), + "Saw the wrong count of values for the filtered-out row."); } public static class ExampleVerifier extends TableMapper { diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatBase.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatBase.java index 7b2170d19520..124221e44784 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatBase.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatBase.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hbase.mapreduce; -import static org.junit.Assert.assertEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyBoolean; import static org.mockito.Mockito.mock; @@ -31,7 +31,6 @@ import java.util.TreeMap; import java.util.concurrent.ExecutorService; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionLocation; @@ -54,20 +53,15 @@ import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.mapreduce.JobContext; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import 
org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; import org.mockito.Mockito; import org.mockito.invocation.InvocationOnMock; import org.mockito.stubbing.Answer; -@Category({ SmallTests.class }) +@Tag(SmallTests.TAG) public class TestTableInputFormatBase { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestTableInputFormatBase.class); - @Test public void testReuseRegionSizeCalculator() throws IOException { JobContext context = mock(JobContext.class); diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScan.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScan.java index aeea1dffbf51..3cf0004018e9 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScan.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScan.java @@ -18,20 +18,15 @@ package org.apache.hadoop.hbase.mapreduce; import java.io.IOException; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.testclassification.MapReduceTests; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; -@Category({ MapReduceTests.class, LargeTests.class }) +@Tag(MapReduceTests.TAG) +@Tag(LargeTests.TAG) public class TestTableInputFormatScan extends TestTableInputFormatScanBase { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestTableInputFormatScan.class); - /** * Tests a MR scan using specific number of mappers. 
The test table has 26 regions, */ diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanBase.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanBase.java index ced6e156e87b..3010abd59db5 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanBase.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanBase.java @@ -17,9 +17,9 @@ */ package org.apache.hadoop.hbase.mapreduce; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotEquals; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; import java.io.IOException; import java.util.ArrayList; @@ -44,9 +44,9 @@ import org.apache.hadoop.mapreduce.TaskCounter; import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat; import org.apache.hadoop.mapreduce.lib.output.NullOutputFormat; -import org.junit.AfterClass; -import org.junit.Assert; -import org.junit.BeforeClass; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.BeforeAll; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -66,7 +66,7 @@ public abstract class TestTableInputFormatScanBase { private static Table table = null; - @BeforeClass + @BeforeAll public static void setUpBeforeClass() throws Exception { // start mini hbase cluster TEST_UTIL.startMiniCluster(3); @@ -75,7 +75,7 @@ public static void setUpBeforeClass() throws Exception { TEST_UTIL.loadTable(table, INPUT_FAMILYS, null, false); } - @AfterClass + @AfterAll public static void tearDownAfterClass() throws Exception { TEST_UTIL.shutdownMiniCluster(); } @@ -238,15 +238,15 @@ protected void testNumOfSplits(int splitsPerRegion, int 
expectedNumOfSplits) ImmutableBytesWritable.class, ImmutableBytesWritable.class, job); TableInputFormat tif = new TableInputFormat(); tif.setConf(job.getConfiguration()); - Assert.assertEquals(TABLE_NAME, table.getName()); + Assertions.assertEquals(TABLE_NAME, table.getName()); List splits = tif.getSplits(job); for (InputSplit split : splits) { TableSplit tableSplit = (TableSplit) split; // In table input format, we do no store the scanner at the split level // because we use the scan object from the map-reduce job conf itself. - Assert.assertTrue(tableSplit.getScanAsString().isEmpty()); + Assertions.assertTrue(tableSplit.getScanAsString().isEmpty()); } - Assert.assertEquals(expectedNumOfSplits, splits.size()); + Assertions.assertEquals(expectedNumOfSplits, splits.size()); } /** @@ -269,11 +269,12 @@ protected void testNumOfSplitsMR(int splitsPerRegion, int expectedNumOfSplits) job.setReducerClass(ScanReducer.class); job.setNumReduceTasks(1); job.setOutputFormatClass(NullOutputFormat.class); - assertTrue("job failed!", job.waitForCompletion(true)); + assertTrue(job.waitForCompletion(true), "job failed!"); // for some reason, hbase does not expose JobCounter.TOTAL_LAUNCHED_MAPS, // we use TaskCounter.SHUFFLED_MAPS to get total launched maps - assertEquals("Saw the wrong count of mappers per region", expectedNumOfSplits, - job.getCounters().findCounter(TaskCounter.SHUFFLED_MAPS).getValue()); + assertEquals(expectedNumOfSplits, + job.getCounters().findCounter(TaskCounter.SHUFFLED_MAPS).getValue(), + "Saw the wrong count of mappers per region"); } /** @@ -292,14 +293,14 @@ protected void testAutobalanceNumOfSplit() throws IOException { TableInputFormat tif = new TableInputFormat(); List res = tif.calculateAutoBalancedSplits(splits, 1073741824); - assertEquals("Saw the wrong number of splits", 5, res.size()); + assertEquals(5, res.size(), "Saw the wrong number of splits"); TableSplit ts1 = (TableSplit) res.get(0); - assertEquals("The first split end key should be", 2, 
Bytes.toInt(ts1.getEndRow())); + assertEquals(2, Bytes.toInt(ts1.getEndRow()), "The first split end key should be"); TableSplit ts2 = (TableSplit) res.get(1); - assertEquals("The second split regionsize should be", 20 * 1048576, ts2.getLength()); + assertEquals(20 * 1048576, ts2.getLength(), "The second split regionsize should be"); TableSplit ts3 = (TableSplit) res.get(2); - assertEquals("The third split start key should be", 3, Bytes.toInt(ts3.getStartRow())); + assertEquals(3, Bytes.toInt(ts3.getStartRow()), "The third split start key should be"); TableSplit ts4 = (TableSplit) res.get(4); - assertNotEquals("The seventh split start key should not be", 4, Bytes.toInt(ts4.getStartRow())); + assertNotEquals(4, Bytes.toInt(ts4.getStartRow()), "The seventh split start key should not be"); } } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanEmptyToAPP.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanEmptyToAPP.java index addcdc898c8e..885bfb5ab44c 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanEmptyToAPP.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanEmptyToAPP.java @@ -18,20 +18,15 @@ package org.apache.hadoop.hbase.mapreduce; import java.io.IOException; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.testclassification.MapReduceTests; import org.apache.hadoop.hbase.testclassification.MediumTests; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; -@Category({ MapReduceTests.class, MediumTests.class }) +@Tag(MapReduceTests.TAG) +@Tag(MediumTests.TAG) public class TestTableInputFormatScanEmptyToAPP extends TestTableInputFormatScanBase { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - 
HBaseClassTestRule.forClass(TestTableInputFormatScanEmptyToAPP.class); - /** * Tests a MR scan using specific start and stop rows. */ diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanEmptyToBBA.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanEmptyToBBA.java index e395b36e2a70..2a0357e5a46d 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanEmptyToBBA.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanEmptyToBBA.java @@ -18,20 +18,15 @@ package org.apache.hadoop.hbase.mapreduce; import java.io.IOException; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.testclassification.MapReduceTests; import org.apache.hadoop.hbase.testclassification.MediumTests; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; -@Category({ MapReduceTests.class, MediumTests.class }) +@Tag(MapReduceTests.TAG) +@Tag(MediumTests.TAG) public class TestTableInputFormatScanEmptyToBBA extends TestTableInputFormatScanBase { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestTableInputFormatScanEmptyToBBA.class); - /** * Tests a MR scan using specific start and stop rows. 
*/ diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanEmptyToBBB.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanEmptyToBBB.java index f86578712ae8..9107c0efb6e6 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanEmptyToBBB.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanEmptyToBBB.java @@ -18,20 +18,15 @@ package org.apache.hadoop.hbase.mapreduce; import java.io.IOException; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.testclassification.MapReduceTests; import org.apache.hadoop.hbase.testclassification.MediumTests; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; -@Category({ MapReduceTests.class, MediumTests.class }) +@Tag(MapReduceTests.TAG) +@Tag(MediumTests.TAG) public class TestTableInputFormatScanEmptyToBBB extends TestTableInputFormatScanBase { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestTableInputFormatScanEmptyToBBB.class); - /** * Tests a MR scan using specific start and stop rows. 
*/ diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanEmptyToEmpty.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanEmptyToEmpty.java index ef7b38b21be1..c4e73a945bb3 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanEmptyToEmpty.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanEmptyToEmpty.java @@ -18,20 +18,15 @@ package org.apache.hadoop.hbase.mapreduce; import java.io.IOException; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.testclassification.MapReduceTests; import org.apache.hadoop.hbase.testclassification.MediumTests; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; -@Category({ MapReduceTests.class, MediumTests.class }) +@Tag(MapReduceTests.TAG) +@Tag(MediumTests.TAG) public class TestTableInputFormatScanEmptyToEmpty extends TestTableInputFormatScanBase { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestTableInputFormatScanEmptyToEmpty.class); - /** * Tests a MR scan using specific start and stop rows. 
*/ diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanEmptyToOPP.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanEmptyToOPP.java index f20d8113f780..d88e9c57205a 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanEmptyToOPP.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanEmptyToOPP.java @@ -18,20 +18,15 @@ package org.apache.hadoop.hbase.mapreduce; import java.io.IOException; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.testclassification.MapReduceTests; import org.apache.hadoop.hbase.testclassification.MediumTests; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; -@Category({ MapReduceTests.class, MediumTests.class }) +@Tag(MapReduceTests.TAG) +@Tag(MediumTests.TAG) public class TestTableInputFormatScanEmptyToOPP extends TestTableInputFormatScanBase { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestTableInputFormatScanEmptyToOPP.class); - /** * Tests a MR scan using specific start and stop rows. 
*/ diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanOBBToOPP.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanOBBToOPP.java index fe3d703a289b..0eb16e3ff236 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanOBBToOPP.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanOBBToOPP.java @@ -18,20 +18,15 @@ package org.apache.hadoop.hbase.mapreduce; import java.io.IOException; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.testclassification.MapReduceTests; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; -@Category({ MapReduceTests.class, LargeTests.class }) +@Tag(MapReduceTests.TAG) +@Tag(LargeTests.TAG) public class TestTableInputFormatScanOBBToOPP extends TestTableInputFormatScanBase { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestTableInputFormatScanOBBToOPP.class); - /** * Tests a MR scan using specific start and stop rows. 
*/ diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanOBBToQPP.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanOBBToQPP.java index f6985a3fd773..1c32ce6e2227 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanOBBToQPP.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanOBBToQPP.java @@ -18,20 +18,15 @@ package org.apache.hadoop.hbase.mapreduce; import java.io.IOException; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.testclassification.MapReduceTests; import org.apache.hadoop.hbase.testclassification.MediumTests; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; -@Category({ MapReduceTests.class, MediumTests.class }) +@Tag(MapReduceTests.TAG) +@Tag(MediumTests.TAG) public class TestTableInputFormatScanOBBToQPP extends TestTableInputFormatScanBase { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestTableInputFormatScanOBBToQPP.class); - /** * Tests a MR scan using specific start and stop rows. 
*/ diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanOPPToEmpty.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanOPPToEmpty.java index e57051dfd192..7543cc39537f 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanOPPToEmpty.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanOPPToEmpty.java @@ -18,20 +18,15 @@ package org.apache.hadoop.hbase.mapreduce; import java.io.IOException; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.testclassification.MapReduceTests; import org.apache.hadoop.hbase.testclassification.MediumTests; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; -@Category({ MapReduceTests.class, MediumTests.class }) +@Tag(MapReduceTests.TAG) +@Tag(MediumTests.TAG) public class TestTableInputFormatScanOPPToEmpty extends TestTableInputFormatScanBase { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestTableInputFormatScanOPPToEmpty.class); - /** * Tests a MR scan using specific start and stop rows. 
*/ diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanYYXToEmpty.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanYYXToEmpty.java index c8b3394e54b4..a01fc83caa79 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanYYXToEmpty.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanYYXToEmpty.java @@ -18,20 +18,15 @@ package org.apache.hadoop.hbase.mapreduce; import java.io.IOException; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.testclassification.MapReduceTests; import org.apache.hadoop.hbase.testclassification.MediumTests; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; -@Category({ MapReduceTests.class, MediumTests.class }) +@Tag(MapReduceTests.TAG) +@Tag(MediumTests.TAG) public class TestTableInputFormatScanYYXToEmpty extends TestTableInputFormatScanBase { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestTableInputFormatScanYYXToEmpty.class); - /** * Tests a MR scan using specific start and stop rows. 
*/ diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanYYYToEmpty.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanYYYToEmpty.java index 175d10e1f755..c01477cffae3 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanYYYToEmpty.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanYYYToEmpty.java @@ -18,20 +18,15 @@ package org.apache.hadoop.hbase.mapreduce; import java.io.IOException; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.testclassification.MapReduceTests; import org.apache.hadoop.hbase.testclassification.MediumTests; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; -@Category({ MapReduceTests.class, MediumTests.class }) +@Tag(MapReduceTests.TAG) +@Tag(MediumTests.TAG) public class TestTableInputFormatScanYYYToEmpty extends TestTableInputFormatScanBase { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestTableInputFormatScanYYYToEmpty.class); - /** * Tests a MR scan using specific start and stop rows. 
*/ diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanYZYToEmpty.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanYZYToEmpty.java index 9ce2f0782b2f..68d1ecc8c439 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanYZYToEmpty.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanYZYToEmpty.java @@ -18,20 +18,15 @@ package org.apache.hadoop.hbase.mapreduce; import java.io.IOException; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.testclassification.MapReduceTests; import org.apache.hadoop.hbase.testclassification.MediumTests; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; -@Category({ MapReduceTests.class, MediumTests.class }) +@Tag(MapReduceTests.TAG) +@Tag(MediumTests.TAG) public class TestTableInputFormatScanYZYToEmpty extends TestTableInputFormatScanBase { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestTableInputFormatScanYZYToEmpty.class); - /** * Tests a MR scan using specific start and stop rows. 
*/ diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableMapReduce.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableMapReduce.java index 99606050667a..523ed5e670d5 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableMapReduce.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableMapReduce.java @@ -17,9 +17,9 @@ */ package org.apache.hadoop.hbase.mapreduce; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; import java.io.File; import java.io.IOException; @@ -27,7 +27,6 @@ import java.util.NavigableMap; import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableNotEnabledException; import org.apache.hadoop.hbase.TableNotFoundException; @@ -44,9 +43,8 @@ import org.apache.hadoop.mapreduce.Counters; import org.apache.hadoop.mapreduce.Job; import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -56,13 +54,10 @@ * to the table. 
*/ -@Category({ VerySlowMapReduceTests.class, LargeTests.class }) +@Tag(VerySlowMapReduceTests.TAG) +@Tag(LargeTests.TAG) public class TestTableMapReduce extends TestTableMapReduceBase { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestTableMapReduce.class); - private static final Logger LOG = LoggerFactory.getLogger(TestTableMapReduce.class); @Override @@ -142,27 +137,27 @@ private void verifyJobCountersAreEmitted(Job job) throws IOException { Counters counters = job.getCounters(); Counter counter = counters.findCounter(TableRecordReaderImpl.HBASE_COUNTER_GROUP_NAME, "RPC_CALLS"); - assertNotNull("Unable to find Job counter for HBase scan metrics, RPC_CALLS", counter); - assertTrue("Counter value for RPC_CALLS should be larger than 0", counter.getValue() > 0); + assertNotNull(counter, "Unable to find Job counter for HBase scan metrics, RPC_CALLS"); + assertTrue(counter.getValue() > 0, "Counter value for RPC_CALLS should be larger than 0"); } - @Test(expected = TableNotEnabledException.class) + @Test public void testWritingToDisabledTable() throws IOException { - - try (Admin admin = UTIL.getConnection().getAdmin(); - Table table = UTIL.getConnection().getTable(TABLE_FOR_NEGATIVE_TESTS)) { - admin.disableTable(table.getName()); - runTestOnTable(table); - fail("Should not have reached here, should have thrown an exception"); - } + assertThrows(TableNotEnabledException.class, () -> { + try (Admin admin = UTIL.getConnection().getAdmin(); + Table table = UTIL.getConnection().getTable(TABLE_FOR_NEGATIVE_TESTS)) { + admin.disableTable(table.getName()); + runTestOnTable(table); + } + }); } - @Test(expected = TableNotFoundException.class) + @Test public void testWritingToNonExistentTable() throws IOException { - - try (Table table = UTIL.getConnection().getTable(TableName.valueOf("table-does-not-exist"))) { - runTestOnTable(table); - fail("Should not have reached here, should have thrown an exception"); - } + 
assertThrows(TableNotFoundException.class, () -> { + try (Table table = UTIL.getConnection().getTable(TableName.valueOf("table-does-not-exist"))) { + runTestOnTable(table); + } + }); } } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableMapReduceBase.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableMapReduceBase.java index 477ea5d7f6dd..7f00914abb31 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableMapReduceBase.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableMapReduceBase.java @@ -17,8 +17,8 @@ */ package org.apache.hadoop.hbase.mapreduce; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; import java.io.IOException; import java.util.Iterator; @@ -37,9 +37,9 @@ import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.hbase.util.Bytes; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.Test; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Test; import org.slf4j.Logger; /** @@ -67,7 +67,7 @@ public abstract class TestTableMapReduceBase { */ protected abstract void runTestOnTable(Table table) throws IOException; - @BeforeClass + @BeforeAll public static void beforeClass() throws Exception { UTIL.startMiniCluster(); Table table = UTIL.createMultiRegionTable(MULTI_REGION_TABLE_NAME, @@ -76,7 +76,7 @@ public static void beforeClass() throws Exception { UTIL.createTable(TABLE_FOR_NEGATIVE_TESTS, new byte[][] { INPUT_FAMILY, OUTPUT_FAMILY }); } - @AfterClass + @AfterAll public static void afterClass() throws Exception { UTIL.deleteTable(TABLE_FOR_NEGATIVE_TESTS); UTIL.shutdownMiniCluster(); diff --git 
a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableMapReduceUtil.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableMapReduceUtil.java index 22688485c971..5cd386c1f9d1 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableMapReduceUtil.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableMapReduceUtil.java @@ -18,16 +18,15 @@ package org.apache.hadoop.hbase.mapreduce; import static org.apache.hadoop.security.UserGroupInformation.loginUserFromKeytab; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNull; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertTrue; import java.io.Closeable; import java.io.File; import java.net.URI; import java.util.Collection; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.security.token.AuthenticationTokenIdentifier; @@ -43,21 +42,17 @@ import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.token.TokenIdentifier; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; /** * Test different variants of initTableMapperJob method */ -@Category({ MapReduceTests.class, LargeTests.class }) +@Tag(MapReduceTests.TAG) +@Tag(LargeTests.TAG) public class TestTableMapReduceUtil { private static final String HTTP_PRINCIPAL = "HTTP/localhost"; - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestTableMapReduceUtil.class); 
- /* * initTableSnapshotMapperJob is tested in {@link TestTableSnapshotInputFormat} because the method * depends on an online cluster. diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableOutputFormat.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableOutputFormat.java index 52c7321617a4..404f945d594a 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableOutputFormat.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableOutputFormat.java @@ -20,7 +20,6 @@ import java.io.IOException; import javax.validation.constraints.Null; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Delete; @@ -33,24 +32,20 @@ import org.apache.hadoop.mapreduce.OutputCommitter; import org.apache.hadoop.mapreduce.RecordWriter; import org.apache.hadoop.mapreduce.TaskAttemptContext; -import org.junit.After; -import org.junit.AfterClass; -import org.junit.Assert; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; import org.mockito.Mockito; /** * Simple Tests to check whether the durability of the Mutation is changed or not, for * {@link TableOutputFormat} if {@link TableOutputFormat#WAL_PROPERTY} is set to false. 
*/ -@Category(MediumTests.class) +@Tag(MediumTests.TAG) public class TestTableOutputFormat { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestTableOutputFormat.class); private static final HBaseTestingUtil util = new HBaseTestingUtil(); private static final TableName TABLE_NAME = TableName.valueOf("TEST_TABLE"); @@ -60,7 +55,7 @@ public class TestTableOutputFormat { private static TaskAttemptContext context; private static TableOutputFormat tableOutputFormat; - @BeforeClass + @BeforeAll public static void setUp() throws Exception { util.startMiniCluster(); util.createTable(TABLE_NAME, columnFamily); @@ -71,12 +66,12 @@ public static void setUp() throws Exception { conf.set(TableOutputFormat.OUTPUT_TABLE, "TEST_TABLE"); } - @AfterClass + @AfterAll public static void tearDown() throws Exception { util.shutdownMiniCluster(); } - @After + @AfterEach public void close() throws IOException, InterruptedException { if (writer != null && context != null) { writer.close(context); @@ -96,14 +91,14 @@ public void testTableOutputFormatWhenWalIsOFFForPut() throws IOException, Interr put.addColumn(columnFamily, Bytes.toBytes("aa"), Bytes.toBytes("value")); // verifying whether durability of mutation is USE_DEFAULT or not, before commiting write. - Assert.assertEquals("Durability of the mutation should be USE_DEFAULT", Durability.USE_DEFAULT, - put.getDurability()); + Assertions.assertEquals(Durability.USE_DEFAULT, put.getDurability(), + "Durability of the mutation should be USE_DEFAULT"); writer.write(null, put); // verifying whether durability of mutation got changed to the SKIP_WAL or not. 
- Assert.assertEquals("Durability of the mutation should be SKIP_WAL", Durability.SKIP_WAL, - put.getDurability()); + Assertions.assertEquals(Durability.SKIP_WAL, put.getDurability(), + "Durability of the mutation should be SKIP_WAL"); } @Test @@ -120,14 +115,14 @@ public void testTableOutputFormatWhenWalIsOFFForDelete() delete.addColumn(columnFamily, Bytes.toBytes("aa")); // verifying whether durability of mutation is USE_DEFAULT or not, before commiting write. - Assert.assertEquals("Durability of the mutation should be USE_DEFAULT", Durability.USE_DEFAULT, - delete.getDurability()); + Assertions.assertEquals(Durability.USE_DEFAULT, delete.getDurability(), + "Durability of the mutation should be USE_DEFAULT"); writer.write(null, delete); // verifying whether durability of mutation got changed from USE_DEFAULT to the SKIP_WAL or not. - Assert.assertEquals("Durability of the mutation should be SKIP_WAL", Durability.SKIP_WAL, - delete.getDurability()); + Assertions.assertEquals(Durability.SKIP_WAL, delete.getDurability(), + "Durability of the mutation should be SKIP_WAL"); } @Test @@ -135,14 +130,14 @@ public void testOutputCommitterConfiguration() throws IOException, InterruptedEx // 1. Verify it returns the default committer when the property is not set. conf.unset(TableOutputFormat.OUTPUT_COMMITTER_CLASS); tableOutputFormat.setConf(conf); - Assert.assertEquals("Should use default committer", TableOutputCommitter.class, - tableOutputFormat.getOutputCommitter(context).getClass()); + Assertions.assertEquals(TableOutputCommitter.class, + tableOutputFormat.getOutputCommitter(context).getClass(), "Should use default committer"); // 2. Verify it returns the custom committer when the property is set. 
conf.set(TableOutputFormat.OUTPUT_COMMITTER_CLASS, DummyCommitter.class.getName()); tableOutputFormat.setConf(conf); - Assert.assertEquals("Should use custom committer", DummyCommitter.class, - tableOutputFormat.getOutputCommitter(context).getClass()); + Assertions.assertEquals(DummyCommitter.class, + tableOutputFormat.getOutputCommitter(context).getClass(), "Should use custom committer"); } // Simple dummy committer for testing diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableRecordReader.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableRecordReader.java index 232083ea7e78..7184c020713f 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableRecordReader.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableRecordReader.java @@ -17,13 +17,12 @@ */ package org.apache.hadoop.hbase.mapreduce; -import static org.junit.Assert.assertEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; import java.io.IOException; import java.util.ArrayList; import java.util.List; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HTestConst; @@ -37,20 +36,15 @@ import org.apache.hadoop.hbase.regionserver.StoreScanner; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.Bytes; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; -@Category(MediumTests.class) +@Tag(MediumTests.TAG) public class TestTableRecordReader { private final static HBaseTestingUtil TEST_UTIL = new 
HBaseTestingUtil(); - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestTableRecordReader.class); - private static TableName TABLE_NAME = TableName.valueOf("TestTableRecordReader"); private static int NUM_ROWS = 5; @@ -70,7 +64,7 @@ public class TestTableRecordReader { private static final int TIMEOUT = 4000; - @BeforeClass + @BeforeAll public static void setUpBeforeClass() throws Exception { Configuration conf = TEST_UTIL.getConfiguration(); @@ -105,7 +99,7 @@ private static List createPuts(byte[][] rows, byte[][] families, byte[][] q return puts; } - @AfterClass + @AfterAll public static void tearDownAfterClass() throws Exception { TEST_UTIL.shutdownMiniCluster(); } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableSnapshotInputFormat.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableSnapshotInputFormat.java index c24f8e62c816..12444d4d9210 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableSnapshotInputFormat.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableSnapshotInputFormat.java @@ -23,7 +23,9 @@ import static org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormatImpl.SNAPSHOT_INPUTFORMAT_LOCALITY_ENABLED_KEY; import static org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormatImpl.SNAPSHOT_INPUTFORMAT_ROW_LIMIT_PER_INPUTSPLIT; import static org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormatImpl.SNAPSHOT_INPUTFORMAT_SCANNER_READTYPE; -import static org.junit.Assert.assertEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -33,7 +35,6 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import 
org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HDFSBlocksDistribution; @@ -60,24 +61,18 @@ import org.apache.hadoop.mapreduce.Reducer; import org.apache.hadoop.mapreduce.TaskAttemptContext; import org.apache.hadoop.mapreduce.lib.output.NullOutputFormat; -import org.junit.Assert; -import org.junit.ClassRule; -import org.junit.Rule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.rules.TestName; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestInfo; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hbase.thirdparty.com.google.common.collect.Lists; -@Category({ VerySlowMapReduceTests.class, LargeTests.class }) +@Tag(VerySlowMapReduceTests.TAG) +@Tag(LargeTests.TAG) public class TestTableSnapshotInputFormat extends TableSnapshotInputFormatTestBase { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestTableSnapshotInputFormat.class); - private static final Logger LOG = LoggerFactory.getLogger(TestTableSnapshotInputFormat.class); private static final byte[] bbb = Bytes.toBytes("bbb"); @@ -85,9 +80,6 @@ public class TestTableSnapshotInputFormat extends TableSnapshotInputFormatTestBa private static final byte[] bbc = Bytes.toBytes("bbc"); private static final byte[] yya = Bytes.toBytes("yya"); - @Rule - public TestName name = new TestName(); - @Override protected byte[] getStartRow() { return bbb; @@ -104,19 +96,18 @@ public void testGetBestLocations() throws IOException { Configuration conf = UTIL.getConfiguration(); HDFSBlocksDistribution blockDistribution = new HDFSBlocksDistribution(); - Assert.assertEquals(null, - TableSnapshotInputFormatImpl.getBestLocations(conf, blockDistribution)); + assertEquals(null, 
TableSnapshotInputFormatImpl.getBestLocations(conf, blockDistribution)); blockDistribution.addHostsAndBlockWeight(new String[] { "h1" }, 1); - Assert.assertEquals(Lists.newArrayList("h1"), + assertEquals(Lists.newArrayList("h1"), TableSnapshotInputFormatImpl.getBestLocations(conf, blockDistribution)); blockDistribution.addHostsAndBlockWeight(new String[] { "h1" }, 1); - Assert.assertEquals(Lists.newArrayList("h1"), + assertEquals(Lists.newArrayList("h1"), TableSnapshotInputFormatImpl.getBestLocations(conf, blockDistribution)); blockDistribution.addHostsAndBlockWeight(new String[] { "h2" }, 1); - Assert.assertEquals(Lists.newArrayList("h1"), + assertEquals(Lists.newArrayList("h1"), TableSnapshotInputFormatImpl.getBestLocations(conf, blockDistribution)); blockDistribution = new HDFSBlocksDistribution(); @@ -124,21 +115,21 @@ public void testGetBestLocations() throws IOException { blockDistribution.addHostsAndBlockWeight(new String[] { "h2" }, 7); blockDistribution.addHostsAndBlockWeight(new String[] { "h3" }, 5); blockDistribution.addHostsAndBlockWeight(new String[] { "h4" }, 1); - Assert.assertEquals(Lists.newArrayList("h1"), + assertEquals(Lists.newArrayList("h1"), TableSnapshotInputFormatImpl.getBestLocations(conf, blockDistribution)); blockDistribution.addHostsAndBlockWeight(new String[] { "h2" }, 2); - Assert.assertEquals(Lists.newArrayList("h1", "h2"), + assertEquals(Lists.newArrayList("h1", "h2"), TableSnapshotInputFormatImpl.getBestLocations(conf, blockDistribution)); blockDistribution.addHostsAndBlockWeight(new String[] { "h2" }, 3); - Assert.assertEquals(Lists.newArrayList("h2", "h1"), + assertEquals(Lists.newArrayList("h2", "h1"), TableSnapshotInputFormatImpl.getBestLocations(conf, blockDistribution)); blockDistribution.addHostsAndBlockWeight(new String[] { "h3" }, 6); blockDistribution.addHostsAndBlockWeight(new String[] { "h4" }, 9); - Assert.assertEquals(Lists.newArrayList("h2", "h3", "h4"), + assertEquals(Lists.newArrayList("h2", "h3", "h4"), 
TableSnapshotInputFormatImpl.getBestLocations(conf, blockDistribution)); } @@ -174,8 +165,8 @@ protected void cleanup(Context context) throws IOException, InterruptedException } @Test - public void testInitTableSnapshotMapperJobConfig() throws Exception { - final TableName tableName = TableName.valueOf(name.getMethodName()); + public void testInitTableSnapshotMapperJobConfig(TestInfo testInfo) throws Exception { + final TableName tableName = TableName.valueOf(testInfo.getTestMethod().get().getName()); String snapshotName = "foo"; try { @@ -189,11 +180,11 @@ public void testInitTableSnapshotMapperJobConfig() throws Exception { // TODO: would be better to examine directly the cache instance that results from this // config. Currently this is not possible because BlockCache initialization is static. - Assert.assertEquals("Snapshot job should be configured for default LruBlockCache.", - HConstants.HFILE_BLOCK_CACHE_SIZE_DEFAULT, - job.getConfiguration().getFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, -1), 0.01); - Assert.assertEquals("Snapshot job should not use BucketCache.", 0, - job.getConfiguration().getFloat("hbase.bucketcache.size", -1), 0.01); + assertEquals(HConstants.HFILE_BLOCK_CACHE_SIZE_DEFAULT, + job.getConfiguration().getFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, -1), 0.01, + "Snapshot job should be configured for default LruBlockCache."); + assertEquals(0, job.getConfiguration().getFloat("hbase.bucketcache.size", -1), 0.01, + "Snapshot job should not use BucketCache."); } finally { UTIL.getAdmin().deleteSnapshot(snapshotName); UTIL.deleteTable(tableName); @@ -201,11 +192,13 @@ public void testInitTableSnapshotMapperJobConfig() throws Exception { } @Test - public void testWithMockedMapReduceSingleRegionByRegionLocation() throws Exception { + public void testWithMockedMapReduceSingleRegionByRegionLocation(TestInfo testInfo) + throws Exception { Configuration conf = UTIL.getConfiguration(); conf.setBoolean(SNAPSHOT_INPUTFORMAT_LOCALITY_BY_REGION_LOCATION, 
true); try { - testWithMockedMapReduce(UTIL, name.getMethodName() + "Snapshot", 1, 1, 1, true); + testWithMockedMapReduce(UTIL, testInfo.getTestMethod().get().getName() + "Snapshot", 1, 1, 1, + true); } finally { conf.unset(SNAPSHOT_INPUTFORMAT_LOCALITY_BY_REGION_LOCATION); } @@ -223,7 +216,7 @@ public void testRestoreSnapshotDoesNotCreateBackRefLinksInit(TableName tableName @Override public void testWithMockedMapReduce(HBaseTestingUtil util, String snapshotName, int numRegions, int numSplitsPerRegion, int expectedNumSplits, boolean setLocalityEnabledTo) throws Exception { - final TableName tableName = TableName.valueOf(name.getMethodName()); + final TableName tableName = TableName.valueOf(snapshotName + "_table"); try { createTableAndSnapshot(util, tableName, snapshotName, getStartRow(), getEndRow(), numRegions); @@ -254,9 +247,9 @@ public void testWithMockedMapReduce(HBaseTestingUtil util, String snapshotName, } @Test - public void testWithMockedMapReduceWithSplitsPerRegion() throws Exception { + public void testWithMockedMapReduceWithSplitsPerRegion(TestInfo testInfo) throws Exception { String snapshotName = "testWithMockedMapReduceMultiRegion"; - final TableName tableName = TableName.valueOf(name.getMethodName()); + final TableName tableName = TableName.valueOf(testInfo.getTestMethod().get().getName()); try { createTableAndSnapshot(UTIL, tableName, snapshotName, getStartRow(), getEndRow(), 10); @@ -279,9 +272,9 @@ public void testWithMockedMapReduceWithSplitsPerRegion() throws Exception { } @Test - public void testWithMockedMapReduceWithNoStartRowStopRow() throws Exception { + public void testWithMockedMapReduceWithNoStartRowStopRow(TestInfo testInfo) throws Exception { String snapshotName = "testWithMockedMapReduceMultiRegion"; - final TableName tableName = TableName.valueOf(name.getMethodName()); + final TableName tableName = TableName.valueOf(testInfo.getTestMethod().get().getName()); try { createTableAndSnapshot(UTIL, tableName, snapshotName, getStartRow(), 
getEndRow(), 10); @@ -306,8 +299,8 @@ public void testWithMockedMapReduceWithNoStartRowStopRow() throws Exception { } @Test - public void testScanLimit() throws Exception { - final TableName tableName = TableName.valueOf(name.getMethodName()); + public void testScanLimit(TestInfo testInfo) throws Exception { + final TableName tableName = TableName.valueOf(testInfo.getTestMethod().get().getName()); final String snapshotName = tableName + "Snapshot"; Table table = null; try { @@ -340,8 +333,8 @@ public void testScanLimit() throws Exception { TableMapReduceUtil.initTableSnapshotMapperJob(snapshotName, scan, RowCounter.RowCounterMapper.class, NullWritable.class, NullWritable.class, job, true, tmpTableDir); - Assert.assertTrue(job.waitForCompletion(true)); - Assert.assertEquals(10 * regionNum, + assertTrue(job.waitForCompletion(true)); + assertEquals(10 * regionNum, job.getCounters().findCounter(RowCounter.RowCounterMapper.Counters.ROWS).getValue()); } finally { if (table != null) { @@ -438,7 +431,7 @@ private void verifyWithMockedMapReduce(Job job, int numRegions, int expectedNumS TableSnapshotInputFormat tsif = new TableSnapshotInputFormat(); List splits = tsif.getSplits(job); - Assert.assertEquals(expectedNumSplits, splits.size()); + assertEquals(expectedNumSplits, splits.size()); HBaseTestingUtil.SeenRowTracker rowTracker = new HBaseTestingUtil.SeenRowTracker(startRow, stopRow.length > 0 ? 
stopRow : Bytes.toBytes("\uffff")); @@ -452,38 +445,35 @@ private void verifyWithMockedMapReduce(Job job, int numRegions, int expectedNumS for (int i = 0; i < splits.size(); i++) { // validate input split InputSplit split = splits.get(i); - Assert.assertTrue(split instanceof TableSnapshotRegionSplit); + assertTrue(split instanceof TableSnapshotRegionSplit); TableSnapshotRegionSplit snapshotRegionSplit = (TableSnapshotRegionSplit) split; if (localityEnabled) { - Assert.assertTrue(split.getLocations() != null && split.getLocations().length != 0); + assertTrue(split.getLocations() != null && split.getLocations().length != 0); if (byRegionLoc) { // When it uses region location from meta, the hostname will be "localhost", // the location from hdfs block location is "127.0.0.1". - Assert.assertEquals(1, split.getLocations().length); - Assert.assertTrue("Not using region location!", - split.getLocations()[0].equals("localhost")); + assertEquals(1, split.getLocations().length); + assertTrue(split.getLocations()[0].equals("localhost"), "Not using region location!"); } else { - Assert.assertTrue("Not using region location!", - split.getLocations()[0].equals("127.0.0.1")); + assertTrue(split.getLocations()[0].equals("127.0.0.1"), "Not using region location!"); } } else { - Assert.assertTrue(split.getLocations() != null && split.getLocations().length == 0); + assertTrue(split.getLocations() != null && split.getLocations().length == 0); } Scan scan = TableMapReduceUtil.convertStringToScan(snapshotRegionSplit.getDelegate().getScan()); if (startRow.length > 0) { - Assert.assertTrue( - Bytes.toStringBinary(startRow) + " should <= " + Bytes.toStringBinary(scan.getStartRow()), - Bytes.compareTo(startRow, scan.getStartRow()) <= 0); + assertTrue(Bytes.compareTo(startRow, scan.getStartRow()) <= 0, + Bytes.toStringBinary(startRow) + " should <= " + + Bytes.toStringBinary(scan.getStartRow())); } if (stopRow.length > 0) { - Assert.assertTrue( - Bytes.toStringBinary(stopRow) + " should >= 
" + Bytes.toStringBinary(scan.getStopRow()), - Bytes.compareTo(stopRow, scan.getStopRow()) >= 0); + assertTrue(Bytes.compareTo(stopRow, scan.getStopRow()) >= 0, + Bytes.toStringBinary(stopRow) + " should >= " + Bytes.toStringBinary(scan.getStopRow())); } - Assert.assertTrue("startRow should < stopRow", - Bytes.compareTo(scan.getStartRow(), scan.getStopRow()) < 0); + assertTrue(Bytes.compareTo(scan.getStartRow(), scan.getStopRow()) < 0, + "startRow should < stopRow"); // validate record reader TaskAttemptContext taskAttemptContext = mock(TaskAttemptContext.class); @@ -552,7 +542,7 @@ public static void doTestWithMapReduce(HBaseTestingUtil util, TableName tableNam job.setNumReduceTasks(1); job.setOutputFormatClass(NullOutputFormat.class); - Assert.assertTrue(job.waitForCompletion(true)); + assertTrue(job.waitForCompletion(true)); } finally { if (!shutdownCluster) { util.getAdmin().deleteSnapshot(snapshotName); @@ -579,9 +569,9 @@ public void testCleanRestoreDir() throws Exception { FileSystem fs = workingDir.getFileSystem(job.getConfiguration()); Path restorePath = new Path(job.getConfiguration().get("hbase.TableSnapshotInputFormat.restore.dir")); - Assert.assertTrue(fs.exists(restorePath)); + assertTrue(fs.exists(restorePath)); TableSnapshotInputFormat.cleanRestoreDir(job, snapshotName); - Assert.assertFalse(fs.exists(restorePath)); + assertFalse(fs.exists(restorePath)); } /** @@ -594,8 +584,8 @@ public void testCleanRestoreDir() throws Exception { * 4. Delete restored temporary directory 5. 
Configure a new job and verify that it fails */ @Test - public void testReadFromRestoredSnapshotViaMR() throws Exception { - final TableName tableName = TableName.valueOf(name.getMethodName()); + public void testReadFromRestoredSnapshotViaMR(TestInfo testInfo) throws Exception { + final TableName tableName = TableName.valueOf(testInfo.getTestMethod().get().getName()); final String snapshotName = tableName + "_snapshot"; try { if (UTIL.getAdmin().tableExists(tableName)) { @@ -618,7 +608,7 @@ public void testReadFromRestoredSnapshotViaMR() throws Exception { Path tempRestoreDir = UTIL.getDataTestDirOnTestFS("restore_" + snapshotName); RestoreSnapshotHelper.copySnapshotForScanner(UTIL.getConfiguration(), fs, rootDir, tempRestoreDir, snapshotName); - Assert.assertTrue("Restore directory should exist", fs.exists(tempRestoreDir)); + assertTrue(fs.exists(tempRestoreDir), "Restore directory should exist"); Job job = Job.getInstance(UTIL.getConfiguration()); job.setJarByClass(TestTableSnapshotInputFormat.class); @@ -636,13 +626,12 @@ public void testReadFromRestoredSnapshotViaMR() throws Exception { scan, TestTableSnapshotMapper.class, ImmutableBytesWritable.class, NullWritable.class, job, false, false, TableSnapshotInputFormat.class); TableMapReduceUtil.resetCacheConfig(conf); - Assert.assertTrue(job.waitForCompletion(true)); - Assert.assertTrue(job.isSuccessful()); + assertTrue(job.waitForCompletion(true)); + assertTrue(job.isSuccessful()); // Now verify that job fails when restore directory is deleted - Assert.assertTrue(fs.delete(tempRestoreDir, true)); - Assert.assertFalse("Restore directory should not exist after deletion", - fs.exists(tempRestoreDir)); + assertTrue(fs.delete(tempRestoreDir, true)); + assertFalse(fs.exists(tempRestoreDir), "Restore directory should not exist after deletion"); Job failureJob = Job.getInstance(UTIL.getConfiguration()); failureJob.setJarByClass(TestTableSnapshotInputFormat.class); 
TableMapReduceUtil.addDependencyJarsForClasses(failureJob.getConfiguration(), @@ -661,12 +650,12 @@ public void testReadFromRestoredSnapshotViaMR() throws Exception { TableSnapshotInputFormat.class); TableMapReduceUtil.resetCacheConfig(failureConf); - Assert.assertFalse("Restore directory should not exist before job execution", - fs.exists(tempRestoreDir)); + assertFalse(fs.exists(tempRestoreDir), + "Restore directory should not exist before job execution"); failureJob.waitForCompletion(true); - Assert.assertFalse("Job should fail since the restored snapshot directory is deleted", - failureJob.isSuccessful()); + assertFalse(failureJob.isSuccessful(), + "Job should fail since the restored snapshot directory is deleted"); } finally { try { diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableSplit.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableSplit.java index e61cb6c6de7b..7a259649798b 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableSplit.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableSplit.java @@ -17,38 +17,29 @@ */ package org.apache.hadoop.hbase.mapreduce; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; import java.util.HashSet; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.testclassification.MapReduceTests; import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.util.ReflectionUtils; -import org.junit.Assert; -import org.junit.ClassRule; -import org.junit.Rule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import 
org.junit.rules.TestName; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestInfo; -@Category({ MapReduceTests.class, SmallTests.class }) +@Tag(MapReduceTests.TAG) +@Tag(SmallTests.TAG) public class TestTableSplit { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestTableSplit.class); - - @Rule - public TestName name = new TestName(); @Test - public void testHashCode() { - TableSplit split1 = new TableSplit(TableName.valueOf(name.getMethodName()), + public void testHashCode(TestInfo testInfo) { + TableSplit split1 = new TableSplit(TableName.valueOf(testInfo.getTestMethod().get().getName()), Bytes.toBytes("row-start"), Bytes.toBytes("row-end"), "location"); - TableSplit split2 = new TableSplit(TableName.valueOf(name.getMethodName()), + TableSplit split2 = new TableSplit(TableName.valueOf(testInfo.getTestMethod().get().getName()), Bytes.toBytes("row-start"), Bytes.toBytes("row-end"), "location"); assertEquals(split1, split2); assertTrue(split1.hashCode() == split2.hashCode()); @@ -62,10 +53,10 @@ public void testHashCode() { * length of region should not influence hashcode */ @Test - public void testHashCode_length() { - TableSplit split1 = new TableSplit(TableName.valueOf(name.getMethodName()), + public void testHashCode_length(TestInfo testInfo) { + TableSplit split1 = new TableSplit(TableName.valueOf(testInfo.getTestMethod().get().getName()), Bytes.toBytes("row-start"), Bytes.toBytes("row-end"), "location", 1984); - TableSplit split2 = new TableSplit(TableName.valueOf(name.getMethodName()), + TableSplit split2 = new TableSplit(TableName.valueOf(testInfo.getTestMethod().get().getName()), Bytes.toBytes("row-start"), Bytes.toBytes("row-end"), "location", 1982); assertEquals(split1, split2); @@ -80,40 +71,41 @@ public void testHashCode_length() { * Length of region need to be properly serialized. 
*/ @Test - public void testLengthIsSerialized() throws Exception { - TableSplit split1 = new TableSplit(TableName.valueOf(name.getMethodName()), + public void testLengthIsSerialized(TestInfo testInfo) throws Exception { + TableSplit split1 = new TableSplit(TableName.valueOf(testInfo.getTestMethod().get().getName()), Bytes.toBytes("row-start"), Bytes.toBytes("row-end"), "location", 666); - TableSplit deserialized = new TableSplit(TableName.valueOf(name.getMethodName()), - Bytes.toBytes("row-start2"), Bytes.toBytes("row-end2"), "location1"); + TableSplit deserialized = + new TableSplit(TableName.valueOf(testInfo.getTestMethod().get().getName()), + Bytes.toBytes("row-start2"), Bytes.toBytes("row-end2"), "location1"); ReflectionUtils.copy(new Configuration(), split1, deserialized); - Assert.assertEquals(666, deserialized.getLength()); + assertEquals(666, deserialized.getLength()); } @Test - public void testToString() { - TableSplit split = new TableSplit(TableName.valueOf(name.getMethodName()), + public void testToString(TestInfo testInfo) { + TableSplit split = new TableSplit(TableName.valueOf(testInfo.getTestMethod().get().getName()), Bytes.toBytes("row-start"), Bytes.toBytes("row-end"), "location"); - String str = "Split(tablename=" + name.getMethodName() + ", startrow=row-start, " - + "endrow=row-end, regionLocation=location, " + "regionname=)"; - Assert.assertEquals(str, split.toString()); + String str = "Split(tablename=" + testInfo.getTestMethod().get().getName() + + ", startrow=row-start, " + "endrow=row-end, regionLocation=location, " + "regionname=)"; + assertEquals(str, split.toString()); - split = - new TableSplit(TableName.valueOf(name.getMethodName()), null, Bytes.toBytes("row-start"), - Bytes.toBytes("row-end"), "location", "encoded-region-name", 1000L); - str = "Split(tablename=" + name.getMethodName() + ", startrow=row-start, " + split = new TableSplit(TableName.valueOf(testInfo.getTestMethod().get().getName()), null, + Bytes.toBytes("row-start"), 
Bytes.toBytes("row-end"), "location", "encoded-region-name", + 1000L); + str = "Split(tablename=" + testInfo.getTestMethod().get().getName() + ", startrow=row-start, " + "endrow=row-end, regionLocation=location, " + "regionname=encoded-region-name)"; - Assert.assertEquals(str, split.toString()); + assertEquals(str, split.toString()); split = new TableSplit(null, null, null, null); str = "Split(tablename=null, startrow=null, " + "endrow=null, regionLocation=null, " + "regionname=)"; - Assert.assertEquals(str, split.toString()); + assertEquals(str, split.toString()); split = new TableSplit(null, null, null, null, null, null, 1000L); str = "Split(tablename=null, startrow=null, " + "endrow=null, regionLocation=null, " + "regionname=null)"; - Assert.assertEquals(str, split.toString()); + assertEquals(str, split.toString()); } } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTimeRangeMapRed.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTimeRangeMapRed.java index 596932edf24f..e66e8d0b6f55 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTimeRangeMapRed.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTimeRangeMapRed.java @@ -29,7 +29,6 @@ import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Admin; @@ -52,22 +51,18 @@ import org.apache.hadoop.io.Text; import org.apache.hadoop.mapreduce.Job; import org.apache.hadoop.mapreduce.lib.output.NullOutputFormat; -import org.junit.AfterClass; -import org.junit.Before; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.AfterAll; +import 
org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({ MapReduceTests.class, LargeTests.class }) +@Tag(MapReduceTests.TAG) +@Tag(LargeTests.TAG) public class TestTimeRangeMapRed { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestTimeRangeMapRed.class); - private final static Logger log = LoggerFactory.getLogger(TestTimeRangeMapRed.class); private static final HBaseTestingUtil UTIL = new HBaseTestingUtil(); private Admin admin; @@ -90,17 +85,17 @@ public class TestTimeRangeMapRed { static final byte[] FAMILY_NAME = Bytes.toBytes("text"); static final byte[] COLUMN_NAME = Bytes.toBytes("input"); - @BeforeClass + @BeforeAll public static void beforeClass() throws Exception { UTIL.startMiniCluster(); } - @AfterClass + @AfterAll public static void afterClass() throws Exception { UTIL.shutdownMiniCluster(); } - @Before + @BeforeEach public void before() throws Exception { this.admin = UTIL.getAdmin(); } @@ -199,7 +194,7 @@ private void verify(final Table table) throws IOException { log.debug(Bytes.toString(r.getRow()) + "\t" + Bytes.toString(CellUtil.cloneFamily(kv)) + "\t" + Bytes.toString(CellUtil.cloneQualifier(kv)) + "\t" + kv.getTimestamp() + "\t" + Bytes.toBoolean(CellUtil.cloneValue(kv))); - org.junit.Assert.assertEquals(TIMESTAMP.get(kv.getTimestamp()), + org.junit.jupiter.api.Assertions.assertEquals(TIMESTAMP.get(kv.getTimestamp()), Bytes.toBoolean(CellUtil.cloneValue(kv))); } } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALInputFormat.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALInputFormat.java index 930c8d11375f..92138e7dfe72 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALInputFormat.java +++ 
b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALInputFormat.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hbase.mapreduce; -import static org.junit.Assert.assertEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; import java.util.ArrayList; import java.util.List; @@ -25,7 +25,6 @@ import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.LocatedFileStatus; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.regionserver.HRegionServer; @@ -38,21 +37,17 @@ import org.apache.hadoop.mapreduce.Job; import org.apache.hadoop.mapreduce.JobContext; import org.apache.hadoop.mapreduce.lib.input.FileInputFormat; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; import org.mockito.Mockito; -@Category({ MapReduceTests.class, MediumTests.class }) +@Tag(MapReduceTests.TAG) +@Tag(MediumTests.TAG) public class TestWALInputFormat { private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestWALInputFormat.class); - - @BeforeClass + @BeforeAll public static void setupClass() throws Exception { TEST_UTIL.startMiniCluster(); TEST_UTIL.createWALRootDir(); diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALPlayer.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALPlayer.java index 220e9a3793cd..ce59ef1424bd 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALPlayer.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALPlayer.java @@ -21,9 +21,9 @@ 
import static org.hamcrest.CoreMatchers.notNullValue; import static org.hamcrest.CoreMatchers.nullValue; import static org.hamcrest.MatcherAssert.assertThat; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.mock; @@ -39,7 +39,6 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeyValue; @@ -68,24 +67,20 @@ import org.apache.hadoop.mapreduce.Mapper; import org.apache.hadoop.mapreduce.Mapper.Context; import org.apache.hadoop.util.ToolRunner; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Rule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.rules.TestName; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestInfo; import org.mockito.invocation.InvocationOnMock; import org.mockito.stubbing.Answer; /** * Basic test for the WALPlayer M/R tool */ -@Category({ MapReduceTests.class, LargeTests.class }) +@Tag(MapReduceTests.TAG) +@Tag(LargeTests.TAG) public class TestWALPlayer { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestWALPlayer.class); private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); private static SingleProcessHBaseCluster cluster; @@ -95,10 
+90,7 @@ public class TestWALPlayer { private static FileSystem logFs; private static Configuration conf; - @Rule - public TestName name = new TestName(); - - @BeforeClass + @BeforeAll public static void beforeClass() throws Exception { conf = TEST_UTIL.getConfiguration(); rootDir = TEST_UTIL.createRootDir(); @@ -108,7 +100,7 @@ public static void beforeClass() throws Exception { cluster = TEST_UTIL.startMiniCluster(); } - @AfterClass + @AfterAll public static void afterClass() throws Exception { TEST_UTIL.shutdownMiniCluster(); fs.delete(rootDir, true); @@ -148,8 +140,8 @@ public void testPlayingRecoveredEdit() throws Exception { * the resulting bulkloaded HFiles. See HBASE-27649 */ @Test - public void testWALPlayerBulkLoadWithOverriddenTimestamps() throws Exception { - final TableName tableName = TableName.valueOf(name.getMethodName() + "1"); + public void testWALPlayerBulkLoadWithOverriddenTimestamps(TestInfo testInfo) throws Exception { + final TableName tableName = TableName.valueOf(testInfo.getTestMethod().get().getName() + "1"); final byte[] family = Bytes.toBytes("family"); final byte[] column1 = Bytes.toBytes("c1"); final byte[] column2 = Bytes.toBytes("c2"); @@ -187,7 +179,7 @@ public void testWALPlayerBulkLoadWithOverriddenTimestamps() throws Exception { HConstants.HREGION_LOGDIR_NAME).toString(); Configuration configuration = new Configuration(TEST_UTIL.getConfiguration()); - String outPath = "/tmp/" + name.getMethodName(); + String outPath = "/tmp/" + testInfo.getTestMethod().get().getName(); configuration.set(WALPlayer.BULK_OUTPUT_CONF_KEY, outPath); configuration.setBoolean(WALPlayer.MULTI_TABLES_SUPPORT, true); @@ -229,9 +221,9 @@ public void testWALPlayerBulkLoadWithOverriddenTimestamps() throws Exception { * Simple end-to-end test */ @Test - public void testWALPlayer() throws Exception { - final TableName tableName1 = TableName.valueOf(name.getMethodName() + "1"); - final TableName tableName2 = TableName.valueOf(name.getMethodName() + "2"); + 
public void testWALPlayer(TestInfo testInfo) throws Exception { + final TableName tableName1 = TableName.valueOf(testInfo.getTestMethod().get().getName() + "1"); + final TableName tableName2 = TableName.valueOf(testInfo.getTestMethod().get().getName() + "2"); final byte[] FAMILY = Bytes.toBytes("family"); final byte[] COLUMN1 = Bytes.toBytes("c1"); final byte[] COLUMN2 = Bytes.toBytes("c2"); diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALRecordReader.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALRecordReader.java index 3a457ee4d9c1..f72348ac3fe9 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALRecordReader.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALRecordReader.java @@ -17,10 +17,11 @@ */ package org.apache.hadoop.hbase.mapreduce; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNotEquals; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; import java.io.IOException; import java.util.List; @@ -30,7 +31,6 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeyValue; @@ -56,25 +56,21 @@ import org.apache.hadoop.hbase.wal.WALKeyImpl; import org.apache.hadoop.mapreduce.InputSplit; import org.apache.hadoop.mapreduce.MapReduceTestUtil; -import org.junit.AfterClass; -import org.junit.Before; -import org.junit.BeforeClass; -import 
org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** * JUnit tests for the WALRecordReader */ -@Category({ MapReduceTests.class, MediumTests.class }) +@Tag(MapReduceTests.TAG) +@Tag(MediumTests.TAG) public class TestWALRecordReader { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestWALRecordReader.class); - private static final Logger LOG = LoggerFactory.getLogger(TestWALRecordReader.class); private final static HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); private static Configuration conf; @@ -102,14 +98,14 @@ private static String getServerName() { return serverName.toString(); } - @Before + @BeforeEach public void setUp() throws Exception { fs.delete(hbaseDir, true); walFs.delete(walRootDir, true); mvcc = new MultiVersionConcurrencyControl(); } - @BeforeClass + @BeforeAll public static void setUpBeforeClass() throws Exception { // Make block sizes small. conf = TEST_UTIL.getConfiguration(); @@ -126,7 +122,7 @@ public static void setUpBeforeClass() throws Exception { logDir = new Path(walRootDir, HConstants.HREGION_LOGDIR_NAME); } - @AfterClass + @AfterAll public static void tearDownAfterClass() throws Exception { fs.delete(hbaseDir, true); walFs.delete(walRootDir, true); @@ -316,10 +312,8 @@ private void testSplit(InputSplit split, byte[]... 
columns) throws Exception { !Bytes.equals(column, 0, column.length, cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength()) ) { - assertTrue( - "expected [" + Bytes.toString(column) + "], actual [" + Bytes.toString( - cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength()) + "]", - false); + fail("expected [" + Bytes.toString(column) + "], actual [" + Bytes.toString( + cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength()) + "]"); } } assertFalse(reader.nextKeyValue()); @@ -340,10 +334,8 @@ private void testSplitWithMovingWAL(InputSplit split, byte[] col1, byte[] col2) !Bytes.equals(col1, 0, col1.length, cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength()) ) { - assertTrue( - "expected [" + Bytes.toString(col1) + "], actual [" + Bytes.toString( - cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength()) + "]", - false); + fail("expected [" + Bytes.toString(col1) + "], actual [" + Bytes.toString( + cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength()) + "]"); } // Move log file to archive directory // While WAL record reader is open @@ -364,10 +356,8 @@ private void testSplitWithMovingWAL(InputSplit split, byte[] col1, byte[] col2) !Bytes.equals(col2, 0, col2.length, cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength()) ) { - assertTrue( - "expected [" + Bytes.toString(col2) + "], actual [" + Bytes.toString( - cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength()) + "]", - false); + fail("expected [" + Bytes.toString(col2) + "], actual [" + Bytes.toString( + cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength()) + "]"); } reader.close(); } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionTool.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionTool.java index 
a14febd21e6d..91efdca88989 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionTool.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionTool.java @@ -17,13 +17,12 @@ */ package org.apache.hadoop.hbase.regionserver; -import static org.junit.Assert.assertEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.TableName; @@ -33,19 +32,15 @@ import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.CommonFSUtils; import org.apache.hadoop.util.ToolRunner; -import org.junit.After; -import org.junit.Before; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; -@Category({ MediumTests.class, RegionServerTests.class }) +@Tag(MediumTests.TAG) +@Tag(RegionServerTests.TAG) public class TestCompactionTool { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestCompactionTool.class); - private final HBaseTestingUtil testUtil = new HBaseTestingUtil(); private HRegion region; @@ -53,7 +48,7 @@ public class TestCompactionTool { private Path rootDir; private final TableName tableName = TableName.valueOf(getClass().getSimpleName()); - @Before + @BeforeEach public void setUp() throws Exception { this.testUtil.startMiniCluster(); testUtil.createTable(tableName, HBaseTestingUtil.fam1); @@ -61,7 +56,7 @@ public void setUp() throws Exception { this.region = 
testUtil.getMiniHBaseCluster().getRegions(tableName).get(0); } - @After + @AfterEach public void tearDown() throws Exception { this.testUtil.shutdownMiniCluster(); testUtil.cleanupTestDir(); diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionToolNpeFix.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionToolNpeFix.java index b230fd6c4d93..21142f91f96c 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionToolNpeFix.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionToolNpeFix.java @@ -17,14 +17,13 @@ */ package org.apache.hadoop.hbase.regionserver; -import static org.junit.Assert.assertEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; import java.io.IOException; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.TableName; @@ -34,21 +33,17 @@ import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.CommonFSUtils; import org.apache.hadoop.util.ToolRunner; -import org.junit.After; -import org.junit.AfterClass; -import org.junit.Before; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; - -@Category({ MediumTests.class, RegionServerTests.class }) +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; + +@Tag(MediumTests.TAG) +@Tag(RegionServerTests.TAG) public class TestCompactionToolNpeFix { - @ClassRule - public static 
final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestCompactionToolNpeFix.class); - private static final HBaseTestingUtil TESTUTIL = new HBaseTestingUtil(); private HRegion region; @@ -56,7 +51,7 @@ public class TestCompactionToolNpeFix { private static Path rootDir; private final TableName tableName = TableName.valueOf(getClass().getSimpleName()); - @BeforeClass + @BeforeAll public static void setUpAfterClass() throws Exception { TESTUTIL.getConfiguration().setBoolean(MemStoreLAB.USEMSLAB_KEY, false); TESTUTIL.startMiniCluster(); @@ -65,7 +60,7 @@ public static void setUpAfterClass() throws Exception { } - @AfterClass + @AfterAll public static void tearDown() throws Exception { TESTUTIL.shutdownMiniMapReduceCluster(); TESTUTIL.shutdownMiniCluster(); @@ -73,13 +68,13 @@ public static void tearDown() throws Exception { } - @Before + @BeforeEach public void setUp() throws IOException { TESTUTIL.createTable(tableName, HBaseTestingUtil.fam1); this.region = TESTUTIL.getMiniHBaseCluster().getRegions(tableName).get(0); } - @After + @AfterEach public void after() throws IOException { TESTUTIL.deleteTable(tableName); } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplicationAdjunct.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplicationAdjunct.java index db7cead8c5db..fcef6ebaa477 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplicationAdjunct.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplicationAdjunct.java @@ -17,16 +17,15 @@ */ package org.apache.hadoop.hbase.replication; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static 
org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Admin; @@ -48,14 +47,11 @@ import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.CommonFSUtils; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; -import org.junit.AfterClass; -import org.junit.Before; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Rule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.rules.TestName; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -65,22 +61,16 @@ * We moved some of {@link TestVerifyReplicationZkClusterKey}'s tests here because it could take too * long to complete. In here we have miscellaneous. 
*/ -@Category({ ReplicationTests.class, LargeTests.class }) +@Tag(ReplicationTests.TAG) +@Tag(LargeTests.TAG) public class TestVerifyReplicationAdjunct extends TestReplicationBase { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestVerifyReplicationAdjunct.class); - private static final Logger LOG = LoggerFactory.getLogger(TestVerifyReplicationAdjunct.class); private static final String PEER_ID = "2"; private static final TableName peerTableName = TableName.valueOf("peerTest"); private static Table htable3; - @Rule - public TestName name = new TestName(); - @Override protected String getClusterKey(HBaseTestingUtil util) throws Exception { // TODO: VerifyReplication does not support connection uri yet, so here we need to use cluster @@ -89,13 +79,13 @@ protected String getClusterKey(HBaseTestingUtil util) throws Exception { return util.getClusterKey(); } - @Before + @BeforeEach public void setUp() throws Exception { cleanUp(); UTIL2.deleteTableData(peerTableName); } - @BeforeClass + @BeforeAll public static void setUpBeforeClass() throws Exception { TestReplicationBase.setUpBeforeClass(); TableDescriptor peerTable = @@ -261,31 +251,31 @@ public void testVerifyReplicationPrefixFiltering() throws Exception { public void testVerifyReplicationSnapshotArguments() { String[] args = new String[] { "--sourceSnapshotName=snapshot1", "2", tableName.getNameAsString() }; - assertFalse(Lists.newArrayList(args).toString(), new VerifyReplication().doCommandLine(args)); + assertFalse(new VerifyReplication().doCommandLine(args), Lists.newArrayList(args).toString()); args = new String[] { "--sourceSnapshotTmpDir=tmp", "2", tableName.getNameAsString() }; - assertFalse(Lists.newArrayList(args).toString(), new VerifyReplication().doCommandLine(args)); + assertFalse(new VerifyReplication().doCommandLine(args), Lists.newArrayList(args).toString()); args = new String[] { "--sourceSnapshotName=snapshot1", "--sourceSnapshotTmpDir=tmp", "2", 
tableName.getNameAsString() }; - assertTrue(Lists.newArrayList(args).toString(), new VerifyReplication().doCommandLine(args)); + assertTrue(new VerifyReplication().doCommandLine(args), Lists.newArrayList(args).toString()); args = new String[] { "--peerSnapshotName=snapshot1", "2", tableName.getNameAsString() }; - assertFalse(Lists.newArrayList(args).toString(), new VerifyReplication().doCommandLine(args)); + assertFalse(new VerifyReplication().doCommandLine(args), Lists.newArrayList(args).toString()); args = new String[] { "--peerSnapshotTmpDir=/tmp/", "2", tableName.getNameAsString() }; - assertFalse(Lists.newArrayList(args).toString(), new VerifyReplication().doCommandLine(args)); + assertFalse(new VerifyReplication().doCommandLine(args), Lists.newArrayList(args).toString()); args = new String[] { "--peerSnapshotName=snapshot1", "--peerSnapshotTmpDir=/tmp/", "--peerFSAddress=tempfs", "--peerHBaseRootAddress=hdfs://tempfs:50070/hbase/", "2", tableName.getNameAsString() }; - assertTrue(Lists.newArrayList(args).toString(), new VerifyReplication().doCommandLine(args)); + assertTrue(new VerifyReplication().doCommandLine(args), Lists.newArrayList(args).toString()); args = new String[] { "--sourceSnapshotName=snapshot1", "--sourceSnapshotTmpDir=/tmp/", "--peerSnapshotName=snapshot2", "--peerSnapshotTmpDir=/tmp/", "--peerFSAddress=tempfs", "--peerHBaseRootAddress=hdfs://tempfs:50070/hbase/", "2", tableName.getNameAsString() }; - assertTrue(Lists.newArrayList(args).toString(), new VerifyReplication().doCommandLine(args)); + assertTrue(new VerifyReplication().doCommandLine(args), Lists.newArrayList(args).toString()); } @Test @@ -352,7 +342,7 @@ public void testVerifyReplicationWithSnapshotSupport() throws Exception { TestVerifyReplicationZkClusterKey.checkRestoreTmpDir(CONF2, temPath2, 2); } - @AfterClass + @AfterAll public static void tearDownAfterClass() throws Exception { htable3.close(); TestReplicationBase.tearDownAfterClass(); diff --git 
a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplicationCrossDiffHdfs.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplicationCrossDiffHdfs.java index 9edc6245295c..b1ee77681552 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplicationCrossDiffHdfs.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplicationCrossDiffHdfs.java @@ -17,14 +17,13 @@ */ package org.apache.hadoop.hbase.replication; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.fail; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.fail; import java.io.IOException; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; @@ -47,23 +46,20 @@ import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster; import org.apache.hadoop.mapreduce.Job; -import org.junit.AfterClass; -import org.junit.Assert; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList; import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableMap; -@Category({ ReplicationTests.class, LargeTests.class }) +@Tag(ReplicationTests.TAG) +@Tag(LargeTests.TAG) public class 
TestVerifyReplicationCrossDiffHdfs { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestVerifyReplicationCrossDiffHdfs.class); private static final Logger LOG = LoggerFactory.getLogger(TestVerifyReplicationCrossDiffHdfs.class); @@ -80,7 +76,7 @@ public class TestVerifyReplicationCrossDiffHdfs { private static final String PEER_ID = "1"; private static final TableName TABLE_NAME = TableName.valueOf("testVerifyRepCrossDiffHDFS"); - @BeforeClass + @BeforeAll public static void setUpBeforeClass() throws Exception { conf1.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/1"); util1 = new HBaseTestingUtil(conf1); @@ -144,11 +140,11 @@ private static void loadSomeData() throws IOException, InterruptedException { } } } - Assert.assertNotNull(results); - Assert.assertEquals(10, results.length); + Assertions.assertNotNull(results); + Assertions.assertEquals(10, results.length); } - @AfterClass + @AfterAll public static void tearDownClass() throws Exception { if (mapReduceUtil != null) { mapReduceUtil.shutdownMiniCluster(); diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplicationRecompareRunnable.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplicationRecompareRunnable.java index 49c52fbcc3b3..ca75cfd01020 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplicationRecompareRunnable.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplicationRecompareRunnable.java @@ -17,13 +17,13 @@ */ package org.apache.hadoop.hbase.replication; -import static org.junit.Assert.assertEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.lenient; import static org.mockito.Mockito.when; import java.io.IOException; import java.util.concurrent.ThreadLocalRandom; -import 
org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.client.Get; import org.apache.hadoop.hbase.client.Result; @@ -37,22 +37,18 @@ import org.apache.hadoop.mapreduce.Counter; import org.apache.hadoop.mapreduce.Mapper; import org.apache.hadoop.mapreduce.counters.GenericCounter; -import org.junit.Before; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.runner.RunWith; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; import org.mockito.Mock; -import org.mockito.junit.MockitoJUnitRunner; +import org.mockito.junit.jupiter.MockitoExtension; -@Category({ ReplicationTests.class, SmallTests.class }) -@RunWith(MockitoJUnitRunner.class) +@Tag(ReplicationTests.TAG) +@Tag(SmallTests.TAG) +@ExtendWith(MockitoExtension.class) public class TestVerifyReplicationRecompareRunnable { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestVerifyReplicationRecompareRunnable.class); - @Mock private Table sourceTable; @@ -77,12 +73,12 @@ static byte[] genBytes() { return Bytes.toBytes(ThreadLocalRandom.current().nextInt()); } - @Before + @BeforeEach public void setUp() { for (VerifyReplication.Verifier.Counters counter : VerifyReplication.Verifier.Counters .values()) { Counter emptyCounter = new GenericCounter(counter.name(), counter.name()); - when(context.getCounter(counter)).thenReturn(emptyCounter); + lenient().when(context.getCounter(counter)).thenReturn(emptyCounter); } } @@ -136,7 +132,7 @@ public void itRecomparesBadRow() throws IOException { @Test public void itHandlesExceptionOnRecompare() throws IOException { when(sourceTable.get(any(Get.class))).thenThrow(new IOException("Error!")); - when(replicatedTable.get(any(Get.class))).thenReturn(genResult(5)); + 
lenient().when(replicatedTable.get(any(Get.class))).thenReturn(genResult(5)); VerifyReplicationRecompareRunnable runnable = new VerifyReplicationRecompareRunnable(context, genResult(5), null, VerifyReplication.Verifier.Counters.ONLY_IN_SOURCE_TABLE_ROWS, "", diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplicationRpcConnectionUri.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplicationRpcConnectionUri.java index 3e603ec41ac8..6df8a20cad01 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplicationRpcConnectionUri.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplicationRpcConnectionUri.java @@ -17,20 +17,15 @@ */ package org.apache.hadoop.hbase.replication; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.testclassification.ReplicationTests; -import org.junit.ClassRule; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.Tag; -@Category({ ReplicationTests.class, LargeTests.class }) +@Tag(ReplicationTests.TAG) +@Tag(LargeTests.TAG) public class TestVerifyReplicationRpcConnectionUri extends VerifyReplicationTestBase { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestVerifyReplicationRpcConnectionUri.class); - @Override protected String getClusterKey(HBaseTestingUtil util) throws Exception { return util.getRpcConnnectionURI(); diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplicationSecureClusterCredentials.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplicationSecureClusterCredentials.java index 6c1e77d609e5..a4b77a3293c0 100644 --- 
a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplicationSecureClusterCredentials.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplicationSecureClusterCredentials.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hbase.replication; -import static org.junit.Assert.assertEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; import java.io.File; import java.io.IOException; @@ -25,7 +25,6 @@ import java.util.Collection; import java.util.function.Supplier; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.coprocessor.CoprocessorHost; @@ -47,22 +46,15 @@ import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.token.TokenIdentifier; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -import org.junit.runners.Parameterized.Parameter; -import org.junit.runners.Parameterized.Parameters; - -@Category({ ReplicationTests.class, LargeTests.class }) -@RunWith(Parameterized.class) +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.MethodSource; + +@Tag(ReplicationTests.TAG) +@Tag(LargeTests.TAG) public class TestVerifyReplicationSecureClusterCredentials { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestVerifyReplicationSecureClusterCredentials.class); private static MiniKdc KDC; private static final HBaseTestingUtil UTIL1 = new HBaseTestingUtil(); 
@@ -105,7 +97,7 @@ private static void setupCluster(HBaseTestingUtil util) throws Exception { /** * Sets the security firstly for getting the correct default realm. */ - @BeforeClass + @BeforeAll public static void beforeClass() throws Exception { setUpKdcServer(); setupCluster(UTIL1); @@ -123,24 +115,21 @@ public static void beforeClass() throws Exception { } } - @AfterClass + @AfterAll public static void cleanup() throws IOException { UTIL1.shutdownMiniCluster(); UTIL2.shutdownMiniCluster(); } - @Parameters public static Collection> peer() { return Arrays.asList(() -> "1", () -> ZKConfig.getZooKeeperClusterKey(UTIL2.getConfiguration())); } - @Parameter - public Supplier peer; - - @Test + @ParameterizedTest + @MethodSource("peer") @SuppressWarnings("unchecked") - public void testJobCredentials() throws Exception { + public void testJobCredentials(Supplier peer) throws Exception { Job job = new VerifyReplication().createSubmittableJob( new Configuration(UTIL1.getConfiguration()), new String[] { peer.get(), "table" }); diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplicationZkClusterKey.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplicationZkClusterKey.java index 718cba231ff4..6d7307c60475 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplicationZkClusterKey.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplicationZkClusterKey.java @@ -17,20 +17,15 @@ */ package org.apache.hadoop.hbase.replication; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.testclassification.ReplicationTests; -import org.junit.ClassRule; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.Tag; -@Category({ ReplicationTests.class, LargeTests.class }) 
+@Tag(ReplicationTests.TAG) +@Tag(LargeTests.TAG) public class TestVerifyReplicationZkClusterKey extends VerifyReplicationTestBase { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestVerifyReplicationZkClusterKey.class); - @Override protected String getClusterKey(HBaseTestingUtil util) throws Exception { return util.getClusterKey(); diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplicationZkConnectionUri.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplicationZkConnectionUri.java index 046d2d06664c..401e36d00b79 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplicationZkConnectionUri.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplicationZkConnectionUri.java @@ -17,20 +17,15 @@ */ package org.apache.hadoop.hbase.replication; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.testclassification.ReplicationTests; -import org.junit.ClassRule; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.Tag; -@Category({ ReplicationTests.class, LargeTests.class }) +@Tag(ReplicationTests.TAG) +@Tag(LargeTests.TAG) public class TestVerifyReplicationZkConnectionUri extends VerifyReplicationTestBase { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestVerifyReplicationZkConnectionUri.class); - @Override protected String getClusterKey(HBaseTestingUtil util) throws Exception { return util.getZkConnectionURI(); diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/VerifyReplicationTestBase.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/VerifyReplicationTestBase.java index e263076677a5..9286953bb512 
100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/VerifyReplicationTestBase.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/VerifyReplicationTestBase.java @@ -17,11 +17,11 @@ */ package org.apache.hadoop.hbase.replication; -import static org.junit.Assert.assertArrayEquals; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; +import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; import java.io.IOException; import java.util.ArrayList; @@ -56,36 +56,30 @@ import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.mapreduce.Counters; import org.apache.hadoop.mapreduce.Job; -import org.junit.AfterClass; -import org.junit.Before; -import org.junit.BeforeClass; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.TestName; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestInfo; import org.slf4j.Logger; import org.slf4j.LoggerFactory; public abstract class VerifyReplicationTestBase extends TestReplicationBase { - private static final Logger LOG = - LoggerFactory.getLogger(TestVerifyReplicationZkClusterKey.class); + private static final Logger LOG = LoggerFactory.getLogger(VerifyReplicationTestBase.class); private static final String PEER_ID = "2"; private static final TableName peerTableName = TableName.valueOf("peerTest"); private static Table htable3; - @Rule - public TestName name = new TestName(); - - @Before + @BeforeEach public void setUp() throws Exception { 
cleanUp(); UTIL2.deleteTableData(peerTableName); } - @BeforeClass + @BeforeAll public static void setUpBeforeClass() throws Exception { - TestReplicationBase.setUpBeforeClass(); TableDescriptor peerTable = TableDescriptorBuilder.newBuilder(peerTableName) @@ -150,10 +144,10 @@ public void testVerifyRepJob() throws Exception { * delete marker is replicated, run verify replication with and without raw to check the results. */ @Test - public void testVerifyRepJobWithRawOptions() throws Exception { - LOG.info(name.getMethodName()); + public void testVerifyRepJobWithRawOptions(TestInfo testInfo) throws Exception { + LOG.info(testInfo.getTestMethod().get().getName()); - final TableName tableName = TableName.valueOf(name.getMethodName()); + final TableName tableName = TableName.valueOf(testInfo.getTestMethod().get().getName()); byte[] familyname = Bytes.toBytes("fam_raw"); byte[] row = Bytes.toBytes("row_raw"); @@ -464,18 +458,16 @@ public void testVerifyReplicationThreadedRecompares() throws Exception { "--recompareSleep=1", "--peerTableName=" + peerTableName.getNameAsString(), getClusterKey(UTIL2), tableName.getNameAsString() }; Counters counters = runVerifyReplication(args, NB_ROWS_IN_BATCH - 1, 3); - assertEquals( - counters.findCounter(VerifyReplication.Verifier.Counters.FAILED_RECOMPARE).getValue(), 9); - assertEquals(counters.findCounter(VerifyReplication.Verifier.Counters.RECOMPARES).getValue(), - 9); - assertEquals( - counters.findCounter(VerifyReplication.Verifier.Counters.ONLY_IN_PEER_TABLE_ROWS).getValue(), - 1); - assertEquals( - counters.findCounter(VerifyReplication.Verifier.Counters.CONTENT_DIFFERENT_ROWS).getValue(), - 1); - assertEquals(counters.findCounter(VerifyReplication.Verifier.Counters.ONLY_IN_SOURCE_TABLE_ROWS) - .getValue(), 1); + assertEquals(9, + counters.findCounter(VerifyReplication.Verifier.Counters.FAILED_RECOMPARE).getValue()); + assertEquals(9, + counters.findCounter(VerifyReplication.Verifier.Counters.RECOMPARES).getValue()); + 
assertEquals(1, + counters.findCounter(VerifyReplication.Verifier.Counters.ONLY_IN_PEER_TABLE_ROWS).getValue()); + assertEquals(1, + counters.findCounter(VerifyReplication.Verifier.Counters.CONTENT_DIFFERENT_ROWS).getValue()); + assertEquals(1, counters + .findCounter(VerifyReplication.Verifier.Counters.ONLY_IN_SOURCE_TABLE_ROWS).getValue()); } @Test @@ -509,18 +501,16 @@ public void testFailsRemainingComparesAfterShutdown() throws Exception { getClusterKey(UTIL2), tableName.getNameAsString() }; Counters counters = runVerifyReplication(args, NB_ROWS_IN_BATCH - 1, 3); - assertEquals( - counters.findCounter(VerifyReplication.Verifier.Counters.FAILED_RECOMPARE).getValue(), 3); - assertEquals(counters.findCounter(VerifyReplication.Verifier.Counters.RECOMPARES).getValue(), - 3); - assertEquals( - counters.findCounter(VerifyReplication.Verifier.Counters.ONLY_IN_PEER_TABLE_ROWS).getValue(), - 1); - assertEquals( - counters.findCounter(VerifyReplication.Verifier.Counters.CONTENT_DIFFERENT_ROWS).getValue(), - 1); - assertEquals(counters.findCounter(VerifyReplication.Verifier.Counters.ONLY_IN_SOURCE_TABLE_ROWS) - .getValue(), 1); + assertEquals(3, + counters.findCounter(VerifyReplication.Verifier.Counters.FAILED_RECOMPARE).getValue()); + assertEquals(3, + counters.findCounter(VerifyReplication.Verifier.Counters.RECOMPARES).getValue()); + assertEquals(1, + counters.findCounter(VerifyReplication.Verifier.Counters.ONLY_IN_PEER_TABLE_ROWS).getValue()); + assertEquals(1, + counters.findCounter(VerifyReplication.Verifier.Counters.CONTENT_DIFFERENT_ROWS).getValue()); + assertEquals(1, counters + .findCounter(VerifyReplication.Verifier.Counters.ONLY_IN_SOURCE_TABLE_ROWS).getValue()); } @Test @@ -547,21 +537,19 @@ public void testVerifyReplicationSynchronousRecompares() throws Exception { "--peerTableName=" + peerTableName.getNameAsString(), getClusterKey(UTIL2), tableName.getNameAsString() }; Counters counters = runVerifyReplication(args, NB_ROWS_IN_BATCH - 1, 3); - assertEquals( - 
counters.findCounter(VerifyReplication.Verifier.Counters.FAILED_RECOMPARE).getValue(), 9); - assertEquals(counters.findCounter(VerifyReplication.Verifier.Counters.RECOMPARES).getValue(), - 9); - assertEquals( - counters.findCounter(VerifyReplication.Verifier.Counters.ONLY_IN_PEER_TABLE_ROWS).getValue(), - 1); - assertEquals( - counters.findCounter(VerifyReplication.Verifier.Counters.CONTENT_DIFFERENT_ROWS).getValue(), - 1); - assertEquals(counters.findCounter(VerifyReplication.Verifier.Counters.ONLY_IN_SOURCE_TABLE_ROWS) - .getValue(), 1); + assertEquals(9, + counters.findCounter(VerifyReplication.Verifier.Counters.FAILED_RECOMPARE).getValue()); + assertEquals(9, + counters.findCounter(VerifyReplication.Verifier.Counters.RECOMPARES).getValue()); + assertEquals(1, + counters.findCounter(VerifyReplication.Verifier.Counters.ONLY_IN_PEER_TABLE_ROWS).getValue()); + assertEquals(1, + counters.findCounter(VerifyReplication.Verifier.Counters.CONTENT_DIFFERENT_ROWS).getValue()); + assertEquals(1, counters + .findCounter(VerifyReplication.Verifier.Counters.ONLY_IN_SOURCE_TABLE_ROWS).getValue()); } - @AfterClass + @AfterAll public static void tearDownAfterClass() throws Exception { htable3.close(); TestReplicationBase.tearDownAfterClass(); diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshotV2NoCluster.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshotV2NoCluster.java index 21679efeb82f..da4ef689d449 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshotV2NoCluster.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshotV2NoCluster.java @@ -39,6 +39,7 @@ @Tag(MapReduceTests.TAG) @Tag(SmallTests.TAG) public class TestExportSnapshotV2NoCluster { + private static final Logger LOG = LoggerFactory.getLogger(TestExportSnapshotV2NoCluster.class); private HBaseCommonTestingUtil testUtil = new HBaseCommonTestingUtil(); diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/replication/TestMigrateReplicationQueue.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/replication/TestMigrateReplicationQueue.java index 5404cfab05be..450eb67b8178 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/replication/TestMigrateReplicationQueue.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/replication/TestMigrateReplicationQueue.java @@ -20,15 +20,14 @@ import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.not; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; import java.io.IOException; import java.util.Collections; import java.util.List; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; @@ -49,18 +48,38 @@ import org.apache.hadoop.hbase.zookeeper.ZKUtil; import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.apache.hadoop.hbase.zookeeper.ZNodePaths; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; import org.apache.hbase.thirdparty.com.google.common.collect.Iterables; -@Category({ MasterTests.class, LargeTests.class }) +@Tag(MasterTests.TAG) +@Tag(LargeTests.TAG) public class TestMigrateReplicationQueue extends 
TestReplicationBase { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestMigrateReplicationQueue.class); + @BeforeAll + public static void setUpBeforeClass() throws Exception { + TestReplicationBase.setUpBeforeClass(); + } + + @AfterAll + public static void tearDownAfterClass() throws Exception { + TestReplicationBase.tearDownAfterClass(); + } + + @BeforeEach + public void setUp() throws Exception { + setUpBase(); + } + + @AfterEach + public void tearDown() throws Exception { + tearDownBase(); + } private int disableAndInsert() throws Exception { UTIL1.getAdmin().disableReplicationPeer(PEER_ID2); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBulkLoadReplication.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBulkLoadReplication.java index 55df08e1ef13..5266ca6d7d1f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBulkLoadReplication.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBulkLoadReplication.java @@ -19,9 +19,9 @@ import static org.apache.hadoop.hbase.HConstants.REPLICATION_CLUSTER_ID; import static org.apache.hadoop.hbase.HConstants.REPLICATION_CONF_DIR; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotEquals; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; import java.io.File; import java.io.FileOutputStream; @@ -42,7 +42,6 @@ import org.apache.hadoop.hbase.CellBuilderType; import org.apache.hadoop.hbase.ExtendedCellBuilder; import org.apache.hadoop.hbase.ExtendedCellBuilderFactory; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeyValue; @@ 
-72,14 +71,12 @@ import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.hdfs.MiniDFSCluster; -import org.junit.Before; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Rule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.rules.TemporaryFolder; -import org.junit.rules.TestName; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.io.TempDir; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -93,13 +90,10 @@ * of the clusters. This CP counts the amount of times bulk load actually gets invoked, certifying * we are not entering the infinite loop condition addressed by HBASE-22380. */ -@Category({ ReplicationTests.class, MediumTests.class }) +@Tag(ReplicationTests.TAG) +@Tag(MediumTests.TAG) public class TestBulkLoadReplication extends TestReplicationBase { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestBulkLoadReplication.class); - protected static final Logger LOG = LoggerFactory.getLogger(TestBulkLoadReplication.class); private static final String PEER1_CLUSTER_ID = "peer1"; @@ -119,18 +113,18 @@ public class TestBulkLoadReplication extends TestReplicationBase { private static Table htable3; - @Rule - public TestName name = new TestName(); - - @ClassRule - public static TemporaryFolder testFolder = new TemporaryFolder(); - private static ReplicationQueueStorage queueStorage; private static boolean replicationPeersAdded = false; - @BeforeClass + private static File testFolderRoot; + + @TempDir + static File tempDir; + + @BeforeAll public static void setUpBeforeClass() throws Exception { + testFolderRoot = tempDir; setupBulkLoadConfigsForCluster(CONF1, PEER1_CLUSTER_ID); setupBulkLoadConfigsForCluster(CONF2, 
PEER2_CLUSTER_ID); setupBulkLoadConfigsForCluster(CONF3, PEER3_CLUSTER_ID); @@ -159,16 +153,20 @@ private static void startThirdCluster() throws Exception { htable3 = connection3.getTable(tableName); } - @Before - @Override - public void setUpBase() throws Exception { + @AfterAll + public static void tearDownAfterClass() throws Exception { + TestReplicationBase.tearDownAfterClass(); + } + + @BeforeEach + public void setUp() throws Exception { + setUpBase(); // removing the peer and adding again causing the previously completed bulk load jobs getting // submitted again, adding a check to add the peers only once. if (!replicationPeersAdded) { // "super.setUpBase()" already sets replication from 1->2, // then on the subsequent lines, sets 2->1, 2->3 and 3->2. // So we have following topology: "1 <-> 2 <->3" - super.setUpBase(); ReplicationPeerConfig peer1Config = getPeerConfigForCluster(UTIL1); ReplicationPeerConfig peer2Config = getPeerConfigForCluster(UTIL2); ReplicationPeerConfig peer3Config = getPeerConfigForCluster(UTIL3); @@ -214,10 +212,11 @@ protected static void setupBulkLoadConfigsForCluster(Configuration config, String clusterReplicationId) throws Exception { config.setBoolean(HConstants.REPLICATION_BULKLOAD_ENABLE_KEY, true); config.set(REPLICATION_CLUSTER_ID, clusterReplicationId); - File sourceConfigFolder = testFolder.newFolder(clusterReplicationId); + File sourceConfigFolder = new File(testFolderRoot, clusterReplicationId); + sourceConfigFolder.mkdirs(); File sourceConfigFile = new File(sourceConfigFolder.getAbsolutePath() + "/hbase-site.xml"); config.writeXml(new FileOutputStream(sourceConfigFile)); - config.set(REPLICATION_CONF_DIR, testFolder.getRoot().getAbsolutePath()); + config.set(REPLICATION_CONF_DIR, testFolderRoot.getAbsolutePath()); } @Test @@ -290,7 +289,7 @@ private String createHFileForFamilies(byte[] row, byte[] value, Configuration cl HFile.WriterFactory hFileFactory = HFile.getWriterFactoryNoCache(clusterConfig); // TODO We need a way 
to do this without creating files - File hFileLocation = testFolder.newFile(); + File hFileLocation = new File(testFolderRoot, "hfile_" + System.currentTimeMillis()); FSDataOutputStream out = new FSDataOutputStream(new FileOutputStream(hFileLocation), null); try { hFileFactory.withOutputStream(out); @@ -376,7 +375,7 @@ private String createHFileForNoRepFamilies(byte[] row, byte[] value, Configurati HFile.WriterFactory hFileFactory = HFile.getWriterFactoryNoCache(clusterConfig); // TODO We need a way to do this without creating files - File hFileLocation = testFolder.newFile(); + File hFileLocation = new File(testFolderRoot, "hfile_norep_" + System.currentTimeMillis()); FSDataOutputStream out = new FSDataOutputStream(new FileOutputStream(hFileLocation), null); try { hFileFactory.withOutputStream(out); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBulkLoadReplicationHFileRefs.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBulkLoadReplicationHFileRefs.java index 067b3c45e162..7af72cb5c260 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBulkLoadReplicationHFileRefs.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBulkLoadReplicationHFileRefs.java @@ -19,9 +19,9 @@ import static org.apache.hadoop.hbase.HConstants.REPLICATION_CLUSTER_ID; import static org.apache.hadoop.hbase.HConstants.REPLICATION_CONF_DIR; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; import java.io.File; import java.io.FileOutputStream; @@ -35,7 +35,6 @@ import org.apache.hadoop.hbase.CellBuilderType; import org.apache.hadoop.hbase.ExtendedCellBuilder; import org.apache.hadoop.hbase.ExtendedCellBuilderFactory; 
-import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.NamespaceDescriptor; @@ -60,26 +59,22 @@ import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Threads; import org.apache.hadoop.hdfs.MiniDFSCluster; -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.rules.TemporaryFolder; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.io.TempDir; import org.apache.hbase.thirdparty.com.google.common.collect.Lists; import org.apache.hbase.thirdparty.com.google.common.collect.Maps; import org.apache.hbase.thirdparty.com.google.common.collect.Sets; -@Category({ ReplicationTests.class, SmallTests.class }) +@Tag(ReplicationTests.TAG) +@Tag(SmallTests.TAG) public class TestBulkLoadReplicationHFileRefs extends TestReplicationBase { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestBulkLoadReplicationHFileRefs.class); - private static final String PEER1_CLUSTER_ID = "peer1"; private static final String PEER2_CLUSTER_ID = "peer2"; @@ -96,9 +91,6 @@ public class TestBulkLoadReplicationHFileRefs extends TestReplicationBase { private byte[] qualifier = Bytes.toBytes("q1"); private byte[] value = Bytes.toBytes("v1"); - @ClassRule - public static TemporaryFolder testFolder = new TemporaryFolder(); - private static final Path BULK_LOAD_BASE_DIR = new Path("/bulk_dir"); private static Admin admin1; @@ -106,8 +98,14 @@ public class TestBulkLoadReplicationHFileRefs extends TestReplicationBase { private static 
ReplicationQueueStorage queueStorage; - @BeforeClass + private static File testFolderRoot; + + @TempDir + static File tempDir; + + @BeforeAll public static void setUpBeforeClass() throws Exception { + testFolderRoot = tempDir; setupBulkLoadConfigsForCluster(CONF1, PEER1_CLUSTER_ID); setupBulkLoadConfigsForCluster(CONF2, PEER2_CLUSTER_ID); TestReplicationBase.setUpBeforeClass(); @@ -123,24 +121,31 @@ public static void setUpBeforeClass() throws Exception { admin2.createNamespace(NamespaceDescriptor.create(NO_REPLICATE_NAMESPACE).build()); } + @AfterAll + public static void tearDownAfterClass() throws Exception { + TestReplicationBase.tearDownAfterClass(); + } + protected static void setupBulkLoadConfigsForCluster(Configuration config, String clusterReplicationId) throws Exception { config.setBoolean(HConstants.REPLICATION_BULKLOAD_ENABLE_KEY, true); config.set(REPLICATION_CLUSTER_ID, clusterReplicationId); - File sourceConfigFolder = testFolder.newFolder(clusterReplicationId); + File sourceConfigFolder = new File(testFolderRoot, clusterReplicationId); + sourceConfigFolder.mkdirs(); File sourceConfigFile = new File(sourceConfigFolder.getAbsolutePath() + "/hbase-site.xml"); config.writeXml(new FileOutputStream(sourceConfigFile)); - config.set(REPLICATION_CONF_DIR, testFolder.getRoot().getAbsolutePath()); + config.set(REPLICATION_CONF_DIR, testFolderRoot.getAbsolutePath()); } - @Before + @BeforeEach public void setUp() throws Exception { + setUpBase(); for (ReplicationPeerDescription peer : admin1.listReplicationPeers()) { admin1.removeReplicationPeer(peer.getPeerId()); } } - @After + @AfterEach public void teardown() throws Exception { for (ReplicationPeerDescription peer : admin1.listReplicationPeers()) { admin1.removeReplicationPeer(peer.getPeerId()); @@ -151,6 +156,7 @@ public void teardown() throws Exception { for (TableName tableName : admin2.listTableNames()) { UTIL2.deleteTable(tableName); } + tearDownBase(); } @Test @@ -164,9 +170,9 @@ public void 
testWhenExcludeCF() throws Exception { ReplicationPeerConfig.newBuilder().setClusterKey(UTIL2.getRpcConnnectionURI()) .setReplicateAllUserTables(true).setExcludeTableCFsMap(excludeTableCFs).build(); admin1.addReplicationPeer(PEER_ID2, peerConfig); - Assert.assertTrue(peerConfig.needToReplicate(REPLICATE_TABLE)); - Assert.assertTrue(peerConfig.needToReplicate(REPLICATE_TABLE, CF_A)); - Assert.assertFalse(peerConfig.needToReplicate(REPLICATE_TABLE, CF_B)); + assertTrue(peerConfig.needToReplicate(REPLICATE_TABLE)); + assertTrue(peerConfig.needToReplicate(REPLICATE_TABLE, CF_A)); + assertFalse(peerConfig.needToReplicate(REPLICATE_TABLE, CF_B)); assertEquals(0, queueStorage.getAllHFileRefs().size()); @@ -261,7 +267,7 @@ private String createHFileForFamilies(byte[] family) throws IOException { .setType(Cell.Type.Put); HFile.WriterFactory hFileFactory = HFile.getWriterFactoryNoCache(UTIL1.getConfiguration()); - File hFileLocation = testFolder.newFile(); + File hFileLocation = new File(testFolderRoot, "hfile_" + System.currentTimeMillis()); FSDataOutputStream out = new FSDataOutputStream(new FileOutputStream(hFileLocation), null); try { hFileFactory.withOutputStream(out); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/ReplicationDroppedTablesTestBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/ReplicationDroppedTablesTestBase.java index c01df9545e40..fc41ef253d23 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/ReplicationDroppedTablesTestBase.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/ReplicationDroppedTablesTestBase.java @@ -18,7 +18,8 @@ package org.apache.hadoop.hbase.replication; import static org.apache.hadoop.hbase.replication.regionserver.HBaseInterClusterReplicationEndpoint.REPLICATION_DROP_ON_DELETED_TABLE_KEY; -import static org.junit.Assert.fail; +import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static 
org.junit.jupiter.api.Assertions.fail; import java.io.IOException; import java.util.concurrent.ThreadLocalRandom; @@ -37,7 +38,6 @@ import org.apache.hadoop.hbase.client.TableDescriptorBuilder; import org.apache.hadoop.hbase.ipc.RpcServer; import org.apache.hadoop.hbase.util.Bytes; -import org.junit.Assert; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -149,7 +149,7 @@ private boolean peerHasAllNormalRows() throws IOException { return false; } for (int i = 0; i < results.length; i++) { - Assert.assertArrayEquals(generateRowKey(i), results[i].getRow()); + assertArrayEquals(generateRowKey(i), results[i].getRow()); } return true; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestBidirectionSerialReplicationStuck.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestBidirectionSerialReplicationStuck.java index f069d6b1095b..2b93c47d89cd 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestBidirectionSerialReplicationStuck.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestBidirectionSerialReplicationStuck.java @@ -17,38 +17,47 @@ */ package org.apache.hadoop.hbase.replication; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.client.Get; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.testclassification.ReplicationTests; import org.apache.hadoop.hbase.util.Bytes; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; -@Category({ ReplicationTests.class, LargeTests.class }) +@Tag(ReplicationTests.TAG) +@Tag(LargeTests.TAG) public class 
TestBidirectionSerialReplicationStuck extends TestReplicationBase { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestBidirectionSerialReplicationStuck.class); + @BeforeAll + public static void setUpBeforeClass() throws Exception { + TestReplicationBase.setUpBeforeClass(); + } + + @AfterAll + public static void tearDownAfterClass() throws Exception { + TestReplicationBase.tearDownAfterClass(); + } @Override protected boolean isSerialPeer() { return true; } - @Override - public void setUpBase() throws Exception { + @BeforeEach + public void setUp() throws Exception { UTIL1.ensureSomeRegionServersAvailable(2); hbaseAdmin.balancerSwitch(false, true); addPeer(PEER_ID2, tableName, UTIL1, UTIL2); addPeer(PEER_ID2, tableName, UTIL2, UTIL1); } - @Override - public void tearDownBase() throws Exception { + @AfterEach + public void tearDown() throws Exception { removePeer(PEER_ID2, UTIL1); removePeer(PEER_ID2, UTIL2); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestClaimReplicationQueue.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestClaimReplicationQueue.java index de226b13e8fc..e4af27499711 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestClaimReplicationQueue.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestClaimReplicationQueue.java @@ -21,7 +21,6 @@ import java.util.Collections; import java.util.List; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; @@ -35,11 +34,12 @@ import org.apache.hadoop.hbase.procedure2.Procedure; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.testclassification.ReplicationTests; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import 
org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; import org.apache.hbase.thirdparty.com.google.common.io.Closeables; @@ -49,13 +49,10 @@ * In HBASE-26029, we reimplement the claim queue operation with proc-v2 and make it a step in SCP, * this is a UT to make sure the {@link AssignReplicationQueuesProcedure} works correctly. */ -@Category({ ReplicationTests.class, LargeTests.class }) +@Tag(ReplicationTests.TAG) +@Tag(LargeTests.TAG) public class TestClaimReplicationQueue extends TestReplicationBase { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestClaimReplicationQueue.class); - private static final TableName tableName3 = TableName.valueOf("test3"); private static final String PEER_ID3 = "3"; @@ -100,7 +97,7 @@ protected ServerManager createServerManager(MasterServices master, RegionServerL } } - @BeforeClass + @BeforeAll public static void setUpBeforeClass() throws Exception { CONF1.setClass(HConstants.MASTER_IMPL, HMasterForTest.class, HMaster.class); TestReplicationBase.setUpBeforeClass(); @@ -109,25 +106,25 @@ public static void setUpBeforeClass() throws Exception { table4 = connection2.getTable(tableName3); } - @AfterClass + @AfterAll public static void tearDownAfterClass() throws Exception { Closeables.close(table3, true); Closeables.close(table4, true); TestReplicationBase.tearDownAfterClass(); } - @Override - public void setUpBase() throws Exception { - super.setUpBase(); + @BeforeEach + public void setUp() throws Exception { + setUpBase(); // set up two replication peers and only 1 rs to test claim replication queue with multiple // round addPeer(PEER_ID3, tableName3); } - @Override - public void tearDownBase() throws Exception 
{ - super.tearDownBase(); + @AfterEach + public void tearDown() throws Exception { removePeer(PEER_ID3); + tearDownBase(); } @Test diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestEditsBehindDroppedTableTiming.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestEditsBehindDroppedTableTiming.java index 76ddbf5b6470..f90f357956ce 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestEditsBehindDroppedTableTiming.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestEditsBehindDroppedTableTiming.java @@ -17,7 +17,6 @@ */ package org.apache.hadoop.hbase.replication; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Admin; @@ -31,23 +30,37 @@ import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.testclassification.ReplicationTests; import org.apache.hadoop.hbase.util.Bytes; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; -@Category({ ReplicationTests.class, LargeTests.class }) +@Tag(ReplicationTests.TAG) +@Tag(LargeTests.TAG) public class TestEditsBehindDroppedTableTiming extends ReplicationDroppedTablesTestBase { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestEditsBehindDroppedTableTiming.class); - - @BeforeClass + @BeforeAll public static void setUpBeforeClass() throws Exception { setupClusters(true); } + @AfterAll + public static void tearDownAfterClass() throws Exception { + TestReplicationBase.tearDownAfterClass(); + } + + 
@BeforeEach + public void setUp() throws Exception { + setUpBase(); + } + + @AfterEach + public void tearDown() throws Exception { + tearDownBase(); + } + @Test public void testEditsBehindDroppedTableTiming() throws Exception { TableName tablename = TableName.valueOf("testdroppedtimed"); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestEditsDroppedWithDroppedTable.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestEditsDroppedWithDroppedTable.java index 9548d3a56b55..17c48834832b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestEditsDroppedWithDroppedTable.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestEditsDroppedWithDroppedTable.java @@ -17,26 +17,39 @@ */ package org.apache.hadoop.hbase.replication; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.testclassification.ReplicationTests; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; -@Category({ ReplicationTests.class, LargeTests.class }) +@Tag(ReplicationTests.TAG) +@Tag(LargeTests.TAG) public class TestEditsDroppedWithDroppedTable extends ReplicationDroppedTablesTestBase { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestEditsDroppedWithDroppedTable.class); - - @BeforeClass + @BeforeAll public static void setUpBeforeClass() throws Exception { setupClusters(true); } + @AfterAll + public static void tearDownAfterClass() throws Exception { + TestReplicationBase.tearDownAfterClass(); + } + + @BeforeEach + public void setUp() throws 
Exception { + setUpBase(); + } + + @AfterEach + public void tearDown() throws Exception { + tearDownBase(); + } + @Test public void testEditsDroppedWithDroppedTable() throws Exception { // Make sure by default edits for dropped tables are themselves dropped when the diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestEditsDroppedWithDroppedTableNS.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestEditsDroppedWithDroppedTableNS.java index aa153471ca13..da5ae3db26fe 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestEditsDroppedWithDroppedTableNS.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestEditsDroppedWithDroppedTableNS.java @@ -17,23 +17,21 @@ */ package org.apache.hadoop.hbase.replication; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.NamespaceDescriptor; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.testclassification.ReplicationTests; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; -@Category({ ReplicationTests.class, LargeTests.class }) +@Tag(ReplicationTests.TAG) +@Tag(LargeTests.TAG) public class TestEditsDroppedWithDroppedTableNS extends ReplicationDroppedTablesTestBase { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestEditsDroppedWithDroppedTableNS.class); - - @BeforeClass + @BeforeAll public static void setUpBeforeClass() throws Exception { setupClusters(true); // also try with a namespace @@ -41,6 +39,21 @@ public static void setUpBeforeClass() throws Exception { 
UTIL2.getAdmin().createNamespace(NamespaceDescriptor.create("NS").build()); } + @AfterAll + public static void tearDownAfterClass() throws Exception { + TestReplicationBase.tearDownAfterClass(); + } + + @BeforeEach + public void setUp() throws Exception { + setUpBase(); + } + + @AfterEach + public void tearDown() throws Exception { + tearDownBase(); + } + @Test public void testEditsDroppedWithDroppedTableNS() throws Exception { testEditsBehindDroppedTable("NS:test_dropped"); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestEditsStuckBehindDroppedTable.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestEditsStuckBehindDroppedTable.java index 924ba72861bd..f459862cac76 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestEditsStuckBehindDroppedTable.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestEditsStuckBehindDroppedTable.java @@ -17,26 +17,39 @@ */ package org.apache.hadoop.hbase.replication; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.testclassification.ReplicationTests; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; -@Category({ ReplicationTests.class, LargeTests.class }) +@Tag(ReplicationTests.TAG) +@Tag(LargeTests.TAG) public class TestEditsStuckBehindDroppedTable extends ReplicationDroppedTablesTestBase { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestEditsStuckBehindDroppedTable.class); - - @BeforeClass + @BeforeAll public static void setUpBeforeClass() throws Exception { 
setupClusters(false); } + @AfterAll + public static void tearDownAfterClass() throws Exception { + TestReplicationBase.tearDownAfterClass(); + } + + @BeforeEach + public void setUp() throws Exception { + setUpBase(); + } + + @AfterEach + public void tearDown() throws Exception { + tearDownBase(); + } + @Test public void testEditsStuckBehindDroppedTable() throws Exception { // Sanity check Make sure by default edits for dropped tables stall the replication queue, even diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestNamespaceReplication.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestNamespaceReplication.java index 8fe01dcd17da..31604aa6ea58 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestNamespaceReplication.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestNamespaceReplication.java @@ -17,9 +17,9 @@ */ package org.apache.hadoop.hbase.replication; -import static org.junit.Assert.assertArrayEquals; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.fail; +import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.fail; import java.util.ArrayList; import java.util.HashMap; @@ -27,7 +27,8 @@ import java.util.List; import java.util.Map; import java.util.Set; -import org.apache.hadoop.hbase.HBaseClassTestRule; +import java.util.stream.Stream; +import org.apache.hadoop.hbase.HBaseParameterizedTestTemplate; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.NamespaceDescriptor; import org.apache.hadoop.hbase.TableName; @@ -44,28 +45,20 @@ import org.apache.hadoop.hbase.client.TableDescriptorBuilder; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.util.Bytes; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.ClassRule; 
-import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -import org.junit.runners.Parameterized.Parameter; -import org.junit.runners.Parameterized.Parameters; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.TestTemplate; +import org.junit.jupiter.params.provider.Arguments; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList; -@RunWith(Parameterized.class) -@Category({ LargeTests.class }) +@Tag(LargeTests.TAG) +@HBaseParameterizedTestTemplate(name = "{index}: serialPeer={0}") public class TestNamespaceReplication extends TestReplicationBase { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestNamespaceReplication.class); - private static final Logger LOG = LoggerFactory.getLogger(TestNamespaceReplication.class); private static String ns1 = "ns1"; @@ -84,20 +77,22 @@ public class TestNamespaceReplication extends TestReplicationBase { private static Admin admin1; private static Admin admin2; - @Parameter public boolean serialPeer; + public TestNamespaceReplication(boolean serialPeer) { + this.serialPeer = serialPeer; + } + @Override protected boolean isSerialPeer() { return serialPeer; } - @Parameters(name = "{index}: serialPeer={0}") - public static List parameters() { - return ImmutableList.of(true, false); + public static Stream parameters() { + return ImmutableList.of(true, false).stream().map(Arguments::of); } - @BeforeClass + @BeforeAll public static void setUpBeforeClass() throws Exception { TestReplicationBase.setUpBeforeClass(); @@ -130,7 +125,7 @@ public static void setUpBeforeClass() throws Exception { admin2.createTable(tabB); } - @AfterClass + @AfterAll public static void tearDownAfterClass() throws Exception { 
admin1.disableTable(tabAName); admin1.deleteTable(tabAName); @@ -151,7 +146,7 @@ public static void tearDownAfterClass() throws Exception { TestReplicationBase.tearDownAfterClass(); } - @Test + @TestTemplate public void testNamespaceReplication() throws Exception { String peerId = "2"; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestRemovePeerProcedureWaitForSCP.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestRemovePeerProcedureWaitForSCP.java index e93fa3b01e87..8129b7de8d32 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestRemovePeerProcedureWaitForSCP.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestRemovePeerProcedureWaitForSCP.java @@ -17,15 +17,14 @@ */ package org.apache.hadoop.hbase.replication; -import static org.hamcrest.MatcherAssert.*; -import static org.hamcrest.Matchers.*; -import static org.junit.Assert.assertEquals; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.empty; +import static org.junit.jupiter.api.Assertions.assertEquals; import java.io.IOException; import java.util.Collections; import java.util.List; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; @@ -40,11 +39,12 @@ import org.apache.hadoop.hbase.procedure2.Procedure; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.testclassification.ReplicationTests; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Tag; 
+import org.junit.jupiter.api.Test; import org.apache.hbase.thirdparty.com.google.common.io.Closeables; @@ -56,13 +56,10 @@ *

* See HBASE-27109 for more details. */ -@Category({ ReplicationTests.class, LargeTests.class }) +@Tag(ReplicationTests.TAG) +@Tag(LargeTests.TAG) public class TestRemovePeerProcedureWaitForSCP extends TestReplicationBase { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestRemovePeerProcedureWaitForSCP.class); - private static final TableName tableName3 = TableName.valueOf("test3"); private static final String PEER_ID3 = "3"; @@ -105,7 +102,7 @@ protected ServerManager createServerManager(MasterServices master, RegionServerL } } - @BeforeClass + @BeforeAll public static void setUpBeforeClass() throws Exception { CONF1.setClass(HConstants.MASTER_IMPL, HMasterForTest.class, HMaster.class); TestReplicationBase.setUpBeforeClass(); @@ -113,24 +110,24 @@ public static void setUpBeforeClass() throws Exception { table3 = connection1.getTable(tableName3); } - @Override - public void setUpBase() throws Exception { - super.setUpBase(); + @AfterAll + public static void tearDownAfterClass() throws Exception { + Closeables.close(table3, true); + TestReplicationBase.tearDownAfterClass(); + } + + @BeforeEach + public void setUp() throws Exception { + setUpBase(); // set up two replication peers and only 1 rs to test claim replication queue with multiple // round addPeer(PEER_ID3, tableName3); } - @Override - public void tearDownBase() throws Exception { - super.tearDownBase(); + @AfterEach + public void tearDown() throws Exception { removePeer(PEER_ID3); - } - - @AfterClass - public static void tearDownAfterClass() throws Exception { - Closeables.close(table3, true); - TestReplicationBase.tearDownAfterClass(); + tearDownBase(); } @Test diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationBase.java index b03b89d6d69a..36362a691330 100644 --- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationBase.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationBase.java @@ -17,9 +17,9 @@ */ package org.apache.hadoop.hbase.replication; -import static org.junit.Assert.assertArrayEquals; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.fail; +import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.fail; import java.io.IOException; import java.util.ArrayList; @@ -52,10 +52,6 @@ import org.apache.hadoop.hbase.util.JVMClusterUtil; import org.apache.hadoop.hbase.wal.WAL; import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster; -import org.junit.After; -import org.junit.AfterClass; -import org.junit.Before; -import org.junit.BeforeClass; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -272,7 +268,6 @@ private static void startClusters() throws Exception { htable2 = connection2.getTable(tableName); } - @BeforeClass public static void setUpBeforeClass() throws Exception { configureClusters(UTIL1, UTIL2); startClusters(); @@ -317,7 +312,6 @@ protected final void addPeer(String peerId, TableName tableName, HBaseTestingUti source.getAdmin().addReplicationPeer(peerId, builder.build()); } - @Before public void setUpBase() throws Exception { addPeer(PEER_ID2, tableName); } @@ -332,7 +326,6 @@ protected final void removePeer(String peerId, HBaseTestingUtil util) throws Exc } } - @After public void tearDownBase() throws Exception { removePeer(PEER_ID2); } @@ -399,7 +392,6 @@ protected static void stopAllRegionServers(HBaseTestingUtil util) throws IOExcep } } - @AfterClass public static void tearDownAfterClass() throws Exception { if (htable2 != null) { htable2.close(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationChangingPeerRegionservers.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationChangingPeerRegionservers.java index 1f591e5d9ebe..e4de925c60e0 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationChangingPeerRegionservers.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationChangingPeerRegionservers.java @@ -17,13 +17,13 @@ */ package org.apache.hadoop.hbase.replication; -import static org.junit.Assert.assertArrayEquals; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.fail; +import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.fail; import java.io.IOException; -import java.util.List; -import org.apache.hadoop.hbase.HBaseClassTestRule; +import java.util.stream.Stream; +import org.apache.hadoop.hbase.HBaseParameterizedTestTemplate; import org.apache.hadoop.hbase.SingleProcessHBaseCluster; import org.apache.hadoop.hbase.Waiter; import org.apache.hadoop.hbase.client.Get; @@ -35,14 +35,13 @@ import org.apache.hadoop.hbase.testclassification.ReplicationTests; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.JVMClusterUtil; -import org.junit.Before; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -import org.junit.runners.Parameterized.Parameter; -import org.junit.runners.Parameterized.Parameters; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.TestTemplate; +import org.junit.jupiter.params.provider.Arguments; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -51,24 +50,24 @@ /** * Test handling of 
changes to the number of a peer's regionservers. */ -@RunWith(Parameterized.class) -@Category({ ReplicationTests.class, LargeTests.class }) +@Tag(ReplicationTests.TAG) +@Tag(LargeTests.TAG) +@HBaseParameterizedTestTemplate(name = "{index}: serialPeer={0}, syncPeer={1}") public class TestReplicationChangingPeerRegionservers extends TestReplicationBase { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestReplicationChangingPeerRegionservers.class); - private static final Logger LOG = LoggerFactory.getLogger(TestReplicationChangingPeerRegionservers.class); @SuppressWarnings("checkstyle:VisibilityModifier") - @Parameter(0) public boolean serialPeer; - @Parameter(1) public boolean syncPeer; + public TestReplicationChangingPeerRegionservers(boolean serialPeer, boolean syncPeer) { + this.serialPeer = serialPeer; + this.syncPeer = syncPeer; + } + @Override protected boolean isSerialPeer() { return serialPeer; @@ -79,14 +78,24 @@ protected boolean isSyncPeer() { return syncPeer; } - @Parameters(name = "{index}: serialPeer={0}, syncPeer={1}") - public static List parameters() { + public static Stream parameters() { return ImmutableList.of(new Object[] { false, false }, new Object[] { false, true }, - new Object[] { true, false }, new Object[] { true, true }); + new Object[] { true, false }, new Object[] { true, true }).stream().map(Arguments::of); + } + + @BeforeAll + public static void setUpBeforeClass() throws Exception { + TestReplicationBase.setUpBeforeClass(); } - @Before + @AfterAll + public static void tearDownAfterClass() throws Exception { + TestReplicationBase.tearDownAfterClass(); + } + + @BeforeEach public void setUp() throws Exception { + setUpBase(); // Starting and stopping replication can make us miss new logs, // rolling like this makes sure the most recent one gets added to the queue for (JVMClusterUtil.RegionServerThread r : UTIL1.getHBaseCluster().getRegionServerThreads()) { @@ -120,7 +129,12 @@ public 
void setUp() throws Exception { } } - @Test + @AfterEach + public void tearDown() throws Exception { + tearDownBase(); + } + + @TestTemplate public void testChangingNumberOfPeerRegionServers() throws IOException, InterruptedException { LOG.info("testSimplePutDelete"); SingleProcessHBaseCluster peerCluster = UTIL2.getMiniHBaseCluster(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationDisableInactivePeer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationDisableInactivePeer.java index 1faa25f116f0..6900ce197011 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationDisableInactivePeer.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationDisableInactivePeer.java @@ -17,10 +17,9 @@ */ package org.apache.hadoop.hbase.replication; -import static org.junit.Assert.assertArrayEquals; -import static org.junit.Assert.fail; +import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.fail; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.client.Get; import org.apache.hadoop.hbase.client.Put; @@ -28,22 +27,42 @@ import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.testclassification.ReplicationTests; import org.apache.hadoop.hbase.util.Bytes; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({ ReplicationTests.class, LargeTests.class }) +@Tag(ReplicationTests.TAG) +@Tag(LargeTests.TAG) public class 
TestReplicationDisableInactivePeer extends TestReplicationBase { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestReplicationDisableInactivePeer.class); - private static final Logger LOG = LoggerFactory.getLogger(TestReplicationDisableInactivePeer.class); + @BeforeAll + public static void setUpBeforeClass() throws Exception { + TestReplicationBase.setUpBeforeClass(); + } + + @AfterAll + public static void tearDownAfterClass() throws Exception { + TestReplicationBase.tearDownAfterClass(); + } + + @BeforeEach + public void setUp() throws Exception { + setUpBase(); + } + + @AfterEach + public void tearDown() throws Exception { + tearDownBase(); + } + @Override protected String getClusterKey(HBaseTestingUtil util) throws Exception { // in this test we will restart the peer cluster, and the master address will be changed, so we diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationEmptyWALRecovery.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationEmptyWALRecovery.java index 38fc6599dad0..ff4e82f768d9 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationEmptyWALRecovery.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationEmptyWALRecovery.java @@ -17,13 +17,14 @@ */ package org.apache.hadoop.hbase.replication; +import static org.junit.jupiter.api.Assertions.assertEquals; + import java.io.IOException; import java.util.ArrayList; import java.util.List; import java.util.NavigableMap; import java.util.TreeMap; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.Waiter; @@ -44,30 +45,44 @@ import org.apache.hadoop.hbase.wal.WALEdit; import org.apache.hadoop.hbase.wal.WALEditInternalHelper; import org.apache.hadoop.hbase.wal.WALKeyImpl; 
-import org.junit.Assert; -import org.junit.Before; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; -@Category({ ReplicationTests.class, LargeTests.class }) +@Tag(ReplicationTests.TAG) +@Tag(LargeTests.TAG) public class TestReplicationEmptyWALRecovery extends TestReplicationBase { MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl(); static final RegionInfo info = RegionInfoBuilder.newBuilder(tableName).build(); NavigableMap scopes = new TreeMap<>(Bytes.BYTES_COMPARATOR); - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestReplicationEmptyWALRecovery.class); + @BeforeAll + public static void setUpBeforeClass() throws Exception { + TestReplicationBase.setUpBeforeClass(); + } + + @AfterAll + public static void tearDownAfterClass() throws Exception { + TestReplicationBase.tearDownAfterClass(); + } - @Before - public void setUp() throws IOException, InterruptedException { + @BeforeEach + public void setUp() throws Exception { + setUpBase(); cleanUp(); scopes.put(famName, HConstants.REPLICATION_SCOPE_GLOBAL); replicateCount.set(0); replicatedEntries.clear(); } + @AfterEach + public void tearDown() throws Exception { + tearDownBase(); + } + /** * Waits until there is only one log(the current writing one) in the replication queue * @param numRs number of region servers @@ -199,12 +214,12 @@ public void testReplicationOfEmptyWALFollowingNonEmptyWAL() throws Exception { // Now we should expect numOfEntriesToReplicate entries // replicated from each region server. This makes sure we didn't loose data // from any previous batch when we encounter EOF exception for empty file. 
- Assert.assertEquals("Replicated entries are not correct", numOfEntriesToReplicate * numRs, - replicatedEntries.size()); + assertEquals(numOfEntriesToReplicate * numRs, replicatedEntries.size(), + "Replicated entries are not correct"); // We expect just one batch of replication which will // be from when we handle the EOF exception. - Assert.assertEquals("Replicated batches are not correct", 1, replicateCount.intValue()); + assertEquals(1, replicateCount.intValue(), "Replicated batches are not correct"); verifyNumberOfLogsInQueue(1, numRs); // we're now writing to the new wal // if everything works, the source should've stopped reading from the empty wal, and start @@ -254,12 +269,12 @@ public void testReplicationOfEmptyWALFollowedByNonEmptyWAL() throws Exception { // Now we should expect numOfEntriesToReplicate entries // replicated from each region server. This makes sure we didn't loose data // from any previous batch when we encounter EOF exception for empty file. - Assert.assertEquals("Replicated entries are not correct", numOfEntriesToReplicate * numRs, - replicatedEntries.size()); + assertEquals(numOfEntriesToReplicate * numRs, replicatedEntries.size(), + "Replicated entries are not correct"); // We expect just one batch of replication to be shipped which will // for non empty WAL - Assert.assertEquals("Replicated batches are not correct", 1, replicateCount.get()); + assertEquals(1, replicateCount.get(), "Replicated batches are not correct"); verifyNumberOfLogsInQueue(1, numRs); // we're now writing to the new wal // if everything works, the source should've stopped reading from the empty wal, and start @@ -311,12 +326,12 @@ public void testReplicationOfEmptyWALSurroundedNonEmptyWAL() throws Exception { // Now we should expect numOfEntriesToReplicate entries // replicated from each region server. This makes sure we didn't loose data // from any previous batch when we encounter EOF exception for empty file. 
- Assert.assertEquals("Replicated entries are not correct", numOfEntriesToReplicate * numRs * 2, - replicatedEntries.size()); + assertEquals(numOfEntriesToReplicate * numRs * 2, replicatedEntries.size(), + "Replicated entries are not correct"); // We expect two batch of replication to be shipped which will // for non empty WAL - Assert.assertEquals("Replicated batches are not correct", 2, replicateCount.get()); + assertEquals(2, replicateCount.get(), "Replicated batches are not correct"); verifyNumberOfLogsInQueue(1, numRs); // we're now writing to the new wal // if everything works, the source should've stopped reading from the empty wal, and start diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationEndpoint.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationEndpoint.java index 057a9f3567f5..354ae23eb420 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationEndpoint.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationEndpoint.java @@ -17,634 +17,26 @@ */ package org.apache.hadoop.hbase.replication; -import static org.mockito.Mockito.doNothing; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.spy; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.TreeMap; -import java.util.UUID; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.atomic.AtomicReference; -import org.apache.hadoop.hbase.Cell; -import org.apache.hadoop.hbase.HBaseClassTestRule; -import org.apache.hadoop.hbase.KeyValue; -import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.Waiter; -import 
org.apache.hadoop.hbase.client.Connection; -import org.apache.hadoop.hbase.client.ConnectionFactory; -import org.apache.hadoop.hbase.client.Put; -import org.apache.hadoop.hbase.client.RegionInfo; -import org.apache.hadoop.hbase.client.Table; -import org.apache.hadoop.hbase.regionserver.HRegion; -import org.apache.hadoop.hbase.replication.regionserver.HBaseInterClusterReplicationEndpoint; -import org.apache.hadoop.hbase.replication.regionserver.MetricsReplicationGlobalSourceSource; -import org.apache.hadoop.hbase.replication.regionserver.MetricsReplicationGlobalSourceSourceImpl; -import org.apache.hadoop.hbase.replication.regionserver.MetricsReplicationSourceImpl; -import org.apache.hadoop.hbase.replication.regionserver.MetricsReplicationSourceSource; -import org.apache.hadoop.hbase.replication.regionserver.MetricsReplicationSourceSourceImpl; -import org.apache.hadoop.hbase.replication.regionserver.MetricsReplicationTableSource; -import org.apache.hadoop.hbase.replication.regionserver.MetricsSource; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.ReplicationTests; -import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; -import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread; -import org.apache.hadoop.hbase.util.Pair; -import org.apache.hadoop.hbase.util.Threads; -import org.apache.hadoop.hbase.wal.WAL.Entry; -import org.apache.hadoop.hbase.wal.WALEdit; -import org.apache.hadoop.hbase.wal.WALEditInternalHelper; -import org.apache.hadoop.hbase.wal.WALKeyImpl; -import org.apache.hadoop.hbase.zookeeper.ZKConfig; -import org.apache.hadoop.metrics2.lib.DynamicMetricsRegistry; -import org.junit.AfterClass; -import org.junit.Assert; -import org.junit.Before; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.slf4j.Logger; -import 
org.slf4j.LoggerFactory; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Tag; /** * Tests ReplicationSource and ReplicationEndpoint interactions */ -@Category({ ReplicationTests.class, MediumTests.class }) -public class TestReplicationEndpoint extends TestReplicationBase { - - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestReplicationEndpoint.class); - - private static final Logger LOG = LoggerFactory.getLogger(TestReplicationEndpoint.class); - - static int numRegionServers; - - @BeforeClass - public static void setUpBeforeClass() throws Exception { - TestReplicationBase.setUpBeforeClass(); - numRegionServers = UTIL1.getHBaseCluster().getRegionServerThreads().size(); - } - - @AfterClass - public static void tearDownAfterClass() throws Exception { - TestReplicationBase.tearDownAfterClass(); - // check stop is called - Assert.assertTrue(ReplicationEndpointForTest.stoppedCount.get() > 0); - } - - @Before - public void setup() throws Exception { - ReplicationEndpointForTest.contructedCount.set(0); - ReplicationEndpointForTest.startedCount.set(0); - ReplicationEndpointForTest.replicateCount.set(0); - ReplicationEndpointReturningFalse.replicated.set(false); - ReplicationEndpointForTest.lastEntries = null; - final List rsThreads = UTIL1.getMiniHBaseCluster().getRegionServerThreads(); - for (RegionServerThread rs : rsThreads) { - UTIL1.getAdmin().rollWALWriter(rs.getRegionServer().getServerName()); - } - // Wait for all log roll to finish - UTIL1.waitFor(3000, new Waiter.ExplainingPredicate() { - @Override - public boolean evaluate() throws Exception { - for (RegionServerThread rs : rsThreads) { - if (!rs.getRegionServer().walRollRequestFinished()) { - return false; - } - } - return true; - } - - @Override - public String explainFailure() throws Exception { - List logRollInProgressRsList = new ArrayList<>(); - for (RegionServerThread rs : rsThreads) { - if 
(!rs.getRegionServer().walRollRequestFinished()) { - logRollInProgressRsList.add(rs.getRegionServer().toString()); - } - } - return "Still waiting for log roll on regionservers: " + logRollInProgressRsList; - } - }); - } - - @Test - public void testCustomReplicationEndpoint() throws Exception { - // test installing a custom replication endpoint other than the default one. - hbaseAdmin.addReplicationPeer("testCustomReplicationEndpoint", - ReplicationPeerConfig.newBuilder().setClusterKey(ZKConfig.getZooKeeperClusterKey(CONF1)) - .setReplicationEndpointImpl(ReplicationEndpointForTest.class.getName()).build()); - - // check whether the class has been constructed and started - Waiter.waitFor(CONF1, 60000, new Waiter.Predicate() { - @Override - public boolean evaluate() throws Exception { - return ReplicationEndpointForTest.contructedCount.get() >= numRegionServers; - } - }); - - Waiter.waitFor(CONF1, 60000, new Waiter.Predicate() { - @Override - public boolean evaluate() throws Exception { - return ReplicationEndpointForTest.startedCount.get() >= numRegionServers; - } - }); - - Assert.assertEquals(0, ReplicationEndpointForTest.replicateCount.get()); - - // now replicate some data. 
- doPut(Bytes.toBytes("row42")); - - Waiter.waitFor(CONF1, 60000, new Waiter.Predicate() { - @Override - public boolean evaluate() throws Exception { - return ReplicationEndpointForTest.replicateCount.get() >= 1; - } - }); - - doAssert(Bytes.toBytes("row42")); - - hbaseAdmin.removeReplicationPeer("testCustomReplicationEndpoint"); - } - - @Test - public void testReplicationEndpointReturnsFalseOnReplicate() throws Exception { - Assert.assertEquals(0, ReplicationEndpointForTest.replicateCount.get()); - Assert.assertTrue(!ReplicationEndpointReturningFalse.replicated.get()); - int peerCount = hbaseAdmin.listReplicationPeers().size(); - final String id = "testReplicationEndpointReturnsFalseOnReplicate"; - hbaseAdmin.addReplicationPeer(id, - ReplicationPeerConfig.newBuilder().setClusterKey(ZKConfig.getZooKeeperClusterKey(CONF1)) - .setReplicationEndpointImpl(ReplicationEndpointReturningFalse.class.getName()).build()); - // This test is flakey and then there is so much stuff flying around in here its, hard to - // debug. Peer needs to be up for the edit to make it across. This wait on - // peer count seems to be a hack that has us not progress till peer is up. - if (hbaseAdmin.listReplicationPeers().size() <= peerCount) { - LOG.info("Waiting on peercount to go up from " + peerCount); - Threads.sleep(100); - } - // now replicate some data - doPut(row); - - Waiter.waitFor(CONF1, 60000, new Waiter.Predicate() { - @Override - public boolean evaluate() throws Exception { - // Looks like replication endpoint returns false unless we put more than 10 edits. We - // only send over one edit. 
- int count = ReplicationEndpointForTest.replicateCount.get(); - LOG.info("count=" + count); - return ReplicationEndpointReturningFalse.replicated.get(); - } - }); - if (ReplicationEndpointReturningFalse.ex.get() != null) { - throw ReplicationEndpointReturningFalse.ex.get(); - } - - hbaseAdmin.removeReplicationPeer("testReplicationEndpointReturnsFalseOnReplicate"); - } - - @Test - public void testInterClusterReplication() throws Exception { - final String id = "testInterClusterReplication"; - - List regions = UTIL1.getHBaseCluster().getRegions(tableName); - int totEdits = 0; - - // Make sure edits are spread across regions because we do region based batching - // before shipping edits. - for (HRegion region : regions) { - RegionInfo hri = region.getRegionInfo(); - byte[] row = hri.getStartKey(); - for (int i = 0; i < 100; i++) { - if (row.length > 0) { - Put put = new Put(row); - put.addColumn(famName, row, row); - region.put(put); - totEdits++; - } - } - } - - hbaseAdmin.addReplicationPeer(id, - ReplicationPeerConfig.newBuilder().setClusterKey(ZKConfig.getZooKeeperClusterKey(CONF2)) - .setReplicationEndpointImpl(InterClusterReplicationEndpointForTest.class.getName()) - .build()); - - final int numEdits = totEdits; - Waiter.waitFor(CONF1, 30000, new Waiter.ExplainingPredicate() { - @Override - public boolean evaluate() throws Exception { - return InterClusterReplicationEndpointForTest.replicateCount.get() == numEdits; - } - - @Override - public String explainFailure() throws Exception { - String failure = "Failed to replicate all edits, expected = " + numEdits + " replicated = " - + InterClusterReplicationEndpointForTest.replicateCount.get(); - return failure; - } - }); - - hbaseAdmin.removeReplicationPeer("testInterClusterReplication"); - UTIL1.deleteTableData(tableName); - } - - @Test - public void testWALEntryFilterFromReplicationEndpoint() throws Exception { - ReplicationPeerConfig rpc = - 
ReplicationPeerConfig.newBuilder().setClusterKey(ZKConfig.getZooKeeperClusterKey(CONF1)) - .setReplicationEndpointImpl(ReplicationEndpointWithWALEntryFilter.class.getName()) - // test that we can create mutliple WALFilters reflectively - .putConfiguration(BaseReplicationEndpoint.REPLICATION_WALENTRYFILTER_CONFIG_KEY, - EverythingPassesWALEntryFilter.class.getName() + "," - + EverythingPassesWALEntryFilterSubclass.class.getName()) - .build(); - - hbaseAdmin.addReplicationPeer("testWALEntryFilterFromReplicationEndpoint", rpc); - // now replicate some data. - try (Connection connection = ConnectionFactory.createConnection(CONF1)) { - doPut(connection, Bytes.toBytes("row1")); - doPut(connection, row); - doPut(connection, Bytes.toBytes("row2")); - } - - Waiter.waitFor(CONF1, 60000, new Waiter.Predicate() { - @Override - public boolean evaluate() throws Exception { - return ReplicationEndpointForTest.replicateCount.get() >= 1; - } - }); - - Assert.assertNull(ReplicationEndpointWithWALEntryFilter.ex.get()); - // make sure our reflectively created filter is in the filter chain - Assert.assertTrue(EverythingPassesWALEntryFilter.hasPassedAnEntry()); - hbaseAdmin.removeReplicationPeer("testWALEntryFilterFromReplicationEndpoint"); - } - - @Test(expected = IOException.class) - public void testWALEntryFilterAddValidation() throws Exception { - ReplicationPeerConfig rpc = - ReplicationPeerConfig.newBuilder().setClusterKey(ZKConfig.getZooKeeperClusterKey(CONF1)) - .setReplicationEndpointImpl(ReplicationEndpointWithWALEntryFilter.class.getName()) - // test that we can create mutliple WALFilters reflectively - .putConfiguration(BaseReplicationEndpoint.REPLICATION_WALENTRYFILTER_CONFIG_KEY, - "IAmNotARealWalEntryFilter") - .build(); - hbaseAdmin.addReplicationPeer("testWALEntryFilterAddValidation", rpc); - } - - @Test(expected = IOException.class) - public void testWALEntryFilterUpdateValidation() throws Exception { - ReplicationPeerConfig rpc = - 
ReplicationPeerConfig.newBuilder().setClusterKey(ZKConfig.getZooKeeperClusterKey(CONF1)) - .setReplicationEndpointImpl(ReplicationEndpointWithWALEntryFilter.class.getName()) - // test that we can create mutliple WALFilters reflectively - .putConfiguration(BaseReplicationEndpoint.REPLICATION_WALENTRYFILTER_CONFIG_KEY, - "IAmNotARealWalEntryFilter") - .build(); - hbaseAdmin.updateReplicationPeerConfig("testWALEntryFilterUpdateValidation", rpc); - } - - @Test - public void testMetricsSourceBaseSourcePassThrough() { - /* - * The replication MetricsSource wraps a MetricsReplicationTableSourceImpl, - * MetricsReplicationSourceSourceImpl and a MetricsReplicationGlobalSourceSource, so that - * metrics get written to both namespaces. Both of those classes wrap a - * MetricsReplicationSourceImpl that implements BaseSource, which allows for custom JMX metrics. - * This test checks to make sure the BaseSource decorator logic on MetricsSource actually calls - * down through the two layers of wrapping to the actual BaseSource. 
- */ - String id = "id"; - DynamicMetricsRegistry mockRegistry = mock(DynamicMetricsRegistry.class); - MetricsReplicationSourceImpl singleRms = mock(MetricsReplicationSourceImpl.class); - when(singleRms.getMetricsRegistry()).thenReturn(mockRegistry); - MetricsReplicationSourceImpl globalRms = mock(MetricsReplicationSourceImpl.class); - when(globalRms.getMetricsRegistry()).thenReturn(mockRegistry); - - MetricsReplicationSourceSource singleSourceSource = - new MetricsReplicationSourceSourceImpl(singleRms, id); - MetricsReplicationGlobalSourceSource globalSourceSource = - new MetricsReplicationGlobalSourceSourceImpl(globalRms); - MetricsReplicationGlobalSourceSource spyglobalSourceSource = spy(globalSourceSource); - doNothing().when(spyglobalSourceSource).incrFailedRecoveryQueue(); - - Map singleSourceSourceByTable = new HashMap<>(); - MetricsSource source = - new MetricsSource(id, singleSourceSource, spyglobalSourceSource, singleSourceSourceByTable); - - String gaugeName = "gauge"; - String singleGaugeName = "source.id." + gaugeName; - String globalGaugeName = "source." + gaugeName; - long delta = 1; - String counterName = "counter"; - String singleCounterName = "source.id." + counterName; - String globalCounterName = "source." 
+ counterName; - long count = 2; - source.decGauge(gaugeName, delta); - source.getMetricsContext(); - source.getMetricsDescription(); - source.getMetricsJmxContext(); - source.getMetricsName(); - source.incCounters(counterName, count); - source.incGauge(gaugeName, delta); - source.init(); - source.removeMetric(gaugeName); - source.setGauge(gaugeName, delta); - source.updateHistogram(counterName, count); - source.incrFailedRecoveryQueue(); - - verify(singleRms).decGauge(singleGaugeName, delta); - verify(globalRms).decGauge(globalGaugeName, delta); - verify(globalRms).getMetricsContext(); - verify(globalRms).getMetricsJmxContext(); - verify(globalRms).getMetricsName(); - verify(singleRms).incCounters(singleCounterName, count); - verify(globalRms).incCounters(globalCounterName, count); - verify(singleRms).incGauge(singleGaugeName, delta); - verify(globalRms).incGauge(globalGaugeName, delta); - verify(globalRms).init(); - verify(singleRms).removeMetric(singleGaugeName); - verify(globalRms).removeMetric(globalGaugeName); - verify(singleRms).setGauge(singleGaugeName, delta); - verify(globalRms).setGauge(globalGaugeName, delta); - verify(singleRms).updateHistogram(singleCounterName, count); - verify(globalRms).updateHistogram(globalCounterName, count); - verify(spyglobalSourceSource).incrFailedRecoveryQueue(); - - // check singleSourceSourceByTable metrics. 
- // singleSourceSourceByTable map entry will be created only - // after calling #setAgeOfLastShippedOpByTable - boolean containsRandomNewTable = - source.getSingleSourceSourceByTable().containsKey("RandomNewTable"); - Assert.assertEquals(false, containsRandomNewTable); - source.updateTableLevelMetrics(createWALEntriesWithSize("RandomNewTable")); - containsRandomNewTable = source.getSingleSourceSourceByTable().containsKey("RandomNewTable"); - Assert.assertEquals(true, containsRandomNewTable); - MetricsReplicationTableSource msr = source.getSingleSourceSourceByTable().get("RandomNewTable"); - - // age should be greater than zero we created the entry with time in the past - Assert.assertTrue(msr.getLastShippedAge() > 0); - Assert.assertTrue(msr.getShippedBytes() > 0); - - } - - private List> createWALEntriesWithSize(String tableName) { - List> walEntriesWithSize = new ArrayList<>(); - byte[] a = new byte[] { 'a' }; - Entry entry = createEntry(tableName, null, a); - walEntriesWithSize.add(new Pair<>(entry, 10L)); - return walEntriesWithSize; - } - - private Entry createEntry(String tableName, TreeMap scopes, byte[]... 
kvs) { - WALKeyImpl key1 = new WALKeyImpl(new byte[0], TableName.valueOf(tableName), - EnvironmentEdgeManager.currentTime() - 1L, scopes); - WALEdit edit1 = new WALEdit(); - - for (byte[] kv : kvs) { - WALEditInternalHelper.addExtendedCell(edit1, new KeyValue(kv, kv, kv)); - } - return new Entry(key1, edit1); - } - - private void doPut(byte[] row) throws IOException { - try (Connection connection = ConnectionFactory.createConnection(CONF1)) { - doPut(connection, row); - } - } - - private void doPut(final Connection connection, final byte[] row) throws IOException { - try (Table t = connection.getTable(tableName)) { - Put put = new Put(row); - put.addColumn(famName, row, row); - t.put(put); - } - } - - private static void doAssert(byte[] row) throws Exception { - if (ReplicationEndpointForTest.lastEntries == null) { - return; // first call - } - Assert.assertEquals(1, ReplicationEndpointForTest.lastEntries.size()); - List cells = ReplicationEndpointForTest.lastEntries.get(0).getEdit().getCells(); - Assert.assertEquals(1, cells.size()); - Assert.assertTrue(Bytes.equals(cells.get(0).getRowArray(), cells.get(0).getRowOffset(), - cells.get(0).getRowLength(), row, 0, row.length)); - } - - public static class ReplicationEndpointForTest extends BaseReplicationEndpoint { - static UUID uuid = UTIL1.getRandomUUID(); - static AtomicInteger contructedCount = new AtomicInteger(); - static AtomicInteger startedCount = new AtomicInteger(); - static AtomicInteger stoppedCount = new AtomicInteger(); - static AtomicInteger replicateCount = new AtomicInteger(); - static volatile List lastEntries = null; - - public ReplicationEndpointForTest() { - replicateCount.set(0); - contructedCount.incrementAndGet(); - } - - @Override - public UUID getPeerUUID() { - return uuid; - } - - @Override - public boolean replicate(ReplicateContext replicateContext) { - replicateCount.incrementAndGet(); - lastEntries = new ArrayList<>(replicateContext.entries); - return true; - } - - @Override - public 
void start() { - startAsync(); - } - - @Override - public void stop() { - stopAsync(); - } - - @Override - protected void doStart() { - startedCount.incrementAndGet(); - notifyStarted(); - } - - @Override - protected void doStop() { - stoppedCount.incrementAndGet(); - notifyStopped(); - } - - @Override - public boolean canReplicateToSameCluster() { - return true; - } - } - - /** - * Not used by unit tests, helpful for manual testing with replication. - *

- * Snippet for `hbase shell`: - * - *

-   * create 't', 'f'
-   * add_peer '1', ENDPOINT_CLASSNAME => 'org.apache.hadoop.hbase.replication.' + \
-   *    'TestReplicationEndpoint$SleepingReplicationEndpointForTest'
-   * alter 't', {NAME=>'f', REPLICATION_SCOPE=>1}
-   * 
- */ - public static class SleepingReplicationEndpointForTest extends ReplicationEndpointForTest { - private long duration; - - public SleepingReplicationEndpointForTest() { - super(); - } - - @Override - public void init(Context context) throws IOException { - super.init(context); - if (this.ctx != null) { - duration = this.ctx.getConfiguration() - .getLong("hbase.test.sleep.replication.endpoint.duration.millis", 5000L); - } - } - - @Override - public boolean replicate(ReplicateContext context) { - try { - Thread.sleep(duration); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - return false; - } - return super.replicate(context); - } - } - - public static class InterClusterReplicationEndpointForTest - extends HBaseInterClusterReplicationEndpoint { - - static AtomicInteger replicateCount = new AtomicInteger(); - static boolean failedOnce; - - public InterClusterReplicationEndpointForTest() { - replicateCount.set(0); - } - - @Override - public boolean replicate(ReplicateContext replicateContext) { - boolean success = super.replicate(replicateContext); - if (success) { - replicateCount.addAndGet(replicateContext.entries.size()); - } - return success; - } - - @Override - protected CompletableFuture asyncReplicate(List entries, int ordinal, - int timeout) { - // Fail only once, we don't want to slow down the test. 
- if (failedOnce) { - return CompletableFuture.completedFuture(ordinal); - } else { - failedOnce = true; - CompletableFuture future = new CompletableFuture(); - future.completeExceptionally(new IOException("Sample Exception: Failed to replicate.")); - return future; - } - } - } - - public static class ReplicationEndpointReturningFalse extends ReplicationEndpointForTest { - static int COUNT = 10; - static AtomicReference ex = new AtomicReference<>(null); - static AtomicBoolean replicated = new AtomicBoolean(false); - - @Override - public boolean replicate(ReplicateContext replicateContext) { - try { - // check row - doAssert(row); - } catch (Exception e) { - ex.set(e); - } - - super.replicate(replicateContext); - LOG.info("Replicated " + Bytes.toString(row) + ", count=" + replicateCount.get()); - - replicated.set(replicateCount.get() > COUNT); // first 10 times, we return false - return replicated.get(); - } - } - - // return a WALEntry filter which only accepts "row", but not other rows - public static class ReplicationEndpointWithWALEntryFilter extends ReplicationEndpointForTest { - static AtomicReference ex = new AtomicReference<>(null); - - @Override - public boolean replicate(ReplicateContext replicateContext) { - try { - super.replicate(replicateContext); - doAssert(row); - } catch (Exception e) { - ex.set(e); - } - return true; - } - - @Override - public WALEntryFilter getWALEntryfilter() { - return new ChainWALEntryFilter(super.getWALEntryfilter(), new WALEntryFilter() { - @Override - public Entry filter(Entry entry) { - ArrayList cells = entry.getEdit().getCells(); - int size = cells.size(); - for (int i = size - 1; i >= 0; i--) { - Cell cell = cells.get(i); - if ( - !Bytes.equals(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength(), row, 0, - row.length) - ) { - cells.remove(i); - } - } - return entry; - } - }); - } - } - - public static class EverythingPassesWALEntryFilter implements WALEntryFilter { - private static boolean passedEntry = false; - 
- @Override - public Entry filter(Entry entry) { - passedEntry = true; - return entry; - } +@Tag(ReplicationTests.TAG) +@Tag(MediumTests.TAG) +public class TestReplicationEndpoint extends TestReplicationEndpointBase { - public static boolean hasPassedAnEntry() { - return passedEntry; - } + @BeforeAll + public static void beforeClass() throws Exception { + setUpBeforeClass(); } - public static class EverythingPassesWALEntryFilterSubclass - extends EverythingPassesWALEntryFilter { + @AfterAll + public static void afterClass() throws Exception { + tearDownAfterClass(); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationEndpointBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationEndpointBase.java new file mode 100644 index 000000000000..e46f39b5c2cf --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationEndpointBase.java @@ -0,0 +1,651 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.replication; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.Mockito.doNothing; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.TreeMap; +import java.util.UUID; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicReference; +import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.Waiter; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.ConnectionFactory; +import org.apache.hadoop.hbase.client.Put; +import org.apache.hadoop.hbase.client.RegionInfo; +import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.regionserver.HRegion; +import org.apache.hadoop.hbase.replication.regionserver.HBaseInterClusterReplicationEndpoint; +import org.apache.hadoop.hbase.replication.regionserver.MetricsReplicationGlobalSourceSource; +import org.apache.hadoop.hbase.replication.regionserver.MetricsReplicationGlobalSourceSourceImpl; +import org.apache.hadoop.hbase.replication.regionserver.MetricsReplicationSourceImpl; +import org.apache.hadoop.hbase.replication.regionserver.MetricsReplicationSourceSource; +import org.apache.hadoop.hbase.replication.regionserver.MetricsReplicationSourceSourceImpl; +import org.apache.hadoop.hbase.replication.regionserver.MetricsReplicationTableSource; +import 
org.apache.hadoop.hbase.replication.regionserver.MetricsSource; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; +import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread; +import org.apache.hadoop.hbase.util.Pair; +import org.apache.hadoop.hbase.util.Threads; +import org.apache.hadoop.hbase.wal.WAL.Entry; +import org.apache.hadoop.hbase.wal.WALEdit; +import org.apache.hadoop.hbase.wal.WALEditInternalHelper; +import org.apache.hadoop.hbase.wal.WALKeyImpl; +import org.apache.hadoop.hbase.zookeeper.ZKConfig; +import org.apache.hadoop.metrics2.lib.DynamicMetricsRegistry; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Abstract base class for TestReplicationEndpoint tests. Subclasses must call + * {@link #setUpBeforeClass()} and {@link #tearDownAfterClass()} in their + * @BeforeAll and @AfterAll methods respectively. 
+ */ +public abstract class TestReplicationEndpointBase extends TestReplicationBase { + + private static final Logger LOG = LoggerFactory.getLogger(TestReplicationEndpointBase.class); + + static int numRegionServers; + + public static void setUpBeforeClass() throws Exception { + TestReplicationBase.setUpBeforeClass(); + numRegionServers = UTIL1.getHBaseCluster().getRegionServerThreads().size(); + } + + public static void tearDownAfterClass() throws Exception { + TestReplicationBase.tearDownAfterClass(); + // check stop is called + assertTrue(ReplicationEndpointForTest.stoppedCount.get() > 0); + } + + @BeforeEach + public void setup() throws Exception { + setUpBase(); + ReplicationEndpointForTest.contructedCount.set(0); + ReplicationEndpointForTest.startedCount.set(0); + ReplicationEndpointForTest.replicateCount.set(0); + ReplicationEndpointReturningFalse.replicated.set(false); + ReplicationEndpointForTest.lastEntries = null; + final List rsThreads = UTIL1.getMiniHBaseCluster().getRegionServerThreads(); + for (RegionServerThread rs : rsThreads) { + UTIL1.getAdmin().rollWALWriter(rs.getRegionServer().getServerName()); + } + // Wait for all log roll to finish + UTIL1.waitFor(3000, new Waiter.ExplainingPredicate() { + @Override + public boolean evaluate() throws Exception { + for (RegionServerThread rs : rsThreads) { + if (!rs.getRegionServer().walRollRequestFinished()) { + return false; + } + } + return true; + } + + @Override + public String explainFailure() throws Exception { + List logRollInProgressRsList = new ArrayList<>(); + for (RegionServerThread rs : rsThreads) { + if (!rs.getRegionServer().walRollRequestFinished()) { + logRollInProgressRsList.add(rs.getRegionServer().toString()); + } + } + return "Still waiting for log roll on regionservers: " + logRollInProgressRsList; + } + }); + } + + @AfterEach + public void tearDown() throws Exception { + tearDownBase(); + } + + @Test + public void testCustomReplicationEndpoint() throws Exception { + // test installing 
a custom replication endpoint other than the default one. + hbaseAdmin.addReplicationPeer("testCustomReplicationEndpoint", + ReplicationPeerConfig.newBuilder().setClusterKey(ZKConfig.getZooKeeperClusterKey(CONF1)) + .setReplicationEndpointImpl(ReplicationEndpointForTest.class.getName()).build()); + + // check whether the class has been constructed and started + Waiter.waitFor(CONF1, 60000, new Waiter.Predicate() { + @Override + public boolean evaluate() throws Exception { + return ReplicationEndpointForTest.contructedCount.get() >= numRegionServers; + } + }); + + Waiter.waitFor(CONF1, 60000, new Waiter.Predicate() { + @Override + public boolean evaluate() throws Exception { + return ReplicationEndpointForTest.startedCount.get() >= numRegionServers; + } + }); + + assertEquals(0, ReplicationEndpointForTest.replicateCount.get()); + + // now replicate some data. + doPut(Bytes.toBytes("row42")); + + Waiter.waitFor(CONF1, 60000, new Waiter.Predicate() { + @Override + public boolean evaluate() throws Exception { + return ReplicationEndpointForTest.replicateCount.get() >= 1; + } + }); + + doAssert(Bytes.toBytes("row42")); + + hbaseAdmin.removeReplicationPeer("testCustomReplicationEndpoint"); + } + + @Test + public void testReplicationEndpointReturnsFalseOnReplicate() throws Exception { + assertEquals(0, ReplicationEndpointForTest.replicateCount.get()); + assertTrue(!ReplicationEndpointReturningFalse.replicated.get()); + int peerCount = hbaseAdmin.listReplicationPeers().size(); + final String id = "testReplicationEndpointReturnsFalseOnReplicate"; + hbaseAdmin.addReplicationPeer(id, + ReplicationPeerConfig.newBuilder().setClusterKey(ZKConfig.getZooKeeperClusterKey(CONF1)) + .setReplicationEndpointImpl(ReplicationEndpointReturningFalse.class.getName()).build()); + // This test is flakey and then there is so much stuff flying around in here its, hard to + // debug. Peer needs to be up for the edit to make it across. 
This wait on + // peer count seems to be a hack that has us not progress till peer is up. + if (hbaseAdmin.listReplicationPeers().size() <= peerCount) { + LOG.info("Waiting on peercount to go up from " + peerCount); + Threads.sleep(100); + } + // now replicate some data + doPut(row); + + Waiter.waitFor(CONF1, 60000, new Waiter.Predicate() { + @Override + public boolean evaluate() throws Exception { + // Looks like replication endpoint returns false unless we put more than 10 edits. We + // only send over one edit. + int count = ReplicationEndpointForTest.replicateCount.get(); + LOG.info("count=" + count); + return ReplicationEndpointReturningFalse.replicated.get(); + } + }); + if (ReplicationEndpointReturningFalse.ex.get() != null) { + throw ReplicationEndpointReturningFalse.ex.get(); + } + + hbaseAdmin.removeReplicationPeer("testReplicationEndpointReturnsFalseOnReplicate"); + } + + @Test + public void testInterClusterReplication() throws Exception { + final String id = "testInterClusterReplication"; + + List regions = UTIL1.getHBaseCluster().getRegions(tableName); + int totEdits = 0; + + // Make sure edits are spread across regions because we do region based batching + // before shipping edits. 
+ for (HRegion region : regions) { + RegionInfo hri = region.getRegionInfo(); + byte[] row = hri.getStartKey(); + for (int i = 0; i < 100; i++) { + if (row.length > 0) { + Put put = new Put(row); + put.addColumn(famName, row, row); + region.put(put); + totEdits++; + } + } + } + + hbaseAdmin.addReplicationPeer(id, + ReplicationPeerConfig.newBuilder().setClusterKey(ZKConfig.getZooKeeperClusterKey(CONF2)) + .setReplicationEndpointImpl(InterClusterReplicationEndpointForTest.class.getName()) + .build()); + + final int numEdits = totEdits; + Waiter.waitFor(CONF1, 30000, new Waiter.ExplainingPredicate() { + @Override + public boolean evaluate() throws Exception { + return InterClusterReplicationEndpointForTest.replicateCount.get() == numEdits; + } + + @Override + public String explainFailure() throws Exception { + String failure = "Failed to replicate all edits, expected = " + numEdits + " replicated = " + + InterClusterReplicationEndpointForTest.replicateCount.get(); + return failure; + } + }); + + hbaseAdmin.removeReplicationPeer("testInterClusterReplication"); + UTIL1.deleteTableData(tableName); + } + + @Test + public void testWALEntryFilterFromReplicationEndpoint() throws Exception { + ReplicationPeerConfig rpc = + ReplicationPeerConfig.newBuilder().setClusterKey(ZKConfig.getZooKeeperClusterKey(CONF1)) + .setReplicationEndpointImpl(ReplicationEndpointWithWALEntryFilter.class.getName()) + // test that we can create mutliple WALFilters reflectively + .putConfiguration(BaseReplicationEndpoint.REPLICATION_WALENTRYFILTER_CONFIG_KEY, + EverythingPassesWALEntryFilter.class.getName() + "," + + EverythingPassesWALEntryFilterSubclass.class.getName()) + .build(); + + hbaseAdmin.addReplicationPeer("testWALEntryFilterFromReplicationEndpoint", rpc); + // now replicate some data. 
+ try (Connection connection = ConnectionFactory.createConnection(CONF1)) { + doPut(connection, Bytes.toBytes("row1")); + doPut(connection, row); + doPut(connection, Bytes.toBytes("row2")); + } + + Waiter.waitFor(CONF1, 60000, new Waiter.Predicate() { + @Override + public boolean evaluate() throws Exception { + return ReplicationEndpointForTest.replicateCount.get() >= 1; + } + }); + + assertEquals(null, ReplicationEndpointWithWALEntryFilter.ex.get()); + // make sure our reflectively created filter is in the filter chain + assertTrue(EverythingPassesWALEntryFilter.hasPassedAnEntry()); + hbaseAdmin.removeReplicationPeer("testWALEntryFilterFromReplicationEndpoint"); + } + + @Test + public void testWALEntryFilterAddValidation() { + assertThrows(IOException.class, () -> { + ReplicationPeerConfig rpc = + ReplicationPeerConfig.newBuilder().setClusterKey(ZKConfig.getZooKeeperClusterKey(CONF1)) + .setReplicationEndpointImpl(ReplicationEndpointWithWALEntryFilter.class.getName()) + // test that we can create mutliple WALFilters reflectively + .putConfiguration(BaseReplicationEndpoint.REPLICATION_WALENTRYFILTER_CONFIG_KEY, + "IAmNotARealWalEntryFilter") + .build(); + hbaseAdmin.addReplicationPeer("testWALEntryFilterAddValidation", rpc); + }); + } + + @Test + public void testWALEntryFilterUpdateValidation() { + assertThrows(IOException.class, () -> { + ReplicationPeerConfig rpc = + ReplicationPeerConfig.newBuilder().setClusterKey(ZKConfig.getZooKeeperClusterKey(CONF1)) + .setReplicationEndpointImpl(ReplicationEndpointWithWALEntryFilter.class.getName()) + // test that we can create mutliple WALFilters reflectively + .putConfiguration(BaseReplicationEndpoint.REPLICATION_WALENTRYFILTER_CONFIG_KEY, + "IAmNotARealWalEntryFilter") + .build(); + hbaseAdmin.updateReplicationPeerConfig("testWALEntryFilterUpdateValidation", rpc); + }); + } + + @Test + public void testMetricsSourceBaseSourcePassThrough() { + /* + * The replication MetricsSource wraps a MetricsReplicationTableSourceImpl, + 
* MetricsReplicationSourceSourceImpl and a MetricsReplicationGlobalSourceSource, so that + * metrics get written to both namespaces. Both of those classes wrap a + * MetricsReplicationSourceImpl that implements BaseSource, which allows for custom JMX metrics. + * This test checks to make sure the BaseSource decorator logic on MetricsSource actually calls + * down through the two layers of wrapping to the actual BaseSource. + */ + String id = "id"; + DynamicMetricsRegistry mockRegistry = mock(DynamicMetricsRegistry.class); + MetricsReplicationSourceImpl singleRms = mock(MetricsReplicationSourceImpl.class); + when(singleRms.getMetricsRegistry()).thenReturn(mockRegistry); + MetricsReplicationSourceImpl globalRms = mock(MetricsReplicationSourceImpl.class); + when(globalRms.getMetricsRegistry()).thenReturn(mockRegistry); + + MetricsReplicationSourceSource singleSourceSource = + new MetricsReplicationSourceSourceImpl(singleRms, id); + MetricsReplicationGlobalSourceSource globalSourceSource = + new MetricsReplicationGlobalSourceSourceImpl(globalRms); + MetricsReplicationGlobalSourceSource spyglobalSourceSource = spy(globalSourceSource); + doNothing().when(spyglobalSourceSource).incrFailedRecoveryQueue(); + + Map singleSourceSourceByTable = new HashMap<>(); + MetricsSource source = + new MetricsSource(id, singleSourceSource, spyglobalSourceSource, singleSourceSourceByTable); + + String gaugeName = "gauge"; + String singleGaugeName = "source.id." + gaugeName; + String globalGaugeName = "source." + gaugeName; + long delta = 1; + String counterName = "counter"; + String singleCounterName = "source.id." + counterName; + String globalCounterName = "source." 
+ counterName; + long count = 2; + source.decGauge(gaugeName, delta); + source.getMetricsContext(); + source.getMetricsDescription(); + source.getMetricsJmxContext(); + source.getMetricsName(); + source.incCounters(counterName, count); + source.incGauge(gaugeName, delta); + source.init(); + source.removeMetric(gaugeName); + source.setGauge(gaugeName, delta); + source.updateHistogram(counterName, count); + source.incrFailedRecoveryQueue(); + + verify(singleRms).decGauge(singleGaugeName, delta); + verify(globalRms).decGauge(globalGaugeName, delta); + verify(globalRms).getMetricsContext(); + verify(globalRms).getMetricsJmxContext(); + verify(globalRms).getMetricsName(); + verify(singleRms).incCounters(singleCounterName, count); + verify(globalRms).incCounters(globalCounterName, count); + verify(singleRms).incGauge(singleGaugeName, delta); + verify(globalRms).incGauge(globalGaugeName, delta); + verify(globalRms).init(); + verify(singleRms).removeMetric(singleGaugeName); + verify(globalRms).removeMetric(globalGaugeName); + verify(singleRms).setGauge(singleGaugeName, delta); + verify(globalRms).setGauge(globalGaugeName, delta); + verify(singleRms).updateHistogram(singleCounterName, count); + verify(globalRms).updateHistogram(globalCounterName, count); + verify(spyglobalSourceSource).incrFailedRecoveryQueue(); + + // check singleSourceSourceByTable metrics. 
+ // singleSourceSourceByTable map entry will be created only + // after calling #setAgeOfLastShippedOpByTable + boolean containsRandomNewTable = + source.getSingleSourceSourceByTable().containsKey("RandomNewTable"); + assertEquals(false, containsRandomNewTable); + source.updateTableLevelMetrics(createWALEntriesWithSize("RandomNewTable")); + containsRandomNewTable = source.getSingleSourceSourceByTable().containsKey("RandomNewTable"); + assertEquals(true, containsRandomNewTable); + MetricsReplicationTableSource msr = source.getSingleSourceSourceByTable().get("RandomNewTable"); + + // age should be greater than zero we created the entry with time in the past + assertTrue(msr.getLastShippedAge() > 0); + assertTrue(msr.getShippedBytes() > 0); + + } + + private List> createWALEntriesWithSize(String tableName) { + List> walEntriesWithSize = new ArrayList<>(); + byte[] a = new byte[] { 'a' }; + Entry entry = createEntry(tableName, null, a); + walEntriesWithSize.add(new Pair<>(entry, 10L)); + return walEntriesWithSize; + } + + private Entry createEntry(String tableName, TreeMap scopes, byte[]... 
kvs) { + WALKeyImpl key1 = new WALKeyImpl(new byte[0], TableName.valueOf(tableName), + EnvironmentEdgeManager.currentTime() - 1L, scopes); + WALEdit edit1 = new WALEdit(); + + for (byte[] kv : kvs) { + WALEditInternalHelper.addExtendedCell(edit1, new KeyValue(kv, kv, kv)); + } + return new Entry(key1, edit1); + } + + private void doPut(byte[] row) throws IOException { + try (Connection connection = ConnectionFactory.createConnection(CONF1)) { + doPut(connection, row); + } + } + + private void doPut(final Connection connection, final byte[] row) throws IOException { + try (Table t = connection.getTable(tableName)) { + Put put = new Put(row); + put.addColumn(famName, row, row); + t.put(put); + } + } + + private static void doAssert(byte[] row) throws Exception { + if (ReplicationEndpointForTest.lastEntries == null) { + return; // first call + } + assertEquals(1, ReplicationEndpointForTest.lastEntries.size()); + List cells = ReplicationEndpointForTest.lastEntries.get(0).getEdit().getCells(); + assertEquals(1, cells.size()); + assertTrue(Bytes.equals(cells.get(0).getRowArray(), cells.get(0).getRowOffset(), + cells.get(0).getRowLength(), row, 0, row.length)); + } + + public static class ReplicationEndpointForTest extends BaseReplicationEndpoint { + static UUID uuid = UTIL1.getRandomUUID(); + static AtomicInteger contructedCount = new AtomicInteger(); + static AtomicInteger startedCount = new AtomicInteger(); + static AtomicInteger stoppedCount = new AtomicInteger(); + static AtomicInteger replicateCount = new AtomicInteger(); + static volatile List lastEntries = null; + + public ReplicationEndpointForTest() { + replicateCount.set(0); + contructedCount.incrementAndGet(); + } + + @Override + public UUID getPeerUUID() { + return uuid; + } + + @Override + public boolean replicate(ReplicateContext replicateContext) { + replicateCount.incrementAndGet(); + lastEntries = new ArrayList<>(replicateContext.entries); + return true; + } + + @Override + public void start() { + 
startAsync(); + } + + @Override + public void stop() { + stopAsync(); + } + + @Override + protected void doStart() { + startedCount.incrementAndGet(); + notifyStarted(); + } + + @Override + protected void doStop() { + stoppedCount.incrementAndGet(); + notifyStopped(); + } + + @Override + public boolean canReplicateToSameCluster() { + return true; + } + } + + /** + * Not used by unit tests, helpful for manual testing with replication. + *

+ * Snippet for `hbase shell`: + * + *

+   * create 't', 'f'
+   * add_peer '1', ENDPOINT_CLASSNAME => 'org.apache.hadoop.hbase.replication.' + \
+   *    'TestReplicationEndpoint$SleepingReplicationEndpointForTest'
+   * alter 't', {NAME=>'f', REPLICATION_SCOPE=>1}
+   * 
+ */ + public static class SleepingReplicationEndpointForTest extends ReplicationEndpointForTest { + private long duration; + + public SleepingReplicationEndpointForTest() { + super(); + } + + @Override + public void init(Context context) throws IOException { + super.init(context); + if (this.ctx != null) { + duration = this.ctx.getConfiguration() + .getLong("hbase.test.sleep.replication.endpoint.duration.millis", 5000L); + } + } + + @Override + public boolean replicate(ReplicateContext context) { + try { + Thread.sleep(duration); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + return false; + } + return super.replicate(context); + } + } + + public static class InterClusterReplicationEndpointForTest + extends HBaseInterClusterReplicationEndpoint { + + static AtomicInteger replicateCount = new AtomicInteger(); + static boolean failedOnce; + + public InterClusterReplicationEndpointForTest() { + replicateCount.set(0); + } + + @Override + public boolean replicate(ReplicateContext replicateContext) { + boolean success = super.replicate(replicateContext); + if (success) { + replicateCount.addAndGet(replicateContext.entries.size()); + } + return success; + } + + @Override + protected CompletableFuture asyncReplicate(List entries, int ordinal, + int timeout) { + // Fail only once, we don't want to slow down the test. 
+ if (failedOnce) { + return CompletableFuture.completedFuture(ordinal); + } else { + failedOnce = true; + CompletableFuture future = new CompletableFuture(); + future.completeExceptionally(new IOException("Sample Exception: Failed to replicate.")); + return future; + } + } + } + + public static class ReplicationEndpointReturningFalse extends ReplicationEndpointForTest { + static int COUNT = 10; + static AtomicReference ex = new AtomicReference<>(null); + static AtomicBoolean replicated = new AtomicBoolean(false); + + @Override + public boolean replicate(ReplicateContext replicateContext) { + try { + // check row + doAssert(row); + } catch (Exception e) { + ex.set(e); + } + + super.replicate(replicateContext); + LOG.info("Replicated " + Bytes.toString(row) + ", count=" + replicateCount.get()); + + replicated.set(replicateCount.get() > COUNT); // first 10 times, we return false + return replicated.get(); + } + } + + // return a WALEntry filter which only accepts "row", but not other rows + public static class ReplicationEndpointWithWALEntryFilter extends ReplicationEndpointForTest { + static AtomicReference ex = new AtomicReference<>(null); + + @Override + public boolean replicate(ReplicateContext replicateContext) { + try { + super.replicate(replicateContext); + doAssert(row); + } catch (Exception e) { + ex.set(e); + } + return true; + } + + @Override + public WALEntryFilter getWALEntryfilter() { + return new ChainWALEntryFilter(super.getWALEntryfilter(), new WALEntryFilter() { + @Override + public Entry filter(Entry entry) { + ArrayList cells = entry.getEdit().getCells(); + int size = cells.size(); + for (int i = size - 1; i >= 0; i--) { + Cell cell = cells.get(i); + if ( + !Bytes.equals(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength(), row, 0, + row.length) + ) { + cells.remove(i); + } + } + return entry; + } + }); + } + } + + public static class EverythingPassesWALEntryFilter implements WALEntryFilter { + private static boolean passedEntry = false; + 
+ @Override + public Entry filter(Entry entry) { + passedEntry = true; + return entry; + } + + public static boolean hasPassedAnEntry() { + return passedEntry; + } + } + + public static class EverythingPassesWALEntryFilterSubclass + extends EverythingPassesWALEntryFilter { + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationKillMasterRS.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationKillMasterRS.java index 7720d42a6edc..78123ec9c208 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationKillMasterRS.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationKillMasterRS.java @@ -17,31 +17,40 @@ */ package org.apache.hadoop.hbase.replication; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.testclassification.ReplicationTests; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; /** * Runs the TestReplicationKillRS test and selects the RS to kill in the master cluster Do not add * other tests in this class. 
*/ -@Category({ ReplicationTests.class, LargeTests.class }) +@Tag(ReplicationTests.TAG) +@Tag(LargeTests.TAG) public class TestReplicationKillMasterRS extends TestReplicationKillRS { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestReplicationKillMasterRS.class); - - @BeforeClass public static void setUpBeforeClass() throws Exception { NUM_SLAVES1 = 2; TestReplicationBase.setUpBeforeClass(); } + public static void tearDownAfterClass() throws Exception { + TestReplicationBase.tearDownAfterClass(); + } + + @BeforeEach + public void setUp() throws Exception { + setUpBase(); + } + + @AfterEach + public void tearDown() throws Exception { + tearDownBase(); + } + @Test public void killOneMasterRS() throws Exception { loadTableAndKillRS(UTIL1); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationKillMasterRSCompressed.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationKillMasterRSCompressed.java index 7140d39adbfe..ab6d299daca9 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationKillMasterRSCompressed.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationKillMasterRSCompressed.java @@ -17,28 +17,28 @@ */ package org.apache.hadoop.hbase.replication; -import org.apache.hadoop.hbase.HBaseClassTestRule; -import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.testclassification.ReplicationTests; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Tag; /** * Run the same test as TestReplicationKillMasterRS but with WAL compression enabled Do not add * other tests in this class. 
*/ -@Category({ ReplicationTests.class, LargeTests.class }) -public class TestReplicationKillMasterRSCompressed extends TestReplicationKillMasterRS { +@Tag(ReplicationTests.TAG) +@Tag(LargeTests.TAG) +public class TestReplicationKillMasterRSCompressed + extends TestReplicationKillMasterRSCompressedBase { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestReplicationKillMasterRSCompressed.class); + @BeforeAll + public static void beforeClass() throws Exception { + setUpBeforeClass(); + } - @BeforeClass - public static void setUpBeforeClass() throws Exception { - CONF1.setBoolean(HConstants.ENABLE_WAL_COMPRESSION, true); - TestReplicationKillMasterRS.setUpBeforeClass(); + @AfterAll + public static void afterClass() throws Exception { + tearDownAfterClass(); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationKillMasterRSCompressedBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationKillMasterRSCompressedBase.java new file mode 100644 index 000000000000..af618350c708 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationKillMasterRSCompressedBase.java @@ -0,0 +1,43 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.replication; + +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.testclassification.LargeTests; +import org.apache.hadoop.hbase.testclassification.ReplicationTests; +import org.junit.jupiter.api.Tag; + +/** + * Abstract base class for TestReplicationKillMasterRSCompressed tests. Subclasses must call + * {@link #setUpBeforeClass()} and {@link #tearDownAfterClass()} in their + * {@code @BeforeAll} and {@code @AfterAll} methods respectively. + */ +@Tag(ReplicationTests.TAG) +@Tag(LargeTests.TAG) +public abstract class TestReplicationKillMasterRSCompressedBase + extends TestReplicationKillMasterRS { + + public static void setUpBeforeClass() throws Exception { + CONF1.setBoolean(HConstants.ENABLE_WAL_COMPRESSION, true); + TestReplicationKillMasterRS.setUpBeforeClass(); + } + + public static void tearDownAfterClass() throws Exception { + TestReplicationKillMasterRS.tearDownAfterClass(); + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationKillMasterRSWithSeparateOldWALs.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationKillMasterRSWithSeparateOldWALs.java index a5e19c9f4432..40409074967f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationKillMasterRSWithSeparateOldWALs.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationKillMasterRSWithSeparateOldWALs.java @@ -17,24 +17,25 @@ */ package org.apache.hadoop.hbase.replication; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.testclassification.ReplicationTests; import org.apache.hadoop.hbase.wal.AbstractFSWALProvider; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import 
org.junit.experimental.categories.Category; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Tag; -@Category({ ReplicationTests.class, LargeTests.class }) +@Tag(ReplicationTests.TAG) +@Tag(LargeTests.TAG) public class TestReplicationKillMasterRSWithSeparateOldWALs extends TestReplicationKillMasterRS { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestReplicationKillMasterRSWithSeparateOldWALs.class); - - @BeforeClass + @BeforeAll public static void setUpBeforeClass() throws Exception { CONF1.setBoolean(AbstractFSWALProvider.SEPARATE_OLDLOGDIR, true); TestReplicationKillMasterRS.setUpBeforeClass(); } + + @AfterAll + public static void tearDownAfterClass() throws Exception { + TestReplicationKillMasterRS.tearDownAfterClass(); + } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationKillRS.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationKillRS.java index 9a4819b2c28f..85df0c789535 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationKillRS.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationKillRS.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hbase.replication; -import static org.junit.Assert.fail; +import static org.junit.jupiter.api.Assertions.fail; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.UnknownScannerException; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationKillSlaveRS.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationKillSlaveRS.java index 6505a4a191d9..560ba8edfc8d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationKillSlaveRS.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationKillSlaveRS.java @@ -17,31 
+17,40 @@ */ package org.apache.hadoop.hbase.replication; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.testclassification.ReplicationTests; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; /** * Runs the TestReplicationKillRS test and selects the RS to kill in the slave cluster Do not add * other tests in this class. */ -@Category({ ReplicationTests.class, LargeTests.class }) +@Tag(ReplicationTests.TAG) +@Tag(LargeTests.TAG) public class TestReplicationKillSlaveRS extends TestReplicationKillRS { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestReplicationKillSlaveRS.class); - - @BeforeClass public static void setUpBeforeClass() throws Exception { NUM_SLAVES2 = 2; TestReplicationBase.setUpBeforeClass(); } + public static void tearDownAfterClass() throws Exception { + TestReplicationBase.tearDownAfterClass(); + } + + @BeforeEach + public void setUp() throws Exception { + setUpBase(); + } + + @AfterEach + public void tearDown() throws Exception { + tearDownBase(); + } + @Test public void killOneSlaveRS() throws Exception { loadTableAndKillRS(UTIL2); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationKillSlaveRSWithSeparateOldWALs.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationKillSlaveRSWithSeparateOldWALs.java index 3b0766f6ed9a..056bffe16a25 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationKillSlaveRSWithSeparateOldWALs.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationKillSlaveRSWithSeparateOldWALs.java @@ -17,24 +17,25 @@ 
*/ package org.apache.hadoop.hbase.replication; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.testclassification.ReplicationTests; import org.apache.hadoop.hbase.wal.AbstractFSWALProvider; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Tag; -@Category({ ReplicationTests.class, LargeTests.class }) +@Tag(ReplicationTests.TAG) +@Tag(LargeTests.TAG) public class TestReplicationKillSlaveRSWithSeparateOldWALs extends TestReplicationKillSlaveRS { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestReplicationKillSlaveRSWithSeparateOldWALs.class); - - @BeforeClass + @BeforeAll public static void setUpBeforeClass() throws Exception { CONF1.setBoolean(AbstractFSWALProvider.SEPARATE_OLDLOGDIR, true); TestReplicationKillSlaveRS.setUpBeforeClass(); } + + @AfterAll + public static void tearDownAfterClass() throws Exception { + TestReplicationKillSlaveRS.tearDownAfterClass(); + } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationMetricsforUI.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationMetricsforUI.java index a5dc1490fc65..8082d459817f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationMetricsforUI.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationMetricsforUI.java @@ -17,9 +17,12 @@ */ package org.apache.hadoop.hbase.replication; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; + import java.util.Map; import org.apache.hadoop.fs.Path; -import 
org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.Get; import org.apache.hadoop.hbase.client.Put; @@ -28,18 +31,38 @@ import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.ReplicationTests; import org.apache.hadoop.hbase.util.Bytes; -import org.junit.Assert; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; -@Category({ ReplicationTests.class, MediumTests.class }) +@Tag(ReplicationTests.TAG) +@Tag(MediumTests.TAG) public class TestReplicationMetricsforUI extends TestReplicationBase { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestReplicationMetricsforUI.class); private static final byte[] qualName = Bytes.toBytes("q"); + @BeforeAll + public static void setUpBeforeClass() throws Exception { + TestReplicationBase.setUpBeforeClass(); + } + + @AfterAll + public static void tearDownAfterClass() throws Exception { + TestReplicationBase.tearDownAfterClass(); + } + + @BeforeEach + public void setUp() throws Exception { + setUpBase(); + } + + @AfterEach + public void tearDown() throws Exception { + tearDownBase(); + } + @Test public void testReplicationMetrics() throws Exception { try (Admin hbaseAdmin = UTIL1.getConnection().getAdmin()) { @@ -54,13 +77,13 @@ public void testReplicationMetrics() throws Exception { Thread.sleep(5000); HRegionServer rs = UTIL1.getRSForFirstRegionInTable(tableName); Map metrics = rs.getWalGroupsReplicationStatus(); - Assert.assertEquals("metric size ", 1, metrics.size()); + assertEquals(1, metrics.size(), "metric size "); long lastPosition = 0; for (Map.Entry metric : 
metrics.entrySet()) { - Assert.assertEquals("peerId", PEER_ID2, metric.getValue().getPeerId()); - Assert.assertEquals("queue length", 1, metric.getValue().getQueueSize()); - Assert.assertEquals("replication delay", 0, metric.getValue().getReplicationDelay()); - Assert.assertTrue("current position >= 0", metric.getValue().getCurrentPosition() >= 0); + assertEquals(PEER_ID2, metric.getValue().getPeerId(), "peerId"); + assertEquals(1, metric.getValue().getQueueSize(), "queue length"); + assertEquals(0, metric.getValue().getReplicationDelay(), "replication delay"); + assertTrue(metric.getValue().getCurrentPosition() >= 0, "current position >= 0"); lastPosition = metric.getValue().getCurrentPosition(); } for (int i = 0; i < NB_ROWS_IN_BATCH; i++) { @@ -78,11 +101,11 @@ public void testReplicationMetrics() throws Exception { Path lastPath = null; for (Map.Entry metric : metrics.entrySet()) { lastPath = metric.getValue().getCurrentPath(); - Assert.assertEquals("peerId", PEER_ID2, metric.getValue().getPeerId()); - Assert.assertTrue("age of Last Shipped Op should be > 0 ", - metric.getValue().getAgeOfLastShippedOp() > 0); - Assert.assertTrue("current position should > last position", - metric.getValue().getCurrentPosition() - lastPosition > 0); + assertEquals(PEER_ID2, metric.getValue().getPeerId(), "peerId"); + assertTrue(metric.getValue().getAgeOfLastShippedOp() > 0, + "age of Last Shipped Op should be > 0 "); + assertTrue(metric.getValue().getCurrentPosition() - lastPosition > 0, + "current position should > last position"); lastPosition = metric.getValue().getCurrentPosition(); } @@ -98,10 +121,10 @@ public void testReplicationMetrics() throws Exception { Thread.sleep(5000); metrics = rs.getWalGroupsReplicationStatus(); for (Map.Entry metric : metrics.entrySet()) { - Assert.assertEquals("replication delay", 0, metric.getValue().getReplicationDelay()); - Assert.assertTrue("current position should < last position", - metric.getValue().getCurrentPosition() < lastPosition); 
- Assert.assertNotEquals("current path", lastPath, metric.getValue().getCurrentPath()); + assertEquals(0, metric.getValue().getReplicationDelay(), "replication delay"); + assertTrue(metric.getValue().getCurrentPosition() < lastPosition, + "current position should < last position"); + assertNotEquals(lastPath, metric.getValue().getCurrentPath(), "current path"); } } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSmallTests.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSmallTests.java index 06fdc47fa3ae..8e629e383996 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSmallTests.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSmallTests.java @@ -17,18 +17,19 @@ */ package org.apache.hadoop.hbase.replication; -import static org.junit.Assert.assertArrayEquals; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; +import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; import java.util.ArrayList; import java.util.List; import java.util.NavigableMap; import java.util.TreeMap; +import java.util.stream.Stream; import org.apache.hadoop.hbase.CellUtil; -import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HBaseParameterizedTestTemplate; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.TableName; @@ -53,52 +54,66 @@ import org.apache.hadoop.hbase.wal.WALEdit; import org.apache.hadoop.hbase.wal.WALEditInternalHelper; import 
org.apache.hadoop.hbase.wal.WALKeyImpl; -import org.junit.Before; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -import org.junit.runners.Parameterized.Parameter; -import org.junit.runners.Parameterized.Parameters; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.TestTemplate; +import org.junit.jupiter.params.provider.Arguments; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList; -@RunWith(Parameterized.class) -@Category({ ReplicationTests.class, LargeTests.class }) +@Tag(ReplicationTests.TAG) +@Tag(LargeTests.TAG) +@HBaseParameterizedTestTemplate(name = "{index}: serialPeer={0}") public class TestReplicationSmallTests extends TestReplicationBase { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestReplicationSmallTests.class); - private static final Logger LOG = LoggerFactory.getLogger(TestReplicationSmallTests.class); private static final String PEER_ID = "2"; - @Parameter - public boolean serialPeer; + private boolean serialPeer; + + public TestReplicationSmallTests(boolean serialPeer) { + this.serialPeer = serialPeer; + } @Override protected boolean isSerialPeer() { return serialPeer; } - @Parameters(name = "{index}: serialPeer={0}") - public static List parameters() { - return ImmutableList.of(true, false); + public static Stream parameters() { + return ImmutableList.of(true, false).stream().map(Arguments::of); + } + + @BeforeAll + public static void setUpBeforeClass() throws Exception { + TestReplicationBase.setUpBeforeClass(); } - @Before + @AfterAll + public static void tearDownAfterClass() throws Exception { + 
TestReplicationBase.tearDownAfterClass(); + } + + @BeforeEach public void setUp() throws Exception { + setUpBase(); cleanUp(); } + @AfterEach + public void tearDown() throws Exception { + tearDownBase(); + } + /** * Verify that version and column delete marker types are replicated correctly. */ - @Test + @TestTemplate public void testDeleteTypes() throws Exception { LOG.info("testDeleteTypes"); final byte[] v1 = Bytes.toBytes("v1"); @@ -184,7 +199,7 @@ public void testDeleteTypes() throws Exception { /** * Add a row, check it's replicated, delete it, check's gone */ - @Test + @TestTemplate public void testSimplePutDelete() throws Exception { LOG.info("testSimplePutDelete"); runSimplePutDeleteTest(); @@ -193,7 +208,7 @@ public void testSimplePutDelete() throws Exception { /** * Try a small batch upload using the write buffer, check it's replicated */ - @Test + @TestTemplate public void testSmallBatch() throws Exception { LOG.info("testSmallBatch"); runSmallBatchTest(); @@ -203,7 +218,7 @@ public void testSmallBatch() throws Exception { * Test disable/enable replication, trying to insert, make sure nothing's replicated, enable it, * the insert should be replicated */ - @Test + @TestTemplate public void testDisableEnable() throws Exception { // Test disabling replication hbaseAdmin.disableReplicationPeer(PEER_ID); @@ -243,7 +258,7 @@ public void testDisableEnable() throws Exception { /** * Removes and re-add a peer cluster */ - @Test + @TestTemplate public void testAddAndRemoveClusters() throws Exception { LOG.info("testAddAndRemoveClusters"); hbaseAdmin.removeReplicationPeer(PEER_ID); @@ -296,7 +311,7 @@ public void testAddAndRemoveClusters() throws Exception { * Do a more intense version testSmallBatch, one that will trigger wal rolling and other * non-trivial code paths */ - @Test + @TestTemplate public void testLoading() throws Exception { LOG.info("Writing out rows to table1 in testLoading"); List puts = new ArrayList<>(NB_ROWS_IN_BIG_BATCH); @@ -357,7 +372,7 @@ 
public void testLoading() throws Exception { * Create two new Tables with colfamilies enabled for replication then run * {@link Admin#listReplicatedTableCFs()}. Finally verify the table:colfamilies. */ - @Test + @TestTemplate public void testVerifyListReplicatedTable() throws Exception { LOG.info("testVerifyListReplicatedTable"); @@ -390,7 +405,7 @@ public void testVerifyListReplicatedTable() throws Exception { // check the matching result for (int i = 0; i < match.length; i++) { - assertTrue("listReplicated() does not match table " + i, (match[i] == 1)); + assertTrue((match[i] == 1), "listReplicated() does not match table " + i); } // drop tables @@ -406,7 +421,7 @@ public void testVerifyListReplicatedTable() throws Exception { /** * Test for HBase-15259 WALEdits under replay will also be replicated */ - @Test + @TestTemplate public void testReplicationInReplay() throws Exception { final TableName tableName = htable1.getName(); @@ -448,7 +463,7 @@ public void testReplicationInReplay() throws Exception { /** * Test for HBASE-27448 Add an admin method to get replication enabled state */ - @Test + @TestTemplate public void testGetReplicationPeerState() throws Exception { // Test disable replication peer diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSmallTestsSync.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSmallTestsSync.java index ec36039a8425..afa077b45204 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSmallTestsSync.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSmallTestsSync.java @@ -17,21 +17,17 @@ */ package org.apache.hadoop.hbase.replication; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.testclassification.ReplicationTests; -import org.junit.ClassRule; -import 
org.junit.experimental.categories.Category; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; +import org.junit.jupiter.api.Tag; -@RunWith(Parameterized.class) -@Category({ ReplicationTests.class, LargeTests.class }) +@Tag(ReplicationTests.TAG) +@Tag(LargeTests.TAG) public class TestReplicationSmallTestsSync extends TestReplicationSmallTests { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestReplicationSmallTestsSync.class); + public TestReplicationSmallTestsSync(boolean serialPeer) { + super(serialPeer); + } @Override protected boolean isSyncPeer() { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStatus.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStatus.java index 0ad2fd5acea4..e8a13353ad51 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStatus.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStatus.java @@ -17,15 +17,14 @@ */ package org.apache.hadoop.hbase.replication; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; import java.io.IOException; import java.util.EnumSet; import java.util.List; import org.apache.hadoop.hbase.ClusterMetrics; import org.apache.hadoop.hbase.ClusterMetrics.Option; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.ServerMetrics; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.Waiter; @@ -37,19 +36,39 @@ import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.JVMClusterUtil; import org.apache.hadoop.hbase.util.Threads; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import 
org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({ ReplicationTests.class, MediumTests.class }) +@Tag(ReplicationTests.TAG) +@Tag(MediumTests.TAG) public class TestReplicationStatus extends TestReplicationBase { private static final Logger LOG = LoggerFactory.getLogger(TestReplicationStatus.class); - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestReplicationStatus.class); + @BeforeAll + public static void setUpBeforeClass() throws Exception { + TestReplicationBase.setUpBeforeClass(); + } + + @AfterAll + public static void tearDownAfterClass() throws Exception { + TestReplicationBase.tearDownAfterClass(); + } + + @BeforeEach + public void setUp() throws Exception { + setUpBase(); + } + + @AfterEach + public void tearDown() throws Exception { + tearDownBase(); + } static void insertRowsOnSource() throws IOException { final byte[] qualName = Bytes.toBytes("q"); @@ -91,21 +110,21 @@ public boolean evaluate() throws Exception { for (JVMClusterUtil.RegionServerThread thread : UTIL1.getHBaseCluster() .getRegionServerThreads()) { ServerName server = thread.getRegionServer().getServerName(); - assertTrue("" + server, metrics.getLiveServerMetrics().containsKey(server)); + assertTrue(metrics.getLiveServerMetrics().containsKey(server), "" + server); ServerMetrics sm = metrics.getLiveServerMetrics().get(server); List rLoadSourceList = sm.getReplicationLoadSourceList(); ReplicationLoadSink rLoadSink = sm.getReplicationLoadSink(); // check SourceList only has one entry, because only has one peer - assertEquals("Failed to get ReplicationLoadSourceList " + rLoadSourceList + ", " + server, 1, - rLoadSourceList.size()); + assertEquals(1, rLoadSourceList.size(), + "Failed to get 
ReplicationLoadSourceList " + rLoadSourceList + ", " + server); assertEquals(PEER_ID2, rLoadSourceList.get(0).getPeerID()); // check Sink exist only as it is difficult to verify the value on the fly - assertTrue("failed to get ReplicationLoadSink.AgeOfLastShippedOp ", - (rLoadSink.getAgeOfLastAppliedOp() >= 0)); - assertTrue("failed to get ReplicationLoadSink.TimeStampsOfLastAppliedOp ", - (rLoadSink.getTimestampsOfLastAppliedOp() >= 0)); + assertTrue((rLoadSink.getAgeOfLastAppliedOp() >= 0), + "failed to get ReplicationLoadSink.AgeOfLastAppliedOp "); + assertTrue((rLoadSink.getTimestampsOfLastAppliedOp() >= 0), + "failed to get ReplicationLoadSink.TimeStampsOfLastAppliedOp "); } // Stop rs1, then the queue of rs1 will be transfered to rs0 @@ -122,7 +141,7 @@ public boolean evaluate() throws Exception { List rLoadSourceList = waitOnMetricsReport(1, server); // The remaining server should now have two queues -- the original and then the one that was // added because of failover. The original should still be PEER_ID2 though. 
- assertEquals("Failed ReplicationLoadSourceList " + rLoadSourceList, 2, rLoadSourceList.size()); + assertEquals(2, rLoadSourceList.size(), "Failed ReplicationLoadSourceList " + rLoadSourceList); assertEquals(PEER_ID2, rLoadSourceList.get(0).getPeerID()); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStatusAfterLagging.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStatusAfterLagging.java index c761078dfab3..b67d2551f34b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStatusAfterLagging.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStatusAfterLagging.java @@ -17,14 +17,13 @@ */ package org.apache.hadoop.hbase.replication; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; import java.util.EnumSet; import java.util.List; import org.apache.hadoop.hbase.ClusterMetrics; import org.apache.hadoop.hbase.ClusterMetrics.Option; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.client.Admin; @@ -32,16 +31,36 @@ import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.ReplicationTests; import org.apache.hadoop.hbase.util.Bytes; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; -@Category({ ReplicationTests.class, MediumTests.class }) +@Tag(ReplicationTests.TAG) +@Tag(MediumTests.TAG) 
public class TestReplicationStatusAfterLagging extends TestReplicationBase { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestReplicationStatusAfterLagging.class); + @BeforeAll + public static void setUpBeforeClass() throws Exception { + TestReplicationBase.setUpBeforeClass(); + } + + @AfterAll + public static void tearDownAfterClass() throws Exception { + TestReplicationBase.tearDownAfterClass(); + } + + @BeforeEach + public void setUp() throws Exception { + setUpBase(); + } + + @AfterEach + public void tearDown() throws Exception { + tearDownBase(); + } @Override protected String getClusterKey(HBaseTestingUtil util) throws Exception { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStatusBothNormalAndRecoveryLagging.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStatusBothNormalAndRecoveryLagging.java index de19d0f5f4a2..da14b1f1dc4d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStatusBothNormalAndRecoveryLagging.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStatusBothNormalAndRecoveryLagging.java @@ -17,30 +17,49 @@ */ package org.apache.hadoop.hbase.replication; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; import java.util.EnumSet; import java.util.List; import org.apache.hadoop.hbase.ClusterMetrics; import org.apache.hadoop.hbase.ClusterMetrics.Option; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.ReplicationTests; import 
org.apache.hadoop.hbase.util.Bytes; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; -@Category({ ReplicationTests.class, MediumTests.class }) +@Tag(ReplicationTests.TAG) +@Tag(MediumTests.TAG) public class TestReplicationStatusBothNormalAndRecoveryLagging extends TestReplicationBase { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestReplicationStatusBothNormalAndRecoveryLagging.class); + @BeforeAll + public static void setUpBeforeClass() throws Exception { + TestReplicationBase.setUpBeforeClass(); + } + + @AfterAll + public static void tearDownAfterClass() throws Exception { + TestReplicationBase.tearDownAfterClass(); + } + + @BeforeEach + public void setUp() throws Exception { + setUpBase(); + } + + @AfterEach + public void tearDown() throws Exception { + tearDownBase(); + } @Test public void testReplicationStatusBothNormalAndRecoveryLagging() throws Exception { @@ -83,7 +102,7 @@ public void testReplicationStatusBothNormalAndRecoveryLagging() throws Exception assertEquals(0, loadSource.getTimestampOfLastShippedOp()); assertTrue(loadSource.getReplicationLag() > 0); } - assertTrue("No normal queue found.", foundNormal); - assertTrue("No recovery queue found.", foundRecovery); + assertTrue(foundNormal, "No normal queue found."); + assertTrue(foundRecovery, "No recovery queue found."); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStatusSink.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStatusSink.java index 34dcf2329c46..07ed48a9f166 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStatusSink.java +++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStatusSink.java @@ -17,27 +17,48 @@ */ package org.apache.hadoop.hbase.replication; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotEquals; + import java.io.IOException; import java.util.EnumSet; import org.apache.hadoop.hbase.ClusterMetrics; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.ServerMetrics; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.Waiter; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.ReplicationTests; -import org.junit.Assert; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; -@Category({ ReplicationTests.class, MediumTests.class }) +@Tag(ReplicationTests.TAG) +@Tag(MediumTests.TAG) public class TestReplicationStatusSink extends TestReplicationBase { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestReplicationStatusSink.class); + @BeforeAll + public static void setUpBeforeClass() throws Exception { + TestReplicationBase.setUpBeforeClass(); + } + + @AfterAll + public static void tearDownAfterClass() throws Exception { + TestReplicationBase.tearDownAfterClass(); + } + + @BeforeEach + public void setUp() throws Exception { + setUpBase(); + } + + @AfterEach + public void tearDown() throws Exception { + tearDownBase(); + } @Test public void testReplicationStatusSink() throws Exception { @@ -46,7 +67,7 @@ public void testReplicationStatusSink() throws Exception { ReplicationLoadSink loadSink = 
getLatestSinkMetric(admin, server); // First checks if status of timestamp of last applied op is same as RS start, since no edits // were replicated yet - Assert.assertEquals(loadSink.getTimestampStarted(), loadSink.getTimestampsOfLastAppliedOp()); + assertEquals(loadSink.getTimestampStarted(), loadSink.getTimestampsOfLastAppliedOp()); // now insert some rows on source, so that it gets delivered to target TestReplicationStatus.insertRowsOnSource(); long wait = @@ -54,7 +75,7 @@ public void testReplicationStatusSink() throws Exception { ReplicationLoadSink loadSink1 = getLatestSinkMetric(admin, server); return loadSink1.getTimestampsOfLastAppliedOp() > loadSink1.getTimestampStarted(); }); - Assert.assertNotEquals(-1, wait); + assertNotEquals(-1, wait); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStatusSourceStartedTargetStoppedNewOp.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStatusSourceStartedTargetStoppedNewOp.java index c9ef613a21f3..d5b1a3769786 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStatusSourceStartedTargetStoppedNewOp.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStatusSourceStartedTargetStoppedNewOp.java @@ -17,31 +17,50 @@ */ package org.apache.hadoop.hbase.replication; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; import java.util.EnumSet; import java.util.List; import org.apache.hadoop.hbase.ClusterMetrics; import org.apache.hadoop.hbase.ClusterMetrics.Option; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.client.Admin; import 
org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.ReplicationTests; import org.apache.hadoop.hbase.util.Bytes; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; -@Category({ ReplicationTests.class, MediumTests.class }) +@Tag(ReplicationTests.TAG) +@Tag(MediumTests.TAG) public class TestReplicationStatusSourceStartedTargetStoppedNewOp extends TestReplicationBase { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestReplicationStatusSourceStartedTargetStoppedNewOp.class); + @BeforeAll + public static void setUpBeforeClass() throws Exception { + TestReplicationBase.setUpBeforeClass(); + } + + @AfterAll + public static void tearDownAfterClass() throws Exception { + TestReplicationBase.tearDownAfterClass(); + } + + @BeforeEach + public void setUp() throws Exception { + setUpBase(); + } + + @AfterEach + public void tearDown() throws Exception { + tearDownBase(); + } @Test public void testReplicationStatusSourceStartedTargetStoppedNewOp() throws Exception { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStatusSourceStartedTargetStoppedNoOps.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStatusSourceStartedTargetStoppedNoOps.java index b3e52e858a7e..bd5bfa12cc7d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStatusSourceStartedTargetStoppedNoOps.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStatusSourceStartedTargetStoppedNoOps.java @@ -17,28 +17,47 @@ */ package 
org.apache.hadoop.hbase.replication; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; import java.util.EnumSet; import java.util.List; import org.apache.hadoop.hbase.ClusterMetrics; import org.apache.hadoop.hbase.ClusterMetrics.Option; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.ReplicationTests; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; -@Category({ ReplicationTests.class, MediumTests.class }) +@Tag(ReplicationTests.TAG) +@Tag(MediumTests.TAG) public class TestReplicationStatusSourceStartedTargetStoppedNoOps extends TestReplicationBase { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestReplicationStatusSourceStartedTargetStoppedNoOps.class); + @BeforeAll + public static void setUpBeforeClass() throws Exception { + TestReplicationBase.setUpBeforeClass(); + } + + @AfterAll + public static void tearDownAfterClass() throws Exception { + TestReplicationBase.tearDownAfterClass(); + } + + @BeforeEach + public void setUp() throws Exception { + setUpBase(); + } + + @AfterEach + public void tearDown() throws Exception { + tearDownBase(); + } @Test public void testReplicationStatusSourceStartedTargetStoppedNoOps() throws Exception { diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStatusSourceStartedTargetStoppedWithRecovery.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStatusSourceStartedTargetStoppedWithRecovery.java index 269fa1b38c70..5982b1bd9e3f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStatusSourceStartedTargetStoppedWithRecovery.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStatusSourceStartedTargetStoppedWithRecovery.java @@ -17,32 +17,51 @@ */ package org.apache.hadoop.hbase.replication; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; import java.util.EnumSet; import java.util.List; import org.apache.hadoop.hbase.ClusterMetrics; import org.apache.hadoop.hbase.ClusterMetrics.Option; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.ReplicationTests; import org.apache.hadoop.hbase.util.Bytes; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; -@Category({ ReplicationTests.class, MediumTests.class }) +@Tag(ReplicationTests.TAG) +@Tag(MediumTests.TAG) public class TestReplicationStatusSourceStartedTargetStoppedWithRecovery extends 
TestReplicationBase { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestReplicationStatusSourceStartedTargetStoppedWithRecovery.class); + @BeforeAll + public static void setUpBeforeClass() throws Exception { + TestReplicationBase.setUpBeforeClass(); + } + + @AfterAll + public static void tearDownAfterClass() throws Exception { + TestReplicationBase.tearDownAfterClass(); + } + + @BeforeEach + public void setUp() throws Exception { + setUpBase(); + } + + @AfterEach + public void tearDown() throws Exception { + tearDownBase(); + } @Test public void testReplicationStatusSourceStartedTargetStoppedWithRecovery() throws Exception { @@ -80,7 +99,7 @@ public void testReplicationStatusSourceStartedTargetStoppedWithRecovery() throws assertEquals(0, loadSource.getReplicationLag()); } } - assertTrue("No normal queue found.", foundNormal); - assertTrue("No recovery queue found.", foundRecovery); + assertTrue(foundNormal, "No normal queue found."); + assertTrue(foundRecovery, "No recovery queue found."); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSyncUpTool.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSyncUpTool.java index 66de933832b5..d1b64afa567b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSyncUpTool.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSyncUpTool.java @@ -21,8 +21,8 @@ import static org.apache.hadoop.hbase.replication.TestReplicationBase.NB_RETRIES; import static org.apache.hadoop.hbase.replication.TestReplicationBase.NB_ROWS_IN_BATCH; import static org.apache.hadoop.hbase.replication.TestReplicationBase.SLEEP_TIME; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; import 
java.util.ArrayList; import java.util.List; @@ -31,7 +31,6 @@ import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Put; @@ -40,19 +39,15 @@ import org.apache.hadoop.hbase.testclassification.ReplicationTests; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.CommonFSUtils; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({ ReplicationTests.class, LargeTests.class }) +@Tag(ReplicationTests.TAG) +@Tag(LargeTests.TAG) public class TestReplicationSyncUpTool extends TestReplicationSyncUpToolBase { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestReplicationSyncUpTool.class); - private static final Logger LOG = LoggerFactory.getLogger(TestReplicationSyncUpTool.class); /** @@ -159,8 +154,8 @@ private void putAndReplicateRows() throws Exception { for (int i = 0; i < NB_RETRIES; i++) { int rowCountHt1TargetAtPeer1 = countRows(ht1TargetAtPeer1); if (i == NB_RETRIES - 1) { - assertEquals("t1_syncup has 101 rows on source, and 100 on slave1", rowCountHt1Source - 1, - rowCountHt1TargetAtPeer1); + assertEquals(rowCountHt1Source - 1, rowCountHt1TargetAtPeer1, + "t1_syncup has 101 rows on source, and 100 on slave1"); } if (rowCountHt1Source - 1 == rowCountHt1TargetAtPeer1) { break; @@ -172,8 +167,8 @@ private void putAndReplicateRows() throws Exception { for (int i = 0; i < NB_RETRIES; i++) { int rowCountHt2TargetAtPeer1 = countRows(ht2TargetAtPeer1); if (i == NB_RETRIES - 1) { - assertEquals("t2_syncup has 201 rows on source, and 200 on slave1", rowCountHt2Source - 1, - 
rowCountHt2TargetAtPeer1); + assertEquals(rowCountHt2Source - 1, rowCountHt2TargetAtPeer1, + "t2_syncup has 201 rows on source, and 200 on slave1"); } if (rowCountHt2Source - 1 == rowCountHt2TargetAtPeer1) { break; @@ -203,12 +198,12 @@ private void mimicSyncUpAfterDelete() throws Exception { ht2Source.delete(list); int rowCount_ht1Source = countRows(ht1Source); - assertEquals("t1_syncup has 51 rows on source, after remove 50 of the replicated colfam", 51, - rowCount_ht1Source); + assertEquals(51, rowCount_ht1Source, + "t1_syncup has 51 rows on source, after remove 50 of the replicated colfam"); int rowCount_ht2Source = countRows(ht2Source); - assertEquals("t2_syncup has 101 rows on source, after remove 100 of the replicated colfam", 101, - rowCount_ht2Source); + assertEquals(101, rowCount_ht2Source, + "t2_syncup has 101 rows on source, after remove 100 of the replicated colfam"); List sourceRses = UTIL1.getHBaseCluster().getRegionServerThreads().stream() .map(rst -> rst.getRegionServer().getServerName()).collect(Collectors.toList()); shutDownSourceHBaseCluster(); @@ -219,18 +214,18 @@ private void mimicSyncUpAfterDelete() throws Exception { // before sync up int rowCountHt1TargetAtPeer1 = countRows(ht1TargetAtPeer1); int rowCountHt2TargetAtPeer1 = countRows(ht2TargetAtPeer1); - assertEquals("@Peer1 t1_syncup should still have 100 rows", 100, rowCountHt1TargetAtPeer1); - assertEquals("@Peer1 t2_syncup should still have 200 rows", 200, rowCountHt2TargetAtPeer1); + assertEquals(100, rowCountHt1TargetAtPeer1, "@Peer1 t1_syncup should still have 100 rows"); + assertEquals(200, rowCountHt2TargetAtPeer1, "@Peer1 t2_syncup should still have 200 rows"); syncUp(UTIL1); // After sync up rowCountHt1TargetAtPeer1 = countRows(ht1TargetAtPeer1); rowCountHt2TargetAtPeer1 = countRows(ht2TargetAtPeer1); - assertEquals("@Peer1 t1_syncup should be sync up and have 50 rows", 50, - rowCountHt1TargetAtPeer1); - assertEquals("@Peer1 t2_syncup should be sync up and have 100 rows", 100, - 
rowCountHt2TargetAtPeer1); + assertEquals(50, rowCountHt1TargetAtPeer1, + "@Peer1 t1_syncup should be sync up and have 50 rows"); + assertEquals(100, rowCountHt2TargetAtPeer1, + "@Peer1 t2_syncup should be sync up and have 100 rows"); // check we have recorded the dead region servers and also have an info file Path rootDir = CommonFSUtils.getRootDir(UTIL1.getConfiguration()); @@ -275,9 +270,9 @@ private void mimicSyncUpAfterPut() throws Exception { ht2Source.put(p); int rowCount_ht1Source = countRows(ht1Source); - assertEquals("t1_syncup has 102 rows on source", 102, rowCount_ht1Source); + assertEquals(102, rowCount_ht1Source, "t1_syncup has 102 rows on source"); int rowCount_ht2Source = countRows(ht2Source); - assertEquals("t2_syncup has 202 rows on source", 202, rowCount_ht2Source); + assertEquals(202, rowCount_ht2Source, "t2_syncup has 202 rows on source"); shutDownSourceHBaseCluster(); restartTargetHBaseCluster(1); @@ -287,20 +282,20 @@ private void mimicSyncUpAfterPut() throws Exception { // before sync up int rowCountHt1TargetAtPeer1 = countRows(ht1TargetAtPeer1); int rowCountHt2TargetAtPeer1 = countRows(ht2TargetAtPeer1); - assertEquals("@Peer1 t1_syncup should be NOT sync up and have 50 rows", 50, - rowCountHt1TargetAtPeer1); - assertEquals("@Peer1 t2_syncup should be NOT sync up and have 100 rows", 100, - rowCountHt2TargetAtPeer1); + assertEquals(50, rowCountHt1TargetAtPeer1, + "@Peer1 t1_syncup should be NOT sync up and have 50 rows"); + assertEquals(100, rowCountHt2TargetAtPeer1, + "@Peer1 t2_syncup should be NOT sync up and have 100 rows"); syncUp(UTIL1); // after sync up rowCountHt1TargetAtPeer1 = countRows(ht1TargetAtPeer1); rowCountHt2TargetAtPeer1 = countRows(ht2TargetAtPeer1); - assertEquals("@Peer1 t1_syncup should be sync up and have 100 rows", 100, - rowCountHt1TargetAtPeer1); - assertEquals("@Peer1 t2_syncup should be sync up and have 200 rows", 200, - rowCountHt2TargetAtPeer1); + assertEquals(100, rowCountHt1TargetAtPeer1, + "@Peer1 t1_syncup 
should be sync up and have 100 rows"); + assertEquals(200, rowCountHt2TargetAtPeer1, + "@Peer1 t2_syncup should be sync up and have 200 rows"); } /** @@ -324,8 +319,8 @@ public void testStartANewSyncUpToolAfterFailed() throws Exception { try { syncUp(UTIL1); } catch (Exception e) { - assertTrue("e should be a FileAlreadyExistsException", - (e instanceof FileAlreadyExistsException)); + assertTrue((e instanceof FileAlreadyExistsException), + "e should be a FileAlreadyExistsException"); } FileStatus fileStatus2 = fs.getFileStatus(replicationInfoPath); assertEquals(fileStatus1.getModificationTime(), fileStatus2.getModificationTime()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSyncUpToolBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSyncUpToolBase.java index 9455cf567276..9b1981d65ab6 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSyncUpToolBase.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSyncUpToolBase.java @@ -32,8 +32,8 @@ import org.apache.hadoop.hbase.replication.regionserver.ReplicationSyncUp; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.util.ToolRunner; -import org.junit.After; -import org.junit.Before; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; import org.apache.hbase.thirdparty.com.google.common.io.Closeables; @@ -66,7 +66,7 @@ public abstract class TestReplicationSyncUpToolBase { protected void customizeClusterConf(Configuration conf) { } - @Before + @BeforeEach public void setUp() throws Exception { customizeClusterConf(UTIL1.getConfiguration()); customizeClusterConf(UTIL2.getConfiguration()); @@ -96,7 +96,7 @@ public void setUp() throws Exception { .setColumnFamily(ColumnFamilyDescriptorBuilder.of(NO_REP_FAMILY)).build(); } - @After + @AfterEach public void tearDown() throws Exception { Closeables.close(ht1Source, 
true); Closeables.close(ht2Source, true); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSyncUpToolWithBulkLoadedData.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSyncUpToolWithBulkLoadedData.java index e9acc1bc45ee..71462655e741 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSyncUpToolWithBulkLoadedData.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSyncUpToolWithBulkLoadedData.java @@ -21,7 +21,7 @@ import static org.apache.hadoop.hbase.replication.TestReplicationBase.NB_RETRIES; import static org.apache.hadoop.hbase.replication.TestReplicationBase.SLEEP_TIME; import static org.apache.hadoop.hbase.replication.TestReplicationBase.row; -import static org.junit.Assert.assertEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; import java.io.IOException; import java.util.ArrayList; @@ -33,7 +33,6 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableName; @@ -45,19 +44,15 @@ import org.apache.hadoop.hbase.tool.BulkLoadHFiles; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.HFileTestUtil; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({ ReplicationTests.class, LargeTests.class }) +@Tag(ReplicationTests.TAG) +@Tag(LargeTests.TAG) public class TestReplicationSyncUpToolWithBulkLoadedData extends TestReplicationSyncUpToolBase { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - 
HBaseClassTestRule.forClass(TestReplicationSyncUpToolWithBulkLoadedData.class); - private static final Logger LOG = LoggerFactory.getLogger(TestReplicationSyncUpToolWithBulkLoadedData.class); @@ -128,12 +123,12 @@ private void mimicSyncUpAfterBulkLoad(Iterator randomHFileRangeListItera loadAndReplicateHFiles(false, randomHFileRangeListIterator); int rowCount_ht1Source = countRows(ht1Source); - assertEquals("t1_syncup has 206 rows on source, after bulk load of another 103 hfiles", 206, - rowCount_ht1Source); + assertEquals(206, rowCount_ht1Source, + "t1_syncup has 206 rows on source, after bulk load of another 103 hfiles"); int rowCount_ht2Source = countRows(ht2Source); - assertEquals("t2_syncup has 406 rows on source, after bulk load of another 203 hfiles", 406, - rowCount_ht2Source); + assertEquals(406, rowCount_ht2Source, + "t2_syncup has 406 rows on source, after bulk load of another 203 hfiles"); shutDownSourceHBaseCluster(); restartTargetHBaseCluster(1); @@ -143,8 +138,8 @@ private void mimicSyncUpAfterBulkLoad(Iterator randomHFileRangeListItera // Before sync up int rowCountHt1TargetAtPeer1 = countRows(ht1TargetAtPeer1); int rowCountHt2TargetAtPeer1 = countRows(ht2TargetAtPeer1); - assertEquals("@Peer1 t1_syncup should still have 100 rows", 100, rowCountHt1TargetAtPeer1); - assertEquals("@Peer1 t2_syncup should still have 200 rows", 200, rowCountHt2TargetAtPeer1); + assertEquals(100, rowCountHt1TargetAtPeer1, "@Peer1 t1_syncup should still have 100 rows"); + assertEquals(200, rowCountHt2TargetAtPeer1, "@Peer1 t2_syncup should still have 200 rows"); // Run sync up tool syncUp(UTIL1); @@ -152,10 +147,10 @@ private void mimicSyncUpAfterBulkLoad(Iterator randomHFileRangeListItera // After syun up rowCountHt1TargetAtPeer1 = countRows(ht1TargetAtPeer1); rowCountHt2TargetAtPeer1 = countRows(ht2TargetAtPeer1); - assertEquals("@Peer1 t1_syncup should be sync up and have 200 rows", 200, - rowCountHt1TargetAtPeer1); - assertEquals("@Peer1 t2_syncup should be sync up and 
have 400 rows", 400, - rowCountHt2TargetAtPeer1); + assertEquals(200, rowCountHt1TargetAtPeer1, + "@Peer1 t1_syncup should be sync up and have 200 rows"); + assertEquals(400, rowCountHt2TargetAtPeer1, + "@Peer1 t2_syncup should be sync up and have 400 rows"); } private void loadAndReplicateHFiles(boolean verifyReplicationOnSlave, @@ -253,7 +248,7 @@ private void wait(Table target, int expectedCount, String msg) for (int i = 0; i < NB_RETRIES; i++) { int rowCountHt2TargetAtPeer1 = countRows(target); if (i == NB_RETRIES - 1) { - assertEquals(msg, expectedCount, rowCountHt2TargetAtPeer1); + assertEquals(expectedCount, rowCountHt2TargetAtPeer1, msg); } if (expectedCount == rowCountHt2TargetAtPeer1) { break; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationWithFSPeerStorage.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationWithFSPeerStorage.java index 6f5c6c20d8d8..b992d8cb1daa 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationWithFSPeerStorage.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationWithFSPeerStorage.java @@ -17,23 +17,20 @@ */ package org.apache.hadoop.hbase.replication; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.testclassification.ReplicationTests; -import org.junit.Before; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; -@Category({ ReplicationTests.class, LargeTests.class }) +@Tag(ReplicationTests.TAG) +@Tag(LargeTests.TAG) public class TestReplicationWithFSPeerStorage extends 
TestReplicationBase { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestReplicationWithFSPeerStorage.class); - - @BeforeClass + @BeforeAll public static void setUpBeforeClass() throws Exception { // enable file system based peer storage UTIL1.getConfiguration().set(ReplicationStorageFactory.REPLICATION_PEER_STORAGE_IMPL, @@ -43,11 +40,22 @@ public static void setUpBeforeClass() throws Exception { TestReplicationBase.setUpBeforeClass(); } - @Before + @AfterAll + public static void tearDownAfterClass() throws Exception { + TestReplicationBase.tearDownAfterClass(); + } + + @BeforeEach public void setUp() throws Exception { + setUpBase(); cleanUp(); } + @AfterEach + public void tearDown() throws Exception { + tearDownBase(); + } + /** * Add a row, check it's replicated, delete it, check's gone */ diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/multiwal/TestReplicationEndpointWithMultipleAsyncWAL.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/multiwal/TestReplicationEndpointWithMultipleAsyncWAL.java index d3a947fb2404..136a97a70390 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/multiwal/TestReplicationEndpointWithMultipleAsyncWAL.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/multiwal/TestReplicationEndpointWithMultipleAsyncWAL.java @@ -17,27 +17,28 @@ */ package org.apache.hadoop.hbase.replication.multiwal; -import org.apache.hadoop.hbase.HBaseClassTestRule; -import org.apache.hadoop.hbase.replication.TestReplicationEndpoint; +import org.apache.hadoop.hbase.replication.TestReplicationEndpointBase; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.ReplicationTests; import org.apache.hadoop.hbase.wal.RegionGroupingProvider; import org.apache.hadoop.hbase.wal.WALFactory; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import 
org.junit.experimental.categories.Category; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Tag; -@Category({ ReplicationTests.class, MediumTests.class }) -public class TestReplicationEndpointWithMultipleAsyncWAL extends TestReplicationEndpoint { +@Tag(ReplicationTests.TAG) +@Tag(MediumTests.TAG) +public class TestReplicationEndpointWithMultipleAsyncWAL extends TestReplicationEndpointBase { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestReplicationEndpointWithMultipleAsyncWAL.class); - - @BeforeClass + @BeforeAll public static void setUpBeforeClass() throws Exception { CONF1.set(WALFactory.WAL_PROVIDER, "multiwal"); CONF1.set(RegionGroupingProvider.DELEGATE_PROVIDER, "asyncfs"); - TestReplicationEndpoint.setUpBeforeClass(); + TestReplicationEndpointBase.setUpBeforeClass(); + } + + @AfterAll + public static void tearDownAfterClass() throws Exception { + TestReplicationEndpointBase.tearDownAfterClass(); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/multiwal/TestReplicationEndpointWithMultipleWAL.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/multiwal/TestReplicationEndpointWithMultipleWAL.java index a882c5043990..2a31915a851d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/multiwal/TestReplicationEndpointWithMultipleWAL.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/multiwal/TestReplicationEndpointWithMultipleWAL.java @@ -17,27 +17,28 @@ */ package org.apache.hadoop.hbase.replication.multiwal; -import org.apache.hadoop.hbase.HBaseClassTestRule; -import org.apache.hadoop.hbase.replication.TestReplicationEndpoint; +import org.apache.hadoop.hbase.replication.TestReplicationEndpointBase; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.ReplicationTests; import 
org.apache.hadoop.hbase.wal.RegionGroupingProvider; import org.apache.hadoop.hbase.wal.WALFactory; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Tag; -@Category({ ReplicationTests.class, MediumTests.class }) -public class TestReplicationEndpointWithMultipleWAL extends TestReplicationEndpoint { +@Tag(ReplicationTests.TAG) +@Tag(MediumTests.TAG) +public class TestReplicationEndpointWithMultipleWAL extends TestReplicationEndpointBase { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestReplicationEndpointWithMultipleWAL.class); - - @BeforeClass + @BeforeAll public static void setUpBeforeClass() throws Exception { CONF1.set(WALFactory.WAL_PROVIDER, "multiwal"); CONF1.set(RegionGroupingProvider.DELEGATE_PROVIDER, "filesystem"); - TestReplicationEndpoint.setUpBeforeClass(); + TestReplicationEndpointBase.setUpBeforeClass(); + } + + @AfterAll + public static void tearDownAfterClass() throws Exception { + TestReplicationEndpointBase.tearDownAfterClass(); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/multiwal/TestReplicationKillMasterRSCompressedWithMultipleAsyncWAL.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/multiwal/TestReplicationKillMasterRSCompressedWithMultipleAsyncWAL.java index 623e4c28cd05..2f29c3e37352 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/multiwal/TestReplicationKillMasterRSCompressedWithMultipleAsyncWAL.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/multiwal/TestReplicationKillMasterRSCompressedWithMultipleAsyncWAL.java @@ -17,28 +17,29 @@ */ package org.apache.hadoop.hbase.replication.multiwal; -import org.apache.hadoop.hbase.HBaseClassTestRule; -import 
org.apache.hadoop.hbase.replication.TestReplicationKillMasterRSCompressed; +import org.apache.hadoop.hbase.replication.TestReplicationKillMasterRSCompressedBase; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.testclassification.ReplicationTests; import org.apache.hadoop.hbase.wal.RegionGroupingProvider; import org.apache.hadoop.hbase.wal.WALFactory; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Tag; -@Category({ ReplicationTests.class, LargeTests.class }) +@Tag(ReplicationTests.TAG) +@Tag(LargeTests.TAG) public class TestReplicationKillMasterRSCompressedWithMultipleAsyncWAL - extends TestReplicationKillMasterRSCompressed { + extends TestReplicationKillMasterRSCompressedBase { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestReplicationKillMasterRSCompressedWithMultipleAsyncWAL.class); - - @BeforeClass + @BeforeAll public static void setUpBeforeClass() throws Exception { CONF1.set(WALFactory.WAL_PROVIDER, "multiwal"); CONF1.set(RegionGroupingProvider.DELEGATE_PROVIDER, "asyncfs"); - TestReplicationKillMasterRSCompressed.setUpBeforeClass(); + TestReplicationKillMasterRSCompressedBase.setUpBeforeClass(); + } + + @AfterAll + public static void tearDownAfterClass() throws Exception { + TestReplicationKillMasterRSCompressedBase.tearDownAfterClass(); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/multiwal/TestReplicationKillMasterRSCompressedWithMultipleWAL.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/multiwal/TestReplicationKillMasterRSCompressedWithMultipleWAL.java index 54921520b1cd..96968feb2edf 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/multiwal/TestReplicationKillMasterRSCompressedWithMultipleWAL.java +++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/multiwal/TestReplicationKillMasterRSCompressedWithMultipleWAL.java @@ -17,28 +17,29 @@ */ package org.apache.hadoop.hbase.replication.multiwal; -import org.apache.hadoop.hbase.HBaseClassTestRule; -import org.apache.hadoop.hbase.replication.TestReplicationKillMasterRSCompressed; +import org.apache.hadoop.hbase.replication.TestReplicationKillMasterRSCompressedBase; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.testclassification.ReplicationTests; import org.apache.hadoop.hbase.wal.RegionGroupingProvider; import org.apache.hadoop.hbase.wal.WALFactory; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Tag; -@Category({ ReplicationTests.class, LargeTests.class }) +@Tag(ReplicationTests.TAG) +@Tag(LargeTests.TAG) public class TestReplicationKillMasterRSCompressedWithMultipleWAL - extends TestReplicationKillMasterRSCompressed { + extends TestReplicationKillMasterRSCompressedBase { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestReplicationKillMasterRSCompressedWithMultipleWAL.class); - - @BeforeClass + @BeforeAll public static void setUpBeforeClass() throws Exception { CONF1.set(WALFactory.WAL_PROVIDER, "multiwal"); CONF1.set(RegionGroupingProvider.DELEGATE_PROVIDER, "filesystem"); - TestReplicationKillMasterRSCompressed.setUpBeforeClass(); + TestReplicationKillMasterRSCompressedBase.setUpBeforeClass(); + } + + @AfterAll + public static void tearDownAfterClass() throws Exception { + TestReplicationKillMasterRSCompressedBase.tearDownAfterClass(); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/multiwal/TestReplicationSyncUpToolWithMultipleAsyncWAL.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/multiwal/TestReplicationSyncUpToolWithMultipleAsyncWAL.java index 83cd41773ca8..f8de45600066 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/multiwal/TestReplicationSyncUpToolWithMultipleAsyncWAL.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/multiwal/TestReplicationSyncUpToolWithMultipleAsyncWAL.java @@ -18,22 +18,17 @@ package org.apache.hadoop.hbase.replication.multiwal; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.replication.TestReplicationSyncUpTool; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.testclassification.ReplicationTests; import org.apache.hadoop.hbase.wal.RegionGroupingProvider; import org.apache.hadoop.hbase.wal.WALFactory; -import org.junit.ClassRule; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.Tag; -@Category({ ReplicationTests.class, LargeTests.class }) +@Tag(ReplicationTests.TAG) +@Tag(LargeTests.TAG) public class TestReplicationSyncUpToolWithMultipleAsyncWAL extends TestReplicationSyncUpTool { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestReplicationSyncUpToolWithMultipleAsyncWAL.class); - @Override protected void customizeClusterConf(Configuration conf) { conf.set(WALFactory.WAL_PROVIDER, "multiwal"); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/multiwal/TestReplicationSyncUpToolWithMultipleWAL.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/multiwal/TestReplicationSyncUpToolWithMultipleWAL.java index 673b841430eb..6883c48cc8d8 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/multiwal/TestReplicationSyncUpToolWithMultipleWAL.java +++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/multiwal/TestReplicationSyncUpToolWithMultipleWAL.java @@ -18,22 +18,17 @@ package org.apache.hadoop.hbase.replication.multiwal; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.replication.TestReplicationSyncUpTool; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.testclassification.ReplicationTests; import org.apache.hadoop.hbase.wal.RegionGroupingProvider; import org.apache.hadoop.hbase.wal.WALFactory; -import org.junit.ClassRule; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.Tag; -@Category({ ReplicationTests.class, LargeTests.class }) +@Tag(ReplicationTests.TAG) +@Tag(LargeTests.TAG) public class TestReplicationSyncUpToolWithMultipleWAL extends TestReplicationSyncUpTool { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestReplicationSyncUpToolWithMultipleWAL.class); - @Override protected void customizeClusterConf(Configuration conf) { conf.set(WALFactory.WAL_PROVIDER, "multiwal"); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestRefreshPeerWhileRegionServerRestarts.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestRefreshPeerWhileRegionServerRestarts.java index e9d8e05c8818..3a8b8de946a0 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestRefreshPeerWhileRegionServerRestarts.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestRefreshPeerWhileRegionServerRestarts.java @@ -17,14 +17,13 @@ */ package org.apache.hadoop.hbase.replication.regionserver; -import static org.junit.Assert.assertEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; import java.io.IOException; import java.util.concurrent.CountDownLatch; import 
java.util.concurrent.ForkJoinPool; import java.util.concurrent.Future; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.master.replication.DisablePeerProcedure; import org.apache.hadoop.hbase.procedure2.Procedure; @@ -34,9 +33,12 @@ import org.apache.hadoop.hbase.replication.TestReplicationBase; import org.apache.hadoop.hbase.testclassification.MasterTests; import org.apache.hadoop.hbase.testclassification.MediumTests; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.PeerModificationState; @@ -44,13 +46,10 @@ * This UT is used to make sure that we will not accidentally change the way to generate online * servers. See HBASE-25774 and HBASE-25032 for more details. 
*/ -@Category({ MasterTests.class, MediumTests.class }) +@Tag(MasterTests.TAG) +@Tag(MediumTests.TAG) public class TestRefreshPeerWhileRegionServerRestarts extends TestReplicationBase { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestRefreshPeerWhileRegionServerRestarts.class); - private static CountDownLatch ARRIVE; private static CountDownLatch RESUME; @@ -76,6 +75,26 @@ protected void tryRegionServerReport(long reportStartTime, long reportEndTime) } } + @BeforeAll + public static void setUpBeforeClass() throws Exception { + TestReplicationBase.setUpBeforeClass(); + } + + @AfterAll + public static void tearDownAfterClass() throws Exception { + TestReplicationBase.tearDownAfterClass(); + } + + @BeforeEach + public void setUp() throws Exception { + setUpBase(); + } + + @AfterEach + public void tearDown() throws Exception { + tearDownBase(); + } + @Test public void testRestart() throws Exception { UTIL1.getMiniHBaseCluster().getConfiguration().setClass(HConstants.REGION_SERVER_IMPL, diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestRefreshRecoveredReplication.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestRefreshRecoveredReplication.java index 93aa7130926f..f2f415a69ab8 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestRefreshRecoveredReplication.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestRefreshRecoveredReplication.java @@ -17,13 +17,13 @@ */ package org.apache.hadoop.hbase.replication.regionserver; -import static org.junit.Assert.assertEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; import java.io.IOException; import java.util.List; import java.util.Optional; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.HBaseClassTestRule; import 
org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; @@ -40,16 +40,13 @@ import org.apache.hadoop.hbase.testclassification.ReplicationTests; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread; -import org.junit.After; -import org.junit.AfterClass; -import org.junit.Assert; -import org.junit.Before; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Rule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.rules.TestName; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestInfo; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -58,25 +55,19 @@ /** * Testcase for HBASE-24871. */ -@Category({ ReplicationTests.class, MediumTests.class }) +@Tag(ReplicationTests.TAG) +@Tag(MediumTests.TAG) public class TestRefreshRecoveredReplication extends TestReplicationBase { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestRefreshRecoveredReplication.class); - private static final Logger LOG = LoggerFactory.getLogger(TestRefreshRecoveredReplication.class); private static final int BATCH = 50; - @Rule - public TestName name = new TestName(); - private TableName tablename; private Table table1; private Table table2; - @BeforeClass + @BeforeAll public static void setUpBeforeClass() throws Exception { // NUM_SLAVES1 is presumed 2 in below. 
NUM_SLAVES1 = 2; @@ -86,16 +77,16 @@ public static void setUpBeforeClass() throws Exception { TestReplicationBase.setUpBeforeClass(); } - @AfterClass + @AfterAll public static void tearDownAfterClass() throws Exception { TestReplicationBase.tearDownAfterClass(); } - @Before - public void setup() throws Exception { + @BeforeEach + public void setup(TestInfo testInfo) throws Exception { setUpBase(); - tablename = TableName.valueOf(name.getMethodName()); + tablename = TableName.valueOf(testInfo.getTestMethod().get().getName()); TableDescriptor table = TableDescriptorBuilder.newBuilder(tablename).setColumnFamily(ColumnFamilyDescriptorBuilder .newBuilder(famName).setScope(HConstants.REPLICATION_SCOPE_GLOBAL).build()).build(); @@ -108,7 +99,7 @@ public void setup() throws Exception { table2 = UTIL2.getConnection().getTable(tablename); } - @After + @AfterEach public void teardown() throws Exception { tearDownBase(); @@ -130,7 +121,7 @@ public void testReplicationRefreshSource() throws Exception { Optional server = rss.stream() .filter(rst -> CollectionUtils.isNotEmpty(rst.getRegionServer().getRegions(tablename))) .findAny(); - Assert.assertTrue(server.isPresent()); + assertTrue(server.isPresent()); HRegionServer otherServer = rss.get(0).getRegionServer() == server.get().getRegionServer() ? 
rss.get(1).getRegionServer() : rss.get(0).getRegionServer(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationCompressedWAL.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationCompressedWAL.java index 27c39cc0df2f..4364899719d6 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationCompressedWAL.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationCompressedWAL.java @@ -17,11 +17,10 @@ */ package org.apache.hadoop.hbase.replication.regionserver; -import static org.junit.Assert.assertArrayEquals; -import static org.junit.Assert.fail; +import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.fail; import java.io.IOException; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.client.Get; import org.apache.hadoop.hbase.client.Put; @@ -29,36 +28,43 @@ import org.apache.hadoop.hbase.replication.TestReplicationBase; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.Bytes; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category(MediumTests.class) +@Tag(MediumTests.TAG) public class TestReplicationCompressedWAL extends TestReplicationBase { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestReplicationCompressedWAL.class); - static final Logger 
LOG = LoggerFactory.getLogger(TestReplicationCompressedWAL.class); static final int NUM_BATCHES = 20; static final int NUM_ROWS_PER_BATCH = 100; - @BeforeClass + @BeforeAll public static void setUpBeforeClass() throws Exception { CONF1.setBoolean(HConstants.ENABLE_WAL_COMPRESSION, true); TestReplicationBase.setUpBeforeClass(); } - @AfterClass + @AfterAll public static void tearDownAfterClass() throws Exception { TestReplicationBase.tearDownAfterClass(); } + @BeforeEach + public void setUp() throws Exception { + setUpBase(); + } + + @AfterEach + public void tearDown() throws Exception { + tearDownBase(); + } + @Test public void testMultiplePuts() throws Exception { runMultiplePutTest(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManagerJoin.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManagerJoin.java index d7d23783eacb..59d8a8c674c4 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManagerJoin.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManagerJoin.java @@ -17,12 +17,11 @@ */ package org.apache.hadoop.hbase.replication.regionserver; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; import java.util.Optional; import java.util.stream.Stream; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; @@ -36,35 +35,45 @@ import org.apache.hadoop.hbase.testclassification.ReplicationTests; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.JVMClusterUtil; -import org.junit.BeforeClass; -import 
org.junit.ClassRule; -import org.junit.Rule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.rules.TestName; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestInfo; -@Category({ ReplicationTests.class, MediumTests.class }) +@Tag(ReplicationTests.TAG) +@Tag(MediumTests.TAG) public class TestReplicationSourceManagerJoin extends TestReplicationBase { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestReplicationSourceManagerJoin.class); - - @Rule - public TestName testName = new TestName(); - - @BeforeClass + @BeforeAll public static void setUpBeforeClass() throws Exception { // NUM_SLAVES1 is presumed 2 in below. NUM_SLAVES1 = 2; TestReplicationBase.setUpBeforeClass(); } + @AfterAll + public static void tearDownAfterClass() throws Exception { + TestReplicationBase.tearDownAfterClass(); + } + + @BeforeEach + public void setUp() throws Exception { + setUpBase(); + } + + @AfterEach + public void tearDown() throws Exception { + tearDownBase(); + } + @Test - public void testReplicationSourcesTerminate() throws Exception { + public void testReplicationSourcesTerminate(TestInfo testInfo) throws Exception { // Create table in source cluster only, let TableNotFoundException block peer to avoid // recovered source end. 
- TableName tableName = TableName.valueOf(testName.getMethodName()); + TableName tableName = TableName.valueOf(testInfo.getTestMethod().get().getName()); TableDescriptor td = TableDescriptorBuilder.newBuilder(tableName) .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(famName) .setScope(HConstants.REPLICATION_SCOPE_GLOBAL).build()) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationValueCompressedWAL.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationValueCompressedWAL.java index 03b83964dccf..ea6411478392 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationValueCompressedWAL.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationValueCompressedWAL.java @@ -17,40 +17,46 @@ */ package org.apache.hadoop.hbase.replication.regionserver; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.regionserver.wal.CompressionContext; import org.apache.hadoop.hbase.replication.TestReplicationBase; import org.apache.hadoop.hbase.testclassification.MediumTests; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category(MediumTests.class) +@Tag(MediumTests.TAG) public class TestReplicationValueCompressedWAL extends TestReplicationBase { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestReplicationValueCompressedWAL.class); - static final Logger LOG = 
LoggerFactory.getLogger(TestReplicationValueCompressedWAL.class); - @BeforeClass + @BeforeAll public static void setUpBeforeClass() throws Exception { CONF1.setBoolean(HConstants.ENABLE_WAL_COMPRESSION, true); CONF1.setBoolean(CompressionContext.ENABLE_WAL_VALUE_COMPRESSION, true); TestReplicationBase.setUpBeforeClass(); } - @AfterClass + @AfterAll public static void tearDownAfterClass() throws Exception { TestReplicationBase.tearDownAfterClass(); } + @BeforeEach + public void setUp() throws Exception { + setUpBase(); + } + + @AfterEach + public void tearDown() throws Exception { + tearDownBase(); + } + @Test public void testMultiplePuts() throws Exception { TestReplicationCompressedWAL.runMultiplePutTest(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicator.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicator.java index 979db712ef34..32ee4943586b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicator.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicator.java @@ -17,14 +17,13 @@ */ package org.apache.hadoop.hbase.replication.regionserver; -import static org.junit.Assert.assertEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; import java.io.IOException; import java.util.List; import java.util.concurrent.CompletableFuture; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.Waiter; @@ -36,33 +35,45 @@ import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.wal.WAL.Entry; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.ClassRule; 
-import org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hbase.thirdparty.com.google.protobuf.ServiceException; -@Category(MediumTests.class) +@Tag(MediumTests.TAG) public class TestReplicator extends TestReplicationBase { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestReplicator.class); - static final Logger LOG = LoggerFactory.getLogger(TestReplicator.class); static final int NUM_ROWS = 10; - @BeforeClass + @BeforeAll public static void setUpBeforeClass() throws Exception { // Set RPC size limit to 10kb (will be applied to both source and sink clusters) CONF1.setInt(RpcServer.MAX_REQUEST_SIZE, 1024 * 10); TestReplicationBase.setUpBeforeClass(); } + @AfterAll + public static void tearDownAfterClass() throws Exception { + TestReplicationBase.tearDownAfterClass(); + } + + @BeforeEach + public void setUp() throws Exception { + setUpBase(); + } + + @AfterEach + public void tearDown() throws Exception { + tearDownBase(); + } + @Test public void testReplicatorBatching() throws Exception { // Clear the tables @@ -104,9 +115,9 @@ public String explainFailure() throws Exception { } }); - assertEquals("We sent an incorrect number of batches", NUM_ROWS, - ReplicationEndpointForTest.getBatchCount()); - assertEquals("We did not replicate enough rows", NUM_ROWS, UTIL2.countRows(htable2)); + assertEquals(NUM_ROWS, ReplicationEndpointForTest.getBatchCount(), + "We sent an incorrect number of batches"); + assertEquals(NUM_ROWS, UTIL2.countRows(htable2), "We did not replicate enough rows"); } finally { hbaseAdmin.removeReplicationPeer("testReplicatorBatching"); } @@ -154,17 +165,12 @@ public String 
explainFailure() throws Exception { } }); - assertEquals("We did not replicate enough rows", NUM_ROWS, UTIL2.countRows(htable2)); + assertEquals(NUM_ROWS, UTIL2.countRows(htable2), "We did not replicate enough rows"); } finally { hbaseAdmin.removeReplicationPeer("testReplicatorWithErrors"); } } - @AfterClass - public static void tearDownAfterClass() throws Exception { - TestReplicationBase.tearDownAfterClass(); - } - private void truncateTable(HBaseTestingUtil util, TableName tablename) throws IOException { Admin admin = util.getAdmin(); admin.disableTable(tableName);