lastEntries = null;
-
- public ReplicationEndpointForTest() {
- replicateCount.set(0);
- contructedCount.incrementAndGet();
- }
-
- @Override
- public UUID getPeerUUID() {
- return uuid;
- }
-
- @Override
- public boolean replicate(ReplicateContext replicateContext) {
- replicateCount.incrementAndGet();
- lastEntries = new ArrayList<>(replicateContext.entries);
- return true;
- }
-
- @Override
- public void start() {
- startAsync();
- }
-
- @Override
- public void stop() {
- stopAsync();
- }
-
- @Override
- protected void doStart() {
- startedCount.incrementAndGet();
- notifyStarted();
- }
-
- @Override
- protected void doStop() {
- stoppedCount.incrementAndGet();
- notifyStopped();
- }
-
- @Override
- public boolean canReplicateToSameCluster() {
- return true;
- }
- }
-
- /**
- * Not used by unit tests, helpful for manual testing with replication.
- *
- * Snippet for `hbase shell`:
- *
- *
- * create 't', 'f'
- * add_peer '1', ENDPOINT_CLASSNAME => 'org.apache.hadoop.hbase.replication.' + \
- * 'TestReplicationEndpoint$SleepingReplicationEndpointForTest'
- * alter 't', {NAME=>'f', REPLICATION_SCOPE=>1}
- *
- */
- public static class SleepingReplicationEndpointForTest extends ReplicationEndpointForTest {
- private long duration;
-
- public SleepingReplicationEndpointForTest() {
- super();
- }
-
- @Override
- public void init(Context context) throws IOException {
- super.init(context);
- if (this.ctx != null) {
- duration = this.ctx.getConfiguration()
- .getLong("hbase.test.sleep.replication.endpoint.duration.millis", 5000L);
- }
- }
-
- @Override
- public boolean replicate(ReplicateContext context) {
- try {
- Thread.sleep(duration);
- } catch (InterruptedException e) {
- Thread.currentThread().interrupt();
- return false;
- }
- return super.replicate(context);
- }
- }
-
- public static class InterClusterReplicationEndpointForTest
- extends HBaseInterClusterReplicationEndpoint {
-
- static AtomicInteger replicateCount = new AtomicInteger();
- static boolean failedOnce;
-
- public InterClusterReplicationEndpointForTest() {
- replicateCount.set(0);
- }
-
- @Override
- public boolean replicate(ReplicateContext replicateContext) {
- boolean success = super.replicate(replicateContext);
- if (success) {
- replicateCount.addAndGet(replicateContext.entries.size());
- }
- return success;
- }
-
- @Override
- protected CompletableFuture asyncReplicate(List entries, int ordinal,
- int timeout) {
- // Fail only once, we don't want to slow down the test.
- if (failedOnce) {
- return CompletableFuture.completedFuture(ordinal);
- } else {
- failedOnce = true;
- CompletableFuture future = new CompletableFuture();
- future.completeExceptionally(new IOException("Sample Exception: Failed to replicate."));
- return future;
- }
- }
- }
-
- public static class ReplicationEndpointReturningFalse extends ReplicationEndpointForTest {
- static int COUNT = 10;
- static AtomicReference ex = new AtomicReference<>(null);
- static AtomicBoolean replicated = new AtomicBoolean(false);
-
- @Override
- public boolean replicate(ReplicateContext replicateContext) {
- try {
- // check row
- doAssert(row);
- } catch (Exception e) {
- ex.set(e);
- }
-
- super.replicate(replicateContext);
- LOG.info("Replicated " + Bytes.toString(row) + ", count=" + replicateCount.get());
-
- replicated.set(replicateCount.get() > COUNT); // first 10 times, we return false
- return replicated.get();
- }
- }
-
- // return a WALEntry filter which only accepts "row", but not other rows
- public static class ReplicationEndpointWithWALEntryFilter extends ReplicationEndpointForTest {
- static AtomicReference ex = new AtomicReference<>(null);
-
- @Override
- public boolean replicate(ReplicateContext replicateContext) {
- try {
- super.replicate(replicateContext);
- doAssert(row);
- } catch (Exception e) {
- ex.set(e);
- }
- return true;
- }
-
- @Override
- public WALEntryFilter getWALEntryfilter() {
- return new ChainWALEntryFilter(super.getWALEntryfilter(), new WALEntryFilter() {
- @Override
- public Entry filter(Entry entry) {
- ArrayList cells = entry.getEdit().getCells();
- int size = cells.size();
- for (int i = size - 1; i >= 0; i--) {
- Cell cell = cells.get(i);
- if (
- !Bytes.equals(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength(), row, 0,
- row.length)
- ) {
- cells.remove(i);
- }
- }
- return entry;
- }
- });
- }
- }
-
- public static class EverythingPassesWALEntryFilter implements WALEntryFilter {
- private static boolean passedEntry = false;
-
- @Override
- public Entry filter(Entry entry) {
- passedEntry = true;
- return entry;
- }
+@Tag(ReplicationTests.TAG)
+@Tag(MediumTests.TAG)
+public class TestReplicationEndpoint extends TestReplicationEndpointBase {
- public static boolean hasPassedAnEntry() {
- return passedEntry;
- }
+ @BeforeAll
+ public static void beforeClass() throws Exception {
+ setUpBeforeClass();
}
- public static class EverythingPassesWALEntryFilterSubclass
- extends EverythingPassesWALEntryFilter {
+ @AfterAll
+ public static void afterClass() throws Exception {
+ tearDownAfterClass();
}
}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationEndpointBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationEndpointBase.java
new file mode 100644
index 000000000000..e46f39b5c2cf
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationEndpointBase.java
@@ -0,0 +1,651 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.replication;
+
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertThrows;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.mockito.Mockito.doNothing;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.spy;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.TreeMap;
+import java.util.UUID;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicReference;
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.Waiter;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.replication.regionserver.HBaseInterClusterReplicationEndpoint;
+import org.apache.hadoop.hbase.replication.regionserver.MetricsReplicationGlobalSourceSource;
+import org.apache.hadoop.hbase.replication.regionserver.MetricsReplicationGlobalSourceSourceImpl;
+import org.apache.hadoop.hbase.replication.regionserver.MetricsReplicationSourceImpl;
+import org.apache.hadoop.hbase.replication.regionserver.MetricsReplicationSourceSource;
+import org.apache.hadoop.hbase.replication.regionserver.MetricsReplicationSourceSourceImpl;
+import org.apache.hadoop.hbase.replication.regionserver.MetricsReplicationTableSource;
+import org.apache.hadoop.hbase.replication.regionserver.MetricsSource;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread;
+import org.apache.hadoop.hbase.util.Pair;
+import org.apache.hadoop.hbase.util.Threads;
+import org.apache.hadoop.hbase.wal.WAL.Entry;
+import org.apache.hadoop.hbase.wal.WALEdit;
+import org.apache.hadoop.hbase.wal.WALEditInternalHelper;
+import org.apache.hadoop.hbase.wal.WALKeyImpl;
+import org.apache.hadoop.hbase.zookeeper.ZKConfig;
+import org.apache.hadoop.metrics2.lib.DynamicMetricsRegistry;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Abstract base class for TestReplicationEndpoint tests. Subclasses must call
+ * {@link #setUpBeforeClass()} and {@link #tearDownAfterClass()} in their
+ * {@code @BeforeAll} and {@code @AfterAll} methods respectively.
+ */
+public abstract class TestReplicationEndpointBase extends TestReplicationBase {
+
+ private static final Logger LOG = LoggerFactory.getLogger(TestReplicationEndpointBase.class);
+
+ static int numRegionServers;
+
+ public static void setUpBeforeClass() throws Exception {
+ TestReplicationBase.setUpBeforeClass();
+ numRegionServers = UTIL1.getHBaseCluster().getRegionServerThreads().size();
+ }
+
+ public static void tearDownAfterClass() throws Exception {
+ TestReplicationBase.tearDownAfterClass();
+ // check stop is called
+ assertTrue(ReplicationEndpointForTest.stoppedCount.get() > 0);
+ }
+
+ @BeforeEach
+ public void setup() throws Exception {
+ setUpBase();
+ ReplicationEndpointForTest.contructedCount.set(0);
+ ReplicationEndpointForTest.startedCount.set(0);
+ ReplicationEndpointForTest.replicateCount.set(0);
+ ReplicationEndpointReturningFalse.replicated.set(false);
+ ReplicationEndpointForTest.lastEntries = null;
+ final List rsThreads = UTIL1.getMiniHBaseCluster().getRegionServerThreads();
+ for (RegionServerThread rs : rsThreads) {
+ UTIL1.getAdmin().rollWALWriter(rs.getRegionServer().getServerName());
+ }
+ // Wait for all log roll to finish
+ UTIL1.waitFor(3000, new Waiter.ExplainingPredicate() {
+ @Override
+ public boolean evaluate() throws Exception {
+ for (RegionServerThread rs : rsThreads) {
+ if (!rs.getRegionServer().walRollRequestFinished()) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ @Override
+ public String explainFailure() throws Exception {
+ List logRollInProgressRsList = new ArrayList<>();
+ for (RegionServerThread rs : rsThreads) {
+ if (!rs.getRegionServer().walRollRequestFinished()) {
+ logRollInProgressRsList.add(rs.getRegionServer().toString());
+ }
+ }
+ return "Still waiting for log roll on regionservers: " + logRollInProgressRsList;
+ }
+ });
+ }
+
+ @AfterEach
+ public void tearDown() throws Exception {
+ tearDownBase();
+ }
+
+ @Test
+ public void testCustomReplicationEndpoint() throws Exception {
+ // test installing a custom replication endpoint other than the default one.
+ hbaseAdmin.addReplicationPeer("testCustomReplicationEndpoint",
+ ReplicationPeerConfig.newBuilder().setClusterKey(ZKConfig.getZooKeeperClusterKey(CONF1))
+ .setReplicationEndpointImpl(ReplicationEndpointForTest.class.getName()).build());
+
+ // check whether the class has been constructed and started
+ Waiter.waitFor(CONF1, 60000, new Waiter.Predicate() {
+ @Override
+ public boolean evaluate() throws Exception {
+ return ReplicationEndpointForTest.contructedCount.get() >= numRegionServers;
+ }
+ });
+
+ Waiter.waitFor(CONF1, 60000, new Waiter.Predicate() {
+ @Override
+ public boolean evaluate() throws Exception {
+ return ReplicationEndpointForTest.startedCount.get() >= numRegionServers;
+ }
+ });
+
+ assertEquals(0, ReplicationEndpointForTest.replicateCount.get());
+
+ // now replicate some data.
+ doPut(Bytes.toBytes("row42"));
+
+ Waiter.waitFor(CONF1, 60000, new Waiter.Predicate() {
+ @Override
+ public boolean evaluate() throws Exception {
+ return ReplicationEndpointForTest.replicateCount.get() >= 1;
+ }
+ });
+
+ doAssert(Bytes.toBytes("row42"));
+
+ hbaseAdmin.removeReplicationPeer("testCustomReplicationEndpoint");
+ }
+
+ @Test
+ public void testReplicationEndpointReturnsFalseOnReplicate() throws Exception {
+ assertEquals(0, ReplicationEndpointForTest.replicateCount.get());
+ assertTrue(!ReplicationEndpointReturningFalse.replicated.get());
+ int peerCount = hbaseAdmin.listReplicationPeers().size();
+ final String id = "testReplicationEndpointReturnsFalseOnReplicate";
+ hbaseAdmin.addReplicationPeer(id,
+ ReplicationPeerConfig.newBuilder().setClusterKey(ZKConfig.getZooKeeperClusterKey(CONF1))
+ .setReplicationEndpointImpl(ReplicationEndpointReturningFalse.class.getName()).build());
+ // This test is flaky and there is so much stuff flying around in here it's hard to
+ // debug. Peer needs to be up for the edit to make it across. This wait on
+ // peer count seems to be a hack that has us not progress till peer is up.
+ if (hbaseAdmin.listReplicationPeers().size() <= peerCount) {
+ LOG.info("Waiting on peercount to go up from " + peerCount);
+ Threads.sleep(100);
+ }
+ // now replicate some data
+ doPut(row);
+
+ Waiter.waitFor(CONF1, 60000, new Waiter.Predicate() {
+ @Override
+ public boolean evaluate() throws Exception {
+ // Looks like replication endpoint returns false unless we put more than 10 edits. We
+ // only send over one edit.
+ int count = ReplicationEndpointForTest.replicateCount.get();
+ LOG.info("count=" + count);
+ return ReplicationEndpointReturningFalse.replicated.get();
+ }
+ });
+ if (ReplicationEndpointReturningFalse.ex.get() != null) {
+ throw ReplicationEndpointReturningFalse.ex.get();
+ }
+
+ hbaseAdmin.removeReplicationPeer("testReplicationEndpointReturnsFalseOnReplicate");
+ }
+
+ @Test
+ public void testInterClusterReplication() throws Exception {
+ final String id = "testInterClusterReplication";
+
+ List regions = UTIL1.getHBaseCluster().getRegions(tableName);
+ int totEdits = 0;
+
+ // Make sure edits are spread across regions because we do region based batching
+ // before shipping edits.
+ for (HRegion region : regions) {
+ RegionInfo hri = region.getRegionInfo();
+ byte[] row = hri.getStartKey();
+ for (int i = 0; i < 100; i++) {
+ if (row.length > 0) {
+ Put put = new Put(row);
+ put.addColumn(famName, row, row);
+ region.put(put);
+ totEdits++;
+ }
+ }
+ }
+
+ hbaseAdmin.addReplicationPeer(id,
+ ReplicationPeerConfig.newBuilder().setClusterKey(ZKConfig.getZooKeeperClusterKey(CONF2))
+ .setReplicationEndpointImpl(InterClusterReplicationEndpointForTest.class.getName())
+ .build());
+
+ final int numEdits = totEdits;
+ Waiter.waitFor(CONF1, 30000, new Waiter.ExplainingPredicate() {
+ @Override
+ public boolean evaluate() throws Exception {
+ return InterClusterReplicationEndpointForTest.replicateCount.get() == numEdits;
+ }
+
+ @Override
+ public String explainFailure() throws Exception {
+ String failure = "Failed to replicate all edits, expected = " + numEdits + " replicated = "
+ + InterClusterReplicationEndpointForTest.replicateCount.get();
+ return failure;
+ }
+ });
+
+ hbaseAdmin.removeReplicationPeer("testInterClusterReplication");
+ UTIL1.deleteTableData(tableName);
+ }
+
+ @Test
+ public void testWALEntryFilterFromReplicationEndpoint() throws Exception {
+ ReplicationPeerConfig rpc =
+ ReplicationPeerConfig.newBuilder().setClusterKey(ZKConfig.getZooKeeperClusterKey(CONF1))
+ .setReplicationEndpointImpl(ReplicationEndpointWithWALEntryFilter.class.getName())
+ // test that we can create multiple WALFilters reflectively
+ .putConfiguration(BaseReplicationEndpoint.REPLICATION_WALENTRYFILTER_CONFIG_KEY,
+ EverythingPassesWALEntryFilter.class.getName() + ","
+ + EverythingPassesWALEntryFilterSubclass.class.getName())
+ .build();
+
+ hbaseAdmin.addReplicationPeer("testWALEntryFilterFromReplicationEndpoint", rpc);
+ // now replicate some data.
+ try (Connection connection = ConnectionFactory.createConnection(CONF1)) {
+ doPut(connection, Bytes.toBytes("row1"));
+ doPut(connection, row);
+ doPut(connection, Bytes.toBytes("row2"));
+ }
+
+ Waiter.waitFor(CONF1, 60000, new Waiter.Predicate() {
+ @Override
+ public boolean evaluate() throws Exception {
+ return ReplicationEndpointForTest.replicateCount.get() >= 1;
+ }
+ });
+
+ assertEquals(null, ReplicationEndpointWithWALEntryFilter.ex.get());
+ // make sure our reflectively created filter is in the filter chain
+ assertTrue(EverythingPassesWALEntryFilter.hasPassedAnEntry());
+ hbaseAdmin.removeReplicationPeer("testWALEntryFilterFromReplicationEndpoint");
+ }
+
+ @Test
+ public void testWALEntryFilterAddValidation() {
+ assertThrows(IOException.class, () -> {
+ ReplicationPeerConfig rpc =
+ ReplicationPeerConfig.newBuilder().setClusterKey(ZKConfig.getZooKeeperClusterKey(CONF1))
+ .setReplicationEndpointImpl(ReplicationEndpointWithWALEntryFilter.class.getName())
+ // test that we can create multiple WALFilters reflectively
+ .putConfiguration(BaseReplicationEndpoint.REPLICATION_WALENTRYFILTER_CONFIG_KEY,
+ "IAmNotARealWalEntryFilter")
+ .build();
+ hbaseAdmin.addReplicationPeer("testWALEntryFilterAddValidation", rpc);
+ });
+ }
+
+ @Test
+ public void testWALEntryFilterUpdateValidation() {
+ assertThrows(IOException.class, () -> {
+ ReplicationPeerConfig rpc =
+ ReplicationPeerConfig.newBuilder().setClusterKey(ZKConfig.getZooKeeperClusterKey(CONF1))
+ .setReplicationEndpointImpl(ReplicationEndpointWithWALEntryFilter.class.getName())
+ // test that we can create multiple WALFilters reflectively
+ .putConfiguration(BaseReplicationEndpoint.REPLICATION_WALENTRYFILTER_CONFIG_KEY,
+ "IAmNotARealWalEntryFilter")
+ .build();
+ hbaseAdmin.updateReplicationPeerConfig("testWALEntryFilterUpdateValidation", rpc);
+ });
+ }
+
+ @Test
+ public void testMetricsSourceBaseSourcePassThrough() {
+ /*
+ * The replication MetricsSource wraps a MetricsReplicationTableSourceImpl,
+ * MetricsReplicationSourceSourceImpl and a MetricsReplicationGlobalSourceSource, so that
+ * metrics get written to both namespaces. Both of those classes wrap a
+ * MetricsReplicationSourceImpl that implements BaseSource, which allows for custom JMX metrics.
+ * This test checks to make sure the BaseSource decorator logic on MetricsSource actually calls
+ * down through the two layers of wrapping to the actual BaseSource.
+ */
+ String id = "id";
+ DynamicMetricsRegistry mockRegistry = mock(DynamicMetricsRegistry.class);
+ MetricsReplicationSourceImpl singleRms = mock(MetricsReplicationSourceImpl.class);
+ when(singleRms.getMetricsRegistry()).thenReturn(mockRegistry);
+ MetricsReplicationSourceImpl globalRms = mock(MetricsReplicationSourceImpl.class);
+ when(globalRms.getMetricsRegistry()).thenReturn(mockRegistry);
+
+ MetricsReplicationSourceSource singleSourceSource =
+ new MetricsReplicationSourceSourceImpl(singleRms, id);
+ MetricsReplicationGlobalSourceSource globalSourceSource =
+ new MetricsReplicationGlobalSourceSourceImpl(globalRms);
+ MetricsReplicationGlobalSourceSource spyglobalSourceSource = spy(globalSourceSource);
+ doNothing().when(spyglobalSourceSource).incrFailedRecoveryQueue();
+
+ Map singleSourceSourceByTable = new HashMap<>();
+ MetricsSource source =
+ new MetricsSource(id, singleSourceSource, spyglobalSourceSource, singleSourceSourceByTable);
+
+ String gaugeName = "gauge";
+ String singleGaugeName = "source.id." + gaugeName;
+ String globalGaugeName = "source." + gaugeName;
+ long delta = 1;
+ String counterName = "counter";
+ String singleCounterName = "source.id." + counterName;
+ String globalCounterName = "source." + counterName;
+ long count = 2;
+ source.decGauge(gaugeName, delta);
+ source.getMetricsContext();
+ source.getMetricsDescription();
+ source.getMetricsJmxContext();
+ source.getMetricsName();
+ source.incCounters(counterName, count);
+ source.incGauge(gaugeName, delta);
+ source.init();
+ source.removeMetric(gaugeName);
+ source.setGauge(gaugeName, delta);
+ source.updateHistogram(counterName, count);
+ source.incrFailedRecoveryQueue();
+
+ verify(singleRms).decGauge(singleGaugeName, delta);
+ verify(globalRms).decGauge(globalGaugeName, delta);
+ verify(globalRms).getMetricsContext();
+ verify(globalRms).getMetricsJmxContext();
+ verify(globalRms).getMetricsName();
+ verify(singleRms).incCounters(singleCounterName, count);
+ verify(globalRms).incCounters(globalCounterName, count);
+ verify(singleRms).incGauge(singleGaugeName, delta);
+ verify(globalRms).incGauge(globalGaugeName, delta);
+ verify(globalRms).init();
+ verify(singleRms).removeMetric(singleGaugeName);
+ verify(globalRms).removeMetric(globalGaugeName);
+ verify(singleRms).setGauge(singleGaugeName, delta);
+ verify(globalRms).setGauge(globalGaugeName, delta);
+ verify(singleRms).updateHistogram(singleCounterName, count);
+ verify(globalRms).updateHistogram(globalCounterName, count);
+ verify(spyglobalSourceSource).incrFailedRecoveryQueue();
+
+ // check singleSourceSourceByTable metrics.
+ // singleSourceSourceByTable map entry will be created only
+ // after calling #setAgeOfLastShippedOpByTable
+ boolean containsRandomNewTable =
+ source.getSingleSourceSourceByTable().containsKey("RandomNewTable");
+ assertEquals(false, containsRandomNewTable);
+ source.updateTableLevelMetrics(createWALEntriesWithSize("RandomNewTable"));
+ containsRandomNewTable = source.getSingleSourceSourceByTable().containsKey("RandomNewTable");
+ assertEquals(true, containsRandomNewTable);
+ MetricsReplicationTableSource msr = source.getSingleSourceSourceByTable().get("RandomNewTable");
+
+ // age should be greater than zero we created the entry with time in the past
+ assertTrue(msr.getLastShippedAge() > 0);
+ assertTrue(msr.getShippedBytes() > 0);
+
+ }
+
+ private List> createWALEntriesWithSize(String tableName) {
+ List> walEntriesWithSize = new ArrayList<>();
+ byte[] a = new byte[] { 'a' };
+ Entry entry = createEntry(tableName, null, a);
+ walEntriesWithSize.add(new Pair<>(entry, 10L));
+ return walEntriesWithSize;
+ }
+
+ private Entry createEntry(String tableName, TreeMap scopes, byte[]... kvs) {
+ WALKeyImpl key1 = new WALKeyImpl(new byte[0], TableName.valueOf(tableName),
+ EnvironmentEdgeManager.currentTime() - 1L, scopes);
+ WALEdit edit1 = new WALEdit();
+
+ for (byte[] kv : kvs) {
+ WALEditInternalHelper.addExtendedCell(edit1, new KeyValue(kv, kv, kv));
+ }
+ return new Entry(key1, edit1);
+ }
+
+ private void doPut(byte[] row) throws IOException {
+ try (Connection connection = ConnectionFactory.createConnection(CONF1)) {
+ doPut(connection, row);
+ }
+ }
+
+ private void doPut(final Connection connection, final byte[] row) throws IOException {
+ try (Table t = connection.getTable(tableName)) {
+ Put put = new Put(row);
+ put.addColumn(famName, row, row);
+ t.put(put);
+ }
+ }
+
+ private static void doAssert(byte[] row) throws Exception {
+ if (ReplicationEndpointForTest.lastEntries == null) {
+ return; // first call
+ }
+ assertEquals(1, ReplicationEndpointForTest.lastEntries.size());
+ List| cells = ReplicationEndpointForTest.lastEntries.get(0).getEdit().getCells();
+ assertEquals(1, cells.size());
+ assertTrue(Bytes.equals(cells.get(0).getRowArray(), cells.get(0).getRowOffset(),
+ cells.get(0).getRowLength(), row, 0, row.length));
+ }
+
+ public static class ReplicationEndpointForTest extends BaseReplicationEndpoint {
+ static UUID uuid = UTIL1.getRandomUUID();
+ static AtomicInteger contructedCount = new AtomicInteger();
+ static AtomicInteger startedCount = new AtomicInteger();
+ static AtomicInteger stoppedCount = new AtomicInteger();
+ static AtomicInteger replicateCount = new AtomicInteger();
+ static volatile List lastEntries = null;
+
+ public ReplicationEndpointForTest() {
+ replicateCount.set(0);
+ contructedCount.incrementAndGet();
+ }
+
+ @Override
+ public UUID getPeerUUID() {
+ return uuid;
+ }
+
+ @Override
+ public boolean replicate(ReplicateContext replicateContext) {
+ replicateCount.incrementAndGet();
+ lastEntries = new ArrayList<>(replicateContext.entries);
+ return true;
+ }
+
+ @Override
+ public void start() {
+ startAsync();
+ }
+
+ @Override
+ public void stop() {
+ stopAsync();
+ }
+
+ @Override
+ protected void doStart() {
+ startedCount.incrementAndGet();
+ notifyStarted();
+ }
+
+ @Override
+ protected void doStop() {
+ stoppedCount.incrementAndGet();
+ notifyStopped();
+ }
+
+ @Override
+ public boolean canReplicateToSameCluster() {
+ return true;
+ }
+ }
+
+ /**
+ * Not used by unit tests, helpful for manual testing with replication.
+ *
+ * Snippet for `hbase shell`:
+ *
+ *
+ * create 't', 'f'
+ * add_peer '1', ENDPOINT_CLASSNAME => 'org.apache.hadoop.hbase.replication.' + \
+ * 'TestReplicationEndpoint$SleepingReplicationEndpointForTest'
+ * alter 't', {NAME=>'f', REPLICATION_SCOPE=>1}
+ *
+ */
+ public static class SleepingReplicationEndpointForTest extends ReplicationEndpointForTest {
+ private long duration;
+
+ public SleepingReplicationEndpointForTest() {
+ super();
+ }
+
+ @Override
+ public void init(Context context) throws IOException {
+ super.init(context);
+ if (this.ctx != null) {
+ duration = this.ctx.getConfiguration()
+ .getLong("hbase.test.sleep.replication.endpoint.duration.millis", 5000L);
+ }
+ }
+
+ @Override
+ public boolean replicate(ReplicateContext context) {
+ try {
+ Thread.sleep(duration);
+ } catch (InterruptedException e) {
+ Thread.currentThread().interrupt();
+ return false;
+ }
+ return super.replicate(context);
+ }
+ }
+
+ public static class InterClusterReplicationEndpointForTest
+ extends HBaseInterClusterReplicationEndpoint {
+
+ static AtomicInteger replicateCount = new AtomicInteger();
+ static boolean failedOnce;
+
+ public InterClusterReplicationEndpointForTest() {
+ replicateCount.set(0);
+ }
+
+ @Override
+ public boolean replicate(ReplicateContext replicateContext) {
+ boolean success = super.replicate(replicateContext);
+ if (success) {
+ replicateCount.addAndGet(replicateContext.entries.size());
+ }
+ return success;
+ }
+
+ @Override
+ protected CompletableFuture asyncReplicate(List entries, int ordinal,
+ int timeout) {
+ // Fail only once, we don't want to slow down the test.
+ if (failedOnce) {
+ return CompletableFuture.completedFuture(ordinal);
+ } else {
+ failedOnce = true;
+ CompletableFuture future = new CompletableFuture();
+ future.completeExceptionally(new IOException("Sample Exception: Failed to replicate."));
+ return future;
+ }
+ }
+ }
+
+ public static class ReplicationEndpointReturningFalse extends ReplicationEndpointForTest {
+ static int COUNT = 10;
+ static AtomicReference ex = new AtomicReference<>(null);
+ static AtomicBoolean replicated = new AtomicBoolean(false);
+
+ @Override
+ public boolean replicate(ReplicateContext replicateContext) {
+ try {
+ // check row
+ doAssert(row);
+ } catch (Exception e) {
+ ex.set(e);
+ }
+
+ super.replicate(replicateContext);
+ LOG.info("Replicated " + Bytes.toString(row) + ", count=" + replicateCount.get());
+
+ replicated.set(replicateCount.get() > COUNT); // first 10 times, we return false
+ return replicated.get();
+ }
+ }
+
+ // return a WALEntry filter which only accepts "row", but not other rows
+ public static class ReplicationEndpointWithWALEntryFilter extends ReplicationEndpointForTest {
+ static AtomicReference ex = new AtomicReference<>(null);
+
+ @Override
+ public boolean replicate(ReplicateContext replicateContext) {
+ try {
+ super.replicate(replicateContext);
+ doAssert(row);
+ } catch (Exception e) {
+ ex.set(e);
+ }
+ return true;
+ }
+
+ @Override
+ public WALEntryFilter getWALEntryfilter() {
+ return new ChainWALEntryFilter(super.getWALEntryfilter(), new WALEntryFilter() {
+ @Override
+ public Entry filter(Entry entry) {
+ ArrayList cells = entry.getEdit().getCells();
+ int size = cells.size();
+ for (int i = size - 1; i >= 0; i--) {
+ Cell cell = cells.get(i);
+ if (
+ !Bytes.equals(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength(), row, 0,
+ row.length)
+ ) {
+ cells.remove(i);
+ }
+ }
+ return entry;
+ }
+ });
+ }
+ }
+
+ public static class EverythingPassesWALEntryFilter implements WALEntryFilter {
+ private static boolean passedEntry = false;
+
+ @Override
+ public Entry filter(Entry entry) {
+ passedEntry = true;
+ return entry;
+ }
+
+ public static boolean hasPassedAnEntry() {
+ return passedEntry;
+ }
+ }
+
+ public static class EverythingPassesWALEntryFilterSubclass
+ extends EverythingPassesWALEntryFilter {
+ }
+}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationKillMasterRS.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationKillMasterRS.java
index 7720d42a6edc..78123ec9c208 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationKillMasterRS.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationKillMasterRS.java
@@ -17,31 +17,40 @@
*/
package org.apache.hadoop.hbase.replication;
-import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.testclassification.ReplicationTests;
-import org.junit.BeforeClass;
-import org.junit.ClassRule;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Tag;
+import org.junit.jupiter.api.Test;
/**
* Runs the TestReplicationKillRS test and selects the RS to kill in the master cluster Do not add
* other tests in this class.
*/
-@Category({ ReplicationTests.class, LargeTests.class })
+@Tag(ReplicationTests.TAG)
+@Tag(LargeTests.TAG)
public class TestReplicationKillMasterRS extends TestReplicationKillRS {
- @ClassRule
- public static final HBaseClassTestRule CLASS_RULE =
- HBaseClassTestRule.forClass(TestReplicationKillMasterRS.class);
-
- @BeforeClass
public static void setUpBeforeClass() throws Exception {
NUM_SLAVES1 = 2;
TestReplicationBase.setUpBeforeClass();
}
+ public static void tearDownAfterClass() throws Exception {
+ TestReplicationBase.tearDownAfterClass();
+ }
+
+ @BeforeEach
+ public void setUp() throws Exception {
+ setUpBase();
+ }
+
+ @AfterEach
+ public void tearDown() throws Exception {
+ tearDownBase();
+ }
+
@Test
public void killOneMasterRS() throws Exception {
loadTableAndKillRS(UTIL1);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationKillMasterRSCompressed.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationKillMasterRSCompressed.java
index 7140d39adbfe..ab6d299daca9 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationKillMasterRSCompressed.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationKillMasterRSCompressed.java
@@ -17,28 +17,28 @@
*/
package org.apache.hadoop.hbase.replication;
-import org.apache.hadoop.hbase.HBaseClassTestRule;
-import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.testclassification.ReplicationTests;
-import org.junit.BeforeClass;
-import org.junit.ClassRule;
-import org.junit.experimental.categories.Category;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Tag;
/**
* Run the same test as TestReplicationKillMasterRS but with WAL compression enabled Do not add
* other tests in this class.
*/
-@Category({ ReplicationTests.class, LargeTests.class })
-public class TestReplicationKillMasterRSCompressed extends TestReplicationKillMasterRS {
+@Tag(ReplicationTests.TAG)
+@Tag(LargeTests.TAG)
+public class TestReplicationKillMasterRSCompressed
+ extends TestReplicationKillMasterRSCompressedBase {
- @ClassRule
- public static final HBaseClassTestRule CLASS_RULE =
- HBaseClassTestRule.forClass(TestReplicationKillMasterRSCompressed.class);
+ @BeforeAll
+ public static void beforeClass() throws Exception {
+ setUpBeforeClass();
+ }
- @BeforeClass
- public static void setUpBeforeClass() throws Exception {
- CONF1.setBoolean(HConstants.ENABLE_WAL_COMPRESSION, true);
- TestReplicationKillMasterRS.setUpBeforeClass();
+ @AfterAll
+ public static void afterClass() throws Exception {
+ tearDownAfterClass();
}
}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationKillMasterRSCompressedBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationKillMasterRSCompressedBase.java
new file mode 100644
index 000000000000..af618350c708
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationKillMasterRSCompressedBase.java
@@ -0,0 +1,43 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.replication;
+
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.apache.hadoop.hbase.testclassification.ReplicationTests;
+import org.junit.jupiter.api.Tag;
+
+/**
+ * Abstract base class for TestReplicationKillMasterRSCompressed tests. Subclasses must call
+ * {@link #setUpBeforeClass()} and {@link #tearDownAfterClass()} in their {@code @BeforeAll}
+ * and {@code @AfterAll} methods respectively.
+ */
+@Tag(ReplicationTests.TAG)
+@Tag(LargeTests.TAG)
+public abstract class TestReplicationKillMasterRSCompressedBase
+ extends TestReplicationKillMasterRS {
+
+ public static void setUpBeforeClass() throws Exception {
+ CONF1.setBoolean(HConstants.ENABLE_WAL_COMPRESSION, true);
+ TestReplicationKillMasterRS.setUpBeforeClass();
+ }
+
+ public static void tearDownAfterClass() throws Exception {
+ TestReplicationKillMasterRS.tearDownAfterClass();
+ }
+}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationKillMasterRSWithSeparateOldWALs.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationKillMasterRSWithSeparateOldWALs.java
index a5e19c9f4432..40409074967f 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationKillMasterRSWithSeparateOldWALs.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationKillMasterRSWithSeparateOldWALs.java
@@ -17,24 +17,25 @@
*/
package org.apache.hadoop.hbase.replication;
-import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.testclassification.ReplicationTests;
import org.apache.hadoop.hbase.wal.AbstractFSWALProvider;
-import org.junit.BeforeClass;
-import org.junit.ClassRule;
-import org.junit.experimental.categories.Category;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Tag;
-@Category({ ReplicationTests.class, LargeTests.class })
+@Tag(ReplicationTests.TAG)
+@Tag(LargeTests.TAG)
public class TestReplicationKillMasterRSWithSeparateOldWALs extends TestReplicationKillMasterRS {
- @ClassRule
- public static final HBaseClassTestRule CLASS_RULE =
- HBaseClassTestRule.forClass(TestReplicationKillMasterRSWithSeparateOldWALs.class);
-
- @BeforeClass
+ @BeforeAll
public static void setUpBeforeClass() throws Exception {
CONF1.setBoolean(AbstractFSWALProvider.SEPARATE_OLDLOGDIR, true);
TestReplicationKillMasterRS.setUpBeforeClass();
}
+
+ @AfterAll
+ public static void tearDownAfterClass() throws Exception {
+ TestReplicationKillMasterRS.tearDownAfterClass();
+ }
}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationKillRS.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationKillRS.java
index 9a4819b2c28f..85df0c789535 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationKillRS.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationKillRS.java
@@ -17,7 +17,7 @@
*/
package org.apache.hadoop.hbase.replication;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.fail;
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.UnknownScannerException;
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationKillSlaveRS.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationKillSlaveRS.java
index 6505a4a191d9..560ba8edfc8d 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationKillSlaveRS.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationKillSlaveRS.java
@@ -17,31 +17,40 @@
*/
package org.apache.hadoop.hbase.replication;
-import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.testclassification.ReplicationTests;
-import org.junit.BeforeClass;
-import org.junit.ClassRule;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Tag;
+import org.junit.jupiter.api.Test;
/**
* Runs the TestReplicationKillRS test and selects the RS to kill in the slave cluster Do not add
* other tests in this class.
*/
-@Category({ ReplicationTests.class, LargeTests.class })
+@Tag(ReplicationTests.TAG)
+@Tag(LargeTests.TAG)
public class TestReplicationKillSlaveRS extends TestReplicationKillRS {
- @ClassRule
- public static final HBaseClassTestRule CLASS_RULE =
- HBaseClassTestRule.forClass(TestReplicationKillSlaveRS.class);
-
- @BeforeClass
+ @BeforeAll
public static void setUpBeforeClass() throws Exception {
NUM_SLAVES2 = 2;
TestReplicationBase.setUpBeforeClass();
}
+
+ @AfterAll
+ public static void tearDownAfterClass() throws Exception {
+ TestReplicationBase.tearDownAfterClass();
+ }
+
+ @BeforeEach
+ public void setUp() throws Exception {
+ setUpBase();
+ }
+
+ @AfterEach
+ public void tearDown() throws Exception {
+ tearDownBase();
+ }
+
@Test
public void killOneSlaveRS() throws Exception {
loadTableAndKillRS(UTIL2);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationKillSlaveRSWithSeparateOldWALs.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationKillSlaveRSWithSeparateOldWALs.java
index 3b0766f6ed9a..056bffe16a25 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationKillSlaveRSWithSeparateOldWALs.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationKillSlaveRSWithSeparateOldWALs.java
@@ -17,24 +17,25 @@
*/
package org.apache.hadoop.hbase.replication;
-import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.testclassification.ReplicationTests;
import org.apache.hadoop.hbase.wal.AbstractFSWALProvider;
-import org.junit.BeforeClass;
-import org.junit.ClassRule;
-import org.junit.experimental.categories.Category;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Tag;
-@Category({ ReplicationTests.class, LargeTests.class })
+@Tag(ReplicationTests.TAG)
+@Tag(LargeTests.TAG)
public class TestReplicationKillSlaveRSWithSeparateOldWALs extends TestReplicationKillSlaveRS {
- @ClassRule
- public static final HBaseClassTestRule CLASS_RULE =
- HBaseClassTestRule.forClass(TestReplicationKillSlaveRSWithSeparateOldWALs.class);
-
- @BeforeClass
+ @BeforeAll
public static void setUpBeforeClass() throws Exception {
CONF1.setBoolean(AbstractFSWALProvider.SEPARATE_OLDLOGDIR, true);
TestReplicationKillSlaveRS.setUpBeforeClass();
}
+
+ @AfterAll
+ public static void tearDownAfterClass() throws Exception {
+ TestReplicationKillSlaveRS.tearDownAfterClass();
+ }
}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationMetricsforUI.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationMetricsforUI.java
index a5dc1490fc65..8082d459817f 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationMetricsforUI.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationMetricsforUI.java
@@ -17,9 +17,12 @@
*/
package org.apache.hadoop.hbase.replication;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+
import java.util.Map;
import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Put;
@@ -28,18 +31,38 @@
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.testclassification.ReplicationTests;
import org.apache.hadoop.hbase.util.Bytes;
-import org.junit.Assert;
-import org.junit.ClassRule;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Tag;
+import org.junit.jupiter.api.Test;
-@Category({ ReplicationTests.class, MediumTests.class })
+@Tag(ReplicationTests.TAG)
+@Tag(MediumTests.TAG)
public class TestReplicationMetricsforUI extends TestReplicationBase {
- @ClassRule
- public static final HBaseClassTestRule CLASS_RULE =
- HBaseClassTestRule.forClass(TestReplicationMetricsforUI.class);
private static final byte[] qualName = Bytes.toBytes("q");
+ @BeforeAll
+ public static void setUpBeforeClass() throws Exception {
+ TestReplicationBase.setUpBeforeClass();
+ }
+
+ @AfterAll
+ public static void tearDownAfterClass() throws Exception {
+ TestReplicationBase.tearDownAfterClass();
+ }
+
+ @BeforeEach
+ public void setUp() throws Exception {
+ setUpBase();
+ }
+
+ @AfterEach
+ public void tearDown() throws Exception {
+ tearDownBase();
+ }
+
@Test
public void testReplicationMetrics() throws Exception {
try (Admin hbaseAdmin = UTIL1.getConnection().getAdmin()) {
@@ -54,13 +77,13 @@ public void testReplicationMetrics() throws Exception {
Thread.sleep(5000);
HRegionServer rs = UTIL1.getRSForFirstRegionInTable(tableName);
Map metrics = rs.getWalGroupsReplicationStatus();
- Assert.assertEquals("metric size ", 1, metrics.size());
+ assertEquals(1, metrics.size(), "metric size ");
long lastPosition = 0;
for (Map.Entry metric : metrics.entrySet()) {
- Assert.assertEquals("peerId", PEER_ID2, metric.getValue().getPeerId());
- Assert.assertEquals("queue length", 1, metric.getValue().getQueueSize());
- Assert.assertEquals("replication delay", 0, metric.getValue().getReplicationDelay());
- Assert.assertTrue("current position >= 0", metric.getValue().getCurrentPosition() >= 0);
+ assertEquals(PEER_ID2, metric.getValue().getPeerId(), "peerId");
+ assertEquals(1, metric.getValue().getQueueSize(), "queue length");
+ assertEquals(0, metric.getValue().getReplicationDelay(), "replication delay");
+ assertTrue(metric.getValue().getCurrentPosition() >= 0, "current position >= 0");
lastPosition = metric.getValue().getCurrentPosition();
}
for (int i = 0; i < NB_ROWS_IN_BATCH; i++) {
@@ -78,11 +101,11 @@ public void testReplicationMetrics() throws Exception {
Path lastPath = null;
for (Map.Entry metric : metrics.entrySet()) {
lastPath = metric.getValue().getCurrentPath();
- Assert.assertEquals("peerId", PEER_ID2, metric.getValue().getPeerId());
- Assert.assertTrue("age of Last Shipped Op should be > 0 ",
- metric.getValue().getAgeOfLastShippedOp() > 0);
- Assert.assertTrue("current position should > last position",
- metric.getValue().getCurrentPosition() - lastPosition > 0);
+ assertEquals(PEER_ID2, metric.getValue().getPeerId(), "peerId");
+ assertTrue(metric.getValue().getAgeOfLastShippedOp() > 0,
+ "age of Last Shipped Op should be > 0 ");
+ assertTrue(metric.getValue().getCurrentPosition() - lastPosition > 0,
+ "current position should > last position");
lastPosition = metric.getValue().getCurrentPosition();
}
@@ -98,10 +121,10 @@ public void testReplicationMetrics() throws Exception {
Thread.sleep(5000);
metrics = rs.getWalGroupsReplicationStatus();
for (Map.Entry metric : metrics.entrySet()) {
- Assert.assertEquals("replication delay", 0, metric.getValue().getReplicationDelay());
- Assert.assertTrue("current position should < last position",
- metric.getValue().getCurrentPosition() < lastPosition);
- Assert.assertNotEquals("current path", lastPath, metric.getValue().getCurrentPath());
+ assertEquals(0, metric.getValue().getReplicationDelay(), "replication delay");
+ assertTrue(metric.getValue().getCurrentPosition() < lastPosition,
+ "current position should < last position");
+ assertNotEquals(lastPath, metric.getValue().getCurrentPath(), "current path");
}
}
}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSmallTests.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSmallTests.java
index 06fdc47fa3ae..8e629e383996 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSmallTests.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSmallTests.java
@@ -17,18 +17,19 @@
*/
package org.apache.hadoop.hbase.replication;
-import static org.junit.Assert.assertArrayEquals;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.assertArrayEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.fail;
import java.util.ArrayList;
import java.util.List;
import java.util.NavigableMap;
import java.util.TreeMap;
+import java.util.stream.Stream;
import org.apache.hadoop.hbase.CellUtil;
-import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.HBaseParameterizedTestTemplate;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.TableName;
@@ -53,52 +54,66 @@
import org.apache.hadoop.hbase.wal.WALEdit;
import org.apache.hadoop.hbase.wal.WALEditInternalHelper;
import org.apache.hadoop.hbase.wal.WALKeyImpl;
-import org.junit.Before;
-import org.junit.ClassRule;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-import org.junit.runner.RunWith;
-import org.junit.runners.Parameterized;
-import org.junit.runners.Parameterized.Parameter;
-import org.junit.runners.Parameterized.Parameters;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Tag;
+import org.junit.jupiter.api.TestTemplate;
+import org.junit.jupiter.params.provider.Arguments;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;
-@RunWith(Parameterized.class)
-@Category({ ReplicationTests.class, LargeTests.class })
+@Tag(ReplicationTests.TAG)
+@Tag(LargeTests.TAG)
+@HBaseParameterizedTestTemplate(name = "{index}: serialPeer={0}")
public class TestReplicationSmallTests extends TestReplicationBase {
- @ClassRule
- public static final HBaseClassTestRule CLASS_RULE =
- HBaseClassTestRule.forClass(TestReplicationSmallTests.class);
-
private static final Logger LOG = LoggerFactory.getLogger(TestReplicationSmallTests.class);
private static final String PEER_ID = "2";
- @Parameter
- public boolean serialPeer;
+ private boolean serialPeer;
+
+ public TestReplicationSmallTests(boolean serialPeer) {
+ this.serialPeer = serialPeer;
+ }
@Override
protected boolean isSerialPeer() {
return serialPeer;
}
- @Parameters(name = "{index}: serialPeer={0}")
- public static List parameters() {
- return ImmutableList.of(true, false);
+ public static Stream parameters() {
+ return ImmutableList.of(true, false).stream().map(Arguments::of);
+ }
+
+ @BeforeAll
+ public static void setUpBeforeClass() throws Exception {
+ TestReplicationBase.setUpBeforeClass();
}
- @Before
+ @AfterAll
+ public static void tearDownAfterClass() throws Exception {
+ TestReplicationBase.tearDownAfterClass();
+ }
+
+ @BeforeEach
public void setUp() throws Exception {
+ setUpBase();
cleanUp();
}
+ @AfterEach
+ public void tearDown() throws Exception {
+ tearDownBase();
+ }
+
/**
* Verify that version and column delete marker types are replicated correctly.
*/
- @Test
+ @TestTemplate
public void testDeleteTypes() throws Exception {
LOG.info("testDeleteTypes");
final byte[] v1 = Bytes.toBytes("v1");
@@ -184,7 +199,7 @@ public void testDeleteTypes() throws Exception {
/**
* Add a row, check it's replicated, delete it, check's gone
*/
- @Test
+ @TestTemplate
public void testSimplePutDelete() throws Exception {
LOG.info("testSimplePutDelete");
runSimplePutDeleteTest();
@@ -193,7 +208,7 @@ public void testSimplePutDelete() throws Exception {
/**
* Try a small batch upload using the write buffer, check it's replicated
*/
- @Test
+ @TestTemplate
public void testSmallBatch() throws Exception {
LOG.info("testSmallBatch");
runSmallBatchTest();
@@ -203,7 +218,7 @@ public void testSmallBatch() throws Exception {
* Test disable/enable replication, trying to insert, make sure nothing's replicated, enable it,
* the insert should be replicated
*/
- @Test
+ @TestTemplate
public void testDisableEnable() throws Exception {
// Test disabling replication
hbaseAdmin.disableReplicationPeer(PEER_ID);
@@ -243,7 +258,7 @@ public void testDisableEnable() throws Exception {
/**
* Removes and re-add a peer cluster
*/
- @Test
+ @TestTemplate
public void testAddAndRemoveClusters() throws Exception {
LOG.info("testAddAndRemoveClusters");
hbaseAdmin.removeReplicationPeer(PEER_ID);
@@ -296,7 +311,7 @@ public void testAddAndRemoveClusters() throws Exception {
* Do a more intense version testSmallBatch, one that will trigger wal rolling and other
* non-trivial code paths
*/
- @Test
+ @TestTemplate
public void testLoading() throws Exception {
LOG.info("Writing out rows to table1 in testLoading");
List puts = new ArrayList<>(NB_ROWS_IN_BIG_BATCH);
@@ -357,7 +372,7 @@ public void testLoading() throws Exception {
* Create two new Tables with colfamilies enabled for replication then run
* {@link Admin#listReplicatedTableCFs()}. Finally verify the table:colfamilies.
*/
- @Test
+ @TestTemplate
public void testVerifyListReplicatedTable() throws Exception {
LOG.info("testVerifyListReplicatedTable");
@@ -390,7 +405,7 @@ public void testVerifyListReplicatedTable() throws Exception {
// check the matching result
for (int i = 0; i < match.length; i++) {
- assertTrue("listReplicated() does not match table " + i, (match[i] == 1));
+ assertTrue((match[i] == 1), "listReplicated() does not match table " + i);
}
// drop tables
@@ -406,7 +421,7 @@ public void testVerifyListReplicatedTable() throws Exception {
/**
* Test for HBase-15259 WALEdits under replay will also be replicated
*/
- @Test
+ @TestTemplate
public void testReplicationInReplay() throws Exception {
final TableName tableName = htable1.getName();
@@ -448,7 +463,7 @@ public void testReplicationInReplay() throws Exception {
/**
* Test for HBASE-27448 Add an admin method to get replication enabled state
*/
- @Test
+ @TestTemplate
public void testGetReplicationPeerState() throws Exception {
// Test disable replication peer
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSmallTestsSync.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSmallTestsSync.java
index ec36039a8425..afa077b45204 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSmallTestsSync.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSmallTestsSync.java
@@ -17,21 +17,17 @@
*/
package org.apache.hadoop.hbase.replication;
-import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.testclassification.ReplicationTests;
-import org.junit.ClassRule;
-import org.junit.experimental.categories.Category;
-import org.junit.runner.RunWith;
-import org.junit.runners.Parameterized;
+import org.junit.jupiter.api.Tag;
-@RunWith(Parameterized.class)
-@Category({ ReplicationTests.class, LargeTests.class })
+@Tag(ReplicationTests.TAG)
+@Tag(LargeTests.TAG)
public class TestReplicationSmallTestsSync extends TestReplicationSmallTests {
- @ClassRule
- public static final HBaseClassTestRule CLASS_RULE =
- HBaseClassTestRule.forClass(TestReplicationSmallTestsSync.class);
+ public TestReplicationSmallTestsSync(boolean serialPeer) {
+ super(serialPeer);
+ }
@Override
protected boolean isSyncPeer() {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStatus.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStatus.java
index 0ad2fd5acea4..e8a13353ad51 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStatus.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStatus.java
@@ -17,15 +17,14 @@
*/
package org.apache.hadoop.hbase.replication;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
import java.io.IOException;
import java.util.EnumSet;
import java.util.List;
import org.apache.hadoop.hbase.ClusterMetrics;
import org.apache.hadoop.hbase.ClusterMetrics.Option;
-import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.ServerMetrics;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.Waiter;
@@ -37,19 +36,39 @@
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.JVMClusterUtil;
import org.apache.hadoop.hbase.util.Threads;
-import org.junit.ClassRule;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Tag;
+import org.junit.jupiter.api.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-@Category({ ReplicationTests.class, MediumTests.class })
+@Tag(ReplicationTests.TAG)
+@Tag(MediumTests.TAG)
public class TestReplicationStatus extends TestReplicationBase {
private static final Logger LOG = LoggerFactory.getLogger(TestReplicationStatus.class);
- @ClassRule
- public static final HBaseClassTestRule CLASS_RULE =
- HBaseClassTestRule.forClass(TestReplicationStatus.class);
+ @BeforeAll
+ public static void setUpBeforeClass() throws Exception {
+ TestReplicationBase.setUpBeforeClass();
+ }
+
+ @AfterAll
+ public static void tearDownAfterClass() throws Exception {
+ TestReplicationBase.tearDownAfterClass();
+ }
+
+ @BeforeEach
+ public void setUp() throws Exception {
+ setUpBase();
+ }
+
+ @AfterEach
+ public void tearDown() throws Exception {
+ tearDownBase();
+ }
static void insertRowsOnSource() throws IOException {
final byte[] qualName = Bytes.toBytes("q");
@@ -91,21 +110,21 @@ public boolean evaluate() throws Exception {
for (JVMClusterUtil.RegionServerThread thread : UTIL1.getHBaseCluster()
.getRegionServerThreads()) {
ServerName server = thread.getRegionServer().getServerName();
- assertTrue("" + server, metrics.getLiveServerMetrics().containsKey(server));
+ assertTrue(metrics.getLiveServerMetrics().containsKey(server), "" + server);
ServerMetrics sm = metrics.getLiveServerMetrics().get(server);
List rLoadSourceList = sm.getReplicationLoadSourceList();
ReplicationLoadSink rLoadSink = sm.getReplicationLoadSink();
// check SourceList only has one entry, because only has one peer
- assertEquals("Failed to get ReplicationLoadSourceList " + rLoadSourceList + ", " + server, 1,
- rLoadSourceList.size());
+ assertEquals(1, rLoadSourceList.size(),
+ "Failed to get ReplicationLoadSourceList " + rLoadSourceList + ", " + server);
assertEquals(PEER_ID2, rLoadSourceList.get(0).getPeerID());
// check Sink exist only as it is difficult to verify the value on the fly
- assertTrue("failed to get ReplicationLoadSink.AgeOfLastShippedOp ",
- (rLoadSink.getAgeOfLastAppliedOp() >= 0));
- assertTrue("failed to get ReplicationLoadSink.TimeStampsOfLastAppliedOp ",
- (rLoadSink.getTimestampsOfLastAppliedOp() >= 0));
+ assertTrue((rLoadSink.getAgeOfLastAppliedOp() >= 0),
+ "failed to get ReplicationLoadSink.AgeOfLastAppliedOp ");
+ assertTrue((rLoadSink.getTimestampsOfLastAppliedOp() >= 0),
+ "failed to get ReplicationLoadSink.TimeStampsOfLastAppliedOp ");
}
// Stop rs1, then the queue of rs1 will be transfered to rs0
@@ -122,7 +141,7 @@ public boolean evaluate() throws Exception {
List rLoadSourceList = waitOnMetricsReport(1, server);
// The remaining server should now have two queues -- the original and then the one that was
// added because of failover. The original should still be PEER_ID2 though.
- assertEquals("Failed ReplicationLoadSourceList " + rLoadSourceList, 2, rLoadSourceList.size());
+ assertEquals(2, rLoadSourceList.size(), "Failed ReplicationLoadSourceList " + rLoadSourceList);
assertEquals(PEER_ID2, rLoadSourceList.get(0).getPeerID());
}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStatusAfterLagging.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStatusAfterLagging.java
index c761078dfab3..b67d2551f34b 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStatusAfterLagging.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStatusAfterLagging.java
@@ -17,14 +17,13 @@
*/
package org.apache.hadoop.hbase.replication;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
import java.util.EnumSet;
import java.util.List;
import org.apache.hadoop.hbase.ClusterMetrics;
import org.apache.hadoop.hbase.ClusterMetrics.Option;
-import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;
@@ -32,16 +31,36 @@
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.testclassification.ReplicationTests;
import org.apache.hadoop.hbase.util.Bytes;
-import org.junit.ClassRule;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Tag;
+import org.junit.jupiter.api.Test;
-@Category({ ReplicationTests.class, MediumTests.class })
+@Tag(ReplicationTests.TAG)
+@Tag(MediumTests.TAG)
public class TestReplicationStatusAfterLagging extends TestReplicationBase {
- @ClassRule
- public static final HBaseClassTestRule CLASS_RULE =
- HBaseClassTestRule.forClass(TestReplicationStatusAfterLagging.class);
+ @BeforeAll
+ public static void setUpBeforeClass() throws Exception {
+ TestReplicationBase.setUpBeforeClass();
+ }
+
+ @AfterAll
+ public static void tearDownAfterClass() throws Exception {
+ TestReplicationBase.tearDownAfterClass();
+ }
+
+ @BeforeEach
+ public void setUp() throws Exception {
+ setUpBase();
+ }
+
+ @AfterEach
+ public void tearDown() throws Exception {
+ tearDownBase();
+ }
@Override
protected String getClusterKey(HBaseTestingUtil util) throws Exception {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStatusBothNormalAndRecoveryLagging.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStatusBothNormalAndRecoveryLagging.java
index de19d0f5f4a2..da14b1f1dc4d 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStatusBothNormalAndRecoveryLagging.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStatusBothNormalAndRecoveryLagging.java
@@ -17,30 +17,49 @@
*/
package org.apache.hadoop.hbase.replication;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
import java.util.EnumSet;
import java.util.List;
import org.apache.hadoop.hbase.ClusterMetrics;
import org.apache.hadoop.hbase.ClusterMetrics.Option;
-import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.testclassification.ReplicationTests;
import org.apache.hadoop.hbase.util.Bytes;
-import org.junit.ClassRule;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Tag;
+import org.junit.jupiter.api.Test;
-@Category({ ReplicationTests.class, MediumTests.class })
+@Tag(ReplicationTests.TAG)
+@Tag(MediumTests.TAG)
public class TestReplicationStatusBothNormalAndRecoveryLagging extends TestReplicationBase {
- @ClassRule
- public static final HBaseClassTestRule CLASS_RULE =
- HBaseClassTestRule.forClass(TestReplicationStatusBothNormalAndRecoveryLagging.class);
+ @BeforeAll
+ public static void setUpBeforeClass() throws Exception {
+ TestReplicationBase.setUpBeforeClass();
+ }
+
+ @AfterAll
+ public static void tearDownAfterClass() throws Exception {
+ TestReplicationBase.tearDownAfterClass();
+ }
+
+ @BeforeEach
+ public void setUp() throws Exception {
+ setUpBase();
+ }
+
+ @AfterEach
+ public void tearDown() throws Exception {
+ tearDownBase();
+ }
@Test
public void testReplicationStatusBothNormalAndRecoveryLagging() throws Exception {
@@ -83,7 +102,7 @@ public void testReplicationStatusBothNormalAndRecoveryLagging() throws Exception
assertEquals(0, loadSource.getTimestampOfLastShippedOp());
assertTrue(loadSource.getReplicationLag() > 0);
}
- assertTrue("No normal queue found.", foundNormal);
- assertTrue("No recovery queue found.", foundRecovery);
+ assertTrue(foundNormal, "No normal queue found.");
+ assertTrue(foundRecovery, "No recovery queue found.");
}
}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStatusSink.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStatusSink.java
index 34dcf2329c46..07ed48a9f166 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStatusSink.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStatusSink.java
@@ -17,27 +17,48 @@
*/
package org.apache.hadoop.hbase.replication;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotEquals;
+
import java.io.IOException;
import java.util.EnumSet;
import org.apache.hadoop.hbase.ClusterMetrics;
-import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.ServerMetrics;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.Waiter;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.testclassification.ReplicationTests;
-import org.junit.Assert;
-import org.junit.ClassRule;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Tag;
+import org.junit.jupiter.api.Test;
-@Category({ ReplicationTests.class, MediumTests.class })
+@Tag(ReplicationTests.TAG)
+@Tag(MediumTests.TAG)
public class TestReplicationStatusSink extends TestReplicationBase {
- @ClassRule
- public static final HBaseClassTestRule CLASS_RULE =
- HBaseClassTestRule.forClass(TestReplicationStatusSink.class);
+ @BeforeAll
+ public static void setUpBeforeClass() throws Exception {
+ TestReplicationBase.setUpBeforeClass();
+ }
+
+ @AfterAll
+ public static void tearDownAfterClass() throws Exception {
+ TestReplicationBase.tearDownAfterClass();
+ }
+
+ @BeforeEach
+ public void setUp() throws Exception {
+ setUpBase();
+ }
+
+ @AfterEach
+ public void tearDown() throws Exception {
+ tearDownBase();
+ }
@Test
public void testReplicationStatusSink() throws Exception {
@@ -46,7 +67,7 @@ public void testReplicationStatusSink() throws Exception {
ReplicationLoadSink loadSink = getLatestSinkMetric(admin, server);
// First checks if status of timestamp of last applied op is same as RS start, since no edits
// were replicated yet
- Assert.assertEquals(loadSink.getTimestampStarted(), loadSink.getTimestampsOfLastAppliedOp());
+ assertEquals(loadSink.getTimestampStarted(), loadSink.getTimestampsOfLastAppliedOp());
// now insert some rows on source, so that it gets delivered to target
TestReplicationStatus.insertRowsOnSource();
long wait =
@@ -54,7 +75,7 @@ public void testReplicationStatusSink() throws Exception {
ReplicationLoadSink loadSink1 = getLatestSinkMetric(admin, server);
return loadSink1.getTimestampsOfLastAppliedOp() > loadSink1.getTimestampStarted();
});
- Assert.assertNotEquals(-1, wait);
+ assertNotEquals(-1, wait);
}
}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStatusSourceStartedTargetStoppedNewOp.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStatusSourceStartedTargetStoppedNewOp.java
index c9ef613a21f3..d5b1a3769786 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStatusSourceStartedTargetStoppedNewOp.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStatusSourceStartedTargetStoppedNewOp.java
@@ -17,31 +17,50 @@
*/
package org.apache.hadoop.hbase.replication;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertTrue;
import java.util.EnumSet;
import java.util.List;
import org.apache.hadoop.hbase.ClusterMetrics;
import org.apache.hadoop.hbase.ClusterMetrics.Option;
-import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.testclassification.ReplicationTests;
import org.apache.hadoop.hbase.util.Bytes;
-import org.junit.ClassRule;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Tag;
+import org.junit.jupiter.api.Test;
-@Category({ ReplicationTests.class, MediumTests.class })
+@Tag(ReplicationTests.TAG)
+@Tag(MediumTests.TAG)
public class TestReplicationStatusSourceStartedTargetStoppedNewOp extends TestReplicationBase {
- @ClassRule
- public static final HBaseClassTestRule CLASS_RULE =
- HBaseClassTestRule.forClass(TestReplicationStatusSourceStartedTargetStoppedNewOp.class);
+ @BeforeAll
+ public static void setUpBeforeClass() throws Exception {
+ TestReplicationBase.setUpBeforeClass();
+ }
+
+ @AfterAll
+ public static void tearDownAfterClass() throws Exception {
+ TestReplicationBase.tearDownAfterClass();
+ }
+
+ @BeforeEach
+ public void setUp() throws Exception {
+ setUpBase();
+ }
+
+ @AfterEach
+ public void tearDown() throws Exception {
+ tearDownBase();
+ }
@Test
public void testReplicationStatusSourceStartedTargetStoppedNewOp() throws Exception {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStatusSourceStartedTargetStoppedNoOps.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStatusSourceStartedTargetStoppedNoOps.java
index b3e52e858a7e..bd5bfa12cc7d 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStatusSourceStartedTargetStoppedNoOps.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStatusSourceStartedTargetStoppedNoOps.java
@@ -17,28 +17,47 @@
*/
package org.apache.hadoop.hbase.replication;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
import java.util.EnumSet;
import java.util.List;
import org.apache.hadoop.hbase.ClusterMetrics;
import org.apache.hadoop.hbase.ClusterMetrics.Option;
-import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.testclassification.ReplicationTests;
-import org.junit.ClassRule;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Tag;
+import org.junit.jupiter.api.Test;
-@Category({ ReplicationTests.class, MediumTests.class })
+@Tag(ReplicationTests.TAG)
+@Tag(MediumTests.TAG)
public class TestReplicationStatusSourceStartedTargetStoppedNoOps extends TestReplicationBase {
- @ClassRule
- public static final HBaseClassTestRule CLASS_RULE =
- HBaseClassTestRule.forClass(TestReplicationStatusSourceStartedTargetStoppedNoOps.class);
+ @BeforeAll
+ public static void setUpBeforeClass() throws Exception {
+ TestReplicationBase.setUpBeforeClass();
+ }
+
+ @AfterAll
+ public static void tearDownAfterClass() throws Exception {
+ TestReplicationBase.tearDownAfterClass();
+ }
+
+ @BeforeEach
+ public void setUp() throws Exception {
+ setUpBase();
+ }
+
+ @AfterEach
+ public void tearDown() throws Exception {
+ tearDownBase();
+ }
@Test
public void testReplicationStatusSourceStartedTargetStoppedNoOps() throws Exception {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStatusSourceStartedTargetStoppedWithRecovery.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStatusSourceStartedTargetStoppedWithRecovery.java
index 269fa1b38c70..5982b1bd9e3f 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStatusSourceStartedTargetStoppedWithRecovery.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStatusSourceStartedTargetStoppedWithRecovery.java
@@ -17,32 +17,51 @@
*/
package org.apache.hadoop.hbase.replication;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertTrue;
import java.util.EnumSet;
import java.util.List;
import org.apache.hadoop.hbase.ClusterMetrics;
import org.apache.hadoop.hbase.ClusterMetrics.Option;
-import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.testclassification.ReplicationTests;
import org.apache.hadoop.hbase.util.Bytes;
-import org.junit.ClassRule;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Tag;
+import org.junit.jupiter.api.Test;
-@Category({ ReplicationTests.class, MediumTests.class })
+@Tag(ReplicationTests.TAG)
+@Tag(MediumTests.TAG)
public class TestReplicationStatusSourceStartedTargetStoppedWithRecovery
extends TestReplicationBase {
- @ClassRule
- public static final HBaseClassTestRule CLASS_RULE =
- HBaseClassTestRule.forClass(TestReplicationStatusSourceStartedTargetStoppedWithRecovery.class);
+ @BeforeAll
+ public static void setUpBeforeClass() throws Exception {
+ TestReplicationBase.setUpBeforeClass();
+ }
+
+ @AfterAll
+ public static void tearDownAfterClass() throws Exception {
+ TestReplicationBase.tearDownAfterClass();
+ }
+
+ @BeforeEach
+ public void setUp() throws Exception {
+ setUpBase();
+ }
+
+ @AfterEach
+ public void tearDown() throws Exception {
+ tearDownBase();
+ }
@Test
public void testReplicationStatusSourceStartedTargetStoppedWithRecovery() throws Exception {
@@ -80,7 +99,7 @@ public void testReplicationStatusSourceStartedTargetStoppedWithRecovery() throws
assertEquals(0, loadSource.getReplicationLag());
}
}
- assertTrue("No normal queue found.", foundNormal);
- assertTrue("No recovery queue found.", foundRecovery);
+ assertTrue(foundNormal, "No normal queue found.");
+ assertTrue(foundRecovery, "No recovery queue found.");
}
}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSyncUpTool.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSyncUpTool.java
index 66de933832b5..d1b64afa567b 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSyncUpTool.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSyncUpTool.java
@@ -21,8 +21,8 @@
import static org.apache.hadoop.hbase.replication.TestReplicationBase.NB_RETRIES;
import static org.apache.hadoop.hbase.replication.TestReplicationBase.NB_ROWS_IN_BATCH;
import static org.apache.hadoop.hbase.replication.TestReplicationBase.SLEEP_TIME;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
import java.util.ArrayList;
import java.util.List;
@@ -31,7 +31,6 @@
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Put;
@@ -40,19 +39,15 @@
import org.apache.hadoop.hbase.testclassification.ReplicationTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.CommonFSUtils;
-import org.junit.ClassRule;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
+import org.junit.jupiter.api.Tag;
+import org.junit.jupiter.api.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-@Category({ ReplicationTests.class, LargeTests.class })
+@Tag(ReplicationTests.TAG)
+@Tag(LargeTests.TAG)
public class TestReplicationSyncUpTool extends TestReplicationSyncUpToolBase {
- @ClassRule
- public static final HBaseClassTestRule CLASS_RULE =
- HBaseClassTestRule.forClass(TestReplicationSyncUpTool.class);
-
private static final Logger LOG = LoggerFactory.getLogger(TestReplicationSyncUpTool.class);
/**
@@ -159,8 +154,8 @@ private void putAndReplicateRows() throws Exception {
for (int i = 0; i < NB_RETRIES; i++) {
int rowCountHt1TargetAtPeer1 = countRows(ht1TargetAtPeer1);
if (i == NB_RETRIES - 1) {
- assertEquals("t1_syncup has 101 rows on source, and 100 on slave1", rowCountHt1Source - 1,
- rowCountHt1TargetAtPeer1);
+ assertEquals(rowCountHt1Source - 1, rowCountHt1TargetAtPeer1,
+ "t1_syncup has 101 rows on source, and 100 on slave1");
}
if (rowCountHt1Source - 1 == rowCountHt1TargetAtPeer1) {
break;
@@ -172,8 +167,8 @@ private void putAndReplicateRows() throws Exception {
for (int i = 0; i < NB_RETRIES; i++) {
int rowCountHt2TargetAtPeer1 = countRows(ht2TargetAtPeer1);
if (i == NB_RETRIES - 1) {
- assertEquals("t2_syncup has 201 rows on source, and 200 on slave1", rowCountHt2Source - 1,
- rowCountHt2TargetAtPeer1);
+ assertEquals(rowCountHt2Source - 1, rowCountHt2TargetAtPeer1,
+ "t2_syncup has 201 rows on source, and 200 on slave1");
}
if (rowCountHt2Source - 1 == rowCountHt2TargetAtPeer1) {
break;
@@ -203,12 +198,12 @@ private void mimicSyncUpAfterDelete() throws Exception {
ht2Source.delete(list);
int rowCount_ht1Source = countRows(ht1Source);
- assertEquals("t1_syncup has 51 rows on source, after remove 50 of the replicated colfam", 51,
- rowCount_ht1Source);
+ assertEquals(51, rowCount_ht1Source,
+ "t1_syncup has 51 rows on source, after remove 50 of the replicated colfam");
int rowCount_ht2Source = countRows(ht2Source);
- assertEquals("t2_syncup has 101 rows on source, after remove 100 of the replicated colfam", 101,
- rowCount_ht2Source);
+ assertEquals(101, rowCount_ht2Source,
+ "t2_syncup has 101 rows on source, after remove 100 of the replicated colfam");
List sourceRses = UTIL1.getHBaseCluster().getRegionServerThreads().stream()
.map(rst -> rst.getRegionServer().getServerName()).collect(Collectors.toList());
shutDownSourceHBaseCluster();
@@ -219,18 +214,18 @@ private void mimicSyncUpAfterDelete() throws Exception {
// before sync up
int rowCountHt1TargetAtPeer1 = countRows(ht1TargetAtPeer1);
int rowCountHt2TargetAtPeer1 = countRows(ht2TargetAtPeer1);
- assertEquals("@Peer1 t1_syncup should still have 100 rows", 100, rowCountHt1TargetAtPeer1);
- assertEquals("@Peer1 t2_syncup should still have 200 rows", 200, rowCountHt2TargetAtPeer1);
+ assertEquals(100, rowCountHt1TargetAtPeer1, "@Peer1 t1_syncup should still have 100 rows");
+ assertEquals(200, rowCountHt2TargetAtPeer1, "@Peer1 t2_syncup should still have 200 rows");
syncUp(UTIL1);
// After sync up
rowCountHt1TargetAtPeer1 = countRows(ht1TargetAtPeer1);
rowCountHt2TargetAtPeer1 = countRows(ht2TargetAtPeer1);
- assertEquals("@Peer1 t1_syncup should be sync up and have 50 rows", 50,
- rowCountHt1TargetAtPeer1);
- assertEquals("@Peer1 t2_syncup should be sync up and have 100 rows", 100,
- rowCountHt2TargetAtPeer1);
+ assertEquals(50, rowCountHt1TargetAtPeer1,
+ "@Peer1 t1_syncup should be sync up and have 50 rows");
+ assertEquals(100, rowCountHt2TargetAtPeer1,
+ "@Peer1 t2_syncup should be sync up and have 100 rows");
// check we have recorded the dead region servers and also have an info file
Path rootDir = CommonFSUtils.getRootDir(UTIL1.getConfiguration());
@@ -275,9 +270,9 @@ private void mimicSyncUpAfterPut() throws Exception {
ht2Source.put(p);
int rowCount_ht1Source = countRows(ht1Source);
- assertEquals("t1_syncup has 102 rows on source", 102, rowCount_ht1Source);
+ assertEquals(102, rowCount_ht1Source, "t1_syncup has 102 rows on source");
int rowCount_ht2Source = countRows(ht2Source);
- assertEquals("t2_syncup has 202 rows on source", 202, rowCount_ht2Source);
+ assertEquals(202, rowCount_ht2Source, "t2_syncup has 202 rows on source");
shutDownSourceHBaseCluster();
restartTargetHBaseCluster(1);
@@ -287,20 +282,20 @@ private void mimicSyncUpAfterPut() throws Exception {
// before sync up
int rowCountHt1TargetAtPeer1 = countRows(ht1TargetAtPeer1);
int rowCountHt2TargetAtPeer1 = countRows(ht2TargetAtPeer1);
- assertEquals("@Peer1 t1_syncup should be NOT sync up and have 50 rows", 50,
- rowCountHt1TargetAtPeer1);
- assertEquals("@Peer1 t2_syncup should be NOT sync up and have 100 rows", 100,
- rowCountHt2TargetAtPeer1);
+ assertEquals(50, rowCountHt1TargetAtPeer1,
+ "@Peer1 t1_syncup should be NOT sync up and have 50 rows");
+ assertEquals(100, rowCountHt2TargetAtPeer1,
+ "@Peer1 t2_syncup should be NOT sync up and have 100 rows");
syncUp(UTIL1);
// after sync up
rowCountHt1TargetAtPeer1 = countRows(ht1TargetAtPeer1);
rowCountHt2TargetAtPeer1 = countRows(ht2TargetAtPeer1);
- assertEquals("@Peer1 t1_syncup should be sync up and have 100 rows", 100,
- rowCountHt1TargetAtPeer1);
- assertEquals("@Peer1 t2_syncup should be sync up and have 200 rows", 200,
- rowCountHt2TargetAtPeer1);
+ assertEquals(100, rowCountHt1TargetAtPeer1,
+ "@Peer1 t1_syncup should be sync up and have 100 rows");
+ assertEquals(200, rowCountHt2TargetAtPeer1,
+ "@Peer1 t2_syncup should be sync up and have 200 rows");
}
/**
@@ -324,8 +319,8 @@ public void testStartANewSyncUpToolAfterFailed() throws Exception {
try {
syncUp(UTIL1);
} catch (Exception e) {
- assertTrue("e should be a FileAlreadyExistsException",
- (e instanceof FileAlreadyExistsException));
+ assertTrue((e instanceof FileAlreadyExistsException),
+ "e should be a FileAlreadyExistsException");
}
FileStatus fileStatus2 = fs.getFileStatus(replicationInfoPath);
assertEquals(fileStatus1.getModificationTime(), fileStatus2.getModificationTime());
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSyncUpToolBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSyncUpToolBase.java
index 9455cf567276..9b1981d65ab6 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSyncUpToolBase.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSyncUpToolBase.java
@@ -32,8 +32,8 @@
import org.apache.hadoop.hbase.replication.regionserver.ReplicationSyncUp;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.util.ToolRunner;
-import org.junit.After;
-import org.junit.Before;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
import org.apache.hbase.thirdparty.com.google.common.io.Closeables;
@@ -66,7 +66,7 @@ public abstract class TestReplicationSyncUpToolBase {
protected void customizeClusterConf(Configuration conf) {
}
- @Before
+ @BeforeEach
public void setUp() throws Exception {
customizeClusterConf(UTIL1.getConfiguration());
customizeClusterConf(UTIL2.getConfiguration());
@@ -96,7 +96,7 @@ public void setUp() throws Exception {
.setColumnFamily(ColumnFamilyDescriptorBuilder.of(NO_REP_FAMILY)).build();
}
- @After
+ @AfterEach
public void tearDown() throws Exception {
Closeables.close(ht1Source, true);
Closeables.close(ht2Source, true);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSyncUpToolWithBulkLoadedData.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSyncUpToolWithBulkLoadedData.java
index e9acc1bc45ee..71462655e741 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSyncUpToolWithBulkLoadedData.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSyncUpToolWithBulkLoadedData.java
@@ -21,7 +21,7 @@
import static org.apache.hadoop.hbase.replication.TestReplicationBase.NB_RETRIES;
import static org.apache.hadoop.hbase.replication.TestReplicationBase.SLEEP_TIME;
import static org.apache.hadoop.hbase.replication.TestReplicationBase.row;
-import static org.junit.Assert.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
import java.io.IOException;
import java.util.ArrayList;
@@ -33,7 +33,6 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
@@ -45,19 +44,15 @@
import org.apache.hadoop.hbase.tool.BulkLoadHFiles;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.HFileTestUtil;
-import org.junit.ClassRule;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
+import org.junit.jupiter.api.Tag;
+import org.junit.jupiter.api.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-@Category({ ReplicationTests.class, LargeTests.class })
+@Tag(ReplicationTests.TAG)
+@Tag(LargeTests.TAG)
public class TestReplicationSyncUpToolWithBulkLoadedData extends TestReplicationSyncUpToolBase {
- @ClassRule
- public static final HBaseClassTestRule CLASS_RULE =
- HBaseClassTestRule.forClass(TestReplicationSyncUpToolWithBulkLoadedData.class);
-
private static final Logger LOG =
LoggerFactory.getLogger(TestReplicationSyncUpToolWithBulkLoadedData.class);
@@ -128,12 +123,12 @@ private void mimicSyncUpAfterBulkLoad(Iterator randomHFileRangeListItera
loadAndReplicateHFiles(false, randomHFileRangeListIterator);
int rowCount_ht1Source = countRows(ht1Source);
- assertEquals("t1_syncup has 206 rows on source, after bulk load of another 103 hfiles", 206,
- rowCount_ht1Source);
+ assertEquals(206, rowCount_ht1Source,
+ "t1_syncup has 206 rows on source, after bulk load of another 103 hfiles");
int rowCount_ht2Source = countRows(ht2Source);
- assertEquals("t2_syncup has 406 rows on source, after bulk load of another 203 hfiles", 406,
- rowCount_ht2Source);
+ assertEquals(406, rowCount_ht2Source,
+ "t2_syncup has 406 rows on source, after bulk load of another 203 hfiles");
shutDownSourceHBaseCluster();
restartTargetHBaseCluster(1);
@@ -143,8 +138,8 @@ private void mimicSyncUpAfterBulkLoad(Iterator randomHFileRangeListItera
// Before sync up
int rowCountHt1TargetAtPeer1 = countRows(ht1TargetAtPeer1);
int rowCountHt2TargetAtPeer1 = countRows(ht2TargetAtPeer1);
- assertEquals("@Peer1 t1_syncup should still have 100 rows", 100, rowCountHt1TargetAtPeer1);
- assertEquals("@Peer1 t2_syncup should still have 200 rows", 200, rowCountHt2TargetAtPeer1);
+ assertEquals(100, rowCountHt1TargetAtPeer1, "@Peer1 t1_syncup should still have 100 rows");
+ assertEquals(200, rowCountHt2TargetAtPeer1, "@Peer1 t2_syncup should still have 200 rows");
// Run sync up tool
syncUp(UTIL1);
@@ -152,10 +147,10 @@ private void mimicSyncUpAfterBulkLoad(Iterator randomHFileRangeListItera
// After syun up
rowCountHt1TargetAtPeer1 = countRows(ht1TargetAtPeer1);
rowCountHt2TargetAtPeer1 = countRows(ht2TargetAtPeer1);
- assertEquals("@Peer1 t1_syncup should be sync up and have 200 rows", 200,
- rowCountHt1TargetAtPeer1);
- assertEquals("@Peer1 t2_syncup should be sync up and have 400 rows", 400,
- rowCountHt2TargetAtPeer1);
+ assertEquals(200, rowCountHt1TargetAtPeer1,
+ "@Peer1 t1_syncup should be sync up and have 200 rows");
+ assertEquals(400, rowCountHt2TargetAtPeer1,
+ "@Peer1 t2_syncup should be sync up and have 400 rows");
}
private void loadAndReplicateHFiles(boolean verifyReplicationOnSlave,
@@ -253,7 +248,7 @@ private void wait(Table target, int expectedCount, String msg)
for (int i = 0; i < NB_RETRIES; i++) {
int rowCountHt2TargetAtPeer1 = countRows(target);
if (i == NB_RETRIES - 1) {
- assertEquals(msg, expectedCount, rowCountHt2TargetAtPeer1);
+ assertEquals(expectedCount, rowCountHt2TargetAtPeer1, msg);
}
if (expectedCount == rowCountHt2TargetAtPeer1) {
break;
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationWithFSPeerStorage.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationWithFSPeerStorage.java
index 6f5c6c20d8d8..b992d8cb1daa 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationWithFSPeerStorage.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationWithFSPeerStorage.java
@@ -17,23 +17,20 @@
*/
package org.apache.hadoop.hbase.replication;
-import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.testclassification.ReplicationTests;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.ClassRule;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Tag;
+import org.junit.jupiter.api.Test;
-@Category({ ReplicationTests.class, LargeTests.class })
+@Tag(ReplicationTests.TAG)
+@Tag(LargeTests.TAG)
public class TestReplicationWithFSPeerStorage extends TestReplicationBase {
- @ClassRule
- public static final HBaseClassTestRule CLASS_RULE =
- HBaseClassTestRule.forClass(TestReplicationWithFSPeerStorage.class);
-
- @BeforeClass
+ @BeforeAll
public static void setUpBeforeClass() throws Exception {
// enable file system based peer storage
UTIL1.getConfiguration().set(ReplicationStorageFactory.REPLICATION_PEER_STORAGE_IMPL,
@@ -43,11 +40,22 @@ public static void setUpBeforeClass() throws Exception {
TestReplicationBase.setUpBeforeClass();
}
- @Before
+ @AfterAll
+ public static void tearDownAfterClass() throws Exception {
+ TestReplicationBase.tearDownAfterClass();
+ }
+
+ @BeforeEach
public void setUp() throws Exception {
+ setUpBase();
cleanUp();
}
+ @AfterEach
+ public void tearDown() throws Exception {
+ tearDownBase();
+ }
+
/**
* Add a row, check it's replicated, delete it, check's gone
*/
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/multiwal/TestReplicationEndpointWithMultipleAsyncWAL.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/multiwal/TestReplicationEndpointWithMultipleAsyncWAL.java
index d3a947fb2404..136a97a70390 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/multiwal/TestReplicationEndpointWithMultipleAsyncWAL.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/multiwal/TestReplicationEndpointWithMultipleAsyncWAL.java
@@ -17,27 +17,28 @@
*/
package org.apache.hadoop.hbase.replication.multiwal;
-import org.apache.hadoop.hbase.HBaseClassTestRule;
-import org.apache.hadoop.hbase.replication.TestReplicationEndpoint;
+import org.apache.hadoop.hbase.replication.TestReplicationEndpointBase;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.testclassification.ReplicationTests;
import org.apache.hadoop.hbase.wal.RegionGroupingProvider;
import org.apache.hadoop.hbase.wal.WALFactory;
-import org.junit.BeforeClass;
-import org.junit.ClassRule;
-import org.junit.experimental.categories.Category;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Tag;
-@Category({ ReplicationTests.class, MediumTests.class })
-public class TestReplicationEndpointWithMultipleAsyncWAL extends TestReplicationEndpoint {
+@Tag(ReplicationTests.TAG)
+@Tag(MediumTests.TAG)
+public class TestReplicationEndpointWithMultipleAsyncWAL extends TestReplicationEndpointBase {
- @ClassRule
- public static final HBaseClassTestRule CLASS_RULE =
- HBaseClassTestRule.forClass(TestReplicationEndpointWithMultipleAsyncWAL.class);
-
- @BeforeClass
+ @BeforeAll
public static void setUpBeforeClass() throws Exception {
CONF1.set(WALFactory.WAL_PROVIDER, "multiwal");
CONF1.set(RegionGroupingProvider.DELEGATE_PROVIDER, "asyncfs");
- TestReplicationEndpoint.setUpBeforeClass();
+ TestReplicationEndpointBase.setUpBeforeClass();
+ }
+
+ @AfterAll
+ public static void tearDownAfterClass() throws Exception {
+ TestReplicationEndpointBase.tearDownAfterClass();
}
}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/multiwal/TestReplicationEndpointWithMultipleWAL.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/multiwal/TestReplicationEndpointWithMultipleWAL.java
index a882c5043990..2a31915a851d 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/multiwal/TestReplicationEndpointWithMultipleWAL.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/multiwal/TestReplicationEndpointWithMultipleWAL.java
@@ -17,27 +17,28 @@
*/
package org.apache.hadoop.hbase.replication.multiwal;
-import org.apache.hadoop.hbase.HBaseClassTestRule;
-import org.apache.hadoop.hbase.replication.TestReplicationEndpoint;
+import org.apache.hadoop.hbase.replication.TestReplicationEndpointBase;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.testclassification.ReplicationTests;
import org.apache.hadoop.hbase.wal.RegionGroupingProvider;
import org.apache.hadoop.hbase.wal.WALFactory;
-import org.junit.BeforeClass;
-import org.junit.ClassRule;
-import org.junit.experimental.categories.Category;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Tag;
-@Category({ ReplicationTests.class, MediumTests.class })
-public class TestReplicationEndpointWithMultipleWAL extends TestReplicationEndpoint {
+@Tag(ReplicationTests.TAG)
+@Tag(MediumTests.TAG)
+public class TestReplicationEndpointWithMultipleWAL extends TestReplicationEndpointBase {
- @ClassRule
- public static final HBaseClassTestRule CLASS_RULE =
- HBaseClassTestRule.forClass(TestReplicationEndpointWithMultipleWAL.class);
-
- @BeforeClass
+ @BeforeAll
public static void setUpBeforeClass() throws Exception {
CONF1.set(WALFactory.WAL_PROVIDER, "multiwal");
CONF1.set(RegionGroupingProvider.DELEGATE_PROVIDER, "filesystem");
- TestReplicationEndpoint.setUpBeforeClass();
+ TestReplicationEndpointBase.setUpBeforeClass();
+ }
+
+ @AfterAll
+ public static void tearDownAfterClass() throws Exception {
+ TestReplicationEndpointBase.tearDownAfterClass();
}
}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/multiwal/TestReplicationKillMasterRSCompressedWithMultipleAsyncWAL.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/multiwal/TestReplicationKillMasterRSCompressedWithMultipleAsyncWAL.java
index 623e4c28cd05..2f29c3e37352 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/multiwal/TestReplicationKillMasterRSCompressedWithMultipleAsyncWAL.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/multiwal/TestReplicationKillMasterRSCompressedWithMultipleAsyncWAL.java
@@ -17,28 +17,29 @@
*/
package org.apache.hadoop.hbase.replication.multiwal;
-import org.apache.hadoop.hbase.HBaseClassTestRule;
-import org.apache.hadoop.hbase.replication.TestReplicationKillMasterRSCompressed;
+import org.apache.hadoop.hbase.replication.TestReplicationKillMasterRSCompressedBase;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.testclassification.ReplicationTests;
import org.apache.hadoop.hbase.wal.RegionGroupingProvider;
import org.apache.hadoop.hbase.wal.WALFactory;
-import org.junit.BeforeClass;
-import org.junit.ClassRule;
-import org.junit.experimental.categories.Category;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Tag;
-@Category({ ReplicationTests.class, LargeTests.class })
+@Tag(ReplicationTests.TAG)
+@Tag(LargeTests.TAG)
public class TestReplicationKillMasterRSCompressedWithMultipleAsyncWAL
- extends TestReplicationKillMasterRSCompressed {
+ extends TestReplicationKillMasterRSCompressedBase {
- @ClassRule
- public static final HBaseClassTestRule CLASS_RULE =
- HBaseClassTestRule.forClass(TestReplicationKillMasterRSCompressedWithMultipleAsyncWAL.class);
-
- @BeforeClass
+ @BeforeAll
public static void setUpBeforeClass() throws Exception {
CONF1.set(WALFactory.WAL_PROVIDER, "multiwal");
CONF1.set(RegionGroupingProvider.DELEGATE_PROVIDER, "asyncfs");
- TestReplicationKillMasterRSCompressed.setUpBeforeClass();
+ TestReplicationKillMasterRSCompressedBase.setUpBeforeClass();
+ }
+
+ @AfterAll
+ public static void tearDownAfterClass() throws Exception {
+ TestReplicationKillMasterRSCompressedBase.tearDownAfterClass();
}
}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/multiwal/TestReplicationKillMasterRSCompressedWithMultipleWAL.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/multiwal/TestReplicationKillMasterRSCompressedWithMultipleWAL.java
index 54921520b1cd..96968feb2edf 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/multiwal/TestReplicationKillMasterRSCompressedWithMultipleWAL.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/multiwal/TestReplicationKillMasterRSCompressedWithMultipleWAL.java
@@ -17,28 +17,29 @@
*/
package org.apache.hadoop.hbase.replication.multiwal;
-import org.apache.hadoop.hbase.HBaseClassTestRule;
-import org.apache.hadoop.hbase.replication.TestReplicationKillMasterRSCompressed;
+import org.apache.hadoop.hbase.replication.TestReplicationKillMasterRSCompressedBase;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.testclassification.ReplicationTests;
import org.apache.hadoop.hbase.wal.RegionGroupingProvider;
import org.apache.hadoop.hbase.wal.WALFactory;
-import org.junit.BeforeClass;
-import org.junit.ClassRule;
-import org.junit.experimental.categories.Category;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Tag;
-@Category({ ReplicationTests.class, LargeTests.class })
+@Tag(ReplicationTests.TAG)
+@Tag(LargeTests.TAG)
public class TestReplicationKillMasterRSCompressedWithMultipleWAL
- extends TestReplicationKillMasterRSCompressed {
+ extends TestReplicationKillMasterRSCompressedBase {
- @ClassRule
- public static final HBaseClassTestRule CLASS_RULE =
- HBaseClassTestRule.forClass(TestReplicationKillMasterRSCompressedWithMultipleWAL.class);
-
- @BeforeClass
+ @BeforeAll
public static void setUpBeforeClass() throws Exception {
CONF1.set(WALFactory.WAL_PROVIDER, "multiwal");
CONF1.set(RegionGroupingProvider.DELEGATE_PROVIDER, "filesystem");
- TestReplicationKillMasterRSCompressed.setUpBeforeClass();
+ TestReplicationKillMasterRSCompressedBase.setUpBeforeClass();
+ }
+
+ @AfterAll
+ public static void tearDownAfterClass() throws Exception {
+ TestReplicationKillMasterRSCompressedBase.tearDownAfterClass();
}
}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/multiwal/TestReplicationSyncUpToolWithMultipleAsyncWAL.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/multiwal/TestReplicationSyncUpToolWithMultipleAsyncWAL.java
index 83cd41773ca8..f8de45600066 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/multiwal/TestReplicationSyncUpToolWithMultipleAsyncWAL.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/multiwal/TestReplicationSyncUpToolWithMultipleAsyncWAL.java
@@ -18,22 +18,17 @@
package org.apache.hadoop.hbase.replication.multiwal;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.replication.TestReplicationSyncUpTool;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.testclassification.ReplicationTests;
import org.apache.hadoop.hbase.wal.RegionGroupingProvider;
import org.apache.hadoop.hbase.wal.WALFactory;
-import org.junit.ClassRule;
-import org.junit.experimental.categories.Category;
+import org.junit.jupiter.api.Tag;
-@Category({ ReplicationTests.class, LargeTests.class })
+@Tag(ReplicationTests.TAG)
+@Tag(LargeTests.TAG)
public class TestReplicationSyncUpToolWithMultipleAsyncWAL extends TestReplicationSyncUpTool {
- @ClassRule
- public static final HBaseClassTestRule CLASS_RULE =
- HBaseClassTestRule.forClass(TestReplicationSyncUpToolWithMultipleAsyncWAL.class);
-
@Override
protected void customizeClusterConf(Configuration conf) {
conf.set(WALFactory.WAL_PROVIDER, "multiwal");
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/multiwal/TestReplicationSyncUpToolWithMultipleWAL.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/multiwal/TestReplicationSyncUpToolWithMultipleWAL.java
index 673b841430eb..6883c48cc8d8 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/multiwal/TestReplicationSyncUpToolWithMultipleWAL.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/multiwal/TestReplicationSyncUpToolWithMultipleWAL.java
@@ -18,22 +18,17 @@
package org.apache.hadoop.hbase.replication.multiwal;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.replication.TestReplicationSyncUpTool;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.testclassification.ReplicationTests;
import org.apache.hadoop.hbase.wal.RegionGroupingProvider;
import org.apache.hadoop.hbase.wal.WALFactory;
-import org.junit.ClassRule;
-import org.junit.experimental.categories.Category;
+import org.junit.jupiter.api.Tag;
-@Category({ ReplicationTests.class, LargeTests.class })
+@Tag(ReplicationTests.TAG)
+@Tag(LargeTests.TAG)
public class TestReplicationSyncUpToolWithMultipleWAL extends TestReplicationSyncUpTool {
- @ClassRule
- public static final HBaseClassTestRule CLASS_RULE =
- HBaseClassTestRule.forClass(TestReplicationSyncUpToolWithMultipleWAL.class);
-
@Override
protected void customizeClusterConf(Configuration conf) {
conf.set(WALFactory.WAL_PROVIDER, "multiwal");
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestRefreshPeerWhileRegionServerRestarts.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestRefreshPeerWhileRegionServerRestarts.java
index e9d8e05c8818..3a8b8de946a0 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestRefreshPeerWhileRegionServerRestarts.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestRefreshPeerWhileRegionServerRestarts.java
@@ -17,14 +17,13 @@
*/
package org.apache.hadoop.hbase.replication.regionserver;
-import static org.junit.Assert.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
import java.io.IOException;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ForkJoinPool;
import java.util.concurrent.Future;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.master.replication.DisablePeerProcedure;
import org.apache.hadoop.hbase.procedure2.Procedure;
@@ -34,9 +33,12 @@
import org.apache.hadoop.hbase.replication.TestReplicationBase;
import org.apache.hadoop.hbase.testclassification.MasterTests;
import org.apache.hadoop.hbase.testclassification.MediumTests;
-import org.junit.ClassRule;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Tag;
+import org.junit.jupiter.api.Test;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.PeerModificationState;
@@ -44,13 +46,10 @@
* This UT is used to make sure that we will not accidentally change the way to generate online
* servers. See HBASE-25774 and HBASE-25032 for more details.
*/
-@Category({ MasterTests.class, MediumTests.class })
+@Tag(MasterTests.TAG)
+@Tag(MediumTests.TAG)
public class TestRefreshPeerWhileRegionServerRestarts extends TestReplicationBase {
- @ClassRule
- public static final HBaseClassTestRule CLASS_RULE =
- HBaseClassTestRule.forClass(TestRefreshPeerWhileRegionServerRestarts.class);
-
private static CountDownLatch ARRIVE;
private static CountDownLatch RESUME;
@@ -76,6 +75,26 @@ protected void tryRegionServerReport(long reportStartTime, long reportEndTime)
}
}
+ @BeforeAll
+ public static void setUpBeforeClass() throws Exception {
+ TestReplicationBase.setUpBeforeClass();
+ }
+
+ @AfterAll
+ public static void tearDownAfterClass() throws Exception {
+ TestReplicationBase.tearDownAfterClass();
+ }
+
+ @BeforeEach
+ public void setUp() throws Exception {
+ setUpBase();
+ }
+
+ @AfterEach
+ public void tearDown() throws Exception {
+ tearDownBase();
+ }
+
@Test
public void testRestart() throws Exception {
UTIL1.getMiniHBaseCluster().getConfiguration().setClass(HConstants.REGION_SERVER_IMPL,
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestRefreshRecoveredReplication.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestRefreshRecoveredReplication.java
index 93aa7130926f..f2f415a69ab8 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestRefreshRecoveredReplication.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestRefreshRecoveredReplication.java
@@ -17,13 +17,13 @@
*/
package org.apache.hadoop.hbase.replication.regionserver;
-import static org.junit.Assert.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
import java.io.IOException;
import java.util.List;
import java.util.Optional;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
@@ -40,16 +40,13 @@
import org.apache.hadoop.hbase.testclassification.ReplicationTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread;
-import org.junit.After;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.ClassRule;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-import org.junit.rules.TestName;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Tag;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.TestInfo;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -58,25 +55,19 @@
/**
* Testcase for HBASE-24871.
*/
-@Category({ ReplicationTests.class, MediumTests.class })
+@Tag(ReplicationTests.TAG)
+@Tag(MediumTests.TAG)
public class TestRefreshRecoveredReplication extends TestReplicationBase {
- @ClassRule
- public static final HBaseClassTestRule CLASS_RULE =
- HBaseClassTestRule.forClass(TestRefreshRecoveredReplication.class);
-
private static final Logger LOG = LoggerFactory.getLogger(TestRefreshRecoveredReplication.class);
private static final int BATCH = 50;
- @Rule
- public TestName name = new TestName();
-
private TableName tablename;
private Table table1;
private Table table2;
- @BeforeClass
+ @BeforeAll
public static void setUpBeforeClass() throws Exception {
// NUM_SLAVES1 is presumed 2 in below.
NUM_SLAVES1 = 2;
@@ -86,16 +77,16 @@ public static void setUpBeforeClass() throws Exception {
TestReplicationBase.setUpBeforeClass();
}
- @AfterClass
+ @AfterAll
public static void tearDownAfterClass() throws Exception {
TestReplicationBase.tearDownAfterClass();
}
- @Before
- public void setup() throws Exception {
+ @BeforeEach
+ public void setup(TestInfo testInfo) throws Exception {
setUpBase();
- tablename = TableName.valueOf(name.getMethodName());
+ tablename = TableName.valueOf(testInfo.getTestMethod().get().getName());
TableDescriptor table =
TableDescriptorBuilder.newBuilder(tablename).setColumnFamily(ColumnFamilyDescriptorBuilder
.newBuilder(famName).setScope(HConstants.REPLICATION_SCOPE_GLOBAL).build()).build();
@@ -108,7 +99,7 @@ public void setup() throws Exception {
table2 = UTIL2.getConnection().getTable(tablename);
}
- @After
+ @AfterEach
public void teardown() throws Exception {
tearDownBase();
@@ -130,7 +121,7 @@ public void testReplicationRefreshSource() throws Exception {
Optional server = rss.stream()
.filter(rst -> CollectionUtils.isNotEmpty(rst.getRegionServer().getRegions(tablename)))
.findAny();
- Assert.assertTrue(server.isPresent());
+ assertTrue(server.isPresent());
HRegionServer otherServer = rss.get(0).getRegionServer() == server.get().getRegionServer()
? rss.get(1).getRegionServer()
: rss.get(0).getRegionServer();
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationCompressedWAL.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationCompressedWAL.java
index 27c39cc0df2f..4364899719d6 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationCompressedWAL.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationCompressedWAL.java
@@ -17,11 +17,10 @@
*/
package org.apache.hadoop.hbase.replication.regionserver;
-import static org.junit.Assert.assertArrayEquals;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.assertArrayEquals;
+import static org.junit.jupiter.api.Assertions.fail;
import java.io.IOException;
-import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Put;
@@ -29,36 +28,43 @@
import org.apache.hadoop.hbase.replication.TestReplicationBase;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.util.Bytes;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.ClassRule;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Tag;
+import org.junit.jupiter.api.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-@Category(MediumTests.class)
+@Tag(MediumTests.TAG)
public class TestReplicationCompressedWAL extends TestReplicationBase {
- @ClassRule
- public static final HBaseClassTestRule CLASS_RULE =
- HBaseClassTestRule.forClass(TestReplicationCompressedWAL.class);
-
static final Logger LOG = LoggerFactory.getLogger(TestReplicationCompressedWAL.class);
static final int NUM_BATCHES = 20;
static final int NUM_ROWS_PER_BATCH = 100;
- @BeforeClass
+ @BeforeAll
public static void setUpBeforeClass() throws Exception {
CONF1.setBoolean(HConstants.ENABLE_WAL_COMPRESSION, true);
TestReplicationBase.setUpBeforeClass();
}
- @AfterClass
+ @AfterAll
public static void tearDownAfterClass() throws Exception {
TestReplicationBase.tearDownAfterClass();
}
+ @BeforeEach
+ public void setUp() throws Exception {
+ setUpBase();
+ }
+
+ @AfterEach
+ public void tearDown() throws Exception {
+ tearDownBase();
+ }
+
@Test
public void testMultiplePuts() throws Exception {
runMultiplePutTest();
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManagerJoin.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManagerJoin.java
index d7d23783eacb..59d8a8c674c4 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManagerJoin.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManagerJoin.java
@@ -17,12 +17,11 @@
*/
package org.apache.hadoop.hbase.replication.regionserver;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertTrue;
import java.util.Optional;
import java.util.stream.Stream;
-import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
@@ -36,35 +35,45 @@
import org.apache.hadoop.hbase.testclassification.ReplicationTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.JVMClusterUtil;
-import org.junit.BeforeClass;
-import org.junit.ClassRule;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-import org.junit.rules.TestName;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Tag;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.TestInfo;
-@Category({ ReplicationTests.class, MediumTests.class })
+@Tag(ReplicationTests.TAG)
+@Tag(MediumTests.TAG)
public class TestReplicationSourceManagerJoin extends TestReplicationBase {
- @ClassRule
- public static final HBaseClassTestRule CLASS_RULE =
- HBaseClassTestRule.forClass(TestReplicationSourceManagerJoin.class);
-
- @Rule
- public TestName testName = new TestName();
-
- @BeforeClass
+ @BeforeAll
public static void setUpBeforeClass() throws Exception {
// NUM_SLAVES1 is presumed 2 in below.
NUM_SLAVES1 = 2;
TestReplicationBase.setUpBeforeClass();
}
+ @AfterAll
+ public static void tearDownAfterClass() throws Exception {
+ TestReplicationBase.tearDownAfterClass();
+ }
+
+ @BeforeEach
+ public void setUp() throws Exception {
+ setUpBase();
+ }
+
+ @AfterEach
+ public void tearDown() throws Exception {
+ tearDownBase();
+ }
+
@Test
- public void testReplicationSourcesTerminate() throws Exception {
+ public void testReplicationSourcesTerminate(TestInfo testInfo) throws Exception {
// Create table in source cluster only, let TableNotFoundException block peer to avoid
// recovered source end.
- TableName tableName = TableName.valueOf(testName.getMethodName());
+ TableName tableName = TableName.valueOf(testInfo.getTestMethod().get().getName());
TableDescriptor td = TableDescriptorBuilder.newBuilder(tableName)
.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(famName)
.setScope(HConstants.REPLICATION_SCOPE_GLOBAL).build())
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationValueCompressedWAL.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationValueCompressedWAL.java
index 03b83964dccf..ea6411478392 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationValueCompressedWAL.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationValueCompressedWAL.java
@@ -17,40 +17,46 @@
*/
package org.apache.hadoop.hbase.replication.regionserver;
-import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.regionserver.wal.CompressionContext;
import org.apache.hadoop.hbase.replication.TestReplicationBase;
import org.apache.hadoop.hbase.testclassification.MediumTests;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.ClassRule;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Tag;
+import org.junit.jupiter.api.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-@Category(MediumTests.class)
+@Tag(MediumTests.TAG)
public class TestReplicationValueCompressedWAL extends TestReplicationBase {
- @ClassRule
- public static final HBaseClassTestRule CLASS_RULE =
- HBaseClassTestRule.forClass(TestReplicationValueCompressedWAL.class);
-
static final Logger LOG = LoggerFactory.getLogger(TestReplicationValueCompressedWAL.class);
- @BeforeClass
+ @BeforeAll
public static void setUpBeforeClass() throws Exception {
CONF1.setBoolean(HConstants.ENABLE_WAL_COMPRESSION, true);
CONF1.setBoolean(CompressionContext.ENABLE_WAL_VALUE_COMPRESSION, true);
TestReplicationBase.setUpBeforeClass();
}
- @AfterClass
+ @AfterAll
public static void tearDownAfterClass() throws Exception {
TestReplicationBase.tearDownAfterClass();
}
+ @BeforeEach
+ public void setUp() throws Exception {
+ setUpBase();
+ }
+
+ @AfterEach
+ public void tearDown() throws Exception {
+ tearDownBase();
+ }
+
@Test
public void testMultiplePuts() throws Exception {
TestReplicationCompressedWAL.runMultiplePutTest();
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicator.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicator.java
index 979db712ef34..32ee4943586b 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicator.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicator.java
@@ -17,14 +17,13 @@
*/
package org.apache.hadoop.hbase.replication.regionserver;
-import static org.junit.Assert.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
import java.io.IOException;
import java.util.List;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
-import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.Waiter;
@@ -36,33 +35,45 @@
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.wal.WAL.Entry;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.ClassRule;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Tag;
+import org.junit.jupiter.api.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hbase.thirdparty.com.google.protobuf.ServiceException;
-@Category(MediumTests.class)
+@Tag(MediumTests.TAG)
public class TestReplicator extends TestReplicationBase {
- @ClassRule
- public static final HBaseClassTestRule CLASS_RULE =
- HBaseClassTestRule.forClass(TestReplicator.class);
-
static final Logger LOG = LoggerFactory.getLogger(TestReplicator.class);
static final int NUM_ROWS = 10;
- @BeforeClass
+ @BeforeAll
public static void setUpBeforeClass() throws Exception {
// Set RPC size limit to 10kb (will be applied to both source and sink clusters)
CONF1.setInt(RpcServer.MAX_REQUEST_SIZE, 1024 * 10);
TestReplicationBase.setUpBeforeClass();
}
+ @AfterAll
+ public static void tearDownAfterClass() throws Exception {
+ TestReplicationBase.tearDownAfterClass();
+ }
+
+ @BeforeEach
+ public void setUp() throws Exception {
+ setUpBase();
+ }
+
+ @AfterEach
+ public void tearDown() throws Exception {
+ tearDownBase();
+ }
+
@Test
public void testReplicatorBatching() throws Exception {
// Clear the tables
@@ -104,9 +115,9 @@ public String explainFailure() throws Exception {
}
});
- assertEquals("We sent an incorrect number of batches", NUM_ROWS,
- ReplicationEndpointForTest.getBatchCount());
- assertEquals("We did not replicate enough rows", NUM_ROWS, UTIL2.countRows(htable2));
+ assertEquals(NUM_ROWS, ReplicationEndpointForTest.getBatchCount(),
+ "We sent an incorrect number of batches");
+ assertEquals(NUM_ROWS, UTIL2.countRows(htable2), "We did not replicate enough rows");
} finally {
hbaseAdmin.removeReplicationPeer("testReplicatorBatching");
}
@@ -154,17 +165,12 @@ public String explainFailure() throws Exception {
}
});
- assertEquals("We did not replicate enough rows", NUM_ROWS, UTIL2.countRows(htable2));
+ assertEquals(NUM_ROWS, UTIL2.countRows(htable2), "We did not replicate enough rows");
} finally {
hbaseAdmin.removeReplicationPeer("testReplicatorWithErrors");
}
}
- @AfterClass
- public static void tearDownAfterClass() throws Exception {
- TestReplicationBase.tearDownAfterClass();
- }
-
private void truncateTable(HBaseTestingUtil util, TableName tablename) throws IOException {
Admin admin = util.getAdmin();
admin.disableTable(tableName);