diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNStorageRetentionFunctional.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNStorageRetentionFunctional.java index 18099420b318d..f53271dc47e17 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNStorageRetentionFunctional.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNStorageRetentionFunctional.java @@ -21,7 +21,7 @@ import static org.apache.hadoop.hdfs.server.namenode.NNStorage.getImageFileName; import static org.apache.hadoop.hdfs.server.namenode.NNStorage.getInProgressEditsFileName; import static org.apache.hadoop.test.GenericTestUtils.assertGlobEquals; -import static org.junit.Assert.assertEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; import java.io.File; import java.io.IOException; @@ -34,7 +34,7 @@ import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction; -import org.junit.Test; +import org.junit.jupiter.api.Test; import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNStorageRetentionManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNStorageRetentionManager.java index c0f0970dfbd7a..67afef1fe8a0d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNStorageRetentionManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNStorageRetentionManager.java @@ -20,6 +20,7 @@ import static org.apache.hadoop.hdfs.server.namenode.NNStorage.getFinalizedEditsFileName; import static org.apache.hadoop.hdfs.server.namenode.NNStorage.getImageFileName; import static org.apache.hadoop.hdfs.server.namenode.NNStorage.getInProgressEditsFileName; +import static org.junit.jupiter.api.Assertions.assertEquals; import static org.mockito.ArgumentMatchers.any; import java.io.File; @@ -45,9 +46,8 @@ import org.apache.hadoop.hdfs.server.namenode.NNStorageRetentionManager.StoragePurger; import org.apache.hadoop.util.Lists; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; import org.mockito.ArgumentCaptor; import org.mockito.Mockito; @@ -62,7 +62,7 @@ public class TestNNStorageRetentionManager { * For the purpose of this test, purge as many edits as we can * with no extra "safety cushion" */ - @Before + @BeforeEach public void setNoExtraEditRetention() { conf.setLong(DFSConfigKeys.DFS_NAMENODE_NUM_EXTRA_EDITS_RETAINED_KEY, 0); } @@ -310,27 +310,24 @@ private void runTest(TestCaseDescription tc) throws IOException { for (FSImageFile captured : imagesPurgedCaptor.getAllValues()) { capturedPaths.add(fileToPath(captured.getFile())); } - Assert.assertEquals("Image file check.", - Joiner.on(",").join(filesToPaths(tc.expectedPurgedImages)), - Joiner.on(",").join(capturedPaths)); + assertEquals(Joiner.on(",").join(filesToPaths(tc.expectedPurgedImages)), + Joiner.on(",").join(capturedPaths), "Image file check."); capturedPaths.clear(); // Check edit logs, and also in progress edits older than minTxIdToKeep for (EditLogFile captured : logsPurgedCaptor.getAllValues()) { 
capturedPaths.add(fileToPath(captured.getFile())); } - Assert.assertEquals("Check old edits are removed.", - Joiner.on(",").join(filesToPaths(tc.expectedPurgedLogs)), - Joiner.on(",").join(capturedPaths)); + assertEquals(Joiner.on(",").join(filesToPaths(tc.expectedPurgedLogs)), + Joiner.on(",").join(capturedPaths), "Check old edits are removed."); capturedPaths.clear(); // Check in progress edits to keep are marked as stale for (EditLogFile captured : staleLogsCaptor.getAllValues()) { capturedPaths.add(fileToPath(captured.getFile())); } - Assert.assertEquals("Check unnecessary but kept edits are marked stale", - Joiner.on(",").join(filesToPaths(tc.expectedStaleLogs)), - Joiner.on(",").join(capturedPaths)); + assertEquals(Joiner.on(",").join(filesToPaths(tc.expectedStaleLogs)), + Joiner.on(",").join(capturedPaths), "Check unnecessary but kept edits are marked stale"); } private class TestCaseDescription { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNThroughputBenchmark.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNThroughputBenchmark.java index 1071564cf02aa..57e0c7b66ec44 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNThroughputBenchmark.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNThroughputBenchmark.java @@ -31,19 +31,24 @@ import org.apache.hadoop.hdfs.protocol.DirectoryListing; import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; import org.apache.hadoop.util.ExitUtil; -import org.junit.After; -import org.junit.Assert; -import org.junit.BeforeClass; -import org.junit.Test; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; public class TestNNThroughputBenchmark { - @BeforeClass + @BeforeAll public static void setUp() { ExitUtil.disableSystemExit(); } - @After + @AfterEach public void cleanUp() { FileUtil.fullyDeleteContents(new File(MiniDFSCluster.getBaseDirectory())); } @@ -66,7 +71,8 @@ public void testNNThroughput() throws Exception { * This test runs all benchmarks defined in {@link NNThroughputBenchmark}, * with explicit local -fs option. */ - @Test(timeout = 120000) + @Test + @Timeout(value = 120) public void testNNThroughputWithFsOption() throws Exception { Configuration conf = new HdfsConfiguration(); conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 16); @@ -81,7 +87,8 @@ public void testNNThroughputWithFsOption() throws Exception { /** * This test runs {@link NNThroughputBenchmark} against a mini DFS cluster. */ - @Test(timeout = 120000) + @Test + @Timeout(value = 120) public void testNNThroughputAgainstRemoteNN() throws Exception { final Configuration conf = new HdfsConfiguration(); conf.setInt(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, 16); @@ -106,7 +113,8 @@ public void testNNThroughputAgainstRemoteNN() throws Exception { * Ranger since only super user e.g. hdfs can enter/exit safemode * but any request from super user is not sent for authorization). 
*/ - @Test(timeout = 120000) + @Test + @Timeout(value = 120) public void testNNThroughputAgainstRemoteNNNonSuperUser() throws Exception { final Configuration conf = new HdfsConfiguration(); conf.setInt(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, 16); @@ -128,7 +136,8 @@ public void testNNThroughputAgainstRemoteNNNonSuperUser() throws Exception { * This test runs {@link NNThroughputBenchmark} against a mini DFS cluster * with explicit -fs option. */ - @Test(timeout = 120000) + @Test + @Timeout(value = 120) public void testNNThroughputRemoteAgainstNNWithFsOption() throws Exception { final Configuration conf = new HdfsConfiguration(); conf.setInt(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, 16); @@ -153,7 +162,8 @@ public void testNNThroughputRemoteAgainstNNWithFsOption() throws Exception { * This test runs {@link NNThroughputBenchmark} against a mini DFS cluster * for append operation. */ - @Test(timeout = 120000) + @Test + @Timeout(value = 120) public void testNNThroughputForAppendOp() throws Exception { final Configuration conf = new HdfsConfiguration(); conf.setInt(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, 16); @@ -178,10 +188,10 @@ public void testNNThroughputForAppendOp() throws Exception { listing = fsNamesystem.getListing("/", HdfsFileStatus.EMPTY_NAME, false); HdfsFileStatus[] partialListingAfter = listing.getPartialListing(); - Assert.assertEquals(partialListing.length, partialListingAfter.length); + assertEquals(partialListing.length, partialListingAfter.length); for (int i = 0; i < partialListing.length; i++) { //Check the modification time after append operation - Assert.assertNotEquals(partialListing[i].getModificationTime(), + assertNotEquals(partialListing[i].getModificationTime(), partialListingAfter[i].getModificationTime()); } @@ -196,7 +206,8 @@ public void testNNThroughputForAppendOp() throws Exception { * This test runs {@link NNThroughputBenchmark} against a mini DFS cluster * for block report operation. */ - @Test(timeout = 120000) + @Test + @Timeout(value = 120) public void testNNThroughputForBlockReportOp() throws Exception { final Configuration conf = new HdfsConfiguration(); conf.setInt(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, 16); @@ -217,7 +228,8 @@ public void testNNThroughputForBlockReportOp() throws Exception { * This test runs {@link NNThroughputBenchmark} against a mini DFS cluster * with explicit -baseDirName option. 
*/ - @Test(timeout = 120000) + @Test + @Timeout(value = 120) public void testNNThroughputWithBaseDir() throws Exception { final Configuration conf = new HdfsConfiguration(); conf.setInt(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, 16); @@ -233,13 +245,13 @@ public void testNNThroughputWithBaseDir() throws Exception { NNThroughputBenchmark.runBenchmark(benchConf, new String[] {"-op", "create", "-keepResults", "-files", "3", "-baseDirName", "/nnThroughputBenchmark1", "-close"}); - Assert.assertTrue(fs.exists(new Path("/nnThroughputBenchmark1"))); - Assert.assertFalse(fs.exists(new Path("/nnThroughputBenchmark"))); + assertTrue(fs.exists(new Path("/nnThroughputBenchmark1"))); + assertFalse(fs.exists(new Path("/nnThroughputBenchmark"))); NNThroughputBenchmark.runBenchmark(benchConf, new String[] {"-op", "all", "-baseDirName", "/nnThroughputBenchmark1"}); - Assert.assertTrue(fs.exists(new Path("/nnThroughputBenchmark1"))); - Assert.assertFalse(fs.exists(new Path("/nnThroughputBenchmark"))); + assertTrue(fs.exists(new Path("/nnThroughputBenchmark1"))); + assertFalse(fs.exists(new Path("/nnThroughputBenchmark"))); } finally { if (cluster != null) { cluster.shutdown(); @@ -251,7 +263,8 @@ public void testNNThroughputWithBaseDir() throws Exception { * This test runs {@link NNThroughputBenchmark} against a mini DFS cluster * for blockSize with letter suffix. */ - @Test(timeout = 120000) + @Test + @Timeout(value = 120) public void testNNThroughputForBlockSizeWithLetterSuffix() throws Exception { final Configuration conf = new HdfsConfiguration(); conf.setInt(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, 16); @@ -271,7 +284,8 @@ public void testNNThroughputForBlockSizeWithLetterSuffix() throws Exception { * This test runs {@link NNThroughputBenchmark} against a mini DFS cluster * with explicit -blockSize option. */ - @Test(timeout = 120000) + @Test + @Timeout(value = 120) public void testNNThroughputWithBlockSize() throws Exception { final Configuration conf = new HdfsConfiguration(); conf.setInt(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, 16); @@ -290,7 +304,8 @@ public void testNNThroughputWithBlockSize() throws Exception { * This test runs {@link NNThroughputBenchmark} against a mini DFS cluster * with explicit -blockSize option like 1m. 
*/ - @Test(timeout = 120000) + @Test + @Timeout(value = 120) public void testNNThroughputBlockSizeArgWithLetterSuffix() throws Exception { final Configuration conf = new HdfsConfiguration(); conf.setInt(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, 16); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeOptionParsing.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeOptionParsing.java index 92b96a533c406..e39b4fe9cd694 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeOptionParsing.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeOptionParsing.java @@ -18,19 +18,21 @@ package org.apache.hadoop.hdfs.server.namenode; import static org.apache.hadoop.test.GenericTestUtils.assertExceptionContains; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNull; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.RollingUpgradeStartupOption; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; -import org.junit.Assert; -import org.junit.Test; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; public class TestNameNodeOptionParsing { - @Test(timeout = 10000) + @Test + @Timeout(value = 10) public void testUpgrade() { StartupOption opt = null; // UPGRADE is set, but nothing else @@ -104,7 +106,8 @@ public void testUpgrade() { assertNull(opt); } - @Test(timeout = 10000) + @Test + @Timeout(value = 10) public void testRollingUpgrade() { { final String[] args = {"-rollingUpgrade"}; @@ -132,7 +135,7 @@ public void testRollingUpgrade() { final String[] args = {"-rollingUpgrade", "foo"}; try { NameNode.parseArguments(args); - Assert.fail(); + fail(); } catch(IllegalArgumentException iae) { // the exception is expected. 
} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeReconfigure.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeReconfigure.java index edddc318af056..d45bdb2073044 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeReconfigure.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeReconfigure.java @@ -25,9 +25,10 @@ import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeAdminBackoffMonitor; import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeAdminMonitorInterface; import org.apache.hadoop.test.LambdaTestUtils; -import org.junit.Test; -import org.junit.Before; -import org.junit.After; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.AfterEach; import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IPC_SERVER_LOG_SLOW_RPC; import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IPC_SERVER_LOG_SLOW_RPC_THRESHOLD_MS_DEFAULT; @@ -38,7 +39,6 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_READ_LOCK_REPORTING_THRESHOLD_MS_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SLOWPEER_COLLECT_INTERVAL_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_WRITE_LOCK_REPORTING_THRESHOLD_MS_KEY; -import static org.junit.Assert.*; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -75,6 +75,12 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_BACKOFF_MONITOR_PENDING_LIMIT; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_BACKOFF_MONITOR_PENDING_BLOCKS_PER_LOCK; import static org.apache.hadoop.fs.CommonConfigurationKeys.IPC_BACKOFF_ENABLE_DEFAULT; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; public class TestNameNodeReconfigure { @@ -84,7 +90,7 @@ public class TestNameNodeReconfigure { private MiniDFSCluster cluster; private final int customizedBlockInvalidateLimit = 500; - @Before + @BeforeEach public void setUp() throws IOException { Configuration conf = new HdfsConfiguration(); conf.setInt(DFS_BLOCK_INVALIDATE_LIMIT_KEY, @@ -115,21 +121,20 @@ public void testReconfigureCallerContextEnabled() nameNode.reconfigureProperty(HADOOP_CALLER_CONTEXT_ENABLED_KEY, null); // verify default - assertEquals(HADOOP_CALLER_CONTEXT_ENABLED_KEY + " has wrong value", false, - nameSystem.getCallerContextEnabled()); - assertEquals(HADOOP_CALLER_CONTEXT_ENABLED_KEY + " has wrong value", null, - nameNode.getConf().get(HADOOP_CALLER_CONTEXT_ENABLED_KEY)); + assertEquals(false, nameSystem.getCallerContextEnabled(), + HADOOP_CALLER_CONTEXT_ENABLED_KEY + " has wrong value"); + assertEquals(null, nameNode.getConf().get(HADOOP_CALLER_CONTEXT_ENABLED_KEY), + HADOOP_CALLER_CONTEXT_ENABLED_KEY + " has wrong value"); } void verifyReconfigureCallerContextEnabled(final NameNode nameNode, final FSNamesystem nameSystem, boolean expected) { - assertEquals(HADOOP_CALLER_CONTEXT_ENABLED_KEY + " has wrong value", - expected, 
nameNode.getNamesystem().getCallerContextEnabled()); - assertEquals( - HADOOP_CALLER_CONTEXT_ENABLED_KEY + " has wrong value", - expected, + assertEquals(expected, nameNode.getNamesystem().getCallerContextEnabled(), + HADOOP_CALLER_CONTEXT_ENABLED_KEY + " has wrong value"); + assertEquals(expected, nameNode.getConf().getBoolean(HADOOP_CALLER_CONTEXT_ENABLED_KEY, - HADOOP_CALLER_CONTEXT_ENABLED_DEFAULT)); + HADOOP_CALLER_CONTEXT_ENABLED_DEFAULT), + HADOOP_CALLER_CONTEXT_ENABLED_KEY + " has wrong value"); } /** @@ -159,18 +164,18 @@ public void testReconfigureIPCBackoff() throws ReconfigurationException { // revert to default nameNode.reconfigureProperty(ipcClientRPCBackoffEnable, null); - assertEquals(ipcClientRPCBackoffEnable + " has wrong value", false, - nnrs.getClientRpcServer().isClientBackoffEnabled()); - assertEquals(ipcClientRPCBackoffEnable + " has wrong value", null, - nameNode.getConf().get(ipcClientRPCBackoffEnable)); + assertEquals(false, nnrs.getClientRpcServer().isClientBackoffEnabled(), + ipcClientRPCBackoffEnable + " has wrong value"); + assertEquals(null, nameNode.getConf().get(ipcClientRPCBackoffEnable), + ipcClientRPCBackoffEnable + " has wrong value"); } void verifyReconfigureIPCBackoff(final NameNode nameNode, final NameNodeRpcServer nnrs, String property, boolean expected) { - assertEquals(property + " has wrong value", expected, nnrs - .getClientRpcServer().isClientBackoffEnabled()); - assertEquals(property + " has wrong value", expected, nameNode.getConf() - .getBoolean(property, IPC_BACKOFF_ENABLE_DEFAULT)); + assertEquals(expected, nnrs.getClientRpcServer().isClientBackoffEnabled(), + property + " has wrong value"); + assertEquals(expected, nameNode.getConf().getBoolean(property, IPC_BACKOFF_ENABLE_DEFAULT), + property + " has wrong value"); } /** @@ -202,33 +207,31 @@ public void testReconfigureHearbeatCheck() throws ReconfigurationException { } // verify change - assertEquals( - DFS_HEARTBEAT_INTERVAL_KEY + " has wrong value", - 6, - nameNode.getConf().getLong(DFS_HEARTBEAT_INTERVAL_KEY, - DFS_HEARTBEAT_INTERVAL_DEFAULT)); - assertEquals(DFS_HEARTBEAT_INTERVAL_KEY + " has wrong value", 6, - datanodeManager.getHeartbeatInterval()); + assertEquals(6, + nameNode.getConf().getLong(DFS_HEARTBEAT_INTERVAL_KEY, DFS_HEARTBEAT_INTERVAL_DEFAULT), + DFS_HEARTBEAT_INTERVAL_KEY + " has wrong value"); + assertEquals(6, datanodeManager.getHeartbeatInterval(), + DFS_HEARTBEAT_INTERVAL_KEY + " has wrong value"); assertEquals( - DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY + " has wrong value", 10 * 60 * 1000, nameNode.getConf().getInt(DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, - DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_DEFAULT)); - assertEquals(DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY - + " has wrong value", 10 * 60 * 1000, - datanodeManager.getHeartbeatRecheckInterval()); + DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_DEFAULT), + DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY + " has wrong value"); + assertEquals(10 * 60 * 1000, + datanodeManager.getHeartbeatRecheckInterval(), + DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY + " has wrong value"); // change to a value with time unit nameNode.reconfigureProperty(DFS_HEARTBEAT_INTERVAL_KEY, "1m"); - assertEquals( - DFS_HEARTBEAT_INTERVAL_KEY + " has wrong value", - 60, + assertEquals(60, nameNode.getConf().getLong(DFS_HEARTBEAT_INTERVAL_KEY, - DFS_HEARTBEAT_INTERVAL_DEFAULT)); - assertEquals(DFS_HEARTBEAT_INTERVAL_KEY + " has wrong value", 60, - datanodeManager.getHeartbeatInterval()); + DFS_HEARTBEAT_INTERVAL_DEFAULT), + 
DFS_HEARTBEAT_INTERVAL_KEY + " has wrong value"); + assertEquals(60, + datanodeManager.getHeartbeatInterval(), + DFS_HEARTBEAT_INTERVAL_KEY + " has wrong value"); // revert to defaults nameNode.reconfigureProperty(DFS_HEARTBEAT_INTERVAL_KEY, null); @@ -236,17 +239,18 @@ public void testReconfigureHearbeatCheck() throws ReconfigurationException { null); // verify defaults - assertEquals(DFS_HEARTBEAT_INTERVAL_KEY + " has wrong value", null, - nameNode.getConf().get(DFS_HEARTBEAT_INTERVAL_KEY)); - assertEquals(DFS_HEARTBEAT_INTERVAL_KEY + " has wrong value", - DFS_HEARTBEAT_INTERVAL_DEFAULT, datanodeManager.getHeartbeatInterval()); - - assertEquals(DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY - + " has wrong value", null, - nameNode.getConf().get(DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY)); - assertEquals(DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY - + " has wrong value", DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_DEFAULT, - datanodeManager.getHeartbeatRecheckInterval()); + assertEquals(null, + nameNode.getConf().get(DFS_HEARTBEAT_INTERVAL_KEY), + DFS_HEARTBEAT_INTERVAL_KEY + " has wrong value"); + assertEquals(DFS_HEARTBEAT_INTERVAL_DEFAULT, datanodeManager.getHeartbeatInterval(), + DFS_HEARTBEAT_INTERVAL_KEY + " has wrong value"); + + assertEquals(null, + nameNode.getConf().get(DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY), + DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY + " has wrong value"); + assertEquals(DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_DEFAULT, + datanodeManager.getHeartbeatRecheckInterval(), + DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY + " has wrong value"); } /** @@ -256,7 +260,8 @@ public void testReconfigureHearbeatCheck() throws ReconfigurationException { * @throws ReconfigurationException * @throws IOException */ - @Test(timeout = 30000) + @Test + @Timeout(value = 30) public void testReconfigureSPSWithStoragePolicyDisabled() throws ReconfigurationException, IOException { // shutdown cluster @@ -275,22 +280,22 @@ public void testReconfigureSPSWithStoragePolicyDisabled() StoragePolicySatisfierMode.EXTERNAL.toString()); // Since DFS_STORAGE_POLICY_ENABLED_KEY is disabled, SPS can't be enabled. - assertNull("SPS shouldn't start as " - + DFSConfigKeys.DFS_STORAGE_POLICY_ENABLED_KEY + " is disabled", - nameNode.getNamesystem().getBlockManager().getSPSManager()); + assertNull(nameNode.getNamesystem().getBlockManager().getSPSManager(), + "SPS shouldn't start as " + DFSConfigKeys.DFS_STORAGE_POLICY_ENABLED_KEY + " is disabled"); verifySPSEnabled(nameNode, DFS_STORAGE_POLICY_SATISFIER_MODE_KEY, StoragePolicySatisfierMode.EXTERNAL, false); - assertEquals(DFS_STORAGE_POLICY_SATISFIER_MODE_KEY + " has wrong value", - StoragePolicySatisfierMode.EXTERNAL.toString(), nameNode.getConf() + assertEquals(StoragePolicySatisfierMode.EXTERNAL.toString(), nameNode.getConf() .get(DFS_STORAGE_POLICY_SATISFIER_MODE_KEY, - DFS_STORAGE_POLICY_SATISFIER_MODE_DEFAULT)); + DFS_STORAGE_POLICY_SATISFIER_MODE_DEFAULT), + DFS_STORAGE_POLICY_SATISFIER_MODE_KEY + " has wrong value"); } /** * Tests enable/disable Storage Policy Satisfier dynamically. 
*/ - @Test(timeout = 30000) + @Test + @Timeout(value = 30) public void testReconfigureStoragePolicySatisfierEnabled() throws ReconfigurationException { final NameNode nameNode = cluster.getNameNode(); @@ -318,21 +323,22 @@ public void testReconfigureStoragePolicySatisfierEnabled() // enable external SPS nameNode.reconfigureProperty(DFS_STORAGE_POLICY_SATISFIER_MODE_KEY, StoragePolicySatisfierMode.EXTERNAL.toString()); - assertEquals(DFS_STORAGE_POLICY_SATISFIER_MODE_KEY + " has wrong value", - false, nameNode.getNamesystem().getBlockManager().getSPSManager() - .isSatisfierRunning()); - assertEquals(DFS_STORAGE_POLICY_SATISFIER_MODE_KEY + " has wrong value", - StoragePolicySatisfierMode.EXTERNAL.toString(), + assertEquals(false, + nameNode.getNamesystem().getBlockManager().getSPSManager().isSatisfierRunning(), + DFS_STORAGE_POLICY_SATISFIER_MODE_KEY + " has wrong value"); + assertEquals(StoragePolicySatisfierMode.EXTERNAL.toString(), nameNode.getConf().get(DFS_STORAGE_POLICY_SATISFIER_MODE_KEY, - DFS_STORAGE_POLICY_SATISFIER_MODE_DEFAULT)); - assertNotNull("SPS Manager should be created", - nameNode.getNamesystem().getBlockManager().getSPSManager()); + DFS_STORAGE_POLICY_SATISFIER_MODE_DEFAULT), + DFS_STORAGE_POLICY_SATISFIER_MODE_KEY + " has wrong value"); + assertNotNull(nameNode.getNamesystem().getBlockManager().getSPSManager(), + "SPS Manager should be created"); } /** * Test to satisfy storage policy after disabled storage policy satisfier. */ - @Test(timeout = 30000) + @Test + @Timeout(value = 30) public void testSatisfyStoragePolicyAfterSatisfierDisabled() throws ReconfigurationException, IOException { final NameNode nameNode = cluster.getNameNode(); @@ -342,8 +348,8 @@ public void testSatisfyStoragePolicyAfterSatisfierDisabled() StoragePolicySatisfierMode.NONE.toString()); verifySPSEnabled(nameNode, DFS_STORAGE_POLICY_SATISFIER_MODE_KEY, StoragePolicySatisfierMode.NONE, false); - assertNull("SPS Manager should be null", - nameNode.getNamesystem().getBlockManager().getSPSManager()); + assertNull(nameNode.getNamesystem().getBlockManager().getSPSManager(), + "SPS Manager should be null"); Path filePath = new Path("/testSPS"); DistributedFileSystem fileSystem = cluster.getFileSystem(); @@ -367,11 +373,11 @@ void verifySPSEnabled(final NameNode nameNode, String property, .getNamesystem().getBlockManager().getSPSManager(); boolean isSPSRunning = spsMgr != null ? 
spsMgr.isSatisfierRunning() : false; - assertEquals(property + " has wrong value", isSatisfierRunning, isSPSRunning); + assertEquals(isSatisfierRunning, isSPSRunning, property + " has wrong value"); String actual = nameNode.getConf().get(property, DFS_STORAGE_POLICY_SATISFIER_MODE_DEFAULT); - assertEquals(property + " has wrong value", expected, - StoragePolicySatisfierMode.fromString(actual)); + assertEquals(expected, StoragePolicySatisfierMode.fromString(actual), + property + " has wrong value"); } @Test @@ -381,29 +387,26 @@ public void testBlockInvalidateLimitAfterReconfigured() final DatanodeManager datanodeManager = nameNode.namesystem .getBlockManager().getDatanodeManager(); - assertEquals(DFS_BLOCK_INVALIDATE_LIMIT_KEY + " is not correctly set", - customizedBlockInvalidateLimit, - datanodeManager.getBlockInvalidateLimit()); + assertEquals(customizedBlockInvalidateLimit, datanodeManager.getBlockInvalidateLimit(), + DFS_BLOCK_INVALIDATE_LIMIT_KEY + " is not correctly set"); nameNode.reconfigureProperty(DFS_HEARTBEAT_INTERVAL_KEY, Integer.toString(6)); // 20 * 6 = 120 < 500 // Invalid block limit should stay same as before after reconfiguration. - assertEquals(DFS_BLOCK_INVALIDATE_LIMIT_KEY - + " is not honored after reconfiguration", - customizedBlockInvalidateLimit, - datanodeManager.getBlockInvalidateLimit()); + assertEquals(customizedBlockInvalidateLimit, datanodeManager.getBlockInvalidateLimit(), + DFS_BLOCK_INVALIDATE_LIMIT_KEY + " is not honored after reconfiguration"); nameNode.reconfigureProperty(DFS_HEARTBEAT_INTERVAL_KEY, Integer.toString(50)); // 20 * 50 = 1000 > 500 // Invalid block limit should be reset to 1000 - assertEquals(DFS_BLOCK_INVALIDATE_LIMIT_KEY - + " is not reconfigured correctly", - 1000, - datanodeManager.getBlockInvalidateLimit()); + assertEquals(1000, + datanodeManager.getBlockInvalidateLimit(), + DFS_BLOCK_INVALIDATE_LIMIT_KEY + + " is not reconfigured correctly"); } @Test @@ -477,8 +480,8 @@ public void testBlockInvalidateLimit() throws ReconfigurationException { final DatanodeManager datanodeManager = nameNode.namesystem .getBlockManager().getDatanodeManager(); - assertEquals(DFS_BLOCK_INVALIDATE_LIMIT_KEY + " is not correctly set", - customizedBlockInvalidateLimit, datanodeManager.getBlockInvalidateLimit()); + assertEquals(customizedBlockInvalidateLimit, datanodeManager.getBlockInvalidateLimit(), + DFS_BLOCK_INVALIDATE_LIMIT_KEY + " is not correctly set"); try { nameNode.reconfigureProperty(DFS_BLOCK_INVALIDATE_LIMIT_KEY, "non-numeric"); @@ -491,15 +494,15 @@ public void testBlockInvalidateLimit() throws ReconfigurationException { nameNode.reconfigureProperty(DFS_BLOCK_INVALIDATE_LIMIT_KEY, "2500"); - assertEquals(DFS_BLOCK_INVALIDATE_LIMIT_KEY + " is not honored after reconfiguration", 2500, - datanodeManager.getBlockInvalidateLimit()); + assertEquals(2500, datanodeManager.getBlockInvalidateLimit(), + DFS_BLOCK_INVALIDATE_LIMIT_KEY + " is not honored after reconfiguration"); nameNode.reconfigureProperty(DFS_HEARTBEAT_INTERVAL_KEY, "500"); // 20 * 500 (10000) > 2500 // Hence, invalid block limit should be reset to 10000 - assertEquals(DFS_BLOCK_INVALIDATE_LIMIT_KEY + " is not reconfigured correctly", 10000, - datanodeManager.getBlockInvalidateLimit()); + assertEquals(10000, datanodeManager.getBlockInvalidateLimit(), + DFS_BLOCK_INVALIDATE_LIMIT_KEY + " is not reconfigured correctly"); } @Test @@ -508,8 +511,8 @@ public void testSlowPeerTrackerEnabled() throws Exception { final DatanodeManager datanodeManager = nameNode.namesystem.getBlockManager() 
.getDatanodeManager(); - assertFalse("SlowNode tracker is already enabled. It should be disabled by default", - datanodeManager.getSlowPeerTracker().isSlowPeerTrackerEnabled()); + assertFalse(datanodeManager.getSlowPeerTracker().isSlowPeerTrackerEnabled(), + "SlowNode tracker is already enabled. It should be disabled by default"); assertTrue(datanodeManager.isSlowPeerCollectorInitialized()); try { @@ -522,13 +525,13 @@ public void testSlowPeerTrackerEnabled() throws Exception { } nameNode.reconfigurePropertyImpl(DFS_DATANODE_PEER_STATS_ENABLED_KEY, "True"); - assertTrue("SlowNode tracker is still disabled. Reconfiguration could not be successful", - datanodeManager.getSlowPeerTracker().isSlowPeerTrackerEnabled()); + assertTrue(datanodeManager.getSlowPeerTracker().isSlowPeerTrackerEnabled(), + "SlowNode tracker is still disabled. Reconfiguration could not be successful"); assertFalse(datanodeManager.isSlowPeerCollectorInitialized()); nameNode.reconfigurePropertyImpl(DFS_DATANODE_PEER_STATS_ENABLED_KEY, null); - assertFalse("SlowNode tracker is still enabled. Reconfiguration could not be successful", - datanodeManager.getSlowPeerTracker().isSlowPeerTrackerEnabled()); + assertFalse(datanodeManager.getSlowPeerTracker().isSlowPeerTrackerEnabled(), + "SlowNode tracker is still enabled. Reconfiguration could not be successful"); } @@ -538,8 +541,8 @@ public void testSlowPeerMaxNodesToReportReconf() throws Exception { final DatanodeManager datanodeManager = nameNode.namesystem.getBlockManager() .getDatanodeManager(); nameNode.reconfigurePropertyImpl(DFS_DATANODE_PEER_STATS_ENABLED_KEY, "true"); - assertTrue("SlowNode tracker is still disabled. Reconfiguration could not be successful", - datanodeManager.getSlowPeerTracker().isSlowPeerTrackerEnabled()); + assertTrue(datanodeManager.getSlowPeerTracker().isSlowPeerTrackerEnabled(), + "SlowNode tracker is still disabled. Reconfiguration could not be successful"); SlowPeerTracker tracker = datanodeManager.getSlowPeerTracker(); @@ -674,8 +677,8 @@ public void testReconfigureMinBlocksForWrite() throws Exception { LambdaTestUtils.intercept(ReconfigurationException.class, () -> nameNode.reconfigurePropertyImpl(key, "-20")); assertTrue(reconfigurationException.getCause() instanceof IllegalArgumentException); - assertEquals(key + " = '-20' is invalid. It should be a " - +"positive, non-zero integer value.", reconfigurationException.getCause().getMessage()); + assertEquals(key + " = '-20' is invalid. It should be a " + "positive, non-zero integer value.", + reconfigurationException.getCause().getMessage()); // Ensure none of the values were updated from the defaults assertEquals(defaultVal, bm.getMinBlocksForWrite(BlockType.CONTIGUOUS)); @@ -684,8 +687,8 @@ public void testReconfigureMinBlocksForWrite() throws Exception { reconfigurationException = LambdaTestUtils.intercept(ReconfigurationException.class, () -> nameNode.reconfigurePropertyImpl(key, "0")); assertTrue(reconfigurationException.getCause() instanceof IllegalArgumentException); - assertEquals(key + " = '0' is invalid. It should be a " - +"positive, non-zero integer value.", reconfigurationException.getCause().getMessage()); + assertEquals(key + " = '0' is invalid. 
It should be a " + "positive, non-zero integer value.", + reconfigurationException.getCause().getMessage()); // Ensure none of the values were updated from the defaults assertEquals(defaultVal, bm.getMinBlocksForWrite(BlockType.CONTIGUOUS)); @@ -777,8 +780,8 @@ public void testReconfigureFSNamesystemLockMetricsParameters() fail("should not reach here"); } catch (ReconfigurationException e) { assertEquals( - "Could not change property dfs.namenode.lock.detailed-metrics.enabled from " + - "'false' to 'non-boolean'", e.getMessage()); + "Could not change property dfs.namenode.lock.detailed-metrics.enabled from " + + "'false' to 'non-boolean'", e.getMessage()); } // try correct metricsEnabled. @@ -795,8 +798,8 @@ public void testReconfigureFSNamesystemLockMetricsParameters() fail("Should not reach here"); } catch (ReconfigurationException e) { assertEquals("Could not change property " + - "dfs.namenode.read-lock-reporting-threshold-ms from '" + - defaultReadLockMS + "' to 'non-numeric'", e.getMessage()); + "dfs.namenode.read-lock-reporting-threshold-ms from '" + defaultReadLockMS + + "' to 'non-numeric'", e.getMessage()); } // try correct readLockMS. @@ -812,8 +815,8 @@ public void testReconfigureFSNamesystemLockMetricsParameters() fail("Should not reach here"); } catch (ReconfigurationException e) { assertEquals("Could not change property " + - "dfs.namenode.write-lock-reporting-threshold-ms from '" + - defaultWriteLockMS + "' to 'non-numeric'", e.getMessage()); + "dfs.namenode.write-lock-reporting-threshold-ms from '" + defaultWriteLockMS + + "' to 'non-numeric'", e.getMessage()); } // try correct writeLockMS. @@ -829,8 +832,8 @@ public void testReconfigureSlowPeerCollectInterval() throws Exception { final DatanodeManager datanodeManager = nameNode.namesystem.getBlockManager().getDatanodeManager(); - assertFalse("SlowNode tracker is already enabled. It should be disabled by default", - datanodeManager.getSlowPeerTracker().isSlowPeerTrackerEnabled()); + assertFalse(datanodeManager.getSlowPeerTracker().isSlowPeerTrackerEnabled(), + "SlowNode tracker is already enabled. It should be disabled by default"); assertTrue(datanodeManager.isSlowPeerCollectorInitialized()); try { @@ -840,8 +843,8 @@ public void testReconfigureSlowPeerCollectInterval() throws Exception { } nameNode.reconfigureProperty(DFS_DATANODE_PEER_STATS_ENABLED_KEY, "True"); - assertTrue("SlowNode tracker is still disabled. Reconfiguration could not be successful", - datanodeManager.getSlowPeerTracker().isSlowPeerTrackerEnabled()); + assertTrue(datanodeManager.getSlowPeerTracker().isSlowPeerTrackerEnabled(), + "SlowNode tracker is still disabled. 
Reconfiguration could not be successful"); assertFalse(datanodeManager.isSlowPeerCollectorInitialized()); assertEquals(1800000, datanodeManager.getSlowPeerCollectionInterval()); @@ -862,7 +865,7 @@ public void testReconfigureSlowPeerCollectInterval() throws Exception { assertEquals(600000, datanodeManager.getSlowPeerCollectionInterval()); } - @After + @AfterEach public void shutDown() throws IOException { if (cluster != null) { cluster.shutdown(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRecovery.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRecovery.java index 8a701a31b9fb9..e67d3136226c2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRecovery.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRecovery.java @@ -18,9 +18,9 @@ package org.apache.hadoop.hdfs.server.namenode; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; import static org.mockito.Mockito.doNothing; import static org.mockito.Mockito.spy; @@ -51,10 +51,10 @@ import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.test.PathUtils; import org.apache.hadoop.util.StringUtils; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -import org.junit.runners.Parameterized.Parameters; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; +import org.junit.jupiter.params.ParameterizedClass; +import org.junit.jupiter.params.provider.MethodSource; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -62,9 +62,10 @@ * This tests data recovery mode for the NameNode. 
*/ -@RunWith(Parameterized.class) +@MethodSource("data") +@ParameterizedClass public class TestNameNodeRecovery { - @Parameters + public static Collection data() { Collection params = new ArrayList(); params.add(new Object[]{ Boolean.FALSE }); @@ -258,20 +259,23 @@ public Set getValidTxIds() { } /** Test an empty edit log */ - @Test(timeout=180000) + @Test + @Timeout(value = 180) public void testEmptyLog() throws IOException { runEditLogTest(new EltsTestEmptyLog(0)); } /** Test an empty edit log with padding */ - @Test(timeout=180000) + @Test + @Timeout(value = 180) public void testEmptyPaddedLog() throws IOException { runEditLogTest(new EltsTestEmptyLog( EditLogFileOutputStream.MIN_PREALLOCATION_LENGTH)); } /** Test an empty edit log with extra-long padding */ - @Test(timeout=180000) + @Test + @Timeout(value = 180) public void testEmptyExtraPaddedLog() throws IOException { runEditLogTest(new EltsTestEmptyLog( 3 * EditLogFileOutputStream.MIN_PREALLOCATION_LENGTH)); @@ -308,7 +312,8 @@ public int getMaxOpSize() { } /** Test an empty edit log with extra-long padding */ - @Test(timeout=180000) + @Test + @Timeout(value = 180) public void testNonDefaultMaxOpSize() throws IOException { runEditLogTest(new EltsTestNonDefaultMaxOpSize()); } @@ -345,13 +350,15 @@ public Set getValidTxIds() { } } - @Test(timeout=180000) + @Test + @Timeout(value = 180) public void testOpcodesAfterPadding() throws IOException { runEditLogTest(new EltsTestOpcodesAfterPadding( EditLogFileOutputStream.MIN_PREALLOCATION_LENGTH)); } - @Test(timeout=180000) + @Test + @Timeout(value = 180) public void testOpcodesAfterExtraPadding() throws IOException { runEditLogTest(new EltsTestOpcodesAfterPadding( 3 * EditLogFileOutputStream.MIN_PREALLOCATION_LENGTH)); @@ -393,7 +400,8 @@ public Set getValidTxIds() { /** Test that we can successfully recover from a situation where there is * garbage in the middle of the edit log file output stream. */ - @Test(timeout=180000) + @Test + @Timeout(value = 180) public void testSkipEdit() throws IOException { runEditLogTest(new EltsTestGarbageInEditLog()); } @@ -584,7 +592,7 @@ static void testNameNodeRecoveryImpl(Corruptor corruptor, boolean finalize) } File editFile = FSImageTestUtil.findLatestEditsLog(sd).getFile(); - assertTrue("Should exist: " + editFile, editFile.exists()); + assertTrue(editFile.exists(), "Should exist: " + editFile); // Corrupt the edit log LOG.info("corrupting edit log file '" + editFile + "'"); @@ -654,7 +662,8 @@ static void testNameNodeRecoveryImpl(Corruptor corruptor, boolean finalize) /** Test that we can successfully recover from a situation where the last * entry in the edit log has been truncated. */ - @Test(timeout=180000) + @Test + @Timeout(value = 180) public void testRecoverTruncatedEditLog() throws IOException { testNameNodeRecoveryImpl(new TruncatingCorruptor(), true); testNameNodeRecoveryImpl(new TruncatingCorruptor(), false); @@ -662,7 +671,8 @@ public void testRecoverTruncatedEditLog() throws IOException { /** Test that we can successfully recover from a situation where the last * entry in the edit log has been padded with garbage. */ - @Test(timeout=180000) + @Test + @Timeout(value = 180) public void testRecoverPaddedEditLog() throws IOException { testNameNodeRecoveryImpl(new PaddingCorruptor(), true); testNameNodeRecoveryImpl(new PaddingCorruptor(), false); @@ -670,7 +680,8 @@ public void testRecoverPaddedEditLog() throws IOException { /** Test that don't need to recover from a situation where the last * entry in the edit log has been padded with 0. 
*/ - @Test(timeout=180000) + @Test + @Timeout(value = 180) public void testRecoverZeroPaddedEditLog() throws IOException { testNameNodeRecoveryImpl(new SafePaddingCorruptor((byte)0), true); testNameNodeRecoveryImpl(new SafePaddingCorruptor((byte)0), false); @@ -678,7 +689,8 @@ public void testRecoverZeroPaddedEditLog() throws IOException { /** Test that don't need to recover from a situation where the last * entry in the edit log has been padded with 0xff bytes. */ - @Test(timeout=180000) + @Test + @Timeout(value = 180) public void testRecoverNegativeOnePaddedEditLog() throws IOException { testNameNodeRecoveryImpl(new SafePaddingCorruptor((byte)-1), true); testNameNodeRecoveryImpl(new SafePaddingCorruptor((byte)-1), false); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeResourceChecker.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeResourceChecker.java index f3e187b5e3cd9..4e4b7b0271985 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeResourceChecker.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeResourceChecker.java @@ -17,9 +17,9 @@ */ package org.apache.hadoop.hdfs.server.namenode; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; import java.io.File; import java.io.IOException; @@ -35,8 +35,8 @@ import org.apache.hadoop.hdfs.server.namenode.NameNodeResourceChecker.CheckedVolume; import org.apache.hadoop.test.PathUtils; import org.apache.hadoop.util.Time; -import org.junit.Before; -import org.junit.Test; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; import org.mockito.Mockito; public class TestNameNodeResourceChecker { @@ -45,7 +45,7 @@ public class TestNameNodeResourceChecker { private File baseDir; private File nameDir; - @Before + @BeforeEach public void setUp () throws IOException { conf = new Configuration(); nameDir = new File(BASE_DIR, "resource-check-name-dir"); @@ -63,9 +63,9 @@ public void testCheckAvailability() conf.setLong(DFSConfigKeys.DFS_NAMENODE_DU_RESERVED_KEY, 0); NameNodeResourceChecker nb = new NameNodeResourceChecker(conf); assertTrue( + nb.hasAvailableDiskSpace(), "isResourceAvailable must return true if " + - "disk usage is lower than threshold", - nb.hasAvailableDiskSpace()); + "disk usage is lower than threshold"); } /** @@ -77,9 +77,9 @@ public void testCheckAvailabilityNeg() throws IOException { conf.setLong(DFSConfigKeys.DFS_NAMENODE_DU_RESERVED_KEY, Long.MAX_VALUE); NameNodeResourceChecker nb = new NameNodeResourceChecker(conf); assertFalse( + nb.hasAvailableDiskSpace(), "isResourceAvailable must return false if " + - "disk usage is higher than threshold", - nb.hasAvailableDiskSpace()); + "disk usage is higher than threshold"); } /** @@ -114,10 +114,8 @@ public void testCheckThatNameNodeResourceMonitorIsRunning() break; } } - assertTrue("NN resource monitor should be running", - isNameNodeMonitorRunning); - assertFalse("NN should not presently be in safe mode", - cluster.getNameNode().isInSafeMode()); + assertTrue(isNameNodeMonitorRunning, "NN resource monitor should be running"); + 
assertFalse(cluster.getNameNode().isInSafeMode(), "NN should not presently be in safe mode"); mockResourceChecker.setResourcesAvailable(false); @@ -128,16 +126,16 @@ public void testCheckThatNameNodeResourceMonitorIsRunning() Thread.sleep(1000); } - assertTrue("NN should be in safe mode after resources crossed threshold", - cluster.getNameNode().isInSafeMode()); + assertTrue(cluster.getNameNode().isInSafeMode(), + "NN should be in safe mode after resources crossed threshold"); mockResourceChecker.setResourcesAvailable(true); while (cluster.getNameNode().isInSafeMode() && Time.now() < startMillis + (60 * 1000)) { Thread.sleep(1000); } - assertTrue("NN should leave safe mode after resources not crossed threshold", - !cluster.getNameNode().isInSafeMode()); + assertTrue(!cluster.getNameNode().isInSafeMode(), + "NN should leave safe mode after resources not crossed threshold"); } finally { if (cluster != null) cluster.shutdown(); @@ -161,8 +159,8 @@ public void testChecking2NameDirsOnOneVolume() throws IOException { NameNodeResourceChecker nb = new NameNodeResourceChecker(conf); - assertEquals("Should not check the same volume more than once.", - 1, nb.getVolumesLowOnSpace().size()); + assertEquals(1, nb.getVolumesLowOnSpace().size(), + "Should not check the same volume more than once."); } /** @@ -180,8 +178,8 @@ public void testCheckingExtraVolumes() throws IOException { NameNodeResourceChecker nb = new NameNodeResourceChecker(conf); - assertEquals("Should not check the same volume more than once.", - 1, nb.getVolumesLowOnSpace().size()); + assertEquals(1, nb.getVolumesLowOnSpace().size(), + "Should not check the same volume more than once."); } /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeResourcePolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeResourcePolicy.java index 073ee37781973..a0652cb2f6d1f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeResourcePolicy.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeResourcePolicy.java @@ -17,15 +17,15 @@ */ package org.apache.hadoop.hdfs.server.namenode; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; import java.util.ArrayList; import java.util.Collection; -import org.junit.Test; +import org.junit.jupiter.api.Test; import org.slf4j.LoggerFactory; import org.apache.hadoop.test.GenericTestUtils.LogCapturer; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRespectsBindHostKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRespectsBindHostKeys.java index 594b07b583ee3..0217a6323469f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRespectsBindHostKeys.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRespectsBindHostKeys.java @@ -17,12 +17,10 @@ */ package org.apache.hadoop.hdfs.server.namenode; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertThat; -import static org.hamcrest.core.Is.is; -import 
static org.hamcrest.core.IsNot.not; +import static org.junit.jupiter.api.Assertions.assertFalse; -import org.junit.Test; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.hdfs.DFSConfigKeys; @@ -37,7 +35,8 @@ import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; import static org.apache.hadoop.hdfs.DFSConfigKeys.*; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.assertj.core.api.Assertions.assertThat; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; @@ -75,7 +74,8 @@ private static String getLifelineRpcServerAddress(MiniDFSCluster cluster) { .toString(); } - @Test (timeout=300000) + @Test + @Timeout(value = 300) public void testRpcBindHostKey() throws IOException { Configuration conf = new HdfsConfiguration(); MiniDFSCluster cluster = null; @@ -87,8 +87,9 @@ public void testRpcBindHostKey() throws IOException { cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build(); cluster.waitActive(); String address = getRpcServerAddress(cluster); - assertThat("Bind address not expected to be wildcard by default.", - address, not("/" + WILDCARD_ADDRESS)); + assertThat(address) + .as("Bind address not expected to be wildcard by default.") + .isNotEqualTo("/" + WILDCARD_ADDRESS); } finally { if (cluster != null) { cluster.shutdown(); @@ -106,8 +107,9 @@ public void testRpcBindHostKey() throws IOException { cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build(); cluster.waitActive(); String address = getRpcServerAddress(cluster); - assertThat("Bind address " + address + " is not wildcard.", - address, is("/" + WILDCARD_ADDRESS)); + assertThat(address) + .as("Bind address " + address + " is not wildcard.") + .isEqualTo("/" + WILDCARD_ADDRESS); } finally { if (cluster != null) { cluster.shutdown(); @@ -115,7 +117,8 @@ public void testRpcBindHostKey() throws IOException { } } - @Test (timeout=300000) + @Test + @Timeout(value = 300) public void testServiceRpcBindHostKey() throws IOException { Configuration conf = new HdfsConfiguration(); MiniDFSCluster cluster = null; @@ -129,8 +132,9 @@ public void testServiceRpcBindHostKey() throws IOException { cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build(); cluster.waitActive(); String address = getServiceRpcServerAddress(cluster); - assertThat("Bind address not expected to be wildcard by default.", - address, not("/" + WILDCARD_ADDRESS)); + assertThat(address) + .as("Bind address not expected to be wildcard by default.") + .isNotEqualTo("/" + WILDCARD_ADDRESS); } finally { if (cluster != null) { cluster.shutdown(); @@ -148,8 +152,9 @@ public void testServiceRpcBindHostKey() throws IOException { cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build(); cluster.waitActive(); String address = getServiceRpcServerAddress(cluster); - assertThat("Bind address " + address + " is not wildcard.", - address, is("/" + WILDCARD_ADDRESS)); + assertThat(address) + .as("Bind address " + address + " is not wildcard.") + .isEqualTo("/" + WILDCARD_ADDRESS); } finally { if (cluster != null) { cluster.shutdown(); @@ -157,7 +162,8 @@ public void testServiceRpcBindHostKey() throws IOException { } } - @Test (timeout=300000) + @Test + @Timeout(value = 300) public void testLifelineRpcBindHostKey() throws IOException { Configuration conf = new HdfsConfiguration(); MiniDFSCluster cluster = null; @@ -171,8 +177,9 @@ 
public void testLifelineRpcBindHostKey() throws IOException { cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build(); cluster.waitActive(); String address = getLifelineRpcServerAddress(cluster); - assertThat("Bind address not expected to be wildcard by default.", - address, not("/" + WILDCARD_ADDRESS)); + assertThat(address) + .as("Bind address not expected to be wildcard by default.") + .isNotEqualTo("/" + WILDCARD_ADDRESS); } finally { if (cluster != null) { cluster.shutdown(); @@ -190,8 +197,9 @@ public void testLifelineRpcBindHostKey() throws IOException { cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build(); cluster.waitActive(); String address = getLifelineRpcServerAddress(cluster); - assertThat("Bind address " + address + " is not wildcard.", - address, is("/" + WILDCARD_ADDRESS)); + assertThat(address) + .as("Bind address " + address + " is not wildcard.") + .isEqualTo("/" + WILDCARD_ADDRESS); } finally { if (cluster != null) { cluster.shutdown(); @@ -199,7 +207,8 @@ public void testLifelineRpcBindHostKey() throws IOException { } } - @Test(timeout=300000) + @Test + @Timeout(value = 300) public void testHttpBindHostKey() throws IOException { Configuration conf = new HdfsConfiguration(); MiniDFSCluster cluster = null; @@ -212,8 +221,8 @@ public void testHttpBindHostKey() throws IOException { cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build(); cluster.waitActive(); String address = cluster.getNameNode().getHttpAddress().toString(); - assertFalse("HTTP Bind address not expected to be wildcard by default.", - address.startsWith(WILDCARD_ADDRESS)); + assertFalse(address.startsWith(WILDCARD_ADDRESS), + "HTTP Bind address not expected to be wildcard by default."); } finally { if (cluster != null) { cluster.shutdown(); @@ -232,8 +241,8 @@ public void testHttpBindHostKey() throws IOException { cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build(); cluster.waitActive(); String address = cluster.getNameNode().getHttpAddress().toString(); - assertTrue("HTTP Bind address " + address + " is not wildcard.", - address.startsWith(WILDCARD_ADDRESS)); + assertTrue(address.startsWith(WILDCARD_ADDRESS), + "HTTP Bind address " + address + " is not wildcard."); } finally { if (cluster != null) { cluster.shutdown(); @@ -265,7 +274,8 @@ private static void setupSsl() throws Exception { * pick a different host/port combination. 
* @throws Exception */ - @Test (timeout=300000) + @Test + @Timeout(value = 300) public void testHttpsBindHostKey() throws Exception { Configuration conf = new HdfsConfiguration(); MiniDFSCluster cluster = null; @@ -286,8 +296,8 @@ public void testHttpsBindHostKey() throws Exception { cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build(); cluster.waitActive(); String address = cluster.getNameNode().getHttpsAddress().toString(); - assertFalse("HTTP Bind address not expected to be wildcard by default.", - address.startsWith(WILDCARD_ADDRESS)); + assertFalse(address.startsWith(WILDCARD_ADDRESS), + "HTTP Bind address not expected to be wildcard by default."); } finally { if (cluster != null) { cluster.shutdown(); @@ -306,8 +316,8 @@ public void testHttpsBindHostKey() throws Exception { cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build(); cluster.waitActive(); String address = cluster.getNameNode().getHttpsAddress().toString(); - assertTrue("HTTP Bind address " + address + " is not wildcard.", - address.startsWith(WILDCARD_ADDRESS)); + assertTrue(address.startsWith(WILDCARD_ADDRESS), + "HTTP Bind address " + address + " is not wildcard."); } finally { if (cluster != null) { cluster.shutdown(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRetryCacheMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRetryCacheMetrics.java index 93a7338f794b8..d4806b1804b37 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRetryCacheMetrics.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRetryCacheMetrics.java @@ -28,11 +28,11 @@ import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.MiniDFSNNTopology; -import org.junit.Before; -import org.junit.After; -import org.junit.Test; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.Test; -import static org.junit.Assert.assertEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_ENABLE_RETRY_CACHE_KEY; /** @@ -53,7 +53,7 @@ public class TestNameNodeRetryCacheMetrics { private RetryCacheMetrics metrics; /** Start a cluster */ - @Before + @BeforeEach public void setup() throws Exception { conf = new HdfsConfiguration(); conf.setBoolean(DFS_NAMENODE_ENABLE_RETRY_CACHE_KEY, true); @@ -74,7 +74,7 @@ public void setup() throws Exception { * Cleanup after the test * @throws IOException **/ - @After + @AfterEach public void cleanup() throws IOException { if (cluster != null) { cluster.shutdown(); @@ -98,9 +98,9 @@ public void testRetryCacheMetrics() throws IOException { } private void checkMetrics(long hit, long cleared, long updated) { - assertEquals("CacheHit", hit, metrics.getCacheHit()); - assertEquals("CacheCleared", cleared, metrics.getCacheCleared()); - assertEquals("CacheUpdated", updated, metrics.getCacheUpdated()); + assertEquals(hit, metrics.getCacheHit(), "CacheHit"); + assertEquals(cleared, metrics.getCacheCleared(), "CacheCleared"); + assertEquals(updated, metrics.getCacheUpdated(), "CacheUpdated"); } private void trySaveNamespace() throws IOException { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRpcServer.java 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRpcServer.java index d29e11cffee71..7c203208ee4c6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRpcServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRpcServer.java @@ -26,10 +26,10 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_IP_PROXY_USERS; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RPC_BIND_HOST_KEY; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotEquals; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertNull; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; import java.io.IOException; import java.nio.charset.StandardCharsets; @@ -55,7 +55,7 @@ import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.test.LambdaTestUtils; -import org.junit.Test; +import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; public class TestNameNodeRpcServer { @@ -73,8 +73,8 @@ public void testNamenodeRpcBindAny() throws IOException { try { cluster = new MiniDFSCluster.Builder(conf).build(); cluster.waitActive(); - assertEquals("0.0.0.0", ((NameNodeRpcServer)cluster.getNameNodeRpc()) - .getClientRpcServer().getListenerAddress().getHostName()); + assertEquals("0.0.0.0", ((NameNodeRpcServer) cluster.getNameNodeRpc()).getClientRpcServer() + .getListenerAddress().getHostName()); } finally { if (cluster != null) { cluster.shutdown(); @@ -240,7 +240,7 @@ public void testNamenodeRpcClientIpProxy() throws IOException { // found some other host, so things are good break; } else if (trial == ITERATIONS_TO_USE - 1) { - assertNotEquals("Failed to get non-node1", hosts[0], host); + assertNotEquals(hosts[0], host, "Failed to get non-node1"); } } // Run as fake joe to authorize the test @@ -252,7 +252,7 @@ public void testNamenodeRpcClientIpProxy() throws IOException { // As joe, we should get all node1. 
for (int trial = 0; trial < ITERATIONS_TO_USE; ++trial) { String host = getPreferredLocation(joeFs, fooName); - assertEquals("Trial " + trial + " failed", hosts[0], host); + assertEquals(hosts[0], host, "Trial " + trial + " failed"); } } finally { CallerContext.setCurrent(original); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRpcServerMethods.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRpcServerMethods.java index 50740bd06e831..c752ed54c8878 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRpcServerMethods.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRpcServerMethods.java @@ -31,12 +31,12 @@ import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols; import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.test.GenericTestUtils; -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; -import static org.junit.Assert.assertEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.fail; public class TestNameNodeRpcServerMethods { private static NamenodeProtocols nnRpc; @@ -44,7 +44,7 @@ public class TestNameNodeRpcServerMethods { private static MiniDFSCluster cluster; /** Start a cluster */ - @Before + @BeforeEach public void setup() throws Exception { conf = new HdfsConfiguration(); cluster = new MiniDFSCluster.Builder(conf).build(); @@ -60,7 +60,7 @@ public void setup() throws Exception { * @throws SafeModeException * @throws AccessControlException */ - @After + @AfterEach public void cleanup() throws IOException { if (cluster != null) { cluster.shutdown(); @@ -73,7 +73,7 @@ public void testDeleteSnapshotWhenSnapshotNameIsEmpty() throws Exception { String dir = "/testNamenodeRetryCache/testDelete"; try { nnRpc.deleteSnapshot(dir, null); - Assert.fail("testdeleteSnapshot is not thrown expected exception "); + fail("testdeleteSnapshot is not thrown expected exception "); } catch (IOException e) { // expected GenericTestUtils.assertExceptionContains( @@ -81,7 +81,7 @@ public void testDeleteSnapshotWhenSnapshotNameIsEmpty() throws Exception { } try { nnRpc.deleteSnapshot(dir, ""); - Assert.fail("testdeleteSnapshot is not thrown expected exception"); + fail("testdeleteSnapshot is not thrown expected exception"); } catch (IOException e) { // expected GenericTestUtils.assertExceptionContains( diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeStatusMXBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeStatusMXBean.java index a2896cec9fd09..b465b19c12f3b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeStatusMXBean.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeStatusMXBean.java @@ -18,6 +18,8 @@ package org.apache.hadoop.hdfs.server.namenode; import java.util.function.Supplier; + +import org.junit.jupiter.api.Timeout; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; @@ -26,8 +28,7 @@ import 
org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.test.GenericTestUtils; -import org.junit.Assert; -import org.junit.Test; +import org.junit.jupiter.api.Test; import javax.management.MBeanServer; import javax.management.ObjectName; @@ -35,6 +36,9 @@ import java.util.List; import java.util.concurrent.TimeUnit; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; + /** * Class for testing {@link NameNodeStatusMXBean} implementation. */ @@ -43,7 +47,8 @@ public class TestNameNodeStatusMXBean { public static final Logger LOG = LoggerFactory.getLogger( TestNameNodeStatusMXBean.class); - @Test(timeout = 120000L) + @Test + @Timeout(120) public void testNameNodeStatusMXBean() throws Exception { Configuration conf = new Configuration(); MiniDFSCluster cluster = null; @@ -60,41 +65,41 @@ public void testNameNodeStatusMXBean() throws Exception { // Get attribute "NNRole" String nnRole = (String)mbs.getAttribute(mxbeanName, "NNRole"); - Assert.assertEquals(nn.getNNRole(), nnRole); + assertEquals(nn.getNNRole(), nnRole); // Get attribute "State" String state = (String)mbs.getAttribute(mxbeanName, "State"); - Assert.assertEquals(nn.getState(), state); + assertEquals(nn.getState(), state); // Get attribute "HostAndPort" String hostAndPort = (String)mbs.getAttribute(mxbeanName, "HostAndPort"); - Assert.assertEquals(nn.getHostAndPort(), hostAndPort); + assertEquals(nn.getHostAndPort(), hostAndPort); // Get attribute "SecurityEnabled" boolean securityEnabled = (boolean)mbs.getAttribute(mxbeanName, "SecurityEnabled"); - Assert.assertEquals(nn.isSecurityEnabled(), securityEnabled); + assertEquals(nn.isSecurityEnabled(), securityEnabled); // Get attribute "LastHATransitionTime" long lastHATransitionTime = (long)mbs.getAttribute(mxbeanName, "LastHATransitionTime"); - Assert.assertEquals(nn.getLastHATransitionTime(), lastHATransitionTime); + assertEquals(nn.getLastHATransitionTime(), lastHATransitionTime); // Get attribute "BytesWithFutureGenerationStamps" long bytesWithFutureGenerationStamps = (long)mbs.getAttribute( mxbeanName, "BytesWithFutureGenerationStamps"); - Assert.assertEquals(nn.getBytesWithFutureGenerationStamps(), + assertEquals(nn.getBytesWithFutureGenerationStamps(), bytesWithFutureGenerationStamps); // Get attribute "SlowPeersReport" String slowPeersReport = (String)mbs.getAttribute(mxbeanName, "SlowPeersReport"); - Assert.assertEquals(nn.getSlowPeersReport(), slowPeersReport); + assertEquals(nn.getSlowPeersReport(), slowPeersReport); // Get attribute "SlowDisksReport" String slowDisksReport = (String)mbs.getAttribute(mxbeanName, "SlowDisksReport"); - Assert.assertEquals(nn.getSlowDisksReport(), slowDisksReport); + assertEquals(nn.getSlowDisksReport(), slowDisksReport); } finally { if (cluster != null) { cluster.shutdown(); @@ -115,7 +120,7 @@ public void testNameNodeMXBeanSlowDisksEnabled() throws Exception { try { List datanodes = cluster.getDataNodes(); - Assert.assertEquals(datanodes.size(), 1); + assertEquals(datanodes.size(), 1); DataNode datanode = datanodes.get(0); String slowDiskPath = "test/data1/slowVolume"; datanode.getDiskMetrics().addSlowDiskForTesting(slowDiskPath, null); @@ -137,9 +142,8 @@ public Boolean get() { String slowDisksReport = (String)mbs.getAttribute( mxbeanName, "SlowDisksReport"); - Assert.assertEquals(datanodeManager.getSlowDisksReport(), - slowDisksReport); - Assert.assertTrue(slowDisksReport.contains(slowDiskPath)); + 
assertEquals(datanodeManager.getSlowDisksReport(), slowDisksReport); + assertTrue(slowDisksReport.contains(slowDiskPath)); } finally { if (cluster != null) { cluster.shutdown(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java index 7f6f39902377d..da538fc732ea8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java @@ -19,10 +19,12 @@ -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; - +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; import java.io.IOException; import java.util.EnumSet; import java.util.HashMap; @@ -60,10 +62,10 @@ import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.LightWeightCache; -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; /** * Tests for ensuring the namenode retry cache works correctly for @@ -94,7 +96,7 @@ public class TestNamenodeRetryCache { private static final int BlockSize = 512; /** Start a cluster */ - @Before + @BeforeEach public void setup() throws Exception { conf = new HdfsConfiguration(); conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BlockSize); @@ -112,7 +114,7 @@ public void setup() throws Exception { * @throws UnresolvedLinkException * @throws SafeModeException * @throws AccessControlException */ - @After + @AfterEach public void cleanup() throws IOException { if (cluster != null) { cluster.shutdown(); @@ -173,7 +175,7 @@ public void testConcat() throws Exception { try { // Second non-retry call should fail with an exception nnRpc.concat(file1, new String[]{file2}); - Assert.fail("testConcat - expected exception is not thrown"); + fail("testConcat - expected exception is not thrown"); } catch (IOException e) { // Expected } @@ -189,13 +191,13 @@ public void testDelete() throws Exception { newCall(); nnRpc.mkdirs(dir, perm, true); newCall(); - Assert.assertTrue(nnRpc.delete(dir, false)); - Assert.assertTrue(nnRpc.delete(dir, false)); - Assert.assertTrue(nnRpc.delete(dir, false)); + assertTrue(nnRpc.delete(dir, false)); + assertTrue(nnRpc.delete(dir, false)); + assertTrue(nnRpc.delete(dir, false)); // non-retried call fails and gets false as return newCall(); - Assert.assertFalse(nnRpc.delete(dir, false)); + assertFalse(nnRpc.delete(dir, false)); } /** @@ -216,7 +218,7 @@ public void testCreateSymlink() throws Exception { try { // Second non-retry call should fail with an exception nnRpc.createSymlink(target, "/a/b", perm, true); - Assert.fail("testCreateSymlink - expected exception is not thrown"); + fail("testCreateSymlink - expected exception is not thrown"); } catch (IOException e) { // Expected } @@ 
-233,19 +235,21 @@ public void testCreate() throws Exception { HdfsFileStatus status = nnRpc.create(src, perm, "holder", new EnumSetWritable(EnumSet.of(CreateFlag.CREATE)), true, (short) 1, BlockSize, null, null, null); - Assert.assertEquals(status, nnRpc.create(src, perm, "holder", - new EnumSetWritable(EnumSet.of(CreateFlag.CREATE)), true, - (short) 1, BlockSize, null, null, null)); - Assert.assertEquals(status, nnRpc.create(src, perm, "holder", - new EnumSetWritable(EnumSet.of(CreateFlag.CREATE)), true, - (short) 1, BlockSize, null, null, null)); + assertEquals(status, + nnRpc.create(src, perm, "holder", + new EnumSetWritable(EnumSet.of(CreateFlag.CREATE)), true, (short) 1, + BlockSize, null, null, null)); + assertEquals(status, + nnRpc.create(src, perm, "holder", + new EnumSetWritable(EnumSet.of(CreateFlag.CREATE)), true, (short) 1, + BlockSize, null, null, null)); // A non-retried call fails newCall(); try { nnRpc.create(src, perm, "holder", new EnumSetWritable(EnumSet.of(CreateFlag.CREATE)), true, (short) 1, BlockSize, null, null, null); - Assert.fail("testCreate - expected exception is not thrown"); + fail("testCreate - expected exception is not thrown"); } catch (IOException e) { // expected } @@ -265,17 +269,17 @@ public void testAppend() throws Exception { newCall(); LastBlockWithStatus b = nnRpc.append(src, "holder", new EnumSetWritable<>(EnumSet.of(CreateFlag.APPEND))); - Assert.assertEquals(b, nnRpc.append(src, "holder", - new EnumSetWritable<>(EnumSet.of(CreateFlag.APPEND)))); - Assert.assertEquals(b, nnRpc.append(src, "holder", - new EnumSetWritable<>(EnumSet.of(CreateFlag.APPEND)))); + assertEquals(b, + nnRpc.append(src, "holder", new EnumSetWritable<>(EnumSet.of(CreateFlag.APPEND)))); + assertEquals(b, + nnRpc.append(src, "holder", new EnumSetWritable<>(EnumSet.of(CreateFlag.APPEND)))); // non-retried call fails newCall(); try { nnRpc.append(src, "holder", new EnumSetWritable<>(EnumSet.of(CreateFlag.APPEND))); - Assert.fail("testAppend - expected exception is not thrown"); + fail("testAppend - expected exception is not thrown"); } catch (Exception e) { // Expected } @@ -294,13 +298,13 @@ public void testRename1() throws Exception { // Retried renames succeed newCall(); - Assert.assertTrue(nnRpc.rename(src, target)); - Assert.assertTrue(nnRpc.rename(src, target)); - Assert.assertTrue(nnRpc.rename(src, target)); + assertTrue(nnRpc.rename(src, target)); + assertTrue(nnRpc.rename(src, target)); + assertTrue(nnRpc.rename(src, target)); // A non-retried request fails newCall(); - Assert.assertFalse(nnRpc.rename(src, target)); + assertFalse(nnRpc.rename(src, target)); } /** @@ -323,7 +327,7 @@ public void testRename2() throws Exception { newCall(); try { nnRpc.rename2(src, target, Rename.NONE); - Assert.fail("testRename 2 expected exception is not thrown"); + fail("testRename 2 expected exception is not thrown"); } catch (IOException e) { // expected } @@ -333,7 +337,8 @@ public void testRename2() throws Exception { * Make sure a retry call does not hang because of the exception thrown in the * first call. 
*/ - @Test(timeout = 60000) + @Test + @Timeout(value = 60) public void testUpdatePipelineWithFailOver() throws Exception { cluster.shutdown(); nnRpc = null; @@ -378,15 +383,15 @@ public void testSnapshotMethods() throws Exception { // Test retry of create snapshot newCall(); String name = nnRpc.createSnapshot(dir, "snap1"); - Assert.assertEquals(name, nnRpc.createSnapshot(dir, "snap1")); - Assert.assertEquals(name, nnRpc.createSnapshot(dir, "snap1")); - Assert.assertEquals(name, nnRpc.createSnapshot(dir, "snap1")); + assertEquals(name, nnRpc.createSnapshot(dir, "snap1")); + assertEquals(name, nnRpc.createSnapshot(dir, "snap1")); + assertEquals(name, nnRpc.createSnapshot(dir, "snap1")); // Non retried calls should fail newCall(); try { nnRpc.createSnapshot(dir, "snap1"); - Assert.fail("testSnapshotMethods expected exception is not thrown"); + fail("testSnapshotMethods expected exception is not thrown"); } catch (IOException e) { // exptected } @@ -401,7 +406,7 @@ public void testSnapshotMethods() throws Exception { newCall(); try { nnRpc.renameSnapshot(dir, "snap1", "snap2"); - Assert.fail("testSnapshotMethods expected exception is not thrown"); + fail("testSnapshotMethods expected exception is not thrown"); } catch (IOException e) { // expected } @@ -416,7 +421,7 @@ public void testSnapshotMethods() throws Exception { newCall(); try { nnRpc.deleteSnapshot(dir, "snap2"); - Assert.fail("testSnapshotMethods expected exception is not thrown"); + fail("testSnapshotMethods expected exception is not thrown"); } catch (IOException e) { // expected } @@ -426,11 +431,11 @@ public void testSnapshotMethods() throws Exception { public void testRetryCacheConfig() { // By default retry configuration should be enabled Configuration conf = new HdfsConfiguration(); - Assert.assertNotNull(FSNamesystem.initRetryCache(conf)); + assertNotNull(FSNamesystem.initRetryCache(conf)); // If retry cache is disabled, it should not be created conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ENABLE_RETRY_CACHE_KEY, false); - Assert.assertNull(FSNamesystem.initRetryCache(conf)); + assertNull(FSNamesystem.initRetryCache(conf)); } /** @@ -444,7 +449,7 @@ public void testRetryCacheRebuild() throws Exception { LightWeightCache cacheSet = (LightWeightCache) namesystem.getRetryCache().getCacheSet(); - assertEquals("Retry cache size is wrong", 39, cacheSet.size()); + assertEquals(39, cacheSet.size(), "Retry cache size is wrong"); Map oldEntries = new HashMap(); @@ -463,7 +468,7 @@ public void testRetryCacheRebuild() throws Exception { assertTrue(namesystem.hasRetryCache()); cacheSet = (LightWeightCache) namesystem .getRetryCache().getCacheSet(); - assertEquals("Retry cache size is wrong", 39, cacheSet.size()); + assertEquals(39, cacheSet.size(), "Retry cache size is wrong"); iter = cacheSet.iterator(); while (iter.hasNext()) { CacheEntry entry = iter.next(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeStorageDirectives.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeStorageDirectives.java index 0469b91988451..ffeeea59c2063 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeStorageDirectives.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeStorageDirectives.java @@ -30,8 +30,9 @@ import org.apache.hadoop.hdfs.server.datanode.fsdataset.RoundRobinVolumeChoosingPolicy; import 
org.apache.hadoop.hdfs.server.datanode.fsdataset.VolumeChoosingPolicy; import org.apache.hadoop.net.Node; -import org.junit.After; -import org.junit.Test; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; import java.io.IOException; import java.net.InetSocketAddress; @@ -42,9 +43,9 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; /** * Test to ensure that the StorageType and StorageID sent from Namenode @@ -58,7 +59,7 @@ public class TestNamenodeStorageDirectives { private MiniDFSCluster cluster; - @After + @AfterEach public void tearDown() { shutdown(); } @@ -192,7 +193,8 @@ private void testStorageTypes(StorageType[][] storageTypes, * Types. * @throws IOException */ - @Test(timeout=120000) + @Test + @Timeout(value = 120) public void testTargetStorageTypes() throws ReconfigurationException, InterruptedException, TimeoutException, IOException { // DISK and not anything else. @@ -311,7 +313,8 @@ private DatanodeStorageInfo getDatanodeStorageInfo(int dnIndex) return dnManager.getDatanode(dnId).getStorageInfos()[0]; } - @Test(timeout=60000) + @Test + @Timeout(value = 60) public void testStorageIDBlockPlacementSpecific() throws ReconfigurationException, InterruptedException, TimeoutException, IOException { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNestedEncryptionZones.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNestedEncryptionZones.java index d1a1ce7968677..46af54df1a432 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNestedEncryptionZones.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNestedEncryptionZones.java @@ -34,17 +34,18 @@ import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.ToolRunner; -import org.junit.After; -import org.junit.Before; -import org.junit.Test; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; import org.slf4j.LoggerFactory; import org.slf4j.event.Level; import java.io.File; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; /** * Test the behavior of nested encryption zones. 
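The @Test(timeout = ...) annotations removed in the hunks above are replaced by a separate @Timeout annotation, and the unit changes from milliseconds to seconds. A minimal sketch of the pattern, on a hypothetical test class that is not part of this patch:

// Sketch only: class and method names are hypothetical.
// JUnit 4 expressed the limit in milliseconds on the @Test annotation itself:
//   @Test(timeout = 120000)
// JUnit 5 moves it to @Timeout, whose default unit is seconds.
import java.util.concurrent.TimeUnit;

import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.Timeout;

public class TimeoutMigrationExample {

  @Test
  @Timeout(value = 120)   // 120 seconds, equivalent to the old 120000 ms
  public void convertedTest() throws Exception {
    // test body is unchanged by the migration
  }

  @Test
  @Timeout(value = 120000, unit = TimeUnit.MILLISECONDS)   // same limit, explicit unit
  public void convertedTestWithExplicitUnit() throws Exception {
  }
}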
@@ -88,7 +89,7 @@ private void setProvider() { .getProvider()); } - @Before + @BeforeEach public void setup() throws Exception { Configuration conf = new HdfsConfiguration(); FileSystemTestHelper fsHelper = new FileSystemTestHelper(); @@ -114,7 +115,7 @@ public void setup() throws Exception { DFSTestUtil.createKey(NESTED_EZ_KEY, cluster, conf); } - @After + @AfterEach public void tearDown() throws Exception { if (cluster != null) { cluster.shutdown(); @@ -122,7 +123,8 @@ public void tearDown() throws Exception { } } - @Test(timeout = 60000) + @Test + @Timeout(value = 60) public void testNestedEncryptionZones() throws Exception { initTopEZDirAndNestedEZDir(new Path(rootDir, "topEZ")); verifyEncryption(); @@ -166,7 +168,8 @@ public void testNestedEncryptionZones() throws Exception { new Path(rootDir, "newTopEZ/newNestedEZ")); } - @Test(timeout = 60000) + @Test + @Timeout(value = 60) public void testNestedEZWithRoot() throws Exception { initTopEZDirAndNestedEZDir(rootDir); verifyEncryption(); @@ -188,12 +191,10 @@ public void testNestedEZWithRoot() throws Exception { final Path expectedNestedEZTrash = fs.makeQualified( new Path(nestedEZDir, suffixTrashPath)); - assertEquals("Top ez trash should be " + expectedTopEZTrash, - expectedTopEZTrash, topEZTrash); - assertEquals("Root trash should be equal with TopEZFile trash", - topEZTrash, rootTrash); - assertEquals("Nested ez Trash should be " + expectedNestedEZTrash, - expectedNestedEZTrash, nestedEZTrash); + assertEquals(expectedTopEZTrash, topEZTrash, "Top ez trash should be " + expectedTopEZTrash); + assertEquals(topEZTrash, rootTrash, "Root trash should be equal with TopEZFile trash"); + assertEquals(expectedNestedEZTrash, nestedEZTrash, + "Nested ez Trash should be " + expectedNestedEZTrash); // delete rename file and test trash FsShell shell = new FsShell(fs.getConf()); @@ -205,9 +206,9 @@ public void testNestedEZWithRoot() throws Exception { ToolRunner.run(shell, new String[]{"-rm", topEZFile.toString()}); ToolRunner.run(shell, new String[]{"-rm", nestedEZFile.toString()}); - assertTrue("File not in trash : " + topTrashFile, fs.exists(topTrashFile)); + assertTrue(fs.exists(topTrashFile), "File not in trash : " + topTrashFile); assertTrue( - "File not in trash : " + nestedTrashFile, fs.exists(nestedTrashFile)); + fs.exists(nestedTrashFile), "File not in trash : " + nestedTrashFile); } private void renameChildrenOfEZ() throws Exception{ @@ -285,14 +286,11 @@ private void initTopEZDirAndNestedEZDir(Path topPath) throws Exception { } private void verifyEncryption() throws Exception { - assertEquals("Top EZ dir is encrypted", - true, fs.getFileStatus(topEZDir).isEncrypted()); - assertEquals("Nested EZ dir is encrypted", - true, fs.getFileStatus(nestedEZDir).isEncrypted()); - assertEquals("Top zone file is encrypted", - true, fs.getFileStatus(topEZFile).isEncrypted()); - assertEquals("Nested zone file is encrypted", - true, fs.getFileStatus(nestedEZFile).isEncrypted()); + assertEquals(true, fs.getFileStatus(topEZDir).isEncrypted(), "Top EZ dir is encrypted"); + assertEquals(true, fs.getFileStatus(nestedEZDir).isEncrypted(), "Nested EZ dir is encrypted"); + assertEquals(true, fs.getFileStatus(topEZFile).isEncrypted(), "Top zone file is encrypted"); + assertEquals(true, fs.getFileStatus(nestedEZFile).isEncrypted(), + "Nested zone file is encrypted"); DFSTestUtil.verifyFilesEqual(fs, topEZBaseFile, topEZFile, len); DFSTestUtil.verifyFilesEqual(fs, nestedEZBaseFile, nestedEZFile, len); diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNetworkTopologyServlet.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNetworkTopologyServlet.java index 7796ed4182ee6..87a582c02598d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNetworkTopologyServlet.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNetworkTopologyServlet.java @@ -24,7 +24,7 @@ import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.net.StaticMapping; -import org.junit.Test; +import org.junit.jupiter.api.Test; import java.io.ByteArrayOutputStream; import java.io.IOException; @@ -34,8 +34,8 @@ import java.util.Iterator; import java.util.Map; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; public class TestNetworkTopologyServlet { @@ -83,8 +83,7 @@ public void testPrintTopologyTextFormat() throws IOException { assertTrue(topology.contains("/rack4")); // assert node number - assertEquals(topology.split("127.0.0.1").length - 1, - dataNodesNum); + assertEquals(topology.split("127.0.0.1").length - 1, dataNodesNum); } @Test diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestParallelImageWrite.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestParallelImageWrite.java index 86ae642fb8a8c..40d6125790e6a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestParallelImageWrite.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestParallelImageWrite.java @@ -18,9 +18,9 @@ package org.apache.hadoop.hdfs.server.namenode; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; import java.io.File; import java.util.Collections; @@ -37,7 +37,7 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction; import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory; import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType; -import org.junit.Test; +import org.junit.jupiter.api.Test; /** * A JUnit test for checking if restarting DFS preserves integrity. 
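The assertion rewrites in the hunks above all move the failure message from the first JUnit 4 argument to the last JUnit 5 argument, leaving the expected and actual values in place. A minimal sketch of that reordering, using hypothetical values:

// Sketch only: the class, method, and messages are hypothetical.
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertTrue;

public class AssertionMessageExample {

  static void check(int numImageDirs) {
    // JUnit 4: assertTrue("Not enough fsimage copies", numImageDirs > 1);
    assertTrue(numImageDirs > 1, "Not enough fsimage copies");

    // JUnit 4: assertEquals("wrong dir count", 2, numImageDirs);
    assertEquals(2, numImageDirs, "wrong dir count");
  }
}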
@@ -69,8 +69,7 @@ public void testRestartDFS() throws Exception { String[] nameNodeDirs = conf.getStrings( DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, new String[] {}); numNamenodeDirs = nameNodeDirs.length; - assertTrue("failed to get number of Namenode StorageDirs", - numNamenodeDirs != 0); + assertTrue(numNamenodeDirs != 0, "failed to get number of Namenode StorageDirs"); FileSystem fs = cluster.getFileSystem(); files.createFiles(fs, dir); @@ -93,8 +92,7 @@ public void testRestartDFS() throws Exception { .numDataNodes(NUM_DATANODES).build(); fsn = cluster.getNamesystem(); FileSystem fs = cluster.getFileSystem(); - assertTrue("Filesystem corrupted after restart.", - files.checkFiles(fs, dir)); + assertTrue(files.checkFiles(fs, dir), "Filesystem corrupted after restart."); final FileStatus newrootstatus = fs.getFileStatus(rootpath); assertEquals(rootmtime, newrootstatus.getModificationTime()); @@ -114,9 +112,9 @@ public void testRestartDFS() throws Exception { fsn.setSafeMode(SafeModeAction.SAFEMODE_ENTER); cluster.getNameNodeRpc().saveNamespace(0, 0); final String checkAfterModify = checkImages(fsn, numNamenodeDirs); - assertFalse("Modified namespace should change fsimage contents. " + - "was: " + checkAfterRestart + " now: " + checkAfterModify, - checkAfterRestart.equals(checkAfterModify)); + assertFalse(checkAfterRestart.equals(checkAfterModify), + "Modified namespace should change fsimage contents. " + "was: " + checkAfterRestart + + " now: " + checkAfterModify); fsn.setSafeMode(SafeModeAction.SAFEMODE_LEAVE); files.cleanup(fs, dir); } finally { @@ -139,10 +137,10 @@ public static String checkImages( throws Exception { NNStorage stg = fsn.getFSImage().getStorage(); //any failed StorageDirectory is removed from the storageDirs list - assertEquals("Some StorageDirectories failed Upgrade", - numImageDirs, stg.getNumStorageDirs(NameNodeDirType.IMAGE)); - assertTrue("Not enough fsimage copies in MiniDFSCluster " + - "to test parallel write", numImageDirs > 1); + assertEquals(numImageDirs, stg.getNumStorageDirs(NameNodeDirType.IMAGE), + "Some StorageDirectories failed Upgrade"); + assertTrue(numImageDirs > 1, + "Not enough fsimage copies in MiniDFSCluster " + "to test parallel write"); // List of "current/" directory from each SD List dirs = FSImageTestUtil.getCurrentDirs(stg, NameNodeDirType.IMAGE); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPathComponents.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPathComponents.java index 189f34cab0f4b..5f189c03474a8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPathComponents.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPathComponents.java @@ -17,11 +17,11 @@ */ package org.apache.hadoop.hdfs.server.namenode; -import static org.junit.Assert.assertEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; import java.util.Arrays; import org.apache.hadoop.hdfs.DFSUtil; -import org.junit.Test; +import org.junit.jupiter.api.Test; /** * diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPersistentStoragePolicySatisfier.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPersistentStoragePolicySatisfier.java index 5a6d12a27ebd9..a016c2acd0961 100644 --- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPersistentStoragePolicySatisfier.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPersistentStoragePolicySatisfier.java @@ -35,7 +35,8 @@ import org.apache.hadoop.hdfs.server.namenode.sps.StoragePolicySatisfier; import org.apache.hadoop.hdfs.server.sps.ExternalSPSContext; import org.apache.hadoop.test.GenericTestUtils; -import org.junit.Test; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; import java.util.function.Supplier; @@ -43,7 +44,8 @@ import java.util.List; import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.XATTR_SATISFY_STORAGE_POLICY; -import static org.junit.Assert.*; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; /** * Test persistence of satisfying files/directories. @@ -188,7 +190,8 @@ private void clusterShutdown() throws IOException{ * 4. make sure all the storage policies are satisfied. * @throws Exception */ - @Test(timeout = 300000) + @Test + @Timeout(value = 300) public void testWithCheckpoint() throws Exception { SecondaryNameNode secondary = null; try { @@ -235,7 +238,8 @@ public void testWithCheckpoint() throws Exception { * 6. check whether all the blocks are satisfied. * @throws Exception */ - @Test(timeout = 300000) + @Test + @Timeout(value = 300) public void testWithRestarts() throws Exception { try { clusterSetUp(); @@ -271,7 +275,8 @@ public void testWithRestarts() throws Exception { * 4. make sure step 3 works as expected. * @throws Exception */ - @Test(timeout = 300000) + @Test + @Timeout(value = 300) public void testMultipleSatisfyStoragePolicy() throws Exception { try { // Lower block movement check for testing. @@ -310,7 +315,8 @@ public void testMultipleSatisfyStoragePolicy() throws Exception { * 3. make sure sps xattr is removed. * @throws Exception */ - @Test(timeout = 300000000) + @Test + @Timeout(value = 300000) public void testDropSPS() throws Exception { try { clusterSetUp(); @@ -334,7 +340,8 @@ public void testDropSPS() throws Exception { * * @throws Exception */ - @Test(timeout = 300000) + @Test + @Timeout(value = 300) public void testSPSShouldNotLeakXattrIfStorageAlreadySatisfied() throws Exception { try { @@ -367,7 +374,8 @@ public void testSPSShouldNotLeakXattrIfStorageAlreadySatisfied() * 5. restart the namenode. * NameNode should be started successfully. */ - @Test(timeout = 300000) + @Test + @Timeout(value = 300) public void testNameNodeRestartWhenSPSCalledOnChildFileAndParentDir() throws Exception { try { @@ -403,7 +411,8 @@ public void testNameNodeRestartWhenSPSCalledOnChildFileAndParentDir() * 5. restart the namenode. * All the file blocks should satisfy the policy. */ - @Test(timeout = 300000) + @Test + @Timeout(value = 300) public void testSPSOnChildAndParentDirectory() throws Exception { try { clusterSetUp(); @@ -423,7 +432,8 @@ public void testSPSOnChildAndParentDirectory() throws Exception { * Test SPS xAttr on directory. xAttr should be removed from the directory * once all the files blocks moved to specific storage. 
*/ - @Test(timeout = 300000) + @Test + @Timeout(value = 300) public void testSPSxAttrWhenSpsCalledForDir() throws Exception { try { clusterSetUp(); @@ -449,8 +459,7 @@ public void testSPSxAttrWhenSpsCalledForDir() throws Exception { FSNamesystem namesystem = cluster.getNamesystem(); INode inode = namesystem.getFSDirectory().getINode("/parent"); XAttrFeature f = inode.getXAttrFeature(); - assertTrue("SPS xAttr should be exist", - f.getXAttr(XATTR_SATISFY_STORAGE_POLICY) != null); + assertTrue(f.getXAttr(XATTR_SATISFY_STORAGE_POLICY) != null, "SPS xAttr should be exist"); // check for the child, SPS xAttr should not be there for (int i = 0; i < 5; i++) { @@ -478,7 +487,8 @@ public void testSPSxAttrWhenSpsCalledForDir() throws Exception { * Test SPS xAttr on file. xAttr should be removed from the file * once all the blocks moved to specific storage. */ - @Test(timeout = 300000) + @Test + @Timeout(value = 300) public void testSPSxAttrWhenSpsCalledForFile() throws Exception { try { clusterSetUp(); @@ -497,8 +507,7 @@ public void testSPSxAttrWhenSpsCalledForFile() throws Exception { FSNamesystem namesystem = cluster.getNamesystem(); INode inode = namesystem.getFSDirectory().getINode("/file"); XAttrFeature f = inode.getXAttrFeature(); - assertTrue("SPS xAttr should be exist", - f.getXAttr(XATTR_SATISFY_STORAGE_POLICY) != null); + assertTrue(f.getXAttr(XATTR_SATISFY_STORAGE_POLICY) != null, "SPS xAttr should be exist"); cluster.restartDataNode(stopDataNode, false); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestProcessCorruptBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestProcessCorruptBlocks.java index d17d800b4ed03..5899e78cad8a4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestProcessCorruptBlocks.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestProcessCorruptBlocks.java @@ -17,8 +17,8 @@ */ package org.apache.hadoop.hdfs.server.namenode; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; import java.io.File; import java.io.IOException; @@ -35,7 +35,8 @@ import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager; import org.apache.hadoop.hdfs.server.blockmanagement.NumberReplicas; import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils; -import org.junit.Test; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; public class TestProcessCorruptBlocks { /** @@ -160,7 +161,8 @@ public void testByAddingAnExtraDataNode() throws Exception { * (corrupt replica should be removed since number of good * replicas (1) is equal to replication factor (1)) */ - @Test(timeout=20000) + @Test + @Timeout(value = 20) public void testWithReplicationFactorAsOne() throws Exception { Configuration conf = new HdfsConfiguration(); conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000L); @@ -288,7 +290,7 @@ private void corruptBlock(MiniDFSCluster cluster, FileSystem fs, final Path file if (scanLogFile.exists()) { // wait for one minute for deletion to succeed; for (int i = 0; !scanLogFile.delete(); i++) { - assertTrue("Could not delete log file in one minute", i < 60); + assertTrue(i < 60, "Could not delete log file in one minute"); try { Thread.sleep(1000); } catch (InterruptedException ignored) { diff 
--git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestProtectedDirectories.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestProtectedDirectories.java index ea68ee705bafb..c76c759cc24e3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestProtectedDirectories.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestProtectedDirectories.java @@ -32,9 +32,8 @@ import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.hdfs.server.namenode.FSDirectory; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.Timeout; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -43,22 +42,19 @@ import java.util.*; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_PROTECTED_SUBDIRECTORIES_ENABLE; -import static org.hamcrest.core.Is.is; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertThat; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_PROTECTED_DIRECTORIES; +import static org.assertj.core.api.Assertions.assertThat; /** * Verify that the dfs.namenode.protected.directories setting is respected. */ +@Timeout(300) public class TestProtectedDirectories { static final Logger LOG = LoggerFactory.getLogger( TestProtectedDirectories.class); - @Rule - public Timeout timeout = new Timeout(300000); - /** * Start a namenode-only 'cluster' which is configured to protect * the given list of directories. 
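The hunk above drops the JUnit 4 @Rule-based timeout in favour of a class-level JUnit 5 @Timeout that applies to every test method. A minimal sketch of that change on a hypothetical class:

// Sketch only: the class name is hypothetical.
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.Timeout;

@Timeout(300)   // seconds; applies to every test method in the class
public class ClassTimeoutExample {

  // JUnit 4 equivalent that this replaces:
  //   @Rule
  //   public Timeout timeout = new Timeout(300000);   // milliseconds

  @Test
  public void anyTest() {
    // runs under the 300-second class-level limit
  }
}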
@@ -239,21 +235,21 @@ public void testReconfigureProtectedPaths() throws Throwable { FSDirectory fsDirectory = nn.getNamesystem().getFSDirectory(); // verify change - assertEquals(String.format("%s has wrong value", FS_PROTECTED_DIRECTORIES), - protectedPathsNew, fsDirectory.getProtectedDirectories()); + assertEquals(protectedPathsNew, fsDirectory.getProtectedDirectories(), + String.format("%s has wrong value", FS_PROTECTED_DIRECTORIES)); - assertEquals(String.format("%s has wrong value", FS_PROTECTED_DIRECTORIES), - protectedPathsStrNew, nn.getConf().get(FS_PROTECTED_DIRECTORIES)); + assertEquals(protectedPathsStrNew, nn.getConf().get(FS_PROTECTED_DIRECTORIES), + String.format("%s has wrong value", FS_PROTECTED_DIRECTORIES)); // revert to default nn.reconfigureProperty(FS_PROTECTED_DIRECTORIES, null); // verify default - assertEquals(String.format("%s has wrong value", FS_PROTECTED_DIRECTORIES), - new TreeSet(), fsDirectory.getProtectedDirectories()); + assertEquals(new TreeSet(), fsDirectory.getProtectedDirectories(), + String.format("%s has wrong value", FS_PROTECTED_DIRECTORIES)); - assertEquals(String.format("%s has wrong value", FS_PROTECTED_DIRECTORIES), - null, nn.getConf().get(FS_PROTECTED_DIRECTORIES)); + assertEquals(null, nn.getConf().get(FS_PROTECTED_DIRECTORIES), + String.format("%s has wrong value", FS_PROTECTED_DIRECTORIES)); } @Test @@ -269,16 +265,15 @@ public void testDelete() throws Throwable { FileSystem fs = cluster.getFileSystem(); for (Path path : testMatrixEntry.getAllPathsToBeDeleted()) { final long countBefore = cluster.getNamesystem().getFilesTotal(); - assertThat( - testMatrixEntry + ": Testing whether " + path + " can be deleted", - deletePath(fs, path), - is(testMatrixEntry.canPathBeDeleted(path))); + assertThat(deletePath(fs, path)) + .as(testMatrixEntry + ": Testing whether " + path + " can be deleted") + .isEqualTo(testMatrixEntry.canPathBeDeleted(path)); final long countAfter = cluster.getNamesystem().getFilesTotal(); if (!testMatrixEntry.canPathBeDeleted(path)) { - assertThat( - "Either all paths should be deleted or none", - countAfter, is(countBefore)); + assertThat(countAfter) + .as("Either all paths should be deleted or none") + .isEqualTo(countBefore); } } } finally { @@ -301,10 +296,10 @@ public void testMoveToTrash() throws Throwable { FileSystem fs = cluster.getFileSystem(); for (Path path : testMatrixEntry.getAllPathsToBeDeleted()) { assertThat( - testMatrixEntry + ": Testing whether " + path + - " can be moved to trash", - moveToTrash(fs, path, conf), - is(testMatrixEntry.canPathBeDeleted(path))); + moveToTrash(fs, path, conf)) + .as(testMatrixEntry + ": Testing whether " + path + + " can be moved to trash") + .isEqualTo(testMatrixEntry.canPathBeDeleted(path)); } } finally { cluster.shutdown(); @@ -328,11 +323,11 @@ public void testRename() throws Throwable { FileSystem fs = cluster.getFileSystem(); for (Path srcPath : testMatrixEntry.getAllPathsToBeDeleted()) { assertThat( - testMatrixEntry + ": Testing whether " - + srcPath + " can be renamed", renamePath(fs, srcPath, - new Path(srcPath.toString() + "_renamed")), - is(testMatrixEntry.canPathBeRenamed(srcPath))); + renamePath(fs, srcPath, new Path(srcPath.toString() + "_renamed"))) + .as(testMatrixEntry + ": Testing whether " + + srcPath + " can be renamed") + .isEqualTo(testMatrixEntry.canPathBeRenamed(srcPath)); } } finally { cluster.shutdown(); @@ -355,11 +350,11 @@ public void testRenameProtectSubDirs() throws Throwable { FileSystem fs = cluster.getFileSystem(); for (Path srcPath :
testMatrixEntry.getAllPathsToBeDeleted()) { assertThat( - testMatrixEntry + ": Testing whether " - + srcPath + " can be renamed", - renamePath(fs, srcPath, - new Path(srcPath.toString() + "_renamed")), - is(testMatrixEntry.canPathBeRenamed(srcPath))); + renamePath(fs, srcPath, + new Path(srcPath.toString() + "_renamed"))) + .as(testMatrixEntry + ": Testing whether " + + srcPath + " can be renamed") + .isEqualTo(testMatrixEntry.canPathBeRenamed(srcPath)); } } finally { cluster.shutdown(); @@ -383,10 +378,10 @@ public void testMoveProtectedSubDirsToTrash() throws Throwable { FileSystem fs = cluster.getFileSystem(); for (Path srcPath : testMatrixEntry.getAllPathsToBeDeleted()) { assertThat( - testMatrixEntry + ": Testing whether " - + srcPath + " can be moved to trash", - moveToTrash(fs, srcPath, conf), - is(testMatrixEntry.canPathBeRenamed(srcPath))); + moveToTrash(fs, srcPath, conf)) + .as(testMatrixEntry + ": Testing whether " + + srcPath + " can be moved to trash") + .isEqualTo(testMatrixEntry.canPathBeRenamed(srcPath)); } } finally { cluster.shutdown(); @@ -409,17 +404,16 @@ public void testDeleteProtectSubDirs() throws Throwable { FileSystem fs = cluster.getFileSystem(); for (Path path : testMatrixEntry.getAllPathsToBeDeleted()) { final long countBefore = cluster.getNamesystem().getFilesTotal(); - assertThat( - testMatrixEntry + ": Testing whether " - + path + " can be deleted", - deletePath(fs, path), - is(testMatrixEntry.canPathBeDeleted(path))); + assertThat(deletePath(fs, path)) + .as(testMatrixEntry + ": Testing whether " + + path + " can be deleted") + .isEqualTo(testMatrixEntry.canPathBeDeleted(path)); final long countAfter = cluster.getNamesystem().getFilesTotal(); if (!testMatrixEntry.canPathBeDeleted(path)) { - assertThat( - "Either all paths should be deleted or none", - countAfter, is(countBefore)); + assertThat(countAfter) + .as("Either all paths should be deleted or none") + .isEqualTo(countBefore); } } } finally { @@ -439,8 +433,8 @@ public void testProtectedDirNormalization1() { CommonConfigurationKeys.FS_PROTECTED_DIRECTORIES, "/foo//bar"); Collection paths = FSDirectory.parseProtectedDirectories(conf); - assertThat(paths.size(), is(1)); - assertThat(paths.iterator().next(), is("/foo/bar")); + assertThat(paths.size()).isEqualTo(1); + assertThat(paths.iterator().next()).isEqualTo("/foo/bar"); } /** @@ -470,8 +464,8 @@ public void testProtectedDirIsCanonicalized() { CommonConfigurationKeys.FS_PROTECTED_DIRECTORIES, "/foo/../bar/"); Collection paths = FSDirectory.parseProtectedDirectories(conf); - assertThat(paths.size(), is(1)); - assertThat(paths.iterator().next(), is("/bar")); + assertThat(paths.size()).isEqualTo(1); + assertThat(paths.iterator().next()).isEqualTo("/bar"); } /** @@ -483,8 +477,8 @@ public void testProtectedRootDirectory() { conf.set( CommonConfigurationKeys.FS_PROTECTED_DIRECTORIES, "/"); Collection paths = FSDirectory.parseProtectedDirectories(conf); - assertThat(paths.size(), is(1)); - assertThat(paths.iterator().next(), is("/")); + assertThat(paths.size()).isEqualTo(1); + assertThat(paths.iterator().next()).isEqualTo("/"); } /** @@ -498,8 +492,9 @@ public void testBadPathsInConfig() { CommonConfigurationKeys.FS_PROTECTED_DIRECTORIES, "hdfs://foo/,/.reserved/foo"); Collection paths = FSDirectory.parseProtectedDirectories(conf); - assertThat("Unexpected directories " + paths, - paths.size(), is(0)); + assertThat(paths.size()) + .as("Unexpected directories " + paths) + .isEqualTo(0); } /** diff --git
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestQuotaByStorageType.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestQuotaByStorageType.java index 0449def3efbbb..24c60533d30c9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestQuotaByStorageType.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestQuotaByStorageType.java @@ -33,19 +33,18 @@ import org.apache.hadoop.hdfs.protocol.QuotaByStorageTypeExceededException; import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotTestHelper; import org.apache.hadoop.test.GenericTestUtils; -import org.junit.After; -import org.junit.Before; -import org.junit.Test; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; import java.io.IOException; -import static org.hamcrest.CoreMatchers.containsString; -import static org.hamcrest.CoreMatchers.allOf; -import static org.hamcrest.CoreMatchers.is; -import static org.junit.Assert.assertThat; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; +import static org.assertj.core.api.Assertions.assertThat; public class TestQuotaByStorageType { @@ -62,7 +61,7 @@ public class TestQuotaByStorageType { protected static final Logger LOG = LoggerFactory.getLogger(TestQuotaByStorageType.class); - @Before + @BeforeEach public void setUp() throws Exception { Configuration conf = new Configuration(); conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCKSIZE); @@ -78,7 +77,7 @@ public void setUp() throws Exception { refreshClusterState(); } - @After + @AfterEach public void tearDown() throws Exception { if (cluster != null) { cluster.shutdown(); @@ -93,7 +92,8 @@ private void refreshClusterState() throws IOException{ fsn = cluster.getNamesystem(); } - @Test(timeout = 60000) + @Test + @Timeout(value = 60) public void testQuotaByStorageTypeWithFileCreateOneSSD() throws Exception { testQuotaByStorageTypeWithFileCreateCase( HdfsConstants.ONESSD_STORAGE_POLICY_NAME, @@ -101,7 +101,8 @@ public void testQuotaByStorageTypeWithFileCreateOneSSD() throws Exception { (short)1); } - @Test(timeout = 60000) + @Test + @Timeout(value = 60) public void testQuotaByStorageTypeWithFileCreateAllSSD() throws Exception { testQuotaByStorageTypeWithFileCreateCase( HdfsConstants.ALLSSD_STORAGE_POLICY_NAME, @@ -136,7 +137,8 @@ void testQuotaByStorageTypeWithFileCreateCase( assertEquals(file1Len * replication, storageTypeConsumed); } - @Test(timeout = 60000) + @Test + @Timeout(value = 60) public void testQuotaByStorageTypeWithFileCreateAppend() throws Exception { final Path foo = new Path(dir, "foo"); Path createdFile1 = new Path(foo, "created_file1.data"); @@ -176,7 +178,8 @@ public void testQuotaByStorageTypeWithFileCreateAppend() throws Exception { assertEquals(cs.getTypeConsumed(StorageType.DISK), file1Len * 2); } - @Test(timeout = 60000) + @Test + @Timeout(value = 60) public void testQuotaByStorageTypeWithFileCreateDelete() throws Exception { final Path foo = new Path(dir, "foo"); Path createdFile1 = new Path(foo, "created_file1.data"); @@ -208,8 +211,8 @@ public void 
testQuotaByStorageTypeWithFileCreateDelete() throws Exception { QuotaCounts counts = fnode.computeQuotaUsage( fsn.getBlockManager().getStoragePolicySuite(), true); - assertEquals(fnode.dumpTreeRecursively().toString(), 0, - counts.getTypeSpaces().get(StorageType.SSD)); + assertEquals(0, counts.getTypeSpaces().get(StorageType.SSD), + fnode.dumpTreeRecursively().toString()); ContentSummary cs = dfs.getContentSummary(foo); assertEquals(cs.getSpaceConsumed(), 0); @@ -217,7 +220,8 @@ public void testQuotaByStorageTypeWithFileCreateDelete() throws Exception { assertEquals(cs.getTypeConsumed(StorageType.DISK), 0); } - @Test(timeout = 60000) + @Test + @Timeout(value = 60) public void testQuotaByStorageTypeWithFileCreateRename() throws Exception { final Path foo = new Path(dir, "foo"); dfs.mkdirs(foo); @@ -267,7 +271,8 @@ public void testQuotaByStorageTypeWithFileCreateRename() throws Exception { * Test if the quota can be correctly updated for create file even * QuotaByStorageTypeExceededException is thrown */ - @Test(timeout = 60000) + @Test + @Timeout(value = 60) public void testQuotaByStorageTypeExceptionWithFileCreate() throws Exception { final Path foo = new Path(dir, "foo"); Path createdFile1 = new Path(foo, "created_file1.data"); @@ -313,7 +318,8 @@ public void testQuotaByStorageTypeExceptionWithFileCreate() throws Exception { } } - @Test(timeout = 60000) + @Test + @Timeout(value = 60) public void testQuotaByStorageTypeParentOffChildOff() throws Exception { final Path parent = new Path(dir, "parent"); final Path child = new Path(parent, "child"); @@ -339,7 +345,8 @@ public void testQuotaByStorageTypeParentOffChildOff() throws Exception { } - @Test(timeout = 60000) + @Test + @Timeout(value = 60) public void testQuotaByStorageTypeParentOffChildOn() throws Exception { final Path parent = new Path(dir, "parent"); final Path child = new Path(parent, "child"); @@ -364,7 +371,8 @@ public void testQuotaByStorageTypeParentOffChildOn() throws Exception { } } - @Test(timeout = 60000) + @Test + @Timeout(value = 60) public void testQuotaByStorageTypeParentOnChildOff() throws Exception { short replication = 1; final Path parent = new Path(dir, "parent"); @@ -405,7 +413,8 @@ public void testQuotaByStorageTypeParentOnChildOff() throws Exception { } } - @Test(timeout = 60000) + @Test + @Timeout(value = 60) public void testQuotaByStorageTypeParentOnChildOn() throws Exception { final Path parent = new Path(dir, "parent"); final Path child = new Path(parent, "child"); @@ -434,7 +443,8 @@ public void testQuotaByStorageTypeParentOnChildOn() throws Exception { * Both traditional space quota and the storage type quota for SSD are set and * not exceeded. */ - @Test(timeout = 60000) + @Test + @Timeout(value = 60) public void testQuotaByStorageTypeWithTraditionalQuota() throws Exception { final Path foo = new Path(dir, "foo"); dfs.mkdirs(foo); @@ -467,10 +477,8 @@ public void testQuotaByStorageTypeWithTraditionalQuota() throws Exception { // Validate the computeQuotaUsage() QuotaCounts counts = fnode.computeQuotaUsage( fsn.getBlockManager().getStoragePolicySuite(), true); - assertEquals(fnode.dumpTreeRecursively().toString(), 1, - counts.getNameSpace()); - assertEquals(fnode.dumpTreeRecursively().toString(), 0, - counts.getStorageSpace()); + assertEquals(1, counts.getNameSpace(), fnode.dumpTreeRecursively().toString()); + assertEquals(0, counts.getStorageSpace(), fnode.dumpTreeRecursively().toString()); } /** @@ -478,7 +486,8 @@ public void testQuotaByStorageTypeWithTraditionalQuota() throws Exception { * exceeded. 
expect DSQuotaExceededException is thrown as we check traditional * space quota first and then storage type quota. */ - @Test(timeout = 60000) + @Test + @Timeout(value = 60) public void testQuotaByStorageTypeAndTraditionalQuotaException1() throws Exception { testQuotaByStorageTypeOrTraditionalQuotaExceededCase( @@ -489,7 +498,8 @@ public void testQuotaByStorageTypeAndTraditionalQuotaException1() * Both traditional space quota and the storage type quota for SSD are set and * SSD quota is exceeded but traditional space quota is not exceeded. */ - @Test(timeout = 60000) + @Test + @Timeout(value = 60) public void testQuotaByStorageTypeAndTraditionalQuotaException2() throws Exception { testQuotaByStorageTypeOrTraditionalQuotaExceededCase( @@ -500,7 +510,8 @@ public void testQuotaByStorageTypeAndTraditionalQuotaException2() * Both traditional space quota and the storage type quota for SSD are set and * traditional space quota is exceeded but SSD quota is not exceeded. */ - @Test(timeout = 60000) + @Test + @Timeout(value = 60) public void testQuotaByStorageTypeAndTraditionalQuotaException3() throws Exception { testQuotaByStorageTypeOrTraditionalQuotaExceededCase( @@ -538,12 +549,12 @@ private void testQuotaByStorageTypeOrTraditionalQuotaExceededCase( LOG.info("Got expected exception ", t); long currentSSDConsumed = testDirNode.asDirectory().getDirectoryWithQuotaFeature() .getSpaceConsumed().getTypeSpaces().get(StorageType.SSD); - assertEquals(Math.min(ssdQuota, storageSpaceQuota/replication), - currentSSDConsumed); + assertEquals(Math.min(ssdQuota, storageSpaceQuota / replication), currentSSDConsumed); } } - @Test(timeout = 60000) + @Test + @Timeout(value = 60) public void testQuotaByStorageTypeWithSnapshot() throws Exception { final Path sub1 = new Path(dir, "Sub1"); dfs.mkdirs(sub1); @@ -579,8 +590,8 @@ public void testQuotaByStorageTypeWithSnapshot() throws Exception { QuotaCounts counts1 = sub1Node.computeQuotaUsage( fsn.getBlockManager().getStoragePolicySuite(), true); - assertEquals(sub1Node.dumpTreeRecursively().toString(), file1Len, - counts1.getTypeSpaces().get(StorageType.SSD)); + assertEquals(file1Len, counts1.getTypeSpaces().get(StorageType.SSD), + sub1Node.dumpTreeRecursively().toString()); ContentSummary cs1 = dfs.getContentSummary(sub1); assertEquals(cs1.getSpaceConsumed(), file1Len * REPLICATION); @@ -597,8 +608,8 @@ public void testQuotaByStorageTypeWithSnapshot() throws Exception { QuotaCounts counts2 = sub1Node.computeQuotaUsage( fsn.getBlockManager().getStoragePolicySuite(), true); - assertEquals(sub1Node.dumpTreeRecursively().toString(), 0, - counts2.getTypeSpaces().get(StorageType.SSD)); + assertEquals(0, counts2.getTypeSpaces().get(StorageType.SSD), + sub1Node.dumpTreeRecursively().toString()); ContentSummary cs2 = dfs.getContentSummary(sub1); assertEquals(cs2.getSpaceConsumed(), 0); @@ -606,7 +617,8 @@ public void testQuotaByStorageTypeWithSnapshot() throws Exception { assertEquals(cs2.getTypeConsumed(StorageType.DISK), 0); } - @Test(timeout = 60000) + @Test + @Timeout(value = 60) public void testQuotaByStorageTypeWithFileCreateTruncate() throws Exception { final Path foo = new Path(dir, "foo"); Path createdFile1 = new Path(foo, "created_file1.data"); @@ -748,7 +760,8 @@ public void testQuotaByStorageTypePersistenceInFsImage() throws IOException { assertEquals(file1Len, ssdConsumedAfterNNRestart); } - @Test(timeout = 60000) + @Test + @Timeout(value = 60) public void testContentSummaryWithoutQuotaByStorageType() throws Exception { final Path foo = new Path(dir, "foo"); Path 
createdFile1 = new Path(foo, "created_file1.data"); @@ -773,7 +786,8 @@ public void testContentSummaryWithoutQuotaByStorageType() throws Exception { assertEquals(cs.getTypeConsumed(StorageType.DISK), file1Len * 2); } - @Test(timeout = 60000) + @Test + @Timeout(value = 60) public void testContentSummaryWithoutStoragePolicy() throws Exception { final Path foo = new Path(dir, "foo"); Path createdFile1 = new Path(foo, "created_file1.data"); @@ -851,10 +865,10 @@ public void testStorageSpaceQuotaWithWarmPolicy() throws IOException { fail("should fail on QuotaByStorageTypeExceededException"); } catch (QuotaByStorageTypeExceededException e) { LOG.info("Got expected exception ", e); - assertThat(e.toString(), - is(allOf(containsString("Quota by storage type"), - containsString("DISK on path"), - containsString(testDir.toString())))); + assertThat(e.toString()) + .contains("Quota by storage type", + "DISK on path", + testDir.toString()); } } @@ -862,7 +876,8 @@ public void testStorageSpaceQuotaWithWarmPolicy() throws IOException { * Tests if changing replication factor results in copying file as quota * doesn't exceed. */ - @Test(timeout = 30000) + @Test + @Timeout(value = 30) public void testStorageSpaceQuotaWithRepFactor() throws IOException { final Path testDir = new Path(dir, GenericTestUtils.getMethodName()); @@ -888,9 +903,8 @@ public void testStorageSpaceQuotaWithRepFactor() throws IOException { fail("should fail on DSQuotaExceededException"); } catch (DSQuotaExceededException e) { LOG.info("Got expected exception ", e); - assertThat(e.toString(), - is(allOf(containsString("DiskSpace quota"), - containsString(testDir.toString())))); + assertThat(e.toString()) + .contains("DiskSpace quota", testDir.toString()); } /* try creating file again with 2 replicas */ @@ -907,7 +921,8 @@ public void testStorageSpaceQuotaWithRepFactor() throws IOException { * * @throws IOException */ - @Test(timeout = 30000) + @Test + @Timeout(value = 30) public void testStorageSpaceQuotaPerQuotaClear() throws IOException { final Path testDir = new Path(dir, GenericTestUtils.getMethodName()); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestQuotaCounts.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestQuotaCounts.java index e731f6856992c..637c928ab6002 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestQuotaCounts.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestQuotaCounts.java @@ -19,10 +19,10 @@ import org.apache.hadoop.fs.StorageType; import org.apache.hadoop.hdfs.protocol.HdfsConstants; -import org.junit.Test; +import org.junit.jupiter.api.Test; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertSame; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertSame; /** * Test QuotaCounts. 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestQuotaWithStripedBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestQuotaWithStripedBlocks.java index e89ed8dbb6bc1..aa67138e34c3b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestQuotaWithStripedBlocks.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestQuotaWithStripedBlocks.java @@ -31,18 +31,19 @@ import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.io.IOUtils; -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; -import org.junit.Rule; -import org.junit.rules.Timeout; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; import java.io.IOException; +import static org.junit.jupiter.api.Assertions.assertEquals; + /** * Make sure we correctly update the quota usage with the striped blocks. */ +@Timeout(300) public class TestQuotaWithStripedBlocks { private int blockSize; private ErasureCodingPolicy ecPolicy; @@ -61,10 +62,7 @@ public ErasureCodingPolicy getEcPolicy() { return StripedFileTestUtil.getDefaultECPolicy(); } - @Rule - public Timeout globalTimeout = new Timeout(300000); - - @Before + @BeforeEach public void setUp() throws IOException { blockSize = 1024 * 1024; ecPolicy = getEcPolicy(); @@ -92,7 +90,7 @@ public void setUp() throws IOException { dfs.setStoragePolicy(ecDir, HdfsConstants.HOT_STORAGE_POLICY_NAME); } - @After + @AfterEach public void tearDown() { if (cluster != null) { cluster.shutdown(); @@ -124,8 +122,8 @@ public void testUpdatingQuotaCount() throws Exception { final long diskUsed = dirNode.getDirectoryWithQuotaFeature() .getSpaceConsumed().getTypeSpaces().get(StorageType.DISK); // When we add a new block we update the quota using the full block size. - Assert.assertEquals(blockSize * groupSize, spaceUsed); - Assert.assertEquals(blockSize * groupSize, diskUsed); + assertEquals(blockSize * groupSize, spaceUsed); + assertEquals(blockSize * groupSize, diskUsed); dfs.getClient().getNamenode().complete(file.toString(), dfs.getClient().getClientName(), previous, fileNode.getId()); @@ -135,10 +133,8 @@ public void testUpdatingQuotaCount() throws Exception { final long actualDiskUsed = dirNode.getDirectoryWithQuotaFeature() .getSpaceConsumed().getTypeSpaces().get(StorageType.DISK); // In this case the file's real size is cell size * block group size. 
- Assert.assertEquals(cellSize * groupSize, - actualSpaceUsed); - Assert.assertEquals(cellSize * groupSize, - actualDiskUsed); + assertEquals(cellSize * groupSize, actualSpaceUsed); + assertEquals(cellSize * groupSize, actualDiskUsed); } finally { IOUtils.cleanupWithLogger(null, out); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestReconstructStripedBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestReconstructStripedBlocks.java index 8086c6b54163e..f855b2e0a30a4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestReconstructStripedBlocks.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestReconstructStripedBlocks.java @@ -49,8 +49,8 @@ import org.apache.hadoop.hdfs.util.StripedBlockUtil; import org.apache.hadoop.hdfs.util.RwLockMode; import org.apache.hadoop.test.GenericTestUtils; -import org.junit.Assert; -import org.junit.Test; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -59,9 +59,9 @@ import java.util.Iterator; import java.util.List; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; public class TestReconstructStripedBlocks { public static final Logger LOG = LoggerFactory.getLogger( @@ -138,8 +138,7 @@ private void doTestMissingStripedBlock(int numOfMissed, int numOfBusy) for (BlockInfo blk : blocks) { assertTrue(blk.isStriped()); assertTrue(blk.isComplete()); - assertEquals(cellSize * dataBlocks, - blk.getNumBytes()); + assertEquals(cellSize * dataBlocks, blk.getNumBytes()); final BlockInfoStriped sb = (BlockInfoStriped) blk; assertEquals(groupSize, sb.numNodes()); } @@ -173,15 +172,14 @@ private void doTestMissingStripedBlock(int numOfMissed, int numOfBusy) DataNode lastDn = cluster.getDataNodes().get(groupSize); DatanodeDescriptor last = bm.getDatanodeManager().getDatanode(lastDn.getDatanodeId()); - assertEquals("Counting the number of outstanding EC tasks", numBlocks, - last.getNumberOfBlocksToBeErasureCoded()); + assertEquals(numBlocks, last.getNumberOfBlocksToBeErasureCoded(), + "Counting the number of outstanding EC tasks"); List reconstruction = last.getErasureCodeCommand(numBlocks); for (BlockECReconstructionInfo info : reconstruction) { assertEquals(1, info.getTargetDnInfos().length); assertEquals(last, info.getTargetDnInfos()[0]); - assertEquals(info.getSourceDnInfos().length, - info.getLiveBlockIndices().length); + assertEquals(info.getSourceDnInfos().length, info.getLiveBlockIndices().length); if (groupSize - numOfMissed == dataBlocks) { // It's a QUEUE_HIGHEST_PRIORITY block, so the busy DNs will be chosen // to make sure we have NUM_DATA_BLOCKS DNs to do reconstruction @@ -190,8 +188,7 @@ private void doTestMissingStripedBlock(int numOfMissed, int numOfBusy) } else { // The block has no highest priority, so we don't use the busy DNs as // sources - assertEquals(groupSize - numOfMissed - numOfBusy, - info.getSourceDnInfos().length); + assertEquals(groupSize - numOfMissed - numOfBusy, info.getSourceDnInfos().length); } } BlockManagerTestUtil.updateState(bm); @@ -354,7 +351,7 @@ public void testCountLiveReplicas() 
throws Exception { Thread.sleep(1000); } } - Assert.assertTrue(reconstructed); + assertTrue(reconstructed); blks = fs.getClient().getLocatedBlocks(filePath.toString(), 0); block = (LocatedStripedBlock) blks.getLastLocatedBlock(); @@ -363,14 +360,15 @@ public void testCountLiveReplicas() throws Exception { bitSet.set(index); } for (int i = 0; i < groupSize; i++) { - Assert.assertTrue(bitSet.get(i)); + assertTrue(bitSet.get(i)); } } finally { cluster.shutdown(); } } - @Test(timeout=120000) // 2 min timeout + @Test + @Timeout(value = 120) public void testReconstructionWork() throws Exception { Configuration conf = new HdfsConfiguration(); conf.setLong(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, 0); @@ -570,7 +568,7 @@ public void testReconstructionWithStorageTypeNotEnough() throws Exception { bitSet.set(index); } for (int i = 0; i < groupSize; i++) { - Assert.assertTrue(bitSet.get(i)); + assertTrue(bitSet.get(i)); } } finally { cluster.shutdown(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestRedudantBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestRedudantBlocks.java index f5d54d29fdcf0..73ba79d7d32c4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestRedudantBlocks.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestRedudantBlocks.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hdfs.server.namenode; -import static org.junit.Assert.assertEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; import java.io.IOException; import java.util.Arrays; @@ -39,9 +39,9 @@ import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset; import org.apache.hadoop.hdfs.util.StripedBlockUtil; import org.apache.hadoop.test.GenericTestUtils; -import org.junit.After; -import org.junit.Before; -import org.junit.Test; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; /** * Test RedudantBlocks. 
@@ -62,7 +62,7 @@ public class TestRedudantBlocks { private final int blockSize = stripesPerBlock * cellSize; private final int numDNs = groupSize; - @Before + @BeforeEach public void setup() throws IOException { Configuration conf = new Configuration(); conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize); @@ -78,7 +78,7 @@ public void setup() throws IOException { ecPolicy.getName()); } - @After + @AfterEach public void tearDown() { if (cluster != null) { cluster.shutdown(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestRedundantEditLogInputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestRedundantEditLogInputStream.java index 4f19bc37dd6bb..a7eb982ad607f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestRedundantEditLogInputStream.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestRedundantEditLogInputStream.java @@ -20,13 +20,13 @@ import java.io.IOException; import java.util.ArrayList; -import org.junit.Test; +import org.junit.jupiter.api.Test; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.MkdirOp; import org.apache.hadoop.test.GenericTestUtils.LogCapturer; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestReencryptionHandler.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestReencryptionHandler.java index d4f79b56a477f..a27e4d8676ecb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestReencryptionHandler.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestReencryptionHandler.java @@ -28,10 +28,9 @@ import org.apache.hadoop.util.KMSUtil; import org.apache.hadoop.util.StopWatch; import org.apache.hadoop.test.Whitebox; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.Timeout; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; import org.mockito.Mockito; import org.slf4j.LoggerFactory; import org.slf4j.event.Level; @@ -45,22 +44,21 @@ import java.util.concurrent.LinkedBlockingQueue; import java.util.concurrent.TimeUnit; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertTrue; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_REENCRYPT_THROTTLE_LIMIT_HANDLER_RATIO_KEY; -import static org.junit.Assert.fail; +import static org.junit.jupiter.api.Assertions.fail; /** * Test class for ReencryptionHandler. 
*/ +@Timeout(180) public class TestReencryptionHandler { protected static final org.slf4j.Logger LOG = LoggerFactory.getLogger(TestReencryptionHandler.class); - @Rule - public Timeout globalTimeout = new Timeout(180 * 1000); - @Before + @BeforeEach public void setup() { GenericTestUtils.setLogLevel(ReencryptionHandler.LOG, Level.TRACE); } @@ -108,10 +106,9 @@ public void testThrottle() throws Exception { final StopWatch sw = new StopWatch().start(); rh.getTraverser().throttle(); sw.stop(); - assertTrue("should have throttled for at least 8 second", - sw.now(TimeUnit.MILLISECONDS) > 8000); - assertTrue("should have throttled for at most 12 second", - sw.now(TimeUnit.MILLISECONDS) < 12000); + assertTrue(sw.now(TimeUnit.MILLISECONDS) > 8000, "should have throttled for at least 8 second"); + assertTrue(sw.now(TimeUnit.MILLISECONDS) < 12000, + "should have throttled for at most 12 second"); } @Test @@ -139,8 +136,7 @@ public void testThrottleNoOp() throws Exception { StopWatch sw = new StopWatch().start(); rh.getTraverser().throttle(); sw.stop(); - assertTrue("should not have throttled", - sw.now(TimeUnit.MILLISECONDS) < 1000); + assertTrue(sw.now(TimeUnit.MILLISECONDS) < 1000, "should not have throttled"); } @Test @@ -199,7 +195,7 @@ public void run() { rh.getTraverser().throttle(); sw.stop(); LOG.info("Throttle completed, consumed {}", sw.now(TimeUnit.MILLISECONDS)); - assertTrue("should have throttled for at least 3 second", - sw.now(TimeUnit.MILLISECONDS) >= 3000); + assertTrue(sw.now(TimeUnit.MILLISECONDS) >= 3000, + "should have throttled for at least 3 second"); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestRefreshBlockPlacementPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestRefreshBlockPlacementPolicy.java index b431db7ac652c..157de56f51c00 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestRefreshBlockPlacementPolicy.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestRefreshBlockPlacementPolicy.java @@ -29,9 +29,9 @@ import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyDefault; import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo; import org.apache.hadoop.net.Node; -import org.junit.After; -import org.junit.Before; -import org.junit.Test; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; import java.io.IOException; import java.io.OutputStream; @@ -41,7 +41,7 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_PLACEMENT_EC_CLASSNAME_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_REPLICATOR_CLASSNAME_KEY; -import static org.junit.Assert.assertEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; /** * Test refresh block placement policy. 
@@ -67,7 +67,7 @@ public DatanodeStorageInfo[] chooseTarget(String srcPath, } } - @Before + @BeforeEach public void setup() throws IOException { config = new Configuration(); config.setClass(DFS_BLOCK_REPLICATOR_CLASSNAME_KEY, @@ -78,7 +78,7 @@ public void setup() throws IOException { cluster.waitActive(); } - @After + @AfterEach public void cleanup() throws IOException { cluster.shutdown(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestRefreshNamenodeReplicationConfig.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestRefreshNamenodeReplicationConfig.java index 8336a432b9a41..05396e42fa9fe 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestRefreshNamenodeReplicationConfig.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestRefreshNamenodeReplicationConfig.java @@ -24,12 +24,13 @@ import org.apache.hadoop.hdfs.MiniDFSNNTopology; import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager; import org.apache.hadoop.test.LambdaTestUtils; -import org.junit.After; -import org.junit.Before; -import org.junit.Test; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; import java.io.IOException; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; /** * This class tests the replication related parameters in the namenode can @@ -39,7 +40,7 @@ public class TestRefreshNamenodeReplicationConfig { private MiniDFSCluster cluster = null; private BlockManager bm; - @Before + @BeforeEach public void setup() throws IOException { Configuration config = new Configuration(); config.setInt( @@ -60,7 +61,7 @@ public void setup() throws IOException { bm = cluster.getNameNode().getNamesystem().getBlockManager(); } - @After + @AfterEach public void teardown() throws IOException { cluster.shutdown(); } @@ -69,7 +70,8 @@ public void teardown() throws IOException { * Tests to ensure each of the block replication parameters can be passed * updated successfully. */ - @Test(timeout = 90000) + @Test + @Timeout(value = 90) public void testParamsCanBeReconfigured() throws ReconfigurationException { assertEquals(8, bm.getMaxReplicationStreams()); @@ -99,7 +101,8 @@ public void testParamsCanBeReconfigured() throws ReconfigurationException { * Tests to ensure reconfiguration fails with a negative, zero or string value * value for each parameter. */ - @Test(timeout = 90000) + @Test + @Timeout(value = 90) public void testReconfigureFailsWithInvalidValues() throws Exception { String[] keys = new String[]{ DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY, @@ -114,8 +117,9 @@ public void testReconfigureFailsWithInvalidValues() throws Exception { LambdaTestUtils.intercept(ReconfigurationException.class, () -> cluster.getNameNode().reconfigurePropertyImpl(key, "-20")); assertTrue(e.getCause() instanceof IllegalArgumentException); - assertEquals(key+" = '-20' is invalid. It should be a " - +"positive, non-zero integer value.", e.getCause().getMessage()); + assertEquals( + key + " = '-20' is invalid. 
It should be a " + "positive, non-zero integer value.", + e.getCause().getMessage()); } // Ensure none of the values were updated from the defaults assertEquals(8, bm.getMaxReplicationStreams()); @@ -128,8 +132,8 @@ public void testReconfigureFailsWithInvalidValues() throws Exception { LambdaTestUtils.intercept(ReconfigurationException.class, () -> cluster.getNameNode().reconfigurePropertyImpl(key, "0")); assertTrue(e.getCause() instanceof IllegalArgumentException); - assertEquals(key+" = '0' is invalid. It should be a " - +"positive, non-zero integer value.", e.getCause().getMessage()); + assertEquals(key + " = '0' is invalid. It should be a " + "positive, non-zero integer value.", + e.getCause().getMessage()); } // Ensure none of the values were updated from the defaults diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSaveNamespace.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSaveNamespace.java index 6ffc8c4110d3c..b363752123e44 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSaveNamespace.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSaveNamespace.java @@ -18,10 +18,10 @@ package org.apache.hadoop.hdfs.server.namenode; import static org.apache.hadoop.hdfs.server.common.Util.fileAsURI; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.doThrow; @@ -68,8 +68,8 @@ import org.apache.hadoop.test.GenericTestUtils.DelayAnswer; import org.apache.hadoop.test.Whitebox; import org.slf4j.event.Level; -import org.junit.Assert; -import org.junit.Test; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; import org.mockito.Mockito; import org.mockito.invocation.InvocationOnMock; import org.mockito.stubbing.Answer; @@ -254,7 +254,8 @@ private void saveNamespaceWithInjectedFault(Fault fault) throws Exception { * Verify that a saveNamespace command brings faulty directories * in fs.name.dir and fs.edit.dir back online. */ - @Test (timeout=30000) + @Test + @Timeout(value = 30) public void testReinsertnamedirsInSavenamespace() throws Exception { // create a configuration with the key to restore error // directories in fs.name.dir @@ -290,10 +291,10 @@ public void testReinsertnamedirsInSavenamespace() throws Exception { fsn.saveNamespace(0, 0); LOG.info("First savenamespace sucessful."); - assertTrue("Savenamespace should have marked one directory as bad." + + assertTrue(storage.getRemovedStorageDirs().size() == 1, + "Savenamespace should have marked one directory as bad." 
+ " But found " + storage.getRemovedStorageDirs().size() + - " bad directories.", - storage.getRemovedStorageDirs().size() == 1); + " bad directories."); fs.setPermission(rootPath, permissionAll); @@ -303,11 +304,11 @@ public void testReinsertnamedirsInSavenamespace() throws Exception { LOG.info("Doing the second savenamespace."); fsn.saveNamespace(0, 0); LOG.warn("Second savenamespace sucessful."); - assertTrue("Savenamespace should have been successful in removing " + + assertTrue(storage.getRemovedStorageDirs().size() == 0, + "Savenamespace should have been successful in removing " + " bad directories from Image." + " But found " + storage.getRemovedStorageDirs().size() + - " bad directories.", - storage.getRemovedStorageDirs().size() == 0); + " bad directories."); // Now shut down and restart the namesystem LOG.info("Shutting down fsimage."); @@ -339,27 +340,32 @@ public void testReinsertnamedirsInSavenamespace() throws Exception { } } - @Test (timeout=30000) + @Test + @Timeout(value = 30) public void testRTEWhileSavingSecondImage() throws Exception { saveNamespaceWithInjectedFault(Fault.SAVE_SECOND_FSIMAGE_RTE); } - @Test (timeout=30000) + @Test + @Timeout(value = 30) public void testIOEWhileSavingSecondImage() throws Exception { saveNamespaceWithInjectedFault(Fault.SAVE_SECOND_FSIMAGE_IOE); } - @Test (timeout=30000) + @Test + @Timeout(value = 30) public void testCrashInAllImageDirs() throws Exception { saveNamespaceWithInjectedFault(Fault.SAVE_ALL_FSIMAGES); } - @Test (timeout=30000) + @Test + @Timeout(value = 30) public void testCrashWhenWritingVersionFiles() throws Exception { saveNamespaceWithInjectedFault(Fault.WRITE_STORAGE_ALL); } - @Test (timeout=30000) + @Test + @Timeout(value = 30) public void testCrashWhenWritingVersionFileInOneDir() throws Exception { saveNamespaceWithInjectedFault(Fault.WRITE_STORAGE_ONE); } @@ -371,7 +377,8 @@ public void testCrashWhenWritingVersionFileInOneDir() throws Exception { * failed checkpoint since it only affected ".ckpt" files, not * valid image files */ - @Test (timeout=30000) + @Test + @Timeout(value = 30) public void testFailedSaveNamespace() throws Exception { doTestFailedSaveNamespace(false); } @@ -381,7 +388,8 @@ public void testFailedSaveNamespace() throws Exception { * the operator restores the directories and calls it again. * This should leave the NN in a clean state for next start. */ - @Test (timeout=30000) + @Test + @Timeout(value = 30) public void testFailedSaveNamespaceWithRecovery() throws Exception { doTestFailedSaveNamespace(true); } @@ -455,7 +463,8 @@ public void doTestFailedSaveNamespace(boolean restoreStorageAfterFailure) } } - @Test (timeout=30000) + @Test + @Timeout(value = 30) public void testSaveWhileEditsRolled() throws Exception { Configuration conf = getConf(); NameNode.initMetrics(conf, NamenodeRole.NAMENODE); @@ -491,7 +500,8 @@ public void testSaveWhileEditsRolled() throws Exception { } } - @Test (timeout=30000) + @Test + @Timeout(value = 30) public void testTxIdPersistence() throws Exception { Configuration conf = getConf(); NameNode.initMetrics(conf, NamenodeRole.NAMENODE); @@ -529,7 +539,8 @@ public void testTxIdPersistence() throws Exception { } } - @Test(timeout=20000) + @Test + @Timeout(value = 20) public void testCancelSaveNamespace() throws Exception { Configuration conf = getConf(); NameNode.initMetrics(conf, NamenodeRole.NAMENODE); @@ -617,7 +628,8 @@ public Void call() throws Exception { * open lease and destination directory exist. 
* This test is a regression for HDFS-2827 */ - @Test (timeout=30000) + @Test + @Timeout(value = 30) public void testSaveNamespaceWithRenamedLease() throws Exception { MiniDFSCluster cluster = new MiniDFSCluster.Builder(new Configuration()) .numDataNodes(1).build(); @@ -638,7 +650,8 @@ public void testSaveNamespaceWithRenamedLease() throws Exception { } } - @Test (timeout=30000) + @Test + @Timeout(value = 30) public void testSaveNamespaceWithDanglingLease() throws Exception { MiniDFSCluster cluster = new MiniDFSCluster.Builder(new Configuration()) .numDataNodes(1).build(); @@ -675,8 +688,8 @@ public void testSkipSnapshotSection() throws Exception { .saveSelf2Snapshot(-1, file, null, false); // make sure it has a diff - assertTrue("Snapshot fileDiff is missing.", - file.getFileWithSnapshotFeature().getDiffs() != null); + assertTrue(file.getFileWithSnapshotFeature().getDiffs() != null, + "Snapshot fileDiff is missing."); // saveNamespace fs.setSafeMode(SafeModeAction.ENTER); @@ -690,8 +703,8 @@ public void testSkipSnapshotSection() throws Exception { // there should be no snapshot feature for the inode, when there is // no snapshot. - assertTrue("There should be no snapshot feature for this INode.", - file.getFileWithSnapshotFeature() == null); + assertTrue(file.getFileWithSnapshotFeature() == null, + "There should be no snapshot feature for this INode."); } finally { cluster.shutdown(); } @@ -718,7 +731,7 @@ public void testSaveNamespaceBeforeShutdown() throws Exception { // make sure no new checkpoint was done long after = fsimage.getStorage().getMostRecentCheckpointTxId(); - Assert.assertEquals(before, after); + assertEquals(before, after); Thread.sleep(1000); // do another checkpoint. this time set the timewindow to 1s @@ -727,7 +740,7 @@ public void testSaveNamespaceBeforeShutdown() throws Exception { fs.setSafeMode(SafeModeAction.LEAVE); after = fsimage.getStorage().getMostRecentCheckpointTxId(); - Assert.assertTrue(after > before); + assertTrue(after > before); fs.mkdirs(new Path("/foo/bar/baz")); // 3 new tx @@ -735,17 +748,18 @@ public void testSaveNamespaceBeforeShutdown() throws Exception { cluster.getNameNodeRpc().saveNamespace(3600, 5); // 3 + end/start segment long after2 = fsimage.getStorage().getMostRecentCheckpointTxId(); // no checkpoint should be made - Assert.assertEquals(after, after2); + assertEquals(after, after2); cluster.getNameNodeRpc().saveNamespace(3600, 3); after2 = fsimage.getStorage().getMostRecentCheckpointTxId(); // a new checkpoint should be done - Assert.assertTrue(after2 > after); + assertTrue(after2 > after); } finally { cluster.shutdown(); } } - @Test(timeout=30000) + @Test + @Timeout(value = 30) public void testTxFaultTolerance() throws Exception { String baseDir = MiniDFSCluster.getBaseDirectory(); List nameDirs = new ArrayList<>(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecondaryNameNodeUpgrade.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecondaryNameNodeUpgrade.java index 345a21c8d59ad..2c86fcf952b54 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecondaryNameNodeUpgrade.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecondaryNameNodeUpgrade.java @@ -24,8 +24,8 @@ import java.util.List; import java.util.Map; -import org.junit.Test; -import org.junit.Before; +import org.junit.jupiter.api.Test; +import 
org.junit.jupiter.api.BeforeEach; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; @@ -35,9 +35,10 @@ import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; -import org.junit.Assert; import org.apache.hadoop.test.GenericTestUtils; +import static org.junit.jupiter.api.Assertions.fail; + /** * Regression test for HDFS-3597, SecondaryNameNode upgrade -- when a 2NN * starts up with an existing directory structure with an old VERSION file, it @@ -45,7 +46,7 @@ */ public class TestSecondaryNameNodeUpgrade { - @Before + @BeforeEach public void cleanupCluster() throws IOException { File hdfsDir = new File(MiniDFSCluster.getBaseDirectory()).getCanonicalFile(); System.out.println("cleanupCluster deleting " + hdfsDir); @@ -114,7 +115,7 @@ public void testUpgradePreFedSucceeds() throws IOException { public void testChangeNsIDFails() throws IOException { try { doIt(ImmutableMap.of("namespaceID", "2")); - Assert.fail("Should throw InconsistentFSStateException"); + fail("Should throw InconsistentFSStateException"); } catch(IOException e) { GenericTestUtils.assertExceptionContains("Inconsistent checkpoint fields", e); System.out.println("Correctly failed with inconsistent namespaceID: " + e); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecondaryWebUi.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecondaryWebUi.java index 1d3187dffe0b1..f2ba557936f61 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecondaryWebUi.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecondaryWebUi.java @@ -18,14 +18,14 @@ package org.apache.hadoop.hdfs.server.namenode; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_TXNS_KEY; +import static org.junit.jupiter.api.Assertions.assertArrayEquals; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.MiniDFSCluster; -import org.junit.AfterClass; -import org.junit.Assert; -import org.junit.BeforeClass; -import org.junit.Test; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Test; import javax.management.*; import java.io.IOException; @@ -37,7 +37,7 @@ public class TestSecondaryWebUi { private static SecondaryNameNode snn; private static final Configuration conf = new Configuration(); - @BeforeClass + @BeforeAll public static void setUpCluster() throws IOException { conf.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY, "0.0.0.0:0"); @@ -49,7 +49,7 @@ public static void setUpCluster() throws IOException { snn = new SecondaryNameNode(conf); } - @AfterClass + @AfterAll public static void shutDownCluster() { if (cluster != null) { cluster.shutdown(); @@ -70,10 +70,9 @@ public void testSecondaryWebUi() String[] checkpointDir = (String[]) mbs.getAttribute(mxbeanName, "CheckpointDirectories"); - Assert.assertArrayEquals(checkpointDir, snn.getCheckpointDirectories()); + assertArrayEquals(checkpointDir, snn.getCheckpointDirectories()); String[] checkpointEditlogDir = (String[]) mbs.getAttribute(mxbeanName, "CheckpointEditlogDirectories"); - Assert.assertArrayEquals(checkpointEditlogDir, - snn.getCheckpointEditlogDirectories()); + assertArrayEquals(checkpointEditlogDir, snn.getCheckpointEditlogDirectories()); } } diff 
--git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecureNameNodeWithExternalKdc.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecureNameNodeWithExternalKdc.java index 60601a6ab9133..3d1584e9a0715 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecureNameNodeWithExternalKdc.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecureNameNodeWithExternalKdc.java @@ -17,9 +17,9 @@ package org.apache.hadoop.hdfs.server.namenode; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.fail; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.fail; import java.io.IOException; import java.security.PrivilegedExceptionAction; @@ -36,8 +36,8 @@ import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod; import static org.apache.hadoop.security.SecurityUtilTestHelper.isExternalKdcRunning; import org.junit.Assume; -import org.junit.Before; -import org.junit.Test; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; /** * This test brings up a MiniDFSCluster with 1 NameNode and 0 @@ -56,7 +56,7 @@ public class TestSecureNameNodeWithExternalKdc { final static private int NUM_OF_DATANODES = 0; - @Before + @BeforeEach public void testExternalKdcRunning() { // Tests are skipped if external KDC is not running. Assume.assumeTrue(isExternalKdcRunning()); @@ -71,10 +71,9 @@ public void testSecureNameNode() throws IOException, InterruptedException { String nnSpnegoPrincipal = System.getProperty("dfs.namenode.kerberos.internal.spnego.principal"); String nnKeyTab = System.getProperty("dfs.namenode.keytab.file"); - assertNotNull("NameNode principal was not specified", nnPrincipal); - assertNotNull("NameNode SPNEGO principal was not specified", - nnSpnegoPrincipal); - assertNotNull("NameNode keytab was not specified", nnKeyTab); + assertNotNull(nnPrincipal, "NameNode principal was not specified"); + assertNotNull(nnSpnegoPrincipal, "NameNode SPNEGO principal was not specified"); + assertNotNull(nnKeyTab, "NameNode keytab was not specified"); Configuration conf = new HdfsConfiguration(); conf.set(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION, @@ -96,8 +95,8 @@ public void testSecureNameNode() throws IOException, InterruptedException { // The user specified should not be a superuser String userPrincipal = System.getProperty("user.principal"); String userKeyTab = System.getProperty("user.keytab"); - assertNotNull("User principal was not specified", userPrincipal); - assertNotNull("User keytab was not specified", userKeyTab); + assertNotNull(userPrincipal, "User principal was not specified"); + assertNotNull(userKeyTab, "User keytab was not specified"); UserGroupInformation ugi = UserGroupInformation .loginUserFromKeytabAndReturnUGI(userPrincipal, userKeyTab); @@ -117,8 +116,7 @@ public FileSystem run() throws Exception { Path p = new Path("/tmp/alpha"); fs.mkdirs(p); assertNotNull(fs.listStatus(p)); - assertEquals(AuthenticationMethod.KERBEROS, - ugi.getAuthenticationMethod()); + assertEquals(AuthenticationMethod.KERBEROS, ugi.getAuthenticationMethod()); } finally { if (cluster != null) { cluster.shutdown(); diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecurityTokenEditLog.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecurityTokenEditLog.java index e52c9d241b655..368a9a8460010 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecurityTokenEditLog.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecurityTokenEditLog.java @@ -18,7 +18,6 @@ package org.apache.hadoop.hdfs.server.namenode; import static org.apache.hadoop.hdfs.DFSConfigKeys.*; -import static org.junit.Assert.assertEquals; import java.io.File; import java.io.IOException; @@ -39,11 +38,14 @@ import org.apache.hadoop.io.Text; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; -import org.junit.Assert; -import org.junit.Test; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; import org.mockito.invocation.InvocationOnMock; import org.mockito.stubbing.Answer; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; import static org.mockito.Mockito.*; /** @@ -165,7 +167,7 @@ public void testEditLog() throws IOException { FSEditLogLoader loader = new FSEditLogLoader(namesystem, 0); long numEdits = loader.loadFSEdits( new EditLogFileInputStream(editFile), 1); - assertEquals("Verification for " + editFile, expectedTransactions, numEdits); + assertEquals(expectedTransactions, numEdits, "Verification for " + editFile); } } finally { if(fileSys != null) fileSys.close(); @@ -173,7 +175,8 @@ public void testEditLog() throws IOException { } } - @Test(timeout=10000) + @Test + @Timeout(value = 10) public void testEditsForCancelOnTokenExpire() throws IOException, InterruptedException { long renewInterval = 2000; @@ -197,8 +200,8 @@ public void testEditsForCancelOnTokenExpire() throws IOException, @Override public Void answer(InvocationOnMock invocation) throws Throwable { // fsn claims read lock if either read or write locked. 
- Assert.assertTrue(fsnRef.get().hasReadLock(RwLockMode.FS)); - Assert.assertFalse(fsnRef.get().hasWriteLock(RwLockMode.FS)); + assertTrue(fsnRef.get().hasReadLock(RwLockMode.FS)); + assertFalse(fsnRef.get().hasWriteLock(RwLockMode.FS)); return null; } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSnapshotPathINodes.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSnapshotPathINodes.java index 98dc6ce6c2cac..92acbf74a0362 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSnapshotPathINodes.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSnapshotPathINodes.java @@ -17,9 +17,10 @@ */ package org.apache.hadoop.hdfs.server.namenode; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNull; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertTrue; import java.io.FileNotFoundException; import java.util.ArrayList; @@ -34,14 +35,15 @@ import org.apache.hadoop.hdfs.protocol.SnapshotException; import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot; import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotManager; -import org.junit.AfterClass; -import org.junit.Assert; -import org.junit.Before; -import org.junit.BeforeClass; -import org.junit.Test; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; import org.mockito.Mockito; import static org.apache.hadoop.test.MockitoUtil.verifyZeroInteractions; +import static org.junit.jupiter.api.Assertions.fail; /** Test snapshot related operations. */ public class TestSnapshotPathINodes { @@ -59,7 +61,7 @@ public class TestSnapshotPathINodes { static private DistributedFileSystem hdfs; - @BeforeClass + @BeforeAll public static void setUp() throws Exception { Configuration conf = new Configuration(); cluster = new MiniDFSCluster.Builder(conf) @@ -73,13 +75,13 @@ public static void setUp() throws Exception { hdfs = cluster.getFileSystem(); } - @Before + @BeforeEach public void reset() throws Exception { DFSTestUtil.createFile(hdfs, file1, 1024, REPLICATION, seed); DFSTestUtil.createFile(hdfs, file2, 1024, REPLICATION, seed); } - @AfterClass + @AfterAll public static void tearDown() throws Exception { if (cluster != null) { cluster.shutdown(); @@ -87,26 +89,27 @@ public static void tearDown() throws Exception { } /** Test allow-snapshot operation. 
*/ - @Test (timeout=15000) + @Test + @Timeout(value = 15) public void testAllowSnapshot() throws Exception { final String pathStr = sub1.toString(); final INode before = fsdir.getINode(pathStr); // Before a directory is snapshottable - Assert.assertFalse(before.asDirectory().isSnapshottable()); + assertFalse(before.asDirectory().isSnapshottable()); // After a directory is snapshottable final Path path = new Path(pathStr); hdfs.allowSnapshot(path); { final INode after = fsdir.getINode(pathStr); - Assert.assertTrue(after.asDirectory().isSnapshottable()); + assertTrue(after.asDirectory().isSnapshottable()); } hdfs.disallowSnapshot(path); { final INode after = fsdir.getINode(pathStr); - Assert.assertFalse(after.asDirectory().isSnapshottable()); + assertFalse(after.asDirectory().isSnapshottable()); } } @@ -125,8 +128,7 @@ static void assertSnapshot(INodesInPath inodesInPath, boolean isSnapshot, assertEquals(Snapshot.getSnapshotId(isSnapshot ? snapshot : null), inodesInPath.getPathSnapshotId()); if (!isSnapshot) { - assertEquals(Snapshot.getSnapshotId(snapshot), - inodesInPath.getLatestSnapshotId()); + assertEquals(Snapshot.getSnapshotId(snapshot), inodesInPath.getLatestSnapshotId()); } if (isSnapshot && index >= 0) { assertEquals(Snapshot.Root.class, inodesInPath.getINode(index).getClass()); @@ -141,7 +143,8 @@ static void assertINodeFile(INode inode, Path path) { /** * for normal (non-snapshot) file. */ - @Test (timeout=15000) + @Test + @Timeout(value = 15) public void testNonSnapshotPathINodes() throws Exception { // Get the inodes by resolving the path of a normal file byte[][] components = INode.getPathComponents(file1.toString()); @@ -158,22 +161,18 @@ public void testNonSnapshotPathINodes() throws Exception { } // The last INode should be associated with file1 - assertTrue("file1=" + file1 + ", nodesInPath=" + nodesInPath, - nodesInPath.getINode(components.length - 1) != null); - assertEquals(nodesInPath.getINode(components.length - 1).getFullPathName(), - file1.toString()); - assertEquals(nodesInPath.getINode(components.length - 2).getFullPathName(), - sub1.toString()); - assertEquals(nodesInPath.getINode(components.length - 3).getFullPathName(), - dir.toString()); + assertTrue(nodesInPath.getINode(components.length - 1) != null, + "file1=" + file1 + ", nodesInPath=" + nodesInPath); + assertEquals(nodesInPath.getINode(components.length - 1).getFullPathName(), file1.toString()); + assertEquals(nodesInPath.getINode(components.length - 2).getFullPathName(), sub1.toString()); + assertEquals(nodesInPath.getINode(components.length - 3).getFullPathName(), dir.toString()); assertEquals(Path.SEPARATOR, nodesInPath.getPath(0)); assertEquals(dir.toString(), nodesInPath.getPath(1)); assertEquals(sub1.toString(), nodesInPath.getPath(2)); assertEquals(file1.toString(), nodesInPath.getPath(3)); - assertEquals(file1.getParent().toString(), - nodesInPath.getParentINodesInPath().getPath()); + assertEquals(file1.getParent().toString(), nodesInPath.getParentINodesInPath().getPath()); nodesInPath = INodesInPath.resolve(fsdir.rootDir, components, false); assertEquals(nodesInPath.length(), components.length); @@ -184,7 +183,8 @@ public void testNonSnapshotPathINodes() throws Exception { /** * for snapshot file. 
*/ - @Test (timeout=15000) + @Test + @Timeout(value = 15) public void testSnapshotPathINodes() throws Exception { // Create a snapshot for the dir, and check the inodes for the path // pointing to a snapshot file @@ -202,8 +202,7 @@ public void testSnapshotPathINodes() throws Exception { // SnapshotRootIndex should be 3: {root, Testsnapshot, sub1, s1, file1} final Snapshot snapshot = getSnapshot(nodesInPath, "s1", 3); assertSnapshot(nodesInPath, true, snapshot, 3); - assertEquals(".snapshot/s1", - DFSUtil.bytes2String(nodesInPath.getPathComponent(3))); + assertEquals(".snapshot/s1", DFSUtil.bytes2String(nodesInPath.getPathComponent(3))); assertTrue(nodesInPath.getINode(3) instanceof Snapshot.Root); assertEquals("s1", nodesInPath.getINode(3).getLocalName()); @@ -226,12 +225,10 @@ public void testSnapshotPathINodes() throws Exception { // The number of INodes returned should still be components.length // since we put a null in the inode array for ".snapshot" assertEquals(nodesInPath.length(), components.length); - assertEquals(".snapshot", - DFSUtil.bytes2String(nodesInPath.getLastLocalName())); + assertEquals(".snapshot", DFSUtil.bytes2String(nodesInPath.getLastLocalName())); assertNull(nodesInPath.getLastINode()); // ensure parent inodes can strip the .snapshot - assertEquals(sub1.toString(), - nodesInPath.getParentINodesInPath().getPath()); + assertEquals(sub1.toString(), nodesInPath.getParentINodesInPath().getPath()); // No SnapshotRoot dir is included in the resolved inodes assertSnapshot(nodesInPath, true, snapshot, -1); @@ -246,7 +243,7 @@ public void testSnapshotPathINodes() throws Exception { invalidPath = new Path(invalidPath, invalidPathComponent[i]); try { hdfs.getFileStatus(invalidPath); - Assert.fail(); + fail(); } catch(FileNotFoundException fnfe) { System.out.println("The exception is expected: " + fnfe); } @@ -258,7 +255,8 @@ public void testSnapshotPathINodes() throws Exception { /** * for snapshot file after deleting the original file. */ - @Test (timeout=15000) + @Test + @Timeout(value = 15) public void testSnapshotPathINodesAfterDeletion() throws Exception { // Create a snapshot for the dir, and check the inodes for the path // pointing to a snapshot file @@ -303,10 +301,8 @@ public void testSnapshotPathINodesAfterDeletion() throws Exception { // The last INode should be null, and the one before should be associated // with sub1 assertNull(nodesInPath.getINode(components.length - 1)); - assertEquals(nodesInPath.getINode(components.length - 2).getFullPathName(), - sub1.toString()); - assertEquals(nodesInPath.getINode(components.length - 3).getFullPathName(), - dir.toString()); + assertEquals(nodesInPath.getINode(components.length - 2).getFullPathName(), sub1.toString()); + assertEquals(nodesInPath.getINode(components.length - 3).getFullPathName(), dir.toString()); hdfs.deleteSnapshot(sub1, "s2"); hdfs.disallowSnapshot(sub1); } @@ -323,7 +319,8 @@ private int getNumNonNull(INodesInPath iip) { /** * for snapshot file while adding a new file after snapshot. 
*/ - @Test (timeout=15000) + @Test + @Timeout(value = 15) public void testSnapshotPathINodesWithAddedFile() throws Exception { // Create a snapshot for the dir, and check the inodes for the path // pointing to a snapshot file @@ -367,12 +364,9 @@ public void testSnapshotPathINodesWithAddedFile() throws Exception { assertSnapshot(nodesInPath, false, s4, -1); // The last INode should be associated with file3 - assertEquals(nodesInPath.getINode(components.length - 1).getFullPathName(), - file3.toString()); - assertEquals(nodesInPath.getINode(components.length - 2).getFullPathName(), - sub1.toString()); - assertEquals(nodesInPath.getINode(components.length - 3).getFullPathName(), - dir.toString()); + assertEquals(nodesInPath.getINode(components.length - 1).getFullPathName(), file3.toString()); + assertEquals(nodesInPath.getINode(components.length - 2).getFullPathName(), sub1.toString()); + assertEquals(nodesInPath.getINode(components.length - 3).getFullPathName(), dir.toString()); hdfs.deleteSnapshot(sub1, "s4"); hdfs.disallowSnapshot(sub1); } @@ -380,7 +374,8 @@ public void testSnapshotPathINodesWithAddedFile() throws Exception { /** * for snapshot file while modifying file after snapshot. */ - @Test (timeout=15000) + @Test + @Timeout(value = 15) public void testSnapshotPathINodesAfterModification() throws Exception { // First check the INode for /TestSnapshot/sub1/file1 byte[][] components = INode.getPathComponents(file1.toString()); @@ -390,8 +385,7 @@ public void testSnapshotPathINodesAfterModification() throws Exception { assertEquals(nodesInPath.length(), components.length); // The last INode should be associated with file1 - assertEquals(nodesInPath.getINode(components.length - 1).getFullPathName(), - file1.toString()); + assertEquals(nodesInPath.getINode(components.length - 1).getFullPathName(), file1.toString()); // record the modification time of the inode final long modTime = nodesInPath.getINode(nodesInPath.length() - 1) .getModificationTime(); @@ -420,8 +414,7 @@ public void testSnapshotPathINodesAfterModification() throws Exception { assertTrue(snapshotFileNode.asFile().isWithSnapshot()); // The modification time of the snapshot INode should be the same with the // original INode before modification - assertEquals(modTime, - snapshotFileNode.getModificationTime(ssNodesInPath.getPathSnapshotId())); + assertEquals(modTime, snapshotFileNode.getModificationTime(ssNodesInPath.getPathSnapshotId())); // Check the INode for /TestSnapshot/sub1/file1 again components = INode.getPathComponents(file1.toString()); @@ -432,10 +425,9 @@ public void testSnapshotPathINodesAfterModification() throws Exception { assertEquals(newNodesInPath.length(), components.length); // The last INode should be associated with file1 final int last = components.length - 1; - assertEquals(newNodesInPath.getINode(last).getFullPathName(), - file1.toString()); + assertEquals(newNodesInPath.getINode(last).getFullPathName(), file1.toString()); // The modification time of the INode for file3 should have been changed - Assert.assertFalse(modTime == newNodesInPath.getINode(last).getModificationTime()); + assertFalse(modTime == newNodesInPath.getINode(last).getModificationTime()); hdfs.deleteSnapshot(sub1, "s3"); hdfs.disallowSnapshot(sub1); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java index 67c8f3c18f118..af0a94c331e56 100644 --- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java @@ -19,14 +19,11 @@ import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption.IMPORT; import static org.apache.hadoop.hdfs.server.common.Util.fileAsURI; -import static org.hamcrest.CoreMatchers.allOf; -import static org.hamcrest.CoreMatchers.containsString; -import static org.hamcrest.CoreMatchers.is; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertThat; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; +import static org.assertj.core.api.Assertions.assertThat; import java.io.File; import java.io.IOException; @@ -75,9 +72,10 @@ import org.apache.hadoop.util.ExitUtil; import org.apache.hadoop.util.StringUtils; import org.apache.log4j.Logger; -import org.junit.After; -import org.junit.Before; -import org.junit.Test; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; import javax.management.MBeanServer; import javax.management.ObjectName; @@ -98,7 +96,7 @@ public class TestStartup { static final int fileSize = 8192; private long editsLength=0, fsimageLength=0; - @Before + @BeforeEach public void setUp() throws Exception { ExitUtil.disableSystemExit(); ExitUtil.resetFirstExitException(); @@ -127,7 +125,7 @@ public void setUp() throws Exception { /** * clean up */ - @After + @AfterEach public void tearDown() throws Exception { if ( hdfsDir.exists() && !FileUtil.fullyDelete(hdfsDir) ) { throw new IOException("Could not delete hdfs directory in tearDown '" + hdfsDir + "'"); @@ -287,12 +285,12 @@ private void verifyDifferentDirs(FSImage img, long expectedImgSize, long expecte img.getStorage(); File imf = NNStorage.getStorageFile(sd, NameNodeFile.IMAGE, 0); LOG.info("--image file " + imf.getAbsolutePath() + "; len = " + imf.length() + "; expected = " + expectedImgSize); - assertEquals(expectedImgSize, imf.length()); + assertEquals(expectedImgSize, imf.length()); } else if(sd.getStorageDirType().isOfType(NameNodeDirType.EDITS)) { img.getStorage(); File edf = NNStorage.getStorageFile(sd, NameNodeFile.EDITS, 0); LOG.info("-- edits file " + edf.getAbsolutePath() + "; len = " + edf.length() + "; expected = " + expectedEditsSize); - assertEquals(expectedEditsSize, edf.length()); + assertEquals(expectedEditsSize, edf.length()); } else { fail("Image/Edits directories are not different"); } @@ -419,7 +417,8 @@ public void testSNNStartup() throws IOException{ } } - @Test(timeout = 30000) + @Test + @Timeout(value = 30) public void testSNNStartupWithRuntimeException() throws Exception { String[] argv = new String[] { "-checkpoint" }; try { @@ -428,7 +427,7 @@ public void testSNNStartupWithRuntimeException() throws Exception { } catch (ExitException ee) { GenericTestUtils.assertExceptionContains( ExitUtil.EXIT_EXCEPTION_MESSAGE, ee); - assertTrue("Didn't terminate properly ", ExitUtil.terminateCalled()); + assertTrue(ExitUtil.terminateCalled(), "Didn't terminate properly "); } } @@ -553,7 +552,8 @@ private void 
testImageChecksum(boolean compress) throws Exception { } } - @Test(timeout=30000) + @Test + @Timeout(value = 30) public void testCorruptImageFallback() throws IOException { // Create two checkpoints createCheckPoint(2); @@ -572,7 +572,8 @@ public void testCorruptImageFallback() throws IOException { } } - @Test(timeout=30000) + @Test + @Timeout(value = 30) public void testCorruptImageFallbackLostECPolicy() throws IOException { final ErasureCodingPolicy defaultPolicy = StripedFileTestUtil .getDefaultECPolicy(); @@ -657,8 +658,8 @@ public void testNNRestart() throws IOException, InterruptedException { Thread.sleep(HEARTBEAT_INTERVAL * 1000); info = nn.getDatanodeReport(DatanodeReportType.LIVE); } - assertEquals("Number of live nodes should be "+numDatanodes, numDatanodes, - info.length); + assertEquals(numDatanodes, info.length, + "Number of live nodes should be " + numDatanodes); } catch (IOException e) { fail(StringUtils.stringifyException(e)); @@ -671,7 +672,8 @@ public void testNNRestart() throws IOException, InterruptedException { } } - @Test(timeout = 120000) + @Test + @Timeout(value = 120) public void testXattrConfiguration() throws Exception { Configuration conf = new HdfsConfiguration(); MiniDFSCluster cluster = null; @@ -709,7 +711,8 @@ public void testXattrConfiguration() throws Exception { } } - @Test(timeout = 30000) + @Test + @Timeout(value = 30) public void testNNFailToStartOnReadOnlyNNDir() throws Exception { /* set NN dir */ final String nnDirStr = Paths.get( @@ -727,10 +730,8 @@ public void testNNFailToStartOnReadOnlyNNDir() throws Exception { final Collection nnDirs = FSNamesystem.getNamespaceDirs(config); assertNotNull(nnDirs); assertTrue(nnDirs.iterator().hasNext()); - assertEquals( - "NN dir should be created after NN startup.", - new File(nnDirStr), - new File(nnDirs.iterator().next().getPath())); + assertEquals(new File(nnDirStr), new File(nnDirs.iterator().next().getPath()), + "NN dir should be created after NN startup."); final File nnDir = new File(nnDirStr); assertTrue(nnDir.exists()); assertTrue(nnDir.isDirectory()); @@ -738,21 +739,19 @@ public void testNNFailToStartOnReadOnlyNNDir() throws Exception { try { /* set read only */ assertTrue( - "Setting NN dir read only should succeed.", - FileUtil.setWritable(nnDir, false)); + + FileUtil.setWritable(nnDir, false), "Setting NN dir read only should succeed."); cluster.restartNameNodes(); fail("Restarting NN should fail on read only NN dir."); } catch (InconsistentFSStateException e) { - assertThat(e.toString(), is(allOf( - containsString("InconsistentFSStateException"), - containsString(nnDirStr), - containsString("in an inconsistent state"), - containsString( - "storage directory does not exist or is not accessible.")))); + assertThat(e.toString()) + .contains("InconsistentFSStateException", + nnDirStr, + "in an inconsistent state", + "storage directory does not exist or is not accessible."); } finally { /* set back to writable in order to clean it */ - assertTrue("Setting NN dir should succeed.", - FileUtil.setWritable(nnDir, true)); + assertTrue(FileUtil.setWritable(nnDir, true), "Setting NN dir should succeed."); } } } @@ -766,7 +765,8 @@ public void testNNFailToStartOnReadOnlyNNDir() throws Exception { * 4. NN will mark DatanodeStorageInfo#blockContentsStale to false. 
* @throws Exception */ - @Test(timeout = 60000) + @Test + @Timeout(value = 60) public void testStorageBlockContentsStaleAfterNNRestart() throws Exception { MiniDFSCluster dfsCluster = null; try { @@ -791,7 +791,8 @@ public void testStorageBlockContentsStaleAfterNNRestart() throws Exception { return; } - @Test(timeout = 60000) + @Test + @Timeout(value = 60) public void testDirectoryPermissions() throws Exception { Configuration conf = new Configuration(); try (MiniDFSCluster dfsCluster @@ -809,8 +810,7 @@ public void testDirectoryPermissions() throws Exception { DFSConfigKeys.DFS_NAMENODE_NAME_DIR_PERMISSION_DEFAULT)); for (URI uri : nameDirUris) { FileStatus fileStatus = fs.getFileLinkStatus(new Path(uri)); - assertEquals(permission.toOctal(), - fileStatus.getPermission().toOctal()); + assertEquals(permission.toOctal(), fileStatus.getPermission().toOctal()); } } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartupOptionUpgrade.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartupOptionUpgrade.java index ef51acc044936..f986c0839cf37 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartupOptionUpgrade.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartupOptionUpgrade.java @@ -17,8 +17,8 @@ */ package org.apache.hadoop.hdfs.server.namenode; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; import java.net.URI; import java.util.Arrays; @@ -29,18 +29,17 @@ import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; -import org.junit.After; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -import org.junit.runners.Parameterized.Parameters; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedClass; +import org.junit.jupiter.params.provider.MethodSource; /** * This class tests various upgrade cases from earlier versions to current * version with and without clusterid. 
*/ -@RunWith(value = Parameterized.class) +@MethodSource("startOption") +@ParameterizedClass public class TestStartupOptionUpgrade { private Configuration conf; @@ -48,19 +47,17 @@ public class TestStartupOptionUpgrade { private int layoutVersion; NNStorage storage; - @Parameters public static Collection startOption() { Object[][] params = new Object[][] { { StartupOption.UPGRADE }, { StartupOption.UPGRADEONLY } }; return Arrays.asList(params); } - public TestStartupOptionUpgrade(StartupOption startOption) { - super(); + public TestStartupOptionUpgrade(StartupOption startOption) throws Exception { this.startOpt = startOption; + setUp(); } - - @Before + public void setUp() throws Exception { conf = new HdfsConfiguration(); startOpt.setClusterId(null); @@ -69,7 +66,7 @@ public void setUp() throws Exception { Collections.emptyList()); } - @After + @AfterEach public void tearDown() throws Exception { conf = null; startOpt = null; @@ -86,8 +83,8 @@ public void tearDown() throws Exception { public void testStartupOptUpgradeFrom204() throws Exception { layoutVersion = Feature.RESERVED_REL20_204.getInfo().getLayoutVersion(); storage.processStartupOptionsForUpgrade(startOpt, layoutVersion); - assertTrue("Clusterid should start with CID", storage.getClusterID() - .startsWith("CID")); + assertTrue(storage.getClusterID().startsWith("CID"), + "Clusterid should start with CID"); } /** @@ -102,8 +99,8 @@ public void testStartupOptUpgradeFrom22WithCID() throws Exception { startOpt.setClusterId("cid"); layoutVersion = Feature.RESERVED_REL22.getInfo().getLayoutVersion(); storage.processStartupOptionsForUpgrade(startOpt, layoutVersion); - assertEquals("Clusterid should match with the given clusterid", - "cid", storage.getClusterID()); + assertEquals("cid", storage.getClusterID(), + "Clusterid should match with the given clusterid"); } /** @@ -120,8 +117,8 @@ public void testStartupOptUpgradeFromFederation() storage.setClusterID("currentcid"); layoutVersion = Feature.FEDERATION.getInfo().getLayoutVersion(); storage.processStartupOptionsForUpgrade(startOpt, layoutVersion); - assertEquals("Clusterid should match with the existing one", - "currentcid", storage.getClusterID()); + assertEquals("currentcid", storage.getClusterID(), + "Clusterid should match with the existing one"); } /** @@ -138,8 +135,8 @@ public void testStartupOptUpgradeFromFederationWithWrongCID() storage.setClusterID("currentcid"); layoutVersion = Feature.FEDERATION.getInfo().getLayoutVersion(); storage.processStartupOptionsForUpgrade(startOpt, layoutVersion); - assertEquals("Clusterid should match with the existing one", - "currentcid", storage.getClusterID()); + assertEquals("currentcid", storage.getClusterID(), + "Clusterid should match with the existing one"); } /** @@ -156,7 +153,7 @@ public void testStartupOptUpgradeFromFederationWithCID() storage.setClusterID("currentcid"); layoutVersion = Feature.FEDERATION.getInfo().getLayoutVersion(); storage.processStartupOptionsForUpgrade(startOpt, layoutVersion); - assertEquals("Clusterid should match with the existing one", - "currentcid", storage.getClusterID()); + assertEquals("currentcid", storage.getClusterID(), + "Clusterid should match with the existing one"); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartupProgressServlet.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartupProgressServlet.java index d28f0a4c2920e..999cb9862f5d6 100644 --- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartupProgressServlet.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartupProgressServlet.java @@ -18,7 +18,8 @@ package org.apache.hadoop.hdfs.server.namenode; import static org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgressTestHelper.*; -import static org.junit.Assert.*; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.mockito.Mockito.*; import java.io.ByteArrayOutputStream; @@ -35,8 +36,8 @@ import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap; import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress; -import org.junit.Before; -import org.junit.Test; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; import org.eclipse.jetty.util.ajax.JSON; public class TestStartupProgressServlet { @@ -47,7 +48,7 @@ public class TestStartupProgressServlet { private StartupProgress startupProgress; private StartupProgressServlet servlet; - @Before + @BeforeEach public void setUp() throws Exception { startupProgress = new StartupProgress(); ServletContext context = mock(ServletContext.class); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfierWithHA.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfierWithHA.java index cf04db0d401af..2b344992de620 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfierWithHA.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfierWithHA.java @@ -29,8 +29,10 @@ import org.apache.hadoop.hdfs.MiniDFSNNTopology; import org.apache.hadoop.hdfs.protocol.HdfsConstants.StoragePolicySatisfierMode; import org.apache.hadoop.test.GenericTestUtils; -import org.junit.Assert; -import org.junit.Test; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; + +import static org.junit.jupiter.api.Assertions.fail; /** * Tests that StoragePolicySatisfier is able to work with HA enabled. @@ -84,7 +86,8 @@ private void startCluster(final Configuration conf, * Tests to verify that SPS should run/stop automatically when NN state * changes between Standby and Active. 
*/ - @Test(timeout = 90000) + @Test + @Timeout(value = 90) public void testWhenNNHAStateChanges() throws IOException { try { createCluster(); @@ -95,7 +98,7 @@ public void testWhenNNHAStateChanges() throws IOException { cluster.getNameNode(0).reconfigurePropertyImpl( DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_MODE_KEY, StoragePolicySatisfierMode.NONE.toString()); - Assert.fail("It's not allowed to enable or disable" + fail("It's not allowed to enable or disable" + " StoragePolicySatisfier on Standby NameNode"); } catch (ReconfigurationException e) { GenericTestUtils.assertExceptionContains("Could not change property " diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySummary.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySummary.java index 60c9318bac179..1d5e2e5e6ebd9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySummary.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySummary.java @@ -25,8 +25,9 @@ import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy; import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite; import org.apache.hadoop.hdfs.server.namenode.StoragePolicySummary.StorageTypeAllocation; -import org.junit.Assert; -import org.junit.Test; +import org.junit.jupiter.api.Test; + +import static org.junit.jupiter.api.Assertions.assertEquals; public class TestStoragePolicySummary { @@ -51,13 +52,13 @@ public void testMultipleHots() { sts.add(new StorageType[]{StorageType.DISK, StorageType.DISK,StorageType.DISK,StorageType.DISK},hot); Map actualOutput = convertToStringMap(sts); - Assert.assertEquals(4,actualOutput.size()); + assertEquals(4, actualOutput.size()); Map expectedOutput = new HashMap<>(); expectedOutput.put("HOT|DISK:1(HOT)", 1l); expectedOutput.put("HOT|DISK:2(HOT)", 1l); expectedOutput.put("HOT|DISK:3(HOT)", 1l); expectedOutput.put("HOT|DISK:4(HOT)", 1l); - Assert.assertEquals(expectedOutput,actualOutput); + assertEquals(expectedOutput, actualOutput); } @Test @@ -75,13 +76,13 @@ public void testMultipleHotsWithDifferentCounts() { sts.add(new StorageType[]{StorageType.DISK, StorageType.DISK,StorageType.DISK,StorageType.DISK},hot); Map actualOutput = convertToStringMap(sts); - Assert.assertEquals(4,actualOutput.size()); + assertEquals(4, actualOutput.size()); Map expectedOutput = new HashMap<>(); expectedOutput.put("HOT|DISK:1(HOT)", 1l); expectedOutput.put("HOT|DISK:2(HOT)", 2l); expectedOutput.put("HOT|DISK:3(HOT)", 2l); expectedOutput.put("HOT|DISK:4(HOT)", 1l); - Assert.assertEquals(expectedOutput,actualOutput); + assertEquals(expectedOutput, actualOutput); } @Test @@ -110,13 +111,13 @@ public void testMultipleWarmsInDifferentOrder() { sts.add(new StorageType[]{StorageType.ARCHIVE, StorageType.ARCHIVE,StorageType.DISK,StorageType.DISK},warm); Map actualOutput = convertToStringMap(sts); - Assert.assertEquals(4,actualOutput.size()); + assertEquals(4, actualOutput.size()); Map expectedOutput = new HashMap<>(); expectedOutput.put("WARM|DISK:1,ARCHIVE:1(WARM)", 2l); expectedOutput.put("WARM|DISK:2,ARCHIVE:1", 3l); expectedOutput.put("WARM|DISK:1,ARCHIVE:2(WARM)", 3l); expectedOutput.put("WARM|DISK:2,ARCHIVE:2", 1l); - Assert.assertEquals(expectedOutput,actualOutput); + assertEquals(expectedOutput, actualOutput); } @Test @@ -150,7 +151,7 @@ public void testDifferentSpecifiedPolicies() { sts.add(new 
StorageType[]{StorageType.ARCHIVE, StorageType.ARCHIVE,StorageType.ARCHIVE},cold); Map actualOutput = convertToStringMap(sts); - Assert.assertEquals(9,actualOutput.size()); + assertEquals(9, actualOutput.size()); Map expectedOutput = new HashMap<>(); expectedOutput.put("HOT|DISK:3(HOT)", 2l); expectedOutput.put("COLD|DISK:1,ARCHIVE:2(WARM)", 2l); @@ -161,7 +162,7 @@ public void testDifferentSpecifiedPolicies() { expectedOutput.put("WARM|DISK:1,ARCHIVE:2(WARM)", 1l); expectedOutput.put("COLD|ARCHIVE:3(COLD)", 1l); expectedOutput.put("HOT|DISK:1,ARCHIVE:2(WARM)", 1l); - Assert.assertEquals(expectedOutput,actualOutput); + assertEquals(expectedOutput, actualOutput); } @Test @@ -191,11 +192,11 @@ public void testSortInDescendingOrder() { sts.add(new StorageType[]{StorageType.ARCHIVE, StorageType.ARCHIVE,StorageType.ARCHIVE},cold); Map actualOutput = convertToStringMap(sts); - Assert.assertEquals(3,actualOutput.size()); + assertEquals(3, actualOutput.size()); Map expectedOutput = new LinkedHashMap<>(); expectedOutput.put("COLD|ARCHIVE:3(COLD)", 4l); expectedOutput.put("WARM|DISK:1,ARCHIVE:2(WARM)", 3l); expectedOutput.put("HOT|DISK:3(HOT)", 2l); - Assert.assertEquals(expectedOutput.toString(),actualOutput.toString()); + assertEquals(expectedOutput.toString(), actualOutput.toString()); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStorageRestore.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStorageRestore.java index 3664704183edc..1f894b3c6fdda 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStorageRestore.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStorageRestore.java @@ -21,9 +21,9 @@ import static org.apache.hadoop.hdfs.server.namenode.NNStorage.getFinalizedEditsFileName; import static org.apache.hadoop.hdfs.server.namenode.NNStorage.getImageFileName; import static org.apache.hadoop.hdfs.server.namenode.NNStorage.getInProgressEditsFileName; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.doThrow; import static org.mockito.Mockito.spy; @@ -49,8 +49,8 @@ import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory; import org.apache.hadoop.hdfs.server.namenode.JournalSet.JournalAndStream; import org.apache.hadoop.util.Shell; -import org.junit.Before; -import org.junit.Test; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableSet; /** @@ -69,7 +69,8 @@ public class TestStorageRestore { static final int fileSize = 8192; private File path1, path2, path3; private MiniDFSCluster cluster; - @Before + + @BeforeEach public void setUpNameDirs() throws Exception { config = new HdfsConfiguration(); hdfsDir = new File(MiniDFSCluster.getBaseDirectory()).getCanonicalFile(); @@ -199,17 +200,17 @@ public void testStorageRestore() throws Exception { FSImageTestUtil.assertFileContentsSame( new File(path1, "current/" + getImageFileName(4)), new File(path2, "current/" + getImageFileName(4))); - assertFalse("Should not have any image in an edits-only 
directory", - new File(path3, "current/" + getImageFileName(4)).exists()); + assertFalse(new File(path3, "current/" + getImageFileName(4)).exists(), + "Should not have any image in an edits-only directory"); // Should have finalized logs in the directory that didn't fail - assertTrue("Should have finalized logs in the directory that didn't fail", - new File(path1, "current/" + getFinalizedEditsFileName(1,4)).exists()); + assertTrue(new File(path1, "current/" + getFinalizedEditsFileName(1, 4)).exists(), + "Should have finalized logs in the directory that didn't fail"); // Should not have finalized logs in the failed directories - assertFalse("Should not have finalized logs in the failed directories", - new File(path2, "current/" + getFinalizedEditsFileName(1,4)).exists()); - assertFalse("Should not have finalized logs in the failed directories", - new File(path3, "current/" + getFinalizedEditsFileName(1,4)).exists()); + assertFalse(new File(path2, "current/" + getFinalizedEditsFileName(1, 4)).exists(), + "Should not have finalized logs in the failed directories"); + assertFalse(new File(path3, "current/" + getFinalizedEditsFileName(1, 4)).exists(), + "Should not have finalized logs in the failed directories"); // The new log segment should be in all of the directories. FSImageTestUtil.assertFileContentsSame( @@ -280,19 +281,19 @@ public void testDfsAdminCmd() throws Exception { executor.executeCommand(cmd); restore = fsi.getStorage().getRestoreFailedStorage(); - assertFalse("After set true call restore is " + restore, restore); + assertFalse(restore, "After set true call restore is " + restore); // run one more time - to set it to true again cmd = "-fs NAMENODE -restoreFailedStorage true"; executor.executeCommand(cmd); restore = fsi.getStorage().getRestoreFailedStorage(); - assertTrue("After set false call restore is " + restore, restore); + assertTrue(restore, "After set false call restore is " + restore); // run one more time - no change in value cmd = "-fs NAMENODE -restoreFailedStorage check"; CommandExecutor.Result cmdResult = executor.executeCommand(cmd); restore = fsi.getStorage().getRestoreFailedStorage(); - assertTrue("After check call restore is " + restore, restore); + assertTrue(restore, "After check call restore is " + restore); String commandOutput = cmdResult.getCommandOutput(); assertTrue(commandOutput.contains("restoreFailedStorage is set to true")); @@ -345,7 +346,7 @@ public void testMultipleSecondaryCheckpoint() throws IOException { // The created file should still exist in the in-memory FS state after the // checkpoint. - assertTrue("path exists before restart", fs.exists(testPath)); + assertTrue(fs.exists(testPath), "path exists before restart"); secondary.shutdown(); @@ -353,7 +354,7 @@ public void testMultipleSecondaryCheckpoint() throws IOException { cluster.restartNameNode(); // The created file should still exist after the restart. 
- assertTrue("path should still exist after restart", fs.exists(testPath)); + assertTrue(fs.exists(testPath), "path should still exist after restart"); } finally { if (cluster != null) { cluster.shutdown(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStripedINodeFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStripedINodeFile.java index 39e272a59e4a8..9b3deb99bb6a4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStripedINodeFile.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStripedINodeFile.java @@ -43,25 +43,24 @@ import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped; import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.ExpectedException; -import org.junit.rules.Timeout; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; import java.io.IOException; import static org.apache.hadoop.hdfs.protocol.BlockType.CONTIGUOUS; import static org.apache.hadoop.hdfs.protocol.BlockType.STRIPED; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; /** * This class tests INodeFile with striped feature. 
*/ +@Timeout(300) public class TestStripedINodeFile { public static final Logger LOG = LoggerFactory.getLogger(TestINodeFile.class); @@ -77,19 +76,13 @@ public class TestStripedINodeFile { private static final ErasureCodingPolicy testECPolicy = StripedFileTestUtil.getDefaultECPolicy(); - @Rule - public Timeout globalTimeout = new Timeout(300000); - private static INodeFile createStripedINodeFile() { return new INodeFile(HdfsConstants.GRANDFATHER_INODE_ID, null, perm, 0L, 0L, null, null, StripedFileTestUtil.getDefaultECPolicy().getId(), 1024L, HdfsConstants.COLD_STORAGE_POLICY_ID, BlockType.STRIPED); } - @Rule - public ExpectedException thrown = ExpectedException.none(); - - @Before + @BeforeEach public void init() throws IOException { Configuration conf = new HdfsConfiguration(); ErasureCodingPolicyManager.getInstance().init(conf); @@ -97,11 +90,12 @@ public void init() throws IOException { @Test public void testInvalidECPolicy() throws IllegalArgumentException { - thrown.expect(IllegalArgumentException.class); - thrown.expectMessage("Could not find EC policy with ID 0xbb"); - new INodeFile(HdfsConstants.GRANDFATHER_INODE_ID, null, perm, 0L, 0L, - null, null, (byte) 0xBB, 1024L, - HdfsConstants.COLD_STORAGE_POLICY_ID, BlockType.STRIPED); + IllegalArgumentException ex = assertThrows(IllegalArgumentException.class, () -> { + new INodeFile(HdfsConstants.GRANDFATHER_INODE_ID, null, perm, 0L, 0L, + null, null, (byte) 0xBB, 1024L, + HdfsConstants.COLD_STORAGE_POLICY_ID, STRIPED); + }); + assertTrue(ex.getMessage().contains("Could not find EC policy with ID 0xbb")); } @Test @@ -169,9 +163,8 @@ public void testStripedLayoutRedundancy() { null, perm, 0L, 0L, null, null /*replication*/, ecPolicyID, 1024L, HdfsConstants.WARM_STORAGE_POLICY_ID, STRIPED); - Assert.assertTrue(inodeFile.isStriped()); - Assert.assertEquals(ecPolicyID.byteValue(), - inodeFile.getErasureCodingPolicyID()); + assertTrue(inodeFile.isStriped()); + assertEquals(ecPolicyID.byteValue(), inodeFile.getErasureCodingPolicyID()); } @Test @@ -304,7 +297,8 @@ public void testBlockStripedUCComputeQuotaUsage() /** * Test the behavior of striped and contiguous block deletions. 
*/ - @Test(timeout = 60000) + @Test + @Timeout(value = 60) public void testDeleteOp() throws Exception { MiniDFSCluster cluster = null; try { @@ -339,37 +333,34 @@ public void testDeleteOp() throws Exception { // Case-1: Verify the behavior of striped blocks // Get blocks of striped file INode inodeStriped = fsd.getINode("/parentDir/ecDir/ecFile"); - assertTrue("Failed to get INodeFile for /parentDir/ecDir/ecFile", - inodeStriped instanceof INodeFile); + assertTrue(inodeStriped instanceof INodeFile, + "Failed to get INodeFile for /parentDir/ecDir/ecFile"); INodeFile inodeStripedFile = (INodeFile) inodeStriped; BlockInfo[] stripedBlks = inodeStripedFile.getBlocks(); for (BlockInfo blockInfo : stripedBlks) { - assertFalse("Mistakenly marked the block as deleted!", - blockInfo.isDeleted()); + assertFalse(blockInfo.isDeleted(), "Mistakenly marked the block as deleted!"); } // delete directory with erasure coding policy dfs.delete(ecDir, true); for (BlockInfo blockInfo : stripedBlks) { - assertTrue("Didn't mark the block as deleted!", blockInfo.isDeleted()); + assertTrue(blockInfo.isDeleted(), "Didn't mark the block as deleted!"); } // Case-2: Verify the behavior of contiguous blocks // Get blocks of contiguous file INode inode = fsd.getINode("/parentDir/someFile"); - assertTrue("Failed to get INodeFile for /parentDir/someFile", - inode instanceof INodeFile); + assertTrue(inode instanceof INodeFile, "Failed to get INodeFile for /parentDir/someFile"); INodeFile inodeFile = (INodeFile) inode; BlockInfo[] contiguousBlks = inodeFile.getBlocks(); for (BlockInfo blockInfo : contiguousBlks) { - assertFalse("Mistakenly marked the block as deleted!", - blockInfo.isDeleted()); + assertFalse(blockInfo.isDeleted(), "Mistakenly marked the block as deleted!"); } // delete parent directory dfs.delete(parentDir, true); for (BlockInfo blockInfo : contiguousBlks) { - assertTrue("Didn't mark the block as deleted!", blockInfo.isDeleted()); + assertTrue(blockInfo.isDeleted(), "Didn't mark the block as deleted!"); } } finally { if (cluster != null) { @@ -384,7 +375,8 @@ public void testDeleteOp() throws Exception { * for EC Striped mode are HOT, COLD and ALL_SSD. For all other policies set * will be ignored and considered default policy. 
*/ - @Test(timeout = 60000) + @Test + @Timeout(value = 60) public void testUnsuitableStoragePoliciesWithECStripedMode() throws Exception { final Configuration conf = new HdfsConfiguration(); @@ -446,7 +438,7 @@ public void testUnsuitableStoragePoliciesWithECStripedMode() fileLen); for (LocatedBlock lb : locatedBlocks.getLocatedBlocks()) { for (StorageType type : lb.getStorageTypes()) { - Assert.assertEquals(StorageType.DISK, type); + assertEquals(StorageType.DISK, type); } } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestTransferFsImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestTransferFsImage.java index 1ec08e49153ef..80d9205850513 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestTransferFsImage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestTransferFsImage.java @@ -17,9 +17,9 @@ */ package org.apache.hadoop.hdfs.server.namenode; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; import java.io.File; import java.io.FileOutputStream; @@ -44,7 +44,8 @@ import org.apache.hadoop.http.HttpServerFunctionalTest; import org.apache.hadoop.test.PathUtils; import org.apache.hadoop.util.StringUtils; -import org.junit.Test; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; import org.mockito.Mockito; import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList; @@ -79,8 +80,8 @@ public void testClientSideException() throws IOException { } catch (IOException ioe) { Mockito.verify(mockStorage).reportErrorOnFile(localPath.get(0)); assertTrue( - "Unexpected exception: " + StringUtils.stringifyException(ioe), - ioe.getMessage().contains("Unable to download to any storage")); + ioe.getMessage().contains("Unable to download to any storage"), + "Unexpected exception: " + StringUtils.stringifyException(ioe)); } finally { cluster.shutdown(); } @@ -110,8 +112,7 @@ public void testClientSideExceptionOnJustOneDir() throws IOException { TransferFsImage.getFileClient(fsName, id, localPaths, mockStorage, false); Mockito.verify(mockStorage).reportErrorOnFile(localPaths.get(0)); - assertTrue("The valid local file should get saved properly", - localPaths.get(1).length() > 0); + assertTrue(localPaths.get(1).length() > 0, "The valid local file should get saved properly"); } finally { cluster.shutdown(); } @@ -120,7 +121,8 @@ public void testClientSideExceptionOnJustOneDir() throws IOException { /** * Test to verify the read timeout */ - @Test(timeout = 10000) + @Test + @Timeout(value = 10) public void testGetImageTimeout() throws Exception { HttpServer2 testServer = HttpServerFunctionalTest.createServer("hdfs"); try { @@ -134,7 +136,7 @@ public void testGetImageTimeout() throws Exception { null, false); fail("TransferImage Should fail with timeout"); } catch (SocketTimeoutException e) { - assertEquals("Read should timeout", "Read timed out", e.getMessage()); + assertEquals("Read timed out", e.getMessage(), "Read should timeout"); } } finally { if (testServer != null) { @@ -146,7 +148,8 @@ public void testGetImageTimeout() throws Exception { /** * Test to verify the timeout of Image upload */ - @Test(timeout = 10000) +
@Test + @Timeout(value = 10) public void testImageUploadTimeout() throws Exception { Configuration conf = new HdfsConfiguration(); NNStorage mockStorage = Mockito.mock(NNStorage.class); @@ -177,7 +180,7 @@ public void testImageUploadTimeout() throws Exception { NameNodeFile.IMAGE, 1L); fail("TransferImage Should fail with timeout"); } catch (SocketTimeoutException e) { - assertEquals("Upload should timeout", "Read timed out", e.getMessage()); + assertEquals("Read timed out", e.getMessage(), "Upload should timeout"); } } finally { testServer.stop(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestTruncateQuotaUpdate.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestTruncateQuotaUpdate.java index 06b57f4c39e5e..9555add943fc8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestTruncateQuotaUpdate.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestTruncateQuotaUpdate.java @@ -28,11 +28,11 @@ import org.apache.hadoop.hdfs.server.namenode.snapshot.FileDiffList; import org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshotFeature; import org.apache.hadoop.test.Whitebox; -import org.junit.Assert; -import org.junit.Test; +import org.junit.jupiter.api.Test; import java.util.ArrayList; +import static org.junit.jupiter.api.Assertions.assertEquals; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -62,19 +62,18 @@ public void testTruncateWithoutSnapshot() { // be -block + (block - 0.5 block) = -0.5 block QuotaCounts count = new QuotaCounts.Builder().build(); file.computeQuotaDeltaForTruncate(BLOCKSIZE + BLOCKSIZE / 2, null, count); - Assert.assertEquals(-BLOCKSIZE / 2 * REPLICATION, count.getStorageSpace()); + assertEquals(-BLOCKSIZE / 2 * REPLICATION, count.getStorageSpace()); // case 2: truncate to 1 block count = new QuotaCounts.Builder().build(); file.computeQuotaDeltaForTruncate(BLOCKSIZE, null, count); - Assert.assertEquals(-(BLOCKSIZE + BLOCKSIZE / 2) * REPLICATION, - count.getStorageSpace()); + assertEquals(-(BLOCKSIZE + BLOCKSIZE / 2) * REPLICATION, count.getStorageSpace()); // case 3: truncate to 0 count = new QuotaCounts.Builder().build(); file.computeQuotaDeltaForTruncate(0, null, count); - Assert.assertEquals(-(BLOCKSIZE * 2 + BLOCKSIZE / 2) * REPLICATION, - count.getStorageSpace()); + assertEquals(-(BLOCKSIZE * 2 + BLOCKSIZE / 2) * REPLICATION, + count.getStorageSpace()); } @Test @@ -87,17 +86,17 @@ public void testTruncateWithSnapshotNoDivergence() { // diff should be +BLOCKSIZE QuotaCounts count = new QuotaCounts.Builder().build(); file.computeQuotaDeltaForTruncate(BLOCKSIZE + BLOCKSIZE / 2, null, count); - Assert.assertEquals(BLOCKSIZE * REPLICATION, count.getStorageSpace()); + assertEquals(BLOCKSIZE * REPLICATION, count.getStorageSpace()); // case 2: truncate to 1 block count = new QuotaCounts.Builder().build(); file.computeQuotaDeltaForTruncate(BLOCKSIZE, null, count); - Assert.assertEquals(0, count.getStorageSpace()); + assertEquals(0, count.getStorageSpace()); // case 3: truncate to 0 count = new QuotaCounts.Builder().build(); file.computeQuotaDeltaForTruncate(0, null, count); - Assert.assertEquals(0, count.getStorageSpace()); + assertEquals(0, count.getStorageSpace()); } @Test @@ -116,20 +115,19 @@ public void testTruncateWithSnapshotAndDivergence() { // as case 1 QuotaCounts count = new QuotaCounts.Builder().build(); 
file.computeQuotaDeltaForTruncate(BLOCKSIZE + BLOCKSIZE / 2, null, count); - Assert.assertEquals(-BLOCKSIZE / 2 * REPLICATION, count.getStorageSpace()); + assertEquals(-BLOCKSIZE / 2 * REPLICATION, count.getStorageSpace()); // case 8: truncate to 2 blocks // the original 2.5 blocks are in snapshot. the block truncated is not // in snapshot. diff should be -0.5 block count = new QuotaCounts.Builder().build(); file.computeQuotaDeltaForTruncate(BLOCKSIZE + BLOCKSIZE / 2, null, count); - Assert.assertEquals(-BLOCKSIZE / 2 * REPLICATION, count.getStorageSpace()); + assertEquals(-BLOCKSIZE / 2 * REPLICATION, count.getStorageSpace()); // case 9: truncate to 0 count = new QuotaCounts.Builder().build(); file.computeQuotaDeltaForTruncate(0, null, count); - Assert.assertEquals(-(BLOCKSIZE + BLOCKSIZE / 2) * REPLICATION, count - .getStorageSpace()); + assertEquals(-(BLOCKSIZE + BLOCKSIZE / 2) * REPLICATION, count.getStorageSpace()); } private INodeFile createMockFile(long size, short replication) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestUpgradeDomainBlockPlacementPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestUpgradeDomainBlockPlacementPolicy.java index abd26d4f5a4f8..d3729c1ed1446 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestUpgradeDomainBlockPlacementPolicy.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestUpgradeDomainBlockPlacementPolicy.java @@ -41,10 +41,12 @@ import org.apache.hadoop.hdfs.util.HostsFileWriter; import org.apache.hadoop.net.StaticMapping; import org.apache.hadoop.test.GenericTestUtils; -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; + +import static org.junit.jupiter.api.Assertions.assertTrue; /** * End-to-end test case for upgrade domain @@ -71,7 +73,7 @@ public class TestUpgradeDomainBlockPlacementPolicy { private HostsFileWriter hostsFileWriter = new HostsFileWriter(); private Configuration conf = new HdfsConfiguration(); - @Before + @BeforeEach public void setup() throws IOException { StaticMapping.resetMap(); conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, DEFAULT_BLOCK_SIZE); @@ -89,7 +91,7 @@ public void setup() throws IOException { refreshDatanodeAdminProperties(); } - @After + @AfterEach public void teardown() throws IOException { hostsFileWriter.cleanup(); if (cluster != null) { @@ -203,12 +205,13 @@ public void testPlacement() throws Exception { } } for (DatanodeID datanodeID : expectedDatanodeIDs) { - Assert.assertTrue(locs.contains(datanodeID)); + assertTrue(locs.contains(datanodeID)); } } } - @Test(timeout = 300000) + @Test + @Timeout(value = 300) public void testPlacementAfterDecommission() throws Exception { final long fileSize = FILE_SIZE; final String testFile = "/testfile-afterdecomm"; @@ -256,7 +259,7 @@ public Boolean get() { cluster.getNamesystem().getBlockManager() .getBlockPlacementPolicy() .verifyBlockPlacement(block.getLocations(), REPLICATION_FACTOR); - Assert.assertTrue(status.isPlacementPolicySatisfied()); + assertTrue(status.isPlacementPolicySatisfied()); } } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestValidateConfigurationSettings.java 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestValidateConfigurationSettings.java index 0cf1fed81e9de..a8e65cfe910f4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestValidateConfigurationSettings.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestValidateConfigurationSettings.java @@ -25,21 +25,24 @@ import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.test.GenericTestUtils; -import org.junit.After; -import org.junit.Test; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; import java.io.File; import java.io.IOException; import java.net.BindException; import java.util.Random; +import static org.junit.jupiter.api.Assertions.assertThrows; + /** * This class tests the validation of the configuration object when passed * to the NameNode */ public class TestValidateConfigurationSettings { - @After + @AfterEach public void cleanUp() { FileUtil.fullyDeleteContents(new File(MiniDFSCluster.getBaseDirectory())); } @@ -49,37 +52,41 @@ public void cleanUp() { * an exception * is thrown when trying to re-use the same port */ - @Test(expected = BindException.class, timeout = 300000) + @Test + @Timeout(value = 300) public void testThatMatchingRPCandHttpPortsThrowException() throws IOException { - - NameNode nameNode = null; - try { - Configuration conf = new HdfsConfiguration(); - File nameDir = new File(MiniDFSCluster.getBaseDirectory(), "name"); - conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, - nameDir.getAbsolutePath()); - - Random rand = new Random(); - final int port = 30000 + rand.nextInt(30000); - - // set both of these to the same port. It should fail. - FileSystem.setDefaultUri(conf, "hdfs://localhost:" + port); - conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "127.0.0.1:" + port); - DFSTestUtil.formatNameNode(conf); - nameNode = new NameNode(conf); - } finally { - if (nameNode != null) { - nameNode.stop(); + assertThrows(BindException.class, () -> { + NameNode nameNode = null; + try { + Configuration conf = new HdfsConfiguration(); + File nameDir = new File(MiniDFSCluster.getBaseDirectory(), "name"); + conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, + nameDir.getAbsolutePath()); + + Random rand = new Random(); + final int port = 30000 + rand.nextInt(30000); + + // set both of these to the same port. It should fail. + FileSystem.setDefaultUri(conf, "hdfs://localhost:" + port); + conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "127.0.0.1:" + port); + DFSTestUtil.formatNameNode(conf); + nameNode = new NameNode(conf); + } finally { + if (nameNode != null) { + nameNode.stop(); + } } - } + }); + } /** * Tests setting the rpc port to a different as the web port that an * exception is NOT thrown */ - @Test(timeout = 300000) + @Test + @Timeout(value = 300) public void testThatDifferentRPCandHttpPortsAreOK() throws IOException { @@ -117,7 +124,8 @@ public void testThatDifferentRPCandHttpPortsAreOK() * HDFS-3013: NameNode format command doesn't pick up * dfs.namenode.name.dir.NameServiceId configuration. 
*/ - @Test(timeout = 300000) + @Test + @Timeout(value = 300) public void testGenericKeysForNameNodeFormat() throws IOException { Configuration conf = new HdfsConfiguration(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestXAttrConfigFlag.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestXAttrConfigFlag.java index c5278371c8bc7..a190ca177cd01 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestXAttrConfigFlag.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestXAttrConfigFlag.java @@ -27,10 +27,12 @@ import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter; import org.apache.hadoop.io.IOUtils; -import org.junit.After; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.ExpectedException; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.function.Executable; + +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; /** * Tests that the configuration flag that controls support for XAttrs is off @@ -43,10 +45,8 @@ public class TestXAttrConfigFlag { private MiniDFSCluster cluster; private DistributedFileSystem fs; - @Rule - public ExpectedException exception = ExpectedException.none(); - @After + @AfterEach public void shutdown() throws Exception { IOUtils.cleanupWithLogger(null, fs); if (cluster != null) { @@ -59,24 +59,21 @@ public void shutdown() throws Exception { public void testSetXAttr() throws Exception { initCluster(true, false); fs.mkdirs(PATH); - expectException(); - fs.setXAttr(PATH, "user.foo", null); + expectException(() -> fs.setXAttr(PATH, "user.foo", null)); } @Test public void testGetXAttrs() throws Exception { initCluster(true, false); fs.mkdirs(PATH); - expectException(); - fs.getXAttrs(PATH); + expectException(() -> fs.getXAttrs(PATH)); } @Test public void testRemoveXAttr() throws Exception { initCluster(true, false); fs.mkdirs(PATH); - expectException(); - fs.removeXAttr(PATH, "user.foo"); + expectException(() -> fs.removeXAttr(PATH, "user.foo")); } @Test @@ -108,9 +105,9 @@ public void testFsImage() throws Exception { * We expect an IOException, and we want the exception text to state the * configuration key that controls XAttr support. 
*/ - private void expectException() { - exception.expect(IOException.class); - exception.expectMessage(DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY); + private void expectException(Executable exec) { + IOException ex = assertThrows(IOException.class, exec); + assertTrue(ex.getMessage().contains(DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY)); } /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestXAttrFeature.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestXAttrFeature.java index 5b0922d0913a8..a68fd7ad19a5c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestXAttrFeature.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestXAttrFeature.java @@ -23,9 +23,10 @@ import org.apache.hadoop.fs.XAttr; import org.apache.hadoop.hdfs.XAttrHelper; -import org.junit.Test; +import org.junit.jupiter.api.Test; -import static org.junit.Assert.*; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; public class TestXAttrFeature {