@@ -21,7 +21,7 @@
import static org.apache.hadoop.hdfs.server.namenode.NNStorage.getImageFileName;
import static org.apache.hadoop.hdfs.server.namenode.NNStorage.getInProgressEditsFileName;
import static org.apache.hadoop.test.GenericTestUtils.assertGlobEquals;
import static org.junit.Assert.assertEquals;
import static org.junit.jupiter.api.Assertions.assertEquals;

import java.io.File;
import java.io.IOException;
@@ -34,7 +34,7 @@
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
import org.junit.Test;
import org.junit.jupiter.api.Test;

import org.apache.hadoop.thirdparty.com.google.common.base.Joiner;

@@ -45,9 +45,9 @@
import org.apache.hadoop.hdfs.server.namenode.NNStorageRetentionManager.StoragePurger;
import org.apache.hadoop.util.Lists;

import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.mockito.ArgumentCaptor;
import org.mockito.Mockito;

@@ -62,7 +62,7 @@ public class TestNNStorageRetentionManager {
* For the purpose of this test, purge as many edits as we can
* with no extra "safety cushion"
*/
@Before
@BeforeEach
public void setNoExtraEditRetention() {
conf.setLong(DFSConfigKeys.DFS_NAMENODE_NUM_EXTRA_EDITS_RETAINED_KEY, 0);
}
@@ -310,27 +310,24 @@ private void runTest(TestCaseDescription tc) throws IOException {
for (FSImageFile captured : imagesPurgedCaptor.getAllValues()) {
capturedPaths.add(fileToPath(captured.getFile()));
}
Assert.assertEquals("Image file check.",
Joiner.on(",").join(filesToPaths(tc.expectedPurgedImages)),
Joiner.on(",").join(capturedPaths));
Assertions.assertEquals(Joiner.on(",").join(filesToPaths(tc.expectedPurgedImages)),
Joiner.on(",").join(capturedPaths), "Image file check.");

[Review comment (Contributor): Extract to import]
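The suggestion above presumably asks for a static import of the JUnit 5 assertion so the Assertions. prefix can be dropped; a minimal sketch of that follow-up, reusing the names from this hunk:

// Hypothetical follow-up to the "Extract to import" suggestion:
import static org.junit.jupiter.api.Assertions.assertEquals;

// ...the call in runTest() then shortens to:
assertEquals(Joiner.on(",").join(filesToPaths(tc.expectedPurgedImages)),
    Joiner.on(",").join(capturedPaths), "Image file check.");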

capturedPaths.clear();
// Check edit logs, and also in progress edits older than minTxIdToKeep
for (EditLogFile captured : logsPurgedCaptor.getAllValues()) {
capturedPaths.add(fileToPath(captured.getFile()));
}
Assert.assertEquals("Check old edits are removed.",
Joiner.on(",").join(filesToPaths(tc.expectedPurgedLogs)),
Joiner.on(",").join(capturedPaths));
Assertions.assertEquals(Joiner.on(",").join(filesToPaths(tc.expectedPurgedLogs)),
Joiner.on(",").join(capturedPaths), "Check old edits are removed.");

capturedPaths.clear();
// Check in progress edits to keep are marked as stale
for (EditLogFile captured : staleLogsCaptor.getAllValues()) {
capturedPaths.add(fileToPath(captured.getFile()));
}
Assert.assertEquals("Check unnecessary but kept edits are marked stale",
Joiner.on(",").join(filesToPaths(tc.expectedStaleLogs)),
Joiner.on(",").join(capturedPaths));
Assertions.assertEquals(Joiner.on(",").join(filesToPaths(tc.expectedStaleLogs)),
Joiner.on(",").join(capturedPaths), "Check unnecessary but kept edits are marked stale");
}
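Besides swapping Assert for Assertions, these three rewrites move the failure message from the first argument (JUnit 4) to the last (JUnit 5); a minimal illustration of the pattern, with placeholder expected/actual values:

// JUnit 4: assertEquals(String message, Object expected, Object actual)
Assert.assertEquals("Image file check.", expected, actual);

// JUnit 5: assertEquals(Object expected, Object actual, String message)
Assertions.assertEquals(expected, actual, "Image file check.");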

private class TestCaseDescription {
@@ -31,19 +31,20 @@
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.util.ExitUtil;
import org.junit.After;
import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.Timeout;

public class TestNNThroughputBenchmark {

@BeforeClass
public static void setUp() {
@BeforeAll
public static void setUp() {
[Review comment (Contributor): check style issue.]
ExitUtil.disableSystemExit();
}
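The checkstyle remark presumably concerns indentation or trailing whitespace on the re-added setUp() line; under that assumption, the cleanly formatted JUnit 5 version would read:

@BeforeAll
public static void setUp() {
  ExitUtil.disableSystemExit();
}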

@After
@AfterEach
public void cleanUp() {
FileUtil.fullyDeleteContents(new File(MiniDFSCluster.getBaseDirectory()));
}
@@ -66,7 +67,8 @@ public void testNNThroughput() throws Exception {
* This test runs all benchmarks defined in {@link NNThroughputBenchmark},
* with explicit local -fs option.
*/
@Test(timeout = 120000)
@Test
@Timeout(value = 120)
public void testNNThroughputWithFsOption() throws Exception {
Configuration conf = new HdfsConfiguration();
conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 16);
@@ -81,7 +83,8 @@ public void testNNThroughputWithFsOption() throws Exception {
/**
* This test runs {@link NNThroughputBenchmark} against a mini DFS cluster.
*/
@Test(timeout = 120000)
@Test
@Timeout(value = 120)
public void testNNThroughputAgainstRemoteNN() throws Exception {
final Configuration conf = new HdfsConfiguration();
conf.setInt(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, 16);
@@ -106,7 +109,8 @@ public void testNNThroughputAgainstRemoteNN() throws Exception {
* Ranger since only super user e.g. hdfs can enter/exit safemode
* but any request from super user is not sent for authorization).
*/
@Test(timeout = 120000)
@Test
@Timeout(value = 120)
public void testNNThroughputAgainstRemoteNNNonSuperUser() throws Exception {
final Configuration conf = new HdfsConfiguration();
conf.setInt(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, 16);
@@ -128,7 +132,8 @@ public void testNNThroughputAgainstRemoteNNNonSuperUser() throws Exception {
* This test runs {@link NNThroughputBenchmark} against a mini DFS cluster
* with explicit -fs option.
*/
@Test(timeout = 120000)
@Test
@Timeout(value = 120)
public void testNNThroughputRemoteAgainstNNWithFsOption() throws Exception {
final Configuration conf = new HdfsConfiguration();
conf.setInt(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, 16);
@@ -153,7 +158,8 @@ public void testNNThroughputRemoteAgainstNNWithFsOption() throws Exception {
* This test runs {@link NNThroughputBenchmark} against a mini DFS cluster
* for append operation.
*/
@Test(timeout = 120000)
@Test
@Timeout(value = 120)
public void testNNThroughputForAppendOp() throws Exception {
final Configuration conf = new HdfsConfiguration();
conf.setInt(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, 16);
@@ -178,10 +184,10 @@ public void testNNThroughputForAppendOp() throws Exception {
listing = fsNamesystem.getListing("/", HdfsFileStatus.EMPTY_NAME, false);
HdfsFileStatus[] partialListingAfter = listing.getPartialListing();

Assert.assertEquals(partialListing.length, partialListingAfter.length);
Assertions.assertEquals(partialListing.length, partialListingAfter.length);
for (int i = 0; i < partialListing.length; i++) {
//Check the modification time after append operation
Assert.assertNotEquals(partialListing[i].getModificationTime(),
Assertions.assertNotEquals(partialListing[i].getModificationTime(),
partialListingAfter[i].getModificationTime());
}

@@ -196,7 +202,8 @@ public void testNNThroughputForAppendOp() throws Exception {
* This test runs {@link NNThroughputBenchmark} against a mini DFS cluster
* for block report operation.
*/
@Test(timeout = 120000)
@Test
@Timeout(value = 120)
public void testNNThroughputForBlockReportOp() throws Exception {
final Configuration conf = new HdfsConfiguration();
conf.setInt(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, 16);
@@ -217,7 +224,8 @@ public void testNNThroughputForBlockReportOp() throws Exception {
* This test runs {@link NNThroughputBenchmark} against a mini DFS cluster
* with explicit -baseDirName option.
*/
@Test(timeout = 120000)
@Test
@Timeout(value = 120)
public void testNNThroughputWithBaseDir() throws Exception {
final Configuration conf = new HdfsConfiguration();
conf.setInt(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, 16);
@@ -233,13 +241,13 @@ public void testNNThroughputWithBaseDir() throws Exception {
NNThroughputBenchmark.runBenchmark(benchConf,
new String[] {"-op", "create", "-keepResults", "-files", "3", "-baseDirName",
"/nnThroughputBenchmark1", "-close"});
Assert.assertTrue(fs.exists(new Path("/nnThroughputBenchmark1")));
Assert.assertFalse(fs.exists(new Path("/nnThroughputBenchmark")));
Assertions.assertTrue(fs.exists(new Path("/nnThroughputBenchmark1")));
Assertions.assertFalse(fs.exists(new Path("/nnThroughputBenchmark")));

NNThroughputBenchmark.runBenchmark(benchConf,
new String[] {"-op", "all", "-baseDirName", "/nnThroughputBenchmark1"});
Assert.assertTrue(fs.exists(new Path("/nnThroughputBenchmark1")));
Assert.assertFalse(fs.exists(new Path("/nnThroughputBenchmark")));
Assertions.assertTrue(fs.exists(new Path("/nnThroughputBenchmark1")));
Assertions.assertFalse(fs.exists(new Path("/nnThroughputBenchmark")));
} finally {
if (cluster != null) {
cluster.shutdown();
@@ -251,7 +259,8 @@ public void testNNThroughputWithBaseDir() throws Exception {
* This test runs {@link NNThroughputBenchmark} against a mini DFS cluster
* for blockSize with letter suffix.
*/
@Test(timeout = 120000)
@Test
@Timeout(value = 120)
public void testNNThroughputForBlockSizeWithLetterSuffix() throws Exception {
final Configuration conf = new HdfsConfiguration();
conf.setInt(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, 16);
@@ -271,7 +280,8 @@ public void testNNThroughputForBlockSizeWithLetterSuffix() throws Exception {
* This test runs {@link NNThroughputBenchmark} against a mini DFS cluster
* with explicit -blockSize option.
*/
@Test(timeout = 120000)
@Test
@Timeout(value = 120)
public void testNNThroughputWithBlockSize() throws Exception {
final Configuration conf = new HdfsConfiguration();
conf.setInt(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, 16);
@@ -290,7 +300,8 @@ public void testNNThroughputWithBlockSize() throws Exception {
* This test runs {@link NNThroughputBenchmark} against a mini DFS cluster
* with explicit -blockSize option like 1m.
*/
@Test(timeout = 120000)
@Test
@Timeout(value = 120)
public void testNNThroughputBlockSizeArgWithLetterSuffix() throws Exception {
final Configuration conf = new HdfsConfiguration();
conf.setInt(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, 16);
@@ -18,19 +18,21 @@
package org.apache.hadoop.hdfs.server.namenode;

import static org.apache.hadoop.test.GenericTestUtils.assertExceptionContains;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNull;
import static org.junit.jupiter.api.Assertions.assertTrue;

import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.RollingUpgradeStartupOption;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
import org.junit.Assert;
import org.junit.Test;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.Timeout;

public class TestNameNodeOptionParsing {

@Test(timeout = 10000)
@Test
@Timeout(value = 10)
public void testUpgrade() {
StartupOption opt = null;
// UPGRADE is set, but nothing else
@@ -104,7 +106,8 @@ public void testUpgrade() {
assertNull(opt);
}

@Test(timeout = 10000)
@Test
@Timeout(value = 10)
public void testRollingUpgrade() {
{
final String[] args = {"-rollingUpgrade"};
@@ -132,7 +135,7 @@ public void testRollingUpgrade() {
final String[] args = {"-rollingUpgrade", "foo"};
try {
NameNode.parseArguments(args);
Assert.fail();
Assertions.fail();
} catch(IllegalArgumentException iae) {
// the exception is expected.
}
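This migration keeps the JUnit 4 try/fail/catch structure and only swaps Assert.fail() for Assertions.fail(); an equivalent JUnit 5 idiom, not used in this diff, would be assertThrows, sketched here with the arguments from the surrounding block:

// Alternative JUnit 5 form (not what this change does):
Assertions.assertThrows(IllegalArgumentException.class,
    () -> NameNode.parseArguments(new String[] {"-rollingUpgrade", "foo"}));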