Commit 5bd8b2a

HDFS-12431. [JDK17] Upgrade JUnit from 4 to 5 in hadoop-hdfs Part11. (#7864)
* HDFS-12431. [JDK17] Upgrade JUnit from 4 to 5 in hadoop-hdfs Part11.

Reviewed-by: Shilun Fan <[email protected]>
Signed-off-by: Shilun Fan <[email protected]>
1 parent a414df2 commit 5bd8b2a
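
All 50 files follow the same JUnit 4 to JUnit 5 migration pattern: org.junit imports become org.junit.jupiter.api imports, @Before/@BeforeClass/@After become @BeforeEach/@BeforeAll/@AfterEach, @Test(timeout = ...) becomes @Test plus @Timeout, and assertion failure messages move from the first to the last argument. A minimal before/after sketch of that pattern (a hypothetical ExampleTest class, not code from this commit):

// JUnit 4 style (before):
//
//   import org.junit.Assert;
//   import org.junit.Before;
//   import org.junit.Test;
//
//   @Before
//   public void setUp() { ... }
//
//   @Test(timeout = 120000)                        // milliseconds
//   public void testSomething() {
//     Assert.assertEquals("message", expected, actual);   // message first
//   }

// JUnit 5 style (after):
import static org.junit.jupiter.api.Assertions.assertEquals;

import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.Timeout;

public class ExampleTest {                          // hypothetical class, for illustration only

  private int expected;
  private int actual;

  @BeforeEach
  public void setUp() {
    expected = 1;
    actual = 1;
  }

  @Test
  @Timeout(value = 120)                             // seconds by default in JUnit 5
  public void testSomething() {
    assertEquals(expected, actual, "message");      // message moves to the last argument
  }
}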

50 files changed: +1008 -936 lines


hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNStorageRetentionFunctional.java

Lines changed: 2 additions & 2 deletions
@@ -21,7 +21,7 @@
 import static org.apache.hadoop.hdfs.server.namenode.NNStorage.getImageFileName;
 import static org.apache.hadoop.hdfs.server.namenode.NNStorage.getInProgressEditsFileName;
 import static org.apache.hadoop.test.GenericTestUtils.assertGlobEquals;
-import static org.junit.Assert.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;

 import java.io.File;
 import java.io.IOException;
@@ -34,7 +34,7 @@
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;

 import org.apache.hadoop.thirdparty.com.google.common.base.Joiner;

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNStorageRetentionManager.java

Lines changed: 10 additions & 13 deletions
@@ -20,6 +20,7 @@
 import static org.apache.hadoop.hdfs.server.namenode.NNStorage.getFinalizedEditsFileName;
 import static org.apache.hadoop.hdfs.server.namenode.NNStorage.getImageFileName;
 import static org.apache.hadoop.hdfs.server.namenode.NNStorage.getInProgressEditsFileName;
+import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.mockito.ArgumentMatchers.any;

 import java.io.File;
@@ -45,9 +46,8 @@
 import org.apache.hadoop.hdfs.server.namenode.NNStorageRetentionManager.StoragePurger;
 import org.apache.hadoop.util.Lists;

-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 import org.mockito.ArgumentCaptor;
 import org.mockito.Mockito;

@@ -62,7 +62,7 @@ public class TestNNStorageRetentionManager {
    * For the purpose of this test, purge as many edits as we can
    * with no extra "safety cushion"
    */
-  @Before
+  @BeforeEach
   public void setNoExtraEditRetention() {
     conf.setLong(DFSConfigKeys.DFS_NAMENODE_NUM_EXTRA_EDITS_RETAINED_KEY, 0);
   }
@@ -310,27 +310,24 @@ private void runTest(TestCaseDescription tc) throws IOException {
     for (FSImageFile captured : imagesPurgedCaptor.getAllValues()) {
       capturedPaths.add(fileToPath(captured.getFile()));
     }
-    Assert.assertEquals("Image file check.",
-        Joiner.on(",").join(filesToPaths(tc.expectedPurgedImages)),
-        Joiner.on(",").join(capturedPaths));
+    assertEquals(Joiner.on(",").join(filesToPaths(tc.expectedPurgedImages)),
+        Joiner.on(",").join(capturedPaths), "Image file check.");

     capturedPaths.clear();
     // Check edit logs, and also in progress edits older than minTxIdToKeep
     for (EditLogFile captured : logsPurgedCaptor.getAllValues()) {
       capturedPaths.add(fileToPath(captured.getFile()));
     }
-    Assert.assertEquals("Check old edits are removed.",
-        Joiner.on(",").join(filesToPaths(tc.expectedPurgedLogs)),
-        Joiner.on(",").join(capturedPaths));
+    assertEquals(Joiner.on(",").join(filesToPaths(tc.expectedPurgedLogs)),
+        Joiner.on(",").join(capturedPaths), "Check old edits are removed.");

     capturedPaths.clear();
     // Check in progress edits to keep are marked as stale
     for (EditLogFile captured : staleLogsCaptor.getAllValues()) {
       capturedPaths.add(fileToPath(captured.getFile()));
     }
-    Assert.assertEquals("Check unnecessary but kept edits are marked stale",
-        Joiner.on(",").join(filesToPaths(tc.expectedStaleLogs)),
-        Joiner.on(",").join(capturedPaths));
+    assertEquals(Joiner.on(",").join(filesToPaths(tc.expectedStaleLogs)),
+        Joiner.on(",").join(capturedPaths), "Check unnecessary but kept edits are marked stale");
   }

   private class TestCaseDescription {
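
The assertEquals rewrites in runTest above illustrate the argument-order change: JUnit 4's Assert.assertEquals takes the failure message as the first parameter, while JUnit 5's Assertions.assertEquals takes it as the last. A small self-contained sketch of the same move, with placeholder strings standing in for the Joiner-built path lists (hypothetical class, not from this commit):

import static org.junit.jupiter.api.Assertions.assertEquals;

import org.junit.jupiter.api.Test;

public class AssertionMessageOrderExample {         // hypothetical class, for illustration only

  @Test
  public void messageIsLastArgument() {
    String expectedPaths = "fsimage_1,fsimage_2";    // placeholder for Joiner.on(",").join(...)
    String capturedPaths = "fsimage_1,fsimage_2";
    // JUnit 4: Assert.assertEquals("Image file check.", expectedPaths, capturedPaths);
    // JUnit 5: the message becomes the trailing parameter.
    assertEquals(expectedPaths, capturedPaths, "Image file check.");
  }
}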

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNThroughputBenchmark.java

Lines changed: 37 additions & 22 deletions
@@ -31,19 +31,24 @@
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.util.ExitUtil;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.Timeout;
+
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertNotEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;

 public class TestNNThroughputBenchmark {

-  @BeforeClass
+  @BeforeAll
   public static void setUp() {
     ExitUtil.disableSystemExit();
   }

-  @After
+  @AfterEach
   public void cleanUp() {
     FileUtil.fullyDeleteContents(new File(MiniDFSCluster.getBaseDirectory()));
   }
@@ -66,7 +71,8 @@ public void testNNThroughput() throws Exception {
    * This test runs all benchmarks defined in {@link NNThroughputBenchmark},
    * with explicit local -fs option.
    */
-  @Test(timeout = 120000)
+  @Test
+  @Timeout(value = 120)
   public void testNNThroughputWithFsOption() throws Exception {
     Configuration conf = new HdfsConfiguration();
     conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 16);
@@ -81,7 +87,8 @@ public void testNNThroughputWithFsOption() throws Exception {
   /**
    * This test runs {@link NNThroughputBenchmark} against a mini DFS cluster.
    */
-  @Test(timeout = 120000)
+  @Test
+  @Timeout(value = 120)
   public void testNNThroughputAgainstRemoteNN() throws Exception {
     final Configuration conf = new HdfsConfiguration();
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, 16);
@@ -106,7 +113,8 @@ public void testNNThroughputAgainstRemoteNN() throws Exception {
    * Ranger since only super user e.g. hdfs can enter/exit safemode
    * but any request from super user is not sent for authorization).
    */
-  @Test(timeout = 120000)
+  @Test
+  @Timeout(value = 120)
   public void testNNThroughputAgainstRemoteNNNonSuperUser() throws Exception {
     final Configuration conf = new HdfsConfiguration();
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, 16);
@@ -128,7 +136,8 @@ public void testNNThroughputAgainstRemoteNNNonSuperUser() throws Exception {
    * This test runs {@link NNThroughputBenchmark} against a mini DFS cluster
    * with explicit -fs option.
    */
-  @Test(timeout = 120000)
+  @Test
+  @Timeout(value = 120)
   public void testNNThroughputRemoteAgainstNNWithFsOption() throws Exception {
     final Configuration conf = new HdfsConfiguration();
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, 16);
@@ -153,7 +162,8 @@ public void testNNThroughputRemoteAgainstNNWithFsOption() throws Exception {
    * This test runs {@link NNThroughputBenchmark} against a mini DFS cluster
    * for append operation.
    */
-  @Test(timeout = 120000)
+  @Test
+  @Timeout(value = 120)
   public void testNNThroughputForAppendOp() throws Exception {
     final Configuration conf = new HdfsConfiguration();
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, 16);
@@ -178,10 +188,10 @@ public void testNNThroughputForAppendOp() throws Exception {
     listing = fsNamesystem.getListing("/", HdfsFileStatus.EMPTY_NAME, false);
     HdfsFileStatus[] partialListingAfter = listing.getPartialListing();

-    Assert.assertEquals(partialListing.length, partialListingAfter.length);
+    assertEquals(partialListing.length, partialListingAfter.length);
     for (int i = 0; i < partialListing.length; i++) {
       //Check the modification time after append operation
-      Assert.assertNotEquals(partialListing[i].getModificationTime(),
+      assertNotEquals(partialListing[i].getModificationTime(),
           partialListingAfter[i].getModificationTime());
     }

@@ -196,7 +206,8 @@ public void testNNThroughputForAppendOp() throws Exception {
    * This test runs {@link NNThroughputBenchmark} against a mini DFS cluster
    * for block report operation.
    */
-  @Test(timeout = 120000)
+  @Test
+  @Timeout(value = 120)
   public void testNNThroughputForBlockReportOp() throws Exception {
     final Configuration conf = new HdfsConfiguration();
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, 16);
@@ -217,7 +228,8 @@ public void testNNThroughputForBlockReportOp() throws Exception {
    * This test runs {@link NNThroughputBenchmark} against a mini DFS cluster
    * with explicit -baseDirName option.
    */
-  @Test(timeout = 120000)
+  @Test
+  @Timeout(value = 120)
   public void testNNThroughputWithBaseDir() throws Exception {
     final Configuration conf = new HdfsConfiguration();
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, 16);
@@ -233,13 +245,13 @@ public void testNNThroughputWithBaseDir() throws Exception {
       NNThroughputBenchmark.runBenchmark(benchConf,
           new String[] {"-op", "create", "-keepResults", "-files", "3", "-baseDirName",
               "/nnThroughputBenchmark1", "-close"});
-      Assert.assertTrue(fs.exists(new Path("/nnThroughputBenchmark1")));
-      Assert.assertFalse(fs.exists(new Path("/nnThroughputBenchmark")));
+      assertTrue(fs.exists(new Path("/nnThroughputBenchmark1")));
+      assertFalse(fs.exists(new Path("/nnThroughputBenchmark")));

       NNThroughputBenchmark.runBenchmark(benchConf,
           new String[] {"-op", "all", "-baseDirName", "/nnThroughputBenchmark1"});
-      Assert.assertTrue(fs.exists(new Path("/nnThroughputBenchmark1")));
-      Assert.assertFalse(fs.exists(new Path("/nnThroughputBenchmark")));
+      assertTrue(fs.exists(new Path("/nnThroughputBenchmark1")));
+      assertFalse(fs.exists(new Path("/nnThroughputBenchmark")));
     } finally {
       if (cluster != null) {
         cluster.shutdown();
@@ -251,7 +263,8 @@ public void testNNThroughputWithBaseDir() throws Exception {
    * This test runs {@link NNThroughputBenchmark} against a mini DFS cluster
    * for blockSize with letter suffix.
    */
-  @Test(timeout = 120000)
+  @Test
+  @Timeout(value = 120)
   public void testNNThroughputForBlockSizeWithLetterSuffix() throws Exception {
     final Configuration conf = new HdfsConfiguration();
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, 16);
@@ -271,7 +284,8 @@ public void testNNThroughputForBlockSizeWithLetterSuffix() throws Exception {
    * This test runs {@link NNThroughputBenchmark} against a mini DFS cluster
    * with explicit -blockSize option.
    */
-  @Test(timeout = 120000)
+  @Test
+  @Timeout(value = 120)
   public void testNNThroughputWithBlockSize() throws Exception {
     final Configuration conf = new HdfsConfiguration();
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, 16);
@@ -290,7 +304,8 @@ public void testNNThroughputWithBlockSize() throws Exception {
    * This test runs {@link NNThroughputBenchmark} against a mini DFS cluster
    * with explicit -blockSize option like 1m.
    */
-  @Test(timeout = 120000)
+  @Test
+  @Timeout(value = 120)
   public void testNNThroughputBlockSizeArgWithLetterSuffix() throws Exception {
     final Configuration conf = new HdfsConfiguration();
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, 16);
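
Every @Test(timeout = 120000) above becomes @Test with @Timeout(value = 120) because JUnit 4 timeouts are given in milliseconds while JUnit 5's @Timeout defaults to seconds. The lifecycle annotations change in the same spirit: @BeforeClass becomes @BeforeAll and @After becomes @AfterEach. A small sketch of the equivalent forms (hypothetical test class and methods, not from this commit):

import java.util.concurrent.TimeUnit;

import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.Timeout;

public class TimeoutMigrationExample {              // hypothetical class, for illustration only

  @BeforeAll                                        // JUnit 4: @BeforeClass
  public static void setUpOnce() {
  }

  @AfterEach                                        // JUnit 4: @After
  public void cleanUp() {
  }

  // JUnit 4: @Test(timeout = 120000), interpreted as milliseconds.
  @Test
  @Timeout(value = 120)                             // JUnit 5 default unit is seconds
  public void runsWithinTwoMinutes() {
  }

  // Equivalent form with the unit spelled out explicitly.
  @Test
  @Timeout(value = 120000, unit = TimeUnit.MILLISECONDS)
  public void runsWithinTwoMinutesExplicitUnit() {
  }
}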

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeOptionParsing.java

Lines changed: 11 additions & 8 deletions
@@ -18,19 +18,21 @@
 package org.apache.hadoop.hdfs.server.namenode;

 import static org.apache.hadoop.test.GenericTestUtils.assertExceptionContains;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNull;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.fail;

 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.RollingUpgradeStartupOption;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
-import org.junit.Assert;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.Timeout;

 public class TestNameNodeOptionParsing {

-  @Test(timeout = 10000)
+  @Test
+  @Timeout(value = 10)
   public void testUpgrade() {
     StartupOption opt = null;
     // UPGRADE is set, but nothing else
@@ -104,7 +106,8 @@ public void testUpgrade() {
     assertNull(opt);
   }

-  @Test(timeout = 10000)
+  @Test
+  @Timeout(value = 10)
   public void testRollingUpgrade() {
     {
       final String[] args = {"-rollingUpgrade"};
@@ -132,7 +135,7 @@ public void testRollingUpgrade() {
       final String[] args = {"-rollingUpgrade", "foo"};
       try {
         NameNode.parseArguments(args);
-        Assert.fail();
+        fail();
       } catch(IllegalArgumentException iae) {
         // the exception is expected.
       }
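
The last hunk swaps Assert.fail() for the statically imported Assertions.fail() but keeps the existing try/catch structure. JUnit 5 also provides assertThrows, which could express the same expectation more directly, though this commit does not use it. A hedged sketch of both styles, where Integer.parseInt stands in for NameNode.parseArguments (hypothetical class, not from this commit):

import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.fail;

import org.junit.jupiter.api.Test;

public class ExpectedExceptionExample {             // hypothetical class, for illustration only

  // Style kept by this commit: call fail() if no exception is thrown.
  @Test
  public void failStyle() {
    try {
      Integer.parseInt("foo");                      // stands in for NameNode.parseArguments(args)
      fail();
    } catch (NumberFormatException expected) {
      // the exception is expected.
    }
  }

  // JUnit 5 alternative (not used in this commit): assertThrows.
  @Test
  public void assertThrowsStyle() {
    assertThrows(NumberFormatException.class, () -> Integer.parseInt("foo"));
  }
}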
