
Commit 93b0453

zhtttylz and slfan1989 authored
HDFS-12431. [JDK17] Upgrade JUnit from 4 to 5 in hadoop-hdfs Part5. (#7733)
* HDFS-12431. [JDK17] Upgrade JUnit from 4 to 5 in hadoop-hdfs Part5.

Co-authored-by: Shilun Fan <[email protected]>
Reviewed-by: Shilun Fan <[email protected]>
Signed-off-by: Shilun Fan <[email protected]>
1 parent 8345235 commit 93b0453
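
The mechanical core of the migration, visible throughout the diffs below, is twofold: JUnit 4's org.junit.Assert places the optional failure message in the first parameter, while JUnit 5's org.junit.jupiter.api.Assertions moves it to the last; and the lifecycle annotations are renamed. A minimal before/after sketch (the class and values are illustrative, not taken from the patch):

import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertTrue;

class MigrationSketch {
  void example(long expectedLen, long actualLen) {
    // JUnit 4: assertEquals("len mismatch", expectedLen, actualLen);
    assertEquals(expectedLen, actualLen, "len mismatch");
    // JUnit 4: assertTrue("length must be positive", actualLen > 0);
    assertTrue(actualLen > 0, "length must be positive");
  }
}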


43 files changed: +1092 / -1003 lines

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/AppendTestUtil.java

Lines changed: 9 additions & 10 deletions
@@ -17,9 +17,9 @@
  */
 package org.apache.hadoop.hdfs;
 
-import static org.junit.Assert.assertArrayEquals;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertArrayEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 import java.io.IOException;
 import java.io.OutputStream;
@@ -97,9 +97,9 @@ public static int[] randomFilePartition(int n, int parts) {
     }
 
     LOG.info("partition=" + Arrays.toString(p));
-    assertTrue("i=0", p[0] > 0 && p[0] < n);
+    assertTrue(p[0] > 0 && p[0] < n, "i=0");
     for(int i = 1; i < p.length; i++) {
-      assertTrue("i=" + i, p[i] > p[i - 1] && p[i] < n);
+      assertTrue(p[i] > p[i - 1] && p[i] < n, "i=" + i);
     }
     return p;
   }
@@ -217,8 +217,7 @@ public static void checkFullFile(FileSystem fs, Path name, int len,
       boolean checkFileStatus) throws IOException {
     if (checkFileStatus) {
       final FileStatus status = fs.getFileStatus(name);
-      assertEquals("len=" + len + " but status.getLen()=" + status.getLen(),
-          len, status.getLen());
+      assertEquals(len, status.getLen(), "len=" + len + " but status.getLen()=" + status.getLen());
     }
 
     FSDataInputStream stm = fs.open(name);
@@ -231,9 +230,9 @@ public static void checkFullFile(FileSystem fs, Path name, int len,
   private static void checkData(final byte[] actual, int from,
       final byte[] expected, String message) {
     for (int idx = 0; idx < actual.length; idx++) {
-      assertEquals(message+" byte "+(from+idx)+" differs. expected "+
-          expected[from+idx]+" actual "+actual[idx],
-          expected[from+idx], actual[idx]);
+      assertEquals(expected[from + idx], actual[idx],
+          message + " byte " + (from + idx) + " differs. expected " +
+          expected[from + idx] + " actual " + actual[idx]);
       actual[idx] = 0;
     }
   }

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/FileAppendTest4.java

Lines changed: 5 additions & 5 deletions
@@ -26,9 +26,9 @@
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
 
 /** This is a comprehensive append test that tries
  * all combinations of file length and number of appended bytes
@@ -59,15 +59,15 @@ private static void init(Configuration conf) {
     conf.setInt(HdfsClientConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY, PACKET_SIZE);
   }
 
-  @BeforeClass
+  @BeforeAll
   public static void startUp () throws IOException {
     conf = new HdfsConfiguration();
     init(conf);
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(DATANODE_NUM).build();
     fs = cluster.getFileSystem();
   }
 
-  @AfterClass
+  @AfterAll
   public static void tearDown() {
     if (cluster != null) {
       cluster.shutdown();
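
FileAppendTest4 above also shows the class-level lifecycle mapping: @BeforeClass becomes @BeforeAll and @AfterClass becomes @AfterAll. Under JUnit 5's default per-method test-instance lifecycle these methods must be static, which the existing static startUp/tearDown methods already satisfy. An illustrative skeleton only (MiniDFSCluster wiring elided):

import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;

class LifecycleSketch {
  @BeforeAll // JUnit 4: @BeforeClass
  static void startUp() {
    // runs once before all tests; must be static by default in JUnit 5
  }

  @AfterAll // JUnit 4: @AfterClass
  static void tearDown() {
    // runs once after all tests
  }

  @Test
  void example() {
    // per-test body
  }
}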

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/ReadStripedFileWithDecodingHelper.java

Lines changed: 10 additions & 8 deletions
@@ -30,7 +30,6 @@
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.util.StripedBlockUtil;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.Assert;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.slf4j.event.Level;
@@ -40,6 +39,9 @@
 import java.util.ArrayList;
 import java.util.Collection;
 
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+
 /**
  * Utility class for testing online recovery of striped files.
  */
@@ -216,11 +218,11 @@ public static void testReadWithBlockCorrupted(MiniDFSCluster cluster,
         + ", parityBlkDelNum = " + parityBlkDelNum
         + ", deleteBlockFile? " + deleteBlockFile);
     int recoverBlkNum = dataBlkDelNum + parityBlkDelNum;
-    Assert.assertTrue("dataBlkDelNum and parityBlkDelNum should be positive",
-        dataBlkDelNum >= 0 && parityBlkDelNum >= 0);
-    Assert.assertTrue("The sum of dataBlkDelNum and parityBlkDelNum " +
-        "should be between 1 ~ " + NUM_PARITY_UNITS, recoverBlkNum <=
-        NUM_PARITY_UNITS);
+    assertTrue(dataBlkDelNum >= 0 && parityBlkDelNum >= 0,
+        "dataBlkDelNum and parityBlkDelNum should be positive");
+    assertTrue(recoverBlkNum <=
+        NUM_PARITY_UNITS, "The sum of dataBlkDelNum and parityBlkDelNum " +
+        "should be between 1 ~ " + NUM_PARITY_UNITS);
 
     // write a file with the length of writeLen
     Path srcPath = new Path(src);
@@ -248,10 +250,10 @@ public static void corruptBlocks(MiniDFSCluster cluster,
 
     int[] delDataBlkIndices = StripedFileTestUtil.randomArray(0, NUM_DATA_UNITS,
         dataBlkDelNum);
-    Assert.assertNotNull(delDataBlkIndices);
+    assertNotNull(delDataBlkIndices);
     int[] delParityBlkIndices = StripedFileTestUtil.randomArray(NUM_DATA_UNITS,
         NUM_DATA_UNITS + NUM_PARITY_UNITS, parityBlkDelNum);
-    Assert.assertNotNull(delParityBlkIndices);
+    assertNotNull(delParityBlkIndices);
 
     int[] delBlkIndices = new int[recoverBlkNum];
     System.arraycopy(delDataBlkIndices, 0,

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/StripedFileTestUtil.java

Lines changed: 24 additions & 22 deletions
@@ -37,7 +37,6 @@
 import org.apache.hadoop.io.erasurecode.CodecUtil;
 import org.apache.hadoop.io.erasurecode.ErasureCoderOptions;
 import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureEncoder;
-import org.junit.Assert;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -55,7 +54,11 @@
 import java.util.concurrent.TimeoutException;
 import java.util.concurrent.atomic.AtomicInteger;
 
-import static org.junit.Assert.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertArrayEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.fail;
 
 public class StripedFileTestUtil {
   public static final Logger LOG =
@@ -77,7 +80,7 @@ static byte getByte(long pos) {
   static void verifyLength(FileSystem fs, Path srcPath, int fileLength)
       throws IOException {
     FileStatus status = fs.getFileStatus(srcPath);
-    assertEquals("File length should be the same", fileLength, status.getLen());
+    assertEquals(fileLength, status.getLen(), "File length should be the same");
   }
 
   static void verifyPread(DistributedFileSystem fs, Path srcPath,
@@ -109,9 +112,8 @@ static void verifyPread(FileSystem fs, Path srcPath, int fileLength,
         offset += target;
       }
       for (int i = 0; i < fileLength - startOffset; i++) {
-        assertEquals("Byte at " + (startOffset + i) + " is different, "
-            + "the startOffset is " + startOffset, expected[startOffset + i],
-            result[i]);
+        assertEquals(expected[startOffset + i], result[i], "Byte at " + (startOffset + i) +
+            " is different, " + "the startOffset is " + startOffset);
       }
     }
   }
@@ -127,8 +129,8 @@ static void verifyStatefulRead(FileSystem fs, Path srcPath, int fileLength,
         System.arraycopy(buf, 0, result, readLen, ret);
         readLen += ret;
       }
-      assertEquals("The length of file should be the same to write size", fileLength, readLen);
-      Assert.assertArrayEquals(expected, result);
+      assertEquals(fileLength, readLen, "The length of file should be the same to write size");
+      assertArrayEquals(expected, result);
     }
   }
 
@@ -144,8 +146,8 @@ static void verifyStatefulRead(FileSystem fs, Path srcPath, int fileLength,
         result.put(buf);
         buf.clear();
       }
-      assertEquals("The length of file should be the same to write size", fileLength, readLen);
-      Assert.assertArrayEquals(expected, result.array());
+      assertEquals(fileLength, readLen, "The length of file should be the same to write size");
+      assertArrayEquals(expected, result.array());
    }
  }
 
@@ -185,14 +187,14 @@ static void verifySeek(FileSystem fs, Path srcPath, int fileLength,
       if (!(in.getWrappedStream() instanceof WebHdfsInputStream)) {
         try {
           in.seek(-1);
-          Assert.fail("Should be failed if seek to negative offset");
+          fail("Should be failed if seek to negative offset");
         } catch (EOFException e) {
           // expected
         }
 
         try {
           in.seek(fileLength + 1);
-          Assert.fail("Should be failed if seek after EOF");
+          fail("Should be failed if seek after EOF");
         } catch (EOFException e) {
           // expected
         }
@@ -206,8 +208,8 @@ static void assertSeekAndRead(FSDataInputStream fsdis, int pos,
     byte[] buf = new byte[writeBytes - pos];
     IOUtils.readFully(fsdis, buf, 0, buf.length);
     for (int i = 0; i < buf.length; i++) {
-      assertEquals("Byte at " + i + " should be the same",
-          StripedFileTestUtil.getByte(pos + i), buf[i]);
+      assertEquals(StripedFileTestUtil.getByte(pos + i),
+          buf[i], "Byte at " + i + " should be the same");
     }
   }
 
@@ -225,7 +227,7 @@ static DatanodeInfo getDatanodes(StripedDataStreamer streamer) {
       final DatanodeInfo[] datanodes = streamer.getNodes();
       if (datanodes != null) {
         assertEquals(1, datanodes.length);
-        Assert.assertNotNull(datanodes[0]);
+        assertNotNull(datanodes[0]);
         return datanodes[0];
       }
       try {
@@ -377,13 +379,13 @@ static void checkData(DistributedFileSystem dfs, Path srcPath, int length,
     final int parityBlkNum = ecPolicy.getNumParityUnits();
     int index = 0;
     for (LocatedBlock firstBlock : lbs.getLocatedBlocks()) {
-      Assert.assertTrue(firstBlock instanceof LocatedStripedBlock);
+      assertTrue(firstBlock instanceof LocatedStripedBlock);
 
       final long gs = firstBlock.getBlock().getGenerationStamp();
       final long oldGS = oldGSList != null ? oldGSList.get(index++) : -1L;
       final String s = "gs=" + gs + ", oldGS=" + oldGS;
       LOG.info(s);
-      Assert.assertTrue(s, gs >= oldGS);
+      assertTrue(gs >= oldGS, s);
 
       LocatedBlock[] blocks = StripedBlockUtil.parseStripedBlockGroup(
           (LocatedStripedBlock) firstBlock, cellSize,
@@ -456,7 +458,7 @@ static void checkData(DistributedFileSystem dfs, Path srcPath, int length,
       for (int posInBlk = 0; posInBlk < actual.length; posInBlk++) {
         final long posInFile = StripedBlockUtil.offsetInBlkToOffsetInBG(
             cellSize, dataBlkNum, posInBlk, i) + groupPosInFile;
-        Assert.assertTrue(posInFile < length);
+        assertTrue(posInFile < length);
         final byte expected = getByte(posInFile);
 
         if (killed) {
@@ -466,7 +468,7 @@ static void checkData(DistributedFileSystem dfs, Path srcPath, int length,
           String s = "expected=" + expected + " but actual=" + actual[posInBlk]
               + ", posInFile=" + posInFile + ", posInBlk=" + posInBlk
               + ". group=" + group + ", i=" + i;
-          Assert.fail(s);
+          fail(s);
         }
       }
     }
@@ -507,12 +509,12 @@ static void verifyParityBlocks(Configuration conf, final long size,
     try {
       encoder.encode(dataBytes, expectedParityBytes);
     } catch (IOException e) {
-      Assert.fail("Unexpected IOException: " + e.getMessage());
+      fail("Unexpected IOException: " + e.getMessage());
     }
     for (int i = 0; i < parityBytes.length; i++) {
      if (checkSet.contains(i + dataBytes.length)){
-        Assert.assertArrayEquals("i=" + i, expectedParityBytes[i],
-            parityBytes[i]);
+        assertArrayEquals(expectedParityBytes[i],
+            parityBytes[i], "i=" + i);
      }
    }
  }
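
Note that the verifySeek hunk above keeps the JUnit 4-era try/fail/catch idiom for expected exceptions, only swapping Assert.fail for the statically imported fail. JUnit 5 also offers assertThrows, which this patch does not adopt; for comparison only, the same check could be written as follows (in and fileLength stand in for the locals of StripedFileTestUtil#verifySeek):

import static org.junit.jupiter.api.Assertions.assertThrows;

import java.io.EOFException;
import org.apache.hadoop.fs.FSDataInputStream;

class SeekSketch {
  void checkSeekPastEof(FSDataInputStream in, int fileLength) {
    // Seeking beyond EOF is expected to raise EOFException.
    assertThrows(EOFException.class, () -> in.seek(fileLength + 1));
  }
}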

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAbandonBlock.java

Lines changed: 9 additions & 9 deletions
@@ -17,7 +17,8 @@
  */
 package org.apache.hadoop.hdfs;
 
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.fail;
 
 import java.io.IOException;
 
@@ -30,10 +31,9 @@
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 
 /**
  * Test abandoning blocks, which clients do on pipeline creation failure.
@@ -48,14 +48,14 @@ public class TestAbandonBlock {
   private MiniDFSCluster cluster;
   private DistributedFileSystem fs;
 
-  @Before
+  @BeforeEach
   public void setUp() throws Exception {
     cluster = new MiniDFSCluster.Builder(CONF).numDataNodes(2).build();
     fs = cluster.getFileSystem();
     cluster.waitActive();
   }
 
-  @After
+  @AfterEach
   public void tearDown() throws Exception {
     if (fs != null) {
       fs.close();
@@ -100,8 +100,8 @@ public void testAbandonBlock() throws IOException {
     cluster.restartNameNode();
     blocks = dfsclient.getNamenode().getBlockLocations(src, 0,
         Integer.MAX_VALUE);
-    Assert.assertEquals("Blocks " + b + " has not been abandoned.",
-        orginalNumBlocks, blocks.locatedBlockCount() + 1);
+    assertEquals(orginalNumBlocks, blocks.locatedBlockCount() + 1, "Blocks " +
+        b + " has not been abandoned.");
   }
 
   @Test
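
TestAbandonBlock above shows the per-test counterpart of the lifecycle mapping: @Before becomes @BeforeEach and @After becomes @AfterEach. Unlike @BeforeAll/@AfterAll, these remain instance methods and run around every test. An illustrative skeleton:

import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;

class PerTestLifecycleSketch {
  @BeforeEach // JUnit 4: @Before
  void setUp() {
    // fresh fixture before every test
  }

  @AfterEach // JUnit 4: @After
  void tearDown() {
    // cleanup after every test
  }

  @Test
  void example() {
    // test body
  }
}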
